repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
ibmsoe/tensorflow
refs/heads/master
tensorflow/python/kernel_tests/stack_op_test.py
65
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for Pack Op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import variables from tensorflow.python.platform import test def np_split_squeeze(array, axis): axis_len = array.shape[axis] return [ np.squeeze( arr, axis=(axis,)) for arr in np.split( array, axis_len, axis=axis) ] class StackOpTest(test.TestCase): def testSimple(self): np.random.seed(7) with self.test_session(use_gpu=True): for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2): data = np.random.randn(*shape) # Convert [data[0], data[1], ...] 
separately to tensorflow # TODO(irving): Remove list() once we handle maps correctly xs = list(map(constant_op.constant, data)) # Pack back into a single tensorflow tensor c = array_ops.stack(xs) self.assertAllEqual(c.eval(), data) c = array_ops.parallel_stack(xs) self.assertAllEqual(c.eval(), data) def testConst(self): np.random.seed(7) with self.test_session(use_gpu=True): for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2): data = np.random.randn(*shape).astype(np.float32) # Pack back into a single tensorflow tensor directly using np array c = array_ops.stack(data) # This is implemented via a Const: self.assertEqual(c.op.type, "Const") self.assertAllEqual(c.eval(), data) c = array_ops.parallel_stack(data) self.assertAllEqual(c.eval(), data) # Python lists also work for 1-D case: if len(shape) == 1: data_list = list(data) cl = array_ops.stack(data_list) self.assertEqual(cl.op.type, "Const") self.assertAllEqual(cl.eval(), data) cl = array_ops.parallel_stack(data_list) self.assertAllEqual(cl.eval(), data) # Verify that shape induction works with shapes produced via const stack a = constant_op.constant([1, 2, 3, 4, 5, 6]) b = array_ops.reshape(a, array_ops.stack([2, 3])) self.assertAllEqual(b.get_shape(), [2, 3]) def testGradientsAxis0(self): np.random.seed(7) for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2): data = np.random.randn(*shape) shapes = [shape[1:]] * shape[0] with self.test_session(use_gpu=True): # TODO(irving): Remove list() once we handle maps correctly xs = list(map(constant_op.constant, data)) c = array_ops.stack(xs) err = gradient_checker.compute_gradient_error(xs, shapes, c, shape) self.assertLess(err, 1e-6) def testGradientsAxis1(self): np.random.seed(7) for shape in (2, 3), (3, 2), (4, 3, 2): data = np.random.randn(*shape) shapes = [shape[1:]] * shape[0] out_shape = list(shape[1:]) out_shape.insert(1, shape[0]) with self.test_session(use_gpu=True): # TODO(irving): Remove list() once we handle maps correctly xs = list(map(constant_op.constant, 
data)) c = array_ops.stack(xs, axis=1) err = gradient_checker.compute_gradient_error(xs, shapes, c, out_shape) self.assertLess(err, 1e-6) def testZeroSize(self): # Verify that stack doesn't crash for zero size inputs with self.test_session(use_gpu=True): for shape in (0,), (3, 0), (0, 3): x = np.zeros((2,) + shape).astype(np.int32) p = array_ops.stack(list(x)).eval() self.assertAllEqual(p, x) p = array_ops.parallel_stack(list(x)).eval() self.assertAllEqual(p, x) def testAxis0Default(self): with self.test_session(use_gpu=True): t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])] stacked = array_ops.stack(t).eval() parallel_stacked = array_ops.parallel_stack(t).eval() self.assertAllEqual(stacked, np.array([[1, 2, 3], [4, 5, 6]])) self.assertAllEqual(parallel_stacked, np.array([[1, 2, 3], [4, 5, 6]])) def testAgainstNumpy(self): # For 1 to 5 dimensions. for i in range(1, 6): expected = np.random.random(np.random.permutation(i) + 1) # For all the possible axis to split it, including negative indices. 
for j in range(-i, i): test_arrays = np_split_squeeze(expected, j) with self.test_session(use_gpu=True): actual_pack = array_ops.stack(test_arrays, axis=j) self.assertEqual(expected.shape, actual_pack.get_shape()) actual_pack = actual_pack.eval() actual_stack = array_ops.stack(test_arrays, axis=j) self.assertEqual(expected.shape, actual_stack.get_shape()) actual_stack = actual_stack.eval() self.assertNDArrayNear(expected, actual_stack, 1e-6) def testDimOutOfRange(self): t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])] with self.assertRaisesRegexp(ValueError, r"axis = 2 not in \[-2, 2\)"): array_ops.stack(t, axis=2) def testDimOutOfNegativeRange(self): t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])] with self.assertRaisesRegexp(ValueError, r"axis = -3 not in \[-2, 2\)"): array_ops.stack(t, axis=-3) class AutomaticPackingTest(test.TestCase): def testSimple(self): with self.test_session(use_gpu=True): self.assertAllEqual( [1, 0, 2], ops.convert_to_tensor([1, constant_op.constant(0), 2]).eval()) self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]], ops.convert_to_tensor( [[0, 0, 0], [0, constant_op.constant(1), 0], [0, 0, 0]]).eval()) self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]], ops.convert_to_tensor( [[0, 0, 0], constant_op.constant([0, 1, 0]), [0, 0, 0]]).eval()) self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]], ops.convert_to_tensor([ constant_op.constant([0, 0, 0]), constant_op.constant([0, 1, 0]), constant_op.constant([0, 0, 0]) ]).eval()) def testWithNDArray(self): with self.test_session(use_gpu=True): result = ops.convert_to_tensor([[[0., 0.], constant_op.constant([1., 1.])], np.array( [[2., 2.], [3., 3.]], dtype=np.float32)]) self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]], result.eval()) def testVariable(self): with self.test_session(use_gpu=True): v = variables.Variable(17) result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]]) v.initializer.run() self.assertAllEqual([[0, 
0, 0], [0, 17, 0], [0, 0, 0]], result.eval()) v.assign(38).op.run() self.assertAllEqual([[0, 0, 0], [0, 38, 0], [0, 0, 0]], result.eval()) def testDtype(self): t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]) self.assertEqual(dtypes.float32, t_0.dtype) t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant( [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]]) self.assertEqual(dtypes.float64, t_1.dtype) t_2 = ops.convert_to_tensor( [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64) self.assertEqual(dtypes.float64, t_2.dtype) with self.assertRaises(TypeError): ops.convert_to_tensor([ constant_op.constant( [0., 0., 0.], dtype=dtypes.float32), constant_op.constant( [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.] ]) with self.assertRaises(TypeError): ops.convert_to_tensor( [[0., 0., 0.], constant_op.constant( [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]], dtype=dtypes.float32) with self.assertRaises(TypeError): ops.convert_to_tensor( [constant_op.constant( [0., 0., 0.], dtype=dtypes.float64)], dtype=dtypes.float32) def testPlaceholder(self): with self.test_session(use_gpu=True): # Test using placeholder with a defined shape. ph_0 = array_ops.placeholder(dtypes.int32, shape=[]) result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]]) self.assertAllEqual( [[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 1})) self.assertAllEqual( [[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 2})) # Test using placeholder with an undefined shape. ph_1 = array_ops.placeholder(dtypes.int32) result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]]) self.assertAllEqual( [[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 1})) self.assertAllEqual( [[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 2})) def testShapeErrors(self): # Static shape error. 
ph_0 = array_ops.placeholder(dtypes.int32, shape=[1]) with self.assertRaises(ValueError): ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]]) # Dynamic shape error. ph_1 = array_ops.placeholder(dtypes.int32) result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]]) with self.test_session(use_gpu=True): with self.assertRaises(errors_impl.InvalidArgumentError): result_1.eval(feed_dict={ph_1: [1]}) if __name__ == "__main__": test.main()
alberts/check_mk
refs/heads/master
web/plugins/wato/check_mk_configuration.py
1
#!/usr/bin/python # -*- encoding: utf-8; py-indent-offset: 4 -*- # +------------------------------------------------------------------+ # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | # | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. # The official homepage is at http://mathias-kettner.de/check_mk. # # check_mk is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation in version 2. check_mk is distributed # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more de- # ails. You should have received a copy of the GNU General Public # License along with GNU Make; see the file COPYING. If not, write # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. # .--Global Settings-----------------------------------------------------. 
# | ____ _ _ _ ____ _ _ _ | # | / ___| | ___ | |__ __ _| | / ___| ___| |_| |_(_)_ __ __ _ ___ | # || | _| |/ _ \| '_ \ / _` | | \___ \ / _ \ __| __| | '_ \ / _` / __| | # || |_| | | (_) | |_) | (_| | | ___) | __/ |_| |_| | | | | (_| \__ \ | # | \____|_|\___/|_.__/ \__,_|_| |____/ \___|\__|\__|_|_| |_|\__, |___/ | # | |___/ | # +----------------------------------------------------------------------+ # | Global configuration settings for main.mk and multisite.mk | # '----------------------------------------------------------------------' deprecated = _("Deprecated") group = _("Status GUI (Multisite)") register_configvar(group, "debug", Checkbox(title = _("Debug mode"), label = _("enable debug mode"), help = _("When Multisite is running in debug mode, internal Python error messages " "are being displayed and various debug information in other places is " "also available."), default_value = False), domain = "multisite") register_configvar(group, "profile", Checkbox( title = _("Profile requests"), label = _("enable profile mode"), help = _("It is possible to profile the rendering process of Multisite pages. This " "Is done using the Python module cProfile. When enabled two files are placed " "into the Multisite var directory named <code>multisite.profile</code> and " "<code>multisite.profile.py</code>. 
By executing the later file you can get " "runtime statistics about the last processed page."), default_value = False ), domain = "multisite" ) register_configvar(group, "debug_livestatus_queries", Checkbox(title = _("Debug Livestatus queries"), label = _("enable debug of Livestatus queries"), help = _("With this option turned on all Livestatus queries made by Multisite " "in order to render views are being displayed."), default_value = False), domain = "multisite") register_configvar(group, "buffered_http_stream", Checkbox(title = _("Buffered HTTP stream"), label = _("enable buffering"), help = _("When buffering the HTTP stream is enabled, then Multisite " "will not send single TCP segments for each particle of HTML " "output but try to make larger segments. This saves bandwidth " "especially when using HTTPS. On the backside there might be " "a higher latency for the beginning of pages being displayed."), default_value = True), domain = "multisite") register_configvar(group, "selection_livetime", Integer( title = _('Checkbox Selection Livetime'), help = _('This option defines the maximum age of unmodified checkbox selections stored for users. ' 'If a user modifies the selection in a view, these selections are persisted for the currently ' 'open view. When a view is re-opened a new selection is used. The old one remains on the ' 'server until the livetime is exceeded.'), minvalue = 1, default_value = 3600, ), domain = "multisite", ) register_configvar(group, "show_livestatus_errors", Checkbox(title = _("Show MK Livestatus error messages"), label = _("show errors"), help = _("This option controls whether error messages from unreachable sites are shown in the output of " "views. Those error messages shall alert you that not all data from all sites has been shown. " "Other people - however - find those messages distracting. 
"), default_value = True), domain = "multisite") register_configvar(group, "enable_sounds", Checkbox(title = _("Enable sounds in views"), label = _("enable sounds"), help = _("If sounds are enabled then the user will be alarmed by problems shown " "in a Multisite status view if that view has been configured for sounds. " "From the views shipped in with Multisite all problem views have sounds " "enabled."), default_value = False), domain = "multisite") register_configvar(group, "context_buttons_to_show", Optional( Integer( title = _("show"), label = _("buttons"), minvalue = 1, maxvalue = 100, size = 2), default_value = 5, title = _("Number of context buttons to show"), label = _("Show only most frequently used buttons"), help = _("If this option is enabled, then Multisite only show the most " "used context buttons and hides the rest. Which buttons are used " "how often is computed separately per user.")), domain = "multisite") register_configvar(group, "soft_query_limit", Integer(title = _("Soft query limit"), help = _("Whenever the number of returned datasets of a view would exceed this " "limit, a warning is being displayed and no further data is being shown. " "A normal user can override this limit with one mouse click."), minvalue = 1, default_value = 1000), domain = "multisite") register_configvar(group, "hard_query_limit", Integer(title = _("Hard query limit"), help = _("Whenever the number of returned datasets of a view would exceed this " "limit, an error message is shown. The normal user cannot override " "the hard limit. The purpose of the hard limit is to secure the server " "against useless queries with huge result sets."), minvalue = 1, default_value = 5000), domain = "multisite") register_configvar(group, "quicksearch_dropdown_limit", Integer(title = _("Number of elements to show in Quicksearch"), help = _("When typing a texts in the Quicksearch snapin, a dropdown will " "appear listing all matching host names containing that text. 
" "That list is limited in size so that the dropdown will not get " "too large when you have a huge number of lists. "), minvalue = 1, default_value = 80), domain = "multisite") register_configvar(group, "table_row_limit", Integer(title = _("Limit the number of rows shown in tables"), help = _("Several pages which use tables to show data in rows, like the " "\"Users\" configuration page, can be configured to show " "only a limited number of rows when accessing the pages."), minvalue = 1, default_value = 100, unit = _('rows')), domain = "multisite") register_configvar(group, "start_url", TextAscii(title = _("Start-URL to display in main frame"), help = _("When you point your browser to the Multisite GUI, usually the dashboard " "is shown in the main (right) frame. You can replace this with any other " "URL you like here."), size = 80, default_value = "dashboard.py", attrencode = True), domain = "multisite") register_configvar(group, "page_heading", TextUnicode(title = _("HTML-Title of HTML Multisite GUI"), help = _("This title will be displayed in your browser's title bar or tab. If you are " "using OMD then you can embed a <tt>%s</tt>. 
This will be replaced by the name " "of the OMD site."), size = 80, default_value = u"Check_MK %s", attrencode = True), domain = "multisite") register_configvar(group, "pagetitle_date_format", DropdownChoice( title = _("Date format for page titles"), help = _("When enabled, the headline of each page also displays "\ "the date in addition the time."), choices = [ (None, _("Do not display a date")), ('yyyy-mm-dd', _("YYYY-MM-DD")), ('dd.mm.yyyy', _("DD.MM.YYYY")), ], default_value = None ), domain = "multisite") register_configvar(group, "escape_plugin_output", Checkbox(title = _("Escape HTML codes in plugin output"), label = _("Prevent loading HTML from plugin output or log messages"), help = _("By default, for security reasons, Multisite does not interpret any HTML " "code received from external sources, like plugin output or log messages. " "If you are really sure what you are doing and need to have HTML codes, like " "links rendered, disable this option. Be aware, you might open the way " "for several injection attacks."), default_value = True), domain = "multisite") register_configvar(group, "multisite_draw_ruleicon", Checkbox(title = _("Show icon linking to WATO parameter editor for services"), label = _("Show WATO icon"), help = _("When enabled a rule editor icon is displayed for each " "service in the multisite views. It is only displayed if the user " "does have the permission to edit rules."), default_value = True), domain = "multisite") def wato_host_tag_group_choices(): # We add to the choices: # 1. All host tag groups with their id # 2. 
All *topics* that: # - consist only of checkbox tags # - contain at least two entries choices = [] by_topic = {} for entry in config.wato_host_tags: tgid = entry[0] topic, tit = parse_hosttag_title(entry[1]) choices.append((tgid, tit)) by_topic.setdefault(topic, []).append(entry) # Now search for checkbox-only-topics for topic, entries in by_topic.items(): for entry in entries: tgid, title, tags = entry if len(tags) != 1: break else: if len(entries) > 1: choices.append(("topic:" + topic, _("Topic") + ": " + topic)) return choices register_configvar(group, "virtual_host_trees", ListOf( Tuple( elements = [ TextUnicode( title = _("Title of the tree"), allow_empty = False, ), DualListChoice( allow_empty = False, custom_order = True, choices = wato_host_tag_group_choices, ) ] ), add_label = _("Create new virtual host tree configuration"), title = _("Virtual Host Trees"), help = _("Here you can define tree configurations for the snapin <i>Virtual Host-Trees</i>. " "These trees organize your hosts based on their values in certain host tag groups. " "Each host tag group you select will create one level in the tree."), ), domain = "multisite", ) register_configvar(group, "reschedule_timeout", Float(title = _("Timeout for rescheduling checks in Multisite"), help = _("When you reschedule a check by clicking on the &quot;arrow&quot;-icon " "then Multisite will use this number of seconds as a timeout. If the " "monitoring core has not executed the check within this time, an error " "will be displayed and the page not reloaded."), minvalue = 1.0, default_value = 10.0, unit = "sec", display_format = "%.1f"), domain = "multisite") register_configvar(group, "sidebar_update_interval", Float(title = _("Interval of sidebar status updates"), help = _("The information provided by the sidebar snapins is refreshed in a regular " "interval. You can change the refresh interval to fit your needs here. 
This " "value means that all snapnis which request a regular refresh are updated " "in this interval."), minvalue = 10.0, default_value = 30.0, unit = "sec", display_format = "%.1f"), domain = "multisite") register_configvar(group, "sidebar_notify_interval", Optional( Float( minvalue = 10.0, default_value = 60.0, unit = "sec", display_format = "%.1f" ), title = _("Interval of sidebar popup notification updates"), help = _("The sidebar can be configured to regularly check for pending popup notififcations. " "This is disabled by default."), none_label = _('(disabled)'), ), domain = "multisite") register_configvar(group, "adhoc_downtime", Optional( Dictionary( optional_keys = False, elements = [ ("duration", Integer( title = _("Duration"), help = _("The duration in minutes of the adhoc downtime."), minvalue = 1, unit = _("minutes"), default_value = 60, )), ("comment", TextUnicode( title = _("Adhoc comment"), help = _("The comment which is automatically sent with an adhoc downtime"), size = 80, allow_empty = False, attrencode = True, )), ], ), title = _("Adhoc downtime"), label = _("Enable adhoc downtime"), help = _("This setting allows to set an adhoc downtime comment and its duration. " "When enabled a new button <i>Adhoc downtime for __ minutes</i> will " "be available in the command form."), ), domain = "multisite", ) register_configvar(group, "bi_precompile_on_demand", Checkbox(title = _("Precompile aggregations on demand"), label = _("Only precompile on demand"), help = _( "By default all aggregations in Check_MK BI are precompiled on first " "usage of a BI or host/service related dialog in the status GUI. " "In case of large environments with many BI aggregations this complete " "precompilation might take too much time (several seconds). It is now possible " "to change the precompilation to be executed on demand. BI only precompiles the " "aggregations which are really requested by the users." 
), default_value = False), domain = "multisite") register_configvar(group, "bi_compile_log", Optional( Filename( label = _("Absolute path to log file"), default = defaults.var_dir + '/web/bi-compile.log', ), title = _("Logfile for BI compilation diagnostics"), label = _("Activate logging of BI compilations into a logfile"), help = _("If this option is used and set to a filename, Check_MK BI will create a logfile " "containing details about compiling BI aggregations. This includes statistics and " "details for each executed compilation.")), domain = "multisite") register_configvar(group, "auth_by_http_header", Optional( TextAscii( label = _("HTTP Header Variable"), help = _("Configure the name of the environment variable to read " "from the incoming HTTP requests"), default_value = 'REMOTE_USER', attrencode = True, ), title = _("Authenticate users by incoming HTTP requests"), label = _("Activate HTTP header authentication (Warning: Only activate " "in trusted environments, see help for details)"), help = _("If this option is enabled, multisite reads the configured HTTP header " "variable from the incoming HTTP request and simply takes the string " "in this variable as name of the authenticated user. " "Be warned: Only allow access from trusted ip addresses " "(Apache <tt>Allow from</tt>), like proxy " "servers, to this webpage. A user with access to this page could simply fake " "the authentication information. This option can be useful to " " realize authentication in reverse proxy environments.") ), domain = "multisite") register_configvar(group, "staleness_threshold", Float( title = _('Staleness value to mark hosts / services stale'), help = _('The staleness value of a host / service is calculated by measuring the ' 'configured check intervals a check result is old. A value of 1.5 means the ' 'current check result has been gathered one and a half check intervals of an object. 
' 'This would mean 90 seconds in case of a check which is checked each 60 seconds.'), minvalue = 1, default_value = 1.5, ), domain = "multisite", ) register_configvar(group, "user_localizations", Transform( ListOf( Tuple( elements = [ TextUnicode(title = _("Original Text"), size = 40), Dictionary( title = _("Translations"), elements = [ ( l or "en", TextUnicode(title = a, size = 32) ) for (l,a) in get_languages() ], columns = 2, ), ], ), title = _("Custom localizations"), movable = False, totext = _("%d translations"), default_value = sorted(default_user_localizations.items()), ), forth = lambda d: sorted(d.items()), back = lambda l: dict(l), ), domain = "multisite", ) # Helper that retrieves the list of hostgroups via Livestatus # use alias by default but fallback to name if no alias defined def list_hostgroups(): groups = dict(html.live.query("GET hostgroups\nCache: reload\nColumns: name alias\n")) return [ (name, groups[name] or name) for name in groups.keys() ] register_configvar(group, "topology_default_filter_group", Optional(DropdownChoice( choices = list_hostgroups, sorted = True, ), title = _("Network Topology: Default Filter Group"), help = _("By default the network topology view shows you the parent / child relations " "of all hosts within your local site. The list can be filtered based on hostgroup " "memberships by the users. You can define a default group to use for filtering " "which is used when a user opens the network topology view."), none_label = _("Show all hosts when opening the network topology view"), default_value = None, ), domain = "multisite" ) #. # .--WATO----------------------------------------------------------------. 
# | __ ___ _____ ___ | # | \ \ / / \|_ _/ _ \ | # | \ \ /\ / / _ \ | || | | | | # | \ V V / ___ \| || |_| | | # | \_/\_/_/ \_\_| \___/ | # | | # +----------------------------------------------------------------------+ # | Global Configuration for WATO | # '----------------------------------------------------------------------' group = _("Configuration GUI (WATO)") register_configvar(group, "wato_max_snapshots", Integer(title = _("Number of configuration snapshots to keep"), help = _("Whenever you successfully activate changes a snapshot of the configuration " "will be created. You can also create snapshots manually. WATO will delete old " "snapshots when the maximum number of snapshots is reached."), minvalue = 1, default_value = 50), domain = "multisite") register_configvar(group, "wato_activation_method", DropdownChoice( title = _("WATO restart mode for Nagios"), help = _("Should WATO restart or reload Nagios when activating changes"), choices = [ ('restart', _("Restart")), ('reload' , _("Reload") ), ]), domain = "multisite" ) register_configvar(group, "wato_legacy_eval", Checkbox( title = _("Use unsafe legacy encoding for distributed WATO"), help = _("The current implementation of WATO uses a Python module called <tt>ast</tt> for the " "communication between sites. Previous versions of Check_MK used an insecure encoding " "named <tt>pickle</tt>. Even in the current version WATO falls back to <tt>pickle</tt> " "if your Python version is not recent enough. This is at least the case for RedHat/CentOS 5.X " "and Debian 5.0. In a mixed environment you can force using the legacy <tt>pickle</tt> format " "in order to create compatibility."), ), domain = "multisite" ) register_configvar(group, "wato_hide_filenames", Checkbox(title = _("Hide internal folder names in WATO"), label = _("hide folder names"), help = _("When enabled, then the internal names of WATO folder in the filesystem " "are not shown. 
They will automatically be derived from the name of the folder " "when a new folder is being created. Disable this option if you want to see and " "set the filenames manually."), default_value = True), domain = "multisite") register_configvar(group, "wato_upload_insecure_snapshots", Checkbox(title = _("Allow upload of insecure WATO snapshots"), label = _("upload insecure snapshots"), help = _("When enabled, insecure snapshots are allowed. Please keep in mind that the upload " "of unverified snapshots represents a security risk, since the content of a snapshot is executed " "during runtime. Any manipulations in the content - either willingly or unwillingly (XSS attack) " "- pose a serious security risk."), default_value = False), domain = "multisite") register_configvar(group, "wato_hide_hosttags", Checkbox(title = _("Hide hosttags in WATO folder view"), label = _("hide hosttags"), help = _("When enabled, hosttags are no longer shown within the WATO folder view"), default_value = False), domain = "multisite") register_configvar(group, "wato_hide_varnames", Checkbox(title = _("Hide names of configuration variables"), label = _("hide variable names"), help = _("When enabled, internal configuration variable names of Check_MK are hidden " "from the user (for example in the rule editor)"), default_value = True), domain = "multisite") register_configvar(group, "wato_hide_help_in_lists", Checkbox(title = _("Hide help text of rules in list views"), label = _("hide help text"), help = _("When disabled, WATO shows the help texts of rules also in the list views."), default_value = True), domain = "multisite") register_configvar(group, "wato_use_git", Checkbox(title = _("Use GIT version control for WATO"), label = _("enable GIT version control"), help = _("When enabled, all changes of configuration files are tracked with the " "version control system GIT. You need to make sure that git is installed " "on your Nagios server. 
The version history currently cannot be viewed " "via the web GUI. Please use git command line tools within your Check_MK " "configuration directory."), default_value = False), domain = "multisite") #. # .--User Management-----------------------------------------------------. # | _ _ __ __ _ | # | | | | |___ ___ _ __ | \/ | __ _ _ __ ___ | |_ | # | | | | / __|/ _ \ '__| | |\/| |/ _` | '_ ` _ \| __| | # | | |_| \__ \ __/ | | | | | (_| | | | | | | |_ | # | \___/|___/\___|_| |_| |_|\__, |_| |_| |_|\__| | # | |___/ | # +----------------------------------------------------------------------+ # | Global settings for users and LDAP connector. | # '----------------------------------------------------------------------' group = _("User Management") register_configvar(group, "user_connectors", ListChoice( title = _('Enabled User Connectors'), help = _('The Multisite User Management code is modularized, the modules ' 'are called user connectors. A user connector can hook into multisite ' 'at several places where users are handled. Examples are the authentication ' 'or saving of user accounts. Here you can enable one or several connectors ' 'to extend or replace the default authentication mechanism (htpasswd) e.g. ' 'with ldap based mechanism.'), default_value = [ 'htpasswd' ], choices = userdb.list_user_connectors, allow_empty = False, ), domain = "multisite", ) register_configvar(group, "userdb_automatic_sync", ListChoice( title = _('Automatic User Synchronization'), help = _('By default the users are synchronized automatically in several situations. ' 'The sync is started when opening the "Users" page in configuration and ' 'during each page rendering. Each connector can then specify if it wants to perform ' 'any actions. 
For example the LDAP connector will start the sync once the cached user ' 'information are too old.'), default_value = [ 'wato_users', 'page', 'wato_pre_activate_changes', 'wato_snapshot_pushed' ], choices = [ ('page', _('During regular page processing')), ('wato_users', _('When opening the users\' configuration page')), ('wato_pre_activate_changes', _('Before activating the changed configuration')), ('wato_snapshot_pushed', _('On a remote site, when it receives a new configuration')), ], allow_empty = True, ), domain = "multisite", ) register_configvar(group, "ldap_connection", Dictionary( title = _("LDAP Connection Settings"), help = _("This section configures all LDAP specific connection options. These options " "are used by the LDAP user connector."), elements = [ ("server", TextAscii( title = _("LDAP Server"), help = _("Set the host address of the LDAP server. Might be an IP address or " "resolvable hostname."), allow_empty = False, attrencode = True, )), ('failover_servers', ListOfStrings( title = _('Failover Servers'), help = _('When the connection to the first server fails with connect specific errors ' 'like timeouts or some other network related problems, the connect mechanism ' 'will try to use this server instead of the server configured above. If you ' 'use persistent connections (default), the connection is being used until the ' 'LDAP is not reachable or the local webserver is restarted.'), allow_empty = False, )), ("port", Integer( title = _("TCP Port"), help = _("This variable allows to specify the TCP port to " "be used to connect to the LDAP server. "), minvalue = 1, maxvalue = 65535, default_value = 389, )), ("use_ssl", FixedValue( title = _("Use SSL"), help = _("Connect to the LDAP server with a SSL encrypted connection. You might need " "to configure the OpenLDAP installation on your monitoring server to accept " "the certificates of the LDAP server. 
This is normally done via system wide " "configuration of the CA certificate which signed the certificate of the LDAP " "server. Please refer to the <a target=\"_blank\" " "href=\"http://mathias-kettner.de/checkmk_multisite_ldap_integration.html\">" "documentation</a> for details."), value = True, totext = _("Encrypt the network connection using SSL."), )), ("no_persistent", FixedValue( title = _("No persistent connection"), help = _("The connection to the LDAP server is not persisted."), value = True, totext = _("Don't use persistent LDAP connections."), )), ("connect_timeout", Float( title = _("Connect Timeout (sec)"), help = _("Timeout for the initial connection to the LDAP server in seconds."), minvalue = 1.0, default_value = 2.0, )), ("version", DropdownChoice( title = _("LDAP Version"), help = _("Select the LDAP version the LDAP server is serving. Most modern " "servers use LDAP version 3."), choices = [ (2, "2"), (3, "3") ], default_value = 3, )), ("type", DropdownChoice( title = _("Directory Type"), help = _("Select the software the LDAP directory is based on. Depending on " "the selection e.g. the attribute names used in LDAP queries will " "be altered."), choices = [ ("ad", _("Active Directory")), ("openldap", _("OpenLDAP")), ("389directoryserver", _("389 Directory Server")), ], )), ("bind", Tuple( title = _("Bind Credentials"), help = _("Set the credentials to be used to connect to the LDAP server. The " "used account must not be allowed to do any changes in the directory " "the whole connection is read only. " "In some environment an anonymous connect/bind is allowed, in this " "case you don't have to configure anything here." "It must be possible to list all needed user and group objects from the " "directory."), elements = [ LDAPDistinguishedName( title = _("Bind DN"), help = _("Specify the distinguished name to be used to bind to " "the LDAP directory, e. g. 
<tt>CN=ldap,OU=users,DC=example,DC=com</tt>"), size = 63, ), Password( title = _("Bind Password"), help = _("Specify the password to be used to bind to " "the LDAP directory."), ), ], )), ("page_size", Integer( title = _("Page Size"), help = _("LDAP searches can be performed in paginated mode, for example to improve " "the performance. This enables pagination and configures the size of the pages."), minvalue = 1, default_value = 1000, )), ("response_timeout", Integer( title = _("Response Timeout (sec)"), help = _("Timeout for LDAP query responses."), minvalue = 0, default_value = 5, )), ], optional_keys = ['no_persistent', 'use_ssl', 'bind', 'page_size', 'response_timeout', 'failover_servers'], default_keys = ['page_size'] ), domain = "multisite", in_global_settings = False, ) register_configvar(group, "ldap_userspec", Dictionary( title = _("LDAP User Settings"), help = _("This section configures all user related LDAP options. These options " "are used by the LDAP user connector to find the needed users in the LDAP directory."), elements = [ ("dn", LDAPDistinguishedName( title = _("User Base DN"), help = _("Give a base distinguished name here, e. g. <tt>OU=users,DC=example,DC=com</tt><br> " "All user accounts to synchronize must be located below this one."), size = 80, )), ("scope", DropdownChoice( title = _("Search Scope"), help = _("Scope to be used in LDAP searches. In most cases <i>Search whole subtree below " "the base DN</i> is the best choice. " "It searches for matching objects recursively."), choices = [ ("sub", _("Search whole subtree below the base DN")), ("base", _("Search only the entry at the base DN")), ("one", _("Search all entries one level below the base DN")), ], default_value = "sub", )), ("filter", TextAscii( title = _("Search Filter"), help = _("Using this option you can define an optional LDAP filter which is used during " "LDAP searches. 
It can be used to only handle a subset of the users below the given " "base DN.<br><br>Some common examples:<br><br> " "All user objects in LDAP:<br> " "<tt>(&(objectclass=user)(objectcategory=person))</tt><br> " "Members of a group:<br> " "<tt>(&(objectclass=user)(objectcategory=person)(memberof=CN=cmk-users,OU=groups,DC=example,DC=com))</tt><br>"), size = 80, default_value = lambda: userdb.ldap_filter('users', False), attrencode = True, )), ("filter_group", LDAPDistinguishedName( title = _("Filter Group (Only use in special situations)"), help = _("Using this option you can define the DN of a group object which is used to filter the users. " "Only members of this group will then be synchronized. This is a filter which can be " "used to extend capabilities of the regular \"Search Filter\". Using the search filter " "you can only define filters which directly apply to the user objects. To filter by " "group memberships, you can use the <tt>memberOf</tt> attribute of the user objects in some " "directories. But some directories do not have such attributes because the memberships " "are stored in the group objects as e.g. <tt>member</tt> attributes. You should use the " "regular search filter whenever possible and only use this filter when it is really " "neccessary. Finally you can say, you should not use this option when using Active Directory. " "This option is neccessary in OpenLDAP directories when you like to filter by group membership.<br><br>" "If using, give a plain distinguished name of a group here, e. g. " "<tt>CN=cmk-users,OU=groups,DC=example,DC=com</tt>"), size = 80, )), ("user_id", TextAscii( title = _("User-ID Attribute"), help = _("The attribute used to identify the individual users. 
It must have " "unique values to make an user identifyable by the value of this " "attribute."), default_value = lambda: userdb.ldap_attr('user_id'), attrencode = True, )), ("lower_user_ids", FixedValue( title = _("Lower Case User-IDs"), help = _("Convert imported User-IDs to lower case during synchronisation."), value = True, totext = _("Enforce lower case User-IDs."), )), ("user_id_umlauts", DropdownChoice( title = _("Umlauts in User-IDs"), help = _("Multisite does not support umlauts in User-IDs at the moment. To deal " "with LDAP users having umlauts in their User-IDs you have the following " "choices."), choices = [ ("replace", _("Replace umlauts like \"&uuml;\" with \"ue\"")), ("skip", _("Skip users with umlauts in their User-IDs")), ], default_value = "replace", )), ], optional_keys = ['filter', 'filter_group', 'user_id', 'lower_user_ids', ], ), domain = "multisite", in_global_settings = False, ) register_configvar(group, "ldap_groupspec", Dictionary( title = _("LDAP Group Settings"), help = _("This section configures all group related LDAP options. These options " "are only needed when using group related attribute synchonisation plugins."), elements = [ ("dn", LDAPDistinguishedName( title = _("Group Base DN"), help = _("Give a base distinguished name here, e. g. <tt>OU=groups,DC=example,DC=com</tt><br> " "All groups used must be located below this one."), size = 80, )), ("scope", DropdownChoice( title = _("Search Scope"), help = _("Scope to be used in group related LDAP searches. In most cases " "<i>Search whole subtree below the base DN</i> " "is the best choice. 
It searches for matching objects in the given base " "recursively."), choices = [ ("sub", _("Search whole subtree below the base DN")), ("base", _("Search only the entry at the base DN")), ("one", _("Search all entries one level below the base DN")), ], default_value = "sub", )), ("filter", TextAscii( title = _("Search Filter"), help = _("Using this option you can define an optional LDAP filter which is used " "during group related LDAP searches. It can be used to only handle a " "subset of the groups below the given base DN.<br><br>" "e.g. <tt>(objectclass=group)</tt>"), size = 80, default_value = lambda: userdb.ldap_filter('groups', False), attrencode = True, )), ("member", TextAscii( title = _("Member Attribute"), help = _("The attribute used to identify users group memberships."), default_value = lambda: userdb.ldap_attr('member'), attrencode = True, )), ], optional_keys = ['filter', 'member'], ), domain = "multisite", in_global_settings = False, ) register_configvar(group, "ldap_active_plugins", Dictionary( title = _('LDAP Attribute Sync Plugins'), help = _('It is possible to fetch several attributes of users, like Email or full names, ' 'from the LDAP directory. This is done by plugins which can individually enabled ' 'or disabled. When enabling a plugin, it is used upon the next synchonisation of ' 'user accounts for gathering their attributes. The user options which get imported ' 'into Check_MK from LDAP will be locked in WATO.'), elements = userdb.ldap_attribute_plugins_elements, default_keys = ['email', 'alias', 'auth_expire' ], ), domain = "multisite", in_global_settings = False, ) register_configvar(group, "ldap_cache_livetime", Age( title = _('LDAP Cache Livetime'), help = _('This option defines the maximum age for using the cached LDAP data. The time of the ' 'last LDAP synchronisation is saved and checked on every request to the multisite ' 'interface. 
Once the cache gets outdated, a new synchronisation job is started.<br><br>' 'Please note: Passwords of the users are never stored in WATO and therefor never cached!'), minvalue = 1, default_value = 300, ), domain = "multisite", in_global_settings = False, ) register_configvar(group, "ldap_debug_log", Checkbox( title = _("LDAP connection diagnostics"), label = _("Activate logging of LDAP transactions"), help = _("If this option is enabled, Check_MK will create a log file in " "<tt>var/log/ldap.log</tt> within your site in OMD environments. " "You should enable this option only for debugging."), default_value = False ), domain = "multisite", in_global_settings = False, ) register_configvar(group, "lock_on_logon_failures", Optional( Integer( label = _("Number of logon failures to lock the account"), default_value = 3, minvalue = 1, ), none_value = False, title = _("Lock user accounts after N logon failures"), label = _("Activate automatic locking of user accounts"), help = _("This options enables automatic locking of user account after " "N logon failures. One successful login resets the failure counter.") ), domain = "multisite" ) register_configvar(group, "password_policy", Dictionary( title = _('htpasswd: Password Policy'), help = _('You can define some rules to which each user password ahers. By default ' 'all passwords are accepted, even ones which are made of only a single character, ' 'which is obviously a bad idea. Using this option you can enforce your users ' 'to choose more secure passwords.'), elements = [ ('min_length', Integer( title = _("Minimum password length"), minvalue = 1, )), ('num_groups', Integer( title = _("Number of character groups to use"), minvalue = 1, maxvalue = 4, help = _("Force the user to choose a password that contains characters from at least " "this number of different character groups. 
" "Character groups are: <ul>" "<li>lowercase letters</li>" "<li>uppercase letters</li>" "<li>digits</li>" "<li>special characters such as an underscore or dash</li>" "</ul>"), )), ('max_age', Age( title = _("Maximum age of passwords"), minvalue = 1, display = ["days"], )), ], ), domain = "multisite", ) def list_roles(): roles = userdb.load_roles() return [ (i, r["alias"]) for i, r in roles.items() ] def list_contactgroups(): contact_groups = userdb.load_group_information().get("contact", {}) entries = [ (c, g['alias']) for c, g in contact_groups.items() ] entries.sort() return entries register_configvar(group, "default_user_profile", Dictionary( title = _("Default User Profile"), help = _("With this option you can specify the attributes a user which is created during " "its initial login gets added. For example, the default is to add the role \"user\" " "to all automatically created users."), elements = [ ('roles', ListChoice( title = _('User Roles'), help = _('Specify the initial roles of an automatically created user.'), default_value = [ 'user' ], choices = list_roles, )), ('contactgroups', ListChoice( title = _('Contact groups'), help = _('Specify the initial contact groups of an automatically created user.'), default_value = [], choices = list_contactgroups, )), ], optional_keys = [], ), domain = "multisite", ) register_configvar(group, "save_user_access_times", Checkbox( title = _("Save last access times of users"), label = _("Save the time of the latest user activity"), help = _("When enabled, the time of the last access is stored for each user. The last " "activity is shown on the users page."), default_value = False ), domain = "multisite" ) register_configvar(group, "export_folder_permissions", Checkbox( title = _("Export WATO folder permissions"), label = _("Make WATO folder permissions usable e.g. 
by NagVis"), help = _("It is possible to create maps representing the WATO folder hierarchy within " "NagVis by naming the maps like the folders are named internally. To make the " "restriction of access to the maps as comfortable as possible, the permissions " "configured within WATO can be exported to NagVis."), default_value = False, ), domain = "multisite" ) #. # .--Check_MK------------------------------------------------------------. # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | |_____| | # +----------------------------------------------------------------------+ # | Operation mode of Check_MK | # '----------------------------------------------------------------------' group = _("Operation mode of Check_MK") register_configvar(group, "use_new_descriptions_for", ListChoice( title = _("Use new service descriptions"), help = _("In order to make Check_MK more consistent, " "the descriptions of several services have been renamed in newer " "Check_MK versions. One example is the filesystem services that have " "been renamed from <tt>fs_</tt> into <tt>Filesystem</tt>. But since renaming " "of existing services has many implications - including existing rules, performance " "data and availability history - these renamings are disabled per default for " "existing installations. 
Here you can switch to the new descriptions for " "selected check types"), choices = [ ( "df", _("Used space in filesystems")), ( "df_netapp", _("NetApp Filers: Used Space in Filesystems")), ( "df_netapp32", _("NetApp Filers: Used space in Filesystem Using 32-Bit Counters")), ( "esx_vsphere_datastores", _("VMWare ESX host systems: Used space")), ( "hr_fs", _("Used space in filesystems via SNMP")), ( "vms_diskstat.df", _("Disk space on OpenVMS")), ( "zfsget", _("Used space in ZFS pools and filesystems")), ( "ps", _("State and Count of Processes") ), ( "ps.perf", _("State and Count of Processes (with additional performance data)")), ( "wmic_process", _("Resource consumption of windows processes")), ( "services", _("Windows Services")), ( "logwatch", _("Check logfiles for relevant new messages")), ( "cmk-inventory", _("Monitor hosts for unchecked services (Check_MK Discovery)")), ( "hyperv_vms", _("Hyper-V Server: State of VMs")), ( "ibm_svc_mdiskgrp", _("IBM SVC / Storwize V3700 / V7000: Status and Usage of MDisksGrps")), ( "ibm_svc_system", _("IBM SVC / V7000: System Info")), ( "ibm_svc_systemstats.diskio", _("IBM SVC / V7000: Disk Throughput for Drives/MDisks/VDisks in Total")), ( "ibm_svc_systemstats.iops", _("IBM SVC / V7000: IO operations/sec for Drives/MDisks/VDisks in Total")), ( "ibm_svc_systemstats.disk_latency", _("IBM SVC / V7000: Latency for Drives/MDisks/VDisks in Total")), ( "ibm_svc_systemstats.cache", _("IBM SVC / V7000: Cache Usage in Total")), ], render_orientation = "vertical", ), need_restart = True ) register_configvar(group, "tcp_connect_timeout", Float(title = _("Agent TCP connect timeout (sec)"), help = _("Timeout for TCP connect to agent in seconds. If the agent does " "not respond within this time, it is considered to be unreachable. 
" "Note: This does <b>not</b> limit the time the agent needs to " "generate its output."), minvalue = 1.0), need_restart = True) register_configvar(group, "simulation_mode", Checkbox(title = _("Simulation mode"), label = _("Run in simulation mode"), help = _("This boolean variable allows you to bring check_mk into a dry run mode. " "No hosts will be contacted, no DNS lookups will take place and data is read " "from cache files that have been created during normal operation or have " "been copied here from another monitoring site.")), need_restart = True) register_configvar(group, "restart_locking", DropdownChoice( title = _("Simultanous activation of changes"), help = _("When two users simultanously try to activate the changes then " "you can decide to abort with an error (default) or have the requests " "serialized. It is also possible - but not recommended - to turn " "off locking altogether."), choices = [ ('abort', _("Abort with an error")), ('wait' , _("Wait until the other has finished") ), (None , _("Disable locking") ), ]), need_restart = False ) register_configvar(group, "agent_simulator", Checkbox(title = _("SNMP Agent Simulator"), label = _("Process stored SNMP walks with agent simulator"), help = _("When using stored SNMP walks you can place inline code generating " "dynamic simulation data. This feature can be activated here. There " "is a big chance that you will never need this feature...")), need_restart = True) register_configvar(group, "delay_precompile", Checkbox(title = _("Delay precompiling of host checks"), label = _("delay precompiling"), help = _("If you enable this option, then Check_MK will not directly Python-bytecompile " "all host checks when activating the configuration and restarting Nagios. 
" "Instead it will delay this to the first " "time the host is actually checked being by Nagios.<p>This reduces the time needed " "for the operation, but on the other hand will lead to a slightly higher load " "of Nagios for the first couple of minutes after the restart. "))) register_configvar(group, "cluster_max_cachefile_age", Integer(title = _("Maximum cache file age for clusters"), label = _("seconds"), help = _("The number of seconds a cache file may be old if check_mk should " "use it instead of getting information from the target hosts while " "checking a cluster. Per default this is enabled and set to 90 seconds. " "If your check cycle is not set to a larger value than one minute then " "you should increase this accordingly.")), need_restart = True) register_configvar(group, "piggyback_max_cachefile_age", Age(title = _("Maximum age for piggyback files"), help = _("The maximum age for piggy back data from another host to be valid for monitoring. " "Older files are deleted before processing them. Please make sure that this age is " "at least as large as you normal check interval for piggy hosts.")), need_restart = True) register_configvar(group, "check_submission", DropdownChoice( title = _("Check submission method"), help = _("If you set this to <b>Nagios command pipe</b>, then Check_MK will write its " "check results into the Nagios command pipe. This is the classical way. " "Choosing <b>Create check files</b> " "skips one phase in the Nagios core and directly creates Nagios check files. 
" "This reduces the overhead but might not be compatible with other monitoring " "cores."), choices = [ ("pipe", _("Nagios command pipe")), ("file", _("Create check files")) ]), need_restart = True) register_configvar(group, "check_mk_perfdata_with_times", Checkbox(title = _("Check_MK with times performance data"), label = _("Return process times within performance data"), help = _("Enabling this option results in additional performance data " "for the Check_MK output, giving information regarding the process times. " "It provides the following fields: user_time, system_time, children_user_time " "and children_system_time")), need_restart = True) register_configvar(group, "use_dns_cache", Checkbox( title = _("Use DNS lookup cache"), label = _("Prevent DNS lookups by use of a cache file"), help = _("When this option is enabled (which is the default), then Check_MK tries to " "prevent IP address lookups during the configuration generation. This can speed " "up this process greatly when you have a larger number of hosts. The cache is stored " "in a simple file. Note: when the cache is enabled then changes of the IP address " "of a host in your name server will not be detected immediately. If you need an " "immediate update then simply disable the cache once, activate the changes and " "enabled it again. OMD based installations automatically update the cache once " "a day."), default_value = True, ), need_restart = True ) register_configvar(group, "use_inline_snmp", Checkbox( title = _("Use Inline SNMP"), label = _("Enable inline SNMP (directly use net-snmp libraries)"), help = _("By default Check_MK uses command line calls of Net-SNMP tools like snmpget or " "snmpwalk to gather SNMP information. For each request a new command line " "program is being executed. It is now possible to use the inline SNMP implementation " "which calls the net-snmp libraries directly via its python bindings. This " "should increase the performance of SNMP checks in a significant way. 
The inline " "SNMP mode is a feature which improves the performance for large installations and " "only available via our subscription."), default_value = False ), need_restart = True ) register_configvar(group, "record_inline_snmp_stats", Checkbox( title = _("Record statistics of Inline SNMP"), label = _("Enable recording of Inline SNMP statistics"), help = _("When you have enabled Inline SNMP, you can use this flag to enable recording of " "some performance related values. The recorded values are stored in a single file " "at <tt>var/check_mk/snmp.stats</tt>.<br><br>" "<i>Please note:</i> Only enable this for a short period, because it will " "decrease the performance of your monitoring."), default_value = False ), need_restart = True ) group = _("Service discovery") register_configvar(group, "inventory_check_interval", Optional( Integer(title = _("Perform service discovery check every"), unit = _("minutes"), min_value = 1, default_value = 720), title = _("Enable regular service discovery checks"), help = _("If enabled, Check_MK will create one additional service per host " "that does a regular check, if the service discovery would find new services " "currently un-monitored.")), need_restart = True) register_configvar(group, "inventory_check_severity", DropdownChoice( title = _("Severity of failed service discovery check"), help = _("Please select which alarm state the service discovery check services " "shall assume in case that un-monitored services are found."), choices = [ (0, _("OK - do not alert, just display")), (1, _("Warning") ), (2, _("Critical") ), (3, _("Unknown") ), ], default_value = 1)) register_configvar(group, "inventory_check_do_scan", DropdownChoice( title = _("Service discovery check for SNMP devices"), choices = [ ( True, _("Perform full SNMP scan always, detect new check types") ), ( False, _("Just rely on existing check files, detect new items only") ) ] )) register_configvar(group, "inventory_check_autotrigger", Checkbox( title = 
_("Service discovery triggers service discovery check"), label = _("Automatically schedule service discovery check after service configuration changes"), help = _("When this option is enabled then after each change of the service " "configuration of a host via WATO - may it be via manual changes or a bulk " "discovery - the service discovery check is automatically rescheduled in order " "to reflect the new service state correctly immediately."), default_value = True, )) _if_portstate_choices = [ ( '1', 'up(1)'), ( '2', 'down(2)'), ( '3', 'testing(3)'), ( '4', 'unknown(4)'), ( '5', 'dormant(5)') , ( '6', 'notPresent(6)'), ( '7', 'lowerLayerDown(7)'), ( '8', 'degraded(8)'), ] _brocade_fcport_adm_choices = [ ( 1, 'online(1)'), ( 2, 'offline(2)'), ( 3, 'testing(3)'), ( 4, 'faulty(4)'), ] _brocade_fcport_op_choices = [ ( 0, 'unkown(0)'), ( 1, 'online(1)'), ( 2, 'offline(2)'), ( 3, 'testing(3)'), ( 4, 'faulty(4)'), ] _brocade_fcport_phy_choices = [ ( 1, 'noCard(1)'), ( 2, 'noTransceiver(2)'), ( 3, 'laserFault(3)'), ( 4, 'noLight(4)'), ( 5, 'noSync(5)'), ( 6, 'inSync(6)'), ( 7, 'portFault(7)'), ( 8, 'diagFault(8)'), ( 9, 'lockRef(9)'), ( 10, 'validating(10)'), ( 11, 'invalidModule(11)'), ( 14, 'noSigDet(14)'), ( 255, 'unkown(255)'), ] _if_porttype_choices = [ ("1", "other(1)" ), ("2", "regular1822(2)" ), ("3", "hdh1822(3)" ), ("4", "ddnX25(4)" ), ("5", "rfc877x25(5)" ), ("6", "ethernetCsmacd(6)" ), ("7", "iso88023Csmacd(7)" ), ("8", "iso88024TokenBus(8)" ), ("9", "iso88025TokenRing(9)" ), ("10", "iso88026Man(10)" ), ("11", "starLan(11)" ), ("12", "proteon10Mbit(12)" ), ("13", "proteon80Mbit(13)" ), ("14", "hyperchannel(14)" ), ("15", "fddi(15)" ), ("16", "lapb(16)" ), ("17", "sdlc(17)" ), ("18", "ds1(18)" ), ("19", "e1(19)" ), ("20", "basicISDN(20)" ), ("21", "primaryISDN(21)" ), ("22", "propPointToPointSerial(22)" ), ("23", "ppp(23)" ), ("24", "softwareLoopback(24)" ), ("25", "eon(25)" ), ("26", "ethernet3Mbit(26)" ), ("27", "nsip(27)" ), ("28", "slip(28)" ), ("29", 
"ultra(29)" ), ("30", "ds3(30)" ), ("31", "sip(31)" ), ("32", "frameRelay(32)" ), ("33", "rs232(33)" ), ("34", "para(34)" ), ("35", "arcnet(35)" ), ("36", "arcnetPlus(36)" ), ("37", "atm(37)" ), ("38", "miox25(38)" ), ("39", "sonet(39)" ), ("40", "x25ple(40)" ), ("41", "iso88022llc(41)" ), ("42", "localTalk(42)" ), ("43", "smdsDxi(43)" ), ("44", "frameRelayService(44)" ), ("45", "v35(45)" ), ("46", "hssi(46)" ), ("47", "hippi(47)" ), ("48", "modem(48)" ), ("49", "aal5(49)" ), ("50", "sonetPath(50)" ), ("51", "sonetVT(51)" ), ("52", "smdsIcip(52)" ), ("53", "propVirtual(53)" ), ("54", "propMultiplexor(54)" ), ("55", "ieee80212(55)" ), ("56", "fibreChannel(56)" ), ("57", "hippiInterface(57)" ), ("58", "frameRelayInterconnect(58)" ), ("59", "aflane8023(59)" ), ("60", "aflane8025(60)" ), ("61", "cctEmul(61)" ), ("62", "fastEther(62)" ), ("63", "isdn(63)" ), ("64", "v11(64)" ), ("65", "v36(65)" ), ("66", "g703at64k(66)" ), ("67", "g703at2mb(67)" ), ("68", "qllc(68)" ), ("69", "fastEtherFX(69)" ), ("70", "channel(70)" ), ("71", "ieee80211(71)" ), ("72", "ibm370parChan(72)" ), ("73", "escon(73)" ), ("74", "dlsw(74)" ), ("75", "isdns(75)" ), ("76", "isdnu(76)" ), ("77", "lapd(77)" ), ("78", "ipSwitch(78)" ), ("79", "rsrb(79)" ), ("80", "atmLogical(80)" ), ("81", "ds0(81)" ), ("82", "ds0Bundle(82)" ), ("83", "bsc(83)" ), ("84", "async(84)" ), ("85", "cnr(85)" ), ("86", "iso88025Dtr(86)" ), ("87", "eplrs(87)" ), ("88", "arap(88)" ), ("89", "propCnls(89)" ), ("90", "hostPad(90)" ), ("91", "termPad(91)" ), ("92", "frameRelayMPI(92)" ), ("93", "x213(93)" ), ("94", "adsl(94)" ), ("95", "radsl(95)" ), ("96", "sdsl(96)" ), ("97", "vdsl(97)" ), ("98", "iso88025CRFPInt(98)" ), ("99", "myrinet(99)" ), ("100", "voiceEM(100)" ), ("101", "voiceFXO(101)" ), ("102", "voiceFXS(102)" ), ("103", "voiceEncap(103)" ), ("104", "voiceOverIp(104)" ), ("105", "atmDxi(105)" ), ("106", "atmFuni(106)" ), ("107", "atmIma(107)" ), ("108", "pppMultilinkBundle(108)" ), ("109", "ipOverCdlc(109)" ), 
("110", "ipOverClaw(110)" ), ("111", "stackToStack(111)" ), ("112", "virtualIpAddress(112)" ), ("113", "mpc(113)" ), ("114", "ipOverAtm(114)" ), ("115", "iso88025Fiber(115)" ), ("116", "tdlc(116)" ), ("117", "gigabitEthernet(117)" ), ("118", "hdlc(118)" ), ("119", "lapf(119)" ), ("120", "v37(120)" ), ("121", "x25mlp(121)" ), ("122", "x25huntGroup(122)" ), ("123", "trasnpHdlc(123)" ), ("124", "interleave(124)" ), ("125", "fast(125)" ), ("126", "ip(126)" ), ("127", "docsCableMaclayer(127)" ), ( "128", "docsCableDownstream(128)" ), ("129", "docsCableUpstream(129)" ), ("130", "a12MppSwitch(130)" ), ("131", "tunnel(131)" ), ("132", "coffee(132)" ), ("133", "ces(133)" ), ("134", "atmSubInterface(134)" ), ("135", "l2vlan(135)" ), ("136", "l3ipvlan(136)" ), ("137", "l3ipxvlan(137)" ), ("138", "digitalPowerline(138)" ), ("139", "mediaMailOverIp(139)" ), ("140", "dtm(140)" ), ("141", "dcn(141)" ), ("142", "ipForward(142)" ), ("143", "msdsl(143)" ), ("144", "ieee1394(144)" ), ( "145", "if-gsn(145)" ), ("146", "dvbRccMacLayer(146)" ), ("147", "dvbRccDownstream(147)" ), ("148", "dvbRccUpstream(148)" ), ("149", "atmVirtual(149)" ), ("150", "mplsTunnel(150)" ), ("151", "srp(151)" ), ("152", "voiceOverAtm(152)" ), ("153", "voiceOverFrameRelay(153)" ), ("154", "idsl(154)" ), ( "155", "compositeLink(155)" ), ("156", "ss7SigLink(156)" ), ("157", "propWirelessP2P(157)" ), ("158", "frForward(158)" ), ("159", "rfc1483(159)" ), ("160", "usb(160)" ), ("161", "ieee8023adLag(161)" ), ("162", "bgppolicyaccounting(162)" ), ("163", "frf16MfrBundle(163)" ), ("164", "h323Gatekeeper(164)" ), ("165", "h323Proxy(165)" ), ("166", "mpls(166)" ), ("167", "mfSigLink(167)" ), ("168", "hdsl2(168)" ), ("169", "shdsl(169)" ), ("170", "ds1FDL(170)" ), ("171", "pos(171)" ), ("172", "dvbAsiIn(172)" ), ("173", "dvbAsiOut(173)" ), ("174", "plc(174)" ), ("175", "nfas(175)" ), ( "176", "tr008(176)" ), ("177", "gr303RDT(177)" ), ("178", "gr303IDT(178)" ), ("179", "isup(179)" ), ("180", 
"propDocsWirelessMaclayer(180)" ), ("181", "propDocsWirelessDownstream(181)" ), ("182", "propDocsWirelessUpstream(182)" ), ("183", "hiperlan2(183)" ), ("184", "propBWAp2Mp(184)" ), ("185", "sonetOverheadChannel(185)" ), ("186", "digitalWrapperOverheadChannel(186)" ), ("187", "aal2(187)" ), ("188", "radioMAC(188)" ), ("189", "atmRadio(189)" ), ("190", "imt(190)" ), ("191", "mvl(191)" ), ("192", "reachDSL(192)" ), ("193", "frDlciEndPt(193)" ), ("194", "atmVciEndPt(194)" ), ("195", "opticalChannel(195)" ), ("196", "opticalTransport(196)" ), ("197", "propAtm(197)" ), ("198", "voiceOverCable(198)" ), ("199", "infiniband(199)" ), ("200", "teLink(200)" ), ("201", "q2931(201)" ), ("202", "virtualTg(202)" ), ("203", "sipTg(203)" ), ("204", "sipSig(204)" ), ( "205", "docsCableUpstreamChannel(205)" ), ("206", "econet(206)" ), ("207", "pon155(207)" ), ("208", "pon622(208)" ), ("209", "bridge(209)" ), ("210", "linegroup(210)" ), ("211", "voiceEMFGD(211)" ), ("212", "voiceFGDEANA(212)" ), ("213", "voiceDID(213)" ), ("214", "mpegTransport(214)" ), ("215", "sixToFour(215)" ), ("216", "gtp(216)" ), ("217", "pdnEtherLoop1(217)" ), ("218", "pdnEtherLoop2(218)" ), ("219", "opticalChannelGroup(219)" ), ("220", "homepna(220)" ), ("221", "gfp(221)" ), ("222", "ciscoISLvlan(222)" ), ("223", "actelisMetaLOOP(223)" ), ("224", "fcipLink(224)" ), ("225", "rpr(225)" ), ("226", "qam(226)" ), ("227", "lmp(227)" ), ("228", "cblVectaStar(228)" ), ("229", "docsCableMCmtsDownstream(229)" ), ("230", "adsl2(230)" ), ] register_configvar(group, "if_inventory_pad_portnumbers", Checkbox(title = _("Pad port numbers with zeroes"), label = _("pad port numbers"), help = _("If this option is activated then Check_MK will pad port numbers of " "network interfaces with zeroes so that all port descriptions from " "all ports of a host or switch have the same length and thus sort " "currectly in the GUI. In versions prior to 1.1.13i3 there was no " "padding. 
You can switch back to the old behaviour by disabling this " "option. This will retain the old service descriptions and the old " "performance data."))) register_configvar(deprecated, "if_inventory_uses_description", Checkbox(title = _("Use description as service name for network interface checks"), label = _("use description"), help = _("This option lets Check_MK use the interface description as item instead " "of the port number. If no description is available then the port number is " "used anyway."))) register_configvar(deprecated, "if_inventory_uses_alias", Checkbox(title = _("Use alias as service name for network interface checks"), label = _("use alias"), help = _("This option lets Check_MK use the alias of the port (ifAlias) as item instead " "of the port number. If no alias is available then the port number is used " "anyway."))) register_configvar(deprecated, "if_inventory_portstates", ListChoice(title = _("Network interface port states to inventorize"), help = _("When doing inventory on switches or other devices with network interfaces " "then only ports found in one of the configured port states will be added to the monitoring."), choices = _if_portstate_choices)) register_configvar(deprecated, "if_inventory_porttypes", ListChoice(title = _("Network interface port types to inventorize"), help = _("When doing inventory on switches or other devices with network interfaces " "then only ports of the specified types will be created services for."), choices = _if_porttype_choices, columns = 3)) register_configvar(deprecated, "diskstat_inventory_mode", DropdownChoice( title = _("Inventory mode for disk IO checks"), help = _("When doing inventory the various disk IO checks can either create " "a single check per host, one check per device or a separate check " "for read and written bytes."), choices = [ ('rule' , _("controlled by ruleset Inventory mode for Disk IO check") ), ('summary', _("one summary check per host")), ('single' , _("one check per individual 
disk/LUN") ), ('legacy' , _("one check for read, one for write") ), ], default_value = 'rule', ), ) register_configvar(group, "win_dhcp_pools_inventorize_empty", Checkbox( title = _("Inventorize empty windows dhcp pools"), help = _("You can activate the inventorization of " "dhcp pools, which have no ip addresses in it"), ), need_restart = True ) group = _("Check configuration") register_configvar(deprecated, "if_inventory_monitor_state", Checkbox(title = _("Monitor port state of network interfaces"), label = _("monitor port state"), help = _("When this option is active then during inventory of networking interfaces " "(and switch ports) the current operational state of the port will " "automatically be coded as a check parameter into the check. That way the check " "will get warning or critical when the state changes. This setting can later " "by overridden on a per-host and per-port base by defining special check " "parameters via a rule."))) register_configvar(deprecated, "if_inventory_monitor_speed", Checkbox(title = _("Monitor port speed of network interfaces"), label = _("monitor port speed"), help = _("When this option is active then during inventory of networking interfaces " "(and switch ports) the current speed setting of the port will " "automatically be coded as a check parameter into the check. That way the check " "will get warning or critical when speed later changes (for example from " "100 Mbit/s to 10 Mbit/s). 
This setting can later " "by overridden on a per-host and per-port base by defining special check " "parameters via a rule."))) register_configvar(group, "logwatch_service_output", DropdownChoice( title = _("Service output for logwatch"), help = _("You can change the plugin output of logwatch " "to show only the count of messages or also " "to show the last worst message"), choices = [ ( 'default' , _("Show count and last message") ), ( 'count', _("Show only count")), ], default_value = 'default', ), need_restart = True ) register_configvar(group, "printer_supply_some_remaining_status", DropdownChoice( title = _("Printer supply some remaining status"), help = _("Set the reported nagios state when the fill state " "is something between empty and small " "remaining capacity"), choices = [ ( 0, _("OK") ), ( 1, _("Warning")), ( 2, _("Critical")), ], default_value = 2, ), ) register_configvar(deprecated, "printer_supply_default_levels", Tuple( title = _("Printer supply default levels"), help = _("Set global default levels for warning and critical. "), elements = [ Integer( title = _("Warning at"), minvalue = 1, ), Integer( title = _("Critical at"), minvalue = 1, ), ], )) #. # .--Rulesets------------------------------------------------------------. # | ____ _ _ | # | | _ \ _ _| | ___ ___ ___| |_ ___ | # | | |_) | | | | |/ _ \/ __|/ _ \ __/ __| | # | | _ <| |_| | | __/\__ \ __/ |_\__ \ | # | |_| \_\\__,_|_|\___||___/\___|\__|___/ | # | | # +----------------------------------------------------------------------+ # | Rulesets for hosts and services except check parameter rules. | # '----------------------------------------------------------------------' register_rulegroup("grouping", _("Grouping"), _("Assignment of host & services to host, service and contacts groups. ")) group = "grouping" register_rule(group, "host_groups", GroupSelection( "host", title = _("Assignment of hosts to host groups"), help = _("Hosts can be grouped together into host groups. 
The most common use case " "is to put hosts which belong together in a host group to make it possible " "to get them listed together in the status GUI.")), match = "all") register_rule(group, "service_groups", GroupSelection( "service", title = _("Assignment of services to service groups")), match = "all", itemtype = "service") register_rule(group, "host_contactgroups", GroupSelection( "contact", title = _("Assignment of hosts to contact groups")), match = "all") register_rule(group, "service_contactgroups", GroupSelection( "contact", title = _("Assignment of services to contact groups")), match = "all", itemtype = "service") register_rulegroup("monconf", _("Monitoring Configuration"), _("Intervals for checking, retries, clustering, configuration for inventory and similar")) group = "monconf/" + _("Service Checks") register_rule(group, "extra_service_conf:max_check_attempts", Integer(title = _("Maximum number of check attempts for service"), help = _("The maximum number of failed checks until a service problem state will " "be considered as <u>hard</u>. Only hard state trigger notifications. "), minvalue = 1), itemtype = "service") register_rule(group, "extra_service_conf:check_interval", Transform( Age(minvalue=1, default_value=60), forth = lambda v: int(v * 60), back = lambda v: float(v) / 60.0, title = _("Normal check interval for service checks"), help = _("Check_MK usually uses an interval of one minute for the active Check_MK " "check and for legacy checks. Here you can specify a larger interval. Please " "note, that this setting only applies to active checks (those with the " "%s reschedule button). 
If you want to change the check interval of " "the Check_MK service only, specify <tt><b>Check_MK$</b></tt> in the list " "of services.") % '<img class="icon docu" src="images/icon_reload.gif">'), itemtype = "service") register_rule(group, "extra_service_conf:retry_interval", Transform( Age(minvalue=1, default_value=60), forth = lambda v: int(v * 60), back = lambda v: float(v) / 60.0, title = _("Retry check interval for service checks"), help = _("This setting is relevant if you have set the maximum number of check " "attempts to a number greater than one. In case a service check is not OK " "and the maximum number of check attempts is not yet reached, it will be " "rescheduled with this interval. The retry interval is usually set to a smaller " "value than the normal interval.<br><br>This setting only applies to " "active checks.")), itemtype = "service") register_rule(group, "extra_service_conf:check_period", TimeperiodSelection( title = _("Check period for active services"), help = _("If you specify a notification period for a service then active checks " "of that service will only be done in that period. Please note, that the " "checks driven by Check_MK are passive checks and are not affected by this " "rule. 
You can use the rule for the active Check_MK check, however.")), itemtype = "service") register_rule(group, "check_periods", TimeperiodSelection( title = _("Check period for passive Check_MK services"), help = _("If you specify a notification period for a Check_MK service then " "results will be processed only within this period.")), itemtype = "service") register_rule(group, "extra_service_conf:process_perf_data", DropdownChoice( title = _("Enable/disable processing of perfdata for services"), help = _("This setting allows you to disable the processing of perfdata for a " "service completely."), choices = [ ("1", _("Enable processing of perfdata")), ("0", _("Disable processing of perfdata")) ], ), itemtype = "service") register_rule(group, "extra_service_conf:passive_checks_enabled", DropdownChoice( title = _("Enable/disable passive checks for services"), help = _("This setting allows you to disable the processing of passiv check results for a " "service."), choices = [ ("1", _("Enable processing of passive check results")), ("0", _("Disable processing of passive check results")) ], ), itemtype = "service") register_rule(group, "extra_service_conf:active_checks_enabled", DropdownChoice( title = _("Enable/disable active checks for services"), help = _("This setting allows you to disable or enable " "active checks for a service."), choices = [ ("1", _("Enable active checks")), ("0", _("Disable active checks")) ], ), itemtype = "service") group = "monconf/" + _("Host Checks") register_rule(group, "extra_host_conf:max_check_attempts", Integer(title = _("Maximum number of check attempts for host"), help = _("The maximum number of failed host checks until the host will be considered " "in a hard down state"), minvalue = 1)) register_rule(group, "extra_host_conf:check_interval", Transform( Age(minvalue=1, default_value=60), forth = lambda v: int(v * 60), back = lambda v: float(v) / 60.0, title = _("Normal check interval for host checks"), help = _("The default interval 
is set to one minute. Here you can specify a larger " "interval. The host is contacted in this interval on a regular base. The host " "check is also being executed when a problematic service state is detected to check " "wether or not the service problem is resulting from a host problem.") ) ) register_rule(group, "extra_host_conf:retry_interval", Transform( Age(minvalue=1, default_value=60), forth = lambda v: int(v * 60), back = lambda v: float(v) / 60.0, title = _("Retry check interval for host checks"), help = _("This setting is relevant if you have set the maximum number of check " "attempts to a number greater than one. In case a host check is not UP " "and the maximum number of check attempts is not yet reached, it will be " "rescheduled with this interval. The retry interval is usually set to a smaller " "value than the normal interval."), ) ) register_rule(group, "extra_host_conf:check_period", TimeperiodSelection( title = _("Check period for hosts"), help = _("If you specify a check period for a host then active checks of that " "host will only take place within that period. In the rest of the time " "the state of the host will stay at its last status.")), ) register_rule( group, "host_check_commands", CascadingDropdown( title = _("Host Check Command"), help = _("Usually Check_MK uses a series of PING (ICMP echo request) in order to determine " "whether a host is up. In some cases this is not possible, however. 
With this rule " "you can specify an alternative way of determining the host's state."), choices = [ ( "ping", _("PING (active check with ICMP echo request)") ), ( "smart", _("Smart PING (only with Check_MK Micro Core)") ), ( "tcp" , _("TCP Connect"), Integer(label = _("to port:"), minvalue=1, maxvalue=65535, default_value=80 )), ( "ok", _("Always assume host to be up") ), ( "agent", _("Use the status of the Check_MK Agent") ), ( "service", _("Use the status of the service..."), TextUnicode( label = ":", size = 45, allow_empty = False, attrencode = True, )), ( "custom", _("Use a custom check plugin..."), PluginCommandLine() ), ], default_value = "ping", orientation = "horizontal", ), match = 'first' ) register_rule(group, "extra_host_conf:check_command", TextAscii( title = _("Internal Command for Hosts Check"), label = _("Command:"), help = _("This ruleset is deprecated and will be removed soon: " "it changes the default check_command for a host check. You need to " "define that command manually in your monitoring configuration."), attrencode = True, ), ) group = "monconf/" + _("Notifications") register_rule(group, "extra_host_conf:notifications_enabled", DropdownChoice( title = _("Enable/disable notifications for hosts"), help = _("This setting allows you to disable notifications about problems of a " "host completely. Per default all notifications are enabled. Sometimes " "it is more convenient to just disable notifications then to remove a " "host completely from the monitoring. Note: this setting has no effect " "on the notifications of service problems of a host."), choices = [ ("1", _("Enable host notifications")), ("0", _("Disable host notifications")) ], )) register_rule(group, "extra_service_conf:notifications_enabled", DropdownChoice( title = _("Enable/disable notifications for services"), help = _("This setting allows you to disable notifications about problems of a " "service completely. 
Per default all notifications are enabled."), choices = [ ("1", _("Enable service notifications")), ("0", _("Disable service notifications")) ], ), itemtype = "service" ) register_rule(group, "extra_host_conf:notification_options", Transform( ListChoice( choices = [ ( "d", _("Host goes down")), ( "u", _("Host gets unreachble")), ( "r", _("Host goes up again")), ( "f", _("Start or end of flapping state")), ( "s", _("Start or end of a scheduled downtime")), ], default_value = [ "d", "u", "r", "f", "s" ], ), title = _("Notified events for hosts"), help = _("This ruleset allows you to restrict notifications of host problems to certain " "states, e.g. only notify on DOWN, but not on UNREACHABLE. Please select the types " "of events that should initiate notifications. Please note that several other " "filters must also be passed in order for notifications to finally being sent out."), forth = lambda x: x != 'n' and x.split(",") or [], back = lambda x: ",".join(x) or "n", ), ) register_rule(group, "extra_service_conf:notification_options", Transform( ListChoice( choices = [ ("w", _("Service goes into warning state")), ("u", _("Service goes into unknown state")), ("c", _("Service goes into critical state")), ("r", _("Service recovers to OK")), ("f", _("Start or end of flapping state")), ("s", _("Start or end of a scheduled downtime")), ], default_value = [ "w", "u", "c", "r", "f", "s" ], ), title = _("Notified events for services"), help = _("This ruleset allows you to restrict notifications of service problems to certain " "states, e.g. only notify on CRIT, but not on WARN. Please select the types " "of events that should initiate notifications. 
Please note that several other " "filters must also be passed in order for notifications to finally being sent out."), forth = lambda x: x != 'n' and x.split(",") or [], back = lambda x: ",".join(x) or "n", ), itemtype = "service" ) register_rule(group, "extra_host_conf:notification_period", TimeperiodSelection( title = _("Notification period for hosts"), help = _("If you specify a notification period for a host then notifications " "about problems of that host (not of its services!) will only be sent " "if those problems occur within the notification period. Also you can " "filter out problems in the problems views for objects not being in " "their notification period (you can think of the notification period " "as the 'service time'.")), ) register_rule(group, "extra_service_conf:notification_period", TimeperiodSelection( title = _("Notification period for services"), help = _("If you specify a notification period for a service then notifications " "about that service will only be sent " "if those problems occur within the notification period. Also you can " "filter out problems in the problems views for objects not being in " "their notification period (you can think of the notification period " "as the 'service time'.")), itemtype = "service") register_rule(group, "extra_host_conf:first_notification_delay", Integer( minvalue = 0, default_value = 60, label = _("Delay:"), unit = _("minutes"), title = _("Delay host notifications"), help = _("This setting delays notifications about host problems by the " "specified amount of time. If the host is up again within that " "time, no notification will be sent out."), ), factory_default = 0, ) register_rule(group, "extra_service_conf:first_notification_delay", Integer( minvalue = 0, default_value = 60, label = _("Delay:"), unit = _("minutes"), title = _("Delay service notifications"), help = _("This setting delays notifications about service problems by the " "specified amount of time. 
If the service is OK again within that " "time, no notification will be sent out."), ), factory_default = 0, itemtype = "service") register_rule(group, "extra_host_conf:notification_interval", Optional( Integer( minvalue = 1, default_value = 120, label = _("Interval:"), unit = _("minutes")), title = _("Periodic notifications during host problems"), help = _("If you enable periodic notifications, then during a problem state " "of the host notifications will be sent out in regular intervals " "until the problem is acknowledged."), label = _("Enable periodic notifications"), none_label = _("disabled"), none_value = 0, ) ) register_rule(group, "extra_service_conf:notification_interval", Optional( Integer( minvalue = 1, default_value = 120, label = _("Interval:"), unit = _("minutes")), title = _("Periodic notifications during service problems"), help = _("If you enable periodic notifications, then during a problem state " "of the service notifications will be sent out in regular intervals " "until the problem is acknowledged."), label = _("Enable periodic notifications"), none_label = _("disabled"), none_value = 0, ), itemtype = "service") register_rule(group, "extra_host_conf:flap_detection_enabled", DropdownChoice( title = _("Enable/disable flapping detection for hosts"), help = _("This setting allows you to disable the flapping detection for a " "host completely."), choices = [ ("1", _("Enable flap detection")), ("0", _("Disable flap detection")) ], )) register_rule(group, "extra_service_conf:flap_detection_enabled", DropdownChoice( title = _("Enable/disable flapping detection for services"), help = _("This setting allows you to disable the flapping detection for a " "service completely."), choices = [ ("1", _("Enable flap detection")), ("0", _("Disable flap detection")) ], ), itemtype = "service") register_rule(group, "extra_service_conf:notes_url", TextAscii( label = _("URL:"), title = _("Notes URL for Services"), help = _("With this setting you can set links to 
documentations " "for each service"), attrencode = True, ), itemtype = "service") register_rule(group, "extra_host_conf:notes_url", TextAscii( label = _("URL:"), title = _("Notes URL for Hosts"), help = _("With this setting you can set links to documentations " "for Hosts"), attrencode = True, ), ) register_rule(group, "extra_service_conf:display_name", TextUnicode( title = _("Alternative display name for Services"), help = _("This rule set allows you to specify an alternative name " "to be displayed for certain services. This name is available as " "a column when creating new views or modifying existing ones. " "It is always visible in the details view of a service. In the " "availability reporting there is an option for using that name " "instead of the normal service description. It does <b>not</b> automatically " "replace the normal service name in all views.<br><br><b>Note</b>: The " "purpose of this rule set is to define unique names for several well-known " "services. It cannot rename services in general."), size = 64, attrencode = True, ), itemtype = "service") group = "monconf/" + _("Inventory and Check_MK settings") register_rule(group, "only_hosts", title = _("Hosts to be monitored"), help = _("By adding rules to this ruleset you can define a subset of your hosts " "to be actually monitored. As long as the rule set is empty " "all configured hosts will be monitored. As soon as you add at least one " "rule, only hosts with a matching rule will be monitored."), optional = True, # only_hosts is None per default ) register_rule(group, "ignored_services", title = _("Disabled services"), help = _("Services that are declared as <u>disabled</u> by this rule set will not be added " "to a host during inventory (automatic service detection). 
Services that already " "exist will continued to be monitored but be marked as obsolete in the service " "list of a host."), itemtype = "service") register_rule(group, "ignored_checks", CheckTypeSelection( title = _("Disabled checks"), help = _("This ruleset is similar to 'Disabled services', but selects checks to be disabled " "by their <b>type</b>. This allows you to disable certain technical implementations " "such as filesystem checks via SNMP on hosts that also have the Check_MK agent " "installed."), )) register_rule(group, "clustered_services", title = _("Clustered services"), help = _("When you define HA clusters in WATO then you also have to specify " "which services of a node should be assigned to the cluster and " "which services to the physical node. This is done by this ruleset. " "Please note that the rule will be applied to the <i>nodes</i>, not " "to the cluster.<br><br>Please make sure that you re-inventorize the " "cluster and the physical nodes after changing this ruleset."), itemtype = "service") group = "monconf/" + _("Various") register_rule(group, "clustered_services_mapping", TextAscii( title = _("Clustered services for overlapping clusters"), label = _("Assign services to the following cluster:"), help = _("It's possible to have clusters that share nodes. You could say that " "such clusters &quot;overlap&quot;. In such a case using the ruleset " "<i>Clustered services</i> is not sufficient since it would not be clear " "to which of the several possible clusters a service found on such a shared " "node should be assigned to. With this ruleset you can assign services and " "explicitely specify which cluster assign them to."), ), itemtype = "service", ) register_rule(group, "extra_host_conf:service_period", TimeperiodSelection( title = _("Service period for hosts"), help = _("When it comes to availability reporting, you might want the report " "to cover only certain time periods, e.g. only Monday to Friday " "from 8:00 to 17:00. 
You can do this by specifying a service period " "for hosts or services. In the reporting you can then decide to " "include, exclude or ignore such periods und thus e.g. create a report " "of the availability just within or without these times. <b>Note</b>: Changes in the " "actual <i>definition</i> of a time period will only be reflected in " "times <i>after</i> that change. Selecting a different service period " "will also be reflected in the past.")), ) register_rule(group, "extra_service_conf:service_period", TimeperiodSelection( title = _("Service period for services"), help = _("When it comes to availability reporting, you might want the report " "to cover only certain time periods, e.g. only Monday to Friday " "from 8:00 to 17:00. You can do this by specifying a service period " "for hosts or services. In the reporting you can then decide to " "include, exclude or ignore such periods und thus e.g. create a report " "of the availability just within or without these times. <b>Note</b>: Changes in the " "actual <i>definition</i> of a time period will only be reflected in " "times <i>after</i> that change. Selecting a different service period " "will also be reflected in the past.")), itemtype = "service") class MonitoringIcon(ValueSpec): def __init__(self, **kwargs): ValueSpec.__init__(self, **kwargs) def available_icons(self): if defaults.omd_root: dirs = [ defaults.omd_root + "/local/share/check_mk/web/htdocs/images/icons", defaults.omd_root + "/share/check_mk/web/htdocs/images/icons" ] else: dirs = [ defaults.web_dir + "/htdocs/images/icons" ] icons = [] for dir in dirs: if os.path.exists(dir): icons += [ i for i in os.listdir(dir) if '.' 
in i and os.path.isfile(dir + "/" + i) ] icons.sort() return icons def render_input(self, varprefix, value): if value is None: value = "" num_columns = 12 html.write("<table>") for nr, filename in enumerate([""] + self.available_icons()): if nr % num_columns == 0: html.write("<tr>") html.write("<td>") html.radiobutton(varprefix, str(nr), value == filename, self.value_to_text(filename)) html.write("&nbsp; </td>") if nr % num_columns == num_columns - 1: html.write("</tr>") if nr != num_columns - 1: html.write("</tr>") html.write("</table>") def value_to_text(self, value): if value: return '<img align=middle title="%s" class=icon src="images/icons/%s">' % \ (value, value) else: return _("none") def from_html_vars(self, varprefix): nr = int(html.var(varprefix)) if nr == 0: return None else: return self.available_icons()[nr-1] def validate_datatype(self, value, varprefix): if value is not None and type(value) != str: raise MKUserError(varprefix, _("The type is %s, but should be str") % type(value)) def validate_value(self, value, varprefix): if value and value not in self.available_icons(): raise MKUserError(varprefix, _("The selected icon image does not exist.")) ValueSpec.custom_validate(self, value, varprefix) register_rule(group, "extra_host_conf:icon_image", MonitoringIcon( title = _("Icon image for hosts in status GUI"), help = _("You can assign icons to hosts for the status GUI. " "Put your images into <tt>%s</tt>. ") % ( defaults.omd_root and defaults.omd_root + "/local/share/check_mk/web/htdocs/images/icons" or defaults.web_dir + "/htdocs/images/icons" ), )) register_rule(group, "extra_service_conf:icon_image", MonitoringIcon( title = _("Icon image for services in status GUI"), help = _("You can assign icons to services for the status GUI. " "Put your images into <tt>%s</tt>. 
") % ( defaults.omd_root and defaults.omd_root + "/local/share/check_mk/web/htdocs/images/icons" or defaults.web_dir + "/htdocs/images/icons" ), ), itemtype = "service") register_rulegroup("agent", _("Access to Agents"), _("Settings concerning the connection to the Check_MK and SNMP agents")) group = "agent/" + _("General Settings") register_rule(group, "dyndns_hosts", title = _("Hosts with dynamic DNS lookup during monitoring"), help = _("This ruleset selects host for dynamic DNS lookup during monitoring. Normally " "the IP addresses of hosts are statically configured or looked up when you " "activate the changes. In some rare cases DNS lookups must be done each time " "a host is connected to, e.g. when the IP address of the host is dynamic " "and can change.")) group = "agent/" + _("SNMP") _snmpv3_auth_elements = [ DropdownChoice( choices = [ ( "md5", _("MD5") ), ( "sha", _("SHA1") ), ], title = _("Authentication protocol") ), TextAscii( title = _("Security name"), attrencode = True ), Password( title = _("Authentication password"), minlen = 8, ) ] register_rule(group, "snmp_communities", Alternative( elements = [ TextAscii( title = _("SNMP community (SNMP Versions 1 and 2c)"), allow_empty = False, attrencode = True, ), Tuple( title = _("Credentials for SNMPv3 without authentication and privacy (noAuthNoPriv)"), elements = [ FixedValue("noAuthNoPriv", title = _("Security Level"), totext = _("No authentication, no privacy"), ), ] ), Tuple( title = _("Credentials for SNMPv3 with authentication but without privacy (authNoPriv)"), elements = [ FixedValue("authNoPriv", title = _("Security Level"), totext = _("authentication but no privacy"), ), ] + _snmpv3_auth_elements ), Tuple( title = _("Credentials for SNMPv3 with authentication and privacy (authPriv)"), elements = [ FixedValue("authPriv", title = _("Security Level"), totext = _("authentication and encryption"), ), ] + _snmpv3_auth_elements + [ DropdownChoice( choices = [ ( "DES", _("DES") ), ( "AES", _("AES") ), 
], title = _("Privacy protocol") ), Password( title = _("Privacy pass phrase"), minlen = 8, ), ] ), ], match = lambda x: type(x) == tuple and ( \ len(x) == 1 and 1 or \ len(x) == 4 and 2 or 3) or 0, style = "dropdown", default_value = "public", title = _("SNMP credentials of monitored hosts"), help = _("By default Check_MK uses the community \"public\" to contact hosts via SNMP v1/v2. This rule " "can be used to customize the the credentials to be used when contacting hosts via SNMP."))) register_rule(group, "snmp_character_encodings", DropdownChoice( title = _("Output text coding settings for SNMP devices"), help = _("Some devices send texts in non-ASCII characters. Check_MK" " always assumes UTF-8 encoding. You can declare other " " other encodings here"), choices = [ ("utf-8", _("UTF-8") ), ("latin1" ,_("latin1")), ] )), register_rule(group, "bulkwalk_hosts", title = _("Hosts using SNMP bulk walk (enforces SNMP v2c)"), help = _("Most SNMP hosts support SNMP version 2c. However, Check_MK defaults to version 1, " "in order to support as many devices as possible. Please use this ruleset in order " "to configure SNMP v2c for as many hosts as possible. That version has two advantages: " "it supports 64 bit counters, which avoids problems with wrapping counters at too " "much traffic. And it supports bulk walk, which saves much CPU and network resources. " "Please be aware, however, that there are some broken devices out there, that support " "bulk walk but behave very bad when it is used. When you want to enable v2c while not using " "bulk walk, please use the rule set snmpv2c_hosts instead.")) register_rule(group, "snmp_without_sys_descr", title = _("Hosts without system description OID"), help = _("Devices which do not publish the system description OID " ".1.3.6.1.2.1.1.1.0 are normally ignored by the SNMP inventory. 
" "Use this ruleset to select hosts which should nevertheless " "be checked.")) register_rule(group, "snmpv2c_hosts", title = _("Legacy SNMP devices using SNMP v2c"), help = _("There exist a few devices out there that behave very badly when using SNMP v2c and bulk walk. " "If you want to use SNMP v2c on those devices, nevertheless, you need to configure this device as " "legacy snmp device and upgrade it to SNMP v2c (without bulk walk) with this rule set. One reason is enabling 64 bit counters. " "Note: This rule won't apply if the device is already configured as SNMP v2c device.")) register_rule(group, "snmp_timing", Dictionary( title = _("Timing settings for SNMP access"), help = _("This rule decides about the number of retries and timeout values " "for the SNMP access to devices."), elements = [ ( "timeout", Float( title = _("Timeout between retries"), help = _("After a request is sent to the remote SNMP agent we will wait up to this " "number of seconds until assuming the answer get lost and retrying."), default_value = 1, minvalue = 0.1, maxvalue = 60, allow_int = True, unit = _("sec"), size = 6, ), ), ( "retries", Integer( title = _("Number of retries"), default_value = 5, minvalue = 0, maxvalue = 50, ) ), ] ), factory_default = { "timeout" : 1, "retries" : 5 }, match = "dict") register_rule(group, "usewalk_hosts", title = _("Simulating SNMP by using a stored SNMP walk"), help = _("This ruleset helps in test and development. You can create stored SNMP " "walks on the command line with cmk --snmpwalk HOSTNAME. A host that " "is configured with this ruleset will then use the information from that " "file instead of using real SNMP. ")) group = "agent/" + _("Check_MK Agent") register_rule(group, "agent_ports", Integer( minvalue = 1, maxvalue = 65535, default_value = 6556), title = _("TCP port for connection to Check_MK agent"), help = _("This variable allows to specify the TCP port to " "be used to connect to the agent on a per-host-basis. 
"), ) register_rule(group, "check_mk_exit_status", Dictionary( elements = [ ( "connection", MonitoringState( default_value = 2, title = _("State in case of connection problems")), ), ( "timeout", MonitoringState( default_value = 2, title = _("State in case of a overall timeout")), ), ( "missing_sections", MonitoringState( default_value = 1, title = _("State if just <i>some</i> agent sections are missing")), ), ( "empty_output", MonitoringState( default_value = 2, title = _("State in case of empty agent output")), ), ( "wrong_version", MonitoringState( default_value = 1, title = _("State in case of wrong agent version")), ), ( "exception", MonitoringState( default_value = 3, title = _("State in case of unhandled exception")), ), ], ), factory_default = { "connection" : 2, "missing_sections" : 1, "empty_output" : 2, "wrong_version" : 1, "exception": 3 }, title = _("Status of the Check_MK service"), help = _("This ruleset specifies the total status of the Check_MK service in " "case of various error situations. One use case is the monitoring " "of hosts that are not always up. You can have Check_MK an OK status " "here if the host is not reachable. Note: the <i>Timeout</i> setting only works " "when using the Check_MK Micro Core."), match = "dict", ) register_rule(group, "check_mk_agent_target_versions", Transform( CascadingDropdown( title = _("Check for correct version of Check_MK agent"), help = _("Here you can make sure that all of your Check_MK agents are running" " one specific version. 
Agents running " " a different version return a non-OK state."), choices = [ ("ignore", _("Ignore the version")), ("site", _("Same version as the monitoring site")), ("specific", _("Specific version"), TextAscii( allow_empty = False, ) ), ("at_least", _("At least"), Dictionary( elements = [ ('release', TextAscii( title = _('Official Release version'), allow_empty = False, )), ('daily_build', TextAscii( title = _('Daily build'), allow_empty = False, )), ] ), ), ], default_value = "ignore", ), # In the past, this was a OptionalDropdownChoice() which values could be strings: # ignore, site or a custom string representing a version number. forth = lambda x: type(x) == str and x not in [ "ignore", "site" ] and ("specific", x) or x ) ) register_rule(group, "piggyback_translation", HostnameTranslation( title = _("Hostname translation for piggybacked hosts"), help = _("Some agents or agent plugins send data not only for the queried host but also " "for other hosts &quot;piggyback&quot; with their own data. This is the case " "for the vSphere special agent and the SAP R/3 plugin, for example. The hostnames " "that these agents send must match your hostnames in your monitoring configuration. " "If that is not the case, then with this rule you can define a hostname translation. " "Note: This rule must be configured for the &quot;pig&quot; - i.e. the host that the " "agent is running on. It is not applied to the translated piggybacked hosts."), ), match = "dict") def get_snmp_checktypes(): checks = check_mk_local_automation("get-check-information") types = [ (cn, (c['title'] != cn and '%s: ' % cn or '') + c['title']) for (cn, c) in checks.items() if c['snmp'] ] types.sort() return [ (None, _('All SNMP Checks')) ] + types register_rule(group, "snmp_check_interval", Tuple( title = _('Check intervals for SNMP checks'), help = _('This rule can be used to customize the check interval of each SNMP based check. 
' 'With this option it is possible to configure a longer check interval for specific ' 'checks, than then normal check interval.'), elements = [ DropdownChoice( title = _("Checktype"), choices = get_snmp_checktypes, ), Integer( title = _("Do check every"), unit = _("minutes"), min_value = 1, default_value = 1, ), ] ) ) register_rule(group, "snmpv3_contexts", Tuple( title = _('SNMPv3 contexts to use in requests'), help = _('By default Check_MK does not use a specific context during SNMPv3 queries, ' 'but some devices are offering their information in different SNMPv3 contexts. ' 'This rule can be used to configure, based on hosts and check type, which SNMPv3 ' 'contexts Check_MK should ask for when getting information via SNMPv3.'), elements = [ DropdownChoice( title = _("Checktype"), choices = get_snmp_checktypes, ), ListOfStrings( title = _("SNMP Context IDs"), allow_empty = False, ), ] ) )
maohongyuan/kbengine
refs/heads/master
kbe/src/lib/python/Doc/tools/sphinxext/c_annotations.py
31
# -*- coding: utf-8 -*- """ c_annotations.py ~~~~~~~~~~~~~~~~ Supports annotations for C API elements: * reference count annotations for C API functions. Based on refcount.py and anno-api.py in the old Python documentation tools. * stable API annotations Usage: Set the `refcount_file` config value to the path to the reference count data file. :copyright: Copyright 2007-2013 by Georg Brandl. :license: Python license. """ from os import path from docutils import nodes from docutils.parsers.rst import directives from sphinx import addnodes from sphinx.domains.c import CObject class RCEntry: def __init__(self, name): self.name = name self.args = [] self.result_type = '' self.result_refs = None class Annotations(dict): @classmethod def fromfile(cls, filename): d = cls() fp = open(filename, 'r') try: for line in fp: line = line.strip() if line[:1] in ("", "#"): # blank lines and comments continue parts = line.split(":", 4) if len(parts) != 5: raise ValueError("Wrong field count in %r" % line) function, type, arg, refcount, comment = parts # Get the entry, creating it if needed: try: entry = d[function] except KeyError: entry = d[function] = RCEntry(function) if not refcount or refcount == "null": refcount = None else: refcount = int(refcount) # Update the entry with the new parameter or the result # information. 
if arg: entry.args.append((arg, type, refcount)) else: entry.result_type = type entry.result_refs = refcount finally: fp.close() return d def add_annotations(self, app, doctree): for node in doctree.traverse(addnodes.desc_content): par = node.parent if par['domain'] != 'c': continue if par['stableabi']: node.insert(0, nodes.emphasis(' Part of the stable ABI.', ' Part of the stable ABI.', classes=['stableabi'])) if par['objtype'] != 'function': continue if not par[0].has_key('names') or not par[0]['names']: continue name = par[0]['names'][0] if name.startswith("c."): name = name[2:] entry = self.get(name) if not entry: continue elif entry.result_type not in ("PyObject*", "PyVarObject*"): continue if entry.result_refs is None: rc = 'Return value: Always NULL.' elif entry.result_refs: rc = 'Return value: New reference.' else: rc = 'Return value: Borrowed reference.' node.insert(0, nodes.emphasis(rc, rc, classes=['refcount'])) def init_annotations(app): refcounts = Annotations.fromfile( path.join(app.srcdir, app.config.refcount_file)) app.connect('doctree-read', refcounts.add_annotations) def setup(app): app.add_config_value('refcount_file', '', True) app.connect('builder-inited', init_annotations) # monkey-patch C object... CObject.option_spec = { 'noindex': directives.flag, 'stableabi': directives.flag, } old_handle_signature = CObject.handle_signature def new_handle_signature(self, sig, signode): signode.parent['stableabi'] = 'stableabi' in self.options return old_handle_signature(self, sig, signode) CObject.handle_signature = new_handle_signature
nuagenetworks/vspk-python
refs/heads/master
vspk/v5_0/fetchers/nusharednetworkresources_fetcher.py
2
# -*- coding: utf-8 -*- # # Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from bambou import NURESTFetcher class NUSharedNetworkResourcesFetcher(NURESTFetcher): """ Represents a NUSharedNetworkResources fetcher Notes: This fetcher enables to fetch NUSharedNetworkResource objects. See: bambou.NURESTFetcher """ @classmethod def managed_class(cls): """ Return NUSharedNetworkResource class that is managed. Returns: .NUSharedNetworkResource: the managed class """ from .. 
import NUSharedNetworkResource return NUSharedNetworkResource
damdam-s/OpenUpgrade
refs/heads/8.0
addons/web_tests/tests/__init__.py
385
# -*- coding: utf-8 -*- import test_ui
gajden/se-guess-who
refs/heads/master
crawlers/wikidata_crawler.py
1
import json import os from dircache import listdir from os.path import join, basename import shutil import wget def read_json(path): with open(path, 'r') as f_in: data = json.load(f_in) return data class WikiDataCrawler(object): def __init__(self, mapping_path=None): self.url = 'https://www.wikidata.org/' self.api = 'w/api.php' self.query = { 'action': 'wbgetentities', 'languages': 'en', 'format': 'json' } self.data_dir = '../data' self.filtered_dir = '../data/filtered' self.tmp_dir = '../data/tmp' self.mapping_path = mapping_path if self.mapping_path is None: self.mapping_path = join(self.data_dir, 'ids_mapping.json') self.mapping = None def get_mapping_for_known_ids(self, ids_list): ids_mapping = {} for query_id in ids_list: print query_id if not self.__check_if_id_file_exists(query_id): query_data = self.query.copy() query_data['ids'] = query_id query_url = '%s%s?action=%s&ids=%s&languages=%s&format=%s' % (self.url, self.api, query_data['action'], query_data['ids'], query_data['languages'], query_data['format']) filename = wget.download(query_url, out=join(self.tmp_dir, '%s.json' % query_id)) else: filename = join(self.tmp_dir, '%s.json' % query_id) with open(filename, 'r') as f_in: query_data = json.load(f_in) try: ids_mapping[query_id] = query_data['entities'][query_id]['labels']['en']['value'] except Exception: pass self.mapping = ids_mapping with open(self.mapping_path, 'w') as f_out: json.dump(self.mapping, f_out, indent=4) def crawl_ids(self, ids_list): result_ids = [] for query_id in ids_list: print query_id if not self.__check_if_id_file_exists(query_id): query_data = self.query.copy() query_data['ids'] = query_id query_url = '%s%s?action=%s&ids=%s&languages=%s&format=%s' % (self.url, self.api, query_data['action'], query_data['ids'], query_data['languages'], query_data['format']) filename = wget.download(query_url, out=join(self.tmp_dir, '%s.json' % query_id)) else: filename = join(self.tmp_dir, '%s.json' % query_id) with open(filename, 'r') as f_in: 
data = json.load(f_in) try: if data['entities'][query_id]['claims']['P31'][0]['mainsnak']['datavalue']['value']['id'] == 'Q5': shutil.copy(filename, join(self.filtered_dir, basename(filename))) result_ids.append(query_id) else: os.remove(filename) except Exception: pass return result_ids def get_ids_for_mapping(self): known_ids = [] filenames = [join(self.filtered_dir, name) for name in listdir(self.filtered_dir)] for filename in filenames: data = read_json(filename) id = data['entities'].keys()[0] claims = data['entities'][id]['claims'] for claim, values in claims.iteritems(): known_ids.append(claim) for value in values: try: known_ids.append(value['mainsnak']['datavalue']['value']['id']) except Exception: pass return known_ids def parse_json(self, id): person_data = {} data = read_json(join(self.filtered_dir, '%s.json' % id)) person = data['entities'][id]['labels']['en']['value'] claims = data['entities'][id]['claims'] for claim, values in claims.iteritems(): claim_dict = [] for value in values: try: claim_dict.append(self.mapping[value['mainsnak']['datavalue']['value']['id']]) except Exception: pass person_data[self.mapping[claim]] = claim_dict with open(join(self.data_dir, 'person_data', '%s.json' % person), 'w') as f_out: json.dump(person_data, f_out, indent=4) return person, person_data def __check_if_id_file_exists(self, id): print os.path.exists(join(self.tmp_dir, '%s.json' % id)) return os.path.exists(join(self.tmp_dir, '%s.json' % id)) # https://www.wikidata.org/w/api.php?action=wbgetentities&ids=Q5284&languages=en # https://www.wikidata.org/w/api.php?action=wbgetentities&ids=Q5284&languages=en def generate_ids(min=10000, max=12000): ids = [] for i in xrange(min, max): ids.append('Q%d' % i) return ids if __name__ == '__main__': crawler_ids = generate_ids() crawler = WikiDataCrawler() crawled_ids = crawler.crawl_ids(crawler_ids) ids = crawler.get_ids_for_mapping() crawler.get_mapping_for_known_ids(ids) for id in crawled_ids: crawler.parse_json(id)
sidzan/netforce
refs/heads/master
netforce_account/netforce_account/tests/service_sale_multi_pay.py
4
from netforce.test import TestCase
from netforce.model import get_model
from datetime import *
import time


class Test(TestCase):
    """End-to-end accounting test: post two customer invoices and settle
    both with a single payment, checking every journal-entry line.

    Amounts suggest a 7% VAT rate (70 on 1000, 140 on 2000) and a 3%
    withholding tax (90 on 3000) — presumably the Thai localization;
    confirm against the demo chart of accounts.
    """
    _name="service.sale.multi.pay"
    _description="Service sale payment for 2 invoices"

    def test_run(self):
        # --- create invoice #1: 1000 service income + VAT -> 1070 total ---
        vals={
            "type": "out",
            "inv_type": "invoice",
            "partner_id": get_model("partner").search([["name","=","ABC Food"]])[0],
            "date": time.strftime("%Y-%m-%d"),
            "due_date": (datetime.now()+timedelta(days=30)).strftime("%Y-%m-%d"),
            "tax_type": "tax_ex",
            "lines": [("create",{
                "description": "Training",
                "qty": 1,
                "unit_price": 1000,
                "account_id": get_model("account.account").search([["name","=","Service Income"]])[0],
                "tax_id": get_model("account.tax.rate").search([["name","=","Service Sales"]])[0],
                "amount": 1000,
            })],
        }
        inv1_id=get_model("account.invoice").create(vals,context={"type":"out","inv_type":"invoice"})
        inv=get_model("account.invoice").browse(inv1_id)
        inv.post()
        self.assertEqual(inv.state,"waiting_payment")
        # Posting must produce: AR debit 1070 / suspended VAT credit 70 /
        # income credit 1000 (VAT is suspended until payment).
        move=inv.move_id
        self.assertEqual(move.lines[0].account_id.name,"Account receivables - Trade")
        self.assertEqual(move.lines[0].debit,1070)
        self.assertEqual(move.lines[1].account_id.name,"Suspend Output Vat")
        self.assertEqual(move.lines[1].credit,70)
        self.assertEqual(move.lines[2].account_id.name,"Service Income")
        self.assertEqual(move.lines[2].credit,1000)

        # --- create invoice #2: 2000 service income + VAT -> 2140 total ---
        vals={
            "type": "out",
            "inv_type": "invoice",
            "partner_id": get_model("partner").search([["name","=","ABC Food"]])[0],
            "date": time.strftime("%Y-%m-%d"),
            "due_date": (datetime.now()+timedelta(days=30)).strftime("%Y-%m-%d"),
            "tax_type": "tax_ex",
            "lines": [("create",{
                "description": "Training",
                "qty": 1,
                "unit_price": 2000,
                "account_id": get_model("account.account").search([["name","=","Service Income"]])[0],
                "tax_id": get_model("account.tax.rate").search([["name","=","Service Sales"]])[0],
                "amount": 2000,
            })],
        }
        inv2_id=get_model("account.invoice").create(vals,context={"type":"out","inv_type":"invoice"})
        inv=get_model("account.invoice").browse(inv2_id)
        inv.post()
        self.assertEqual(inv.state,"waiting_payment")
        move=inv.move_id
        self.assertEqual(move.lines[0].account_id.name,"Account receivables - Trade")
        self.assertEqual(move.lines[0].debit,2140)
        self.assertEqual(move.lines[1].account_id.name,"Suspend Output Vat")
        self.assertEqual(move.lines[1].credit,140)
        self.assertEqual(move.lines[2].account_id.name,"Service Income")
        self.assertEqual(move.lines[2].credit,2000)

        # --- create one payment covering invoices #1 and #2 in full ---
        vals={
            "partner_id": get_model("partner").search([["name","=","ABC Food"]])[0],
            "type": "in",
            "pay_type": "invoice",
            "date": time.strftime("%Y-%m-%d"),
            "account_id": get_model("account.account").search([["name","=","Saving Account -Kbank"]])[0],
            "lines": [
                ("create",{
                    "invoice_id": inv1_id,
                    "amount": 1070,
                }),
                ("create",{
                    "invoice_id": inv2_id,
                    "amount": 2140,
                }),
            ],
        }
        pmt_id=get_model("account.payment").create(vals,context={"type":"in"})
        pmt=get_model("account.payment").browse(pmt_id)
        pmt.post()
        inv1=get_model("account.invoice").browse(inv1_id)
        inv2=get_model("account.invoice").browse(inv2_id)
        # Both invoices must be fully reconciled by the single payment.
        self.assertEqual(pmt.state,"posted")
        self.assertEqual(inv1.state,"paid")
        self.assertEqual(inv2.state,"paid")
        # Payment move: bank debit is 3120 (3210 received minus 90 tax
        # withheld by the customer); each invoice's suspended VAT is
        # transferred to Output Vat on payment.
        move=pmt.move_id
        self.assertEqual(move.lines[0].account_id.name,"Saving Account -Kbank")
        self.assertEqual(move.lines[0].debit,3120)
        self.assertEqual(move.lines[1].account_id.name,"Account receivables - Trade")
        self.assertEqual(move.lines[1].credit,1070)
        self.assertEqual(move.lines[2].account_id.name,"Suspend Output Vat")
        self.assertEqual(move.lines[2].debit,70)
        self.assertEqual(move.lines[3].account_id.name,"Output Vat")
        self.assertEqual(move.lines[3].credit,70)
        self.assertEqual(move.lines[4].account_id.name,"Account receivables - Trade")
        self.assertEqual(move.lines[4].credit,2140)
        self.assertEqual(move.lines[5].account_id.name,"Suspend Output Vat")
        self.assertEqual(move.lines[5].debit,140)
        self.assertEqual(move.lines[6].account_id.name,"Output Vat")
        self.assertEqual(move.lines[6].credit,140)
        self.assertEqual(move.lines[7].account_id.name,"Withholding Income Tax")
        self.assertEqual(move.lines[7].debit,90)

Test.register()
evernote/evernote-sdk-python3
refs/heads/master
lib/thrift/TSerialization.py
111
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

from .protocol import TBinaryProtocol
from .transport import TTransport


def serialize(thrift_object,
              protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
    """Serialize *thrift_object* with *protocol_factory* and return the
    resulting bytes (binary protocol by default)."""
    mem_buffer = TTransport.TMemoryBuffer()
    proto = protocol_factory.getProtocol(mem_buffer)
    thrift_object.write(proto)
    return mem_buffer.getvalue()


def deserialize(base, buf,
                protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
    """Populate the thrift struct *base* from the serialized bytes *buf*
    and return *base* for convenience."""
    mem_buffer = TTransport.TMemoryBuffer(buf)
    proto = protocol_factory.getProtocol(mem_buffer)
    base.read(proto)
    return base
lizardsystem/lizard-esf
refs/heads/master
lizard_esf/tasks.py
1
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=C0111
# Copyright (c) 2012 Nelen & Schuurmans.  GPL licensed, see LICENSE.rst.
"""Celery tasks importing and exporting ESF configurations from/to dbf files."""
import logging

from celery.task import task

from lizard_esf.import_dbf import DBFImporter
from lizard_esf.models import DBFConfiguration
from lizard_esf.export_dbf import DBFExporter
from lizard_esf.models import DbfFile
from lizard_task.task import task_logging


def get_logger(handler, taskname, levelno):
    """Create logger to log messages into db."""
    # NOTE(review): unused in this module (the tasks use logging.getLogger
    # directly) — presumably kept for external callers; verify before removal.
    logger = logging.getLogger(taskname)
    logger.addHandler(handler)
    logger.setLevel(int(levelno))
    return logger


@task()
@task_logging
def import_dbf_all(taskname="", data_set=None, username=None, levelno=20):
    """
    Import esf configurations from dbf.

    Runs one DBFImporter per configured DbfFile; taskname/username/levelno
    are consumed by the task_logging decorator's db logging.
    """
    logger = logging.getLogger(taskname)
    dbffiles = DbfFile.objects.all()
    for dbffile in dbffiles:
        # A fresh importer per file: esftype selects the dbf layout.
        dbfimporter = DBFImporter(logger)
        dbfimporter.esftype = dbffile.name
        dbfimporter.data_set = data_set
        logger.info("Start import of '%s'." % dbffile.name)
        dbfimporter.run()
    logger.info("END IMPORT.")


@task()
@task_logging
def import_dbf(taskname="", username=None, levelno=20,
               data_set=None, esftype=None):
    """
    Import esf configurations from dbf.

    Single-type variant of import_dbf_all: imports only *esftype*.
    """
    logger = logging.getLogger(taskname)
    dbfimporter = DBFImporter(logger)
    dbfimporter.esftype = esftype
    dbfimporter.data_set = data_set
    logger.info("Start import of '%s'." % esftype)
    dbfimporter.run()
    logger.info("END IMPORT.")


@task()
@task_logging
def export_esf_to_dbf(
    data_set=None, levelno=20, username=None, taskname=""):
    """
    Export esf configurations into dbf.

    Exports every enabled DBFConfiguration, optionally restricted to one
    data_set (organisation) by name.
    """
    logger = logging.getLogger(taskname)
    dbfexporter = DBFExporter(logger)
    dbf_configurations = DBFConfiguration.objects.filter(enabled=True)
    if data_set is not None:
        dbf_configurations = dbf_configurations.filter(data_set__name=data_set)
    logger.info("%s esf configurations to export." % len(
            dbf_configurations))
    for dbf_configuration in dbf_configurations:
        owner = dbf_configuration.data_set
        save_to = dbf_configuration.save_to
        dbf_file = dbf_configuration.dbf_file
        filename = dbf_configuration.filename
        logger.info("Start export '%s' for '%s'." % (dbf_file, owner))
        dbfexporter.export_esf_configurations(
            owner, save_to, dbf_file, filename)
        logger.info("End export '%s' for '%s'." % (dbf_file, owner))
    logger.info("END EXPORT.")


def run_export_esf_to_dbf():
    """Run export_esf_to_dbf task for HHNK as test."""
    kwargs = {"data_set": "HHNK"}
    export_esf_to_dbf.delay(**kwargs)


def run_importdbf_task():
    """Run import_dbf task as test."""
    kwargs = {'esftype': 'esf1',
              'taskname': 'esf_import_dbf_waternet',
              'username': 'admin',
              'levelno': 20,
              'data_set': 'Waternet'}
    import_dbf.delay(**kwargs)
asedunov/intellij-community
refs/heads/master
python/testData/inspections/PyTypeCheckerInspection/HomogeneousTuplePlusHeterogeneousTupleWithTheSameElementsType.py
30
A = tuple(sorted([1, 4, 2]))  # homogeneous tuple: tuple(list) is inferred as tuple[int, ...]
B = A + (4, 6, 7, 8)  # concatenation with a fixed int tuple must not raise a type warning (same element type)
acbodine/koding
refs/heads/master
go/src/vendor/github.com/caglar10ur/lxc/src/python-lxc/setup.py
15
#!/usr/bin/env python3
#
# python-lxc: Python bindings for LXC
#
# (C) Copyright Canonical Ltd. 2012
#
# Authors:
# Stéphane Graber <stgraber@ubuntu.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA

import os
import subprocess

from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as BuildExtCommand


class LxcBuildExtCommand(BuildExtCommand):
    """build_ext variant that asks pkg-config where liblxc lives.

    Adds a --no-pkg-config switch for builds where liblxc is already on
    the default include/library paths.
    """
    user_options = BuildExtCommand.user_options + [
        ('no-pkg-config', None,
         "don't use pkg-config to detect include/library paths")
    ]

    def initialize_options(self):
        super(LxcBuildExtCommand, self).initialize_options()
        self.no_pkg_config = False

    def build_extensions(self):
        """Resolve liblxc's include/lib dirs via pkg-config, then build."""
        if not self.no_pkg_config:
            # PKG_CONFIG_EXECUTABLE allows cross-compile toolchains to
            # substitute their own pkg-config wrapper.
            pkg_config_executable = os.environ.get('PKG_CONFIG_EXECUTABLE',
                                                   'pkg-config')

            def get_pkg_config_var(name):
                # Query one pkg-config variable of the 'lxc' package.
                args = [pkg_config_executable, '--variable', name, 'lxc']
                output = subprocess.check_output(args,
                                                 universal_newlines=True)
                return output.rstrip('\n')

            try:
                includedir = get_pkg_config_var('includedir')
                libdir = get_pkg_config_var('libdir')
                self.compiler.add_include_dir(includedir)
                self.compiler.add_library_dir(libdir)
            except subprocess.CalledProcessError:
                # pkg-config missing or 'lxc' not registered: fall back to
                # the compiler's default search paths.
                pass

        super(LxcBuildExtCommand, self).build_extensions()


setup(name='lxc',
      version='0.1',
      description='LXC',
      packages=['lxc'],
      package_dir={'lxc': 'lxc'},
      ext_modules=[Extension('_lxc', sources=['lxc.c'], libraries=['lxc'])],
      cmdclass={'build_ext': LxcBuildExtCommand},
      )
thanhacun/odoo
refs/heads/8.0
addons/account/wizard/account_report_general_journal.py
378
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class account_general_journal(osv.osv_memory): _inherit = "account.common.journal.report" _name = 'account.general.journal' _description = 'Account General Journal' _columns = { 'journal_ids': fields.many2many('account.journal', 'account_general_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True), } def _print_report(self, cr, uid, ids, data, context=None): data = self.pre_print_report(cr, uid, ids, data, context=context) return self.pool['report'].get_action(cr, uid, [], 'account.report_generaljournal', data=data, context=context) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
brianmay/karaage
refs/heads/master
karaage/migrations/0002_auto_20140924_1111.py
2
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('karaage', '0001_initial'), ] operations = [ migrations.AlterField( model_name='logentry', name='content_type', field=models.ForeignKey(to='contenttypes.ContentType', on_delete=models.CASCADE), ), ]
rinfo/fst
refs/heads/master
wsgi.py
2
""" WSGI application settings for FST instance The default setup at Domstolsverket assumes that instances of FST run under the default virtualenv unless something else is explicitly specified. """ import os import sys import site # Change this to match your production server path! VIRTUALENV_PATH = '/opt/rinfo/fst/venv-default' PYTHON_SITE_PACKAGES = 'lib/python2.7/site-packages' # Specify the site-packages folder of your virtualenv ALLDIRS = [os.path.join(VIRTUALENV_PATH, PYTHON_SITE_PACKAGES)] # Redirect sys.stdout to sys.stderr for bad libraries like geopy that uses # print statements for optional import exceptions. sys.stdout = sys.stderr prev_sys_path = list(sys.path) # Add all third-party libraries from your virtualenv for directory in ALLDIRS: site.addsitedir(directory) # Reorder sys.path so new directories come first. new_sys_path = [] for item in list(sys.path): if item not in prev_sys_path: new_sys_path.append(item) sys.path.remove(item) sys.path[:0] = new_sys_path # Activate the virtualenv activate_this = os.path.join(VIRTUALENV_PATH, 'bin/activate_this.py') execfile(activate_this, dict(__file__=activate_this)) # Some more path trickery... from os.path import abspath, dirname, join sys.path.insert(0, abspath(join(dirname(__file__), "../../"))) # Get the default settings from django.conf import settings os.environ["DJANGO_SETTINGS_MODULE"] = "fst_web.settings" # Make sure both project and app are on sys.path path = os.path.dirname(__file__) subpath = path + os.sep + "fst_web" if subpath not in sys.path: sys.path.insert(0, subpath) if path not in sys.path: sys.path.insert(0, path) # Finally... run our Django app under WSGI! from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
ProteinDF/ProteinDF_bridge
refs/heads/master
proteindf_bridge/position_test.py
1
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2014 The ProteinDF development team.
# see also AUTHORS and README if provided.
#
# This file is a part of the ProteinDF software package.
#
# The ProteinDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ProteinDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ProteinDF.  If not, see <http://www.gnu.org/licenses/>.

# FIX: 'math' was used below but never imported, so running the test
# raised NameError instead of checking anything.
import math
import unittest

import proteindf_bridge


class PositionTest(unittest.TestCase):
    """Unit tests for proteindf_bridge.Position."""

    def test_constructor(self):
        """Constructing from [x, y, z] must expose the first component as .x."""
        p = proteindf_bridge.Position([5.0, 3.0, -1.2])
        self.assertTrue(math.fabs(p.x - 5.0) < 1.0E-5)


if __name__ == "__main__":
    # Allow running this file directly (backward compatible: no behavior
    # change on import).
    unittest.main()
scalable-networks/ext
refs/heads/master
gnuradio-3.7.0.1/gr-qtgui/apps/qt_digital.py
58
#!/usr/bin/env python # # Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, digital from gnuradio import blocks from gnuradio import filter from gnuradio import channels from gnuradio import eng_notation import sys try: from gnuradio import qtgui from PyQt4 import QtGui, QtCore import sip except ImportError: print "Error: Program requires PyQt4 and gr-qtgui." sys.exit(1) try: import scipy except ImportError: print "Error: Program requires scipy (see: www.scipy.org)." 
sys.exit(1) try: from qt_digital_window import Ui_DigitalWindow except ImportError: print "Error: could not find qt_digital_window.py:" print "\t\"Please run: pyuic4 qt_digital_window.ui -o qt_digital_window.py\"" sys.exit(1) class dialog_box(QtGui.QMainWindow): def __init__(self, snkTx, snkRx, fg, parent=None): QtGui.QWidget.__init__(self, parent) self.gui = Ui_DigitalWindow() self.gui.setupUi(self) self.fg = fg self.set_sample_rate(self.fg.sample_rate()) self.set_snr(self.fg.snr()) self.set_frequency(self.fg.frequency_offset()) self.set_time_offset(self.fg.timing_offset()) self.set_gain_mu(self.fg.rx_gain_mu()) self.set_loop_bw(self.fg.loop_bw()) # Add the qtsnk widgets to the hlayout box self.gui.sinkLayout.addWidget(snkTx) self.gui.sinkLayout.addWidget(snkRx) # Connect up some signals self.connect(self.gui.pauseButton, QtCore.SIGNAL("clicked()"), self.pauseFg) self.connect(self.gui.sampleRateEdit, QtCore.SIGNAL("editingFinished()"), self.sampleRateEditText) self.connect(self.gui.snrEdit, QtCore.SIGNAL("editingFinished()"), self.snrEditText) self.connect(self.gui.freqEdit, QtCore.SIGNAL("editingFinished()"), self.freqEditText) self.connect(self.gui.timeEdit, QtCore.SIGNAL("editingFinished()"), self.timeEditText) self.connect(self.gui.gainMuEdit, QtCore.SIGNAL("editingFinished()"), self.gainMuEditText) self.connect(self.gui.alphaEdit, QtCore.SIGNAL("editingFinished()"), self.alphaEditText) def pauseFg(self): if(self.gui.pauseButton.text() == "Pause"): self.fg.stop() self.fg.wait() self.gui.pauseButton.setText("Unpause") else: self.fg.start() self.gui.pauseButton.setText("Pause") # Accessor functions for Gui to manipulate system parameters def set_sample_rate(self, sr): ssr = eng_notation.num_to_str(sr) self.gui.sampleRateEdit.setText(QtCore.QString("%1").arg(ssr)) def sampleRateEditText(self): try: rate = self.gui.sampleRateEdit.text().toAscii() srate = eng_notation.str_to_num(rate) self.fg.set_sample_rate(srate) except RuntimeError: pass # Accessor functions for 
Gui to manipulate channel model def set_snr(self, snr): self.gui.snrEdit.setText(QtCore.QString("%1").arg(snr)) def set_frequency(self, fo): self.gui.freqEdit.setText(QtCore.QString("%1").arg(fo)) def set_time_offset(self, to): self.gui.timeEdit.setText(QtCore.QString("%1").arg(to)) def snrEditText(self): try: snr = self.gui.snrEdit.text().toDouble()[0] self.fg.set_snr(snr) except RuntimeError: pass def freqEditText(self): try: freq = self.gui.freqEdit.text().toDouble()[0] self.fg.set_frequency_offset(freq) except RuntimeError: pass def timeEditText(self): try: to = self.gui.timeEdit.text().toDouble()[0] self.fg.set_timing_offset(to) except RuntimeError: pass # Accessor functions for Gui to manipulate receiver parameters def set_gain_mu(self, gain): self.gui.gainMuEdit.setText(QtCore.QString("%1").arg(gain)) def set_loop_bw(self, bw): self.gui.alphaEdit.setText(QtCore.QString("%1").arg(bw)) def alphaEditText(self): try: bw = self.gui.alphaEdit.text().toDouble()[0] self.fg.set_loop_bw(bw) except RuntimeError: pass def gainMuEditText(self): try: gain = self.gui.gainMuEdit.text().toDouble()[0] self.fg.set_rx_gain_mu(gain) except RuntimeError: pass class my_top_block(gr.top_block): def __init__(self): gr.top_block.__init__(self) self.qapp = QtGui.QApplication(sys.argv) self._sample_rate = 2000e3 self.sps = 2 self.excess_bw = 0.35 self.gray_code = digital.mod_codes.GRAY_CODE fftsize = 2048 self.data = scipy.random.randint(0, 255, 1000) self.src = blocks.vector_source_b(self.data.tolist(), True) self.mod = digital.dqpsk_mod(self.gray_code, samples_per_symbol=self.sps, excess_bw=self.excess_bw, verbose=False, log=False) self.rrctaps = filter.firdes.root_raised_cosine(1, self.sps, 1, self.excess_bw, 21) self.rx_rrc = filter.fir_filter_ccf(1, self.rrctaps) # Set up the carrier & clock recovery parameters self.arity = 4 self.mu = 0.5 self.gain_mu = 0.05 self.omega = self.sps self.gain_omega = .25 * self.gain_mu * self.gain_mu self.omega_rel_lim = 0.05 self._loop_bw = 
2*scipy.pi/100.0 self.fmin = -1000/self.sample_rate() self.fmax = 1000/self.sample_rate() self.receiver = digital.mpsk_receiver_cc(self.arity, 0, self._loop_bw, self.fmin, self.fmax, self.mu, self.gain_mu, self.omega, self.gain_omega, self.omega_rel_lim) self.snr_dB = 15 noise = self.get_noise_voltage(self.snr_dB) self.fo = 100/self.sample_rate() self.to = 1.0 self.channel = channels.channel_model(noise, self.fo, self.to) self.thr = blocks.throttle(gr.sizeof_char, self._sample_rate) self.snk_tx = qtgui.sink_c(fftsize, filter.firdes.WIN_BLACKMAN_hARRIS, 0, self._sample_rate*self.sps, "Tx", True, True, True, True) self.snk_rx = qtgui.sink_c(fftsize, filter.firdes.WIN_BLACKMAN_hARRIS, 0, self._sample_rate, "Rx", True, True, True, True) self.connect(self.src, self.thr, self.mod, self.channel, self.snk_tx) self.connect(self.channel, self.rx_rrc, self.receiver, self.snk_rx) pyTxQt = self.snk_tx.pyqwidget() pyTx = sip.wrapinstance(pyTxQt, QtGui.QWidget) pyRxQt = self.snk_rx.pyqwidget() pyRx = sip.wrapinstance(pyRxQt, QtGui.QWidget) self.main_box = dialog_box(pyTx, pyRx, self); self.main_box.show() def get_noise_voltage(self, SNR): S = 0 # dBm, assuming signal power normalized N = S - SNR # dBm npwr = pow(10.0, N/10.0) # ratio nv = scipy.sqrt(npwr * self.sps) # convert the noise voltage return nv # System Parameters def sample_rate(self): return self._sample_rate def set_sample_rate(self, sr): self._sample_rate = sr # Channel Model Parameters def snr(self): return self.snr_dB def set_snr(self, snr): self.snr_dB = snr noise = self.get_noise_voltage(self.snr_dB) self.channel.set_noise_voltage(noise) def frequency_offset(self): return self.fo * self.sample_rate() def set_frequency_offset(self, fo): self.fo = fo / self.sample_rate() self.channel.set_frequency_offset(self.fo) def timing_offset(self): return self.to def set_timing_offset(self, to): self.to = to self.channel.set_timing_offset(self.to) # Receiver Parameters def rx_gain_mu(self): return self.gain_mu def 
rx_gain_omega(self): return self.gain_omega def set_rx_gain_mu(self, gain): self.gain_mu = gain self.gain_omega = .25 * self.gain_mu * self.gain_mu self.receiver.set_gain_mu(self.gain_mu) self.receiver.set_gain_omega(self.gain_omega) def set_loop_bw(self, loop_bw): self._loop_bw = bw self.receiver.set_loop_bw(self._loop_bw) def loop_bw(self): return self._loop_bw if __name__ == "__main__": tb = my_top_block(); tb.start() tb.qapp.exec_() tb.stop()
Medium/phantomjs-1
refs/heads/master
src/breakpad/src/tools/gyp/test/subdirectory/gyptest-SYMROOT-default.py
399
#!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies building a target and a subsidiary dependent target from a .gyp file in a subdirectory, without specifying an explicit output build directory, and using the generated solution or project file at the top of the tree as the entry point. The configuration sets the Xcode SYMROOT variable and uses --depth= to make Xcode behave like the other build tools--that is, put all built targets in a single output build directory at the top of the tree. """ import TestGyp test = TestGyp.TestGyp() test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src') test.relocate('src', 'relocate/src') # Suppress the test infrastructure's setting SYMROOT on the command line. test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src') test.run_built_executable('prog1', stdout="Hello from prog1.c\n", chdir='relocate/src') test.run_built_executable('prog2', stdout="Hello from prog2.c\n", chdir='relocate/src') test.pass_test()
craynot/django
refs/heads/master
tests/template_tests/filter_tests/test_urlencode.py
388
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.template.defaultfilters import urlencode from django.test import SimpleTestCase from ..utils import setup class UrlencodeTests(SimpleTestCase): @setup({'urlencode01': '{{ url|urlencode }}'}) def test_urlencode01(self): output = self.engine.render_to_string('urlencode01', {'url': '/test&"/me?/'}) self.assertEqual(output, '/test%26%22/me%3F/') @setup({'urlencode02': '/test/{{ urlbit|urlencode:"" }}/'}) def test_urlencode02(self): output = self.engine.render_to_string('urlencode02', {'urlbit': 'escape/slash'}) self.assertEqual(output, '/test/escape%2Fslash/') class FunctionTests(SimpleTestCase): def test_urlencode(self): self.assertEqual(urlencode('fran\xe7ois & jill'), 'fran%C3%A7ois%20%26%20jill') def test_non_string_input(self): self.assertEqual(urlencode(1), '1')
detialiaj/pro
refs/heads/master
SYS/inout/migrations/0006_entry.py
1
# -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2017-01-02 00:08 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('main', '0017_auto_20161217_2358'), ('inout', '0005_auto_20161218_0000'), ] operations = [ migrations.CreateModel( name='Entry', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[('In', 'In'), ('Out', 'Out')], max_length=3)), ('rfid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.RFIDtag')), ], ), ]
ucrawler/cp-uc
refs/heads/master
libs/suds/umx/core.py
200
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )

"""
Provides base classes for XML->object I{unmarshalling}.
"""

from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.attrlist import AttrList
from suds.sax.text import Text
from suds.sudsobject import Factory, merge


log = getLogger(__name__)

# Python keywords that cannot be used as object attribute names;
# they are remapped to these safe alternatives during unmarshalling.
reserved = { 'class':'cls', 'def':'dfn', }

class Core:
    """
    The abstract XML I{node} unmarshaller.  This class provides the
    I{core} unmarshalling functionality.
    """

    def process(self, content):
        """
        Process an object graph representation of the xml I{node}.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: A suds object.
        @rtype: L{Object}
        """
        self.reset()
        return self.append(content)

    def append(self, content):
        """
        Process the specified node and convert the XML document into
        a I{suds} L{object}.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: A I{append-result} tuple as: (L{Object}, I{value})
        @rtype: I{append-result}
        @note: This is not the proper entry point.
        @see: L{process()}
        """
        # Fixed processing pipeline; subclasses customize via the hook
        # methods (start/end/unbounded/nillable) rather than overriding this.
        self.start(content)
        self.append_attributes(content)
        self.append_children(content)
        self.append_text(content)
        self.end(content)
        return self.postprocess(content)

    def postprocess(self, content):
        """
        Perform final processing of the resulting data structure as follows:
          - Mixed values (children and text) will have a result of the I{content.node}.
          - Semi-simple values (attributes, no-children and text) will have a result of a
            property object.
          - Simple values (no-attributes, no-children with text nodes) will have a string
            result equal to the value of the content.node.getText().
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: The post-processed result.
        @rtype: I{any}
        """
        node = content.node
        # Mixed content: return the raw node itself.
        if len(node.children) and node.hasText():
            return node
        attributes = AttrList(node.attributes)
        # Semi-simple: attributes + text but no children -> property object
        # merged with any data accumulated by append_attributes().
        if attributes.rlen() and \
            not len(node.children) and \
            node.hasText():
                p = Factory.property(node.name, node.getText())
                return merge(content.data, p)
        if len(content.data):
            return content.data
        lang = attributes.lang()
        if content.node.isnil():
            return None
        # Empty element: None when nillable, otherwise an empty Text.
        if not len(node.children) and content.text is None:
            if self.nillable(content):
                return None
            else:
                return Text('', lang=lang)
        # NOTE: Python 2 only -- ``basestring`` does not exist on Python 3.
        if isinstance(content.text, basestring):
            return Text(content.text, lang=lang)
        else:
            return content.text

    def append_attributes(self, content):
        """
        Append attribute nodes into L{Content.data}.
        Attributes in the I{schema} or I{xml} namespaces are skipped.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        attributes = AttrList(content.node.attributes)
        for attr in attributes.real():
            name = attr.name
            value = attr.value
            self.append_attribute(name, value, content)

    def append_attribute(self, name, value, content):
        """
        Append an attribute name/value into L{Content.data}.
        @param name: The attribute name
        @type name: basestring
        @param value: The attribute's value
        @type value: basestring
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        key = name
        # Prefix with '_' (and remap reserved words) so attributes never
        # collide with child-element keys on the resulting object.
        key = '_%s' % reserved.get(key, key)
        setattr(content.data, key, value)

    def append_children(self, content):
        """
        Append child nodes into L{Content.data}
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        for child in content.node:
            cont = Content(child)
            cval = self.append(cont)
            key = reserved.get(child.name, child.name)
            if key in content.data:
                # Repeated element: promote the existing value to a list.
                v = getattr(content.data, key)
                if isinstance(v, list):
                    v.append(cval)
                else:
                    setattr(content.data, key, [v, cval])
                continue
            if self.unbounded(cont):
                # Schema says this child repeats: always store a list.
                if cval is None:
                    setattr(content.data, key, [])
                else:
                    setattr(content.data, key, [cval,])
            else:
                setattr(content.data, key, cval)

    def append_text(self, content):
        """
        Append text nodes into L{Content.data}
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        if content.node.hasText():
            content.text = content.node.getText()

    def reset(self):
        # Hook: subclasses may clear per-document state before process().
        pass

    def start(self, content):
        """
        Processing on I{node} has started.  Build and return
        the proper object.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: A subclass of Object.
        @rtype: L{Object}
        """
        content.data = Factory.object(content.node.name)

    def end(self, content):
        """
        Processing on I{node} has ended.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        pass

    def bounded(self, content):
        """
        Get whether the content is bounded (not a list).
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: True if bounded, else False
        @rtype: boolean
        """
        return ( not self.unbounded(content) )

    def unbounded(self, content):
        """
        Get whether the object is unbounded (a list).
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: True if unbounded, else False
        @rtype: boolean
        """
        return False

    def nillable(self, content):
        """
        Get whether the object is nillable.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: True if nillable, else False
        @rtype: boolean
        """
        return False
marcuskelly/recover
refs/heads/master
Lib/site-packages/passlib/tests/test_handlers_cisco.py
5
""" passlib.tests.test_handlers_cisco - tests for Cisco-specific algorithms """ #============================================================================= # imports #============================================================================= from __future__ import absolute_import, division, print_function # core import logging log = logging.getLogger(__name__) # site # pkg from passlib import hash, exc from passlib.utils.compat import u from .utils import UserHandlerMixin, HandlerCase, repeat_string from .test_handlers import UPASS_TABLE # module __all__ = [ "cisco_pix_test", "cisco_asa_test", "cisco_type7_test", ] #============================================================================= # shared code for cisco PIX & ASA #============================================================================= class _PixAsaSharedTest(UserHandlerMixin, HandlerCase): """ class w/ shared info for PIX & ASA tests. """ __unittest_skip = True # for TestCase requires_user = False # for UserHandlerMixin #: shared list of hashes which should be identical under pix & asa7 #: (i.e. 
combined secret + user < 17 bytes) pix_asa_shared_hashes = [ # # http://www.perlmonks.org/index.pl?node_id=797623 # (("cisco", ""), "2KFQnbNIdI.2KYOU"), # confirmed ASA 9.6 # # http://www.hsc.fr/ressources/breves/pix_crack.html.en # (("hsc", ""), "YtT8/k6Np8F1yz2c"), # confirmed ASA 9.6 # # www.freerainbowtables.com/phpBB3/viewtopic.php?f=2&t=1441 # (("", ""), "8Ry2YjIyt7RRXU24"), # confirmed ASA 9.6 (("cisco", "john"), "hN7LzeyYjw12FSIU"), (("cisco", "jack"), "7DrfeZ7cyOj/PslD"), # # http://comments.gmane.org/gmane.comp.security.openwall.john.user/2529 # (("ripper", "alex"), "h3mJrcH0901pqX/m"), (("cisco", "cisco"), "3USUcOPFUiMCO4Jk"), (("cisco", "cisco1"), "3USUcOPFUiMCO4Jk"), (("CscFw-ITC!", "admcom"), "lZt7HSIXw3.QP7.R"), ("cangetin", "TynyB./ftknE77QP"), (("cangetin", "rramsey"), "jgBZqYtsWfGcUKDi"), # # http://openwall.info/wiki/john/sample-hashes # (("phonehome", "rharris"), "zyIIMSYjiPm0L7a6"), # # http://www.openwall.com/lists/john-users/2010/08/08/3 # (("cangetin", ""), "TynyB./ftknE77QP"), (("cangetin", "rramsey"), "jgBZqYtsWfGcUKDi"), # # from JTR 1.7.9 # ("test1", "TRPEas6f/aa6JSPL"), ("test2", "OMT6mXmAvGyzrCtp"), ("test3", "gTC7RIy1XJzagmLm"), ("test4", "oWC1WRwqlBlbpf/O"), ("password", "NuLKvvWGg.x9HEKO"), ("0123456789abcdef", ".7nfVBEIEu4KbF/1"), # # http://www.cisco.com/en/US/docs/security/pix/pix50/configuration/guide/commands.html#wp5472 # (("1234567890123456", ""), "feCkwUGktTCAgIbD"), # canonical source (("watag00s1am", ""), "jMorNbK0514fadBh"), # canonical source # # custom # (("cisco1", "cisco1"), "jmINXNH6p1BxUppp"), # ensures utf-8 used for unicode (UPASS_TABLE, 'CaiIvkLMu2TOHXGT'), # # passlib reference vectors # # Some of these have been confirmed on various ASA firewalls, # and the exact version is noted next to each hash. # Would like to verify these under more PIX & ASA versions. # # Those without a note are generally an extrapolation, # to ensure the code stays consistent, but for various reasons, # hasn't been verified. 
# # * One such case is usernames w/ 1 & 2 digits -- # ASA (9.6 at least) requires 3+ digits in username. # # The following hashes (below 13 chars) should be identical for PIX/ASA. # Ones which differ are listed separately in the known_correct_hashes # list for the two test classes. # # 4 char password (('1234', ''), 'RLPMUQ26KL4blgFN'), # confirmed ASA 9.6 # 8 char password (('01234567', ''), '0T52THgnYdV1tlOF'), # confirmed ASA 9.6 (('01234567', '3'), '.z0dT9Alkdc7EIGS'), (('01234567', '36'), 'CC3Lam53t/mHhoE7'), (('01234567', '365'), '8xPrWpNnBdD2DzdZ'), # confirmed ASA 9.6 (('01234567', '3333'), '.z0dT9Alkdc7EIGS'), # confirmed ASA 9.6 (('01234567', '3636'), 'CC3Lam53t/mHhoE7'), # confirmed ASA 9.6 (('01234567', '3653'), '8xPrWpNnBdD2DzdZ'), # confirmed ASA 9.6 (('01234567', 'adm'), 'dfWs2qiao6KD/P2L'), # confirmed ASA 9.6 (('01234567', 'adma'), 'dfWs2qiao6KD/P2L'), # confirmed ASA 9.6 (('01234567', 'admad'), 'dfWs2qiao6KD/P2L'), # confirmed ASA 9.6 (('01234567', 'user'), 'PNZ4ycbbZ0jp1.j1'), # confirmed ASA 9.6 (('01234567', 'user1234'), 'PNZ4ycbbZ0jp1.j1'), # confirmed ASA 9.6 # 12 char password (('0123456789ab', ''), 'S31BxZOGlAigndcJ'), # confirmed ASA 9.6 (('0123456789ab', '36'), 'wFqSX91X5.YaRKsi'), (('0123456789ab', '365'), 'qjgo3kNgTVxExbno'), # confirmed ASA 9.6 (('0123456789ab', '3333'), 'mcXPL/vIZcIxLUQs'), # confirmed ASA 9.6 (('0123456789ab', '3636'), 'wFqSX91X5.YaRKsi'), # confirmed ASA 9.6 (('0123456789ab', '3653'), 'qjgo3kNgTVxExbno'), # confirmed ASA 9.6 (('0123456789ab', 'user'), 'f.T4BKdzdNkjxQl7'), # confirmed ASA 9.6 (('0123456789ab', 'user1234'), 'f.T4BKdzdNkjxQl7'), # confirmed ASA 9.6 # NOTE: remaining reference vectors for 13+ char passwords # are split up between cisco_pix & cisco_asa tests. 
# unicode passwords # ASA supposedly uses utf-8 encoding, but entering non-ascii # chars is error-prone, and while UTF-8 appears to be intended, # observed behaviors include: # * ssh cli stripping non-ascii chars entirely # * ASDM web iface double-encoding utf-8 strings ((u("t\xe1ble").encode("utf-8"), 'user'), 'Og8fB4NyF0m5Ed9c'), ((u("t\xe1ble").encode("utf-8").decode("latin-1").encode("utf-8"), 'user'), 'cMvFC2XVBmK/68yB'), # confirmed ASA 9.6 when typed into ASDM ] def test_calc_digest_spoiler(self): """ _calc_checksum() -- spoil oversize passwords during verify for details, see 'spoil_digest' flag instead that function. this helps cisco_pix/cisco_asa implement their policy of ``.truncate_verify_reject=True``. """ def calc(secret, for_hash=False): return self.handler(use_defaults=for_hash)._calc_checksum(secret) # short (non-truncated) password short_secret = repeat_string("1234", self.handler.truncate_size) short_hash = calc(short_secret) # longer password should have totally different hash, # to prevent verify from matching (i.e. "spoiled"). 
long_secret = short_secret + "X" long_hash = calc(long_secret) self.assertNotEqual(long_hash, short_hash) # spoiled hash should depend on whole secret, # so that output isn't predictable alt_long_secret = short_secret + "Y" alt_long_hash = calc(alt_long_secret) self.assertNotEqual(alt_long_hash, short_hash) self.assertNotEqual(alt_long_hash, long_hash) # for hash(), should throw error if password too large calc(short_secret, for_hash=True) self.assertRaises(exc.PasswordSizeError, calc, long_secret, for_hash=True) self.assertRaises(exc.PasswordSizeError, calc, alt_long_secret, for_hash=True) #============================================================================= # cisco pix #============================================================================= class cisco_pix_test(_PixAsaSharedTest): handler = hash.cisco_pix #: known correct pix hashes known_correct_hashes = _PixAsaSharedTest.pix_asa_shared_hashes + [ # # passlib reference vectors (PIX-specific) # # NOTE: See 'pix_asa_shared_hashes' for general PIX+ASA vectors, # and general notes about the 'passlib reference vectors' test set. # # All of the following are PIX-specific, as ASA starts # to use a different padding size at 13 characters. # # TODO: these need confirming w/ an actual PIX system. 
# # 13 char password (('0123456789abc', ''), 'eacOpB7vE7ZDukSF'), (('0123456789abc', '3'), 'ylJTd/qei66WZe3w'), (('0123456789abc', '36'), 'hDx8QRlUhwd6bU8N'), (('0123456789abc', '365'), 'vYOOtnkh1HXcMrM7'), (('0123456789abc', '3333'), 'ylJTd/qei66WZe3w'), (('0123456789abc', '3636'), 'hDx8QRlUhwd6bU8N'), (('0123456789abc', '3653'), 'vYOOtnkh1HXcMrM7'), (('0123456789abc', 'user'), 'f4/.SALxqDo59mfV'), (('0123456789abc', 'user1234'), 'f4/.SALxqDo59mfV'), # 14 char password (('0123456789abcd', ''), '6r8888iMxEoPdLp4'), (('0123456789abcd', '3'), 'f5lvmqWYj9gJqkIH'), (('0123456789abcd', '36'), 'OJJ1Khg5HeAYBH1c'), (('0123456789abcd', '365'), 'OJJ1Khg5HeAYBH1c'), (('0123456789abcd', '3333'), 'f5lvmqWYj9gJqkIH'), (('0123456789abcd', '3636'), 'OJJ1Khg5HeAYBH1c'), (('0123456789abcd', '3653'), 'OJJ1Khg5HeAYBH1c'), (('0123456789abcd', 'adm'), 'DbPLCFIkHc2SiyDk'), (('0123456789abcd', 'adma'), 'DbPLCFIkHc2SiyDk'), (('0123456789abcd', 'user'), 'WfO2UiTapPkF/FSn'), (('0123456789abcd', 'user1234'), 'WfO2UiTapPkF/FSn'), # 15 char password (('0123456789abcde', ''), 'al1e0XFIugTYLai3'), (('0123456789abcde', '3'), 'lYbwBu.f82OIApQB'), (('0123456789abcde', '36'), 'lYbwBu.f82OIApQB'), (('0123456789abcde', '365'), 'lYbwBu.f82OIApQB'), (('0123456789abcde', '3333'), 'lYbwBu.f82OIApQB'), (('0123456789abcde', '3636'), 'lYbwBu.f82OIApQB'), (('0123456789abcde', '3653'), 'lYbwBu.f82OIApQB'), (('0123456789abcde', 'adm'), 'KgKx1UQvdR/09i9u'), (('0123456789abcde', 'adma'), 'KgKx1UQvdR/09i9u'), (('0123456789abcde', 'user'), 'qLopkenJ4WBqxaZN'), (('0123456789abcde', 'user1234'), 'qLopkenJ4WBqxaZN'), # 16 char password (('0123456789abcdef', ''), '.7nfVBEIEu4KbF/1'), (('0123456789abcdef', '36'), '.7nfVBEIEu4KbF/1'), (('0123456789abcdef', '365'), '.7nfVBEIEu4KbF/1'), (('0123456789abcdef', '3333'), '.7nfVBEIEu4KbF/1'), (('0123456789abcdef', '3636'), '.7nfVBEIEu4KbF/1'), (('0123456789abcdef', '3653'), '.7nfVBEIEu4KbF/1'), (('0123456789abcdef', 'user'), '.7nfVBEIEu4KbF/1'), (('0123456789abcdef', 
'user1234'), '.7nfVBEIEu4KbF/1'), ] #============================================================================= # cisco asa #============================================================================= class cisco_asa_test(_PixAsaSharedTest): handler = hash.cisco_asa known_correct_hashes = _PixAsaSharedTest.pix_asa_shared_hashes + [ # # passlib reference vectors (ASA-specific) # # NOTE: See 'pix_asa_shared_hashes' for general PIX+ASA vectors, # and general notes about the 'passlib reference vectors' test set. # # 13 char password # NOTE: past this point, ASA pads to 32 bytes instead of 16 # for all cases where user is set (secret + 4 bytes > 16), # but still uses 16 bytes for enable pwds (secret <= 16). # hashes w/ user WON'T match PIX, but "enable" passwords will. (('0123456789abc', ''), 'eacOpB7vE7ZDukSF'), # confirmed ASA 9.6 (('0123456789abc', '36'), 'FRV9JG18UBEgX0.O'), (('0123456789abc', '365'), 'NIwkusG9hmmMy6ZQ'), # confirmed ASA 9.6 (('0123456789abc', '3333'), 'NmrkP98nT7RAeKZz'), # confirmed ASA 9.6 (('0123456789abc', '3636'), 'FRV9JG18UBEgX0.O'), # confirmed ASA 9.6 (('0123456789abc', '3653'), 'NIwkusG9hmmMy6ZQ'), # confirmed ASA 9.6 (('0123456789abc', 'user'), '8Q/FZeam5ai1A47p'), # confirmed ASA 9.6 (('0123456789abc', 'user1234'), '8Q/FZeam5ai1A47p'), # confirmed ASA 9.6 # 14 char password (('0123456789abcd', ''), '6r8888iMxEoPdLp4'), # confirmed ASA 9.6 (('0123456789abcd', '3'), 'yxGoujXKPduTVaYB'), (('0123456789abcd', '36'), 'W0jckhnhjnr/DiT/'), (('0123456789abcd', '365'), 'HuVOxfMQNahaoF8u'), # confirmed ASA 9.6 (('0123456789abcd', '3333'), 'yxGoujXKPduTVaYB'), # confirmed ASA 9.6 (('0123456789abcd', '3636'), 'W0jckhnhjnr/DiT/'), # confirmed ASA 9.6 (('0123456789abcd', '3653'), 'HuVOxfMQNahaoF8u'), # confirmed ASA 9.6 (('0123456789abcd', 'adm'), 'RtOmSeoCs4AUdZqZ'), # confirmed ASA 9.6 (('0123456789abcd', 'adma'), 'RtOmSeoCs4AUdZqZ'), # confirmed ASA 9.6 (('0123456789abcd', 'user'), 'rrucwrcM0h25pr.m'), # confirmed ASA 9.6 (('0123456789abcd', 
'user1234'), 'rrucwrcM0h25pr.m'), # confirmed ASA 9.6 # 15 char password (('0123456789abcde', ''), 'al1e0XFIugTYLai3'), # confirmed ASA 9.6 (('0123456789abcde', '3'), 'nAZrQoHaL.fgrIqt'), (('0123456789abcde', '36'), '2GxIQ6ICE795587X'), (('0123456789abcde', '365'), 'QmDsGwCRBbtGEKqM'), # confirmed ASA 9.6 (('0123456789abcde', '3333'), 'nAZrQoHaL.fgrIqt'), # confirmed ASA 9.6 (('0123456789abcde', '3636'), '2GxIQ6ICE795587X'), # confirmed ASA 9.6 (('0123456789abcde', '3653'), 'QmDsGwCRBbtGEKqM'), # confirmed ASA 9.6 (('0123456789abcde', 'adm'), 'Aj2aP0d.nk62wl4m'), # confirmed ASA 9.6 (('0123456789abcde', 'adma'), 'Aj2aP0d.nk62wl4m'), # confirmed ASA 9.6 (('0123456789abcde', 'user'), 'etxiXfo.bINJcXI7'), # confirmed ASA 9.6 (('0123456789abcde', 'user1234'), 'etxiXfo.bINJcXI7'), # confirmed ASA 9.6 # 16 char password (('0123456789abcdef', ''), '.7nfVBEIEu4KbF/1'), # confirmed ASA 9.6 (('0123456789abcdef', '36'), 'GhI8.yFSC5lwoafg'), (('0123456789abcdef', '365'), 'KFBI6cNQauyY6h/G'), # confirmed ASA 9.6 (('0123456789abcdef', '3333'), 'Ghdi1IlsswgYzzMH'), # confirmed ASA 9.6 (('0123456789abcdef', '3636'), 'GhI8.yFSC5lwoafg'), # confirmed ASA 9.6 (('0123456789abcdef', '3653'), 'KFBI6cNQauyY6h/G'), # confirmed ASA 9.6 (('0123456789abcdef', 'user'), 'IneB.wc9sfRzLPoh'), # confirmed ASA 9.6 (('0123456789abcdef', 'user1234'), 'IneB.wc9sfRzLPoh'), # confirmed ASA 9.6 # 17 char password # NOTE: past this point, ASA pads to 32 bytes instead of 16 # for ALL cases, since secret > 16 bytes even for enable pwds; # and so none of these rest here should match PIX. 
(('0123456789abcdefq', ''), 'bKshl.EN.X3CVFRQ'), # confirmed ASA 9.6 (('0123456789abcdefq', '36'), 'JAeTXHs0n30svlaG'), (('0123456789abcdefq', '365'), '4fKSSUBHT1ChGqHp'), # confirmed ASA 9.6 (('0123456789abcdefq', '3333'), 'USEJbxI6.VY4ecBP'), # confirmed ASA 9.6 (('0123456789abcdefq', '3636'), 'JAeTXHs0n30svlaG'), # confirmed ASA 9.6 (('0123456789abcdefq', '3653'), '4fKSSUBHT1ChGqHp'), # confirmed ASA 9.6 (('0123456789abcdefq', 'user'), '/dwqyD7nGdwSrDwk'), # confirmed ASA 9.6 (('0123456789abcdefq', 'user1234'), '/dwqyD7nGdwSrDwk'), # confirmed ASA 9.6 # 27 char password (('0123456789abcdefqwertyuiopa', ''), '4wp19zS3OCe.2jt5'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopa', '36'), 'PjUoGqWBKPyV9qOe'), (('0123456789abcdefqwertyuiopa', '365'), 'bfCy6xFAe5O/gzvM'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopa', '3333'), 'rd/ZMuGTJFIb2BNG'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopa', '3636'), 'PjUoGqWBKPyV9qOe'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopa', '3653'), 'bfCy6xFAe5O/gzvM'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopa', 'user'), 'zynfWw3UtszxLMgL'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopa', 'user1234'), 'zynfWw3UtszxLMgL'), # confirmed ASA 9.6 # 28 char password # NOTE: past this point, ASA stops appending the username AT ALL, # even though there's still room for the first few chars. 
(('0123456789abcdefqwertyuiopas', ''), 'W6nbOddI0SutTK7m'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopas', '36'), 'W6nbOddI0SutTK7m'), (('0123456789abcdefqwertyuiopas', '365'), 'W6nbOddI0SutTK7m'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopas', 'user'), 'W6nbOddI0SutTK7m'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopas', 'user1234'), 'W6nbOddI0SutTK7m'), # confirmed ASA 9.6 # 32 char password # NOTE: this is max size that ASA allows, and throws error for larger (('0123456789abcdefqwertyuiopasdfgh', ''), '5hPT/iC6DnoBxo6a'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopasdfgh', '36'), '5hPT/iC6DnoBxo6a'), (('0123456789abcdefqwertyuiopasdfgh', '365'), '5hPT/iC6DnoBxo6a'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopasdfgh', 'user'), '5hPT/iC6DnoBxo6a'), # confirmed ASA 9.6 (('0123456789abcdefqwertyuiopasdfgh', 'user1234'), '5hPT/iC6DnoBxo6a'), # confirmed ASA 9.6 ] #============================================================================= # cisco type 7 #============================================================================= class cisco_type7_test(HandlerCase): handler = hash.cisco_type7 salt_bits = 4 salt_type = int known_correct_hashes = [ # # http://mccltd.net/blog/?p=1034 # ("secure ", "04480E051A33490E"), # # http://insecure.org/sploits/cisco.passwords.html # ("Its time to go to lunch!", "153B1F1F443E22292D73212D5300194315591954465A0D0B59"), # # http://blog.ioshints.info/2007/11/type-7-decryption-in-cisco-ios.html # ("t35t:pa55w0rd", "08351F1B1D431516475E1B54382F"), # # http://www.m00nie.com/2011/09/cisco-type-7-password-decryption-and-encryption-with-perl/ # ("hiImTesting:)", "020E0D7206320A325847071E5F5E"), # # http://packetlife.net/forums/thread/54/ # ("cisco123", "060506324F41584B56"), ("cisco123", "1511021F07257A767B"), # # source ? 
# ('Supe&8ZUbeRp4SS', "06351A3149085123301517391C501918"), # # custom # # ensures utf-8 used for unicode (UPASS_TABLE, '0958EDC8A9F495F6F8A5FD'), ] known_unidentified_hashes = [ # salt with hex value "0A480E051A33490E", # salt value > 52. this may in fact be valid, but we reject it for now # (see docs for more). '99400E4812', ] def test_90_decode(self): """test cisco_type7.decode()""" from passlib.utils import to_unicode, to_bytes handler = self.handler for secret, hash in self.known_correct_hashes: usecret = to_unicode(secret) bsecret = to_bytes(secret) self.assertEqual(handler.decode(hash), usecret) self.assertEqual(handler.decode(hash, None), bsecret) self.assertRaises(UnicodeDecodeError, handler.decode, '0958EDC8A9F495F6F8A5FD', 'ascii') def test_91_salt(self): """test salt value border cases""" handler = self.handler self.assertRaises(TypeError, handler, salt=None) handler(salt=None, use_defaults=True) self.assertRaises(TypeError, handler, salt='abc') self.assertRaises(ValueError, handler, salt=-10) self.assertRaises(ValueError, handler, salt=100) self.assertRaises(TypeError, handler.using, salt='abc') self.assertRaises(ValueError, handler.using, salt=-10) self.assertRaises(ValueError, handler.using, salt=100) with self.assertWarningList("salt/offset must be.*"): subcls = handler.using(salt=100, relaxed=True) self.assertEqual(subcls(use_defaults=True).salt, 52) #============================================================================= # eof #=============================================================================
xinjiguaike/edx-platform
refs/heads/master
common/lib/xmodule/xmodule/video_module/video_xfields.py
46
""" XFields for video module. """ import datetime from xblock.fields import Scope, String, Float, Boolean, List, Dict, DateTime from xmodule.fields import RelativeTime # Make '_' a no-op so we can scrape strings _ = lambda text: text class VideoFields(object): """Fields for `VideoModule` and `VideoDescriptor`.""" display_name = String( help=_("The name students see. This name appears in the course ribbon and as a header for the video."), display_name=_("Component Display Name"), default="Video", scope=Scope.settings ) saved_video_position = RelativeTime( help=_("Current position in the video."), scope=Scope.user_state, default=datetime.timedelta(seconds=0) ) # TODO: This should be moved to Scope.content, but this will # require data migration to support the old video module. youtube_id_1_0 = String( help=_("Optional, for older browsers: the YouTube ID for the normal speed video."), display_name=_("YouTube ID"), scope=Scope.settings, default="3_yD_cEKoCk" ) youtube_id_0_75 = String( help=_("Optional, for older browsers: the YouTube ID for the .75x speed video."), display_name=_("YouTube ID for .75x speed"), scope=Scope.settings, default="" ) youtube_id_1_25 = String( help=_("Optional, for older browsers: the YouTube ID for the 1.25x speed video."), display_name=_("YouTube ID for 1.25x speed"), scope=Scope.settings, default="" ) youtube_id_1_5 = String( help=_("Optional, for older browsers: the YouTube ID for the 1.5x speed video."), display_name=_("YouTube ID for 1.5x speed"), scope=Scope.settings, default="" ) start_time = RelativeTime( # datetime.timedelta object help=_( "Time you want the video to start if you don't want the entire video to play. " "Not supported in the native mobile app: the full video file will play. " "Formatted as HH:MM:SS. The maximum value is 23:59:59." 
), display_name=_("Video Start Time"), scope=Scope.settings, default=datetime.timedelta(seconds=0) ) end_time = RelativeTime( # datetime.timedelta object help=_( "Time you want the video to stop if you don't want the entire video to play. " "Not supported in the native mobile app: the full video file will play. " "Formatted as HH:MM:SS. The maximum value is 23:59:59." ), display_name=_("Video Stop Time"), scope=Scope.settings, default=datetime.timedelta(seconds=0) ) #front-end code of video player checks logical validity of (start_time, end_time) pair. # `source` is deprecated field and should not be used in future. # `download_video` is used instead. source = String( help=_("The external URL to download the video."), display_name=_("Download Video"), scope=Scope.settings, default="" ) download_video = Boolean( help=_("Allow students to download versions of this video in different formats if they cannot use the edX video player or do not have access to YouTube. You must add at least one non-YouTube URL in the Video File URLs field."), display_name=_("Video Download Allowed"), scope=Scope.settings, default=False ) html5_sources = List( help=_("The URL or URLs where you've posted non-YouTube versions of the video. Each URL must end in .mpeg, .mp4, .ogg, or .webm and cannot be a YouTube URL. (For browser compatibility, we strongly recommend .mp4 and .webm format.) Students will be able to view the first listed video that's compatible with the student's computer. To allow students to download these videos, set Video Download Allowed to True."), display_name=_("Video File URLs"), scope=Scope.settings, ) track = String( help=_("By default, students can download an .srt or .txt transcript when you set Download Transcript Allowed to True. If you want to provide a downloadable transcript in a different format, we recommend that you upload a handout by using the Upload a Handout field. 
If this isn't possible, you can post a transcript file on the Files & Uploads page or on the Internet, and then add the URL for the transcript here. Students see a link to download that transcript below the video."), display_name=_("Downloadable Transcript URL"), scope=Scope.settings, default='' ) download_track = Boolean( help=_("Allow students to download the timed transcript. A link to download the file appears below the video. By default, the transcript is an .srt or .txt file. If you want to provide the transcript for download in a different format, upload a file by using the Upload Handout field."), display_name=_("Download Transcript Allowed"), scope=Scope.settings, default=False ) sub = String( help=_("The default transcript for the video, from the Default Timed Transcript field on the Basic tab. This transcript should be in English. You don't have to change this setting."), display_name=_("Default Timed Transcript"), scope=Scope.settings, default="" ) show_captions = Boolean( help=_("Specify whether the transcripts appear with the video by default."), display_name=_("Show Transcript"), scope=Scope.settings, default=True ) # Data format: {'de': 'german_translation', 'uk': 'ukrainian_translation'} transcripts = Dict( help=_("Add transcripts in different languages. Click below to specify a language and upload an .srt transcript file for that language."), display_name=_("Transcript Languages"), scope=Scope.settings, default={} ) transcript_language = String( help=_("Preferred language for transcript."), display_name=_("Preferred language for transcript"), scope=Scope.preferences, default="en" ) transcript_download_format = String( help=_("Transcript file format to download by user."), scope=Scope.preferences, values=[ # Translators: This is a type of file used for captioning in the video player. 
{"display_name": _("SubRip (.srt) file"), "value": "srt"}, {"display_name": _("Text (.txt) file"), "value": "txt"} ], default='srt', ) speed = Float( help=_("The last speed that the user specified for the video."), scope=Scope.user_state ) global_speed = Float( help=_("The default speed for the video."), scope=Scope.preferences, default=1.0 ) youtube_is_available = Boolean( help=_("Specify whether YouTube is available for the user."), scope=Scope.user_info, default=True ) handout = String( help=_("Upload a handout to accompany this video. Students can download the handout by clicking Download Handout under the video."), display_name=_("Upload Handout"), scope=Scope.settings, ) only_on_web = Boolean( help=_( "Specify whether access to this video is limited to browsers only, or if it can be " "accessed from other applications including mobile apps." ), display_name="Video Available on Web Only", scope=Scope.settings, default=False ) edx_video_id = String( help=_("If you were assigned a Video ID by edX for the video to play in this component, enter the ID here. In this case, do not enter values in the Default Video URL, the Video File URLs, and the YouTube ID fields. If you were not assigned a Video ID, enter values in those other fields and ignore this field."), # pylint: disable=line-too-long display_name=_("Video ID"), scope=Scope.settings, default="", ) bumper_last_view_date = DateTime( display_name=_("Date of the last view of the bumper"), scope=Scope.preferences, ) bumper_do_not_show_again = Boolean( display_name=_("Do not show bumper again"), scope=Scope.preferences, default=False, )
home-assistant/home-assistant
refs/heads/dev
tests/test_runner.py
2
"""Test the runner."""
import threading
from unittest.mock import patch

from homeassistant import core, runner
from homeassistant.util import executor, thread

# https://github.com/home-assistant/supervisor/blob/main/supervisor/docker/homeassistant.py
SUPERVISOR_HARD_TIMEOUT = 220

TIMEOUT_SAFETY_MARGIN = 10


async def test_cumulative_shutdown_timeout_less_than_supervisor():
    """Verify the cumulative shutdown timeout is at least 10s less than the supervisor."""
    # Sum every stage/executor/thread timeout plus the safety margin; the
    # total must stay inside the supervisor's hard kill deadline.
    cumulative = sum(
        (
            core.STAGE_1_SHUTDOWN_TIMEOUT,
            core.STAGE_2_SHUTDOWN_TIMEOUT,
            core.STAGE_3_SHUTDOWN_TIMEOUT,
            executor.EXECUTOR_SHUTDOWN_TIMEOUT,
            thread.THREADING_SHUTDOWN_TIMEOUT,
            TIMEOUT_SAFETY_MARGIN,
        )
    )
    assert cumulative <= SUPERVISOR_HARD_TIMEOUT


async def test_setup_and_run_hass(hass, tmpdir):
    """Test we can setup and run."""
    config_dir = tmpdir.mkdir("config")
    run_config = runner.RuntimeConfig(config_dir)

    # Stub out bootstrap, interpreter shutdown and the main run loop so the
    # runner can be driven end-to-end without starting a real instance.
    setup_patch = patch(
        "homeassistant.bootstrap.async_setup_hass", return_value=hass
    )
    shutdown_patch = patch("threading._shutdown")
    run_patch = patch("homeassistant.core.HomeAssistant.async_run")

    with setup_patch, shutdown_patch, run_patch as mock_run:
        await runner.setup_and_run_hass(run_config)
        # The runner must install its deadlock-safe shutdown hook.
        assert threading._shutdown == thread.deadlock_safe_shutdown

    assert mock_run.called
matheuscas/pyfuzzy_toolbox
refs/heads/master
pyfuzzy_toolbox/mf.py
1
import numpy as np


def trimf(x, params):
    """Triangular-shaped membership function.

    Keyword arguments:
    x -- integer, float or (int or float) input numpy array
    params -- list with three values [a, b, c]; a and c locate the "feet"
              of the triangle and b locates the peak.

    Returns a numpy array of membership degrees in [0, 1] (scalar input is
    promoted to a length-1 array, matching the original behavior).

    Raises TypeError for unsupported x types and ValueError for malformed
    params (wrong length, or not satisfying a <= b <= c).
    """
    if not isinstance(x, (int, float, np.ndarray)):
        raise TypeError('x must be int, float or numpy array.')
    # Explicit ValueError instead of a bare assert: asserts are stripped
    # under `python -O`, and input validation must not disappear.
    if len(params) != 3:
        raise ValueError('params must contain exactly three values [a, b, c].')

    a, b, c = (float(p) for p in params)
    if not a <= b <= c:
        raise ValueError('params must satisfy a <= b <= c.')

    if not isinstance(x, np.ndarray):
        x = np.array([float(x)])

    # Zero-initialized result already covers x <= a and x >= c; only the
    # rising edge, falling edge and peak need explicit writes.
    result = np.zeros(len(x))

    if a != b:
        rising = np.logical_and(x > a, x < b)
        result[rising] = (x[rising] - a) / (b - a)
    if b != c:
        falling = np.logical_and(x > b, x < c)
        result[falling] = (c - x[falling]) / (c - b)

    # The peak is always 1, including degenerate triangles (a == b or b == c).
    result[x == b] = 1.0

    return result


def centroid(x, mf):
    """Return the defuzzified (centroid) value of membership function `mf`
    evaluated at the variable values `x`.

    Keyword arguments:
    x -- (int or float) numpy array of a set or alpha cut
    mf -- (int or float) numpy array that represents a mf function

    Raises TypeError when either argument is not a numpy array and
    AssertionError when their lengths differ (kept for backward
    compatibility with existing callers).
    """
    if not isinstance(mf, np.ndarray):
        raise TypeError(
            'Undefined mf type. Assure that you are using numpy array.')
    if not isinstance(x, np.ndarray):
        raise TypeError(
            'Undefined x type. Assure that you are using numpy array.')
    if len(x) != len(mf):
        raise AssertionError('x and mf are not the same size.')
    # rounded to 4 places to follow the MATLAB reference implementation
    return round((x * mf).sum() / mf.sum(), 4)
detiber/lib_openshift
refs/heads/master
lib_openshift/models/v1_job_spec.py
2
# coding: utf-8

"""
    OpenAPI spec version: 

    Generated by: https://github.com/swagger-api/swagger-codegen.git

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

from pprint import pformat


class V1JobSpec(object):
    """
    Swagger model for a Kubernetes v1 JobSpec.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    operations = [
    ]

    # The key is attribute name and the value is attribute type.
    swagger_types = {
        'parallelism': 'int',
        'completions': 'int',
        'active_deadline_seconds': 'int',
        'selector': 'V1LabelSelector',
        'manual_selector': 'bool',
        'template': 'V1PodTemplateSpec'
    }

    # The key is attribute name and the value is json key in definition.
    attribute_map = {
        'parallelism': 'parallelism',
        'completions': 'completions',
        'active_deadline_seconds': 'activeDeadlineSeconds',
        'selector': 'selector',
        'manual_selector': 'manualSelector',
        'template': 'template'
    }

    def __init__(self, parallelism=None, completions=None,
                 active_deadline_seconds=None, selector=None,
                 manual_selector=None, template=None):
        """
        V1JobSpec - a model defined in Swagger
        """
        self._parallelism = parallelism
        self._completions = completions
        self._active_deadline_seconds = active_deadline_seconds
        self._selector = selector
        self._manual_selector = manual_selector
        self._template = template

    @property
    def parallelism(self):
        """
        Gets the parallelism of this V1JobSpec.
        Parallelism specifies the maximum desired number of pods the job
        should run at any given time.

        :return: The parallelism of this V1JobSpec.
        :rtype: int
        """
        return self._parallelism

    @parallelism.setter
    def parallelism(self, parallelism):
        """
        Sets the parallelism of this V1JobSpec.

        :param parallelism: The parallelism of this V1JobSpec.
        :type: int
        """
        self._parallelism = parallelism

    @property
    def completions(self):
        """
        Gets the completions of this V1JobSpec.
        Completions specifies the desired number of successfully finished
        pods the job should be run with; nil means any single success
        completes the job.

        :return: The completions of this V1JobSpec.
        :rtype: int
        """
        return self._completions

    @completions.setter
    def completions(self, completions):
        """
        Sets the completions of this V1JobSpec.

        :param completions: The completions of this V1JobSpec.
        :type: int
        """
        self._completions = completions

    @property
    def active_deadline_seconds(self):
        """
        Gets the active_deadline_seconds of this V1JobSpec.
        Optional duration in seconds relative to the startTime that the job
        may be active before the system tries to terminate it; value must be
        a positive integer.

        :return: The active_deadline_seconds of this V1JobSpec.
        :rtype: int
        """
        return self._active_deadline_seconds

    @active_deadline_seconds.setter
    def active_deadline_seconds(self, active_deadline_seconds):
        """
        Sets the active_deadline_seconds of this V1JobSpec.

        :param active_deadline_seconds: The active_deadline_seconds of this V1JobSpec.
        :type: int
        """
        self._active_deadline_seconds = active_deadline_seconds

    @property
    def selector(self):
        """
        Gets the selector of this V1JobSpec.
        Selector is a label query over pods that should match the pod count.
        Normally, the system sets this field for you.

        :return: The selector of this V1JobSpec.
        :rtype: V1LabelSelector
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """
        Sets the selector of this V1JobSpec.

        :param selector: The selector of this V1JobSpec.
        :type: V1LabelSelector
        """
        self._selector = selector

    @property
    def manual_selector(self):
        """
        Gets the manual_selector of this V1JobSpec.
        ManualSelector controls generation of pod labels and pod selectors;
        leave unset unless you are certain what you are doing.

        :return: The manual_selector of this V1JobSpec.
        :rtype: bool
        """
        return self._manual_selector

    @manual_selector.setter
    def manual_selector(self, manual_selector):
        """
        Sets the manual_selector of this V1JobSpec.

        :param manual_selector: The manual_selector of this V1JobSpec.
        :type: bool
        """
        self._manual_selector = manual_selector

    @property
    def template(self):
        """
        Gets the template of this V1JobSpec.
        Template is the object that describes the pod that will be created
        when executing a job.

        :return: The template of this V1JobSpec.
        :rtype: V1PodTemplateSpec
        """
        return self._template

    @template.setter
    def template(self, template):
        """
        Sets the template of this V1JobSpec.

        :param template: The template of this V1JobSpec.
        :type: V1PodTemplateSpec
        """
        self._template = template

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested
        models, lists and dicts.
        """
        result = {}

        # dict.items() works on both Python 2 and 3, removing the previous
        # dependency on six.iteritems.
        for attr in V1JobSpec.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    (k, v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                )
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: the previous implementation unconditionally accessed
        other.__dict__ and raised AttributeError when compared against
        objects without one (e.g. strings or ints).
        """
        if not isinstance(other, V1JobSpec):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
hoihu/micropython
refs/heads/master
tests/basics/special_methods2.py
7
# Test fixture for MicroPython's special-methods test: each operator below
# must print the name of the dunder it invokes, and the captured output is
# compared verbatim against CPython's, so the executable code must not change.
class Cud():

    def __init__(self):
        #print("__init__ called")
        pass

    def __repr__(self):
        print("__repr__ called")
        return ""

    def __lt__(self, other):
        print("__lt__ called")

    def __le__(self, other):
        print("__le__ called")

    def __eq__(self, other):
        print("__eq__ called")

    def __ne__(self, other):
        print("__ne__ called")

    def __ge__(self, other):
        print("__ge__ called")

    def __gt__(self, other):
        print("__gt__ called")

    def __abs__(self):
        print("__abs__ called")

    def __add__(self, other):
        print("__add__ called")

    def __and__(self, other):
        print("__and__ called")

    def __floordiv__(self, other):
        print("__floordiv__ called")

    # NOTE(review): __index__ normally takes no argument besides self; this
    # fixture declares an extra parameter -- confirm whether that is intended.
    def __index__(self, other):
        print("__index__ called")

    def __inv__(self):
        print("__inv__ called")

    def __invert__(self):
        print("__invert__ called")

    def __lshift__(self, val):
        print("__lshift__ called")

    def __mod__(self, val):
        print("__mod__ called")

    def __mul__(self, other):
        print("__mul__ called")

    def __matmul__(self, other):
        print("__matmul__ called")

    def __neg__(self):
        print("__neg__ called")

    def __or__(self, other):
        print("__or__ called")

    def __pos__(self):
        print("__pos__ called")

    def __pow__(self, val):
        print("__pow__ called")

    def __rshift__(self, val):
        print("__rshift__ called")

    def __sub__(self, other):
        print("__sub__ called")

    def __truediv__(self, other):
        print("__truediv__ called")

    def __div__(self, other):
        print("__div__ called")

    def __xor__(self, other):
        print("__xor__ called")

    # The in-place operators return self so `cud1 += cud2` keeps cud1 bound
    # to a Cud instance for the statements that follow.
    def __iadd__(self, other):
        print("__iadd__ called")
        return self

    def __isub__(self, other):
        print("__isub__ called")
        return self

    def __dir__(self):
        return ['a', 'b', 'c']

cud1 = Cud()
cud2 = Cud()

# A build without the optional special methods raises TypeError on unary
# plus, in which case the whole test is skipped.
try:
    +cud1
except TypeError:
    print("SKIP")
    raise SystemExit

# the following require MICROPY_PY_ALL_SPECIAL_METHODS
+cud1
-cud1
~cud1
cud1 * cud2
cud1 @ cud2
cud1 / cud2
cud2 // cud1
cud1 += cud2
cud1 -= cud2
cud1 % 2
cud1 ** 2
cud1 | cud2
cud1 & cud2
cud1 ^ cud2
cud1 << 1
cud1 >> 1

# test that dir() delegates to __dir__ special method
print(dir(cud1))

# test that dir() does not delegate to __dir__ for the type
print('a' in dir(Cud))

# TODO: the following operations are not supported on every ports
#
# ne is not supported, !(eq) is called instead
#cud1 != cud2
#
# in the following test, cpython still calls __eq__
# cud3=cud1
# cud3==cud1
vberaudi/scipy
refs/heads/master
scipy/sparse/linalg/eigen/arpack/setup.py
76
#!/usr/bin/env python from __future__ import division, print_function, absolute_import from os.path import join def configuration(parent_package='',top_path=None): from numpy.distutils.system_info import get_info, NotFoundError from numpy.distutils.misc_util import Configuration from scipy._build_utils import get_g77_abi_wrappers, get_sgemv_fix config = Configuration('arpack',parent_package,top_path) lapack_opt = get_info('lapack_opt') if not lapack_opt: raise NotFoundError('no lapack/blas resources found') config = Configuration('arpack', parent_package, top_path) arpack_sources = [join('ARPACK','SRC', '*.f')] arpack_sources.extend([join('ARPACK','UTIL', '*.f')]) arpack_sources.extend([join('ARPACK','LAPACK', '*.f')]) arpack_sources += get_g77_abi_wrappers(lapack_opt) config.add_library('arpack_scipy', sources=arpack_sources, include_dirs=[join('ARPACK', 'SRC')]) ext_sources = ['arpack.pyf.src'] ext_sources += get_sgemv_fix(lapack_opt) config.add_extension('_arpack', sources=ext_sources, libraries=['arpack_scipy'], extra_info=lapack_opt, depends=arpack_sources, ) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
arbrandes/edx-platform
refs/heads/master
lms/djangoapps/coursewarehistoryextended/tests.py
3
"""
Tests for coursewarehistoryextended
Many aspects of this app are covered by the courseware tests, but these are
specific to the new storage model with multiple backend tables.
"""


import json
from unittest import skipUnless
from unittest.mock import patch

from django.conf import settings
from django.db import connections
from django.test import TestCase

from lms.djangoapps.courseware.models import BaseStudentModuleHistory, StudentModule, StudentModuleHistory
from lms.djangoapps.courseware.tests.factories import COURSE_KEY
from lms.djangoapps.courseware.tests.factories import LOCATION
from lms.djangoapps.courseware.tests.factories import StudentModuleFactory


@skipUnless(settings.FEATURES["ENABLE_CSMH_EXTENDED"], "CSMH Extended needs to be enabled")
class TestStudentModuleHistoryBackends(TestCase):
    """ Tests of data in CSMH and CSMHE """
    # Tell Django to clean out all databases, not just default
    databases = {alias for alias in connections}  # lint-amnesty, pylint: disable=unnecessary-comprehension

    def setUp(self):
        # Seed three records.  Each StudentModule save lands in the extended
        # history table (CSMHE) via the post_save signal; a matching row is
        # then written by hand into the old history table (CSMH).  The state
        # JSON tags each row with its table ('csmhe'/'csmh') and creation
        # order so the tests below can check what get_history returns.
        super().setUp()
        for record in (1, 2, 3):
            # This will store into CSMHE via the post_save signal
            csm = StudentModuleFactory.create(
                module_state_key=LOCATION('usage_id'),
                course_id=COURSE_KEY,
                state=json.dumps({'type': 'csmhe', 'order': record}),
            )
            # This manually gets us a CSMH record to compare
            csmh = StudentModuleHistory(student_module=csm,
                                        version=None,
                                        created=csm.modified,
                                        state=json.dumps({'type': 'csmh', 'order': record}),
                                        grade=csm.grade,
                                        max_grade=csm.max_grade)
            csmh.save()

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": True})
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": True})
    def test_get_history_true_true(self):
        # Both flags on: all six rows come back, most recent first within
        # each table, with the CSMHE rows before the CSMH rows.
        student_module = StudentModule.objects.all()
        history = BaseStudentModuleHistory.get_history(student_module)
        assert len(history) == 6
        assert {'type': 'csmhe', 'order': 3} == json.loads(history[0].state)
        assert {'type': 'csmhe', 'order': 2} == json.loads(history[1].state)
        assert {'type': 'csmhe', 'order': 1} == json.loads(history[2].state)
        assert {'type': 'csmh', 'order': 3} == json.loads(history[3].state)
        assert {'type': 'csmh', 'order': 2} == json.loads(history[4].state)
        assert {'type': 'csmh', 'order': 1} == json.loads(history[5].state)

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": True})
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": False})
    def test_get_history_true_false(self):
        # Extended table only: just the three CSMHE rows, most recent first.
        student_module = StudentModule.objects.all()
        history = BaseStudentModuleHistory.get_history(student_module)
        assert len(history) == 3
        assert {'type': 'csmhe', 'order': 3} == json.loads(history[0].state)
        assert {'type': 'csmhe', 'order': 2} == json.loads(history[1].state)
        assert {'type': 'csmhe', 'order': 1} == json.loads(history[2].state)

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": False})
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": True})
    def test_get_history_false_true(self):
        # Old table only: just the three CSMH rows, most recent first.
        student_module = StudentModule.objects.all()
        history = BaseStudentModuleHistory.get_history(student_module)
        assert len(history) == 3
        assert {'type': 'csmh', 'order': 3} == json.loads(history[0].state)
        assert {'type': 'csmh', 'order': 2} == json.loads(history[1].state)
        assert {'type': 'csmh', 'order': 1} == json.loads(history[2].state)

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": False})
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": False})
    def test_get_history_false_false(self):
        # Both flags off: no history is read from either table.
        student_module = StudentModule.objects.all()
        history = BaseStudentModuleHistory.get_history(student_module)
        assert len(history) == 0
pigeonflight/strider-plone
refs/heads/master
docker/appengine/lib/django-1.4/tests/regressiontests/utils/crypto.py
41
import math
import timeit
import hashlib

from django.utils import unittest
from django.utils.crypto import pbkdf2


class TestUtilsCryptoPBKDF2(unittest.TestCase):
    """Check pbkdf2() against the published RFC test vectors plus
    Django-specific regression vectors."""

    # http://tools.ietf.org/html/draft-josefsson-pbkdf2-test-vectors-06
    rfc_vectors = [
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "0c60c80f961f0e71f3a9b524af6012062fe037a6",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 2,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 4096,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "4b007901b765489abead49d926f721d065a429c1",
        },
        # # this takes way too long :(
        # {
        #     "args": {
        #         "password": "password",
        #         "salt": "salt",
        #         "iterations": 16777216,
        #         "dklen": 20,
        #         "digest": hashlib.sha1,
        #     },
        #     "result": "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984",
        # },
        {
            "args": {
                "password": "passwordPASSWORDpassword",
                "salt": "saltSALTsaltSALTsaltSALTsaltSALTsalt",
                "iterations": 4096,
                "dklen": 25,
                "digest": hashlib.sha1,
            },
            "result": "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038",
        },
        {
            "args": {
                "password": "pass\0word",
                "salt": "sa\0lt",
                "iterations": 4096,
                "dklen": 16,
                "digest": hashlib.sha1,
            },
            "result": "56fa6aa75548099dcc37d7f03425e0c3",
        },
    ]

    # Vectors exercising digests and edge cases not covered by the RFC draft.
    regression_vectors = [
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha256,
            },
            "result": "120fb6cffcf8b32c43e7225256c4f837a86548c9",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha512,
            },
            "result": "867f70cf1ade02cff3752599a3a53dc4af34c7a6",
        },
        # dklen=0 means "use the digest's full output length".
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1000,
                "dklen": 0,
                "digest": hashlib.sha512,
            },
            "result": ("afe6c5530785b6cc6b1c6453384731bd5ee432ee"
                       "549fd42fb6695779ad8a1c5bf59de69c48f774ef"
                       "c4007d5298f9033c0241d5ab69305e7b64eceeb8d"
                       "834cfec"),
        },
        # Check leading zeros are not stripped (#17481)
        {
            "args": {
                "password": chr(186),
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": '0053d3b91a7f1e54effebd6d68771e8a6e0b2c5b',
        },
    ]

    def test_public_vectors(self):
        # Each RFC vector must hex-encode to the published digest.
        # NOTE(review): .encode('hex') is Python 2 only -- this file targets
        # Django 1.4 / Python 2.
        for vector in self.rfc_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(result.encode('hex'), vector['result'])

    def test_regression_vectors(self):
        for vector in self.regression_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(result.encode('hex'), vector['result'])

    def test_performance_scalability(self):
        """
        Theory: If you run with 100 iterations, it should take 100
        times as long as running with 1 iteration.
        """
        # These values are chosen as a reasonable tradeoff between time
        # to run the test suite and false positives caused by imprecise
        # measurement.
        n1, n2 = 200000, 800000
        # Time a single timeit run of each statement; the setup string
        # imports pbkdf2 so only the call itself is measured.
        elapsed = lambda f: timeit.Timer(f,
            'from django.utils.crypto import pbkdf2').timeit(number=1)
        t1 = elapsed('pbkdf2("password", "salt", iterations=%d)' % n1)
        t2 = elapsed('pbkdf2("password", "salt", iterations=%d)' % n2)
        measured_scale_exponent = math.log(t2 / t1, n2 / n1)
        # This should be less than 1. We allow up to 1.2 so that tests don't
        # fail nondeterministically too often.
        self.assertLess(measured_scale_exponent, 1.2)
siosio/intellij-community
refs/heads/master
python/testData/copyPaste/Indent7709.dst.py
83
a = 1 <caret> b = 2
erkanay/django
refs/heads/master
tests/servers/models.py
623
from django.db import models


class Person(models.Model):
    """Minimal model for the servers test suite: a single name field."""
    name = models.CharField(max_length=256)
CydarLtd/ansible
refs/heads/devel
lib/ansible/modules/network/nxos/nxos_smu.py
40
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: nxos_smu
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Perform SMUs on Cisco NX-OS devices.
description:
    - Perform software maintenance upgrades (SMUs) on Cisco NX-OS devices.
author: Gabriele Gerbino (@GGabriele)
notes:
    - The module can only activate and commit a package,
      not remove or deactivate it.
    - Use C(transport=nxapi) to avoid connection timeout
options:
    pkg:
        description:
            - Name of the remote package.
        required: true
    file_system:
        description:
            - The remote file system of the device. If omitted,
              devices that support a file_system parameter will use
              their default values.
        required: false
        default: null
'''

EXAMPLES = '''
- nxos_smu:
    pkg: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
    username: "{{ un }}"
    password: "{{ pwd }}"
    host: "{{ inventory_hostname }}"
'''

RETURN = '''
file_system:
    description: The remote file system of the device.
    returned: always
    type: string
    sample: "bootflash:"
pkg:
    description: Name of the remote package
    type: string
    returned: always
    sample: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
updates:
    description: commands sent to the device
    returned: always
    type: list
    sample: ["install add bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm",
             "install activate bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm force",
             "install commit bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"]
changed:
    description: check to see if a change was made on the device
    returned: always
    type: boolean
    sample: true
'''

import time

from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule


def execute_show_command(command, module, command_type='cli_show'):
    """Run a single show command and return its response list.

    Both the 'cli' and 'nxapi' transports went through run_commands with an
    identical body in the original code, so a single call covers both; the
    command_type parameter is kept for interface compatibility.
    """
    return run_commands(module, [command])


def remote_file_exists(module, dst, file_system='bootflash:'):
    """Return True if `dst` exists on the given remote file system."""
    command = 'dir {0}/{1}'.format(file_system, dst)
    body = execute_show_command(command, module,
                                command_type='cli_show_ascii')
    if 'No such file' in body[0]:
        return False
    return True


def apply_patch(module, commands):
    """Apply each install command in order, failing fast on an error.

    Bug fix: the original never assigned `response`, so the failure check
    raised NameError instead of reporting the device's response.
    """
    for command in commands:
        response = load_config(module, [command])
        time.sleep(5)
        if response and 'failed' in response:
            module.fail_json(msg="Operation failed!", response=response)


def get_commands(module, pkg, file_system):
    """Build the minimal add/activate/commit sequence for `pkg`.

    A step is skipped when the package (name without the .rpm extension)
    already appears in the corresponding `show install ...` output.
    """
    commands = []
    splitted_pkg = pkg.split('.')
    # Drop the trailing extension: install output lists the package
    # without it.
    fixed_pkg = '.'.join(splitted_pkg[0:-1])

    command = 'show install inactive'
    inactive_body = execute_show_command(command, module,
                                         command_type='cli_show_ascii')
    command = 'show install active'
    active_body = execute_show_command(command, module,
                                       command_type='cli_show_ascii')

    if fixed_pkg not in inactive_body[0] and fixed_pkg not in active_body[0]:
        commands.append('install add {0}{1}'.format(file_system, pkg))

    if fixed_pkg not in active_body[0]:
        commands.append('install activate {0}{1} force'.format(
            file_system, pkg))

    command = 'show install committed'
    install_body = execute_show_command(command, module,
                                        command_type='cli_show_ascii')
    if fixed_pkg not in install_body[0]:
        commands.append('install commit {0}{1}'.format(file_system, pkg))

    return commands


def main():
    argument_spec = dict(
        pkg=dict(required=True),
        file_system=dict(required=False, default='bootflash:'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    pkg = module.params['pkg']
    file_system = module.params['file_system']
    changed = False

    # The package must already be on the device; this module does not copy.
    remote_exists = remote_file_exists(module, pkg, file_system=file_system)
    if not remote_exists:
        module.fail_json(msg="The requested package doesn't exist "
                             "on the device")

    commands = get_commands(module, pkg, file_system)
    if not module.check_mode and commands:
        apply_patch(module, commands)
        changed = True

    # NOTE(review): get_commands never emits a 'configure' entry; this guard
    # looks like boilerplate from another module -- confirm before removing.
    if 'configure' in commands:
        commands.pop(0)

    module.exit_json(changed=changed,
                     pkg=pkg,
                     file_system=file_system,
                     updates=commands)


if __name__ == '__main__':
    main()
paweljasinski/ironpython3
refs/heads/master
Tests/test_index.py
2
##################################################################################### # # Copyright (c) Microsoft Corporation. All rights reserved. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Apache License, Version 2.0. # # You must not remove this notice, or any other, from this software. # # ##################################################################################### import sys from iptest.assert_util import * if is_cli or is_silverlight: import System import System.Collections load_iron_python_test() from IronPythonTest import * @skip('win32') def test_string(): x = System.Array.CreateInstance(System.String, 2) x[0]="Hello" x[1]="Python" Assert(x[0] == "Hello") Assert(x[1] == "Python") @skip('win32') def test_hashtable(): hashtables = [System.Collections.Generic.Dictionary[object, object]()] if not is_silverlight: # Hashtable isn't available in Silverlight hashtables.append(System.Collections.Hashtable()) for x in hashtables: x["Hi"] = "Hello" x[1] = "Python" x[10,] = "Tuple Int" x["String",] = "Tuple String" x[2.4,] = "Tuple Double" Assert(x["Hi"] == "Hello") Assert(x[1] == "Python") Assert(x[(10,)] == "Tuple Int") Assert(x[("String",)] == "Tuple String") Assert(x[(2.4,)] == "Tuple Double") success=False try: x[1,2] = 10 except TypeError, e: success=True Assert(success) x[(1,2)] = "Tuple key in hashtable" Assert(x[1,2,] == "Tuple key in hashtable") @skip('win32') def test_multidim_array(): md = System.Array.CreateInstance(System.Int32, 2, 2, 2) for i in range(2): for j in range(2): for k in range(2): md[i,j,k] = i+j+k for i in range(2): for j in range(2): for k in range(2): Assert(md[i,j,k] == i+j+k) @skip('win32') 
def test_array(): # verify that slicing an array returns an array of the proper type from System import Array data = Array[int]( (2,3,4,5,6) ) AreEqual(type(data[:0]), Array[int]) AreEqual(type(data[0:3:2]), Array[int]) def test_dict(): d = dict() d[1,2,3,4,5] = 12345 Assert(d[1,2,3,4,5] == d[(1,2,3,4,5)]) Assert(d[1,2,3,4,5] == 12345) Assert(d[(1,2,3,4,5)] == 12345) d = {None:23} del d[None] AreEqual(d, {}) @skip('win32') def test_custom_indexable(): i = Indexable() i[10] = "Hello Integer" i["String"] = "Hello String" i[2.4] = "Hello Double" Assert(i[10] == "Hello Integer") Assert(i["String"] == "Hello String") Assert(i[2.4] == "Hello Double") indexes = (10, "String", 2.4) for a in indexes: for b in indexes: complicated = "Complicated " + str(a) + " " + str(b) i[a,b] = complicated Assert(i[a,b] == complicated) @skip('win32') def test_property_access(): x = PropertyAccessClass() for i in range(3): Assert(x[i] == i) for j in range(3): x[i, j] = i + j Assert(x[i, j] == i + j) for k in range(3): x[i, j, k] = i + j + k Assert(x[i, j, k] == i + j + k) @skip('win32') def test_multiple_indexes(): x = MultipleIndexes() def get_value(*i): value = "" append = False for v in i: if append: value = value + " : " value = value + str(v) append = True return value def get_tuple_value(*i): return get_value("Indexing as tuple", *i) def get_none(*i): return None def verify_values(mi, gv, gtv): for i in i_idx: Assert(x[i] == gv(i)) Assert(x[i,] == gtv(i)) for j in j_idx: Assert(x[i,j] == gv(i,j)) Assert(x[i,j,] == gtv(i,j)) for k in k_idx: Assert(x[i,j,k] == gv(i,j,k)) Assert(x[i,j,k,] == gtv(i,j,k)) for l in l_idx: Assert(x[i,j,k,l] == gv(i,j,k,l)) Assert(x[i,j,k,l,] == gtv(i,j,k,l)) for m in m_idx: Assert(x[i,j,k,l,m] == gv(i,j,k,l,m)) Assert(x[i,j,k,l,m,] == gtv(i,j,k,l,m)) i_idx = ("Hi", 2.5, 34) j_idx = (0, "*", "@") k_idx = range(3) l_idx = ("Sun", "Moon", "Star") m_idx = ((9,8,7), (6,5,4,3,2), (4,)) for i in i_idx: x[i] = get_value(i) for j in j_idx: x[i,j] = get_value(i,j) for 
k in k_idx: x[i,j,k] = get_value(i,j,k) for l in l_idx: x[i,j,k,l] = get_value(i,j,k,l) for m in m_idx: x[i,j,k,l,m] = get_value(i,j,k,l,m) verify_values(x, get_value, get_none) for i in i_idx: x[i,] = get_tuple_value(i) for j in j_idx: x[i,j,] = get_tuple_value(i,j) for k in k_idx: x[i,j,k,] = get_tuple_value(i,j,k) for l in l_idx: x[i,j,k,l,] = get_tuple_value(i,j,k,l) for m in m_idx: x[i,j,k,l,m,] = get_tuple_value(i,j,k,l,m) verify_values(x, get_value, get_tuple_value) @skip('win32') def test_indexable_list(): a = IndexableList() for i in range(5): result = a.Add(i) for i in range(5): AreEqual(a[str(i)], i) @skip('win32') def test_generic_function(): # all should succeed at indexing x = GenMeth.StaticMeth[int, int] x = GenMeth.StaticMeth[int] x = GenMeth.StaticMeth[(int, int)] x = GenMeth.StaticMeth[(int,)] def test_getorsetitem_override(): class old_base: pass for base in [object, list, dict, int, str, tuple, float, long, complex, old_base]: class foo(base): def __getitem__(self, index): return index def __setitem__(self, index, value): self.res = (index, value) a = foo() AreEqual(a[1], 1) AreEqual(a[1,2], (1,2)) AreEqual(a[1,2,3], (1,2,3)) AreEqual(a[(1, 2)], (1, 2)) AreEqual(a[(5,)], (5,)) AreEqual(a[6,], (6,)) a[1] = 23 AreEqual(a.res, (1,23)) a[1,2] = 23 AreEqual(a.res, ((1,2),23)) a[1,2,3] = 23 AreEqual(a.res, ((1,2,3),23)) a[(1, 2)] = "B"; AreEqual(a.res, ((1, 2), "B")) a[(5,)] = "D"; AreEqual(a.res, ((5,), "D")) a[6,] = "E"; AreEqual(a.res, ((6,), "E")) def test_getorsetitem_super(): tests = [ # base type, constructor arg, result of index 0 (list,(1,2,3,4,5), 1), (dict,{0:2, 3:4, 5:6, 7:8}, 2), (str,'abcde', 'a'), (tuple, (1,2,3,4,5), 1),] for testInfo in tests: base = testInfo[0] arg = testInfo[1] zero = testInfo[2] class foo(base): def __getitem__(self, index): if isinstance(index, tuple): return base.__getitem__(self, index[0]) return base.__getitem__(self, index) def __setitem__(self, index, value): if isinstance(index, tuple): 
base.__setitem__(self, index[0], value) else: base.__setitem__(self, index, value) a = foo(arg) AreEqual(a[0], zero) a = foo(arg) AreEqual(a[0,1], zero) a = foo(arg) AreEqual(a[0,1,2], zero) a = foo(arg) AreEqual(a[(0,)], zero) a = foo(arg) AreEqual(a[(0,1)], zero) a = foo(arg) AreEqual(a[(0,1,2)], zero) if hasattr(base, '__setitem__'): a[0] = 'x' AreEqual(a[0], 'x') a[0,1] = 'y' AreEqual(a[0,1], 'y') a[0,1,2] = 'z' AreEqual(a[0,1,2], 'z') a[(0,)] = 'x' AreEqual(a[(0,)], 'x') a[(0,1)] = 'y' AreEqual(a[(0,1)], 'y') a[(0,1,2)] = 'z' AreEqual(a[(0,1,2)], 'z') def test_getorsetitem_slice(): tests = [ # base type, constructor arg, result of index 0 (list,(1,2,3,4,5), 1, lambda x: [x]), (str,'abcde', 'a', lambda x: x), (tuple, (1,2,3,4,5), 1, lambda x: (x,)),] for testInfo in tests: base = testInfo[0] arg = testInfo[1] zero = testInfo[2] resL = testInfo[3] class foo(base): def __getitem__(self, index): if isinstance(index, tuple): return base.__getitem__(self, index[0]) return base.__getitem__(self, index) def __setitem__(self, index, value): if isinstance(index, tuple): base.__setitem__(self, index[0], value) else: base.__setitem__(self, index, value) a = foo(arg) AreEqual(a[0:1], resL(zero)) a = foo(arg) AreEqual(a[0:1, 1:2], resL(zero)) a = foo(arg) AreEqual(a[0:1, 1:2, 2:3], resL(zero)) a = foo(arg) AreEqual(a[(slice(0,1),)], resL(zero)) a = foo(arg) AreEqual(a[(slice(0,1),slice(1,2))], resL(zero)) a = foo(arg) AreEqual(a[(slice(0,1),slice(1,2),slice(2,3))], resL(zero)) if hasattr(base, '__setitem__'): a[0:1] = 'x' AreEqual(a[0:1], ['x']) a[0:1,1:2] = 'y' AreEqual(a[0:1,1:2], ['y']) a[0:1,1:2,2:3] = 'z' AreEqual(a[0:1,1:2,2:3], ['z']) a[(slice(0,1),)] = 'x' AreEqual(a[(slice(0,1),)], ['x']) a[(slice(0,1),slice(1,2))] = 'y' AreEqual(a[(slice(0,1),slice(1,2))], ['y']) a[(slice(0,1),slice(1,2),slice(2,3))] = 'z' AreEqual(a[(slice(0,1),slice(1,2),slice(2,3))], ['z']) def test_index_by_tuple(): class indexable: def __getitem__(self, index): return index def 
__setitem__(self, index, value): self.index = index self.value = value i = indexable() AreEqual(i["Hi"], "Hi") AreEqual(i[(1, 2)], (1, 2)) AreEqual(i[3, 4], (3, 4)) AreEqual(i[(5,)], (5,)) AreEqual(i[6,], (6,)) i["Hi"] = "A"; AreEqual(i.index, "Hi"); AreEqual(i.value, "A") i[(1, 2)] = "B"; AreEqual(i.index, (1, 2)); AreEqual(i.value, "B") i[3, 4] = "C"; AreEqual(i.index, (3, 4)); AreEqual(i.value, "C") i[(5,)] = "D"; AreEqual(i.index, (5,)); AreEqual(i.value, "D") i[6,] = "E"; AreEqual(i.index, (6,)); AreEqual(i.value, "E") def test_assignment_order(): # declare types to log the execution ordering class Q: def __init__(self): self.log = "" # we're just doing assignment, so don't define a __getitem__ def __setitem__(self, idx, val): self.log += "(idx=%s, val=%s)" % ( idx, val) c=Q() def f(): c[0]=1 # do a side effect to log execution order of f() return 'x' # Now execute the interesting statement. This has side-effects in c.log to log execution order. c[5]=c[2]=f() # now check that order is as expected # - assignments should occur from left to right # - rhs expression is evalled first, and should only be executed once, AreEqual(c.log, "(idx=0, val=1)(idx=5, val=x)(idx=2, val=x)") def test_custom_indexer(): class cust_index(object): def __init__(self, index): self.index = index def __index__(self): return self.index for sliceable in [x(range(5)) for x in (list, tuple)]: AreEqual(sliceable[cust_index(0L)], 0) AreEqual(sliceable[cust_index(0)], 0) AreEqual(list(sliceable[cust_index(0L) : cust_index(3L)]), [0, 1, 2]) AreEqual(list(sliceable[cust_index(0) : cust_index(3)]), [0, 1, 2]) # dictionary indexing shouldn't be affected x = cust_index(42) d = {x:3} AreEqual(d[x], 3) for key in d.keys(): AreEqual(key, x) if is_cli: from System.Collections.Generic import List List[int] @skip('win32') def test_csharp_enumeration(): a = CSharpEnumerable() for method in ('GetEnumerableOfInt', 'GetEnumerableOfObject', 'GetEnumerable', 'GetEnumeratorOfInt', 'GetEnumeratorOfObject', 
'GetEnumerator'): sum = 0 for i in getattr(a, method)(): sum = sum + i AreEqual(sum, 6) def test_error(): l = [] if is_cpython: #http://ironpython.codeplex.com/workitem/28220 AssertErrorWithPartialMessage(TypeError, "'builtin_function_or_method' object is not subscriptable", lambda: l.append[float](1.0)) AssertErrorWithPartialMessage(TypeError, "'int' object is not subscriptable", lambda: 1[2]) else: AssertErrorWithPartialMessage(TypeError, "'builtin_function_or_method' object is unsubscriptable", lambda: l.append[float](1.0)) AssertErrorWithPartialMessage(TypeError, "'int' object is unsubscriptable", lambda: 1[2]) def test_cp19350_index_restrictions(): global keyValue class X(object): def __setitem__(self, key, value): global keyValue keyValue = key def f(a, b): X()[a, b] = object() f(1, 2) AreEqual(keyValue, (1, 2)) f('one', 'two') AreEqual(keyValue, ('one', 'two')) run_test(__name__)
alexsmx/djangoAppengineSrcTemplate
refs/heads/master
django/contrib/databrowse/views.py
529
from django.http import Http404 from django.shortcuts import render_to_response ########### # CHOICES # ########### def choice_list(request, app_label, module_name, field_name, models): m, f = lookup_field(app_label, module_name, field_name, models) return render_to_response('databrowse/choice_list.html', {'model': m, 'field': f}) def choice_detail(request, app_label, module_name, field_name, field_val, models): m, f = lookup_field(app_label, module_name, field_name, models) try: label = dict(f.field.choices)[field_val] except KeyError: raise Http404('Invalid choice value given') obj_list = m.objects(**{f.field.name: field_val}) return render_to_response('databrowse/choice_detail.html', {'model': m, 'field': f, 'value': label, 'object_list': obj_list})
textbook/flash_services
refs/heads/master
tests/test_core_service.py
1
from collections import OrderedDict from datetime import datetime, timezone, timedelta import pytest from flash_services.core import Service class Fake(Service): ROOT = 'root/url' def __init__(self, *, foo, bar): super().__init__() def update(self): pass def format_data(self, data): pass @pytest.mark.parametrize('config', [ {}, {'foo': None}, {'bar': None}, ]) def test_required_config(config): with pytest.raises(TypeError) as excinfo: Fake.from_config(**config) message = excinfo.value.args[0] assert 'missing required config keys' in message assert 'from Fake' in message @pytest.mark.parametrize('args, kwargs, expected', [ (('/endpoint',), {}, 'root/url/endpoint'), (('/endpoint/{foo}',), {'params': {'foo': 'bar'}}, 'root/url/endpoint/bar'), (('/endpoint',), {'url_params': {'foo': 'bar'}}, 'root/url/endpoint?foo=bar'), ( ('/endpoint',), {'url_params': OrderedDict([('foo', 'bar'), ('bar', 'baz')])}, 'root/url/endpoint?foo=bar&bar=baz', ), ( ('/endpoint/{hello}',), { 'params': {'hello': 'world'}, 'url_params': OrderedDict([('foo', 'bar'), ('bar', 'baz')]), }, 'root/url/endpoint/world?foo=bar&bar=baz', ), ]) def test_url_builder(args, kwargs, expected): assert Fake(foo=None, bar=None).url_builder(*args, **kwargs) == expected def test_calculate_timeout_delta_seconds(): assert Service.calculate_timeout('120') == 120 def test_calculate_timeout_http_date(): three_minutes_later = datetime.now(tz=timezone.utc) + timedelta(minutes=3) http_date = '%a, %d %b %Y %H:%M:%S %Z' assert 179 <= Service.calculate_timeout( three_minutes_later.strftime(http_date), ) <= 181 def test_abstract_methods_required(): class Broken(Service): """Missing abstract methods.""" with pytest.raises(TypeError): Broken()
romain-dartigues/ansible
refs/heads/devel
lib/ansible/modules/network/nxos/nxos_vxlan_vtep.py
25
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = ''' --- module: nxos_vxlan_vtep extends_documentation_fragment: nxos version_added: "2.2" short_description: Manages VXLAN Network Virtualization Endpoint (NVE). description: - Manages VXLAN Network Virtualization Endpoint (NVE) overlay interface that terminates VXLAN tunnels. author: Gabriele Gerbino (@GGabriele) notes: - Tested against NXOSv 7.3.(0)D1(1) on VIRL - The module is used to manage NVE properties, not to create NVE interfaces. Use M(nxos_interface) if you wish to do so. - C(state=absent) removes the interface. - Default, where supported, restores params default value. options: interface: description: - Interface name for the VXLAN Network Virtualization Endpoint. required: true description: description: - Description of the NVE interface. host_reachability: description: - Specify mechanism for host reachability advertisement. type: bool shutdown: description: - Administratively shutdown the NVE interface. type: bool source_interface: description: - Specify the loopback interface whose IP address should be used for the NVE interface. source_interface_hold_down_time: description: - Suppresses advertisement of the NVE loopback address until the overlay has converged. 
global_mcast_group_L3: description: - Global multicast ip prefix for L3 VNIs or the keyword 'default' This is available on NX-OS 9K series running 9.2.x or higher. version_added: "2.8" global_mcast_group_L2: description: - Global multicast ip prefix for L2 VNIs or the keyword 'default' This is available on NX-OS 9K series running 9.2.x or higher. version_added: "2.8" global_suppress_arp: description: - Enables ARP suppression for all VNIs This is available on NX-OS 9K series running 9.2.x or higher. type: bool version_added: "2.8" global_ingress_replication_bgp: description: - Configures ingress replication protocol as bgp for all VNIs This is available on NX-OS 9K series running 9.2.x or higher. type: bool version_added: "2.8" state: description: - Determines whether the config should be present or not on the device. default: present choices: ['present','absent'] ''' EXAMPLES = ''' - nxos_vxlan_vtep: interface: nve1 description: default host_reachability: default source_interface: Loopback0 source_interface_hold_down_time: 30 shutdown: default ''' RETURN = ''' commands: description: commands sent to the device returned: always type: list sample: ["interface nve1", "source-interface loopback0", "source-interface hold-down-time 30", "description simple description", "shutdown", "host-reachability protocol bgp"] ''' import re from ansible.module_utils.network.nxos.nxos import get_config, load_config from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.common.config import CustomNetworkConfig BOOL_PARAMS = [ 'shutdown', 'host_reachability', 'global_ingress_replication_bgp', 'global_suppress_arp', ] PARAM_TO_COMMAND_KEYMAP = { 'description': 'description', 'global_suppress_arp': 'global suppress-arp', 'global_ingress_replication_bgp': 'global ingress-replication protocol bgp', 'global_mcast_group_L3': 'global mcast-group L3', 'global_mcast_group_L2': 
'global mcast-group L2', 'host_reachability': 'host-reachability protocol bgp', 'interface': 'interface', 'shutdown': 'shutdown', 'source_interface': 'source-interface', 'source_interface_hold_down_time': 'source-interface hold-down-time' } PARAM_TO_DEFAULT_KEYMAP = { 'description': False, 'shutdown': True, 'source_interface_hold_down_time': '180', } def get_value(arg, config, module): if arg in BOOL_PARAMS: REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) NO_SHUT_REGEX = re.compile(r'\s+no shutdown\s*$', re.M) value = False if arg == 'shutdown': try: if NO_SHUT_REGEX.search(config): value = False elif REGEX.search(config): value = True except TypeError: value = False else: try: if REGEX.search(config): value = True except TypeError: value = False else: REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) NO_DESC_REGEX = re.compile(r'\s+{0}\s*$'.format('no description'), re.M) SOURCE_INTF_REGEX = re.compile(r'(?:{0}\s)(?P<value>\S+)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) value = '' if arg == 'description': if NO_DESC_REGEX.search(config): value = False elif PARAM_TO_COMMAND_KEYMAP[arg] in config: value = REGEX.search(config).group('value').strip() elif arg == 'source_interface': for line in config.splitlines(): try: if PARAM_TO_COMMAND_KEYMAP[arg] in config: value = SOURCE_INTF_REGEX.search(config).group('value').strip() break except AttributeError: value = '' elif arg == 'global_mcast_group_L2': for line in config.splitlines(): try: if 'global mcast-group' in line and 'L2' in line: value = line.split()[2].strip() break except AttributeError: value = '' elif arg == 'global_mcast_group_L3': for line in config.splitlines(): try: if 'global mcast-group' in line and 'L3' in line: value = line.split()[2].strip() break except AttributeError: value = '' else: if PARAM_TO_COMMAND_KEYMAP[arg] in config: value = REGEX.search(config).group('value').strip() return value def get_existing(module, args): 
existing = {} netcfg = CustomNetworkConfig(indent=2, contents=get_config(module, flags=['all'])) interface_string = 'interface {0}'.format(module.params['interface'].lower()) parents = [interface_string] config = netcfg.get_section(parents) if config: for arg in args: existing[arg] = get_value(arg, config, module) existing['interface'] = module.params['interface'].lower() else: if interface_string in str(netcfg): existing['interface'] = module.params['interface'].lower() for arg in args: existing[arg] = '' return existing def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if new_key: value = table.get(key) if value: new_dict[new_key] = value else: new_dict[new_key] = value return new_dict def fix_commands(commands, module): source_interface_command = '' no_source_interface_command = '' no_host_reachability_command = '' host_reachability_command = '' for command in commands: if 'no source-interface hold-down-time' in command: pass elif 'source-interface hold-down-time' in command: pass elif 'no source-interface' in command: no_source_interface_command = command elif 'source-interface' in command: source_interface_command = command elif 'no host-reachability' in command: no_host_reachability_command = command elif 'host-reachability' in command: host_reachability_command = command if host_reachability_command: commands.pop(commands.index(host_reachability_command)) commands.insert(0, host_reachability_command) if source_interface_command: commands.pop(commands.index(source_interface_command)) commands.insert(0, source_interface_command) if no_host_reachability_command: commands.pop(commands.index(no_host_reachability_command)) commands.append(no_host_reachability_command) if no_source_interface_command: commands.pop(commands.index(no_source_interface_command)) commands.append(no_source_interface_command) commands.insert(0, 'terminal dont-ask') return commands def state_present(module, existing, proposed, 
candidate): commands = list() proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) for key, value in proposed_commands.items(): if value is True: commands.append(key) elif value is False: commands.append('no {0}'.format(key)) elif value == 'default': if existing_commands.get(key): existing_value = existing_commands.get(key) if 'global mcast-group' in key: commands.append('no {0}'.format(key)) else: commands.append('no {0} {1}'.format(key, existing_value)) else: if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS: commands.append('no {0}'.format(key.lower())) module.exit_json(commands=commands) else: if 'L2' in key: commands.append('global mcast-group ' + value + ' L2') elif 'L3' in key: commands.append('global mcast-group ' + value + ' L3') else: command = '{0} {1}'.format(key, value.lower()) commands.append(command) if commands: commands = fix_commands(commands, module) parents = ['interface {0}'.format(module.params['interface'].lower())] candidate.add(commands, parents=parents) else: if not existing and module.params['interface']: commands = ['interface {0}'.format(module.params['interface'].lower())] candidate.add(commands, parents=[]) def state_absent(module, existing, proposed, candidate): commands = ['no interface {0}'.format(module.params['interface'].lower())] candidate.add(commands, parents=[]) def main(): argument_spec = dict( interface=dict(required=True, type='str'), description=dict(required=False, type='str'), host_reachability=dict(required=False, type='bool'), global_ingress_replication_bgp=dict(required=False, type='bool'), global_suppress_arp=dict(required=False, type='bool'), global_mcast_group_L2=dict(required=False, type='str'), global_mcast_group_L3=dict(required=False, type='str'), shutdown=dict(required=False, type='bool'), source_interface=dict(required=False, type='str'), source_interface_hold_down_time=dict(required=False, type='str'), 
state=dict(choices=['present', 'absent'], default='present', required=False), ) argument_spec.update(nxos_argument_spec) mutually_exclusive = [('global_ingress_replication_bgp', 'global_mcast_group_L2')] module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True, ) warnings = list() result = {'changed': False, 'commands': [], 'warnings': warnings} check_args(module, warnings) state = module.params['state'] args = PARAM_TO_COMMAND_KEYMAP.keys() existing = get_existing(module, args) proposed_args = dict((k, v) for k, v in module.params.items() if v is not None and k in args) proposed = {} for key, value in proposed_args.items(): if key != 'interface': if str(value).lower() == 'default': value = PARAM_TO_DEFAULT_KEYMAP.get(key) if value is None: if key in BOOL_PARAMS: value = False else: value = 'default' if str(existing.get(key)).lower() != str(value).lower(): proposed[key] = value candidate = CustomNetworkConfig(indent=3) if state == 'present': if not existing: warnings.append("The proposed NVE interface did not exist. " "It's recommended to use nxos_interface to create " "all logical interfaces.") state_present(module, existing, proposed, candidate) elif state == 'absent' and existing: state_absent(module, existing, proposed, candidate) if candidate: candidate = candidate.items_text() result['commands'] = candidate result['changed'] = True load_config(module, candidate) module.exit_json(**result) if __name__ == '__main__': main()
firstjob/python-social-auth
refs/heads/master
social/apps/django_app/default/migrations/0003_alter_email_max_length.py
63
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('default', '0002_add_related_name'), ] operations = [ migrations.AlterField( model_name='code', name='email', field=models.EmailField(max_length=254), ), ]
aboganas/frappe
refs/heads/develop
frappe/core/doctype/scheduler_log/scheduler_log.py
18
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document class SchedulerLog(Document): def onload(self): if not self.seen: self.seen = 1 self.save() def set_old_logs_as_seen(): frappe.db.sql("""update `tabScheduler Log` set seen=1 where seen=0 and datediff(curdate(), creation) > 7""")
tobinjt/Flexget
refs/heads/develop
flexget/plugins/cli/win32_service.py
4
from __future__ import unicode_literals, division, absolute_import from builtins import * # noqa pylint: disable=unused-import, redefined-builtin import argparse import logging import os import socket import sys import flexget from flexget import options from flexget.event import event from flexget.terminal import console log = logging.getLogger('win32_service') try: import servicemanager import win32event import win32service import win32serviceutil class AppServerSvc(win32serviceutil.ServiceFramework): _svc_name_ = 'FlexGet' _svc_display_name_ = 'FlexGet Daemon' _svc_description_ = 'Runs FlexGet tasks according to defined schedules' def __init__(self, args): win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) socket.setdefaulttimeout(60) self.manager = None def SvcStop(self): self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) from flexget.manager import manager manager.shutdown(finish_queue=False) self.ReportServiceStatus(win32service.SERVICE_STOPPED) def SvcDoRun(self): servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ''), ) flexget.main(['daemon', 'start']) except ImportError: pass def do_cli(manager, options): import win32file import win32serviceutil if hasattr(sys, 'real_prefix'): # We are in a virtualenv, there is some special setup if not os.path.exists(os.path.join(sys.prefix, 'python.exe')): console('Creating a hard link to virtualenv python.exe in root of virtualenv') win32file.CreateHardLink( os.path.join(sys.prefix, 'python.exe'), os.path.join(sys.prefix, 'Scripts', 'python.exe'), ) argv = options.args if options.help: argv = [] # Hack sys.argv a bit so that we get a better usage message sys.argv[0] = 'flexget service' win32serviceutil.HandleCommandLine(AppServerSvc, argv=['flexget service'] + argv) @event('options.register') def register_parser_arguments(): if not sys.platform.startswith('win'): return # Still not 
fully working. Hidden for now. parser = options.register_command( 'service', do_cli, # help='set up or control a windows service for the daemon', add_help=False, ) parser.add_argument('--help', '-h', action='store_true') parser.add_argument('args', nargs=argparse.REMAINDER)
lukacu/manus
refs/heads/master
python/manus_starter/__init__.py
1
#!/usr/bin/env python2 import os import sys import time import struct import signal import shutil from subprocess import call from ignition import ProgramGroup from manus_starter.privileged import run_upgrade, run_shutdown try: from manus import VERSION except ImportError: VERSION = 'N/A' LOCAL_CAMERA = os.getenv('MANUS_CAMERA', '/dev/video0') LOCAL_MANIPULATOR = os.getenv('MANUS_MANIPULATOR', '/dev/i2c-1') LOCAL_INTERFACE = os.getenv('MANUS_INTERFACE', 'eth0') kill_now = False def exit_gracefully(signum, frame): global kill_now kill_now = True print "Stopping gracefully" def get_ip_address(ifname): try: import socket import fcntl s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15]) )[20:24]) except IOError: return "unknown" class Menu(object): import curses import curses.panel def __init__(self, items): self.position = 0 self.items = items def navigate(self, n): self.position += n if self.position < 0: self.position = 0 elif self.position >= len(self.items): self.position = len(self.items)-1 def display(self, stdscreen): global kill_now self.curses.curs_set(0) self.screen = stdscreen self.window = stdscreen.subwin(0, 0) self.window.keypad(1) self.panel = self.curses.panel.new_panel(self.window) self.panel.hide() self.curses.panel.update_panels() self.panel.top() self.panel.show() self.window.clear() self.window.timeout(1000) self.update_footer() while not kill_now: try: self.window.refresh() self.curses.doupdate() for index, item in enumerate(self.items): if index == self.position: mode = self.curses.A_REVERSE else: mode = self.curses.A_NORMAL msg = '%d. 
%s' % (index+1, item[0]) self.window.addstr(3+index, 3, msg, mode) key = self.window.getch() if key in [self.curses.KEY_ENTER, ord('\n')]: if self.items[self.position][1] is None: break else: self.no_curses(self.items[self.position][1]) if key == self.curses.KEY_UP: self.navigate(-1) elif key == self.curses.KEY_DOWN: self.navigate(1) elif key == -1: self.update_footer() except KeyboardInterrupt: pass self.window.clear() self.panel.hide() self.curses.panel.update_panels() self.curses.doupdate() def update_footer(self): s = self.window.getmaxyx() has_camera, has_manipulator = scan_resources() datetime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) metadata = "%s Manus %s" % (datetime, VERSION) camera = "ON" if has_camera else "OFF" local_address = get_ip_address(LOCAL_INTERFACE) devices = "%s Camera %s" % (local_address, camera) self.window.move(s[0]-3, 0) self.window.clrtoeol() self.window.addstr(s[0]-3, 3, devices, self.curses.A_REVERSE) self.window.addstr(s[0]-3, s[1] - 3 - len(metadata), metadata, self.curses.A_REVERSE) def no_curses(self, fun, **kwargs): self.screen.clear() self.curses.nocbreak() self.screen.keypad(0) self.screen.move(0,0) self.curses.echo() self.curses.endwin() try: fun(**kwargs) finally: self.curses.noecho() self.curses.cbreak() self.screen.keypad(1) def run_script(script, shutdown=None): try: if isinstance(script, ProgramGroup): group = script print "Running %s" % group.description else: print "Running %s" % script group = ProgramGroup(script) group.announce("Starting up ...") group.start() time.sleep(1) except ValueError, e: print "Error opening spark file %s" % e return False stop_requested = False try: while group.valid(): time.sleep(1) if not shutdown is None and shutdown(): stop_requested = True break except KeyboardInterrupt: stop_requested = True group.announce("Shutting down ...") group.stop() return not stop_requested def scan_resources(): has_camera = os.path.exists(LOCAL_CAMERA) has_manipulator = 
os.path.exists(LOCAL_MANIPULATOR) return has_camera, has_manipulator def run_interactive(launchfiles): import curses signal.signal(signal.SIGTERM, exit_gracefully) menu_items = [] def wrap_run(group): return lambda: run_script(group) for file in os.listdir(launchfiles): if not file.endswith(".spark"): continue try: group = ProgramGroup(os.path.join(launchfiles, file)) menu_items.append((group.description, wrap_run(group))) finally: pass def run_terminal(): call(["sudo", "-u", "manus", "bash"]) return False def run_upgrade_reload(): run_upgrade() os.execv(sys.executable, [sys.executable, sys.argv[0]]) return True menu_items.append(('Upgrade system', run_upgrade_reload)) menu_items.append(('Exit to terminal', run_terminal)) menu_items.append(('Shutdown', run_shutdown)) menu = Menu(menu_items) curses.wrapper(lambda scr: menu.display(scr)) def run_service(launchfile): signal.signal(signal.SIGTERM, exit_gracefully) def resources_available(): camera, manipulator = scan_resources() print camera, manipulator return camera and manipulator try: while not kill_now: if resources_available(): if not run_script(launchfile, shutdown=lambda: kill_now): break else: time.sleep(3) except KeyboardInterrupt: return
pasiegel/SickGear
refs/heads/master
lib/unidecode/x003.py
246
data = ( '', # 0x00 '', # 0x01 '', # 0x02 '', # 0x03 '', # 0x04 '', # 0x05 '', # 0x06 '', # 0x07 '', # 0x08 '', # 0x09 '', # 0x0a '', # 0x0b '', # 0x0c '', # 0x0d '', # 0x0e '', # 0x0f '', # 0x10 '', # 0x11 '', # 0x12 '', # 0x13 '', # 0x14 '', # 0x15 '', # 0x16 '', # 0x17 '', # 0x18 '', # 0x19 '', # 0x1a '', # 0x1b '', # 0x1c '', # 0x1d '', # 0x1e '', # 0x1f '', # 0x20 '', # 0x21 '', # 0x22 '', # 0x23 '', # 0x24 '', # 0x25 '', # 0x26 '', # 0x27 '', # 0x28 '', # 0x29 '', # 0x2a '', # 0x2b '', # 0x2c '', # 0x2d '', # 0x2e '', # 0x2f '', # 0x30 '', # 0x31 '', # 0x32 '', # 0x33 '', # 0x34 '', # 0x35 '', # 0x36 '', # 0x37 '', # 0x38 '', # 0x39 '', # 0x3a '', # 0x3b '', # 0x3c '', # 0x3d '', # 0x3e '', # 0x3f '', # 0x40 '', # 0x41 '', # 0x42 '', # 0x43 '', # 0x44 '', # 0x45 '', # 0x46 '', # 0x47 '', # 0x48 '', # 0x49 '', # 0x4a '', # 0x4b '', # 0x4c '', # 0x4d '', # 0x4e '[?]', # 0x4f '[?]', # 0x50 '[?]', # 0x51 '[?]', # 0x52 '[?]', # 0x53 '[?]', # 0x54 '[?]', # 0x55 '[?]', # 0x56 '[?]', # 0x57 '[?]', # 0x58 '[?]', # 0x59 '[?]', # 0x5a '[?]', # 0x5b '[?]', # 0x5c '[?]', # 0x5d '[?]', # 0x5e '[?]', # 0x5f '', # 0x60 '', # 0x61 '', # 0x62 'a', # 0x63 'e', # 0x64 'i', # 0x65 'o', # 0x66 'u', # 0x67 'c', # 0x68 'd', # 0x69 'h', # 0x6a 'm', # 0x6b 'r', # 0x6c 't', # 0x6d 'v', # 0x6e 'x', # 0x6f '[?]', # 0x70 '[?]', # 0x71 '[?]', # 0x72 '[?]', # 0x73 '\'', # 0x74 ',', # 0x75 '[?]', # 0x76 '[?]', # 0x77 '[?]', # 0x78 '[?]', # 0x79 '', # 0x7a '[?]', # 0x7b '[?]', # 0x7c '[?]', # 0x7d '?', # 0x7e '[?]', # 0x7f '[?]', # 0x80 '[?]', # 0x81 '[?]', # 0x82 '[?]', # 0x83 '', # 0x84 '', # 0x85 'A', # 0x86 ';', # 0x87 'E', # 0x88 'E', # 0x89 'I', # 0x8a '[?]', # 0x8b 'O', # 0x8c '[?]', # 0x8d 'U', # 0x8e 'O', # 0x8f 'I', # 0x90 'A', # 0x91 'B', # 0x92 'G', # 0x93 'D', # 0x94 'E', # 0x95 'Z', # 0x96 'E', # 0x97 'Th', # 0x98 'I', # 0x99 'K', # 0x9a 'L', # 0x9b 'M', # 0x9c 'N', # 0x9d 'Ks', # 0x9e 'O', # 0x9f 'P', # 0xa0 'R', # 0xa1 '[?]', # 0xa2 'S', # 0xa3 'T', # 0xa4 'U', # 0xa5 'Ph', # 
0xa6 'Kh', # 0xa7 'Ps', # 0xa8 'O', # 0xa9 'I', # 0xaa 'U', # 0xab 'a', # 0xac 'e', # 0xad 'e', # 0xae 'i', # 0xaf 'u', # 0xb0 'a', # 0xb1 'b', # 0xb2 'g', # 0xb3 'd', # 0xb4 'e', # 0xb5 'z', # 0xb6 'e', # 0xb7 'th', # 0xb8 'i', # 0xb9 'k', # 0xba 'l', # 0xbb 'm', # 0xbc 'n', # 0xbd 'x', # 0xbe 'o', # 0xbf 'p', # 0xc0 'r', # 0xc1 's', # 0xc2 's', # 0xc3 't', # 0xc4 'u', # 0xc5 'ph', # 0xc6 'kh', # 0xc7 'ps', # 0xc8 'o', # 0xc9 'i', # 0xca 'u', # 0xcb 'o', # 0xcc 'u', # 0xcd 'o', # 0xce '[?]', # 0xcf 'b', # 0xd0 'th', # 0xd1 'U', # 0xd2 'U', # 0xd3 'U', # 0xd4 'ph', # 0xd5 'p', # 0xd6 '&', # 0xd7 '[?]', # 0xd8 '[?]', # 0xd9 'St', # 0xda 'st', # 0xdb 'W', # 0xdc 'w', # 0xdd 'Q', # 0xde 'q', # 0xdf 'Sp', # 0xe0 'sp', # 0xe1 'Sh', # 0xe2 'sh', # 0xe3 'F', # 0xe4 'f', # 0xe5 'Kh', # 0xe6 'kh', # 0xe7 'H', # 0xe8 'h', # 0xe9 'G', # 0xea 'g', # 0xeb 'CH', # 0xec 'ch', # 0xed 'Ti', # 0xee 'ti', # 0xef 'k', # 0xf0 'r', # 0xf1 'c', # 0xf2 'j', # 0xf3 '[?]', # 0xf4 '[?]', # 0xf5 '[?]', # 0xf6 '[?]', # 0xf7 '[?]', # 0xf8 '[?]', # 0xf9 '[?]', # 0xfa '[?]', # 0xfb '[?]', # 0xfc '[?]', # 0xfd '[?]', # 0xfe )
daavery/audacity
refs/heads/master
lib-src/lv2/lv2/plugins/eg-amp.lv2/waflib/Runner.py
330
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file import random,atexit try: from queue import Queue except ImportError: from Queue import Queue from waflib import Utils,Task,Errors,Logs GAP=10 class TaskConsumer(Utils.threading.Thread): def __init__(self): Utils.threading.Thread.__init__(self) self.ready=Queue() self.setDaemon(1) self.start() def run(self): try: self.loop() except Exception: pass def loop(self): while 1: tsk=self.ready.get() if not isinstance(tsk,Task.TaskBase): tsk(self) else: tsk.process() pool=Queue() def get_pool(): try: return pool.get(False) except Exception: return TaskConsumer() def put_pool(x): pool.put(x) def _free_resources(): global pool lst=[] while pool.qsize(): lst.append(pool.get()) for x in lst: x.ready.put(None) for x in lst: x.join() pool=None atexit.register(_free_resources) class Parallel(object): def __init__(self,bld,j=2): self.numjobs=j self.bld=bld self.outstanding=[] self.frozen=[] self.out=Queue(0) self.count=0 self.processed=1 self.stop=False self.error=[] self.biter=None self.dirty=False def get_next_task(self): if not self.outstanding: return None return self.outstanding.pop(0) def postpone(self,tsk): if random.randint(0,1): self.frozen.insert(0,tsk) else: self.frozen.append(tsk) def refill_task_list(self): while self.count>self.numjobs*GAP: self.get_out() while not self.outstanding: if self.count: self.get_out() elif self.frozen: try: cond=self.deadlock==self.processed except AttributeError: pass else: if cond: msg='check the build order for the tasks' for tsk in self.frozen: if not tsk.run_after: msg='check the methods runnable_status' break lst=[] for tsk in self.frozen: lst.append('%s\t-> %r'%(repr(tsk),[id(x)for x in tsk.run_after])) raise Errors.WafError('Deadlock detected: %s%s'%(msg,''.join(lst))) self.deadlock=self.processed if self.frozen: self.outstanding+=self.frozen self.frozen=[] elif not self.count: 
self.outstanding.extend(self.biter.next()) self.total=self.bld.total() break def add_more_tasks(self,tsk): if getattr(tsk,'more_tasks',None): self.outstanding+=tsk.more_tasks self.total+=len(tsk.more_tasks) def get_out(self): tsk=self.out.get() if not self.stop: self.add_more_tasks(tsk) self.count-=1 self.dirty=True return tsk def error_handler(self,tsk): if not self.bld.keep: self.stop=True self.error.append(tsk) def add_task(self,tsk): try: self.pool except AttributeError: self.init_task_pool() self.ready.put(tsk) def init_task_pool(self): pool=self.pool=[get_pool()for i in range(self.numjobs)] self.ready=Queue(0) def setq(consumer): consumer.ready=self.ready for x in pool: x.ready.put(setq) return pool def free_task_pool(self): def setq(consumer): consumer.ready=Queue(0) self.out.put(self) try: pool=self.pool except AttributeError: pass else: for x in pool: self.ready.put(setq) for x in pool: self.get_out() for x in pool: put_pool(x) self.pool=[] def start(self): self.total=self.bld.total() while not self.stop: self.refill_task_list() tsk=self.get_next_task() if not tsk: if self.count: continue else: break if tsk.hasrun: self.processed+=1 continue if self.stop: break try: st=tsk.runnable_status() except Exception: self.processed+=1 tsk.err_msg=Utils.ex_stack() if not self.stop and self.bld.keep: tsk.hasrun=Task.SKIPPED if self.bld.keep==1: if Logs.verbose>1 or not self.error: self.error.append(tsk) self.stop=True else: if Logs.verbose>1: self.error.append(tsk) continue tsk.hasrun=Task.EXCEPTION self.error_handler(tsk) continue if st==Task.ASK_LATER: self.postpone(tsk) elif st==Task.SKIP_ME: self.processed+=1 tsk.hasrun=Task.SKIPPED self.add_more_tasks(tsk) else: tsk.position=(self.processed,self.total) self.count+=1 tsk.master=self self.processed+=1 if self.numjobs==1: tsk.process() else: self.add_task(tsk) while self.error and self.count: self.get_out() assert(self.count==0 or self.stop) self.free_task_pool()
beauseant/dogalive
refs/heads/master
lib/sqlite.py
1
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""SQLite persistence layer for host ping scans.

Tables:
  hosts(id_host, ip, name)  -- known hosts (ip is UNIQUE)
  dates(id_date, date)      -- timestamps of scan runs
  scan(id, date, status)    -- ping result per (host, date); 0=down, 1=up
"""

import sqlite3 as lite
import logging
import argparse
import datetime


class SQLite:

    _name = 'sqlite.db'  # database file name
    _con = None          # sqlite3 connection
    _dir = ''            # directory hint given by the caller (currently unused)

    def __init__(self, direct):
        """Open (or create) the database file.

        :param direct: directory hint, stored in ``self._dir``.
        :raises sqlite3.Error: if the connection cannot be opened.
        """
        self._dir = direct
        try:
            self._con = lite.connect(self._name)
        except lite.Error as e:
            # BUG FIX: the original used Python 2 ``except ..., e`` syntax and
            # called sys.exit() without ever importing sys.  Log and re-raise
            # so the caller can decide what to do.
            logging.error("Error %s:", e.args[0])
            raise

    def __del__(self):
        # Best-effort cleanup: close the connection when the wrapper dies.
        if self._con:
            self._con.close()

    def crearTablaHosts(self):
        """(Re)create the ``hosts`` table, dropping any previous contents."""
        with self._con:
            cur = self._con.cursor()
            cur.execute("DROP TABLE IF EXISTS hosts")
            sql_sen = '''CREATE TABLE hosts (
                id_host INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                ip VARCHAR(15) UNIQUE NOT NULL,
                name VARCHAR(30))'''
            cur.execute(sql_sen)
            self._con.commit()

    def crearTablaFechas(self):
        """(Re)create the ``dates`` table, dropping any previous contents."""
        with self._con:
            cur = self._con.cursor()
            cur.execute("DROP TABLE IF EXISTS dates")
            sql_sen = '''CREATE TABLE dates (
                id_date INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                date DATE NOT NULL)'''
            cur.execute(sql_sen)
            self._con.commit()

    def crearTablaEscaneo(self):
        """(Re)create the ``scan`` table, dropping any previous contents.

        status: 0 = host did not answer, 1 = host is up.
        """
        with self._con:
            cur = self._con.cursor()
            cur.execute("DROP TABLE IF EXISTS scan")
            sql_sen = '''CREATE TABLE scan (
                id INTEGER NOT NULL,
                date INTEGER NOT NULL,
                status INT(1) NOT NULL,
                PRIMARY KEY (id,date),
                FOREIGN KEY (id) REFERENCES hosts(id_host),
                FOREIGN KEY (date) REFERENCES dates(id_date) )'''
            cur.execute(sql_sen)
            self._con.commit()

    def crearTablas(self, db):
        """Create all three tables on ``db`` (signature kept for compatibility)."""
        db.crearTablaHosts()
        db.crearTablaFechas()
        db.crearTablaEscaneo()

    def insertarHosts(self, items):
        """Insert every ``name -> ip`` pair of ``items`` into ``hosts``.

        Rows whose ip already exists are silently skipped, so re-runs are
        idempotent.
        """
        with self._con:
            cur = self._con.cursor()
            # BUG FIX: the original iterated dict keys and passed a single
            # string (``item[0]``) to executemany(), which cannot work; it
            # also gated inserts on a nonsensical hard-coded SELECT.  Insert
            # each (ip, name) pair instead.
            for name, ip in items.items():
                cur.execute("INSERT OR IGNORE INTO hosts VALUES (null,?,?)",
                            (ip, name))
            self._con.commit()

    def insertarFecha(self):
        """Insert the current timestamp into ``dates``."""
        # BUG FIX: ``datetime.today()`` does not exist at module level
        # (it is ``datetime.datetime.today()``), and the table has an
        # AUTOINCREMENT id column that must be supplied explicitly.
        date = datetime.datetime.today().isoformat()
        with self._con:
            cur = self._con.cursor()
            cur.execute("INSERT INTO dates VALUES (null,?)", (date,))
            self._con.commit()

    def insertarEscaneo(self, id_host, id_date, status):
        """Record the ping ``status`` of host ``id_host`` at date ``id_date``."""
        with self._con:
            cur = self._con.cursor()
            # BUG FIX: query parameters must be passed as one sequence, not
            # as three positional arguments.
            cur.execute("INSERT INTO scan VALUES (?,?,?)",
                        (id_host, id_date, status))
            self._con.commit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--d', action="store_true",
                        help='imprimir informacion de debug')
    args = parser.parse_args()
    if args.d:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.DEBUG)

    db = SQLite('SQLitePrueba/')
    hosts = {"charran": "192.168.149.19",
             "loro": "192.168.149.20",
             "piquituerto": "192.168.149.21"}
    db.crearTablas(db)
    db.insertarHosts(hosts)
    print('Fin del proceso')
aglitke/vdsm
refs/heads/master
tests/vmTestsData.py
2
# # Copyright Red Hat 2013 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Refer to the README and COPYING files for full details of the license # CONF_TO_DOMXML = [({ 'vmId': '9ffe28b6-6134-4b1e-8804-1185f49c436f', 'smp': '8', 'memSize': '1024', 'memGuaranteedSize': '512', 'displayPort': '-1', 'vmName': 'testVm', 'display': 'vnc', 'emulatedMachine': 'pc', 'boot': '', 'timeOffset': 0, 'tdf': True, 'acpiEnable': 'true', 'cpuType': 'qemu64', 'smpCoresPerSocket': 1, 'smpThreadsPerCore': 1, 'smp': '1', 'cpuPinning': {}, 'vmchannel': 'true', 'qgaEnable': 'true', 'tabletEnable': False, 'displayNetwork': 'mydisp', 'custom': {}}, """<?xml version="1.0" encoding="utf-8"?> <domain type="kvm"> <name>testVm</name> <uuid>%(vmId)s</uuid> <memory>1048576</memory> <currentMemory>1048576</currentMemory> <vcpu>1</vcpu> <memtune> <min_guarantee>524288</min_guarantee> </memtune> <devices> <channel type="unix"> <target name="com.redhat.rhevm.vdsm" type="virtio"/> <source mode="bind" path="/var/lib/libvirt/qemu/channels/%(vmId)s.com.redhat.rhevm.vdsm"/> </channel> <channel type="unix"> <target name="org.qemu.guest_agent.0" type="virtio"/> <source mode="bind" path="/var/lib/libvirt/qemu/channels/%(vmId)s.org.qemu.guest_agent.0"/> </channel> <input bus="ps2" type="mouse"/> <graphics autoport="yes" passwd="*****" 
passwdValidTo="1970-01-01T00:00:01" port="-1" type="vnc"> <listen network="vdsm-mydisp" type="network"/> </graphics> </devices> <os> <type arch="x86_64" machine="pc">hvm</type> <smbios mode="sysinfo"/> </os> <sysinfo type="smbios"> <system> <entry name="manufacturer">oVirt</entry> <entry name="product">oVirt Node</entry> <entry name="version">18-1</entry> <entry name="serial">fc25cbbe-5520-4f83-b82e-1541914753d9</entry> <entry name="uuid">%(vmId)s</entry> </system> </sysinfo> <clock adjustment="0" offset="variable"> <timer name="rtc" tickpolicy="catchup"/> </clock> <features> <acpi/> </features> <cputune/> <cpu match="exact"> <model>qemu64</model> <feature name="svm" policy="disable"/> <topology cores="1" sockets="1" threads="1"/> </cpu> </domain> """, )]
pap/nupic
refs/heads/master
src/nupic/swarming/ModelTerminator.py
31
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

import functools
# BUG FIX: logging was used below but never imported.
import logging

from nupic.swarming.hypersearch.utils import PeriodicActivityRequest



class ModelTerminator(object):
  """ This is essentially an static class that handles the logic for
  terminating bad models.

  A model is checked against the job's best metric at fixed iteration
  milestones; if it is worse than the best by more than the milestone's
  tolerance, it is canceled.
  """

  # (iteration, tolerance) pairs: at `iteration`, a model may be at most
  # (1 + tolerance) times worse than the current best metric.
  _MILESTONES = [(10, 0.5), (15, 0.25), (20, 0.01)]


  def __init__(self, modelID, cjDAO, logLevel=None):
    """
    Parameters:
    -----------------------------------------------------------------------
    modelID:   database ID of the model to watch
    cjDAO:     client-jobs database access object
    logLevel:  optional logging level override
    """
    self._modelID = modelID
    self._cjDB = cjDAO
    # BUG FIX: the original assigned self.logger twice in one statement
    # (``self.logger = self.logger = ...``).
    self.logger = logging.getLogger(".".join(
        ['com.numenta', self.__class__.__module__, self.__class__.__name__]))
    self._jobID = self._cjDB.modelsGetFields(modelID, ['jobId'])[0]

    if logLevel is not None:
      self.logger.setLevel(logLevel)

    self.logger.info("Created new ModelTerminator for model %d" % modelID)


  def getTerminationCallbacks(self, terminationFunc):
    """ Returns the periodic checks to see if the model should
    continue running.

    Parameters:
    -----------------------------------------------------------------------
    terminationFunc:  The function that will be called in the model main loop
                      as a wrapper around this function. Must have a parameter
                      called 'index'

    Returns:          A list of PeriodicActivityRequest objects.
    """
    activities = [None] * len(ModelTerminator._MILESTONES)
    for index, (iteration, _) in enumerate(ModelTerminator._MILESTONES):
      cb = functools.partial(terminationFunc, index=index)
      activities[index] = PeriodicActivityRequest(repeating=False,
                                                  period=iteration,
                                                  cb=cb)
    # BUG FIX: the original built the list but never returned it, so every
    # caller received None despite the documented return value.
    return activities


  def checkIsTerminated(self, metric, milestoneIndex):
    """ Return True (and cancel the model) if `metric` underperforms the
    job's best metric at milestone `milestoneIndex`.

    Assumes the metric is being minimized.
    """
    bestMetric = self._cjDB.jobGetFields(self._jobID,
                                         ['results'])[0]['bestMetric']
    # BUG FIX: the original indexed the milestone table with the undefined
    # ``self._index`` and used call syntax ``(1)`` instead of ``[1]``.
    tolerance = ModelTerminator._MILESTONES[milestoneIndex][1]

    if metric >= (1.0 + tolerance) * bestMetric:
      # BUG FIX: the format string had three placeholders but only two
      # arguments; the model ID was missing.
      self.logger.info(
          "Model %d underperforming (metric:%f, best:%f). Canceling..."
          % (self._modelID, metric, bestMetric))
      self._cjDB.modelSetFields(self._modelID,
                                {'engCancel': True},
                                ignoreUnchanged=True)
      return True
    return False
gauravssnl/PyPastebin-Symbian
refs/heads/master
requests/packages/oreos/__init__.py
3
# -*- coding: utf-8 -*- from .core import dict_from_string
jinglining/flink
refs/heads/master
flink-python/pyflink/table/environment_settings.py
13
################################################################################
#  Licensed to the Apache Software Foundation (ASF) under one
#  or more contributor license agreements.  See the NOTICE file
#  distributed with this work for additional information
#  regarding copyright ownership.  The ASF licenses this file
#  to you under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance
#  with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway

__all__ = ['EnvironmentSettings']


class EnvironmentSettings(object):
    """
    Defines all parameters that initialize a table environment. Those parameters are used
    only during instantiation of a :class:`~pyflink.table.TableEnvironment` and cannot be
    changed afterwards.

    Example:
    ::

        >>> EnvironmentSettings.new_instance() \\
        ...     .use_old_planner() \\
        ...     .in_streaming_mode() \\
        ...     .with_built_in_catalog_name("my_catalog") \\
        ...     .with_built_in_database_name("my_database") \\
        ...     .build()
    """

    class Builder(object):
        """
        A builder for :class:`EnvironmentSettings`.

        Each fluent method simply forwards to the corresponding method of the
        wrapped Java ``EnvironmentSettings.Builder``.
        """

        def __init__(self):
            self._j_builder = get_gateway().jvm.EnvironmentSettings.Builder()

        def _forward(self, java_method, *args):
            # Delegate to the Java builder and return self so calls chain.
            self._j_builder = getattr(self._j_builder, java_method)(*args)
            return self

        def use_old_planner(self):
            """
            Sets the old Flink planner as the required module. This is the default
            behavior.

            :return: This object.
            :rtype: EnvironmentSettings.Builder
            """
            return self._forward("useOldPlanner")

        def use_blink_planner(self):
            """
            Sets the Blink planner as the required module.

            By default, :func:`use_old_planner` is enabled.

            :return: This object.
            :rtype: EnvironmentSettings.Builder
            """
            return self._forward("useBlinkPlanner")

        def use_any_planner(self):
            """
            Does not set a planner requirement explicitly. A planner will be
            discovered automatically, if there is only one planner available.

            By default, :func:`use_old_planner` is enabled.

            :return: This object.
            :rtype: EnvironmentSettings.Builder
            """
            return self._forward("useAnyPlanner")

        def in_batch_mode(self):
            """
            Sets that the components should work in a batch mode. Streaming mode
            by default.

            :return: This object.
            :rtype: EnvironmentSettings.Builder
            """
            return self._forward("inBatchMode")

        def in_streaming_mode(self):
            """
            Sets that the components should work in a streaming mode. Enabled by
            default.

            :return: This object.
            :rtype: EnvironmentSettings.Builder
            """
            return self._forward("inStreamingMode")

        def with_built_in_catalog_name(self, built_in_catalog_name):
            """
            Specifies the name of the initial catalog to be created when
            instantiating a :class:`~pyflink.table.TableEnvironment`. This catalog
            stores all non-serializable objects such as tables and functions
            registered via e.g.
            :func:`~pyflink.table.TableEnvironment.register_table_sink` or
            :func:`~pyflink.table.TableEnvironment.register_java_function`. It is
            also the initial value for the current catalog, which can be altered
            via :func:`~pyflink.table.TableEnvironment.use_catalog`.

            Default: "default_catalog".

            :param built_in_catalog_name: The specified built-in catalog name.
            :type built_in_catalog_name: str
            :return: This object.
            :rtype: EnvironmentSettings.Builder
            """
            return self._forward("withBuiltInCatalogName", built_in_catalog_name)

        def with_built_in_database_name(self, built_in_database_name):
            """
            Specifies the name of the default database in the initial catalog to
            be created when instantiating a
            :class:`~pyflink.table.TableEnvironment`. The database stores all
            non-serializable objects such as tables and functions registered via
            e.g. :func:`~pyflink.table.TableEnvironment.register_table_sink` or
            :func:`~pyflink.table.TableEnvironment.register_java_function`. It is
            also the initial value for the current database, which can be altered
            via :func:`~pyflink.table.TableEnvironment.use_database`.

            Default: "default_database".

            :param built_in_database_name: The specified built-in database name.
            :type built_in_database_name: str
            :return: This object.
            :rtype: EnvironmentSettings.Builder
            """
            return self._forward("withBuiltInDatabaseName", built_in_database_name)

        def build(self):
            """
            Returns an immutable instance of EnvironmentSettings.

            :return: an immutable instance of EnvironmentSettings.
            :rtype: EnvironmentSettings
            """
            return EnvironmentSettings(self._j_builder.build())

    def __init__(self, j_environment_settings):
        self._j_environment_settings = j_environment_settings

    def get_built_in_catalog_name(self):
        """
        Gets the specified name of the initial catalog to be created when
        instantiating a :class:`~pyflink.table.TableEnvironment`.

        :return: The specified name of the initial catalog to be created.
        :rtype: str
        """
        return self._j_environment_settings.getBuiltInCatalogName()

    def get_built_in_database_name(self):
        """
        Gets the specified name of the default database in the initial catalog to
        be created when instantiating a :class:`~pyflink.table.TableEnvironment`.

        :return: The specified name of the default database in the initial
                 catalog to be created.
        :rtype: str
        """
        return self._j_environment_settings.getBuiltInDatabaseName()

    def is_streaming_mode(self):
        """
        Tells if the :class:`~pyflink.table.TableEnvironment` should work in a
        batch or streaming mode.

        :return: True if the TableEnvironment should work in a streaming mode,
                 false otherwise.
        :rtype: bool
        """
        return self._j_environment_settings.isStreamingMode()

    @staticmethod
    def new_instance():
        """
        Creates a builder for creating an instance of EnvironmentSettings.

        By default, it does not specify a required planner and will use the one
        that is available on the classpath via discovery.

        :return: A builder of EnvironmentSettings.
        :rtype: EnvironmentSettings.Builder
        """
        return EnvironmentSettings.Builder()
sffjunkie/home-assistant
refs/heads/master
homeassistant/components/switch/edimax.py
7
""" Support for Edimax switches. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/switch.edimax/ """ import logging from homeassistant.components.switch import DOMAIN, SwitchDevice from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_USERNAME) from homeassistant.helpers import validate_config # constants DEFAULT_USERNAME = 'admin' DEFAULT_PASSWORD = '1234' DEVICE_DEFAULT_NAME = 'Edimax Smart Plug' REQUIREMENTS = ['https://github.com/rkabadi/pyedimax/archive/' '365301ce3ff26129a7910c501ead09ea625f3700.zip#pyedimax==0.1'] _LOGGER = logging.getLogger(__name__) # pylint: disable=unused-argument def setup_platform(hass, config, add_devices_callback, discovery_info=None): """Find and return Edimax Smart Plugs.""" from pyedimax.smartplug import SmartPlug # pylint: disable=global-statement # check for required values in configuration file if not validate_config({DOMAIN: config}, {DOMAIN: [CONF_HOST]}, _LOGGER): return False host = config.get(CONF_HOST) auth = (config.get(CONF_USERNAME, DEFAULT_USERNAME), config.get(CONF_PASSWORD, DEFAULT_PASSWORD)) name = config.get(CONF_NAME, DEVICE_DEFAULT_NAME) add_devices_callback([SmartPlugSwitch(SmartPlug(host, auth), name)]) class SmartPlugSwitch(SwitchDevice): """Representation an Edimax Smart Plug switch.""" def __init__(self, smartplug, name): """Initialize the switch.""" self.smartplug = smartplug self._name = name @property def name(self): """Return the name of the Smart Plug, if any.""" return self._name @property def current_power_mwh(self): """Return the current power usage in mWh.""" try: return float(self.smartplug.now_power) / 1000000.0 except ValueError: return None except TypeError: return None @property def today_power_mw(self): """Return the today total power usage in mW.""" try: return float(self.smartplug.now_energy_day) / 1000.0 except ValueError: return None except TypeError: return None @property def is_on(self): """Return true 
if switch is on.""" return self.smartplug.state == 'ON' def turn_on(self, **kwargs): """Turn the switch on.""" self.smartplug.state = 'ON' def turn_off(self): """Turn the switch off.""" self.smartplug.state = 'OFF'
koson/MissionPlannerKMTI
refs/heads/master
Lib/pydoc_data/topics.py
45
# Autogenerated by Sphinx on Sat Jul 3 08:52:04 2010 topics = {'assert': u'\nThe ``assert`` statement\n************************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, ``assert expression``, is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, ``assert expression1, expression2``, is equivalent\nto\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that ``__debug__`` and ``AssertionError``\nrefer to the built-in variables with those names. In the current\nimplementation, the built-in variable ``__debug__`` is ``True`` under\nnormal circumstances, ``False`` when optimization is requested\n(command line option -O). The current code generator emits no code\nfor an assert statement when optimization is requested at compile\ntime. Note that it is unnecessary to include the source code for the\nexpression that failed in the error message; it will be displayed as\npart of the stack trace.\n\nAssignments to ``__debug__`` are illegal. 
The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets. 
(This rule is relaxed as of\n Python 1.5; in earlier versions, the object had to be a tuple.\n Since strings are sequences, an assignment like ``a, b = "xy"`` is\n now legal as long as the string has the right length.)\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n ``a.x`` can access either an instance attribute or (if no instance\n attribute exists) a class attribute. 
The LHS target ``a.x`` is\n always set as an instance attribute, creating it if necessary.\n Thus, the two occurrences of ``a.x`` do not necessarily refer to the\n same attribute: if the RHS expression refers to a class attribute,\n the LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with ``property()``.\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, ``IndexError`` is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. 
Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to (small) integers. If either\n bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. 
See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name in front of the name, with leading underscores removed, and\na single underscore inserted in front of the class name. For example,\nthe identifier ``__spam`` occurring in a class named ``Ham`` will be\ntransformed to ``_Ham__spam``. This transformation is independent of\nthe syntactical context in which the identifier is used. If the\ntransformed name is extremely long (longer than 255 characters),\nimplementation defined truncation may happen. If the class name\nconsists only of underscores, no transformation is done.\n', 'atom-literals': u"\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section *Literals* for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. 
Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", 'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. 
For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in the\nclass dictionary of another new-style class, known as the *owner*\nclass. In the examples below, "the attribute" refers to the attribute\nwhose name is the key of the property in the owner class\'\n``__dict__``. 
Descriptors can only be implemented as new-style\nclasses themselves.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. 
Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, A)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. 
If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. 
Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n', 'attribute-references': u'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, e.g., a module, list, or an instance. This\nobject is then asked to produce the attribute whose name is the\nidentifier. If this attribute is not available, the exception\n``AttributeError`` is raised. Otherwise, the type and value of the\nobject produced is determined by the object. Multiple evaluations of\nthe same attribute reference may yield different objects.\n', 'augassign': u'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. 
In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'binary': u'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe ``*`` (multiplication) operator yields the product of its\narguments. The arguments must either both be numbers, or one argument\nmust be an integer (plain or long) and the other must be a sequence.\nIn the former case, the numbers are converted to a common type and\nthen multiplied together. In the latter case, sequence repetition is\nperformed; a negative repetition factor yields an empty sequence.\n\nThe ``/`` (division) and ``//`` (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. 
Plain or long integer division yields an\ninteger of the same type; the result is that of mathematical division\nwith the \'floor\' function applied to the result. Division by zero\nraises the ``ZeroDivisionError`` exception.\n\nThe ``%`` (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n``ZeroDivisionError`` exception. The arguments may be floating point\nnumbers, e.g., ``3.14%0.7`` equals ``0.34`` (since ``3.14`` equals\n``4*0.7 + 0.34``.) The modulo operator always yields a result with\nthe same sign as its second operand (or zero); the absolute value of\nthe result is strictly smaller than the absolute value of the second\noperand [2].\n\nThe integer division and modulo operators are connected by the\nfollowing identity: ``x == (x/y)*y + (x%y)``. Integer division and\nmodulo are also connected with the built-in function ``divmod()``:\n``divmod(x, y) == (x/y, x%y)``. These identities don\'t hold for\nfloating point numbers; there similar identities hold approximately\nwhere ``x/y`` is replaced by ``floor(x/y)`` or ``floor(x/y) - 1`` [3].\n\nIn addition to performing the modulo operation on numbers, the ``%``\noperator is also overloaded by string and unicode objects to perform\nstring formatting (also known as interpolation). The syntax for string\nformatting is described in the Python Library Reference, section\n*String Formatting Operations*.\n\nDeprecated since version 2.3: The floor division operator, the modulo\noperator, and the ``divmod()`` function are no longer defined for\ncomplex numbers. Instead, convert to a floating point number using\nthe ``abs()`` function if appropriate.\n\nThe ``+`` (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. 
In the latter case, the sequences are\nconcatenated.\n\nThe ``-`` (subtraction) operator yields the difference of its\narguments. The numeric arguments are first converted to a common\ntype.\n', 'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe ``&`` operator yields the bitwise AND of its arguments, which must\nbe plain or long integers. The arguments are converted to a common\ntype.\n\nThe ``^`` operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be plain or long integers. The arguments are\nconverted to a common type.\n\nThe ``|`` operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be plain or long integers. The arguments are converted to\na common type.\n', 'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin ``compile()`` function and can be extracted from function objects\nthrough their ``func_code`` attribute. See also the ``code`` module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the ``exec`` statement or the built-in ``eval()``\nfunction.\n\nSee *The standard type hierarchy* for more information.\n', 'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is used by extended slice notation (see *Slicings*). It\nsupports no special operations. 
There is exactly one ellipsis object,\nnamed ``Ellipsis`` (a built-in name).\n\nIt is written as ``Ellipsis``.\n', 'bltin-file-objects': u'\nFile Objects\n************\n\nFile objects are implemented using C\'s ``stdio`` package and can be\ncreated with the built-in ``open()`` function. File objects are also\nreturned by some other built-in functions and methods, such as\n``os.popen()`` and ``os.fdopen()`` and the ``makefile()`` method of\nsocket objects. Temporary files can be created using the ``tempfile``\nmodule, and high-level file operations such as copying, moving, and\ndeleting files and directories can be achieved with the ``shutil``\nmodule.\n\nWhen a file operation fails for an I/O-related reason, the exception\n``IOError`` is raised. This includes situations where the operation\nis not defined for some reason, like ``seek()`` on a tty device or\nwriting a file opened for reading.\n\nFiles have the following methods:\n\nfile.close()\n\n Close the file. A closed file cannot be read or written any more.\n Any operation which requires that the file be open will raise a\n ``ValueError`` after the file has been closed. Calling ``close()``\n more than once is allowed.\n\n As of Python 2.5, you can avoid having to call this method\n explicitly if you use the ``with`` statement. For example, the\n following code will automatically close *f* when the ``with`` block\n is exited:\n\n from __future__ import with_statement # This isn\'t required in Python 2.6\n\n with open("hello.txt") as f:\n for line in f:\n print line\n\n In older versions of Python, you would have needed to do this to\n get the same effect:\n\n f = open("hello.txt")\n try:\n for line in f:\n print line\n finally:\n f.close()\n\n Note: Not all "file-like" types in Python support use as a context\n manager for the ``with`` statement. 
If your code is intended to\n work with any file-like object, you can use the function\n ``contextlib.closing()`` instead of using the object directly.\n\nfile.flush()\n\n Flush the internal buffer, like ``stdio``\'s ``fflush()``. This may\n be a no-op on some file-like objects.\n\n Note: ``flush()`` does not necessarily write the file\'s data to disk.\n Use ``flush()`` followed by ``os.fsync()`` to ensure this\n behavior.\n\nfile.fileno()\n\n Return the integer "file descriptor" that is used by the underlying\n implementation to request I/O operations from the operating system.\n This can be useful for other, lower level interfaces that use file\n descriptors, such as the ``fcntl`` module or ``os.read()`` and\n friends.\n\n Note: File-like objects which do not have a real file descriptor should\n *not* provide this method!\n\nfile.isatty()\n\n Return ``True`` if the file is connected to a tty(-like) device,\n else ``False``.\n\n Note: If a file-like object is not associated with a real file, this\n method should *not* be implemented.\n\nfile.next()\n\n A file object is its own iterator, for example ``iter(f)`` returns\n *f* (unless *f* is closed). When a file is used as an iterator,\n typically in a ``for`` loop (for example, ``for line in f: print\n line``), the ``next()`` method is called repeatedly. This method\n returns the next input line, or raises ``StopIteration`` when EOF\n is hit when the file is open for reading (behavior is undefined\n when the file is open for writing). In order to make a ``for``\n loop the most efficient way of looping over the lines of a file (a\n very common operation), the ``next()`` method uses a hidden read-\n ahead buffer. As a consequence of using a read-ahead buffer,\n combining ``next()`` with other file methods (like ``readline()``)\n does not work right. 
However, using ``seek()`` to reposition the\n file to an absolute position will flush the read-ahead buffer.\n\n New in version 2.3.\n\nfile.read([size])\n\n Read at most *size* bytes from the file (less if the read hits EOF\n before obtaining *size* bytes). If the *size* argument is negative\n or omitted, read all data until EOF is reached. The bytes are\n returned as a string object. An empty string is returned when EOF\n is encountered immediately. (For certain files, like ttys, it\n makes sense to continue reading after an EOF is hit.) Note that\n this method may call the underlying C function ``fread()`` more\n than once in an effort to acquire as close to *size* bytes as\n possible. Also note that when in non-blocking mode, less data than\n was requested may be returned, even if no *size* parameter was\n given.\n\n Note: This function is simply a wrapper for the underlying ``fread()``\n C function, and will behave the same in corner cases, such as\n whether the EOF value is cached.\n\nfile.readline([size])\n\n Read one entire line from the file. A trailing newline character\n is kept in the string (but may be absent when a file ends with an\n incomplete line). [5] If the *size* argument is present and non-\n negative, it is a maximum byte count (including the trailing\n newline) and an incomplete line may be returned. An empty string is\n returned *only* when EOF is encountered immediately.\n\n Note: Unlike ``stdio``\'s ``fgets()``, the returned string contains null\n characters (``\'\\0\'``) if they occurred in the input.\n\nfile.readlines([sizehint])\n\n Read until EOF using ``readline()`` and return a list containing\n the lines thus read. If the optional *sizehint* argument is\n present, instead of reading up to EOF, whole lines totalling\n approximately *sizehint* bytes (possibly after rounding up to an\n internal buffer size) are read. 
Objects implementing a file-like\n interface may choose to ignore *sizehint* if it cannot be\n implemented, or cannot be implemented efficiently.\n\nfile.xreadlines()\n\n This method returns the same thing as ``iter(f)``.\n\n New in version 2.1.\n\n Deprecated since version 2.3: Use ``for line in file`` instead.\n\nfile.seek(offset[, whence])\n\n Set the file\'s current position, like ``stdio``\'s ``fseek()``. The\n *whence* argument is optional and defaults to ``os.SEEK_SET`` or\n ``0`` (absolute file positioning); other values are ``os.SEEK_CUR``\n or ``1`` (seek relative to the current position) and\n ``os.SEEK_END`` or ``2`` (seek relative to the file\'s end). There\n is no return value.\n\n For example, ``f.seek(2, os.SEEK_CUR)`` advances the position by\n two and ``f.seek(-3, os.SEEK_END)`` sets the position to the third\n to last.\n\n Note that if the file is opened for appending (mode ``\'a\'`` or\n ``\'a+\'``), any ``seek()`` operations will be undone at the next\n write. If the file is only opened for writing in append mode (mode\n ``\'a\'``), this method is essentially a no-op, but it remains useful\n for files opened in append mode with reading enabled (mode\n ``\'a+\'``). If the file is opened in text mode (without ``\'b\'``),\n only offsets returned by ``tell()`` are legal. Use of other\n offsets causes undefined behavior.\n\n Note that not all file objects are seekable.\n\n Changed in version 2.6: Passing float values as offset has been\n deprecated.\n\nfile.tell()\n\n Return the file\'s current position, like ``stdio``\'s ``ftell()``.\n\n Note: On Windows, ``tell()`` can return illegal values (after an\n ``fgets()``) when reading files with Unix-style line-endings. Use\n binary mode (``\'rb\'``) to circumvent this problem.\n\nfile.truncate([size])\n\n Truncate the file\'s size. If the optional *size* argument is\n present, the file is truncated to (at most) that size. The size\n defaults to the current position. 
The current file position is not\n changed. Note that if a specified size exceeds the file\'s current\n size, the result is platform-dependent: possibilities include that\n the file may remain unchanged, increase to the specified size as if\n zero-filled, or increase to the specified size with undefined new\n content. Availability: Windows, many Unix variants.\n\nfile.write(str)\n\n Write a string to the file. There is no return value. Due to\n buffering, the string may not actually show up in the file until\n the ``flush()`` or ``close()`` method is called.\n\nfile.writelines(sequence)\n\n Write a sequence of strings to the file. The sequence can be any\n iterable object producing strings, typically a list of strings.\n There is no return value. (The name is intended to match\n ``readlines()``; ``writelines()`` does not add line separators.)\n\nFiles support the iterator protocol. Each iteration returns the same\nresult as ``file.readline()``, and iteration ends when the\n``readline()`` method returns an empty string.\n\nFile objects also offer a number of other interesting attributes.\nThese are not required for file-like objects, but should be\nimplemented if they make sense for the particular object.\n\nfile.closed\n\n bool indicating the current state of the file object. This is a\n read-only attribute; the ``close()`` method changes the value. It\n may not be available on all file-like objects.\n\nfile.encoding\n\n The encoding that this file uses. When Unicode strings are written\n to a file, they will be converted to byte strings using this\n encoding. In addition, when the file is connected to a terminal,\n the attribute gives the encoding that the terminal is likely to use\n (that information might be incorrect if the user has misconfigured\n the terminal). The attribute is read-only and may not be present\n on all file-like objects. 
It may also be ``None``, in which case\n the file uses the system default encoding for converting Unicode\n strings.\n\n New in version 2.3.\n\nfile.errors\n\n The Unicode error handler used along with the encoding.\n\n New in version 2.6.\n\nfile.mode\n\n The I/O mode for the file. If the file was created using the\n ``open()`` built-in function, this will be the value of the *mode*\n parameter. This is a read-only attribute and may not be present on\n all file-like objects.\n\nfile.name\n\n If the file object was created using ``open()``, the name of the\n file. Otherwise, some string that indicates the source of the file\n object, of the form ``<...>``. This is a read-only attribute and\n may not be present on all file-like objects.\n\nfile.newlines\n\n If Python was built with the *--with-universal-newlines* option to\n **configure** (the default) this read-only attribute exists, and\n for files opened in universal newline read mode it keeps track of\n the types of newlines encountered while reading the file. The\n values it can take are ``\'\\r\'``, ``\'\\n\'``, ``\'\\r\\n\'``, ``None``\n (unknown, no newlines read yet) or a tuple containing all the\n newline types seen, to indicate that multiple newline conventions\n were encountered. For files not opened in universal newline read\n mode the value of this attribute will be ``None``.\n\nfile.softspace\n\n Boolean that indicates whether a space character needs to be\n printed before another value when using the ``print`` statement.\n Classes that are trying to simulate a file object should also have\n a writable ``softspace`` attribute, which should be initialized to\n zero. 
This will be automatic for most classes implemented in\n Python (care may be needed for objects that override attribute\n access); types implemented in C will have to provide a writable\n ``softspace`` attribute.\n\n Note: This attribute is not used to control the ``print`` statement,\n but to allow the implementation of ``print`` to keep track of its\n internal state.\n', 'bltin-null-object': u"\nThe Null Object\n***************\n\nThis object is returned by functions that don't explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named ``None`` (a built-in name).\n\nIt is written as ``None``.\n", 'bltin-type-objects': u"\nType Objects\n************\n\nType objects represent the various object types. An object's type is\naccessed by the built-in function ``type()``. There are no special\noperations on types. The standard module ``types`` defines names for\nall standard built-in types.\n\nTypes are written like this: ``<type 'int'>``.\n", 'booleans': u'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: ``False``, ``None``, numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. 
(See the ``__nonzero__()`` special method for a way to\nchange this.)\n\nThe operator ``not`` yields ``True`` if its argument is false,\n``False`` otherwise.\n\nThe expression ``x and y`` first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression ``x or y`` first evaluates *x*; if *x* is true, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\n(Note that neither ``and`` nor ``or`` restrict the value and type they\nreturn to ``False`` and ``True``, but rather return the last evaluated\nargument. This is sometimes useful, e.g., if ``s`` is a string that\nshould be replaced by a default value if it is empty, the expression\n``s or \'foo\'`` yields the desired value. Because ``not`` has to\ninvent a value anyway, it does not bother to return a value of the\nsame type as its argument, so e.g., ``not \'foo\'`` yields ``False``,\nnot ``\'\'``.)\n', 'break': u'\nThe ``break`` statement\n***********************\n\n break_stmt ::= "break"\n\n``break`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition\nwithin that loop.\n\nIt terminates the nearest enclosing loop, skipping the optional\n``else`` clause if the loop has one.\n\nIf a ``for`` loop is terminated by ``break``, the loop control target\nkeeps its current value.\n\nWhen ``break`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the loop.\n', 'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n', 'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a function) with a possibly\nempty series of arguments:\n\n call 
::= primary "(" [argument_list [","]\n | expression genexpr_for] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," "**" expression]\n | "*" expression ["," "*" expression] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types). All argument expressions are evaluated before the call\nis attempted. Please refer to section *Function definitions* for the\nsyntax of formal parameter lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a ``TypeError`` exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is ``None``, it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. 
(Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a\n``TypeError`` exception is raised. Otherwise, the list of filled\nslots is used as the argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use ``PyArg_ParseTuple()`` to\nparse their arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``*identifier`` is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``**identifier`` is present; in this case, that\nformal parameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax ``*expression`` appears in the function call,\n``expression`` must evaluate to a sequence. 
Elements from this\nsequence are treated as if they were additional positional arguments;\nif there are positional arguments *x1*,..., *xN*, and ``expression``\nevaluates to a sequence *y1*, ..., *yM*, this is equivalent to a call\nwith M+N positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the ``*expression`` syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the ``**expression`` argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print a, b\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the ``*expression``\nsyntax to be used in the same call, so in practice this confusion does\nnot arise.\n\nIf the syntax ``**expression`` appears in the function call,\n``expression`` must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both ``expression`` and as an explicit keyword argument,\na ``TypeError`` exception is raised.\n\nFormal parameters using the syntax ``*identifier`` or ``**identifier``\ncannot be used as positional argument slots or as keyword argument\nnames. Formal parameters using the syntax ``(sublist)`` cannot be\nused as keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly ``None``, unless it raises\nan exception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. 
The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. When the code block executes a ``return``\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a ``__call__()`` method; the effect is then\n the same as if that method was called.\n', 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. 
To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack only if there\n is no ``finally`` clause that negates the exception.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'coercion-rules': u"\nCoercion rules\n**************\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. 
In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don't define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. For example, for the\n operator '``+``', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base's ``__rop__()`` method, the right operand's ``__rop__()``\n method is tried *before* the left operand's ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. 
Otherwise, the left operand's ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type's ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like '``+=``') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operator is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long``, ``float``, and ``complex`` do not use coercion. All these\n types implement a ``__coerce__()`` method, for use by the built-in\n ``coerce()`` function.\n\n Changed in version 2.7.\n", 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-built-in types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. 
In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmake sense for many other object types to support membership tests\nwithout being a sequence. 
In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote, *x* and *y* need not be the same type; consequently, ``u\'ab\' in\n\'abc\'`` will return ``True``. Empty strings are always considered to\nbe a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [7]\n', 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. 
In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements. Function and class\ndefinitions are also syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print`` statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. 
Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. 
A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. 
When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. 
An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. 
Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). 
This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier [, "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). 
This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that that same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. 
It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. 
The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack only if there\n is no ``finally`` clause that negates the exception.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'continue': u'\nThe ``continue`` statement\n**************************\n\n continue_stmt ::= "continue"\n\n``continue`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition or\n``finally`` clause within that loop. It continues with the next cycle\nof the nearest enclosing loop.\n\nWhen ``continue`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nstarting the next loop cycle.\n', 'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at *Coercion rules*. If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). 
Extensions can define their own\ncoercions.\n', 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. 
As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. 
Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. 
If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. 
However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. 
(Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. 
by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n', 'debugger': u'\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. 
It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``c`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print spam\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print spam\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n Execute the *statement* (given as a string) under debugger control.\n The debugger prompt appears before any code is executed; you can\n set breakpoints and type ``continue``, or you can step through the\n statement using ``step`` or ``next`` (all these commands are\n explained below). The optional *globals* and *locals* arguments\n specify the environment in which the code is executed; by default\n the dictionary of the module ``__main__`` is used. (See the\n explanation of the ``exec`` statement or the ``eval()`` built-in\n function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n Evaluate the *expression* (given as a string) under debugger\n control. When ``runeval()`` returns, it returns the value of the\n expression. Otherwise this function is similar to ``run()``.\n\npdb.runcall(function[, argument, ...])\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. 
when an assertion\n fails).\n\npdb.post_mortem([traceback])\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run_*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. If you want to access further features, you have to do this\nyourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 2.7: The *skip* argument.\n\n run(statement[, globals[, locals]])\n runeval(expression[, globals[, locals]])\n runcall(function[, argument, ...])\n set_trace()\n\n See the documentation for the functions explained above.\n', 'del': u'\nThe ``del`` statement\n*********************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather that spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a ``global``\nstatement in the same code block. 
If the name is unbound, a\n``NameError`` exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n', 'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) 
Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. 
[1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n', 'else': u'\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. 
The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exec': u'\nThe ``exec`` statement\n**********************\n\n exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n\nThis statement supports dynamic execution of Python code. 
The first\nexpression should evaluate to either a string, an open file object, or\na code object. If it is a string, the string is parsed as a suite of\nPython statements which is then executed (unless a syntax error\noccurs). [1] If it is an open file, the file is parsed until EOF and\nexecuted. If it is a code object, it is simply executed. In all\ncases, the code that\'s executed is expected to be valid as file input\n(see section *File input*). Be aware that the ``return`` and\n``yield`` statements may not be used outside of function definitions\neven within the context of code passed to the ``exec`` statement.\n\nIn all cases, if the optional parts are omitted, the code is executed\nin the current scope. If only the first expression after ``in`` is\nspecified, it should be a dictionary, which will be used for both the\nglobal and the local variables. If two expressions are given, they\nare used for the global and local variables, respectively. If\nprovided, *locals* can be any mapping object.\n\nChanged in version 2.4: Formerly, *locals* was required to be a\ndictionary.\n\nAs a side effect, an implementation may insert additional keys into\nthe dictionaries given besides those corresponding to variable names\nset by the executed code. For example, the current implementation may\nadd a reference to the dictionary of the built-in module\n``__builtin__`` under the key ``__builtins__`` (!).\n\n**Programmer\'s hints:** dynamic evaluation of expressions is supported\nby the built-in function ``eval()``. The built-in functions\n``globals()`` and ``locals()`` return the current global and local\ndictionary, respectively, which may be useful to pass around for use\nby ``exec``.\n\n-[ Footnotes ]-\n\n[1] Note that the parser only accepts the Unix-style end of line\n convention. 
If you are reading the code from a file, make sure to\n use universal newline mode to convert Windows or Mac-style\n newlines.\n', 'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. 
This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. 
This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module\'s dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no \'s\'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. ``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe global statement has the same scope as a name binding operation in\nthe same block. 
If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. 
An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. 
Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: ``()``.)\n', 'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts of floating point numbers can\nlook like octal integers, but are interpreted using radix 10. For\nexample, ``077e010`` is legal, and denotes the same number as\n``77e10``. The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. 
.001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator ``-`` and the\nliteral ``1``.\n', 'for': u'\nThe ``for`` statement\n*********************\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). 
An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', 'formatstrings': u'\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. 
The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either\neither a number or a keyword. If it\'s a number, it refers to a\npositional argument, and if it\'s a keyword, it refers to a named\nkeyword argument. If the numerical arg_names in a format string are\n0, 1, 2, ... in sequence, they can all be omitted (not just some) and\nthe numbers 0, 1, 2, ... will be automatically inserted in that order.\nThe *arg_name* can be followed by any number of index or attribute\nexpressions. An expression of the form ``\'.name\'`` selects the named\nattribute using ``getattr()``, while an expression of the form\n``\'[index]\'`` does an index lookup using ``__getitem__()``.\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. 
By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nTwo conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, and ``\'!r\'`` which calls ``repr()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. 
A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <a character other than \'}\'>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'}\' (which\nsignifies the end of the field). The presence of a fill character is\nsignaled by the *next* character, which must be one of the alignment\noptions. If the second character of *format_spec* is not a valid\nalignment option, then it is assumed that both the fill character and\nthe alignment option are absent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. 
|\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by ``\'0b\'``, ``\'0o\'``, or ``\'0x\'``,\nrespectively.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 2.7: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nIf the *width* field is preceded by a zero (``\'0\'``) character, this\nenables zero-padding. 
This is equivalent to an *alignment* type of\n``\'=\'`` and a *fill* character of ``\'0\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. 
|\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``. 
|\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. Postive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. 
|\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'g\'``. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most of the cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 2.7+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point(object):\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19.5\n >>> total = 22\n >>> \'Correct answers: {:.2%}.\'.format(points/total)\n \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, 
text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{align}{fill}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', 'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier [, "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. 
The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that that same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. 
If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n', 'global': u'\nThe ``global`` statement\n************************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe ``global`` statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. 
It would be impossible to assign to a global\nvariable without ``global``, although free variables may refer to\nglobals without being declared global.\n\nNames listed in a ``global`` statement must not be used in the same\ncode block textually preceding that ``global`` statement.\n\nNames listed in a ``global`` statement must not be defined as formal\nparameters or in a ``for`` loop control target, ``class`` definition,\nfunction definition, or ``import`` statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the ``global`` is a directive to the parser.\nIt applies only to code parsed at the same time as the ``global``\nstatement. In particular, a ``global`` statement contained in an\n``exec`` statement does not affect the code block *containing* the\n``exec`` statement, and code contained in an ``exec`` statement is\nunaffected by ``global`` statements in the code containing the\n``exec`` statement. The same applies to the ``eval()``,\n``execfile()`` and ``compile()`` functions.\n', 'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. 
See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library);\n applications should not expect to define additional names using\n this convention. The set of names of this class defined by Python\n may be extended in future versions. See section *Special method\n names*.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', 'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n identifier ::= (letter|"_") (letter | digit | "_")*\n letter ::= lowercase | uppercase\n lowercase ::= "a"..."z"\n uppercase ::= "A"..."Z"\n digit ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n and del from not while\n as elif global or with\n assert else if pass yield\n break except import print\n class exec in raise\n continue finally is return\n def for lambda try\n\nChanged in version 2.4: ``None`` became a constant and is now\nrecognized by the compiler as a name for the built-in object ``None``.\nAlthough it is not a keyword, you cannot assign a different object to\nit.\n\nChanged in version 2.5: Both ``as`` and ``with`` are only recognized\nwhen the ``with_statement`` future feature has been enabled. 
It will\nalways be enabled in Python 2.6. See section *The with statement* for\ndetails. Note that using ``as`` and ``with`` as identifiers will\nalways issue a warning, even when the ``with_statement`` future\ndirective is not in effect.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library);\n applications should not expect to define additional names using\n this convention. The set of names of this class defined by Python\n may be extended in future versions. See section *Special method\n names*.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. 
See section *Identifiers (Names)*.\n', 'if': u'\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., ``(3+4j)``. Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', 'import': u'\nThe ``import`` statement\n************************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the ``import`` statement occurs). The\nstatement comes in two forms differing on whether it uses the ``from``\nkeyword. 
The first form (without ``from``) repeats these steps for\neach identifier in the list. The form with ``from`` performs step (1)\nonce, and then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files. The\noriginal specification for packages is still available to read,\nalthough minor details have changed since the writing of that\ndocument.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n``sys.modules``, the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then ``sys.meta_path`` is\nsearched (the specification for ``sys.meta_path`` can be found in\n**PEP 302**). The object is a list of *finder* objects which are\nqueried in order as to whether they know how to load the module by\ncalling their ``find_module()`` method with the name of the module. If\nthe module happens to be contained within a package (as denoted by the\nexistence of a dot in the name), then a second argument to\n``find_module()`` is given as the value of the ``__path__`` attribute\nfrom the parent package (everything up to the last dot in the name of\nthe module being imported). 
If a finder can find the module it returns\na *loader* (discussed later) or returns ``None``.\n\nIf none of the finders on ``sys.meta_path`` are able to find the\nmodule then some implicitly defined finders are queried.\nImplementations of Python vary in what implicit meta path finders are\ndefined. The one they all do define, though, is one that handles\n``sys.path_hooks``, ``sys.path_importer_cache``, and ``sys.path``.\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to ``find_module()``,\n``__path__`` on the parent package, is used as the source of paths. If\nthe module is not contained in a package then ``sys.path`` is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n``sys.path_importer_cache`` caches finders for paths and is checked\nfor a finder. If the path does not have a finder cached then\n``sys.path_hooks`` is searched by calling each object in the list with\na single argument of the path, returning a finder or raises\n``ImportError``. If a finder is returned then it is cached in\n``sys.path_importer_cache`` and then used for that path entry. If no\nfinder can be found but the path exists then a value of ``None`` is\nstored in ``sys.path_importer_cache`` to signify that an implicit,\nfile-based finder that handles modules stored as individual files\nshould be used for that path. If the path does not exist then a finder\nwhich always returns ``None`` is placed in the cache for the path.\n\nIf no finder can find the module then ``ImportError`` is raised.\nOtherwise some finder returned a loader whose ``load_module()`` method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). 
A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin ``sys.modules`` (a possibility if the loader is called outside of\nthe import machinery) then it is to use that module for initialization\nand not a new module. But if the module does not exist in\n``sys.modules`` then it is to be added to that dict before\ninitialization begins. If an error occurs during loading of the module\nand it was added to ``sys.modules`` it is to be removed from the dict.\nIf an error occurs but the module was already in ``sys.modules`` it is\nleft in the dict.\n\nThe loader must set several attributes on the module. ``__name__`` is\nto be set to the name of the module. ``__file__`` is to be the "path"\nto the file unless the module is built-in (and thus listed in\n``sys.builtin_module_names``) in which case the attribute is not set.\nIf what is being imported is a package then ``__path__`` is to be set\nto a list of paths to be searched when looking for modules and\npackages contained within the package being imported. ``__package__``\nis optional but should be set to the name of package that contains the\nmodule or package (the empty string is used for module not contained\nin a package). ``__loader__`` is also optional but should be set to\nthe loader object that is loading the module.\n\nIf an error occurs during loading then the loader raises\n``ImportError`` if some other exception is not already being\npropagated. Otherwise the loader returns the module that was loaded\nand initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of ``import`` statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. 
If the module name is followed by ``as``,\nthe name following ``as`` is used as the local name for the module.\n\nThe ``from`` form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of ``import``, an alternate local name\ncan be supplied by specifying "``as`` localname". If a name is not\nfound, ``ImportError`` is raised. If the list of identifiers is\nreplaced by a star (``\'*\'``), all public names defined in the module\nare bound in the local namespace of the ``import`` statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope. If the\nwild card form of import --- ``import *`` --- is used in a function\nand the function contains or is a nested block with free variables,\nthe compiler will raise a ``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. 
By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. If you execute ``from ..subpkg2\nimprt mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. The specification for relative imports is\ncontained within **PEP 328**.\n\n``importlib.import_module()`` is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are ``unicode_literals``,\n``print_function``, ``absolute_import``, ``division``, ``generators``,\n``nested_scopes`` and ``with_statement``. 
``generators``,\n``with_statement``, ``nested_scopes`` are redundant in Python version\n2.6 and above because they are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an ``exec`` statement or calls to the built-in\nfunctions ``compile()`` and ``execfile()`` that occur in a module\n``M`` containing a future statement will, by default, use the new\nsyntax or semantics associated with the future statement. This can,\nstarting with Python 2.2 be controlled by optional arguments to\n``compile()`` --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n', 'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. 
Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-built-in types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. 
[6]\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmake sense for many other object types to support membership tests\nwithout being a sequence. In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote, *x* and *y* need not be the same type; consequently, ``u\'ab\' in\n\'abc\'`` will return ``True``. Empty strings are always considered to\nbe a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. 
If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [7]\n', 'integers': u'\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n longinteger ::= integer ("l" | "L")\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"\n octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n nonzerodigit ::= "1"..."9"\n octdigit ::= "0"..."7"\n bindigit ::= "0" | "1"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case ``\'l\'`` and upper case ``\'L\'`` are allowed as\nsuffix for long integers, it is strongly recommended to always use\n``\'L\'``, since the letter ``\'l\'`` looks too much like the digit\n``\'1\'``.\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. 
[1] There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n 7 2147483647 0177\n 3L 79228162514264337593543950336L 0377L 0x100000000L\n 79228162514264337593543950336 0xdeadbeef\n', 'lambda': u'\nLambdas\n*******\n\n lambda_form ::= "lambda" [parameter_list]: expression\n old_lambda_form ::= "lambda" [parameter_list]: old_expression\n\nLambda forms (lambda expressions) have the same syntactic position as\nexpressions. They are a shorthand to create anonymous functions; the\nexpression ``lambda arguments: expression`` yields a function object.\nThe unnamed object behaves like a function object defined with\n\n def name(arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda forms cannot contain\nstatements.\n', 'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | list_comprehension] "]"\n list_comprehension ::= expression list_for\n list_for ::= "for" target_list "in" old_expression_list [list_iter]\n old_expression_list ::= old_expression [("," old_expression)+ [","]]\n old_expression ::= or_test | old_lambda_form\n list_iter ::= list_for | list_if\n list_if ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one ``for`` clause and zero or\nmore ``for`` or ``if`` clauses. 
In this case, the elements of the new\nlist are those that would be produced by considering each of the\n``for`` or ``if`` clauses a block, nesting from left to right, and\nevaluating the expression to produce a list element each time the\ninnermost block is reached [1].\n', 'naming': u"\nNaming and binding\n******************\n\n*Names* refer to objects.  Names are introduced by name binding\noperations.  Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit.  The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line as the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the '**-c**' option) is a code block.  The file read by the\nbuilt-in function ``execfile()`` is a code block.  The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block.  The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*.  A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block's execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block.  If a local\nvariable is defined in a block, its scope includes that block.  If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. 
The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block's *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). 
It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module's dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no 's'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. 
``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no 's') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe global statement has the same scope as a name binding operation in\nthe same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. 
(In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n", 'numbers': u"\nNumeric literals\n****************\n\nThere are four types of numeric literals: plain integers, long\nintegers, floating point numbers, and imaginary numbers. There are no\ncomplex literals (complex numbers can be formed by adding a real\nnumber and an imaginary number).\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator '``-``' and\nthe literal ``1``.\n", 'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). 
For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. 
[2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. 
If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n', 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. 
An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'``is``\' operator compares the\nidentity of two objects; the ``id()`` function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. [1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The ``type()`` function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. (The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. 
See the documentation of the ``gc`` module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change.\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'``try``...``except``\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a ``close()`` method. Programs\nare strongly recommended to explicitly close such objects. The\n\'``try``...``finally``\' statement provides a convenient way to do\nthis.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. 
E.g., after ``a = 1; b =\n1``, ``a`` and ``b`` may or may not refer to the same object with the\nvalue one, depending on the implementation, but after ``c = []; d =\n[]``, ``c`` and ``d`` are guaranteed to refer to two different,\nunique, newly created empty lists. (Note that ``c = d = []`` assigns\nthe same object to both ``c`` and ``d``.)\n', 'operator-summary': u'\nSummary\n*******\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``if`` -- ``else`` | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` *x* | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not`` ``in``, ``is``, ``is not``, | Comparisons, including membership |\n| ``<``, ``<=``, ``>``, ``>=``, ``<>``, ``!=``, | tests and identity tests, |\n| ``==`` | 
|\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, ``//``, ``%`` | Multiplication, division, remainder |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [8] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key:datum...}``, ```expressions...``` | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks" the\n control variables of each ``for`` it contains into the containing\n scope. 
However, this behavior is deprecated, and relying on it\n will not work in Python 3.0\n\n[2] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. Function\n ``fmod()`` in the ``math`` module returns a result whose sign\n matches the sign of the first argument instead, and so returns\n ``-1e-100`` in this case. Which approach is more appropriate\n depends on the application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for ``floor(x/y)`` to be one larger than ``(x-x%y)/y``\n due to rounding. In such cases, Python returns the latter result,\n in order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n close to ``x``.\n\n[4] While comparisons between unicode strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ``u"\\u00C7"`` and ``u"\\u0043\\u0327"`` compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using ``unicodedata.normalize()``.\n\n[5] The implementation computes this efficiently, without constructing\n lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of the\n sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. 
An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to ``{}``.\n\n[7] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the ``is`` operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[8] The power operator ``**`` binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, ``2**-1`` is\n ``0.5``.\n', 'pass': u'\nThe ``pass`` statement\n**********************\n\n pass_stmt ::= "pass"\n\n``pass`` is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', 'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): ``-1**2`` results in ``-1``.\n\nThe power operator has the same semantics as the built-in ``pow()``\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type. The result type is that of the\narguments after coercion.\n\nWith mixed operand types, the coercion rules for binary arithmetic\noperators apply. 
For int and long int operands, the result has the\nsame type as the operands (after coercion) unless the second argument\nis negative; in that case, all arguments are converted to float and a\nfloat result is delivered. For example, ``10**2`` returns ``100``, but\n``10**-2`` returns ``0.01``. (This last feature was added in Python\n2.2. In Python 2.1 and before, if both arguments were of integer types\nand the second argument was negative, an exception was raised).\n\nRaising ``0.0`` to a negative power results in a\n``ZeroDivisionError``. Raising a negative number to a fractional power\nresults in a ``ValueError``.\n', 'print': u'\nThe ``print`` statement\n***********************\n\n print_stmt ::= "print" ([expression ("," expression)* [","]]\n | ">>" expression [("," expression)+ [","]])\n\n``print`` evaluates each expression in turn and writes the resulting\nobject to standard output (see below). If an object is not a string,\nit is first converted to a string using the rules for string\nconversions. The (resulting or original) string is then written. A\nspace is written before each object is (converted and) written, unless\nthe output system believes it is positioned at the beginning of a\nline. This is the case (1) when no characters have yet been written\nto standard output, (2) when the last character written to standard\noutput is a whitespace character except ``\' \'``, or (3) when the last\nwrite operation on standard output was not a ``print`` statement. (In\nsome cases it may be functional to write an empty string to standard\noutput for this reason.)\n\nNote: Objects which act like file objects but which are not the built-in\n file objects often do not properly emulate this aspect of the file\n object\'s behavior, so it is best not to rely on this.\n\nA ``\'\\n\'`` character is written at the end, unless the ``print``\nstatement ends with a comma. 
This is the only action if the statement\ncontains just the keyword ``print``.\n\nStandard output is defined as the file object named ``stdout`` in the\nbuilt-in module ``sys``. If no such object exists, or if it does not\nhave a ``write()`` method, a ``RuntimeError`` exception is raised.\n\n``print`` also has an extended form, defined by the second portion of\nthe syntax described above. This form is sometimes referred to as\n"``print`` chevron." In this form, the first expression after the\n``>>`` must evaluate to a "file-like" object, specifically an object\nthat has a ``write()`` method as described above. With this extended\nform, the subsequent expressions are printed to this file object. If\nthe first expression evaluates to ``None``, then ``sys.stdout`` is\nused as the file for output.\n', 'raise': u'\nThe ``raise`` statement\n***********************\n\n raise_stmt ::= "raise" [expression ["," expression ["," expression]]]\n\nIf no expressions are present, ``raise`` re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a ``TypeError`` exception is raised indicating that\nthis is an error (if running under IDLE, a ``Queue.Empty`` exception\nis raised instead).\n\nOtherwise, ``raise`` evaluates the expressions to get three objects,\nusing ``None`` as the value of omitted expressions. The first two\nobjects are used to determine the *type* and *value* of the exception.\n\nIf the first object is an instance, the type of the exception is the\nclass of the instance, the instance itself is the value, and the\nsecond object must be ``None``.\n\nIf the first object is a class, it becomes the type of the exception.\nThe second object is used to determine the exception value: If it is\nan instance of the class, the instance becomes the exception value. 
If\nthe second object is a tuple, it is used as the argument list for the\nclass constructor; if it is ``None``, an empty argument list is used,\nand any other object is treated as a single argument to the\nconstructor. The instance so created by calling the constructor is\nused as the exception value.\n\nIf a third object is present and not ``None``, it must be a traceback\nobject (see section *The standard type hierarchy*), and it is\nsubstituted instead of the current location as the place where the\nexception occurred. If the third object is present and not a\ntraceback object or ``None``, a ``TypeError`` exception is raised.\nThe three-expression form of ``raise`` is useful to re-raise an\nexception transparently in an except clause, but ``raise`` with no\nexpressions should be preferred if the exception to be re-raised was\nthe most recently active exception in the current scope.\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', 'return': u'\nThe ``return`` statement\n************************\n\n return_stmt ::= "return" [expression_list]\n\n``return`` may only occur syntactically nested in a function\ndefinition, not within a nested class definition.\n\nIf an expression list is present, it is evaluated, else ``None`` is\nsubstituted.\n\n``return`` leaves the current function call with the expression list\n(or ``None``) as return value.\n\nWhen ``return`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the function.\n\nIn a generator function, the ``return`` statement is not allowed to\ninclude an **expression_list**. 
In that context, a bare ``return``\nindicates that the generator is done and will cause ``StopIteration``\nto be raised.\n', 'sequence-methods': u'\nAdditional methods for emulation of sequence types\n**************************************************\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created instead, and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. 
This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. 
However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. Calling ``max(0,\ni)`` conveniently returns the proper value.\n', 'sequence-types': u"\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python's\nstandard dictionary objects. The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. 
Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn't define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. 
For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). 
Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don't define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n", 'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments. The\narguments are converted to a common type. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by ``pow(2, n)``. A\nleft shift by *n* bits is defined as multiplication with ``pow(2,\nn)``. Negative shift counts raise a ``ValueError`` exception.\n\nNote: In the current implementation, the right-hand operand is required to\n be at most ``sys.maxsize``. If the right-hand operand is larger\n than ``sys.maxsize`` an ``OverflowError`` exception is raised.\n', 'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). 
Slicings may be used as expressions or as\ntargets in assignment or ``del`` statements. The syntax for a\nslicing:\n\n slicing ::= simple_slicing | extended_slicing\n simple_slicing ::= primary "[" short_slice "]"\n extended_slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice | ellipsis\n proper_slice ::= short_slice | long_slice\n short_slice ::= [lower_bound] ":" [upper_bound]\n long_slice ::= short_slice ":" [stride]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n ellipsis ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses). Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows. The primary must\nevaluate to a sequence object. The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n``sys.maxint``, respectively. If either bound is negative, the\nsequence\'s length is added to it. The slicing now selects all items\nwith index *k* such that ``i <= k < j`` where *i* and *j* are the\nspecified lower and upper bounds. This may be an empty sequence. It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows. 
The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of an ellipsis slice\nitem is the built-in ``Ellipsis`` object. The conversion of a proper\nslice is a slice object (see section *The standard type hierarchy*)\nwhose ``start``, ``stop`` and ``step`` attributes are the values of\nthe expressions given as lower bound, upper bound and stride,\nrespectively, substituting ``None`` for missing expressions.\n', 'specialattrs': u"\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the ``dir()`` built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object's\n (writable) attributes.\n\nobject.__methods__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object's attributes. This attribute is no\n longer available.\n\nobject.__members__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object's attributes. This attribute is no\n longer available.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. 
It is called at class\n instantiation, and its result is stored in ``__mro__``.\n\nclass.__subclasses__()\n\n Each new-style class keeps a list of weak references to its\n immediate subclasses. This method returns a list of all those\n references still alive. Example:\n\n >>> int.__subclasses__()\n [<type 'bool'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list ``[1, 2]`` is considered equal to\n ``[1.0, 2.0]``, and similarly for tuples.\n\n[3] They must have since the parser can't tell the type of the\n operands.\n\n[4] To format only a tuple you should therefore provide a singleton\n tuple whose only element is the tuple to be formatted.\n\n[5] The advantage of leaving the newline on is that returning an empty\n string is then an unambiguous EOF indication. It is also possible\n (in cases where it might matter, for example, if you want to make\n an exact copy of a file while scanning its lines) to tell whether\n the last line of a file ended in a newline or not (yes this\n happens!).\n", 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``x.__getitem__(i)`` for\nold-style classes and ``type(x).__getitem__(x, i)`` for new-style\nclasses. 
Except where mentioned, attempts to execute an operation\nraise an exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. 
It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. 
Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. 
Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. 
The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. 
Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. 
If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. 
If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). 
*name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. 
See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in the\nclass dictionary of another new-style class, known as the *owner*\nclass. In the examples below, "the attribute" refers to the attribute\nwhose name is the key of the property in the owner class\'\n``__dict__``. Descriptors can only be implemented as new-style\nclasses themselves.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, A)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. 
Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. 
As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using ``type()``. A\nclass definition is read into a separate namespace and the value of\nclass name is bound to the result of ``type(name, bases, dict)``.\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of ``type()``. This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing the\n role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s ``__new__()``\nmethod -- ``type.__new__()`` can then be called from this method to\ncreate a class with different properties. 
This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n def __new__(mcs, name, bases, dict):\n dict[\'foo\'] = \'metacls was here\'\n return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom ``__call__()`` method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for ``name``,\n ``bases``, and ``dict``. Upon class creation, the callable is used\n instead of the built-in ``type()``.\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If ``dict[\'__metaclass__\']`` exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. 
Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n``isinstance()`` and ``issubclass()`` built-in functions.\n\nIn particular, the metaclass ``abc.ABCMeta`` implements these methods\nin order to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n ``isinstance(instance, class)``.\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n ``issubclass(subclass, class)``.\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing ``isinstance()`` and\n ``issubclass()`` behavior through ``__instancecheck__()`` and\n ``__subclasscheck__()``, with motivation for this functionality\n in the context of adding Abstract Base Classes (see the ``abc``\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python\'s\nstandard dictionary objects. 
The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn\'t define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. 
If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. 
It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. 
The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created instead, and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. 
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrates how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n   class MyClass:\n       ...\n       def __getitem__(self, index):\n           ...\n       def __setitem__(self, index, value):\n           ...\n       def __delitem__(self, index):\n           ...\n\n       if sys.version_info < (2, 0):\n          # They won\'t be defined if version is at least 2.0 final\n\n          def __getslice__(self, i, j):\n              return self[max(0, i):max(0, j):]\n          def __setslice__(self, i, j, seq):\n              self[max(0, i):max(0, j):] = seq\n          def __delslice__(self, i, j):\n              del self[max(0, i):max(0, j):]\n    ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. 
Calling ``max(0,\ni)`` conveniently returns the proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. 
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. 
This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. 
For example, for the\n operator \'``+``\', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s ``__rop__()`` method, the right operand\'s ``__rop__()``\n method is tried *before* the left operand\'s ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type\'s ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like \'``+=``\') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. 
When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operator is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long``, ``float``, and ``complex`` do not use coercion. All these\n types implement a ``__coerce__()`` method, for use by the built-in\n ``coerce()`` function.\n\n Changed in version 2.7.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. 
The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n``x.__getitem__(i)`` or implicitly as in ``x[i]``.\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... 
pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n', 'string-conversions': u'\nString conversions\n******************\n\nA string conversion is an expression list enclosed in reverse (a.k.a.\nbackward) quotes:\n\n string_conversion ::= "\'" expression_list "\'"\n\nA string conversion evaluates the contained expression list and\nconverts the resulting object into a string according to rules\nspecific to its type.\n\nIf the object is a string, a number, ``None``, or a tuple, list or\ndictionary containing only objects whose type is one of these, the\nresulting string is a valid Python expression which can be passed to\nthe built-in function ``eval()`` to yield an expression with the same\nvalue (or an approximation, if floating point numbers are involved).\n\n(In particular, converting a string adds quotes around it and converts\n"funny" characters to escape sequences that are safe to print.)\n\nRecursive objects (for example, lists or dictionaries that contain a\nreference to themselves, 
directly or indirectly) use ``...`` to\nindicate a recursive reference, and the result cannot be passed to\n``eval()`` to get an equal value (``SyntaxError`` will be raised\ninstead).\n\nThe built-in function ``repr()`` performs exactly the same conversion\nin its argument as enclosing it in parentheses and reverse quotes\ndoes. The built-in function ``str()`` performs a similar but more\nuser-friendly conversion.\n', 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbuffer, xrange* section. To output formatted strings use template\nstrings or the ``%`` operator described in the *String Formatting\nOperations* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with only its first character\n capitalized.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. 
The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. 
This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there 
is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. 
Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that that can be used to form decimal-radix\n numbers, e.g. 
U+0660, ARABIC-INDIC DIGIT ZERO.\n', 'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n escapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the **stringprefix** and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section *Encoding declarations*.\n\nIn plain English: String literals can be enclosed in matching single\nquotes (``\'``) or double quotes (``"``). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). The backslash (``\\``)\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter ``\'r\'`` or\n``\'R\'``; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of ``\'u\'`` or\n``\'U\'`` makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. 
The two prefix characters may be\ncombined; in this case, ``\'u\'`` must appear before ``\'r\'``.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\N{name}`` | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | 
(1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (3,5) |\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default). Individual code units which form parts of a surrogate\n pair can be encoded using this escape sequence.\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the byte\n with the given value; it is not necessary that the byte encodes a\n character in the source character set. In a Unicode literal, these\n escapes denote a Unicode character with the given value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. For example, the string literal\n``r"\\n"`` consists of two characters: a backslash and a lowercase\n``\'n\'``. String quotes can be escaped with a backslash, but the\nbackslash remains in the string; for example, ``r"\\""`` is a valid\nstring literal consisting of two characters: a backslash and a double\nquote; ``r"\\"`` is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is used in conjunction with a\n``\'u\'`` or ``\'U\'`` prefix, then the ``\\uXXXX`` and ``\\UXXXXXXXX``\nescape sequences are processed while *all other backslashes are left\nin the string*. For example, the string literal ``ur"\\u0062\\n"``\nconsists of three Unicode characters: \'LATIN SMALL LETTER B\', \'REVERSE\nSOLIDUS\', and \'LATIN SMALL LETTER N\'. Backslashes can be escaped with\na preceding backslash; however, both remain in the string. 
As a\nresult, ``\\uXXXX`` escape sequences are only recognized when there are\nan odd number of backslashes.\n', 'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer. If this value is negative, the length of the sequence\nis added to it (so that, e.g., ``x[-1]`` selects the last item of\n``x``.) The resulting value must be a nonnegative integer less than\nthe number of items in the sequence, and the subscription selects the\nitem whose index is that value (counting from zero).\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', 'truth': u"\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an ``if`` or\n``while`` condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* ``None``\n\n* ``False``\n\n* zero of any numeric type, for example, ``0``, ``0L``, ``0.0``,\n ``0j``.\n\n* any empty sequence, for example, ``''``, ``()``, ``[]``.\n\n* any empty mapping, for example, ``{}``.\n\n* instances of user-defined classes, if the class defines a\n ``__nonzero__()`` or ``__len__()`` method, when that method returns\n the integer zero or ``bool`` value ``False``. 
[1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn ``0`` or ``False`` for false and ``1`` or ``True`` for true,\nunless otherwise stated. (Important exception: the Boolean operations\n``or`` and ``and`` always return one of their operands.)\n", 'try': u'\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. 
An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. 
Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n', 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. 
Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``Ellipsis``. It is used to indicate the presence of the ``...``\n syntax in a slice. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception ``OverflowError`` is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. 
The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex``\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. 
Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions ``chr()`` and ``ord()`` convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions ``chr()`` and ``ord()`` implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. 
A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in ``sys.maxunicode``, and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions ``unichr()`` and\n ``ord()`` convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method ``encode()`` and the\n built-in function ``unicode()``.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. 
Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in ``frozenset()`` constructor. As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. 
The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm``, ``gdbm``, and ``bsddb`` provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | ``func_doc`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +-------------------------+---------------------------------+-------------+\n | ``__doc__`` | Another way of spelling | Writable |\n | | ``func_doc`` | |\n +-------------------------+---------------------------------+-------------+\n | ``func_name`` | The function\'s name | Writable |\n +-------------------------+---------------------------------+-------------+\n | ``__name__`` | Another way of spelling | Writable |\n | | ``func_name`` | |\n +-------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the 
module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_defaults`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +-------------------------+---------------------------------+-------------+\n | ``func_code`` | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_globals`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_dict`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_closure`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: ``func_name`` is now writable.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. 
Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or ``None``) and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: ``im_self`` is the class instance\n object, ``im_func`` is the function object; ``im_class`` is the\n class of ``im_self`` for bound methods or the class that asked\n for the method for unbound methods; ``__doc__`` is the method\'s\n documentation (same as ``im_func.__doc__``); ``__name__`` is the\n method name (same as ``im_func.__name__``); ``__module__`` is\n the name of the module the method was defined in, or ``None`` if\n unavailable.\n\n Changed in version 2.2: ``im_self`` used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For 3.0 forward-compatibility,\n ``im_func`` is also available as ``__func__``, and ``im_self``\n as ``__self__``.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its ``im_self``\n attribute is ``None`` and the method object is said to be\n unbound. 
When one is created by retrieving a user-defined\n function object from a class via one of its instances, its\n ``im_self`` attribute is the instance, and the method object is\n said to be bound. In either case, the new method\'s ``im_class``\n attribute is the class from which the retrieval takes place, and\n its ``im_func`` attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``im_func``\n attribute of the new instance is not the original method object\n but its ``im_func`` attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its ``im_self``\n attribute is the class itself (the same as the ``im_class``\n attribute), and its ``im_func`` attribute is the function object\n underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function (``im_func``) is called, with the\n restriction that the first argument must be an instance of the\n proper class (``im_class``) or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function (``im_func``) is called, inserting the class\n instance (``im_self``) in front of the argument list. 
For\n instance, when ``C`` is a class which contains a definition for\n a function ``f()``, and ``x`` is an instance of ``C``, calling\n ``x.f(1)`` is equivalent to calling ``C.f(x, 1)``.\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in ``im_self`` will actually\n be the class itself, so that calling either ``x.f(1)`` or\n ``C.f(1)`` is equivalent to calling ``f(C,1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``next()`` method will cause the function to\n execute until it provides a value using the ``yield`` statement.\n When the function executes a ``return`` statement or falls off\n the end, a ``StopIteration`` exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). 
The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *list*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override ``__new__()``. The arguments of the call are passed to\n ``__new__()`` and, in the typical case, to ``__init__()`` to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s ``__init__()``\n method if it has one. Any arguments are passed on to the\n ``__init__()`` method. If there is no ``__init__()`` method,\n the class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a ``__call__()`` method;\n ``x(arguments)`` is a shorthand for ``x.__call__(arguments)``.\n\nModules\n Modules are imported by the ``import`` statement (see section *The\n import statement*). 
A module object has a namespace implemented by\n a dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., ``m.x`` is equivalent to ``m.__dict__["x"]``. A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. The\n ``__file__`` attribute is not present for C modules that are\n statically linked into the interpreter; for extension modules\n loaded dynamically from a shared library, it is the pathname of the\n shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section *Class definitions*). A class has a namespace\n implemented by a dictionary object. Class attribute references are\n translated to lookups in this dictionary, e.g., ``C.x`` is\n translated to ``C.__dict__["x"]`` (although for new-style classes\n in particular there are a number of hooks which allow for other\n means of locating attributes). When the attribute name is not found\n there, the attribute search continues in the base classes. For\n old-style classes, the search is depth-first, left-to-right in the\n order of occurrence in the base class list. 
New-style classes use\n the more complex C3 method resolution order which behaves correctly\n even in the presence of \'diamond\' inheritance structures where\n there are multiple inheritance paths leading back to a common\n ancestor. Additional details on the C3 MRO used by new-style\n classes can be found in the documentation accompanying the 2.3\n release at http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a user-defined function object or an unbound user-defined method\n object whose associated class is either ``C`` or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose ``im_class`` attribute is ``C``. When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose ``im_class`` and ``im_self`` attributes are\n both ``C``. When it would yield a static method object, it is\n transformed into the object wrapped by the static method object.\n See section *Implementing Descriptors* for another way in which\n attributes retrieved from a class may differ from those actually\n contained in its ``__dict__`` (note that only new-style classes\n support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary 
which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it ``C``) of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n ``im_class`` attribute is ``C`` and whose ``im_self`` attribute is\n the instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class ``C``; see\n above under "Classes". See section *Implementing Descriptors* for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s ``__dict__``. If no class attribute is found, and the\n object\'s class has a ``__getattr__()`` method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the ``open()`` built-in function, and also by ``os.popen()``,\n ``os.fdopen()``, and the ``makefile()`` method of socket objects\n (and perhaps by other functions or methods provided by extension\n modules). 
The objects ``sys.stdin``, ``sys.stdout`` and\n ``sys.stderr`` are initialized to file objects corresponding to the\n interpreter\'s standard input, output and error streams. See *File\n Objects* for complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string 
encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. 
They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_restricted`` is a flag indicating whether the function is\n executing in restricted execution mode; ``f_lasti`` gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_exc_type``, ``f_exc_value``,\n ``f_exc_traceback`` represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); ``f_lineno``\n is the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as ``sys.exc_traceback``,\n and also as the third item of the tuple returned by\n ``sys.exc_info()``. The latter is the preferred interface,\n since it works correctly when the program is using multiple\n threads. 
When the program contains no suitable handler, the\n stack trace is written (nicely formatted) to the standard error\n stream; if the interpreter is interactive, it is also made\n available to the user as ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., ``a[i:j:step]``,\n ``a[i:j, k:l]``, or ``a[..., i:j]``. They are also created by\n the built-in ``slice()`` function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. 
A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in ``classmethod()`` constructor.\n', 'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: ``func(argument-list)``.\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', 'typesmapping': u'\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. 
Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass class dict([arg])\n\n Return a new dictionary initialized from an optional positional\n argument or from a set of keyword arguments. If no arguments are\n given, return a new empty dictionary. If the positional argument\n *arg* is a mapping object, return a dictionary mapping the same\n keys to the same values as does the mapping object. Otherwise the\n positional argument must be a sequence, a container that supports\n iteration, or an iterator object. The elements of the argument\n must each also be of one of those kinds, and each must in turn\n contain exactly two objects. The first is used as a key in the new\n dictionary, and the second as the key\'s value. If a given key is\n seen more than once, the last value associated with it is retained\n in the new dictionary.\n\n If keyword arguments are given, the keywords themselves with their\n associated values are added as items to the dictionary. If a key is\n specified both in the positional argument and as a keyword\n argument, the value associated with the keyword is retained in the\n dictionary. 
For example, these all return a dictionary equal to\n ``{"one": 2, "two": 3}``:\n\n * ``dict(one=2, two=3)``\n\n * ``dict({\'one\': 2, \'two\': 3})``\n\n * ``dict(zip((\'one\', \'two\'), (2, 3)))``\n\n * ``dict([[\'two\', 3], [\'one\', 2]])``\n\n The first example only works for keys that are valid Python\n identifiers; the others work with any valid keys.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n New in version 2.5: If a subclass of dict defines a method\n ``__missing__()``, if the key *key* is not present, the\n ``d[key]`` operation calls that method with the key *key* as\n argument. The ``d[key]`` operation then returns or raises\n whatever is returned or raised by the ``__missing__(key)`` call\n if the key is not present. No other operations or methods invoke\n ``__missing__()``. If ``__missing__()`` is not defined,\n ``KeyError`` is raised. ``__missing__()`` must be a method; it\n cannot be an instance variable. For an example, see\n ``collections.defaultdict``.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. 
This is a\n shortcut for ``iterkeys()``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. ``has_key()``\n is deprecated in favor of ``key in d``.\n\n items()\n\n Return a copy of the dictionary\'s list of ``(key, value)``\n pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If ``items()``, ``keys()``, ``values()``, ``iteritems()``,\n ``iterkeys()``, and ``itervalues()`` are called with no\n intervening modifications to the dictionary, the lists will\n directly correspond. This allows the creation of ``(value,\n key)`` pairs using ``zip()``: ``pairs = zip(d.values(),\n d.keys())``. The same relationship holds for the ``iterkeys()``\n and ``itervalues()`` methods: ``pairs = zip(d.itervalues(),\n d.iterkeys())`` provides the same value for ``pairs``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.iteritems()]``.\n\n iteritems()\n\n Return an iterator over the dictionary\'s ``(key, value)`` pairs.\n See the note for ``dict.items()``.\n\n Using ``iteritems()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. 
See the note for\n ``dict.items()``.\n\n Using ``iterkeys()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for ``dict.items()``.\n\n Using ``itervalues()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for ``dict.items()``.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as a tuple or other iterable of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. 
See the note\n for ``dict.items()``.\n\n viewitems()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.viewkeys()``, ``dict.viewvalues()`` and\n``dict.viewitems()`` are *view objects*. They provide a dynamic view\non the dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. 
Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... 
n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n', 'typesmethods': u"\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: ``m.im_self`` is the object on which the method\noperates, and ``m.im_func`` is the function implementing the method.\nCalling ``m(arg-1, arg-2, ..., arg-n)`` is completely equivalent to\ncalling ``m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)``.\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its ``im_self`` attribute\nwill be ``None`` and if called, an explicit ``self`` object must be\npassed as the first argument. In this case, ``self`` must be an\ninstance of the unbound method's class (or a subclass of that class),\notherwise a ``TypeError`` is raised.\n\nLike function objects, methods objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object (``meth.im_func``), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set a method attribute results in a ``TypeError`` being\nraised. 
In order to set a method attribute, you need to explicitly\nset it on the underlying function object:\n\n class C:\n def method(self):\n pass\n\n c = C()\n c.method.im_func.whoami = 'my name is c'\n\nSee *The standard type hierarchy* for more information.\n", 'typesmodules': u"\nModules\n*******\n\nThe only special operation on a module is attribute access:\n``m.name``, where *m* is a module and *name* accesses a name defined\nin *m*'s symbol table. Module attributes can be assigned to. (Note\nthat the ``import`` statement is not, strictly speaking, an operation\non a module object; ``import foo`` does not require a module object\nnamed *foo* to exist, rather it requires an (external) *definition*\nfor a module named *foo* somewhere.)\n\nA special member of every module is ``__dict__``. This is the\ndictionary containing the module's symbol table. Modifying this\ndictionary will actually change the module's symbol table, but direct\nassignment to the ``__dict__`` attribute is not possible (you can\nwrite ``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but\nyou can't write ``m.__dict__ = {}``). Modifying ``__dict__`` directly\nis not recommended.\n\nModules built into the interpreter are written like this: ``<module\n'sys' (built-in)>``. If loaded from a file, they are written as\n``<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>``.\n", 'typesseq': u'\nSequence Types --- ``str``, ``unicode``, ``list``, ``tuple``, ``buffer``, ``xrange``\n************************************************************************************\n\nThere are six sequence types: strings, Unicode strings, lists, tuples,\nbuffers, and xrange objects.\n\nFor other containers see the built in ``dict`` and ``set`` classes,\nand the ``collections`` module.\n\nString literals are written in single or double quotes: ``\'xyzzy\'``,\n``"frobozz"``. 
See *String literals* for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding ``\'u\'`` character: ``u\'abc\'``, ``u"def"``. In\naddition to the functionality described here, there are also string-\nspecific methods described in the *String Methods* section. Lists are\nconstructed with square brackets, separating items with commas: ``[a,\nb, c]``. Tuples are constructed by the comma operator (not within\nsquare brackets), with or without enclosing parentheses, but an empty\ntuple must have the enclosing parentheses, such as ``a, b, c`` or\n``()``. A single item tuple must have a trailing comma, such as\n``(d,)``.\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function ``buffer()``. They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n``xrange()`` function. They don\'t support slicing, concatenation or\nrepetition, and using ``in``, ``not in``, ``min()`` or ``max()`` on\nthem is inefficient.\n\nMost sequence types support the following operations. The ``in`` and\n``not in`` operations have the same priorities as the comparison\noperations. The ``+`` and ``*`` operations have the same priority as\nthe corresponding numeric operations. [3] Additional methods are\nprovided for *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). 
In the table,\n*s* and *t* are sequences of the same type; *n*, *i* and *j* are\nintegers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+--------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+--------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| ``s * n, n * s`` | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| ``s[i]`` | *i*\'th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+--------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. 
When *s* is a string or Unicode string object the ``in`` and ``not\n in`` operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). 
Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. **CPython implementation detail:** If *s* and *t* are both strings,\n some Python implementations such as CPython can usually perform an\n in-place optimization for assignments of the form ``s = s + t`` or\n ``s += t``. When applicable, this optimization makes quadratic\n run-time much less likely. This optimization is both version and\n implementation dependent. For performance sensitive code, it is\n preferable to use the ``str.join()`` method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbuffer, xrange* section. To output formatted strings use template\nstrings or the ``%`` operator described in the *String Formatting\nOperations* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with only its first character\n capitalized.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. 
*errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. 
This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there 
is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. 
Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that that can be used to form decimal-radix\n numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the\n``%`` operator (modulo). This is also known as the string\n*formatting* or *interpolation* operator. Given ``format % values``\n(where *format* is a string or Unicode object), ``%`` conversion\nspecifications in *format* are replaced with zero or more elements of\n*values*. The effect is similar to the using ``sprintf()`` in the C\nlanguage. If *format* is a Unicode object, or if any of the objects\nbeing converted using the ``%s`` conversion are Unicode objects, the\nresult will also be a Unicode object.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. 
[4] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The ``\'%\'`` character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence of\n characters (for example, ``(somename)``).\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an ``\'*\'``\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a ``\'.\'`` (dot) followed by the\n precision. If specified as ``\'*\'`` (an asterisk), the actual width\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the ``\'%\'`` character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(#)03d quote types.\' % \\\n... {\'language\': "Python", "#": 2}\nPython has 002 quote types.\n\nIn this case no ``*`` specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| ``\'#\'`` | The value conversion will use the "alternate form" (where defined |\n| | below). 
|\n+-----------+-----------------------------------------------------------------------+\n| ``\'0\'`` | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'-\'`` | The converted value is left adjusted (overrides the ``\'0\'`` |\n| | conversion if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| ``\' \'`` | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'+\'`` | A sign character (``\'+\'`` or ``\'-\'``) will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier (``h``, ``l``, or ``L``) may be present, but is\nignored as it is not necessary for Python -- so e.g. ``%ld`` is\nidentical to ``%d``.\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| ``\'d\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'i\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'o\'`` | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'u\'`` | Obsolete type -- it is identical to ``\'d\'``. | (7) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'x\'`` | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'X\'`` | Signed hexadecimal (uppercase). 
| (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'e\'`` | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'E\'`` | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'f\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'F\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'g\'`` | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'G\'`` | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'c\'`` | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'r\'`` | String (converts any Python object using ``repr()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'s\'`` | String (converts any Python object using ``str()``). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'%\'`` | No argument is converted, results in a ``\'%\'`` | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. 
The alternate form causes a leading zero (``\'0\'``) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading ``\'0x\'`` or ``\'0X\'`` (depending\n on whether the ``\'x\'`` or ``\'X\'`` format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. The ``%r`` conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a ``unicode`` string, the\n resulting string will also be ``unicode``.\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, ``%s`` conversions do\nnot assume that ``\'\\0\'`` is the end of the string.\n\nChanged in version 2.7: ``%f`` conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by ``%g`` conversions.\n\nAdditional string operations are defined in standard modules\n``string`` and ``re``.\n\n\nXRange Type\n===========\n\nThe ``xrange`` type is an immutable sequence which is commonly used\nfor looping. The advantage of the ``xrange`` type is that an\n``xrange`` object will always take the same amount of memory, no\nmatter the size of the range it represents. 
There are no consistent\nperformance advantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the ``len()`` function.\n\n\nMutable Sequence Types\n======================\n\nList objects support additional operations that allow in-place\nmodification of the object. Other mutable sequence types (when added\nto the language) should also support these operations. Strings and\ntuples are immutable sequence types: such objects cannot be modified\nonce created. The following operations are defined on mutable sequence\ntypes (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. 
If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn\'t have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. 
Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n', 'typesseq-mutable': u"\nMutable Sequence Types\n**********************\n\nList objects support additional operations that allow in-place\nmodification of the object. Other mutable sequence types (when added\nto the language) should also support these operations. Strings and\ntuples are immutable sequence types: such objects cannot be modified\nonce created. 
The following operations are defined on mutable sequence\ntypes (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn't have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. 
The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don't return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. 
**CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n", 'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary ``-`` (minus) operator yields the negation of its numeric\nargument.\n\nThe unary ``+`` (plus) operator yields its numeric argument unchanged.\n\nThe unary ``~`` (invert) operator yields the bitwise inversion of its\nplain or long integer argument. The bitwise inversion of ``x`` is\ndefined as ``-(x+1)``. It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n``TypeError`` exception is raised.\n', 'while': u'\nThe ``while`` statement\n***********************\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n', 'with': u'\nThe ``with`` statement\n**********************\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). 
This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'yield': u'\nThe ``yield`` statement\n***********************\n\n yield_stmt ::= yield_expression\n\nThe ``yield`` statement is only used when defining a generator\nfunction, and is only used in the body of the generator function.\nUsing a ``yield`` statement in a function definition is sufficient to\ncause that definition to create a generator function instead of a\nnormal function.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the generator\'s ``next()``\nmethod repeatedly until it raises an exception.\n\nWhen a ``yield`` statement is executed, the state of the generator is\nfrozen and the value of **expression_list** is returned to\n``next()``\'s caller. 
By "frozen" we mean that all local state is\nretained, including the current bindings of local variables, the\ninstruction pointer, and the internal evaluation stack: enough\ninformation is saved so that the next time ``next()`` is invoked, the\nfunction can proceed exactly as if the ``yield`` statement were just\nanother external call.\n\nAs of Python version 2.5, the ``yield`` statement is now allowed in\nthe ``try`` clause of a ``try`` ... ``finally`` construct. If the\ngenerator is not resumed before it is finalized (by reaching a zero\nreference count or by being garbage collected), the generator-\niterator\'s ``close()`` method will be called, allowing any pending\n``finally`` clauses to execute.\n\nNote: In Python 2.2, the ``yield`` statement was only allowed when the\n ``generators`` feature has been enabled. This ``__future__`` import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the ``yield`` statement\n to Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing ``yield`` to appear inside a ``try`` ... ``finally``\n block.\n'}
mina86/linux
refs/heads/master
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
338
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

from __future__ import print_function

import errno, os

# futex(2) operation constants (see <linux/futex.h>).
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC = 1000000000

def avg(total, n):
    """Return the mean of a running total over n samples."""
    return total / n

def nsecs(secs, nsecs):
    """Combine a (secs, nsecs) pair into a single nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    """Whole-seconds part of a nanosecond count."""
    # Floor division keeps this integral under Python 3 as well
    # (the original '/' stayed integral only on Python 2).
    return nsecs // NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    """Sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
    """Format a nanosecond count as 'SSSSS.NNNNNNNNN'."""
    # BUGFIX: the original assignment ended with a stray trailing comma,
    # which made this function return a 1-tuple instead of a string.
    # Also avoid shadowing the builtin 'str'.
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))

def add_stats(dict, key, value):
    """Fold 'value' into the (min, max, avg, count) tuple stored at dict[key].

    NOTE: the 'avg' maintained here is the original's pairwise average
    ((avg + value) / 2), not a true running mean — preserved as-is.
    The parameter name 'dict' shadows the builtin but is kept so existing
    perf scripts that call this by keyword keep working.
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)

def clear_term():
    """Clear the terminal using ANSI escape sequences (home + erase display)."""
    print("\x1b[H\x1b[2J")

audit_package_warned = False

try:
    import audit
    # Map utsname machine strings to audit machine ids.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except AttributeError:
        # Older audit bindings lack MACH_ARMEB.
        pass
    machine_id = machine_to_id[os.uname()[4]]
except Exception:
    # Either the audit bindings are missing (ImportError) or the running
    # machine type is not in the table (KeyError).  Warn once; syscall_name()
    # below falls back to numeric ids.
    if not audit_package_warned:
        audit_package_warned = True
        print("Install the audit-libs-python package to get syscall names.\n"
              "For example:\n # apt-get install python-audit (Ubuntu)"
              "\n # yum install audit-libs-python (Fedora)"
              "\n etc.\n")

def syscall_name(id):
    """Return the symbolic name of syscall number 'id', or str(id) if the
    audit bindings (or this machine's id) are unavailable."""
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # 'audit'/'machine_id' may be undefined, or the id unknown.
        return str(id)

def strerror(nr):
    """Return the symbolic errno name for nr (sign ignored), e.g. 'ENOENT'."""
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        return "Unknown %d errno" % nr
twobob/buildroot-kindle
refs/heads/master
output/build/host-python-2.7.2/Lib/bsddb/test/test_cursor_pget_bug.py
112
import unittest
import os, glob
# NOTE(review): 'glob' and 'get_new_database_path' appear unused in this
# module — possibly leftovers from a shared test template; confirm before
# removing.

from test_all import db, test_support, get_new_environment_path, \
        get_new_database_path

#----------------------------------------------------------------------

class pget_bugTestCase(unittest.TestCase):
    """Verify that cursor.pget works properly"""
    # One database file holds both the primary and secondary B-trees
    # (opened below under distinct sub-database names).
    db_name = 'test-cursor_pget.db'

    def setUp(self):
        # Fresh BDB environment with an in-memory pool, a primary B-tree,
        # and a secondary index that allows duplicate keys.
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
        self.primary_db = db.DB(self.env)
        self.primary_db.open(self.db_name, 'primary', db.DB_BTREE, db.DB_CREATE)
        self.secondary_db = db.DB(self.env)
        self.secondary_db.set_flags(db.DB_DUP)
        self.secondary_db.open(self.db_name, 'secondary', db.DB_BTREE, db.DB_CREATE)
        # Secondary-key callback: index primary records by their data value,
        # so both 'salad' and 'omelet' land under secondary key 'eggs'.
        self.primary_db.associate(self.secondary_db, lambda key, data: data)
        self.primary_db.put('salad', 'eggs')
        self.primary_db.put('spam', 'ham')
        self.primary_db.put('omelet', 'eggs')

    def tearDown(self):
        # Close handles in reverse-open order, drop the references, then
        # remove the on-disk environment directory.
        self.secondary_db.close()
        self.primary_db.close()
        self.env.close()
        del self.secondary_db
        del self.primary_db
        del self.env
        test_support.rmtree(self.homeDir)

    def test_pget(self):
        # pget on a secondary cursor yields (secondary_key, primary_key,
        # primary_data) triples; DB_NEXT_DUP walks duplicates of the
        # current secondary key and returns None when exhausted.
        cursor = self.secondary_db.cursor()
        self.assertEqual(('eggs', 'salad', 'eggs'),
                cursor.pget(key='eggs', flags=db.DB_SET))
        self.assertEqual(('eggs', 'omelet', 'eggs'),
                cursor.pget(db.DB_NEXT_DUP))
        self.assertEqual(None, cursor.pget(db.DB_NEXT_DUP))
        self.assertEqual(('ham', 'spam', 'ham'),
                cursor.pget('ham', 'spam', flags=db.DB_SET))
        self.assertEqual(None, cursor.pget(db.DB_NEXT_DUP))
        cursor.close()


def test_suite():
    # Build the suite for this module's single test case.
    return unittest.makeSuite(pget_bugTestCase)

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
davehunt/kuma
refs/heads/master
vendor/packages/pytz/exceptions.py
657
'''
Custom exceptions raised by pytz.
'''

# Public API of this module; docstrings below double as doctests and are
# therefore left untouched.
__all__ = [
    'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
    'NonExistentTimeError',
    ]


# Subclasses KeyError (not just LookupError) purely for backwards
# compatibility — see the docstring.
class UnknownTimeZoneError(KeyError):
    '''Exception raised when pytz is passed an unknown timezone.

    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True

    This class is actually a subclass of KeyError to provide backwards
    compatibility with code relying on the undocumented behavior of earlier
    pytz releases.

    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    '''
    pass


# Common base so callers can catch both DST-related errors with one except.
class InvalidTimeError(Exception):
    '''Base class for invalid time exceptions.'''


class AmbiguousTimeError(InvalidTimeError):
    '''Exception raised when attempting to create an ambiguous wallclock time.

    At the end of a DST transition period, a particular wallclock time will
    occur twice (once before the clocks are set back, once after). Both
    possibilities may be correct, unless further information is supplied.

    See DstTzInfo.normalize() for more info
    '''


class NonExistentTimeError(InvalidTimeError):
    '''Exception raised when attempting to create a wallclock time that
    cannot exist.

    At the start of a DST transition period, the wallclock time jumps forward.
    The instants jumped over never occur.
    '''
luser/steeplechase
refs/heads/master
steeplechase/__init__.py
4
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. from runsteeplechase import *
hzlf/openbroadcast
refs/heads/master
website/__filer/models/tools.py
15
#-*- coding: utf-8 -*-
from filer.models import Clipboard


def discard_clipboard(clipboard):
    """Drop every file reference held by *clipboard* (the files survive)."""
    clipboard.files.clear()


def delete_clipboard(clipboard):
    """Delete each file object currently on *clipboard*."""
    for clipboard_file in clipboard.files.all():
        clipboard_file.delete()


def get_user_clipboard(user):
    """Return the clipboard of an authenticated *user*, creating it on
    demand; anonymous users get ``None``."""
    if not user.is_authenticated():
        return None
    clipboard, _created = Clipboard.objects.get_or_create(user=user)
    return clipboard


def move_file_to_clipboard(files, clipboard):
    """Append *files* to *clipboard*, detaching each appended file from its
    folder, and return how many were actually appended."""
    appended = 0
    for candidate in files:
        if not clipboard.append_file(candidate):
            continue
        candidate.folder = None
        candidate.save()
        appended += 1
    return appended


def move_files_from_clipboard_to_folder(clipboard, folder):
    """Move the entire contents of *clipboard* into *folder*."""
    return move_files_to_folder(clipboard.files.all(), folder)


def move_files_to_folder(files, folder):
    """Re-home every file in *files* to *folder*; always returns True."""
    for moved_file in files:
        moved_file.folder = folder
        moved_file.save()
    return True
ContinuumIO/numpy
refs/heads/master
numpy/core/tests/test_arrayprint.py
4
# -*- coding: utf-8 -*- from __future__ import division, absolute_import, print_function import sys import numpy as np from numpy.compat import sixu from numpy.testing import ( TestCase, run_module_suite, assert_, assert_equal ) class TestArrayRepr(object): def test_nan_inf(self): x = np.array([np.nan, np.inf]) assert_equal(repr(x), 'array([ nan, inf])') class TestComplexArray(TestCase): def test_str(self): rvals = [0, 1, -1, np.inf, -np.inf, np.nan] cvals = [complex(rp, ip) for rp in rvals for ip in rvals] dtypes = [np.complex64, np.cdouble, np.clongdouble] actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] wanted = [ '[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]', '[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]', '[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]', '[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]', '[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]', '[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]', '[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]', '[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]', '[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]', '[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]', '[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]', '[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]', '[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]', '[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]', '[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]', '[-1.+infj]', '[-1.+infj]', '[-1.0+infj]', '[-1.-infj]', '[-1.-infj]', '[-1.0-infj]', '[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]', '[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]', '[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]', '[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]', '[ inf+infj]', '[ inf+infj]', '[ inf+infj]', '[ inf-infj]', '[ inf-infj]', '[ inf-infj]', '[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]', '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]', '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]', '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]', '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', '[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]', '[ nan+1.j]', '[ nan+1.j]', '[ 
nan+1.0j]', '[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]', '[ nan+infj]', '[ nan+infj]', '[ nan+infj]', '[ nan-infj]', '[ nan-infj]', '[ nan-infj]', '[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]'] for res, val in zip(actual, wanted): assert_(res == val) class TestArray2String(TestCase): def test_basic(self): """Basic test of array2string.""" a = np.arange(3) assert_(np.array2string(a) == '[0 1 2]') assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]') def test_style_keyword(self): """This should only apply to 0-D arrays. See #1218.""" stylestr = np.array2string(np.array(1.5), style=lambda x: "Value in 0-D array: " + str(x)) assert_(stylestr == 'Value in 0-D array: 1.5') def test_format_function(self): """Test custom format function for each element in array.""" def _format_function(x): if np.abs(x) < 1: return '.' elif np.abs(x) < 2: return 'o' else: return 'O' x = np.arange(3) if sys.version_info[0] >= 3: x_hex = "[0x0 0x1 0x2]" x_oct = "[0o0 0o1 0o2]" else: x_hex = "[0x0L 0x1L 0x2L]" x_oct = "[0L 01L 02L]" assert_(np.array2string(x, formatter={'all':_format_function}) == "[. o O]") assert_(np.array2string(x, formatter={'int_kind':_format_function}) == "[. o O]") assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == "[0.0000 1.0000 2.0000]") assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), x_hex) assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), x_oct) x = np.arange(3.) 
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == "[0.00 1.00 2.00]") assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == "[0.00 1.00 2.00]") s = np.array(['abc', 'def']) assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == '[abcabc defdef]') def test_structure_format(self): dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) assert_equal(np.array2string(x), "[('Sarah', [ 8., 7.]) ('John', [ 6., 7.])]") # for issue #5692 A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) A[5:].fill(np.nan) assert_equal(np.array2string(A), "[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) " + "('1970-01-01T00:00:00',)\n ('1970-01-01T00:00:00',) " + "('1970-01-01T00:00:00',) ('NaT',) ('NaT',)\n " + "('NaT',) ('NaT',) ('NaT',)]") # See #8160 struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)]) assert_equal(np.array2string(struct_int), "[([ 1, -1],) ([123, 1],)]") struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)], dtype=[('B', 'i4', (2, 2))]) assert_equal(np.array2string(struct_2dint), "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]") # See #8172 array_scalar = np.array( (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) assert_equal(np.array2string(array_scalar), "( 1., 2.12345679, 3.)") class TestPrintOptions: """Test getting and setting global print options.""" def setUp(self): self.oldopts = np.get_printoptions() def tearDown(self): np.set_printoptions(**self.oldopts) def test_basic(self): x = np.array([1.5, 0, 1.234567890]) assert_equal(repr(x), "array([ 1.5 , 0. , 1.23456789])") np.set_printoptions(precision=4) assert_equal(repr(x), "array([ 1.5 , 0. 
, 1.2346])") def test_precision_zero(self): np.set_printoptions(precision=0) for values, string in ( ([0.], " 0."), ([.3], " 0."), ([-.3], "-0."), ([.7], " 1."), ([1.5], " 2."), ([-1.5], "-2."), ([-15.34], "-15."), ([100.], " 100."), ([.2, -1, 122.51], " 0., -1., 123."), ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], " 0.-1.j")): x = np.array(values) assert_equal(repr(x), "array([%s])" % string) def test_formatter(self): x = np.arange(3) np.set_printoptions(formatter={'all':lambda x: str(x-1)}) assert_equal(repr(x), "array([-1, 0, 1])") def test_formatter_reset(self): x = np.arange(3) np.set_printoptions(formatter={'all':lambda x: str(x-1)}) assert_equal(repr(x), "array([-1, 0, 1])") np.set_printoptions(formatter={'int':None}) assert_equal(repr(x), "array([0, 1, 2])") np.set_printoptions(formatter={'all':lambda x: str(x-1)}) assert_equal(repr(x), "array([-1, 0, 1])") np.set_printoptions(formatter={'all':None}) assert_equal(repr(x), "array([0, 1, 2])") np.set_printoptions(formatter={'int':lambda x: str(x-1)}) assert_equal(repr(x), "array([-1, 0, 1])") np.set_printoptions(formatter={'int_kind':None}) assert_equal(repr(x), "array([0, 1, 2])") x = np.arange(3.) np.set_printoptions(formatter={'float':lambda x: str(x-1)}) assert_equal(repr(x), "array([-1.0, 0.0, 1.0])") np.set_printoptions(formatter={'float_kind':None}) assert_equal(repr(x), "array([ 0., 1., 2.])") def test_unicode_object_array(): import sys if sys.version_info[0] >= 3: expected = "array(['é'], dtype=object)" else: expected = "array([u'\\xe9'], dtype=object)" x = np.array([sixu('\xe9')], dtype=object) assert_equal(repr(x), expected) if __name__ == "__main__": run_module_suite()
rfree2/cjdns
refs/heads/master
node_build/dependencies/libuv/build/gyp/test/prune_targets/gyptest-prune-targets.py
47
#!/usr/bin/env python

# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies --root-target removes the unnecessary targets.
"""

import TestGyp

test = TestGyp.TestGyp()

# The xcode-ninja generator has its own logic for which targets to include
if test.format == 'xcode-ninja':
  test.skip_test()

# Exit status each build tool reports when asked to build a target that was
# pruned out of the generated files; used to assert a target is *absent*.
build_error_code = {
  'android': 2,
  'cmake': 1,
  'make': 2,
  'msvs': 1,
  'ninja': 1,
  'xcode': 65,
}[test.format]

# By default, everything will be included.
test.run_gyp('test1.gyp')
test.build('test2.gyp', 'lib1')
test.build('test2.gyp', 'lib2')
test.build('test2.gyp', 'lib3')
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1')
test.build('test1.gyp', 'program2')
test.build('test1.gyp', 'program3')

# With deep dependencies of program1 only.
# (lib1/lib_indirect stay buildable; everything else must fail to build.)
test.run_gyp('test1.gyp', '--root-target=program1')
test.build('test2.gyp', 'lib1')
test.build('test2.gyp', 'lib2', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1')
test.build('test1.gyp', 'program2', status=build_error_code, stderr=None)
test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)

# With deep dependencies of program2 only.
test.run_gyp('test1.gyp', '--root-target=program2')
test.build('test2.gyp', 'lib1', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib2')
test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1', status=build_error_code, stderr=None)
test.build('test1.gyp', 'program2')
test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)

# With deep dependencies of program1 and program2.
# (--root-target may be repeated; the pruned set is the union's complement.)
test.run_gyp('test1.gyp', '--root-target=program1', '--root-target=program2')
test.build('test2.gyp', 'lib1')
test.build('test2.gyp', 'lib2')
test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1')
test.build('test1.gyp', 'program2')
test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)

test.pass_test()
pantsbuild/pex
refs/heads/main
pex/vendor/_vendored/pip/pip/_vendor/certifi/core.py
17
# -*- coding: utf-8 -*- """ certifi.py ~~~~~~~~~~ This module returns the installation location of cacert.pem or its contents. """ import os try: from importlib.resources import path as get_path, read_text _CACERT_CTX = None _CACERT_PATH = None def where(): # This is slightly terrible, but we want to delay extracting the file # in cases where we're inside of a zipimport situation until someone # actually calls where(), but we don't want to re-extract the file # on every call of where(), so we'll do it once then store it in a # global variable. global _CACERT_CTX global _CACERT_PATH if _CACERT_PATH is None: # This is slightly janky, the importlib.resources API wants you to # manage the cleanup of this file, so it doesn't actually return a # path, it returns a context manager that will give you the path # when you enter it and will do any cleanup when you leave it. In # the common case of not needing a temporary file, it will just # return the file system location and the __exit__() is a no-op. # # We also have to hold onto the actual context manager, because # it will do the cleanup whenever it gets garbage collected, so # we will also store that at the global level as well. _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem") _CACERT_PATH = str(_CACERT_CTX.__enter__()) return _CACERT_PATH except ImportError: # This fallback will work for Python versions prior to 3.7 that lack the # importlib.resources module but relies on the existing `where` function # so won't address issues with environments like PyOxidizer that don't set # __file__ on modules. def read_text(_module, _path, encoding="ascii"): with open(where(), "r", encoding=encoding) as data: return data.read() # If we don't have importlib.resources, then we will just do the old logic # of assuming we're on the filesystem and munge the path directly. def where(): f = os.path.dirname(__file__) return os.path.join(f, "cacert.pem") def contents(): return read_text("certifi", "cacert.pem", encoding="ascii")
lutianming/online-ARMA
refs/heads/master
arma.py
1
import numpy as np
from numpy.linalg import inv
from scipy.optimize import fmin_bfgs


def K_min(y, A):
    """Return f(x) = (y - x)^T A (y - x).

    Objective for projecting a coefficient vector back onto the feasible
    set under the norm induced by A (the optional ONS projection step).
    """
    def f(x):
        diff = np.matrix(y).reshape(-1, 1) - np.matrix(x).reshape(-1, 1)
        quad = np.dot(np.dot(diff.T, A), diff)
        return quad[0, 0]
    return f


def _predict(L, X, t, order):
    """Dot the coefficient vector L with the last `order` observations
    before time t; history before t=0 is simply absent (loop breaks)."""
    X_t = 0
    for i in range(order):
        if t - i - 1 < 0:
            break
        X_t += L[i] * X[t - i - 1]
    return X_t


def _loss_gradient(order, X, t, X_t):
    """Gradient of the squared loss (X[t] - X_t)^2 w.r.t. the `order`
    coefficients; out-of-range history contributes 0."""
    nabla = np.zeros((order, 1))
    for i in range(order):
        x = X[t - i - 1] if t - i - 1 >= 0 else 0
        nabla[i, 0] = -2 * (X[t] - X_t) * x
    return nabla


def arma_ons(X, m, k, q):
    """ARMA prediction with the Online Newton Step update.

    X : 1-D array of observations.
    m, k : AR/MA orders; the improper model uses m + k AR coefficients.
    q : accepted for interface compatibility, unused (as in the original).
    Returns (predictions, per-step squared losses), both shape (len(X),).
    """
    D = np.sqrt(2 * (m + k))
    G = 2 * np.sqrt(m + k) * D
    # NOTE(review): Anava et al. (2013) use eta = 0.5*min(1/(m+k), 1/(4*G*D));
    # the original code has 4*G*D (not its reciprocal) here.  Preserved
    # unchanged to keep results identical -- confirm against the paper.
    rate = 0.5 * min(1. / (m + k), 4 * G * D)
    epsilon = 1. / (rate ** 2 * D ** 2)

    A = np.matrix(np.diag([1] * (m + k)) * epsilon)  # A_0 = eps * I
    T = X.shape[0]
    L = np.matrix(np.random.uniform(-0.5, 0.5, (m + k, 1)))
    X_p = np.zeros(T)
    loss = np.zeros(T)
    for t in range(T):
        X_t = _predict(L, X, t, m + k)
        X_p[t] = X_t
        loss[t] = (X[t] - X_t) ** 2
        # Newton-style update: accumulate outer products of gradients and
        # precondition the step with A^-1.
        nabla = _loss_gradient(m + k, X, t, X_t)
        A = A + np.dot(nabla, nabla.T)
        L = L - 1 / rate * np.dot(inv(A), nabla)
    return X_p, loss


def arma_ogd(X, m, k, q):
    """ARMA prediction with Online Gradient Descent.

    Same conventions and return value as arma_ons(); the step size is the
    standard D/(G*sqrt(T)) OGD rate.
    """
    D = np.sqrt(2 * (m + k))
    G = 2 * np.sqrt(m + k) * D
    T = X.shape[0]
    rate = D / (G * np.sqrt(T))

    L = np.matrix(np.random.uniform(-0.5, 0.5, (m + k, 1)))
    X_p = np.zeros(T)
    loss = np.zeros(T)
    for t in range(T):
        X_t = _predict(L, X, t, m + k)
        X_p[t] = X_t
        loss[t] = (X[t] - X_t) ** 2
        L = L - rate * _loss_gradient(m + k, X, t, X_t)
    return X_p, loss


def gen_errors(loss):
    """Running mean of the losses: errors[i] = mean(loss[:i+1]).

    Vectorized with cumsum; the original recomputed every prefix sum from
    scratch, which was O(n^2).
    """
    loss = np.asarray(loss, dtype=float)
    return np.cumsum(loss) / np.arange(1, loss.shape[0] + 1)


def average(datagen, N, arma, n):
    """Average the per-step loss of `arma` over `n` independent length-N
    series drawn from `datagen` (orders fixed at m=k=5, q=0)."""
    avg = np.zeros(N)
    for i in range(n):
        X = datagen(N)
        X_p, loss = arma(X, 5, 5, 0)
        avg += loss
    return avg / n


if __name__ == '__main__':
    # Plotting and data-generation dependencies are only needed for the
    # experiments below, so they are imported here: the algorithm functions
    # above stay importable without matplotlib/statsmodels/pandas or the
    # project-local `data` module.
    import matplotlib.pyplot as plt
    import statsmodels.api as sm  # noqa: F401 (kept from original; unused)
    import pandas as pd  # noqa: F401 (kept from original; unused)
    from statsmodels.tsa.arima_process import arma_generate_sample  # noqa: F401
    import data

    n = 10000
    iters = 2
    t = range(n)
    X = data.gen_dataset1(n)

    plt.subplot(221)
    loss = average(data.gen_dataset1, n, arma_ons, iters)
    e = gen_errors(loss)
    plt.plot(t, e, label="ARMA-ONS")
    loss = average(data.gen_dataset1, n, arma_ogd, iters)
    e = gen_errors(loss)
    plt.plot(t, e, label="ARMA-OGD")
    plt.legend()
    plt.title("Sanity check")

    plt.subplot(222)
    loss = average(data.gen_dataset2, n, arma_ons, iters)
    e = gen_errors(loss)
    plt.plot(t, e, label="ARMA-ONS")
    loss = average(data.gen_dataset2, n, arma_ogd, iters)
    e = gen_errors(loss)
    plt.plot(t, e, label="ARMA-OGD")
    plt.legend()
    plt.title("Slowly changing coefficients")

    plt.subplot(223)
    loss = average(data.gen_dataset3, n, arma_ons, iters)
    e = gen_errors(loss)
    plt.plot(t, e, label="ARMA-ONS")
    loss = average(data.gen_dataset3, n, arma_ogd, iters)
    e = gen_errors(loss)
    plt.plot(t, e, label="ARMA-OGD")
    plt.legend()
    plt.title("Abrupt change")

    plt.subplot(224)
    loss = average(data.gen_dataset4, n, arma_ons, iters)
    e = gen_errors(loss)
    plt.plot(t, e, label="ARMA-ONS")
    plt.legend()
    plt.title("Correlated noise")
    plt.show()

    # Real-data experiments.
    plt.subplot(121)
    X = data.gen_temperature()
    n = X.shape[0]
    t = range(n)
    X_p, loss = arma_ons(X, 5, 5, 0)
    e = gen_errors(loss)
    plt.plot(t, e, label="ARMA-ONS")
    X_p, loss = arma_ogd(X, 5, 5, 0)
    e = gen_errors(loss)
    plt.plot(t, e, label='AMRA-OGD')
    plt.legend()

    plt.subplot(122)
    X = data.gen_stock()
    n = X.shape[0]
    t = range(n)
    X_p, loss = arma_ons(X, 5, 5, 0)
    e = gen_errors(loss)
    plt.plot(t, e, label='ARMA-ONS')
    X_p, loss = arma_ogd(X, 5, 5, 0)
    e = gen_errors(loss)
    plt.plot(t, e, label='ARMA-OGD')
    plt.legend()
    plt.show()
johndbritton/gitviz
refs/heads/master
dulwich/dulwich/tests/test_objects.py
3
# test_objects.py -- tests for objects.py # Copyright (C) 2007 James Westby <jw+debian@jameswestby.net> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 # of the License or (at your option) any later version of # the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """Tests for git base objects.""" # TODO: Round-trip parse-serialize-parse and serialize-parse-serialize tests. from cStringIO import StringIO import datetime import os import stat import warnings from dulwich.errors import ( ObjectFormatException, ) from dulwich._compat import ( permutations, ) from dulwich.objects import ( Blob, Tree, Commit, ShaFile, Tag, format_timezone, hex_to_sha, sha_to_hex, hex_to_filename, check_hexsha, check_identity, parse_timezone, TreeEntry, parse_tree, _parse_tree_py, sorted_tree_items, _sorted_tree_items_py, ) from dulwich.tests import ( TestCase, ) from utils import ( make_commit, make_object, functest_builder, ext_functest_builder, ) a_sha = '6f670c0fb53f9463760b7295fbb814e965fb20c8' b_sha = '2969be3e8ee1c0222396a5611407e4769f14e54b' c_sha = '954a536f7819d40e6f637f849ee187dd10066349' tree_sha = '70c190eb48fa8bbb50ddc692a17b44cb781af7f6' tag_sha = '71033db03a03c6a36721efcf1968dd8f8e0cf023' class TestHexToSha(TestCase): def test_simple(self): self.assertEqual("\xab\xcd" * 10, hex_to_sha("abcd" * 10)) def test_reverse(self): self.assertEqual("abcd" * 10, sha_to_hex("\xab\xcd" * 10)) class BlobReadTests(TestCase): """Test 
decompression of blobs""" def get_sha_file(self, cls, base, sha): dir = os.path.join(os.path.dirname(__file__), 'data', base) return cls.from_path(hex_to_filename(dir, sha)) def get_blob(self, sha): """Return the blob named sha from the test data dir""" return self.get_sha_file(Blob, 'blobs', sha) def get_tree(self, sha): return self.get_sha_file(Tree, 'trees', sha) def get_tag(self, sha): return self.get_sha_file(Tag, 'tags', sha) def commit(self, sha): return self.get_sha_file(Commit, 'commits', sha) def test_decompress_simple_blob(self): b = self.get_blob(a_sha) self.assertEqual(b.data, 'test 1\n') self.assertEqual(b.sha().hexdigest(), a_sha) def test_hash(self): b = self.get_blob(a_sha) self.assertEqual(hash(b.id), hash(b)) def test_parse_empty_blob_object(self): sha = 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391' b = self.get_blob(sha) self.assertEqual(b.data, '') self.assertEqual(b.id, sha) self.assertEqual(b.sha().hexdigest(), sha) def test_create_blob_from_string(self): string = 'test 2\n' b = Blob.from_string(string) self.assertEqual(b.data, string) self.assertEqual(b.sha().hexdigest(), b_sha) def test_legacy_from_file(self): b1 = Blob.from_string("foo") b_raw = b1.as_legacy_object() b2 = b1.from_file(StringIO(b_raw)) self.assertEqual(b1, b2) def test_chunks(self): string = 'test 5\n' b = Blob.from_string(string) self.assertEqual([string], b.chunked) def test_set_chunks(self): b = Blob() b.chunked = ['te', 'st', ' 5\n'] self.assertEqual('test 5\n', b.data) b.chunked = ['te', 'st', ' 6\n'] self.assertEqual('test 6\n', b.as_raw_string()) def test_parse_legacy_blob(self): string = 'test 3\n' b = self.get_blob(c_sha) self.assertEqual(b.data, string) self.assertEqual(b.sha().hexdigest(), c_sha) def test_eq(self): blob1 = self.get_blob(a_sha) blob2 = self.get_blob(a_sha) self.assertEqual(blob1, blob2) def test_read_tree_from_file(self): t = self.get_tree(tree_sha) self.assertEqual(t.items()[0], ('a', 33188, a_sha)) self.assertEqual(t.items()[1], ('b', 33188, 
b_sha)) def test_read_tag_from_file(self): t = self.get_tag(tag_sha) self.assertEqual(t.object, (Commit, '51b668fd5bf7061b7d6fa525f88803e6cfadaa51')) self.assertEqual(t.name,'signed') self.assertEqual(t.tagger,'Ali Sabil <ali.sabil@gmail.com>') self.assertEqual(t.tag_time, 1231203091) self.assertEqual(t.message, 'This is a signed tag\n-----BEGIN PGP SIGNATURE-----\nVersion: GnuPG v1.4.9 (GNU/Linux)\n\niEYEABECAAYFAkliqx8ACgkQqSMmLy9u/kcx5ACfakZ9NnPl02tOyYP6pkBoEkU1\n5EcAn0UFgokaSvS371Ym/4W9iJj6vh3h\n=ql7y\n-----END PGP SIGNATURE-----\n') def test_read_commit_from_file(self): sha = '60dacdc733de308bb77bb76ce0fb0f9b44c9769e' c = self.commit(sha) self.assertEqual(c.tree, tree_sha) self.assertEqual(c.parents, ['0d89f20333fbb1d2f3a94da77f4981373d8f4310']) self.assertEqual(c.author, 'James Westby <jw+debian@jameswestby.net>') self.assertEqual(c.committer, 'James Westby <jw+debian@jameswestby.net>') self.assertEqual(c.commit_time, 1174759230) self.assertEqual(c.commit_timezone, 0) self.assertEqual(c.author_timezone, 0) self.assertEqual(c.message, 'Test commit\n') def test_read_commit_no_parents(self): sha = '0d89f20333fbb1d2f3a94da77f4981373d8f4310' c = self.commit(sha) self.assertEqual(c.tree, '90182552c4a85a45ec2a835cadc3451bebdfe870') self.assertEqual(c.parents, []) self.assertEqual(c.author, 'James Westby <jw+debian@jameswestby.net>') self.assertEqual(c.committer, 'James Westby <jw+debian@jameswestby.net>') self.assertEqual(c.commit_time, 1174758034) self.assertEqual(c.commit_timezone, 0) self.assertEqual(c.author_timezone, 0) self.assertEqual(c.message, 'Test commit\n') def test_read_commit_two_parents(self): sha = '5dac377bdded4c9aeb8dff595f0faeebcc8498cc' c = self.commit(sha) self.assertEqual(c.tree, 'd80c186a03f423a81b39df39dc87fd269736ca86') self.assertEqual(c.parents, ['ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', '4cffe90e0a41ad3f5190079d7c8f036bde29cbe6']) self.assertEqual(c.author, 'James Westby <jw+debian@jameswestby.net>') self.assertEqual(c.committer, 'James 
Westby <jw+debian@jameswestby.net>') self.assertEqual(c.commit_time, 1174773719) self.assertEqual(c.commit_timezone, 0) self.assertEqual(c.author_timezone, 0) self.assertEqual(c.message, 'Merge ../b\n') def test_stub_sha(self): sha = '5' * 40 c = make_commit(id=sha, message='foo') self.assertTrue(isinstance(c, Commit)) self.assertEqual(sha, c.id) self.assertNotEqual(sha, c._make_sha()) class ShaFileCheckTests(TestCase): def assertCheckFails(self, cls, data): obj = cls() def do_check(): obj.set_raw_string(data) obj.check() self.assertRaises(ObjectFormatException, do_check) def assertCheckSucceeds(self, cls, data): obj = cls() obj.set_raw_string(data) self.assertEqual(None, obj.check()) small_buffer_zlib_object = ( "\x48\x89\x15\xcc\x31\x0e\xc2\x30\x0c\x40\x51\xe6" "\x9c\xc2\x3b\xaa\x64\x37\xc4\xc1\x12\x42\x5c\xc5" "\x49\xac\x52\xd4\x92\xaa\x78\xe1\xf6\x94\xed\xeb" "\x0d\xdf\x75\x02\xa2\x7c\xea\xe5\x65\xd5\x81\x8b" "\x9a\x61\xba\xa0\xa9\x08\x36\xc9\x4c\x1a\xad\x88" "\x16\xba\x46\xc4\xa8\x99\x6a\x64\xe1\xe0\xdf\xcd" "\xa0\xf6\x75\x9d\x3d\xf8\xf1\xd0\x77\xdb\xfb\xdc" "\x86\xa3\x87\xf1\x2f\x93\xed\x00\xb7\xc7\xd2\xab" "\x2e\xcf\xfe\xf1\x3b\x50\xa4\x91\x53\x12\x24\x38" "\x23\x21\x86\xf0\x03\x2f\x91\x24\x52" ) class ShaFileTests(TestCase): def test_deflated_smaller_window_buffer(self): # zlib on some systems uses smaller buffers, # resulting in a different header. 
# See https://github.com/libgit2/libgit2/pull/464 sf = ShaFile.from_file(StringIO(small_buffer_zlib_object)) self.assertEqual(sf.type_name, "tag") self.assertEqual(sf.tagger, " <@localhost>") class CommitSerializationTests(TestCase): def make_commit(self, **kwargs): attrs = {'tree': 'd80c186a03f423a81b39df39dc87fd269736ca86', 'parents': ['ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', '4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'], 'author': 'James Westby <jw+debian@jameswestby.net>', 'committer': 'James Westby <jw+debian@jameswestby.net>', 'commit_time': 1174773719, 'author_time': 1174773719, 'commit_timezone': 0, 'author_timezone': 0, 'message': 'Merge ../b\n'} attrs.update(kwargs) return make_commit(**attrs) def test_encoding(self): c = self.make_commit(encoding='iso8859-1') self.assertTrue('encoding iso8859-1\n' in c.as_raw_string()) def test_short_timestamp(self): c = self.make_commit(commit_time=30) c1 = Commit() c1.set_raw_string(c.as_raw_string()) self.assertEqual(30, c1.commit_time) def test_raw_length(self): c = self.make_commit() self.assertEqual(len(c.as_raw_string()), c.raw_length()) def test_simple(self): c = self.make_commit() self.assertEqual(c.id, '5dac377bdded4c9aeb8dff595f0faeebcc8498cc') self.assertEqual( 'tree d80c186a03f423a81b39df39dc87fd269736ca86\n' 'parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd\n' 'parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6\n' 'author James Westby <jw+debian@jameswestby.net> ' '1174773719 +0000\n' 'committer James Westby <jw+debian@jameswestby.net> ' '1174773719 +0000\n' '\n' 'Merge ../b\n', c.as_raw_string()) def test_timezone(self): c = self.make_commit(commit_timezone=(5 * 60)) self.assertTrue(" +0005\n" in c.as_raw_string()) def test_neg_timezone(self): c = self.make_commit(commit_timezone=(-1 * 3600)) self.assertTrue(" -0100\n" in c.as_raw_string()) def test_deserialize(self): c = self.make_commit() d = Commit() d._deserialize(c.as_raw_chunks()) self.assertEqual(c, d) default_committer = 'James Westby 
<jw+debian@jameswestby.net> 1174773719 +0000' class CommitParseTests(ShaFileCheckTests): def make_commit_lines(self, tree='d80c186a03f423a81b39df39dc87fd269736ca86', parents=['ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', '4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'], author=default_committer, committer=default_committer, encoding=None, message='Merge ../b\n', extra=None): lines = [] if tree is not None: lines.append('tree %s' % tree) if parents is not None: lines.extend('parent %s' % p for p in parents) if author is not None: lines.append('author %s' % author) if committer is not None: lines.append('committer %s' % committer) if encoding is not None: lines.append('encoding %s' % encoding) if extra is not None: for name, value in sorted(extra.iteritems()): lines.append('%s %s' % (name, value)) lines.append('') if message is not None: lines.append(message) return lines def make_commit_text(self, **kwargs): return '\n'.join(self.make_commit_lines(**kwargs)) def test_simple(self): c = Commit.from_string(self.make_commit_text()) self.assertEqual('Merge ../b\n', c.message) self.assertEqual('James Westby <jw+debian@jameswestby.net>', c.author) self.assertEqual('James Westby <jw+debian@jameswestby.net>', c.committer) self.assertEqual('d80c186a03f423a81b39df39dc87fd269736ca86', c.tree) self.assertEqual(['ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', '4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'], c.parents) expected_time = datetime.datetime(2007, 3, 24, 22, 1, 59) self.assertEqual(expected_time, datetime.datetime.utcfromtimestamp(c.commit_time)) self.assertEqual(0, c.commit_timezone) self.assertEqual(expected_time, datetime.datetime.utcfromtimestamp(c.author_time)) self.assertEqual(0, c.author_timezone) self.assertEqual(None, c.encoding) def test_custom(self): c = Commit.from_string(self.make_commit_text( extra={'extra-field': 'data'})) self.assertEqual([('extra-field', 'data')], c.extra) def test_encoding(self): c = Commit.from_string(self.make_commit_text(encoding='UTF-8')) 
self.assertEqual('UTF-8', c.encoding) def test_check(self): self.assertCheckSucceeds(Commit, self.make_commit_text()) self.assertCheckSucceeds(Commit, self.make_commit_text(parents=None)) self.assertCheckSucceeds(Commit, self.make_commit_text(encoding='UTF-8')) self.assertCheckFails(Commit, self.make_commit_text(tree='xxx')) self.assertCheckFails(Commit, self.make_commit_text( parents=[a_sha, 'xxx'])) bad_committer = "some guy without an email address 1174773719 +0000" self.assertCheckFails(Commit, self.make_commit_text(committer=bad_committer)) self.assertCheckFails(Commit, self.make_commit_text(author=bad_committer)) self.assertCheckFails(Commit, self.make_commit_text(author=None)) self.assertCheckFails(Commit, self.make_commit_text(committer=None)) self.assertCheckFails(Commit, self.make_commit_text( author=None, committer=None)) def test_check_duplicates(self): # duplicate each of the header fields for i in xrange(5): lines = self.make_commit_lines(parents=[a_sha], encoding='UTF-8') lines.insert(i, lines[i]) text = '\n'.join(lines) if lines[i].startswith('parent'): # duplicate parents are ok for now self.assertCheckSucceeds(Commit, text) else: self.assertCheckFails(Commit, text) def test_check_order(self): lines = self.make_commit_lines(parents=[a_sha], encoding='UTF-8') headers = lines[:5] rest = lines[5:] # of all possible permutations, ensure only the original succeeds for perm in permutations(headers): perm = list(perm) text = '\n'.join(perm + rest) if perm == headers: self.assertCheckSucceeds(Commit, text) else: self.assertCheckFails(Commit, text) _TREE_ITEMS = { 'a.c': (0100755, 'd80c186a03f423a81b39df39dc87fd269736ca86'), 'a': (stat.S_IFDIR, 'd80c186a03f423a81b39df39dc87fd269736ca86'), 'a/c': (stat.S_IFDIR, 'd80c186a03f423a81b39df39dc87fd269736ca86'), } _SORTED_TREE_ITEMS = [ TreeEntry('a.c', 0100755, 'd80c186a03f423a81b39df39dc87fd269736ca86'), TreeEntry('a', stat.S_IFDIR, 'd80c186a03f423a81b39df39dc87fd269736ca86'), TreeEntry('a/c', stat.S_IFDIR, 
'd80c186a03f423a81b39df39dc87fd269736ca86'), ] class TreeTests(ShaFileCheckTests): def test_add(self): myhexsha = "d80c186a03f423a81b39df39dc87fd269736ca86" x = Tree() x.add("myname", 0100755, myhexsha) self.assertEqual(x["myname"], (0100755, myhexsha)) self.assertEqual('100755 myname\0' + hex_to_sha(myhexsha), x.as_raw_string()) def test_add_old_order(self): myhexsha = "d80c186a03f423a81b39df39dc87fd269736ca86" x = Tree() warnings.simplefilter("ignore", DeprecationWarning) try: x.add(0100755, "myname", myhexsha) finally: warnings.resetwarnings() self.assertEqual(x["myname"], (0100755, myhexsha)) self.assertEqual('100755 myname\0' + hex_to_sha(myhexsha), x.as_raw_string()) def test_simple(self): myhexsha = "d80c186a03f423a81b39df39dc87fd269736ca86" x = Tree() x["myname"] = (0100755, myhexsha) self.assertEqual('100755 myname\0' + hex_to_sha(myhexsha), x.as_raw_string()) def test_tree_update_id(self): x = Tree() x["a.c"] = (0100755, "d80c186a03f423a81b39df39dc87fd269736ca86") self.assertEqual("0c5c6bc2c081accfbc250331b19e43b904ab9cdd", x.id) x["a.b"] = (stat.S_IFDIR, "d80c186a03f423a81b39df39dc87fd269736ca86") self.assertEqual("07bfcb5f3ada15bbebdfa3bbb8fd858a363925c8", x.id) def test_tree_iteritems_dir_sort(self): x = Tree() for name, item in _TREE_ITEMS.iteritems(): x[name] = item self.assertEqual(_SORTED_TREE_ITEMS, list(x.iteritems())) def test_tree_items_dir_sort(self): x = Tree() for name, item in _TREE_ITEMS.iteritems(): x[name] = item self.assertEqual(_SORTED_TREE_ITEMS, x.items()) def _do_test_parse_tree(self, parse_tree): dir = os.path.join(os.path.dirname(__file__), 'data', 'trees') o = Tree.from_path(hex_to_filename(dir, tree_sha)) self.assertEqual([('a', 0100644, a_sha), ('b', 0100644, b_sha)], list(parse_tree(o.as_raw_string()))) # test a broken tree that has a leading 0 on the file mode broken_tree = '0100644 foo\0' + hex_to_sha(a_sha) def eval_parse_tree(*args, **kwargs): return list(parse_tree(*args, **kwargs)) self.assertEqual([('foo', 0100644, 
a_sha)], eval_parse_tree(broken_tree)) self.assertRaises(ObjectFormatException, eval_parse_tree, broken_tree, strict=True) test_parse_tree = functest_builder(_do_test_parse_tree, _parse_tree_py) test_parse_tree_extension = ext_functest_builder(_do_test_parse_tree, parse_tree) def _do_test_sorted_tree_items(self, sorted_tree_items): def do_sort(entries): return list(sorted_tree_items(entries, False)) actual = do_sort(_TREE_ITEMS) self.assertEqual(_SORTED_TREE_ITEMS, actual) self.assertTrue(isinstance(actual[0], TreeEntry)) # C/Python implementations may differ in specific error types, but # should all error on invalid inputs. # For example, the C implementation has stricter type checks, so may # raise TypeError where the Python implementation raises AttributeError. errors = (TypeError, ValueError, AttributeError) self.assertRaises(errors, do_sort, 'foo') self.assertRaises(errors, do_sort, {'foo': (1, 2, 3)}) myhexsha = 'd80c186a03f423a81b39df39dc87fd269736ca86' self.assertRaises(errors, do_sort, {'foo': ('xxx', myhexsha)}) self.assertRaises(errors, do_sort, {'foo': (0100755, 12345)}) test_sorted_tree_items = functest_builder(_do_test_sorted_tree_items, _sorted_tree_items_py) test_sorted_tree_items_extension = ext_functest_builder( _do_test_sorted_tree_items, sorted_tree_items) def _do_test_sorted_tree_items_name_order(self, sorted_tree_items): self.assertEqual([ TreeEntry('a', stat.S_IFDIR, 'd80c186a03f423a81b39df39dc87fd269736ca86'), TreeEntry('a.c', 0100755, 'd80c186a03f423a81b39df39dc87fd269736ca86'), TreeEntry('a/c', stat.S_IFDIR, 'd80c186a03f423a81b39df39dc87fd269736ca86'), ], list(sorted_tree_items(_TREE_ITEMS, True))) test_sorted_tree_items_name_order = functest_builder( _do_test_sorted_tree_items_name_order, _sorted_tree_items_py) test_sorted_tree_items_name_order_extension = ext_functest_builder( _do_test_sorted_tree_items_name_order, sorted_tree_items) def test_check(self): t = Tree sha = hex_to_sha(a_sha) # filenames self.assertCheckSucceeds(t, '100644 
.a\0%s' % sha) self.assertCheckFails(t, '100644 \0%s' % sha) self.assertCheckFails(t, '100644 .\0%s' % sha) self.assertCheckFails(t, '100644 a/a\0%s' % sha) self.assertCheckFails(t, '100644 ..\0%s' % sha) # modes self.assertCheckSucceeds(t, '100644 a\0%s' % sha) self.assertCheckSucceeds(t, '100755 a\0%s' % sha) self.assertCheckSucceeds(t, '160000 a\0%s' % sha) # TODO more whitelisted modes self.assertCheckFails(t, '123456 a\0%s' % sha) self.assertCheckFails(t, '123abc a\0%s' % sha) # should fail check, but parses ok self.assertCheckFails(t, '0100644 foo\0' + sha) # shas self.assertCheckFails(t, '100644 a\0%s' % ('x' * 5)) self.assertCheckFails(t, '100644 a\0%s' % ('x' * 18 + '\0')) self.assertCheckFails(t, '100644 a\0%s\n100644 b\0%s' % ('x' * 21, sha)) # ordering sha2 = hex_to_sha(b_sha) self.assertCheckSucceeds(t, '100644 a\0%s\n100644 b\0%s' % (sha, sha)) self.assertCheckSucceeds(t, '100644 a\0%s\n100644 b\0%s' % (sha, sha2)) self.assertCheckFails(t, '100644 a\0%s\n100755 a\0%s' % (sha, sha2)) self.assertCheckFails(t, '100644 b\0%s\n100644 a\0%s' % (sha2, sha)) def test_iter(self): t = Tree() t["foo"] = (0100644, a_sha) self.assertEqual(set(["foo"]), set(t)) class TagSerializeTests(TestCase): def test_serialize_simple(self): x = make_object(Tag, tagger='Jelmer Vernooij <jelmer@samba.org>', name='0.1', message='Tag 0.1', object=(Blob, 'd80c186a03f423a81b39df39dc87fd269736ca86'), tag_time=423423423, tag_timezone=0) self.assertEqual(('object d80c186a03f423a81b39df39dc87fd269736ca86\n' 'type blob\n' 'tag 0.1\n' 'tagger Jelmer Vernooij <jelmer@samba.org> ' '423423423 +0000\n' '\n' 'Tag 0.1'), x.as_raw_string()) default_tagger = ('Linus Torvalds <torvalds@woody.linux-foundation.org> ' '1183319674 -0700') default_message = """Linux 2.6.22-rc7 -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.7 (GNU/Linux) iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql OK2XeQOiEeXtT76rV4t2WR4= =ivrA -----END PGP SIGNATURE----- """ class 
TagParseTests(ShaFileCheckTests): def make_tag_lines(self, object_sha="a38d6181ff27824c79fc7df825164a212eff6a3f", object_type_name="commit", name="v2.6.22-rc7", tagger=default_tagger, message=default_message): lines = [] if object_sha is not None: lines.append("object %s" % object_sha) if object_type_name is not None: lines.append("type %s" % object_type_name) if name is not None: lines.append("tag %s" % name) if tagger is not None: lines.append("tagger %s" % tagger) lines.append("") if message is not None: lines.append(message) return lines def make_tag_text(self, **kwargs): return "\n".join(self.make_tag_lines(**kwargs)) def test_parse(self): x = Tag() x.set_raw_string(self.make_tag_text()) self.assertEqual( "Linus Torvalds <torvalds@woody.linux-foundation.org>", x.tagger) self.assertEqual("v2.6.22-rc7", x.name) object_type, object_sha = x.object self.assertEqual("a38d6181ff27824c79fc7df825164a212eff6a3f", object_sha) self.assertEqual(Commit, object_type) self.assertEqual(datetime.datetime.utcfromtimestamp(x.tag_time), datetime.datetime(2007, 7, 1, 19, 54, 34)) self.assertEqual(-25200, x.tag_timezone) def test_parse_no_tagger(self): x = Tag() x.set_raw_string(self.make_tag_text(tagger=None)) self.assertEqual(None, x.tagger) self.assertEqual("v2.6.22-rc7", x.name) def test_check(self): self.assertCheckSucceeds(Tag, self.make_tag_text()) self.assertCheckFails(Tag, self.make_tag_text(object_sha=None)) self.assertCheckFails(Tag, self.make_tag_text(object_type_name=None)) self.assertCheckFails(Tag, self.make_tag_text(name=None)) self.assertCheckFails(Tag, self.make_tag_text(name='')) self.assertCheckFails(Tag, self.make_tag_text( object_type_name="foobar")) self.assertCheckFails(Tag, self.make_tag_text( tagger="some guy without an email address 1183319674 -0700")) self.assertCheckFails(Tag, self.make_tag_text( tagger=("Linus Torvalds <torvalds@woody.linux-foundation.org> " "Sun 7 Jul 2007 12:54:34 +0700"))) self.assertCheckFails(Tag, 
self.make_tag_text(object_sha="xxx")) def test_check_duplicates(self): # duplicate each of the header fields for i in xrange(4): lines = self.make_tag_lines() lines.insert(i, lines[i]) self.assertCheckFails(Tag, '\n'.join(lines)) def test_check_order(self): lines = self.make_tag_lines() headers = lines[:4] rest = lines[4:] # of all possible permutations, ensure only the original succeeds for perm in permutations(headers): perm = list(perm) text = '\n'.join(perm + rest) if perm == headers: self.assertCheckSucceeds(Tag, text) else: self.assertCheckFails(Tag, text) class CheckTests(TestCase): def test_check_hexsha(self): check_hexsha(a_sha, "failed to check good sha") self.assertRaises(ObjectFormatException, check_hexsha, '1' * 39, 'sha too short') self.assertRaises(ObjectFormatException, check_hexsha, '1' * 41, 'sha too long') self.assertRaises(ObjectFormatException, check_hexsha, 'x' * 40, 'invalid characters') def test_check_identity(self): check_identity("Dave Borowitz <dborowitz@google.com>", "failed to check good identity") check_identity("<dborowitz@google.com>", "failed to check good identity") self.assertRaises(ObjectFormatException, check_identity, "Dave Borowitz", "no email") self.assertRaises(ObjectFormatException, check_identity, "Dave Borowitz <dborowitz", "incomplete email") self.assertRaises(ObjectFormatException, check_identity, "dborowitz@google.com>", "incomplete email") self.assertRaises(ObjectFormatException, check_identity, "Dave Borowitz <<dborowitz@google.com>", "typo") self.assertRaises(ObjectFormatException, check_identity, "Dave Borowitz <dborowitz@google.com>>", "typo") self.assertRaises(ObjectFormatException, check_identity, "Dave Borowitz <dborowitz@google.com>xxx", "trailing characters") class TimezoneTests(TestCase): def test_parse_timezone_utc(self): self.assertEqual((0, False), parse_timezone("+0000")) def test_parse_timezone_utc_negative(self): self.assertEqual((0, True), parse_timezone("-0000")) def test_generate_timezone_utc(self): 
self.assertEqual("+0000", format_timezone(0)) def test_generate_timezone_utc_negative(self): self.assertEqual("-0000", format_timezone(0, True)) def test_parse_timezone_cet(self): self.assertEqual((60 * 60, False), parse_timezone("+0100")) def test_format_timezone_cet(self): self.assertEqual("+0100", format_timezone(60 * 60)) def test_format_timezone_pdt(self): self.assertEqual("-0400", format_timezone(-4 * 60 * 60)) def test_parse_timezone_pdt(self): self.assertEqual((-4 * 60 * 60, False), parse_timezone("-0400")) def test_format_timezone_pdt_half(self): self.assertEqual("-0440", format_timezone(int(((-4 * 60) - 40) * 60))) def test_format_timezone_double_negative(self): self.assertEqual("--700", format_timezone(int(((7 * 60)) * 60), True)) def test_parse_timezone_pdt_half(self): self.assertEqual((((-4 * 60) - 40) * 60, False), parse_timezone("-0440")) def test_parse_timezone_double_negative(self): self.assertEqual( (int(((7 * 60)) * 60), False), parse_timezone("+700")) self.assertEqual( (int(((7 * 60)) * 60), True), parse_timezone("--700"))
vbannai/disk-qos-horizon
refs/heads/master
horizon/dashboards/syspanel/networks/subnets/views.py
2
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from horizon import api from horizon import exceptions from horizon import forms from .forms import CreateSubnet, UpdateSubnet LOG = logging.getLogger(__name__) class CreateView(forms.ModalFormView): form_class = CreateSubnet template_name = 'syspanel/networks/subnets/create.html' success_url = 'horizon:syspanel:networks:detail' def get_success_url(self): return reverse(self.success_url, args=(self.kwargs['network_id'],)) def get_object(self): if not hasattr(self, "_object"): try: network_id = self.kwargs["network_id"] self._object = api.quantum.network_get(self.request, network_id) except: redirect = reverse('horizon:nova:networks:index') msg = _("Unable to retrieve network.") exceptions.handle(self.request, msg, redirect=redirect) return self._object def get_context_data(self, **kwargs): context = super(CreateView, self).get_context_data(**kwargs) context['network'] = self.get_object() return context def get_initial(self): network = self.get_object() return {"network_id": self.kwargs['network_id'], "network_name": network.name} class UpdateView(forms.ModalFormView): form_class = UpdateSubnet template_name = 'syspanel/networks/subnets/update.html' context_object_name = 'subnet' success_url = 'horizon:syspanel:networks:detail' def 
get_success_url(self): return reverse(self.success_url, args=(self.kwargs['network_id'],)) def _get_object(self, *args, **kwargs): if not hasattr(self, "_object"): subnet_id = self.kwargs['subnet_id'] try: self._object = api.quantum.subnet_get(self.request, subnet_id) except: redirect = reverse("horizon:syspanel:networks:detail", args=(self.kwargs['network_id'],)) msg = _('Unable to retrieve subnet details') exceptions.handle(self.request, msg, redirect=redirect) return self._object def get_context_data(self, **kwargs): context = super(UpdateView, self).get_context_data(**kwargs) subnet = self._get_object() context['subnet_id'] = subnet['id'] context['network_id'] = subnet['network_id'] context['cidr'] = subnet['cidr'] context['ip_version'] = {4: 'IPv4', 6: 'IPv6'}[subnet['ip_version']] return context def get_initial(self): subnet = self._get_object() return {'network_id': self.kwargs['network_id'], 'subnet_id': subnet['id'], 'tenant_id': subnet['tenant_id'], 'cidr': subnet['cidr'], 'ip_version': subnet['ip_version'], 'name': subnet['name'], 'gateway_ip': subnet['gateway_ip']}
WANdisco/hive
refs/heads/trunk
service/lib/py/fb303_scripts/__init__.py
214
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # __all__ = ['fb303_simple_mgmt']
moreati/django
refs/heads/master
tests/forms_tests/tests/test_regressions.py
135
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.forms import ( CharField, ChoiceField, Form, HiddenInput, IntegerField, ModelForm, ModelMultipleChoiceField, MultipleChoiceField, RadioSelect, Select, TextInput, ) from django.test import TestCase, ignore_warnings from django.utils import translation from django.utils.translation import gettext_lazy, ugettext_lazy from ..models import Cheese class FormsRegressionsTestCase(TestCase): def test_class(self): # Tests to prevent against recurrences of earlier bugs. extra_attrs = {'class': 'special'} class TestForm(Form): f1 = CharField(max_length=10, widget=TextInput(attrs=extra_attrs)) f2 = CharField(widget=TextInput(attrs=extra_attrs)) self.assertHTMLEqual( TestForm(auto_id=False).as_p(), '<p>F1: <input type="text" class="special" name="f1" maxlength="10" /></p>\n' '<p>F2: <input type="text" class="special" name="f2" /></p>' ) def test_regression_3600(self): # Tests for form i18n # # There were some problems with form translations in #3600 class SomeForm(Form): username = CharField(max_length=10, label=ugettext_lazy('username')) f = SomeForm() self.assertHTMLEqual( f.as_p(), '<p><label for="id_username">username:</label>' '<input id="id_username" type="text" name="username" maxlength="10" /></p>' ) # Translations are done at rendering time, so multi-lingual apps can define forms) with translation.override('de'): self.assertHTMLEqual( f.as_p(), '<p><label for="id_username">Benutzername:</label>' '<input id="id_username" type="text" name="username" maxlength="10" /></p>' ) with translation.override('pl'): self.assertHTMLEqual( f.as_p(), '<p><label for="id_username">u\u017cytkownik:</label>' '<input id="id_username" type="text" name="username" maxlength="10" /></p>' ) def test_regression_5216(self): # There was some problems with form translations in #5216 class SomeForm(Form): field_1 = CharField(max_length=10, label=ugettext_lazy('field_1')) field_2 = CharField( max_length=10, 
label=ugettext_lazy('field_2'), widget=TextInput(attrs={'id': 'field_2_id'}), ) f = SomeForm() self.assertHTMLEqual(f['field_1'].label_tag(), '<label for="id_field_1">field_1:</label>') self.assertHTMLEqual(f['field_2'].label_tag(), '<label for="field_2_id">field_2:</label>') # Unicode decoding problems... GENDERS = (('\xc5', 'En tied\xe4'), ('\xf8', 'Mies'), ('\xdf', 'Nainen')) class SomeForm(Form): somechoice = ChoiceField(choices=GENDERS, widget=RadioSelect(), label='\xc5\xf8\xdf') f = SomeForm() self.assertHTMLEqual( f.as_p(), '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label>' '<ul id="id_somechoice">\n' '<li><label for="id_somechoice_0">' '<input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> ' 'En tied\xe4</label></li>\n' '<li><label for="id_somechoice_1">' '<input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> ' 'Mies</label></li>\n<li><label for="id_somechoice_2">' '<input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> ' 'Nainen</label></li>\n</ul></p>' ) # Translated error messages used to be buggy. 
with translation.override('ru'): f = SomeForm({}) self.assertHTMLEqual( f.as_p(), '<ul class="errorlist"><li>' '\u041e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c' '\u043d\u043e\u0435 \u043f\u043e\u043b\u0435.</li></ul>\n' '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label>' ' <ul id="id_somechoice">\n<li><label for="id_somechoice_0">' '<input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> ' 'En tied\xe4</label></li>\n' '<li><label for="id_somechoice_1">' '<input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> ' 'Mies</label></li>\n<li><label for="id_somechoice_2">' '<input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> ' 'Nainen</label></li>\n</ul></p>' ) # Deep copying translated text shouldn't raise an error) class CopyForm(Form): degree = IntegerField(widget=Select(choices=((1, gettext_lazy('test')),))) f = CopyForm() @ignore_warnings(category=UnicodeWarning) def test_regression_5216_b(self): # Testing choice validation with UTF-8 bytestrings as input (these are the # Russian abbreviations "мес." and "шт.". UNITS = ((b'\xd0\xbc\xd0\xb5\xd1\x81.', b'\xd0\xbc\xd0\xb5\xd1\x81.'), (b'\xd1\x88\xd1\x82.', b'\xd1\x88\xd1\x82.')) f = ChoiceField(choices=UNITS) self.assertEqual(f.clean('\u0448\u0442.'), '\u0448\u0442.') self.assertEqual(f.clean(b'\xd1\x88\xd1\x82.'), '\u0448\u0442.') def test_misc(self): # There once was a problem with Form fields called "data". Let's make sure that # doesn't come back. class DataForm(Form): data = CharField(max_length=10) f = DataForm({'data': 'xyzzy'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data, {'data': 'xyzzy'}) # A form with *only* hidden fields that has errors is going to be very unusual. 
class HiddenForm(Form): data = IntegerField(widget=HiddenInput) f = HiddenForm({}) self.assertHTMLEqual( f.as_p(), '<ul class="errorlist nonfield">' '<li>(Hidden field data) This field is required.</li></ul>\n<p> ' '<input type="hidden" name="data" id="id_data" /></p>' ) self.assertHTMLEqual( f.as_table(), '<tr><td colspan="2"><ul class="errorlist nonfield">' '<li>(Hidden field data) This field is required.</li></ul>' '<input type="hidden" name="data" id="id_data" /></td></tr>' ) def test_xss_error_messages(self): ################################################### # Tests for XSS vulnerabilities in error messages # ################################################### # The forms layer doesn't escape input values directly because error messages # might be presented in non-HTML contexts. Instead, the message is just marked # for escaping by the template engine. So we'll need to construct a little # silly template to trigger the escaping. from django.template import Template, Context t = Template('{{ form.errors }}') class SomeForm(Form): field = ChoiceField(choices=[('one', 'One')]) f = SomeForm({'field': '<script>'}) self.assertHTMLEqual( t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist">' '<li>Select a valid choice. &lt;script&gt; is not one of the ' 'available choices.</li></ul></li></ul>' ) class SomeForm(Form): field = MultipleChoiceField(choices=[('one', 'One')]) f = SomeForm({'field': ['<script>']}) self.assertHTMLEqual( t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist">' '<li>Select a valid choice. 
&lt;script&gt; is not one of the ' 'available choices.</li></ul></li></ul>' ) from forms_tests.models import ChoiceModel class SomeForm(Form): field = ModelMultipleChoiceField(ChoiceModel.objects.all()) f = SomeForm({'field': ['<script>']}) self.assertHTMLEqual( t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist">' '<li>&quot;&lt;script&gt;&quot; is not a valid value for a ' 'primary key.</li></ul></li></ul>' ) def test_regression_14234(self): """ Re-cleaning an instance that was added via a ModelForm should not raise a pk uniqueness error. """ class CheeseForm(ModelForm): class Meta: model = Cheese fields = '__all__' form = CheeseForm({ 'name': 'Brie', }) self.assertTrue(form.is_valid()) obj = form.save() obj.name = 'Camembert' obj.full_clean()
bspink/django
refs/heads/master
tests/auth_tests/test_context_processors.py
269
"""
Tests for ``django.contrib.auth.context_processors``.

Exercises the ``PermWrapper``/``PermLookupDict`` helpers directly and the
``auth`` context processor through full request/response cycles against
views wired up in ``auth_tests.urls``.
"""
import datetime

from django.contrib.auth import authenticate
from django.contrib.auth.context_processors import PermLookupDict, PermWrapper
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.test import SimpleTestCase, TestCase, override_settings

from .settings import AUTH_MIDDLEWARE_CLASSES, AUTH_TEMPLATES


class MockUser(object):
    # Minimal stand-in for a user object: grants module-level perms only for
    # 'mockapp' and object-level perms only for 'mockapp.someperm'.
    def has_module_perms(self, perm):
        if perm == 'mockapp':
            return True
        return False

    def has_perm(self, perm):
        if perm == 'mockapp.someperm':
            return True
        return False


class PermWrapperTests(SimpleTestCase):
    """
    Test some details of the PermWrapper implementation.
    """
    class EQLimiterObject(object):
        """
        This object makes sure __eq__ will not be called endlessly.
        """
        def __init__(self):
            # Counts how many times __eq__ ran; the second call returns True
            # so that an endless comparison loop would terminate the test.
            self.eq_calls = 0

        def __eq__(self, other):
            if self.eq_calls > 0:
                return True
            self.eq_calls += 1
            return False

    def test_permwrapper_in(self):
        """
        Test that 'something' in PermWrapper works as expected.
        """
        perms = PermWrapper(MockUser())
        # Works for modules and full permissions.
        self.assertIn('mockapp', perms)
        self.assertNotIn('nonexisting', perms)
        self.assertIn('mockapp.someperm', perms)
        self.assertNotIn('mockapp.nonexisting', perms)

    def test_permlookupdict_in(self):
        """
        No endless loops if accessed with 'in' - refs #18979.
        """
        pldict = PermLookupDict(MockUser(), 'mockapp')
        with self.assertRaises(TypeError):
            self.EQLimiterObject() in pldict


@override_settings(
    PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
    ROOT_URLCONF='auth_tests.urls',
    TEMPLATES=AUTH_TEMPLATES,
    USE_TZ=False,  # required for loading the fixture
)
class AuthContextProcessorTests(TestCase):
    """
    Tests for the ``django.contrib.auth.context_processors.auth`` processor
    """
    @classmethod
    def setUpTestData(cls):
        # password = "secret" (stored below as its SHA1 hash)
        cls.u1 = User.objects.create(
            id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
            last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True,
            username='super', first_name='Super', last_name='User',
            email='super@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
        )

    @override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES)
    def test_session_not_accessed(self):
        """
        Tests that the session is not accessed simply by including
        the auth context processor
        """
        response = self.client.get('/auth_processor_no_attr_access/')
        self.assertContains(response, "Session not accessed")

    @override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES)
    def test_session_is_accessed(self):
        """
        Tests that the session is accessed if the auth context processor
        is used and relevant attributes accessed.
        """
        response = self.client.get('/auth_processor_attr_access/')
        self.assertContains(response, "Session accessed")

    def test_perms_attrs(self):
        # Grant one concrete permission and check both module-level and
        # full-permission lookups made by the rendered template.
        u = User.objects.create_user(username='normal', password='secret')
        u.user_permissions.add(
            Permission.objects.get(
                content_type=ContentType.objects.get_for_model(Permission),
                codename='add_permission'))
        self.client.login(username='normal', password='secret')
        response = self.client.get('/auth_processor_perms/')
        self.assertContains(response, "Has auth permissions")
        self.assertContains(response, "Has auth.add_permission permissions")
        self.assertNotContains(response, "nonexisting")

    def test_perm_in_perms_attrs(self):
        # Same grant as test_perms_attrs, but the template uses the
        # ``in perms`` form of the lookup.
        u = User.objects.create_user(username='normal', password='secret')
        u.user_permissions.add(
            Permission.objects.get(
                content_type=ContentType.objects.get_for_model(Permission),
                codename='add_permission'))
        self.client.login(username='normal', password='secret')
        response = self.client.get('/auth_processor_perm_in_perms/')
        self.assertContains(response, "Has auth permissions")
        self.assertContains(response, "Has auth.add_permission permissions")
        self.assertNotContains(response, "nonexisting")

    def test_message_attrs(self):
        self.client.login(username='super', password='secret')
        response = self.client.get('/auth_processor_messages/')
        self.assertContains(response, "Message 1")

    def test_user_attrs(self):
        """
        Test that the lazy objects returned behave just like the wrapped objects.
        """
        # These are 'functional' level tests for common use cases.  Direct
        # testing of the implementation (SimpleLazyObject) is in the 'utils'
        # tests.
        self.client.login(username='super', password='secret')
        user = authenticate(username='super', password='secret')
        response = self.client.get('/auth_processor_user/')
        self.assertContains(response, "unicode: super")
        self.assertContains(response, "id: 100")
        self.assertContains(response, "username: super")
        # bug #12037 is tested by the {% url %} in the template:
        self.assertContains(response, "url: /userpage/super/")

        # See if this object can be used for queries where a Q() comparing
        # a user can be used with another Q() (in an AND or OR fashion).
        # This simulates what a template tag might do with the user from the
        # context. Note that we don't need to execute a query, just build it.
        #
        # The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
        # User is a fatal TypeError: "function() takes at least 2 arguments
        # (0 given)" deep inside deepcopy().
        #
        # Python 2.5 and 2.6 succeeded, but logged internally caught exception
        # spew:
        #
        #     Exception RuntimeError: 'maximum recursion depth exceeded while
        #     calling a Python object' in <type 'exceptions.AttributeError'>
        #     ignored"
        Q(user=response.context['user']) & Q(someflag=True)

        # Tests for user equality.  This is hard because User defines
        # equality in a non-duck-typing way
        # See bug #12060
        self.assertEqual(response.context['user'], user)
        self.assertEqual(user, response.context['user'])
westinedu/sovleit
refs/heads/master
django/conf/locale/lt/formats.py
332
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Localized format definitions for Lithuanian (lt).
#
# The *_FORMAT values below follow Django's template date-format syntax;
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'Y \m. F j \d.'
TIME_FORMAT = 'H:i:s'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
SHORT_DATE_FORMAT = 'Y.m.d'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS values follow Python's strftime/strptime syntax;
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =

# Number formatting: comma as decimal mark, dot as thousands separator.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
tgoodyear/dpkt
refs/heads/master
tests/test-perf.py
24
#!/usr/bin/env python
# Benchmark comparing packet-creation, packet-parsing and checksum speed of
# dpkt against several other packet libraries (impacket, openbsd.packet,
# scapy, xstruct).  NOTE: this is Python 2 code (print statements, xrange).

import time, sys
import dnet
sys.path.insert(0, '.')
import dpkt
from impacket import ImpactDecoder, ImpactPacket
from openbsd import packet
import scapy
import xstruct

# Hand-built IP header template used by the xstruct benchmark
# (big-endian field layout mirroring a default IPv4 header).
xip = xstruct.structdef('>', [
    ('v_hl', ('B', 1), (4 << 4) | (dnet.IP_HDR_LEN >> 2)),
    ('tos', ('B', 1), dnet.IP_TOS_DEFAULT),
    ('len', ('H', 1), dnet.IP_HDR_LEN),
    ('id', ('H', 1), 0),
    ('off', ('H', 1), 0),
    ('ttl', ('B', 1), dnet.IP_TTL_DEFAULT),
    ('p', ('B', 1), 0),
    ('sum', ('H', 1), 0),
    ('src', ('s', dnet.IP_ADDR_LEN), dnet.IP_ADDR_ANY),
    ('dst', ('s', dnet.IP_ADDR_LEN), dnet.IP_ADDR_ANY)
])

# Matching UDP header template for xstruct.
xudp = xstruct.structdef('>', [
    ('sport', ('B', 1), 0),
    ('dport', ('B', 1), 0),
    ('ulen', ('H', 1), dnet.UDP_HDR_LEN),
    ('sum', ('H', 1), 0)
])


def compare_create(cnt):
    """Benchmark building an IP/UDP packet ``cnt`` times with each library.

    Reference numbers from a past run:
    dpkt: 14915.2445937 pps
    dpkt (manual): 15494.3632903 pps
    impacket: 3929.30572776 pps
    openbsd.packet: 1503.7928579 pps
    scapy: 348.449269721 pps
    xstruct: 88314.8953732 pps
    """
    src = dnet.addr('1.2.3.4').ip
    dst = dnet.addr('5.6.7.8').ip
    data = 'hello world'

    # dpkt: nested IP/UDP objects serialized and checksummed in one go.
    start = time.time()
    for i in xrange(cnt):
        dnet.ip_checksum(
            str(dpkt.ip.IP(src=src, dst=dst, p=dnet.IP_PROTO_UDP,
                           len = dnet.IP_HDR_LEN + dnet.UDP_HDR_LEN + len(data),
                           data=dpkt.udp.UDP(sport=111, dport=222,
                                             ulen=dnet.UDP_HDR_LEN + len(data),
                                             data=data))))
    print 'dpkt:', cnt / (time.time() - start), 'pps'

    # dpkt "manual": headers serialized separately, payload concatenated.
    start = time.time()
    for i in xrange(cnt):
        dnet.ip_checksum(str(dpkt.ip.IP(src=src, dst=dst, p=dnet.IP_PROTO_UDP,
            len=dnet.IP_HDR_LEN + dnet.UDP_HDR_LEN + len(data))) +
            str(dpkt.udp.UDP(sport=111, dport=222,
                             ulen=dnet.UDP_HDR_LEN + len(data))) + data)
    print 'dpkt (manual):', cnt / (time.time() - start), 'pps'

    start = time.time()
    for i in xrange(cnt):
        ip = ImpactPacket.IP()
        ip.set_ip_src('1.2.3.4')
        ip.set_ip_dst('5.6.7.8')
        udp = ImpactPacket.UDP()
        udp.set_uh_sport(111)
        udp.set_uh_dport(222)
        udp.contains(ImpactPacket.Data(data))
        ip.contains(udp)
        ip.get_packet()
    print 'impacket:', cnt / (time.time() - start), 'pps'

    start = time.time()
    for i in xrange(cnt):
        p = packet.createPacket(packet.IP, packet.UDP)
        p['ip'].src = '1.2.3.4'
        p['ip'].dst = '5.6.7.8'
        p['udp'].sport = 111
        # NOTE(review): dport is 22 here while every other library in this
        # benchmark uses 222 — looks like a typo; verify before comparing.
        p['udp'].dport = 22
        p['udp'].payload = data
        p.finalise()
        p.getRaw()
    print 'openbsd.packet:', cnt / (time.time() - start), 'pps'

    start = time.time()
    for i in xrange(cnt):
        ip = scapy.IP(src='1.2.3.4', dst='5.6.7.8') / \
             scapy.UDP(sport=111, dport=222) / data
        ip.build()
    print 'scapy:', cnt / (time.time() - start), 'pps'

    # xstruct: mutate the pre-built header templates and concatenate.
    start = time.time()
    for i in xrange(cnt):
        udp = xudp()
        udp.sport = 111
        udp.dport = 222
        udp.ulen = dnet.UDP_HDR_LEN + len(data)
        ip = xip()
        ip.src = src
        ip.dst = dst
        ip.p = dnet.IP_PROTO_UDP
        ip.len = dnet.IP_HDR_LEN + udp.ulen
        dnet.ip_checksum(str(ip) + str(udp) + data)
    print 'xstruct:', cnt / (time.time() - start), 'pps'


def compare_parse(cnt):
    """Benchmark parsing one captured IP datagram ``cnt`` times per library.

    Reference numbers from a past run:
    dpkt: 23347.462887 pps
    impacket: 9937.75963595 pps
    openbsd.packet: 6826.5955563 pps
    scapy: 1461.74727127 pps
    xstruct: 206100.202449 pps
    """
    # Raw bytes of a captured IP datagram (ICMP echo, judging by proto 0x01
    # in the header — TODO confirm); fed unchanged to every parser.
    s = 'E\x00\x00T\xc2\xf3\x00\x00\xff\x01\xe2\x18\n\x00\x01\x92\n\x00\x01\x0b\x08\x00\xfc\x11:g\x00\x00A,\xc66\x00\x0e\xcf\x12\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f!"#$%&\'()*+,-./01234567'

    start = time.time()
    for i in xrange(cnt):
        dpkt.ip.IP(s)
    print 'dpkt:', cnt / (time.time() - start), 'pps'

    decoder = ImpactDecoder.IPDecoder()
    start = time.time()
    for i in xrange(cnt):
        decoder.decode(s)
    print 'impacket:', cnt / (time.time() - start), 'pps'

    start = time.time()
    for i in xrange(cnt):
        packet.Packet(packet.IP, s)
    print 'openbsd.packet:', cnt / (time.time() - start), 'pps'

    start = time.time()
    for i in xrange(cnt):
        scapy.IP(s)
    print 'scapy:', cnt / (time.time() - start), 'pps'

    # xstruct has no nesting, so slice the buffer into IP/UDP/payload by hand.
    start = time.time()
    for i in xrange(cnt):
        ip = xip(s[:dnet.IP_HDR_LEN])
        udp = xudp(s[dnet.IP_HDR_LEN:dnet.IP_HDR_LEN + dnet.UDP_HDR_LEN])
        data = s[dnet.IP_HDR_LEN + dnet.UDP_HDR_LEN:]
    print 'xstruct:', cnt / (time.time() - start), 'pps'


def compare_checksum(cnt):
    # Benchmark the two checksum implementations over an 80-byte buffer.
    # (Uses range, not xrange, unlike the other benchmarks above.)
    s = 'A' * 80
    start = time.time()
    for i in range(cnt):
        dpkt.in_cksum(s)
    print 'dpkt.in_cksum:', cnt / (time.time() - start), 'pps'

    start = time.time()
    for i in range(cnt):
        dnet.ip_cksum_carry(dnet.ip_cksum_add(s, 0))
    print 'dnet.ip_cksum_add/carry:', cnt / (time.time() - start), 'pps'


def main():
    # psyco JIT speeds up the pure-Python contenders (Python 2 only).
    import psyco
    psyco.full()

    ITER=10000

    print 'checksum:'
    compare_checksum(100000)
    print 'create:'
    compare_create(ITER)
    print 'parse:'
    compare_parse(ITER)

if __name__ == '__main__':
    main()

# The string below is a kept-for-reference hotshot profiling harness,
# deliberately parked in a module-level string so it never executes.
"""
import hotshot, hotshot.stats
prof = hotshot.Profile('/var/tmp/dpkt.prof')
prof.runcall(main)
prof.close()
stats = hotshot.stats.load('/var/tmp/dpkt.prof')
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
"""
whitepyro/debian_server_setup
refs/heads/master
lib/pysrt/srtexc.py
94
""" Exception classes """ class Error(Exception): """ Pysrt's base exception """ pass class InvalidTimeString(Error): """ Raised when parser fail on bad formated time strings """ pass class InvalidItem(Error): """ Raised when parser fail to parse a sub title item """ pass class InvalidIndex(InvalidItem): """ Raised when parser fail to parse a sub title index """ pass
youprofit/servo
refs/heads/master
tests/wpt/web-platform-tests/XMLHttpRequest/resources/infinite-redirects.py
228
def main(request, response):
    """wptserve handler that produces an endless redirect loop.

    Every response redirects back to this same resource with a flipped
    ``page`` value, so each hop looks like a "new" redirect target.

    Query parameters:
        page: when "alternate", the next hop uses "default" (and back).
        type: "301" requests a 301 redirect instead of the default 302.
        mix:  "1" alternates the status code between 301 and 302 per hop.

    Returns the ``(status, headers, body)`` triple wptserve expects.
    """
    # url_parts is a urlsplit()-style tuple: netloc already carries
    # "host:port" and path already starts with "/", so joining them directly
    # yields a well-formed absolute URL.  (The previous version appended the
    # port a second time and doubled the leading slash.)
    location = "%s://%s%s" % (request.url_parts.scheme,
                              request.url_parts.netloc,
                              request.url_parts.path)

    page = "alternate"
    status = 302
    mix = 0
    if request.GET.first("page", None) == "alternate":
        page = "default"
    if request.GET.first("type", None) == "301":
        status = 301
    if request.GET.first("mix", None) == "1":
        mix = 1
        # Alternate between the two redirect types on every hop.
        status = 302 if status == 301 else 301

    new_location = "%s?page=%s&type=%s&mix=%s" % (location, page, status, mix)
    headers = [("Cache-Control", "no-cache"),
               ("Pragma", "no-cache"),
               ("Location", new_location)]
    # Return the status that was actually computed; the previous version
    # always answered 301 no matter what ``type``/``mix`` requested.
    return status, headers, "Hello guest. You have been redirected to " + new_location
jonberliner/keras
refs/heads/master
keras/utils/theano_utils.py
19
from __future__ import absolute_import import numpy as np import theano import theano.tensor as T def floatX(X): return np.asarray(X, dtype=theano.config.floatX) def sharedX(X, dtype=theano.config.floatX, name=None): return theano.shared(np.asarray(X, dtype=dtype), name=name) def shared_zeros(shape, dtype=theano.config.floatX, name=None): return sharedX(np.zeros(shape), dtype=dtype, name=name) def shared_scalar(val=0., dtype=theano.config.floatX, name=None): return theano.shared(np.cast[dtype](val)) def shared_ones(shape, dtype=theano.config.floatX, name=None): return sharedX(np.ones(shape), dtype=dtype, name=name) def alloc_zeros_matrix(*dims): return T.alloc(np.cast[theano.config.floatX](0.), *dims)
amol-/depot
refs/heads/master
depot/io/memory.py
1
""" Provides MemoryStorage implementation to keep files in memory. This is useful for caches or tests. """ import uuid import io from datetime import datetime from .interfaces import FileStorage, StoredFile from . import utils from .._compat import unicode_text class MemoryStoredFile(StoredFile): def __init__(self, files, file_id, file_data): _check_file_id(file_id) self._files = files self._files_key = file_id self._file = None super(MemoryStoredFile, self).__init__(file_id=file_id, **file_data['metadata']) def read(self, n=-1): if self._file is None: self._file = io.BytesIO(self._files[self._files_key]['data']) return self._file.read(n) def close(self): if self._file is None: self._file = io.BytesIO(self._files[self._files_key]['data']) self._file.close() @property def closed(self): if self._file is None: return False return self._file.closed class MemoryFileStorage(FileStorage): """:class:`depot.io.interfaces.FileStorage` implementation that keeps files in memory. This is generally useful for caches and tests. 
""" def __init__(self, **kwargs): self.files = {} def get(self, file_or_id): fileid = self.fileid(file_or_id) _check_file_id(fileid) try: file_data = self.files[fileid] except KeyError: raise IOError('File %s not existing' % fileid) return MemoryStoredFile(self.files, fileid, file_data) def __save_file(self, file_id, content, filename, content_type=None): if hasattr(content, 'read'): data = content.read() else: if isinstance(content, unicode_text): raise TypeError('Only bytes can be stored, not unicode') data = content self.files[file_id] = { 'data': data, 'metadata': { 'filename': filename or 'unknown', 'content_type': content_type, 'content_length': len(data), 'last_modified': datetime.strptime(utils.timestamp(), '%Y-%m-%d %H:%M:%S') } } def create(self, content, filename=None, content_type=None): new_file_id = str(uuid.uuid1()) content, filename, content_type = self.fileinfo(content, filename, content_type) self.__save_file(new_file_id, content, filename, content_type) return new_file_id def replace(self, file_or_id, content, filename=None, content_type=None): fileid = self.fileid(file_or_id) _check_file_id(fileid) # First check file existed and we are not using replace # as a way to force a specific file id on creation. if fileid not in self.files: raise IOError('File %s not existing' % file_or_id) content, filename, content_type = self.fileinfo(content, filename, content_type, lambda: self.get(fileid)) self.delete(fileid) self.__save_file(fileid, content, filename, content_type) return fileid def delete(self, file_or_id): fileid = self.fileid(file_or_id) _check_file_id(fileid) self.files.pop(fileid, None) def exists(self, file_or_id): fileid = self.fileid(file_or_id) _check_file_id(fileid) return fileid in self.files def list(self): return list(self.files.keys()) def _check_file_id(file_id): # Check that the given file id is valid, this also # prevents unsafe paths. try: uuid.UUID('{%s}' % file_id) except: raise ValueError('Invalid file id %s' % file_id)
Kra1o5/android_kernel_bq_curie2qc
refs/heads/cm-11.0
arch/ia64/scripts/unwcheck.py
13143
#!/usr/bin/python # # Usage: unwcheck.py FILE # # This script checks the unwind info of each function in file FILE # and verifies that the sum of the region-lengths matches the total # length of the function. # # Based on a shell/awk script originally written by Harish Patil, # which was converted to Perl by Matthew Chapman, which was converted # to Python by David Mosberger. # import os import re import sys if len(sys.argv) != 2: print "Usage: %s FILE" % sys.argv[0] sys.exit(2) readelf = os.getenv("READELF", "readelf") start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") rlen_pattern = re.compile(".*rlen=([0-9]+)") def check_func (func, slots, rlen_sum): if slots != rlen_sum: global num_errors num_errors += 1 if not func: func = "[%#x-%#x]" % (start, end) print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) return num_funcs = 0 num_errors = 0 func = False slots = 0 rlen_sum = 0 for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): m = start_pattern.match(line) if m: check_func(func, slots, rlen_sum) func = m.group(1) start = long(m.group(2), 16) end = long(m.group(3), 16) slots = 3 * (end - start) / 16 rlen_sum = 0L num_funcs += 1 else: m = rlen_pattern.match(line) if m: rlen_sum += long(m.group(1)) check_func(func, slots, rlen_sum) if num_errors == 0: print "No errors detected in %u functions." % num_funcs else: if num_errors > 1: err="errors" else: err="error" print "%u %s detected in %u functions." % (num_errors, err, num_funcs) sys.exit(1)
mohammed-alfatih/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/echo_client.py
442
#!/usr/bin/env python # # Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Simple WebSocket client named echo_client just because of historical reason. mod_pywebsocket directory must be in PYTHONPATH. 
Example Usage: # server setup % cd $pywebsocket % PYTHONPATH=$cwd/src python ./mod_pywebsocket/standalone.py -p 8880 \ -d $cwd/src/example # run client % PYTHONPATH=$cwd/src python ./src/example/echo_client.py -p 8880 \ -s localhost \ -o http://localhost -r /echo -m test or # run echo client to test IETF HyBi 00 protocol run with --protocol-version=hybi00 """ import base64 import codecs import logging from optparse import OptionParser import os import random import re import socket import struct import sys from mod_pywebsocket import common from mod_pywebsocket.extensions import DeflateFrameExtensionProcessor from mod_pywebsocket.extensions import PerMessageDeflateExtensionProcessor from mod_pywebsocket.extensions import _PerMessageDeflateFramer from mod_pywebsocket.extensions import _parse_window_bits from mod_pywebsocket.stream import Stream from mod_pywebsocket.stream import StreamHixie75 from mod_pywebsocket.stream import StreamOptions from mod_pywebsocket import util _TIMEOUT_SEC = 10 _UNDEFINED_PORT = -1 _UPGRADE_HEADER = 'Upgrade: websocket\r\n' _UPGRADE_HEADER_HIXIE75 = 'Upgrade: WebSocket\r\n' _CONNECTION_HEADER = 'Connection: Upgrade\r\n' # Special message that tells the echo server to start closing handshake _GOODBYE_MESSAGE = 'Goodbye' _PROTOCOL_VERSION_HYBI13 = 'hybi13' _PROTOCOL_VERSION_HYBI08 = 'hybi08' _PROTOCOL_VERSION_HYBI00 = 'hybi00' _PROTOCOL_VERSION_HIXIE75 = 'hixie75' # Constants for the --tls_module flag. _TLS_BY_STANDARD_MODULE = 'ssl' _TLS_BY_PYOPENSSL = 'pyopenssl' # Values used by the --tls-version flag. _TLS_VERSION_SSL23 = 'ssl23' _TLS_VERSION_SSL3 = 'ssl3' _TLS_VERSION_TLS1 = 'tls1' class ClientHandshakeError(Exception): pass def _build_method_line(resource): return 'GET %s HTTP/1.1\r\n' % resource def _origin_header(header, origin): # 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character, # and the /origin/ value, converted to ASCII lowercase, to /fields/. 
return '%s: %s\r\n' % (header, origin.lower()) def _format_host_header(host, port, secure): # 4.1 9. Let /hostport/ be an empty string. # 4.1 10. Append the /host/ value, converted to ASCII lowercase, to # /hostport/ hostport = host.lower() # 4.1 11. If /secure/ is false, and /port/ is not 80, or if /secure/ # is true, and /port/ is not 443, then append a U+003A COLON character # (:) followed by the value of /port/, expressed as a base-ten integer, # to /hostport/ if ((not secure and port != common.DEFAULT_WEB_SOCKET_PORT) or (secure and port != common.DEFAULT_WEB_SOCKET_SECURE_PORT)): hostport += ':' + str(port) # 4.1 12. concatenation of the string "Host:", a U+0020 SPACE # character, and /hostport/, to /fields/. return '%s: %s\r\n' % (common.HOST_HEADER, hostport) def _receive_bytes(socket, length): bytes = [] remaining = length while remaining > 0: received_bytes = socket.recv(remaining) if not received_bytes: raise IOError( 'Connection closed before receiving requested length ' '(requested %d bytes but received only %d bytes)' % (length, length - remaining)) bytes.append(received_bytes) remaining -= len(received_bytes) return ''.join(bytes) def _get_mandatory_header(fields, name): """Gets the value of the header specified by name from fields. This function expects that there's only one header with the specified name in fields. Otherwise, raises an ClientHandshakeError. """ values = fields.get(name.lower()) if values is None or len(values) == 0: raise ClientHandshakeError( '%s header not found: %r' % (name, values)) if len(values) > 1: raise ClientHandshakeError( 'Multiple %s headers found: %r' % (name, values)) return values[0] def _validate_mandatory_header(fields, name, expected_value, case_sensitive=False): """Gets and validates the value of the header specified by name from fields. If expected_value is specified, compares expected value and actual value and raises an ClientHandshakeError on failure. 
You can specify case sensitiveness in this comparison by case_sensitive parameter. This function expects that there's only one header with the specified name in fields. Otherwise, raises an ClientHandshakeError. """ value = _get_mandatory_header(fields, name) if ((case_sensitive and value != expected_value) or (not case_sensitive and value.lower() != expected_value.lower())): raise ClientHandshakeError( 'Illegal value for header %s: %r (expected) vs %r (actual)' % (name, expected_value, value)) class _TLSSocket(object): """Wrapper for a TLS connection.""" def __init__(self, raw_socket, tls_module, tls_version, disable_tls_compression): self._logger = util.get_class_logger(self) if tls_module == _TLS_BY_STANDARD_MODULE: if tls_version == _TLS_VERSION_SSL23: version = ssl.PROTOCOL_SSLv23 elif tls_version == _TLS_VERSION_SSL3: version = ssl.PROTOCOL_SSLv3 elif tls_version == _TLS_VERSION_TLS1: version = ssl.PROTOCOL_TLSv1 else: raise ValueError( 'Invalid --tls-version flag: %r' % tls_version) if disable_tls_compression: raise ValueError( '--disable-tls-compression is not available for ssl ' 'module') self._tls_socket = ssl.wrap_socket(raw_socket, ssl_version=version) # Print cipher in use. Handshake is done on wrap_socket call. self._logger.info("Cipher: %s", self._tls_socket.cipher()) elif tls_module == _TLS_BY_PYOPENSSL: if tls_version == _TLS_VERSION_SSL23: version = OpenSSL.SSL.SSLv23_METHOD elif tls_version == _TLS_VERSION_SSL3: version = OpenSSL.SSL.SSLv3_METHOD elif tls_version == _TLS_VERSION_TLS1: version = OpenSSL.SSL.TLSv1_METHOD else: raise ValueError( 'Invalid --tls-version flag: %r' % tls_version) context = OpenSSL.SSL.Context(version) if disable_tls_compression: # OP_NO_COMPRESSION is not defined in OpenSSL module. context.set_options(0x00020000) self._tls_socket = OpenSSL.SSL.Connection(context, raw_socket) # Client mode. self._tls_socket.set_connect_state() self._tls_socket.setblocking(True) # Do handshake now (not necessary). 
self._tls_socket.do_handshake() else: raise ValueError('No TLS support module is available') def send(self, data): return self._tls_socket.write(data) def sendall(self, data): return self._tls_socket.sendall(data) def recv(self, size=-1): return self._tls_socket.read(size) def close(self): return self._tls_socket.close() def getpeername(self): return self._tls_socket.getpeername() class ClientHandshakeBase(object): """A base class for WebSocket opening handshake processors for each protocol version. """ def __init__(self): self._logger = util.get_class_logger(self) def _read_fields(self): # 4.1 32. let /fields/ be a list of name-value pairs, initially empty. fields = {} while True: # "Field" # 4.1 33. let /name/ and /value/ be empty byte arrays name = '' value = '' # 4.1 34. read /name/ name = self._read_name() if name is None: break # 4.1 35. read spaces # TODO(tyoshino): Skip only one space as described in the spec. ch = self._skip_spaces() # 4.1 36. read /value/ value = self._read_value(ch) # 4.1 37. read a byte from the server ch = _receive_bytes(self._socket, 1) if ch != '\n': # 0x0A raise ClientHandshakeError( 'Expected LF but found %r while reading value %r for ' 'header %r' % (ch, value, name)) self._logger.debug('Received %r header', name) # 4.1 38. append an entry to the /fields/ list that has the name # given by the string obtained by interpreting the /name/ byte # array as a UTF-8 stream and the value given by the string # obtained by interpreting the /value/ byte array as a UTF-8 byte # stream. fields.setdefault(name, []).append(value) # 4.1 39. return to the "Field" step above return fields def _read_name(self): # 4.1 33. let /name/ be empty byte arrays name = '' while True: # 4.1 34. 
read a byte from the server ch = _receive_bytes(self._socket, 1) if ch == '\r': # 0x0D return None elif ch == '\n': # 0x0A raise ClientHandshakeError( 'Unexpected LF when reading header name %r' % name) elif ch == ':': # 0x3A return name elif ch >= 'A' and ch <= 'Z': # Range 0x31 to 0x5A ch = chr(ord(ch) + 0x20) name += ch else: name += ch def _skip_spaces(self): # 4.1 35. read a byte from the server while True: ch = _receive_bytes(self._socket, 1) if ch == ' ': # 0x20 continue return ch def _read_value(self, ch): # 4.1 33. let /value/ be empty byte arrays value = '' # 4.1 36. read a byte from server. while True: if ch == '\r': # 0x0D return value elif ch == '\n': # 0x0A raise ClientHandshakeError( 'Unexpected LF when reading header value %r' % value) else: value += ch ch = _receive_bytes(self._socket, 1) def _get_permessage_deflate_framer(extension_response): """Validate the response and return a framer object using the parameters in the response. This method doesn't accept the server_.* parameters. """ client_max_window_bits = None client_no_context_takeover = None client_max_window_bits_name = ( PerMessageDeflateExtensionProcessor. _CLIENT_MAX_WINDOW_BITS_PARAM) client_no_context_takeover_name = ( PerMessageDeflateExtensionProcessor. _CLIENT_NO_CONTEXT_TAKEOVER_PARAM) # We didn't send any server_.* parameter. # Handle those parameters as invalid if found in the response. 
for param_name, param_value in extension_response.get_parameters(): if param_name == client_max_window_bits_name: if client_max_window_bits is not None: raise ClientHandshakeError( 'Multiple %s found' % client_max_window_bits_name) parsed_value = _parse_window_bits(param_value) if parsed_value is None: raise ClientHandshakeError( 'Bad %s: %r' % (client_max_window_bits_name, param_value)) client_max_window_bits = parsed_value elif param_name == client_no_context_takeover_name: if client_no_context_takeover is not None: raise ClientHandshakeError( 'Multiple %s found' % client_no_context_takeover_name) if param_value is not None: raise ClientHandshakeError( 'Bad %s: Has value %r' % (client_no_context_takeover_name, param_value)) client_no_context_takeover = True if client_no_context_takeover is None: client_no_context_takeover = False return _PerMessageDeflateFramer(client_max_window_bits, client_no_context_takeover) class ClientHandshakeProcessor(ClientHandshakeBase): """WebSocket opening handshake processor for draft-ietf-hybi-thewebsocketprotocol-06 and later. """ def __init__(self, socket, options): super(ClientHandshakeProcessor, self).__init__() self._socket = socket self._options = options self._logger = util.get_class_logger(self) def handshake(self): """Performs opening handshake on the specified socket. Raises: ClientHandshakeError: handshake failed. 
""" request_line = _build_method_line(self._options.resource) self._logger.debug('Client\'s opening handshake Request-Line: %r', request_line) self._socket.sendall(request_line) fields = [] fields.append(_format_host_header( self._options.server_host, self._options.server_port, self._options.use_tls)) fields.append(_UPGRADE_HEADER) fields.append(_CONNECTION_HEADER) if self._options.origin is not None: if self._options.protocol_version == _PROTOCOL_VERSION_HYBI08: fields.append(_origin_header( common.SEC_WEBSOCKET_ORIGIN_HEADER, self._options.origin)) else: fields.append(_origin_header(common.ORIGIN_HEADER, self._options.origin)) original_key = os.urandom(16) self._key = base64.b64encode(original_key) self._logger.debug( '%s: %r (%s)', common.SEC_WEBSOCKET_KEY_HEADER, self._key, util.hexify(original_key)) fields.append( '%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY_HEADER, self._key)) if self._options.version_header > 0: fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER, self._options.version_header)) elif self._options.protocol_version == _PROTOCOL_VERSION_HYBI08: fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER, common.VERSION_HYBI08)) else: fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER, common.VERSION_HYBI_LATEST)) extensions_to_request = [] if self._options.deflate_frame: extensions_to_request.append( common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)) if self._options.use_permessage_deflate: extension = common.ExtensionParameter( common.PERMESSAGE_DEFLATE_EXTENSION) # Accept the client_max_window_bits extension parameter by default. extension.add_parameter( PerMessageDeflateExtensionProcessor. 
_CLIENT_MAX_WINDOW_BITS_PARAM, None) extensions_to_request.append(extension) if len(extensions_to_request) != 0: fields.append( '%s: %s\r\n' % (common.SEC_WEBSOCKET_EXTENSIONS_HEADER, common.format_extensions(extensions_to_request))) for field in fields: self._socket.sendall(field) self._socket.sendall('\r\n') self._logger.debug('Sent client\'s opening handshake headers: %r', fields) self._logger.debug('Start reading Status-Line') status_line = '' while True: ch = _receive_bytes(self._socket, 1) status_line += ch if ch == '\n': break m = re.match('HTTP/\\d+\.\\d+ (\\d\\d\\d) .*\r\n', status_line) if m is None: raise ClientHandshakeError( 'Wrong status line format: %r' % status_line) status_code = m.group(1) if status_code != '101': self._logger.debug('Unexpected status code %s with following ' 'headers: %r', status_code, self._read_fields()) raise ClientHandshakeError( 'Expected HTTP status code 101 but found %r' % status_code) self._logger.debug('Received valid Status-Line') self._logger.debug('Start reading headers until we see an empty line') fields = self._read_fields() ch = _receive_bytes(self._socket, 1) if ch != '\n': # 0x0A raise ClientHandshakeError( 'Expected LF but found %r while reading value %r for header ' 'name %r' % (ch, value, name)) self._logger.debug('Received an empty line') self._logger.debug('Server\'s opening handshake headers: %r', fields) _validate_mandatory_header( fields, common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE, False) _validate_mandatory_header( fields, common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE, False) accept = _get_mandatory_header( fields, common.SEC_WEBSOCKET_ACCEPT_HEADER) # Validate try: binary_accept = base64.b64decode(accept) except TypeError, e: raise HandshakeError( 'Illegal value for header %s: %r' % (common.SEC_WEBSOCKET_ACCEPT_HEADER, accept)) if len(binary_accept) != 20: raise ClientHandshakeError( 'Decoded value of %s is not 20-byte long' % common.SEC_WEBSOCKET_ACCEPT_HEADER) self._logger.debug( 
'Response for challenge : %r (%s)', accept, util.hexify(binary_accept)) binary_expected_accept = util.sha1_hash( self._key + common.WEBSOCKET_ACCEPT_UUID).digest() expected_accept = base64.b64encode(binary_expected_accept) self._logger.debug( 'Expected response for challenge: %r (%s)', expected_accept, util.hexify(binary_expected_accept)) if accept != expected_accept: raise ClientHandshakeError( 'Invalid %s header: %r (expected: %s)' % (common.SEC_WEBSOCKET_ACCEPT_HEADER, accept, expected_accept)) deflate_frame_accepted = False permessage_deflate_accepted = False extensions_header = fields.get( common.SEC_WEBSOCKET_EXTENSIONS_HEADER.lower()) accepted_extensions = [] if extensions_header is not None and len(extensions_header) != 0: accepted_extensions = common.parse_extensions(extensions_header[0]) # TODO(bashi): Support the new style perframe compression extension. for extension in accepted_extensions: extension_name = extension.name() if (extension_name == common.DEFLATE_FRAME_EXTENSION and self._options.deflate_frame): deflate_frame_accepted = True processor = DeflateFrameExtensionProcessor(extension) unused_extension_response = processor.get_extension_response() self._options.deflate_frame = processor continue elif (extension_name == common.PERMESSAGE_DEFLATE_EXTENSION and self._options.use_permessage_deflate): permessage_deflate_accepted = True framer = _get_permessage_deflate_framer(extension) framer.set_compress_outgoing_enabled(True) self._options.use_permessage_deflate = framer continue raise ClientHandshakeError( 'Unexpected extension %r' % extension_name) if (self._options.deflate_frame and not deflate_frame_accepted): raise ClientHandshakeError( 'Requested %s, but the server rejected it' % common.DEFLATE_FRAME_EXTENSION) if (self._options.use_permessage_deflate and not permessage_deflate_accepted): raise ClientHandshakeError( 'Requested %s, but the server rejected it' % common.PERMESSAGE_DEFLATE_EXTENSION) # TODO(tyoshino): Handle Sec-WebSocket-Protocol 
# TODO(tyoshino): Handle Cookie, etc.


class ClientHandshakeProcessorHybi00(ClientHandshakeBase):
    """WebSocket opening handshake processor for
    draft-ietf-hybi-thewebsocketprotocol-00 (equivalent to
    draft-hixie-thewebsocketprotocol-76).

    NOTE(review): This module is Python 2 code (print statements,
    ``except E, e`` syntax, ``xrange``, str used as a byte string, and
    implicit integer division). Keep it on Python 2.
    """

    def __init__(self, socket, options):
        super(ClientHandshakeProcessorHybi00, self).__init__()

        self._socket = socket
        self._options = options

        self._logger = util.get_class_logger(self)

        # HyBi 00 has no extension negotiation mechanism, so requesting any
        # compression extension is a fatal configuration error.
        if (self._options.deflate_frame or
            self._options.use_permessage_deflate):
            logging.critical('HyBi 00 doesn\'t support extensions.')
            sys.exit(1)

    def handshake(self):
        """Performs opening handshake on the specified socket.

        The numbered comments below refer to the corresponding steps of
        section 4.1 of draft-hixie-thewebsocketprotocol-76.

        Raises:
            ClientHandshakeError: handshake failed.
        """
        # 4.1 5. send request line.
        self._socket.sendall(_build_method_line(self._options.resource))
        # 4.1 6. Let /fields/ be an empty list of strings.
        fields = []
        # 4.1 7. Add the string "Upgrade: WebSocket" to /fields/.
        fields.append(_UPGRADE_HEADER_HIXIE75)
        # 4.1 8. Add the string "Connection: Upgrade" to /fields/.
        fields.append(_CONNECTION_HEADER)
        # 4.1 9-12. Add Host: field to /fields/.
        fields.append(_format_host_header(
            self._options.server_host,
            self._options.server_port,
            self._options.use_tls))
        # 4.1 13. Add Origin: field to /fields/.
        if not self._options.origin:
            raise ClientHandshakeError(
                'Specify the origin of the connection by --origin flag')
        fields.append(_origin_header(common.ORIGIN_HEADER,
                                     self._options.origin))
        # TODO: 4.1 14 Add Sec-WebSocket-Protocol: field to /fields/.
        # TODO: 4.1 15 Add cookie headers to /fields/.
        # 4.1 16-23. Add Sec-WebSocket-Key<n> to /fields/.
        # The numbers are retained on self because they are needed again in
        # step 42 to compute the expected challenge response.
        self._number1, key1 = self._generate_sec_websocket_key()
        self._logger.debug('Number1: %d', self._number1)
        fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY1_HEADER, key1))
        self._number2, key2 = self._generate_sec_websocket_key()
        self._logger.debug('Number2: %d', self._number2)
        fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY2_HEADER, key2))

        fields.append('%s: 0\r\n' % common.SEC_WEBSOCKET_DRAFT_HEADER)

        # 4.1 24. For each string in /fields/, in a random order: send the
        # string, encoded as UTF-8, followed by a UTF-8 encoded U+000D CARRIAGE
        # RETURN U+000A LINE FEED character pair (CRLF).
        random.shuffle(fields)
        for field in fields:
            self._socket.sendall(field)
        # 4.1 25. send a UTF-8-encoded U+000D CARRIAGE RETURN U+000A LINE FEED
        # character pair (CRLF).
        self._socket.sendall('\r\n')
        # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
        # equivalently, a random 64 bit integer encoded in a big-endian order).
        self._key3 = self._generate_key3()
        # 4.1 27. send /key3/ to the server.
        self._socket.sendall(self._key3)
        self._logger.debug(
            'Key3: %r (%s)', self._key3, util.hexify(self._key3))

        self._logger.info('Sent handshake')

        # 4.1 28. Read bytes from the server until either the connection
        # closes, or a 0x0A byte is read. let /field/ be these bytes, including
        # the 0x0A bytes.
        field = ''
        while True:
            ch = _receive_bytes(self._socket, 1)
            field += ch
            if ch == '\n':
                break
        # if /field/ is not at least seven bytes long, or if the last
        # two bytes aren't 0x0D and 0x0A respectively, or if it does not
        # contain at least two 0x20 bytes, then fail the WebSocket connection
        # and abort these steps.
        if len(field) < 7 or not field.endswith('\r\n'):
            raise ClientHandshakeError('Wrong status line: %r' % field)
        m = re.match('[^ ]* ([^ ]*) .*', field)
        if m is None:
            raise ClientHandshakeError(
                'No HTTP status code found in status line: %r' % field)
        # 4.1 29. let /code/ be the substring of /field/ that starts from the
        # byte after the first 0x20 byte, and ends with the byte before the
        # second 0x20 byte.
        code = m.group(1)
        # 4.1 30. if /code/ is not three bytes long, or if any of the bytes in
        # /code/ are not in the range 0x30 to 0x90, then fail the WebSocket
        # connection and abort these steps.
        if not re.match('[0-9][0-9][0-9]', code):
            raise ClientHandshakeError(
                'HTTP status code %r is not three digit in status line: %r' %
                (code, field))
        # 4.1 31. if /code/, interpreted as UTF-8, is "101", then move to the
        # next step.
        if code != '101':
            raise ClientHandshakeError(
                'Expected HTTP status code 101 but found %r in status line: '
                '%r' % (code, field))
        # 4.1 32-39. read fields into /fields/
        fields = self._read_fields()
        # 4.1 40. _Fields processing_
        # read a byte from server
        ch = _receive_bytes(self._socket, 1)
        if ch != '\n':  # 0x0A
            raise ClientHandshakeError('Expected LF but found %r' % ch)
        # 4.1 41. check /fields/
        # TODO(ukai): protocol
        # if the entry's name is "upgrade"
        #  if the value is not exactly equal to the string "WebSocket",
        #  then fail the WebSocket connection and abort these steps.
        _validate_mandatory_header(
            fields,
            common.UPGRADE_HEADER,
            common.WEBSOCKET_UPGRADE_TYPE_HIXIE75,
            True)
        # if the entry's name is "connection"
        #  if the value, converted to ASCII lowercase, is not exactly equal
        #  to the string "upgrade", then fail the WebSocket connection and
        #  abort these steps.
        _validate_mandatory_header(
            fields,
            common.CONNECTION_HEADER,
            common.UPGRADE_CONNECTION_TYPE,
            False)
        # NOTE(review): origin and location are fetched only for their
        # mandatory-presence check; the TODO below covers validating them.
        origin = _get_mandatory_header(
            fields, common.SEC_WEBSOCKET_ORIGIN_HEADER)
        location = _get_mandatory_header(
            fields, common.SEC_WEBSOCKET_LOCATION_HEADER)
        # TODO(ukai): check origin, location, cookie, ..
        # 4.1 42. let /challenge/ be the concatenation of /number_1/,
        # expressed as a big endian 32 bit integer, /number_2/, expressed
        # as big endian 32 bit integer, and the eight bytes of /key_3/ in the
        # order they were sent on the wire.
        challenge = struct.pack('!I', self._number1)
        challenge += struct.pack('!I', self._number2)
        challenge += self._key3
        self._logger.debug(
            'Challenge: %r (%s)', challenge, util.hexify(challenge))
        # 4.1 43. let /expected/ be the MD5 fingerprint of /challenge/ as a
        # big-endian 128 bit string.
        expected = util.md5_hash(challenge).digest()
        self._logger.debug(
            'Expected challenge response: %r (%s)', expected,
            util.hexify(expected))
        # 4.1 44. read sixteen bytes from the server.
        # let /reply/ be those bytes.
        reply = _receive_bytes(self._socket, 16)
        self._logger.debug(
            'Actual challenge response: %r (%s)', reply, util.hexify(reply))
        # 4.1 45. if /reply/ does not exactly equal /expected/, then fail
        # the WebSocket connection and abort these steps.
        if expected != reply:
            raise ClientHandshakeError(
                'Bad challenge response: %r (expected) != %r (actual)' %
                (expected, reply))
        # 4.1 46. The *WebSocket connection is established*.

    def _generate_sec_websocket_key(self):
        """Build one Sec-WebSocket-Key<n> value per draft-76 section 4.1.

        Returns:
            (number, key): the hidden integer and the obfuscated header value.
        """
        # 4.1 16. let /spaces_n/ be a random integer from 1 to 12 inclusive.
        spaces = random.randint(1, 12)
        # 4.1 17. let /max_n/ be the largest integer not greater than
        #  4,294,967,295 divided by /spaces_n/.
        # NOTE(review): relies on Python 2 integer division; under Python 3
        # this would produce a float and break random.randint below.
        maxnum = 4294967295 / spaces
        # 4.1 18. let /number_n/ be a random integer from 0 to /max_n/
        # inclusive.
        number = random.randint(0, maxnum)
        # 4.1 19. let /product_n/ be the result of multiplying /number_n/ and
        # /spaces_n/ together.
        product = number * spaces
        # 4.1 20. let /key_n/ be a string consisting of /product_n/, expressed
        # in base ten using the numerals in the range U+0030 DIGIT ZERO (0) to
        # U+0039 DIGIT NINE (9).
        key = str(product)
        # 4.1 21. insert between one and twelve random characters from the
        # range U+0021 to U+002F and U+003A to U+007E into /key_n/ at random
        # positions.
        # NOTE(review): Python 2 list concatenation of two range() lists.
        available_chars = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
        n = random.randint(1, 12)
        for _ in xrange(n):
            ch = random.choice(available_chars)
            pos = random.randint(0, len(key))
            key = key[0:pos] + chr(ch) + key[pos:]
        # 4.1 22. insert /spaces_n/ U+0020 SPACE characters into /key_n/ at
        # random positions other than start or end of the string.
        for _ in xrange(spaces):
            pos = random.randint(1, len(key) - 1)
            key = key[0:pos] + ' ' + key[pos:]
        return number, key

    def _generate_key3(self):
        """Return the eight random bytes used as /key3/ (draft-76, 4.1 26)."""
        # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
        # equivalently, a random 64 bit integer encoded in a big-endian order).
        return ''.join([chr(random.randint(0, 255)) for _ in xrange(8)])


class ClientConnection(object):
    """A wrapper for socket object to provide the mp_conn interface.

    mod_pywebsocket library is designed to be working on Apache mod_python's
    mp_conn object.
    """

    def __init__(self, socket):
        self._socket = socket

    def write(self, data):
        # mp_conn.write equivalent: send everything, blocking as needed.
        self._socket.sendall(data)

    def read(self, n):
        # mp_conn.read equivalent: may return fewer than n bytes, like recv.
        return self._socket.recv(n)

    def get_remote_addr(self):
        return self._socket.getpeername()
    remote_addr = property(get_remote_addr)


class ClientRequest(object):
    """A wrapper class just to make it able to pass a socket object to
    functions that expect a mp_request object.
    """

    def __init__(self, socket):
        self._logger = util.get_class_logger(self)

        self._socket = socket
        self.connection = ClientConnection(socket)


def _import_ssl():
    """Try to import the standard ssl module; bind it globally on success."""
    global ssl
    try:
        import ssl
        return True
    except ImportError:
        return False


def _import_pyopenssl():
    """Try to import pyOpenSSL; bind OpenSSL globally on success."""
    global OpenSSL
    try:
        import OpenSSL.SSL
        return True
    except ImportError:
        return False


class EchoClient(object):
    """WebSocket echo client."""

    def __init__(self, options):
        self._options = options
        self._socket = None

        self._logger = util.get_class_logger(self)

    def run(self):
        """Run the client.

        Shake hands and then repeat sending message and receiving its echo.
        """
        self._socket = socket.socket()
        self._socket.settimeout(self._options.socket_timeout)
        try:
            self._socket.connect((self._options.server_host,
                                  self._options.server_port))
            if self._options.use_tls:
                self._socket = _TLSSocket(
                    self._socket,
                    self._options.tls_module,
                    self._options.tls_version,
                    self._options.disable_tls_compression)

            # Select the handshake processor matching the protocol version.
            version = self._options.protocol_version

            if (version == _PROTOCOL_VERSION_HYBI08 or
                version == _PROTOCOL_VERSION_HYBI13):
                self._handshake = ClientHandshakeProcessor(
                    self._socket, self._options)
            elif version == _PROTOCOL_VERSION_HYBI00:
                self._handshake = ClientHandshakeProcessorHybi00(
                    self._socket, self._options)
            else:
                raise ValueError(
                    'Invalid --protocol-version flag: %r' % version)

            self._handshake.handshake()

            self._logger.info('Connection established')

            request = ClientRequest(self._socket)

            version_map = {
                _PROTOCOL_VERSION_HYBI08: common.VERSION_HYBI08,
                _PROTOCOL_VERSION_HYBI13: common.VERSION_HYBI13,
                _PROTOCOL_VERSION_HYBI00: common.VERSION_HYBI00}
            request.ws_version = version_map[version]

            if (version == _PROTOCOL_VERSION_HYBI08 or
                version == _PROTOCOL_VERSION_HYBI13):
                # HyBi streams: mask client-to-server frames, server frames
                # arrive unmasked.
                stream_option = StreamOptions()
                stream_option.mask_send = True
                stream_option.unmask_receive = False

                # NOTE(review): after handshake, these options hold either
                # False or the negotiated processor/framer object (set by the
                # handshake processor), hence the `is not False` tests.
                if self._options.deflate_frame is not False:
                    processor = self._options.deflate_frame
                    processor.setup_stream_options(stream_option)

                if self._options.use_permessage_deflate is not False:
                    framer = self._options.use_permessage_deflate
                    framer.setup_stream_options(stream_option)

                self._stream = Stream(request, stream_option)
            elif version == _PROTOCOL_VERSION_HYBI00:
                self._stream = StreamHixie75(request, True)

            # Echo loop: send each comma-separated message and read it back.
            for line in self._options.message.split(','):
                self._stream.send_message(line)
                if self._options.verbose:
                    print 'Send: %s' % line
                try:
                    received = self._stream.receive_message()

                    if self._options.verbose:
                        print 'Recv: %s' % received
                except Exception, e:
                    if self._options.verbose:
                        print 'Error: %s' % e
                    raise

            self._do_closing_handshake()
        finally:
            self._socket.close()

    def _do_closing_handshake(self):
        """Perform closing handshake using the specified closing frame."""
        if self._options.message.split(',')[-1] == _GOODBYE_MESSAGE:
            # requested server initiated closing handshake, so
            # expecting closing handshake message from server.
            self._logger.info('Wait for server-initiated closing handshake')
            message = self._stream.receive_message()
            if message is None:
                print 'Recv close'
                print 'Send ack'
                self._logger.info(
                    'Received closing handshake and sent ack')
                return
        print 'Send close'
        self._stream.close_connection()
        self._logger.info('Sent closing handshake')
        print 'Recv ack'
        self._logger.info('Received ack')


def main():
    """Parse command-line flags, validate them, then run one EchoClient."""
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

    parser = OptionParser()
    # We accept --command_line_flag style flags which is the same as Google
    # gflags in addition to common --command-line-flag style flags.
    parser.add_option('-s', '--server-host', '--server_host',
                      dest='server_host', type='string',
                      default='localhost', help='server host')
    parser.add_option('-p', '--server-port', '--server_port',
                      dest='server_port', type='int',
                      default=_UNDEFINED_PORT, help='server port')
    parser.add_option('-o', '--origin',
                      dest='origin', type='string',
                      default=None, help='origin')
    parser.add_option('-r', '--resource',
                      dest='resource', type='string',
                      default='/echo', help='resource path')
    parser.add_option('-m', '--message',
                      dest='message', type='string',
                      help=('comma-separated messages to send. '
                            '%s will force close the connection from server.'
                            % _GOODBYE_MESSAGE))
    parser.add_option('-q', '--quiet', dest='verbose',
                      action='store_false',
                      default=True, help='suppress messages')
    parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
                      default=False, help='use TLS (wss://). By default, '
                      'it looks for ssl and pyOpenSSL module and uses found '
                      'one. Use --tls-module option to specify which module '
                      'to use')
    parser.add_option('--tls-module', '--tls_module',
                      dest='tls_module', type='choice',
                      choices=[_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
                      help='Use ssl module if "%s" is specified. '
                      'Use pyOpenSSL module if "%s" is specified' %
                      (_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
    parser.add_option('--tls-version', '--tls_version',
                      dest='tls_version',
                      type='string', default=_TLS_VERSION_SSL23,
                      help='TLS/SSL version to use. One of \'' +
                      _TLS_VERSION_SSL23 + '\' (SSL version 2 or 3), \'' +
                      _TLS_VERSION_SSL3 + '\' (SSL version 3), \'' +
                      _TLS_VERSION_TLS1 + '\' (TLS version 1)')
    parser.add_option('--disable-tls-compression', '--disable_tls_compression',
                      dest='disable_tls_compression',
                      action='store_true', default=False,
                      help='Disable TLS compression. Available only when '
                      'pyOpenSSL module is used.')
    parser.add_option('-k', '--socket-timeout', '--socket_timeout',
                      dest='socket_timeout', type='int', default=_TIMEOUT_SEC,
                      help='Timeout(sec) for sockets')
    parser.add_option('--draft75', dest='draft75',
                      action='store_true', default=False,
                      help='Obsolete option. Don\'t use this.')
    parser.add_option('--protocol-version', '--protocol_version',
                      dest='protocol_version',
                      type='string', default=_PROTOCOL_VERSION_HYBI13,
                      help='WebSocket protocol version to use. One of \'' +
                      _PROTOCOL_VERSION_HYBI13 + '\', \'' +
                      _PROTOCOL_VERSION_HYBI08 + '\', \'' +
                      _PROTOCOL_VERSION_HYBI00 + '\'')
    parser.add_option('--version-header', '--version_header',
                      dest='version_header', type='int', default=-1,
                      help='Specify Sec-WebSocket-Version header value')
    parser.add_option('--deflate-frame', '--deflate_frame',
                      dest='deflate_frame',
                      action='store_true', default=False,
                      help='Use the deflate-frame extension.')
    parser.add_option('--use-permessage-deflate', '--use_permessage_deflate',
                      dest='use_permessage_deflate',
                      action='store_true', default=False,
                      help='Use the permessage-deflate extension.')
    parser.add_option('--log-level', '--log_level', type='choice',
                      dest='log_level', default='warn',
                      choices=['debug', 'info', 'warn', 'error', 'critical'],
                      help='Log level.')

    (options, unused_args) = parser.parse_args()

    logging.basicConfig(level=logging.getLevelName(options.log_level.upper()))

    # Reject obsolete protocol selections up front.
    if options.draft75:
        logging.critical('--draft75 option is obsolete.')
        sys.exit(1)

    if options.protocol_version == _PROTOCOL_VERSION_HIXIE75:
        logging.critical(
            'Value %s is obsolete for --protocol_version options' %
            _PROTOCOL_VERSION_HIXIE75)
        sys.exit(1)

    # Resolve which TLS implementation to use and validate the related flags.
    if options.use_tls:
        if options.tls_module is None:
            if _import_ssl():
                options.tls_module = _TLS_BY_STANDARD_MODULE
                logging.debug('Using ssl module')
            elif _import_pyopenssl():
                options.tls_module = _TLS_BY_PYOPENSSL
                logging.debug('Using pyOpenSSL module')
            else:
                logging.critical(
                    'TLS support requires ssl or pyOpenSSL module.')
                sys.exit(1)
        elif options.tls_module == _TLS_BY_STANDARD_MODULE:
            if not _import_ssl():
                logging.critical('ssl module is not available')
                sys.exit(1)
        elif options.tls_module == _TLS_BY_PYOPENSSL:
            if not _import_pyopenssl():
                logging.critical('pyOpenSSL module is not available')
                sys.exit(1)
        else:
            logging.critical('Invalid --tls-module option: %r',
                             options.tls_module)
            sys.exit(1)

        if (options.disable_tls_compression and
            options.tls_module != _TLS_BY_PYOPENSSL):
            logging.critical('You can disable TLS compression only when '
                             'pyOpenSSL module is used.')
            sys.exit(1)
    else:
        if options.tls_module is not None:
            logging.critical('Use --tls-module option only together with '
                             '--use-tls option.')
            sys.exit(1)

        if options.disable_tls_compression:
            logging.critical('Use --disable-tls-compression only together '
                             'with --use-tls option.')
            sys.exit(1)

    # Default port number depends on whether TLS is used.
    if options.server_port == _UNDEFINED_PORT:
        if options.use_tls:
            options.server_port = common.DEFAULT_WEB_SOCKET_SECURE_PORT
        else:
            options.server_port = common.DEFAULT_WEB_SOCKET_PORT

    # optparse doesn't seem to handle non-ascii default values.
    # Set default message here.
    if not options.message:
        options.message = u'Hello,\u65e5\u672c'  # "Japan" in Japanese

    EchoClient(options).run()


if __name__ == '__main__':
    main()


# vi:sts=4 sw=4 et
darkleons/BE
refs/heads/master
addons/hr_contract/base_action_rule.py
389
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (c) 2013 OpenERP S.A. <http://www.openerp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.addons.base_action_rule.base_action_rule import get_datetime
from openerp.osv import fields, osv


class base_action_rule(osv.Model):
    """ Add resource and calendar for time-based conditions """
    _name = 'base.action.rule'
    _inherit = ['base.action.rule']

    _columns = {
        # Field on the triggering model that identifies the user whose
        # working schedule should drive the delay computation below.
        'trg_date_resource_field_id': fields.many2one(
            'ir.model.fields', 'Use employee work schedule',
            help='Use the user\'s working schedule.',
        ),
    }

    def _check_delay(self, cr, uid, action, record, record_dt, context=None):
        """ Override the check of delay to try to use a user-related
            calendar. If no calendar is found, fallback on the default
            behavior.
        """
        # The employee calendar is used only when the rule defines a calendar,
        # counts its delay in days, AND points at a resource (user) field.
        if action.trg_date_calendar_id and action.trg_date_range_type == 'day' and action.trg_date_resource_field_id:
            # NOTE(review): record[...] is assumed to resolve to a res.users
            # browse record (the code reads user.employee_ids) -- confirm
            # against the rules configured in production.
            user = record[action.trg_date_resource_field_id.name]
            if user.employee_ids and user.employee_ids[0].contract_id \
                    and user.employee_ids[0].contract_id.working_hours:
                # Schedule the delay on the working-hours calendar of the
                # user's first employee's contract, skipping leave days.
                calendar = user.employee_ids[0].contract_id.working_hours
                start_dt = get_datetime(record_dt)
                resource_id = user.employee_ids[0].resource_id.id
                action_dt = self.pool['resource.calendar'].schedule_days_get_date(
                    cr, uid, calendar.id, action.trg_date_range,
                    day_date=start_dt, compute_leaves=True,
                    resource_id=resource_id, context=context
                )
                return action_dt
        # No usable employee calendar: fall back to the default computation.
        return super(base_action_rule, self)._check_delay(cr, uid, action, record, record_dt, context=context)
acsone/odoo-autodiscover
refs/heads/master
tests/addons/8.0/z1/odoo_addons/__init__.py
179
# Declare this package as a pkg_resources-style namespace package so that
# multiple independently-installed distributions can contribute modules
# under the same ``odoo_addons`` package name.
import pkg_resources
pkg_resources.declare_namespace(__name__)
fridex/fabric8-analytics-worker
refs/heads/master
tests/workers/test_recurse.py
1
from f8a_worker.dispatcher.foreach import _is_url_dependency


def test__is_url_dependency():
    """_is_url_dependency() flags URL-style dependencies, not plain versions."""
    plain_version = {"name": "test2", "version": "1.0.0"}
    tarball_version = {"name": "test3", "version": "http://some.tar/ball.tgz"}
    git_version = {"name": "test4", "version": "git://some.git/repo.git"}
    git_name = {"name": "git+https://some.other/git/repo", "version": None}

    assert not _is_url_dependency(plain_version)
    for url_dep in (tarball_version, git_version, git_name):
        assert _is_url_dependency(url_dep)
bnaul/scikit-learn
refs/heads/master
examples/exercises/plot_cv_digits.py
24
""" ============================================= Cross-validation on Digits Dataset Exercise ============================================= A tutorial exercise using Cross-validation with an SVM on the Digits dataset. This exercise is used in the :ref:`cv_generators_tut` part of the :ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`. """ print(__doc__) import numpy as np from sklearn.model_selection import cross_val_score from sklearn import datasets, svm X, y = datasets.load_digits(return_X_y=True) svc = svm.SVC(kernel='linear') C_s = np.logspace(-10, 0, 10) scores = list() scores_std = list() for C in C_s: svc.C = C this_scores = cross_val_score(svc, X, y, n_jobs=1) scores.append(np.mean(this_scores)) scores_std.append(np.std(this_scores)) # Do the plotting import matplotlib.pyplot as plt plt.figure() plt.semilogx(C_s, scores) plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--') plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--') locs, labels = plt.yticks() plt.yticks(locs, list(map(lambda x: "%g" % x, locs))) plt.ylabel('CV score') plt.xlabel('Parameter C') plt.ylim(0, 1.1) plt.show()
manassolanki/frappe
refs/heads/develop
frappe/desk/search.py
4
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt # Search from __future__ import unicode_literals import frappe, json from frappe.utils import cstr, unique from frappe import _ from six import string_types # this is called by the Link Field @frappe.whitelist() def search_link(doctype, txt, query=None, filters=None, page_length=20, searchfield=None): search_widget(doctype, txt, query, searchfield=searchfield, page_length=page_length, filters=filters) frappe.response['results'] = build_for_autosuggest(frappe.response["values"]) del frappe.response["values"] # this is called by the search box @frappe.whitelist() def search_widget(doctype, txt, query=None, searchfield=None, start=0, page_length=10, filters=None, filter_fields=None, as_dict=False): if isinstance(filters, string_types): filters = json.loads(filters) meta = frappe.get_meta(doctype) if not searchfield: searchfield = "name" standard_queries = frappe.get_hooks().standard_queries or {} if query and query.split()[0].lower()!="select": # by method frappe.response["values"] = frappe.call(query, doctype, txt, searchfield, start, page_length, filters, as_dict=as_dict) elif not query and doctype in standard_queries: # from standard queries search_widget(doctype, txt, standard_queries[doctype][0], searchfield, start, page_length, filters) else: if query: frappe.throw(_("This query style is discontinued")) # custom query # frappe.response["values"] = frappe.db.sql(scrub_custom_query(query, searchfield, txt)) else: if isinstance(filters, dict): filters_items = filters.items() filters = [] for f in filters_items: if isinstance(f[1], (list, tuple)): filters.append([doctype, f[0], f[1][0], f[1][1]]) else: filters.append([doctype, f[0], "=", f[1]]) if filters==None: filters = [] or_filters = [] # build from doctype if txt: search_fields = ["name"] if meta.title_field: search_fields.append(meta.title_field) if meta.search_fields: 
search_fields.extend(meta.get_search_fields()) for f in search_fields: fmeta = meta.get_field(f.strip()) if f == "name" or (fmeta and fmeta.fieldtype in ["Data", "Text", "Small Text", "Long Text", "Link", "Select", "Read Only", "Text Editor"]): or_filters.append([doctype, f.strip(), "like", "%{0}%".format(txt)]) if meta.get("fields", {"fieldname":"enabled", "fieldtype":"Check"}): filters.append([doctype, "enabled", "=", 1]) if meta.get("fields", {"fieldname":"disabled", "fieldtype":"Check"}): filters.append([doctype, "disabled", "!=", 1]) # format a list of fields combining search fields and filter fields fields = get_std_fields_list(meta, searchfield or "name") if filter_fields: fields = list(set(fields + json.loads(filter_fields))) formatted_fields = ['`tab%s`.`%s`' % (meta.name, f.strip()) for f in fields] # find relevance as location of search term from the beginning of string `name`. used for sorting results. formatted_fields.append("""locate("{_txt}", `tab{doctype}`.`name`) as `_relevance`""".format( _txt=frappe.db.escape((txt or "").replace("%", "")), doctype=frappe.db.escape(doctype))) # In order_by, `idx` gets second priority, because it stores link count from frappe.model.db_query import get_order_by order_by_based_on_meta = get_order_by(doctype, meta) order_by = "if(_relevance, _relevance, 99999), {0}, `tab{1}`.idx desc".format(order_by_based_on_meta, doctype) values = frappe.get_list(doctype, filters=filters, fields=formatted_fields, or_filters = or_filters, limit_start = start, limit_page_length=page_length, order_by=order_by, ignore_permissions = True if doctype == "DocType" else False, # for dynamic links as_list=not as_dict) # remove _relevance from results if as_dict: for r in values: r.pop("_relevance") frappe.response["values"] = values else: frappe.response["values"] = [r[:-1] for r in values] def get_std_fields_list(meta, key): # get additional search fields sflist = meta.search_fields and meta.search_fields.split(",") or [] title_field = 
[meta.title_field] if (meta.title_field and meta.title_field not in sflist) else [] sflist = ['name'] + sflist + title_field if not key in sflist: sflist = sflist + [key] return sflist def build_for_autosuggest(res): results = [] for r in res: out = {"value": r[0], "description": ", ".join(unique(cstr(d) for d in r if d)[1:])} results.append(out) return results def scrub_custom_query(query, key, txt): if '%(key)s' in query: query = query.replace('%(key)s', key) if '%s' in query: query = query.replace('%s', ((txt or '') + '%')) return query
Mistobaan/tensorflow
refs/heads/master
tensorflow/python/debug/cli/debugger_cli_common.py
68
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Building Blocks of TensorFlow Debugger Command-Line Interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import copy
import os
import re
import sre_constants
import traceback

import six
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.platform import gfile

# Indentation used when rendering help text for CLI commands.
HELP_INDENT = "  "

# Sentinel exit token for an explicit user-requested exit.
EXPLICIT_USER_EXIT = "explicit_user_exit"
# Annotation keys understood by the CLI renderers.
REGEX_MATCH_LINES_KEY = "regex_match_lines"
INIT_SCROLL_POS_KEY = "init_scroll_pos"

# Prefix for main-menu annotation keys.
MAIN_MENU_KEY = "mm:"


class CommandLineExit(Exception):
  """Raised to terminate the CLI, optionally carrying an exit token."""

  def __init__(self, exit_token=None):
    Exception.__init__(self)
    self._exit_token = exit_token

  @property
  def exit_token(self):
    # Opaque value handed back to whoever catches the exit.
    return self._exit_token


class RichLine(object):
  """Rich single-line text.

  Attributes:
    text: A plain string, the raw text represented by this object.  Should not
      contain newlines.
    font_attr_segs: A list of (start, end, font attribute) triples, representing
      richness information applied to substrings of text.
  """

  def __init__(self, text="", font_attr=None):
    """Construct a RichLine with no rich attributes or a single attribute.

    Args:
      text: Raw text string
      font_attr: If specified, a single font attribute to be applied to the
        entire text.

    Extending this object via concatenation allows creation of text with
    varying attributes.
    """
    # TODO(ebreck) Make .text and .font_attr protected members when we no
    # longer need public access.
    self.text = text
    if font_attr:
      # A single attribute covering the whole initial text.
      self.font_attr_segs = [(0, len(text), font_attr)]
    else:
      self.font_attr_segs = []

  def __add__(self, other):
    """Concatenate two chunks of maybe rich text to make a longer rich line.

    Does not modify self.

    Args:
      other: Another piece of text to concatenate with this one.
        If it is a plain str, it will be appended to this string with no
        attributes.  If it is a RichLine, it will be appended to this string
        with its attributes preserved.

    Returns:
      A new RichLine comprising both chunks of text, with appropriate
        attributes applied to the corresponding substrings.

    Raises:
      TypeError: If `other` is neither a plain string nor a RichLine.
    """
    ret = RichLine()
    if isinstance(other, six.string_types):
      ret.text = self.text + other
      ret.font_attr_segs = self.font_attr_segs[:]
      return ret
    elif isinstance(other, RichLine):
      ret.text = self.text + other.text
      ret.font_attr_segs = self.font_attr_segs[:]
      old_len = len(self.text)
      # Shift the other line's attribute segments past this line's text.
      for start, end, font_attr in other.font_attr_segs:
        ret.font_attr_segs.append((old_len + start, old_len + end, font_attr))
      return ret
    else:
      raise TypeError("%r cannot be concatenated with a RichLine" % other)

  def __len__(self):
    return len(self.text)


def rich_text_lines_from_rich_line_list(rich_text_list, annotations=None):
  """Convert a list of RichLine objects or strings to a RichTextLines object.

  Args:
    rich_text_list: a list of RichLine objects or strings
    annotations: annotations for the resultant RichTextLines object.

  Returns:
    A corresponding RichTextLines object.
  """
  lines = []
  font_attr_segs = {}
  for i, rl in enumerate(rich_text_list):
    if isinstance(rl, RichLine):
      lines.append(rl.text)
      # Only record segments for lines that actually carry attributes.
      if rl.font_attr_segs:
        font_attr_segs[i] = rl.font_attr_segs
    else:
      lines.append(rl)
  return RichTextLines(lines, font_attr_segs, annotations=annotations)


class RichTextLines(object):
  """Rich multi-line text.
Line-by-line text output, with font attributes (e.g., color) and annotations (e.g., indices in a multi-dimensional tensor). Used as the text output of CLI commands. Can be rendered on terminal environments such as curses. This is not to be confused with Rich Text Format (RTF). This class is for text lines only. """ def __init__(self, lines, font_attr_segs=None, annotations=None): """Constructor of RichTextLines. Args: lines: A list of str or a single str, representing text output to screen. The latter case is for convenience when the text output is single-line. font_attr_segs: A map from 0-based row index to a list of 3-tuples. It lists segments in each row that have special font attributes, such as colors, that are not the default attribute. For example: {1: [(0, 3, "red"), (4, 7, "green")], 2: [(10, 20, "yellow")]} In each tuple, the 1st element is the start index of the segment. The 2nd element is the end index, in an "open interval" fashion. The 3rd element is an object or a list of objects that represents the font attribute. Colors are represented as strings as in the examples above. annotations: A map from 0-based row index to any object for annotating the row. A typical use example is annotating rows of the output as indices in a multi-dimensional tensor. For example, consider the following text representation of a 3x2x2 tensor: [[[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]] The annotation can indicate the indices of the first element shown in each row, i.e., {0: [0, 0, 0], 1: [1, 0, 0], 2: [2, 0, 0]} This information can make display of tensors on screen clearer and can help the user navigate (scroll) to the desired location in a large tensor. Raises: ValueError: If lines is of invalid type. 
""" if isinstance(lines, list): self._lines = lines elif isinstance(lines, six.string_types): self._lines = [lines] else: raise ValueError("Unexpected type in lines: %s" % type(lines)) self._font_attr_segs = font_attr_segs if not self._font_attr_segs: self._font_attr_segs = {} # TODO(cais): Refactor to collections.defaultdict(list) to simplify code. self._annotations = annotations if not self._annotations: self._annotations = {} # TODO(cais): Refactor to collections.defaultdict(list) to simplify code. @property def lines(self): return self._lines @property def font_attr_segs(self): return self._font_attr_segs @property def annotations(self): return self._annotations def num_lines(self): return len(self._lines) def slice(self, begin, end): """Slice a RichTextLines object. The object itself is not changed. A sliced instance is returned. Args: begin: (int) Beginning line index (inclusive). Must be >= 0. end: (int) Ending line index (exclusive). Must be >= 0. Returns: (RichTextLines) Sliced output instance of RichTextLines. Raises: ValueError: If begin or end is negative. """ if begin < 0 or end < 0: raise ValueError("Encountered negative index.") # Copy lines. lines = self.lines[begin:end] # Slice font attribute segments. font_attr_segs = {} for key in self.font_attr_segs: if key >= begin and key < end: font_attr_segs[key - begin] = self.font_attr_segs[key] # Slice annotations. annotations = {} for key in self.annotations: if not isinstance(key, int): # Annotations can contain keys that are not line numbers. annotations[key] = self.annotations[key] elif key >= begin and key < end: annotations[key - begin] = self.annotations[key] return RichTextLines( lines, font_attr_segs=font_attr_segs, annotations=annotations) def extend(self, other): """Extend this instance of RichTextLines with another instance. The extension takes effect on the text lines, the font attribute segments, as well as the annotations. 
The line indices in the font attribute segments and the annotations are adjusted to account for the existing lines. If there are duplicate, non-line-index fields in the annotations, the value from the input argument "other" will override that in this instance. Args: other: (RichTextLines) The other RichTextLines instance to be appended at the end of this instance. """ orig_num_lines = self.num_lines() # Record original number of lines. # Merge the lines. self._lines.extend(other.lines) # Merge the font_attr_segs. for line_index in other.font_attr_segs: self._font_attr_segs[orig_num_lines + line_index] = ( other.font_attr_segs[line_index]) # Merge the annotations. for key in other.annotations: if isinstance(key, int): self._annotations[orig_num_lines + key] = (other.annotations[key]) else: self._annotations[key] = other.annotations[key] def _extend_before(self, other): """Add another RichTextLines object to the front. Args: other: (RichTextLines) The other object to add to the front to this object. """ other_num_lines = other.num_lines() # Record original number of lines. # Merge the lines. self._lines = other.lines + self._lines # Merge the font_attr_segs. new_font_attr_segs = {} for line_index in self.font_attr_segs: new_font_attr_segs[other_num_lines + line_index] = ( self.font_attr_segs[line_index]) new_font_attr_segs.update(other.font_attr_segs) self._font_attr_segs = new_font_attr_segs # Merge the annotations. new_annotations = {} for key in self._annotations: if isinstance(key, int): new_annotations[other_num_lines + key] = (self.annotations[key]) else: new_annotations[key] = other.annotations[key] new_annotations.update(other.annotations) self._annotations = new_annotations def append(self, line, font_attr_segs=None): """Append a single line of text. Args: line: (str) The text to be added to the end. font_attr_segs: (list of tuples) Font attribute segments of the appended line. 
""" self._lines.append(line) if font_attr_segs: self._font_attr_segs[len(self._lines) - 1] = font_attr_segs def append_rich_line(self, rich_line): self.append(rich_line.text, rich_line.font_attr_segs) def prepend(self, line, font_attr_segs=None): """Prepend (i.e., add to the front) a single line of text. Args: line: (str) The text to be added to the front. font_attr_segs: (list of tuples) Font attribute segments of the appended line. """ other = RichTextLines(line) if font_attr_segs: other.font_attr_segs[0] = font_attr_segs self._extend_before(other) def write_to_file(self, file_path): """Write the object itself to file, in a plain format. The font_attr_segs and annotations are ignored. Args: file_path: (str) path of the file to write to. """ with gfile.Open(file_path, "w") as f: for line in self._lines: f.write(line + "\n") # TODO(cais): Add a method to allow appending to a line in RichTextLines with # both text and font_attr_segs. def regex_find(orig_screen_output, regex, font_attr): """Perform regex match in rich text lines. Produces a new RichTextLines object with font_attr_segs containing highlighted regex matches. Example use cases include: 1) search for specific items in a large list of items, and 2) search for specific numerical values in a large tensor. Args: orig_screen_output: The original RichTextLines, in which the regex find is to be performed. regex: The regex used for matching. font_attr: Font attribute used for highlighting the found result. Returns: A modified copy of orig_screen_output. Raises: ValueError: If input str regex is not a valid regular expression. 
""" new_screen_output = RichTextLines( orig_screen_output.lines, font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs), annotations=orig_screen_output.annotations) try: re_prog = re.compile(regex) except sre_constants.error: raise ValueError("Invalid regular expression: \"%s\"" % regex) regex_match_lines = [] for i in xrange(len(new_screen_output.lines)): line = new_screen_output.lines[i] find_it = re_prog.finditer(line) match_segs = [] for match in find_it: match_segs.append((match.start(), match.end(), font_attr)) if match_segs: if i not in new_screen_output.font_attr_segs: new_screen_output.font_attr_segs[i] = match_segs else: new_screen_output.font_attr_segs[i].extend(match_segs) new_screen_output.font_attr_segs[i] = sorted( new_screen_output.font_attr_segs[i], key=lambda x: x[0]) regex_match_lines.append(i) new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines return new_screen_output def wrap_rich_text_lines(inp, cols): """Wrap RichTextLines according to maximum number of columns. Produces a new RichTextLines object with the text lines, font_attr_segs and annotations properly wrapped. This ought to be used sparingly, as in most cases, command handlers producing RichTextLines outputs should know the screen/panel width via the screen_info kwarg and should produce properly length-limited lines in the output accordingly. Args: inp: Input RichTextLines object. cols: Number of columns, as an int. Returns: 1) A new instance of RichTextLines, with line lengths limited to cols. 2) A list of new (wrapped) line index. For example, if the original input consists of three lines and only the second line is wrapped, and it's wrapped into two lines, this return value will be: [0, 1, 3]. Raises: ValueError: If inputs have invalid types. 
""" new_line_indices = [] if not isinstance(inp, RichTextLines): raise ValueError("Invalid type of input screen_output") if not isinstance(cols, int): raise ValueError("Invalid type of input cols") out = RichTextLines([]) row_counter = 0 # Counter for new row index for i in xrange(len(inp.lines)): new_line_indices.append(out.num_lines()) line = inp.lines[i] if i in inp.annotations: out.annotations[row_counter] = inp.annotations[i] if len(line) <= cols: # No wrapping. out.lines.append(line) if i in inp.font_attr_segs: out.font_attr_segs[row_counter] = inp.font_attr_segs[i] row_counter += 1 else: # Wrap. wlines = [] # Wrapped lines. osegs = [] if i in inp.font_attr_segs: osegs = inp.font_attr_segs[i] idx = 0 while idx < len(line): if idx + cols > len(line): rlim = len(line) else: rlim = idx + cols wlines.append(line[idx:rlim]) for seg in osegs: if (seg[0] < rlim) and (seg[1] >= idx): # Calculate left bound within wrapped line. if seg[0] >= idx: lb = seg[0] - idx else: lb = 0 # Calculate right bound within wrapped line. if seg[1] < rlim: rb = seg[1] - idx else: rb = rlim - idx if rb > lb: # Omit zero-length segments. wseg = (lb, rb, seg[2]) if row_counter not in out.font_attr_segs: out.font_attr_segs[row_counter] = [wseg] else: out.font_attr_segs[row_counter].append(wseg) idx += cols row_counter += 1 out.lines.extend(wlines) # Copy over keys of annotation that are not row indices. for key in inp.annotations: if not isinstance(key, int): out.annotations[key] = inp.annotations[key] return out, new_line_indices class CommandHandlerRegistry(object): """Registry of command handlers for CLI. Handler methods (callables) for user commands can be registered with this class, which then is able to dispatch commands to the correct handlers and retrieve the RichTextLines output. 
For example, suppose you have the following handler defined: def echo(argv, screen_info=None): return RichTextLines(["arguments = %s" % " ".join(argv), "screen_info = " + repr(screen_info)]) you can register the handler with the command prefix "echo" and alias "e": registry = CommandHandlerRegistry() registry.register_command_handler("echo", echo, "Echo arguments, along with screen info", prefix_aliases=["e"]) then to invoke this command handler with some arguments and screen_info, do: registry.dispatch_command("echo", ["foo", "bar"], screen_info={"cols": 80}) or with the prefix alias: registry.dispatch_command("e", ["foo", "bar"], screen_info={"cols": 80}) The call will return a RichTextLines object which can be rendered by a CLI. """ HELP_COMMAND = "help" HELP_COMMAND_ALIASES = ["h"] def __init__(self): # A dictionary from command prefix to handler. self._handlers = {} # A dictionary from prefix alias to prefix. self._alias_to_prefix = {} # A dictionary from prefix to aliases. self._prefix_to_aliases = {} # A dictionary from command prefix to help string. self._prefix_to_help = {} # Introductory text to help information. self._help_intro = None # Register a default handler for the command "help". self.register_command_handler( self.HELP_COMMAND, self._help_handler, "Print this help message.", prefix_aliases=self.HELP_COMMAND_ALIASES) def register_command_handler(self, prefix, handler, help_info, prefix_aliases=None): """Register a callable as a command handler. Args: prefix: Command prefix, i.e., the first word in a command, e.g., "print" as in "print tensor_1". handler: A callable of the following signature: foo_handler(argv, screen_info=None), where argv is the argument vector (excluding the command prefix) and screen_info is a dictionary containing information about the screen, such as number of columns, e.g., {"cols": 100}. The callable should return: 1) a RichTextLines object representing the screen output. 
The callable can also raise an exception of the type CommandLineExit, which if caught by the command-line interface, will lead to its exit. The exception can optionally carry an exit token of arbitrary type. help_info: A help string. prefix_aliases: Aliases for the command prefix, as a list of str. E.g., shorthands for the command prefix: ["p", "pr"] Raises: ValueError: If 1) the prefix is empty, or 2) handler is not callable, or 3) a handler is already registered for the prefix, or 4) elements in prefix_aliases clash with existing aliases. 5) help_info is not a str. """ if not prefix: raise ValueError("Empty command prefix") if prefix in self._handlers: raise ValueError( "A handler is already registered for command prefix \"%s\"" % prefix) # Make sure handler is callable. if not callable(handler): raise ValueError("handler is not callable") # Make sure that help info is a string. if not isinstance(help_info, six.string_types): raise ValueError("help_info is not a str") # Process prefix aliases. if prefix_aliases: for alias in prefix_aliases: if self._resolve_prefix(alias): raise ValueError( "The prefix alias \"%s\" clashes with existing prefixes or " "aliases." % alias) self._alias_to_prefix[alias] = prefix self._prefix_to_aliases[prefix] = prefix_aliases # Store handler. self._handlers[prefix] = handler # Store help info. self._prefix_to_help[prefix] = help_info def dispatch_command(self, prefix, argv, screen_info=None): """Handles a command by dispatching it to a registered command handler. Args: prefix: Command prefix, as a str, e.g., "print". argv: Command argument vector, excluding the command prefix, represented as a list of str, e.g., ["tensor_1"] screen_info: A dictionary containing screen info, e.g., {"cols": 100}. Returns: An instance of RichTextLines or None. If any exception is caught during the invocation of the command handler, the RichTextLines will wrap the error type and message. 
Raises: ValueError: If 1) prefix is empty, or 2) no command handler is registered for the command prefix, or 3) the handler is found for the prefix, but it fails to return a RichTextLines or raise any exception. CommandLineExit: If the command handler raises this type of exception, this method will simply pass it along. """ if not prefix: raise ValueError("Prefix is empty") resolved_prefix = self._resolve_prefix(prefix) if not resolved_prefix: raise ValueError("No handler is registered for command prefix \"%s\"" % prefix) handler = self._handlers[resolved_prefix] try: output = handler(argv, screen_info=screen_info) except CommandLineExit as e: raise e except SystemExit as e: # Special case for syntax errors caught by argparse. lines = ["Syntax error for command: %s" % prefix, "For help, do \"help %s\"" % prefix] output = RichTextLines(lines) except BaseException as e: # pylint: disable=broad-except lines = ["Error occurred during handling of command: %s %s:" % (resolved_prefix, " ".join(argv)), "%s: %s" % (type(e), str(e))] # Include traceback of the exception. lines.append("") lines.extend(traceback.format_exc().split("\n")) output = RichTextLines(lines) if not isinstance(output, RichTextLines) and output is not None: raise ValueError( "Return value from command handler %s is not None or a RichTextLines " "instance" % str(handler)) return output def is_registered(self, prefix): """Test if a command prefix or its alias is has a registered handler. Args: prefix: A prefix or its alias, as a str. Returns: True iff a handler is registered for prefix. """ return self._resolve_prefix(prefix) is not None def get_help(self, cmd_prefix=None): """Compile help information into a RichTextLines object. Args: cmd_prefix: Optional command prefix. As the prefix itself or one of its aliases. Returns: A RichTextLines object containing the help information. If cmd_prefix is None, the return value will be the full command-line help. 
Otherwise, it will be the help information for the specified command. """ if not cmd_prefix: # Print full help information, in sorted order of the command prefixes. help_info = RichTextLines([]) if self._help_intro: # If help intro is available, show it at the beginning. help_info.extend(self._help_intro) sorted_prefixes = sorted(self._handlers) for cmd_prefix in sorted_prefixes: lines = self._get_help_for_command_prefix(cmd_prefix) lines.append("") lines.append("") help_info.extend(RichTextLines(lines)) return help_info else: return RichTextLines(self._get_help_for_command_prefix(cmd_prefix)) def set_help_intro(self, help_intro): """Set an introductory message to help output. Args: help_intro: (RichTextLines) Rich text lines appended to the beginning of the output of the command "help", as introductory information. """ self._help_intro = help_intro def _help_handler(self, args, screen_info=None): """Command handler for "help". "help" is a common command that merits built-in support from this class. Args: args: Command line arguments to "help" (not including "help" itself). screen_info: (dict) Information regarding the screen, e.g., the screen width in characters: {"cols": 80} Returns: (RichTextLines) Screen text output. """ _ = screen_info # Unused currently. if not args: return self.get_help() elif len(args) == 1: return self.get_help(args[0]) else: return RichTextLines(["ERROR: help takes only 0 or 1 input argument."]) def _resolve_prefix(self, token): """Resolve command prefix from the prefix itself or its alias. Args: token: a str to be resolved. Returns: If resolvable, the resolved command prefix. If not resolvable, None. """ if token in self._handlers: return token elif token in self._alias_to_prefix: return self._alias_to_prefix[token] else: return None def _get_help_for_command_prefix(self, cmd_prefix): """Compile the help information for a given command prefix. Args: cmd_prefix: Command prefix, as the prefix itself or one of its aliases. 
Returns: A list of str as the help information fo cmd_prefix. If the cmd_prefix does not exist, the returned list of str will indicate that. """ lines = [] resolved_prefix = self._resolve_prefix(cmd_prefix) if not resolved_prefix: lines.append("Invalid command prefix: \"%s\"" % cmd_prefix) return lines lines.append(resolved_prefix) if resolved_prefix in self._prefix_to_aliases: lines.append(HELP_INDENT + "Aliases: " + ", ".join( self._prefix_to_aliases[resolved_prefix])) lines.append("") help_lines = self._prefix_to_help[resolved_prefix].split("\n") for line in help_lines: lines.append(HELP_INDENT + line) return lines class TabCompletionRegistry(object): """Registry for tab completion responses.""" def __init__(self): self._comp_dict = {} # TODO(cais): Rename method names with "comp" to "*completion*" to avoid # confusion. def register_tab_comp_context(self, context_words, comp_items): """Register a tab-completion context. Register that, for each word in context_words, the potential tab-completions are the words in comp_items. A context word is a pre-existing, completed word in the command line that determines how tab-completion works for another, incomplete word in the same command line. Completion items consist of potential candidates for the incomplete word. To give a general example, a context word can be "drink", and the completion items can be ["coffee", "tea", "water"] Note: A context word can be empty, in which case the context is for the top-level commands. Args: context_words: A list of context words belonging to the context being registered. It is a list of str, instead of a single string, to support synonym words triggering the same tab-completion context, e.g., both "drink" and the short-hand "dr" can trigger the same context. comp_items: A list of completion items, as a list of str. Raises: TypeError: if the input arguments are not all of the correct types. 
""" if not isinstance(context_words, list): raise TypeError("Incorrect type in context_list: Expected list, got %s" % type(context_words)) if not isinstance(comp_items, list): raise TypeError("Incorrect type in comp_items: Expected list, got %s" % type(comp_items)) # Sort the completion items on registration, so that later during # get_completions calls, no sorting will be necessary. sorted_comp_items = sorted(comp_items) for context_word in context_words: self._comp_dict[context_word] = sorted_comp_items def deregister_context(self, context_words): """Deregister a list of context words. Args: context_words: A list of context words to deregister, as a list of str. Raises: KeyError: if there are word(s) in context_words that do not correspond to any registered contexts. """ for context_word in context_words: if context_word not in self._comp_dict: raise KeyError("Cannot deregister unregistered context word \"%s\"" % context_word) for context_word in context_words: del self._comp_dict[context_word] def extend_comp_items(self, context_word, new_comp_items): """Add a list of completion items to a completion context. Args: context_word: A single completion word as a string. The extension will also apply to all other context words of the same context. new_comp_items: (list of str) New completion items to add. Raises: KeyError: if the context word has not been registered. """ if context_word not in self._comp_dict: raise KeyError("Context word \"%s\" has not been registered" % context_word) self._comp_dict[context_word].extend(new_comp_items) self._comp_dict[context_word] = sorted(self._comp_dict[context_word]) def remove_comp_items(self, context_word, comp_items): """Remove a list of completion items from a completion context. Args: context_word: A single completion word as a string. The removal will also apply to all other context words of the same context. comp_items: Completion items to remove. Raises: KeyError: if the context word has not been registered. 
""" if context_word not in self._comp_dict: raise KeyError("Context word \"%s\" has not been registered" % context_word) for item in comp_items: self._comp_dict[context_word].remove(item) def get_completions(self, context_word, prefix): """Get the tab completions given a context word and a prefix. Args: context_word: The context word. prefix: The prefix of the incomplete word. Returns: (1) None if no registered context matches the context_word. A list of str for the matching completion items. Can be an empty list of a matching context exists, but no completion item matches the prefix. (2) Common prefix of all the words in the first return value. If the first return value is None, this return value will be None, too. If the first return value is not None, i.e., a list, this return value will be a str, which can be an empty str if there is no common prefix among the items of the list. """ if context_word not in self._comp_dict: return None, None comp_items = self._comp_dict[context_word] comp_items = sorted( [item for item in comp_items if item.startswith(prefix)]) return comp_items, self._common_prefix(comp_items) def _common_prefix(self, m): """Given a list of str, returns the longest common prefix. Args: m: (list of str) A list of strings. Returns: (str) The longest common prefix. """ if not m: return "" s1 = min(m) s2 = max(m) for i, c in enumerate(s1): if c != s2[i]: return s1[:i] return s1 class CommandHistory(object): """Keeps command history and supports lookup.""" _HISTORY_FILE_NAME = ".tfdbg_history" def __init__(self, limit=100, history_file_path=None): """CommandHistory constructor. Args: limit: Maximum number of the most recent commands that this instance keeps track of, as an int. history_file_path: (str) Manually specified path to history file. Used in testing. 
""" self._commands = [] self._limit = limit self._history_file_path = ( history_file_path or self._get_default_history_file_path()) self._load_history_from_file() def _load_history_from_file(self): if os.path.isfile(self._history_file_path): try: with open(self._history_file_path, "rt") as history_file: commands = history_file.readlines() self._commands = [command.strip() for command in commands if command.strip()] # Limit the size of the history file. if len(self._commands) > self._limit: self._commands = self._commands[-self._limit:] with open(self._history_file_path, "wt") as history_file: for command in self._commands: history_file.write(command + "\n") except IOError: print("WARNING: writing history file failed.") def _add_command_to_history_file(self, command): try: with open(self._history_file_path, "at") as history_file: history_file.write(command + "\n") except IOError: pass @classmethod def _get_default_history_file_path(cls): return os.path.join(os.path.expanduser("~"), cls._HISTORY_FILE_NAME) def add_command(self, command): """Add a command to the command history. Args: command: The history command, as a str. Raises: TypeError: if command is not a str. """ if self._commands and command == self._commands[-1]: # Ignore repeating commands in a row. return if not isinstance(command, six.string_types): raise TypeError("Attempt to enter non-str entry to command history") self._commands.append(command) if len(self._commands) > self._limit: self._commands = self._commands[-self._limit:] self._add_command_to_history_file(command) def most_recent_n(self, n): """Look up the n most recent commands. Args: n: Number of most recent commands to look up. Returns: A list of n most recent commands, or all available most recent commands, if n exceeds size of the command history, in chronological order. """ return self._commands[-n:] def lookup_prefix(self, prefix, n): """Look up the n most recent commands that starts with prefix. Args: prefix: The prefix to lookup. 
n: Number of most recent commands to look up. Returns: A list of n most recent commands that have the specified prefix, or all available most recent commands that have the prefix, if n exceeds the number of history commands with the prefix. """ commands = [cmd for cmd in self._commands if cmd.startswith(prefix)] return commands[-n:] # TODO(cais): Lookup by regex. class MenuItem(object): """A class for an item in a text-based menu.""" def __init__(self, caption, content, enabled=True): """Menu constructor. TODO(cais): Nested menu is currently not supported. Support it. Args: caption: (str) caption of the menu item. content: Content of the menu item. For a menu item that triggers a command, for example, content is the command string. enabled: (bool) whether this menu item is enabled. """ self._caption = caption self._content = content self._enabled = enabled @property def caption(self): return self._caption @property def type(self): return self._node_type @property def content(self): return self._content def is_enabled(self): return self._enabled def disable(self): self._enabled = False def enable(self): self._enabled = True class Menu(object): """A class for text-based menu.""" def __init__(self, name=None): """Menu constructor. Args: name: (str or None) name of this menu. """ self._name = name self._items = [] def append(self, item): """Append an item to the Menu. Args: item: (MenuItem) the item to be appended. """ self._items.append(item) def insert(self, index, item): self._items.insert(index, item) def num_items(self): return len(self._items) def captions(self): return [item.caption for item in self._items] def caption_to_item(self, caption): """Get a MenuItem from the caption. Args: caption: (str) The caption to look up. Returns: (MenuItem) The first-match menu item with the caption, if any. Raises: LookupError: If a menu item with the caption does not exist. 
""" captions = self.captions() if caption not in captions: raise LookupError("There is no menu item with the caption \"%s\"" % caption) return self._items[captions.index(caption)] def format_as_single_line(self, prefix=None, divider=" | ", enabled_item_attrs=None, disabled_item_attrs=None): """Format the menu as a single-line RichTextLines object. Args: prefix: (str) String added to the beginning of the line. divider: (str) The dividing string between the menu items. enabled_item_attrs: (list or str) Attributes applied to each enabled menu item, e.g., ["bold", "underline"]. disabled_item_attrs: (list or str) Attributes applied to each disabled menu item, e.g., ["red"]. Returns: (RichTextLines) A single-line output representing the menu, with font_attr_segs marking the individual menu items. """ if (enabled_item_attrs is not None and not isinstance(enabled_item_attrs, list)): enabled_item_attrs = [enabled_item_attrs] if (disabled_item_attrs is not None and not isinstance(disabled_item_attrs, list)): disabled_item_attrs = [disabled_item_attrs] menu_line = prefix if prefix is not None else "" attr_segs = [] for item in self._items: menu_line += item.caption item_name_begin = len(menu_line) - len(item.caption) if item.is_enabled(): final_attrs = [item] if enabled_item_attrs: final_attrs.extend(enabled_item_attrs) attr_segs.append((item_name_begin, len(menu_line), final_attrs)) else: if disabled_item_attrs: attr_segs.append( (item_name_begin, len(menu_line), disabled_item_attrs)) menu_line += divider return RichTextLines(menu_line, font_attr_segs={0: attr_segs})
FabricaSocial/sic
refs/heads/master
sic/auth/models.py
10644
from django.db import models # Create your models here.
wbc2010/django1.2.5
refs/heads/master
django1.2.5/django/contrib/contenttypes/management.py
315
from django.contrib.contenttypes.models import ContentType
from django.db.models import get_apps, get_models, signals
from django.utils.encoding import smart_unicode

# NOTE(review): Python 2 module (print statements, raw_input) from Django 1.2;
# kept as-is, comments only.

def update_contenttypes(app, created_models, verbosity=2, **kwargs):
    """
    Creates content types for models in the given app, removing any model
    entries that no longer have a matching model class.
    """
    # Drop any previously cached ContentType rows so lookups below hit the DB.
    ContentType.objects.clear_cache()
    # app.__name__ is e.g. "myproject.myapp.models"; [-2] extracts "myapp".
    content_types = list(ContentType.objects.filter(app_label=app.__name__.split('.')[-2]))
    app_models = get_models(app)
    if not app_models:
        return
    for klass in app_models:
        opts = klass._meta
        try:
            ct = ContentType.objects.get(app_label=opts.app_label,
                                         model=opts.object_name.lower())
            # Still backed by a model class: remove from the "stale" candidates.
            content_types.remove(ct)
        except ContentType.DoesNotExist:
            # No row yet for this model: create it.
            ct = ContentType(name=smart_unicode(opts.verbose_name_raw),
                             app_label=opts.app_label,
                             model=opts.object_name.lower())
            ct.save()
            if verbosity >= 2:
                print "Adding content type '%s | %s'" % (ct.app_label, ct.model)
    # The presence of any remaining content types means the supplied app has an
    # undefined model. Confirm that the content type is stale before deletion.
    if content_types:
        if kwargs.get('interactive', False):
            content_type_display = '\n'.join(['    %s | %s' % (ct.app_label, ct.model) for ct in content_types])
            ok_to_delete = raw_input("""The following content types are stale and need to be deleted:

%s

Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.

    Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
        else:
            # Non-interactive runs (e.g. scripted syncdb) never delete rows;
            # deletion cascades to related objects, so it requires explicit consent.
            ok_to_delete = False

        if ok_to_delete == 'yes':
            for ct in content_types:
                if verbosity >= 2:
                    print "Deleting stale content type '%s | %s'" % (ct.app_label, ct.model)
                ct.delete()
        else:
            if verbosity >= 2:
                print "Stale content types remain."
def update_all_contenttypes(verbosity=2, **kwargs):
    """Create/refresh ContentType rows for every installed app.

    Delegates to update_contenttypes() once per app; created_models is
    passed as None because this call is not tied to a specific syncdb
    signal payload.
    """
    for app in get_apps():
        update_contenttypes(app, None, verbosity, **kwargs)

# Keep ContentType rows in sync automatically after every syncdb run.
signals.post_syncdb.connect(update_contenttypes)

if __name__ == "__main__":
    # Allow running this module directly as a one-off maintenance script.
    update_all_contenttypes()
opencivicdata/python-opencivicdata-django
refs/heads/master
opencivicdata/tests/test_division.py
2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE: fixed broken shebang (was "#!/u/bin/env python").
from opencivicdata.divisions import Division


def test_get():
    """Division.get resolves an OCD ID; the name appears in str(div)."""
    div = Division.get("ocd-division/country:de/state:by/cd:248")
    assert div.name == "Bad Kissingen"
    assert div.name in str(div)


def test_children():
    """country:ua (Ukraine) yields 25 first-level regions without duplicates."""
    # Renamed local from the misleading "us" — "ua" is Ukraine, not the USA.
    ukraine = Division.get("ocd-division/country:ua")
    assert len(list(ukraine.children("region", duplicates=False))) == 25
felixfontein/ansible
refs/heads/devel
test/units/module_utils/facts/system/distribution/test_parse_distribution_file_Slackware.py
35
# -*- coding: utf-8 -*- # Copyright (c) 2019 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type import os import pytest from ansible.module_utils.facts.system.distribution import DistributionFiles @pytest.mark.parametrize( ('distro_file', 'expected_version'), ( ('Slackware', '14.1'), ('SlackwareCurrent', '14.2+'), ) ) def test_parse_distribution_file_slackware(mock_module, distro_file, expected_version): test_input = { 'name': 'Slackware', 'data': open(os.path.join(os.path.dirname(__file__), '../../fixtures/distribution_files', distro_file)).read(), 'path': '/etc/os-release', 'collected_facts': None, } result = ( True, { 'distribution': 'Slackware', 'distribution_version': expected_version } ) distribution = DistributionFiles(module=mock_module()) assert result == distribution.parse_distribution_file_Slackware(**test_input)
Jusedawg/SickRage
refs/heads/develop
lib/imdb/parser/http/searchPersonParser.py
76
""" parser.http.searchPersonParser module (imdb package). This module provides the HTMLSearchPersonParser class (and the search_person_parser instance), used to parse the results of a search for a given person. E.g., when searching for the name "Mel Gibson", the parsed page would be: http://akas.imdb.com/find?q=Mel+Gibson&nm=on&mx=20 Copyright 2004-2013 Davide Alberani <da@erlug.linux.it> 2008 H. Turgut Uyar <uyar@tekir.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ import re from imdb.utils import analyze_name, build_name from utils import Extractor, Attribute, analyze_imdbid from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser def _cleanName(n): """Clean the name in a title tag.""" if not n: return u'' n = n.replace('Filmography by type for', '') # FIXME: temporary. return n class DOMBasicPersonParser(DOMBasicMovieParser): """Simply get the name of a person and the imdbID. 
It's used by the DOMHTMLSearchPersonParser class to return a result for a direct match (when a search on IMDb results in a single person, the web server sends directly the movie page.""" _titleFunct = lambda self, x: analyze_name(_cleanName(x), canonical=1) _reAKASp = re.compile(r'(?:aka|birth name) (<em>")(.*?)"(<br>|<\/em>|<\/td>)', re.I | re.M) class DOMHTMLSearchPersonParser(DOMHTMLSearchMovieParser): """Parse the html page that the IMDb web server shows when the "new search system" is used, for persons.""" _BaseParser = DOMBasicPersonParser _notDirectHitTitle = '<title>find - imdb' _titleBuilder = lambda self, x: build_name(x, canonical=True) _linkPrefix = '/name/nm' _attrs = [Attribute(key='data', multi=True, path={ 'link': "./a[1]/@href", 'name': "./a[1]/text()", 'index': "./text()[1]", 'akas': ".//div[@class='_imdbpyAKA']/text()" }, postprocess=lambda x: ( analyze_imdbid(x.get('link') or u''), analyze_name((x.get('name') or u'') + \ (x.get('index') or u''), canonical=1), x.get('akas') ))] extractors = [Extractor(label='search', path="//td[@class='result_text']/a[starts-with(@href, '/name/nm')]/..", attrs=_attrs)] def preprocess_string(self, html_string): if self._notDirectHitTitle in html_string[:10240].lower(): html_string = _reAKASp.sub( r'\1<div class="_imdbpyAKA">\2::</div>\3', html_string) return DOMHTMLSearchMovieParser.preprocess_string(self, html_string) _OBJECTS = { 'search_person_parser': ((DOMHTMLSearchPersonParser,), {'kind': 'person', '_basic_parser': DOMBasicPersonParser}) }
ClearCorp/odoo-costa-rica
refs/heads/9.0
TODO-9.0/l10n_cr_account/__init__.py
2
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Addons modules by CLEARCORP S.A. # Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import account_invoice
sparkslabs/kamaelia
refs/heads/master
Code/Python/Kamaelia/Kamaelia/Support/DVB/CRC.py
3
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- """\ CRC algorithm used to verify the integrity of data in DVB transport streams. """ # # old slow CRC algorithm. # # ought to recode a faster algorithm sometime soon # # def crc32(data): # poly = 0x4c11db7 # crc = 0xffffffff # for byte in data: # byte = ord(byte) # for bit in range(7,-1,-1): # MSB to LSB # z32 = crc>>31 # top bit # crc = crc << 1 # if ((byte>>bit)&1) ^ z32: # crc = crc ^ poly # crc = crc & 0xffffffff # return crc # # def dvbcrc(data): # return not crc32(data) def __MakeCRC32(polynomial = 0x4c11db7,initial=0xffffffff): """\ MakeCRC32([polynomial][,inital]) -> (string -> 32bit CRC of binary string data) Returns a function that calculatees the 32 bit CRC of binary data in a string, using the specified CRC polynomial and initial value. 
""" # precalculate the effect on the CRC of processing a byte of data # create a table of values to xor by, indexed by # new_byte_of_data xor most-sig-byte of current CRC xorvals = [] for x in range(0,256): # x is the result of top byte of crc xored with new data byte crc = long(x)<<24 for bit in range(7,-1,-1): # MSB to LSB z32 = crc>>31 # top bit crc = crc << 1 if z32: crc = crc ^ polynomial crc = crc & 0xffffffff xorvals.append(crc & 0xffffffff) # only interested in bottom 24 bits # define the function that will do the crc, using the table we've just # precalculated. def fastcrc32(data): crc = 0xffffffff for byte in data: byte = ord(byte) xv = xorvals[byte ^ (crc>>24)] crc = xv ^ ((crc & 0xffffff)<<8) return crc return fastcrc32 __dvbcrc = __MakeCRC32(polynomial = 0x04c11db7) dvbcrc = lambda data : not __dvbcrc(data)
tarzan0820/odoo
refs/heads/8.0
openerp/addons/base/ir/ir_values.py
228
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import pickle from openerp import tools from openerp.osv import osv, fields from openerp.osv.orm import except_orm EXCLUDED_FIELDS = set(( 'report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data', 'search_view', )) #: Possible slots to bind an action to with :meth:`~.set_action` ACTION_SLOTS = [ "client_action_multi", # sidebar wizard action "client_print_multi", # sidebar report printing button "client_action_relate", # sidebar related link "tree_but_open", # double-click on item in tree view "tree_but_action", # deprecated: same as tree_but_open ] class ir_values(osv.osv): """Holds internal model-specific action bindings and user-defined default field values. definitions. This is a legacy internal model, mixing two different concepts, and will likely be updated or replaced in a future version by cleaner, separate models. You should not depend explicitly on it. 
The purpose of each ``ir.values`` entry depends on its type, defined by the ``key`` column: * 'default': user-defined default values, used when creating new records of this model: * 'action': binding of an action to a particular *action slot* of this model, making the action easily available in the user interface for this model. The ``key2`` column acts as a qualifier, further refining the type of the entry. The possible values are: * for 'default' entries: an optional condition restricting the cases where this particular default value will be applicable, or ``False`` for no condition * for 'action' entries: the ``key2`` qualifier is one of the available action slots, defining how this action can be invoked: * ``'client_print_multi'`` for report printing actions that will be available on views displaying items from this model * ``'client_action_multi'`` for assistants (wizards) actions that will be available in views displaying objects of this model * ``'client_action_relate'`` for links towards related documents that should be available in views displaying objects of this model * ``'tree_but_open'`` for actions that will be triggered when double-clicking an item from this model in a hierarchical tree view Each entry is specific to a model (``model`` column), and for ``'actions'`` type, may even be made specific to a given record of that model when the ``res_id`` column contains a record ID (``False`` means it's global for all records). The content of the entry is defined by the ``value`` column, which may either contain an arbitrary value, or a reference string defining the action that should be executed. .. rubric:: Usage: default values The ``'default'`` entries are usually defined manually by the users, and set by their UI clients calling :meth:`~.set_default`. These default values are then automatically used by the ORM every time a new record is about to be created, i.e. 
when :meth:`~openerp.osv.osv.osv.default_get` or :meth:`~openerp.osv.osv.osv.create` are called. .. rubric:: Usage: action bindings Business applications will usually bind their actions during installation, and OpenERP UI clients will apply them as defined, based on the list of actions included in the result of :meth:`~openerp.osv.osv.osv.fields_view_get`, or directly returned by explicit calls to :meth:`~.get_actions`. """ _name = 'ir.values' def _value_unpickle(self, cursor, user, ids, name, arg, context=None): res = {} for record in self.browse(cursor, user, ids, context=context): value = record[name[:-9]] if record.key == 'default' and value: # default values are pickled on the fly try: value = str(pickle.loads(value)) except Exception: pass res[record.id] = value return res def _value_pickle(self, cursor, user, id, name, value, arg, context=None): if context is None: context = {} ctx = context.copy() if self.CONCURRENCY_CHECK_FIELD in ctx: del ctx[self.CONCURRENCY_CHECK_FIELD] record = self.browse(cursor, user, id, context=context) if record.key == 'default': # default values are pickled on the fly value = pickle.dumps(value) self.write(cursor, user, id, {name[:-9]: value}, context=ctx) def onchange_object_id(self, cr, uid, ids, object_id, context=None): if not object_id: return {} act = self.pool.get('ir.model').browse(cr, uid, object_id, context=context) return { 'value': {'model': act.model} } def onchange_action_id(self, cr, uid, ids, action_id, context=None): if not action_id: return {} act = self.pool.get('ir.actions.actions').browse(cr, uid, action_id, context=context) return { 'value': {'value_unpickle': act.type+','+str(act.id)} } _columns = { 'name': fields.char('Name', required=True), 'model': fields.char('Model Name', select=True, required=True, help="Model to which this entry applies"), # TODO: model_id and action_id should be read-write function fields 'model_id': fields.many2one('ir.model', 'Model (change only)', size=128, help="Model to which 
this entry applies - " "helper field for setting a model, will " "automatically set the correct model name"), 'action_id': fields.many2one('ir.actions.actions', 'Action (change only)', help="Action bound to this entry - " "helper field for binding an action, will " "automatically set the correct reference"), 'value': fields.text('Value', help="Default value (pickled) or reference to an action"), 'value_unpickle': fields.function(_value_unpickle, fnct_inv=_value_pickle, type='text', string='Default value or action reference'), 'key': fields.selection([('action','Action'),('default','Default')], 'Type', select=True, required=True, help="- Action: an action attached to one slot of the given model\n" "- Default: a default value for a model field"), 'key2' : fields.char('Qualifier', select=True, help="For actions, one of the possible action slots: \n" " - client_action_multi\n" " - client_print_multi\n" " - client_action_relate\n" " - tree_but_open\n" "For defaults, an optional condition" ,), 'res_id': fields.integer('Record ID', select=True, help="Database identifier of the record to which this applies. 
" "0 = for all records"), 'user_id': fields.many2one('res.users', 'User', ondelete='cascade', select=True, help="If set, action binding only applies for this user."), 'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', select=True, help="If set, action binding only applies for this company") } _defaults = { 'key': 'action', 'key2': 'tree_but_open', } def _auto_init(self, cr, context=None): super(ir_values, self)._auto_init(cr, context) cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_values_key_model_key2_res_id_user_id_idx\'') if not cr.fetchone(): cr.execute('CREATE INDEX ir_values_key_model_key2_res_id_user_id_idx ON ir_values (key, model, key2, res_id, user_id)') def create(self, cr, uid, vals, context=None): res = super(ir_values, self).create(cr, uid, vals, context=context) self.get_defaults_dict.clear_cache(self) return res def write(self, cr, uid, ids, vals, context=None): res = super(ir_values, self).write(cr, uid, ids, vals, context=context) self.get_defaults_dict.clear_cache(self) return res def unlink(self, cr, uid, ids, context=None): res = super(ir_values, self).unlink(cr, uid, ids, context=context) self.get_defaults_dict.clear_cache(self) return res def set_default(self, cr, uid, model, field_name, value, for_all_users=True, company_id=False, condition=False): """Defines a default value for the given model and field_name. Any previous default for the same scope (model, field_name, value, for_all_users, company_id, condition) will be replaced and lost in the process. Defaults can be later retrieved via :meth:`~.get_defaults`, which will return the highest priority default for any given field. 
Defaults that are more specific have a higher priority, in the following order (highest to lowest): * specific to user and company * specific to user only * specific to company only * global to everyone :param string model: model name :param string field_name: field name to which the default applies :param value: the default field value to set :type value: any serializable Python value :param bool for_all_users: whether the default should apply to everybody or only the user calling the method :param int company_id: optional ID of the company to which the default should apply. If omitted, the default will be global. If True is passed, the current user's company will be used. :param string condition: optional condition specification that can be used to restrict the applicability of the default values (e.g. based on another field's value). This is an opaque string as far as the API is concerned, but client stacks typically use single-field conditions in the form ``'key=stringified_value'``. 
(Currently, the condition is trimmed to 200 characters, so values that share the same first 200 characters always match) :return: id of the newly created ir.values entry """ if isinstance(value, unicode): value = value.encode('utf8') if company_id is True: # should be company-specific, need to get company id user = self.pool.get('res.users').browse(cr, uid, uid) company_id = user.company_id.id # remove existing defaults for the same scope search_criteria = [ ('key', '=', 'default'), ('key2', '=', condition and condition[:200]), ('model', '=', model), ('name', '=', field_name), ('user_id', '=', False if for_all_users else uid), ('company_id','=', company_id) ] self.unlink(cr, uid, self.search(cr, uid, search_criteria)) return self.create(cr, uid, { 'name': field_name, 'value': pickle.dumps(value), 'model': model, 'key': 'default', 'key2': condition and condition[:200], 'user_id': False if for_all_users else uid, 'company_id': company_id, }) def get_default(self, cr, uid, model, field_name, for_all_users=True, company_id=False, condition=False): """ Return the default value defined for model, field_name, users, company and condition. Return ``None`` if no such default exists. """ search_criteria = [ ('key', '=', 'default'), ('key2', '=', condition and condition[:200]), ('model', '=', model), ('name', '=', field_name), ('user_id', '=', False if for_all_users else uid), ('company_id','=', company_id) ] defaults = self.browse(cr, uid, self.search(cr, uid, search_criteria)) return pickle.loads(defaults[0].value.encode('utf-8')) if defaults else None def get_defaults(self, cr, uid, model, condition=False): """Returns any default values that are defined for the current model and user, (and match ``condition``, if specified), previously registered via :meth:`~.set_default`. 
Defaults are global to a model, not field-specific, but an optional ``condition`` can be provided to restrict matching default values to those that were defined for the same condition (usually based on another field's value). Default values also have priorities depending on whom they apply to: only the highest priority value will be returned for any field. See :meth:`~.set_default` for more details. :param string model: model name :param string condition: optional condition specification that can be used to restrict the applicability of the default values (e.g. based on another field's value). This is an opaque string as far as the API is concerned, but client stacks typically use single-field conditions in the form ``'key=stringified_value'``. (Currently, the condition is trimmed to 200 characters, so values that share the same first 200 characters always match) :return: list of default values tuples of the form ``(id, field_name, value)`` (``id`` is the ID of the default entry, usually irrelevant) """ # use a direct SQL query for performance reasons, # this is called very often query = """SELECT v.id, v.name, v.value FROM ir_values v LEFT JOIN res_users u ON (v.user_id = u.id) WHERE v.key = %%s AND v.model = %%s AND (v.user_id = %%s OR v.user_id IS NULL) AND (v.company_id IS NULL OR v.company_id = (SELECT company_id from res_users where id = %%s) ) %s ORDER BY v.user_id, u.company_id""" params = ('default', model, uid, uid) if condition: query %= 'AND v.key2 = %s' params += (condition[:200],) else: query %= 'AND v.key2 is NULL' cr.execute(query, params) # keep only the highest priority default for each field defaults = {} for row in cr.dictfetchall(): defaults.setdefault(row['name'], (row['id'], row['name'], pickle.loads(row['value'].encode('utf-8')))) return defaults.values() # use ormcache: this is called a lot by BaseModel.default_get()! 
@tools.ormcache(skiparg=2) def get_defaults_dict(self, cr, uid, model, condition=False): """ Returns a dictionary mapping field names with their corresponding default value. This method simply improves the returned value of :meth:`~.get_defaults`. """ return dict((f, v) for i, f, v in self.get_defaults(cr, uid, model, condition)) def set_action(self, cr, uid, name, action_slot, model, action, res_id=False): """Binds an the given action to the given model's action slot - for later retrieval via :meth:`~.get_actions`. Any existing binding of the same action to the same slot is first removed, allowing an update of the action's name. See the class description for more details about the various action slots: :class:`~ir_values`. :param string name: action label, usually displayed by UI client :param string action_slot: the action slot to which the action should be bound to - one of ``client_action_multi``, ``client_print_multi``, ``client_action_relate``, ``tree_but_open``. :param string model: model name :param string action: action reference, in the form ``'model,id'`` :param int res_id: optional record id - will bind the action only to a specific record of the model, not all records. :return: id of the newly created ir.values entry """ assert isinstance(action, basestring) and ',' in action, \ 'Action definition must be an action reference, e.g. 
"ir.actions.act_window,42"' assert action_slot in ACTION_SLOTS, \ 'Action slot (%s) must be one of: %r' % (action_slot, ACTION_SLOTS) # remove existing action definition of same slot and value search_criteria = [ ('key', '=', 'action'), ('key2', '=', action_slot), ('model', '=', model), ('res_id', '=', res_id or 0), # int field -> NULL == 0 ('value', '=', action), ] self.unlink(cr, uid, self.search(cr, uid, search_criteria)) return self.create(cr, uid, { 'key': 'action', 'key2': action_slot, 'model': model, 'res_id': res_id, 'name': name, 'value': action, }) def get_actions(self, cr, uid, action_slot, model, res_id=False, context=None): """Retrieves the list of actions bound to the given model's action slot. See the class description for more details about the various action slots: :class:`~.ir_values`. :param string action_slot: the action slot to which the actions should be bound to - one of ``client_action_multi``, ``client_print_multi``, ``client_action_relate``, ``tree_but_open``. :param string model: model name :param int res_id: optional record id - will bind the action only to a specific record of the model, not all records. :return: list of action tuples of the form ``(id, name, action_def)``, where ``id`` is the ID of the default entry, ``name`` is the action label, and ``action_def`` is a dict containing the action definition as obtained by calling :meth:`~openerp.osv.osv.osv.read` on the action record. 
""" assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot # use a direct SQL query for performance reasons, # this is called very often query = """SELECT v.id, v.name, v.value FROM ir_values v WHERE v.key = %s AND v.key2 = %s AND v.model = %s AND (v.res_id = %s OR v.res_id IS NULL OR v.res_id = 0) ORDER BY v.id""" cr.execute(query, ('action', action_slot, model, res_id or None)) results = {} for action in cr.dictfetchall(): if not action['value']: continue # skip if undefined action_model_name, action_id = action['value'].split(',') if action_model_name not in self.pool: continue # unknow model? skip it action_model = self.pool[action_model_name] fields = [field for field in action_model._fields if field not in EXCLUDED_FIELDS] # FIXME: needs cleanup try: action_def = action_model.read(cr, uid, int(action_id), fields, context) if action_def: if action_model_name in ('ir.actions.report.xml', 'ir.actions.act_window'): groups = action_def.get('groups_id') if groups: cr.execute('SELECT 1 FROM res_groups_users_rel WHERE gid IN %s AND uid=%s', (tuple(groups), uid)) if not cr.fetchone(): if action['name'] == 'Menuitem': raise osv.except_osv('Error!', 'You do not have the permission to perform this operation!!!') continue # keep only the first action registered for each action name results[action['name']] = (action['id'], action['name'], action_def) except except_orm: continue return sorted(results.values()) def _map_legacy_model_list(self, model_list, map_fn, merge_results=False): """Apply map_fn to the various models passed, according to legacy way to specify models/records. """ assert isinstance(model_list, (list, tuple)), \ "model_list should be in the form [model,..] 
or [(model,res_id), ..]" results = [] for model in model_list: res_id = False if isinstance(model, (list, tuple)): model, res_id = model result = map_fn(model, res_id) # some of the functions return one result at a time (tuple or id) # and some return a list of many of them - care for both if merge_results: results.extend(result) else: results.append(result) return results # Backards-compatibility adapter layer to retrofit into split API def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False): """Deprecated legacy method to set default values and bind actions to models' action slots. Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.set_default` (``key=='default'``) or :meth:`~.set_action` (``key == 'action'``). :deprecated: As of v6.1, ``set_default()`` or ``set_action()`` should be used directly. """ assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']" if key == 'default': def do_set(model,res_id): return self.set_default(cr, uid, model, field_name=name, value=value, for_all_users=(not preserve_user), company_id=company, condition=key2) elif key == 'action': def do_set(model,res_id): return self.set_action(cr, uid, name, action_slot=key2, model=model, action=value, res_id=res_id) return self._map_legacy_model_list(models, do_set) def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True): """Deprecated legacy method to get the list of default values or actions bound to models' action slots. Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.get_defaults` (``key=='default'``) or :meth:`~.get_actions` (``key == 'action'``) :deprecated: As of v6.1, ``get_defaults()`` or ``get_actions()`` should be used directly. 
""" assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']" if key == 'default': def do_get(model,res_id): return self.get_defaults(cr, uid, model, condition=key2) elif key == 'action': def do_get(model,res_id): return self.get_actions(cr, uid, action_slot=key2, model=model, res_id=res_id, context=context) return self._map_legacy_model_list(models, do_get, merge_results=True) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
cecep-edu/edx-platform
refs/heads/eucalyptus.2
scripts/release.py
37
#!/usr/bin/env python """ a release-master multitool """ from __future__ import print_function, unicode_literals import sys import argparse from datetime import date, timedelta import re import collections import functools import textwrap import json import getpass try: from path import Path as path from git import Repo, Commit from git.refs.symbolic import SymbolicReference from dateutil.parser import parse as parse_datestring import requests import yaml except ImportError: print("Error: missing dependencies! Please run this command to install them:") print("pip install path.py requests python-dateutil GitPython PyYAML") sys.exit(1) try: from pygments.console import colorize except ImportError: colorize = lambda color, text: text JIRA_RE = re.compile(r"\b[A-Z]{2,}-\d+\b") PR_BRANCH_RE = re.compile(r"remotes/edx/pr/(\d+)") def project_root(): directory = path(__file__).abspath().dirname() while not (directory / ".git").exists(): directory = directory.parent return directory PROJECT_ROOT = project_root() repo = Repo(PROJECT_ROOT) git = repo.git PEOPLE_YAML = "https://raw.githubusercontent.com/edx/repo-tools-data/master/people.yaml" class memoized(object): """ Decorator. Caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize """ def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): if not isinstance(args, collections.Hashable): # uncacheable. a list, for instance. # better to not cache than blow up. 
return self.func(*args) if args in self.cache: return self.cache[args] else: value = self.func(*args) self.cache[args] = value return value def __repr__(self): '''Return the function's docstring.''' return self.func.__doc__ def __get__(self, obj, objtype): '''Support instance methods.''' return functools.partial(self.__call__, obj) def make_parser(): parser = argparse.ArgumentParser(description="release master multitool") parser.add_argument( '--previous', '--prev', '-p', metavar="GITREV", default="edx/release", help="previous release [%(default)s]") parser.add_argument( '--current', '--curr', '-c', metavar="GITREV", default="HEAD", help="current release candidate [%(default)s]") parser.add_argument( '--date', '-d', help="expected release date: defaults to " "next Tuesday [{}]".format(default_release_date())) parser.add_argument( '--merge', '-m', action="store_true", default=False, help="include merge commits") parser.add_argument( '--table', '-t', action="store_true", default=False, help="only print table") return parser def ensure_pr_fetch(): """ Make sure that the git repository contains a remote called "edx" that has two fetch URLs; one for the main codebase, and one for pull requests. Returns True if the environment was modified in any way, False otherwise. """ modified = False remotes = git.remote().splitlines() if 'edx' not in remotes: git.remote("add", "edx", "https://github.com/edx/edx-platform.git") modified = True # it would be nice to use the git-python API to do this, but it doesn't seem # to support configurations with more than one value per key. :( edx_fetches = git.config("remote.edx.fetch", get_all=True).splitlines() pr_fetch = '+refs/pull/*/head:refs/remotes/edx/pr/*' if pr_fetch not in edx_fetches: git.config("remote.edx.fetch", pr_fetch, add=True) modified = True git.fetch("edx") return modified def get_github_creds(): """ Returns GitHub credentials if they exist, as a two-tuple of (username, token). Otherwise, return None. 
""" netrc_auth = requests.utils.get_netrc_auth("https://api.github.com") if netrc_auth: return netrc_auth config_file = path("~/.config/edx-release").expand() if config_file.isfile(): with open(config_file) as f: config = json.load(f) github_creds = config.get("credentials", {}).get("api.github.com", {}) username = github_creds.get("username", "") token = github_creds.get("token", "") if username and token: return (username, token) return None def create_github_creds(): """ https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization """ headers = {"User-Agent": "edx-release"} payload = { "note": "edx-release", "scopes": ["repo"], } username = raw_input("GitHub username: ") password = getpass.getpass("GitHub password: ") response = requests.post( "https://api.github.com/authorizations", auth=(username, password), headers=headers, data=json.dumps(payload), ) # is the user using two-factor authentication? otp_header = response.headers.get("X-GitHub-OTP") if not response.ok and otp_header and otp_header.startswith("required;"): # get two-factor code, redo the request headers["X-GitHub-OTP"] = raw_input("Two-factor authentication code: ") response = requests.post( "https://api.github.com/authorizations", auth=(username, password), headers=headers, data=json.dumps(payload), ) if not response.ok: message = response.json()["message"] if message != "Validation Failed": raise requests.exceptions.RequestException(message) else: # A token called "edx-release" already exists on GitHub. # Delete it, and try again. 
token_id = get_github_auth_id(username, password, "edx-release") if token_id: delete_github_auth_token(username, password, token_id) response = requests.post( "https://api.github.com/authorizations", auth=(username, password), headers=headers, data=json.dumps(payload), ) if not response.ok: message = response.json()["message"] raise requests.exceptions.RequestException(message) return (username, response.json()["token"]) def get_github_auth_id(username, password, note): """ Return the ID associated with the GitHub auth token with the given note. If no such auth token exists, return None. """ response = requests.get( "https://api.github.com/authorizations", auth=(username, password), headers={"User-Agent": "edx-release"}, ) if not response.ok: message = response.json()["message"] raise requests.exceptions.RequestException(message) for auth_token in response.json(): if auth_token["note"] == "edx-release": return auth_token["id"] return None def delete_github_auth_token(username, password, token_id): response = requests.delete( "https://api.github.com/authorizations/{id}".format(id=token_id), auth=(username, password), headers={"User-Agent": "edx-release"}, ) if not response.ok: message = response.json()["message"] raise requests.exceptions.RequestException(message) def ensure_github_creds(attempts=3): """ Make sure that we have GitHub OAuth credentials. This will check the user's .netrc file, as well as the ~/.config/edx-release file. If no credentials exist in either place, it will prompt the user to create OAuth credentials, and store them in ~/.config/edx-release. Returns False if we found credentials, True if we had to create them. """ if get_github_creds(): return False # Looks like we need to create the OAuth creds print("We need to set up OAuth authentication with GitHub's API. 
" "Your password will not be stored.", file=sys.stderr) token = None for _ in range(attempts): try: username, token = create_github_creds() except requests.exceptions.RequestException as e: print( "Invalid authentication: {}".format(e.message), file=sys.stderr, ) continue else: break if token: print("Successfully authenticated to GitHub", file=sys.stderr) if not token: print("Too many invalid authentication attempts.", file=sys.stderr) return False config_file = path("~/.config/edx-release").expand() # make sure parent directory exists config_file.parent.makedirs_p() # read existing config if it exists if config_file.isfile(): with open(config_file) as f: config = json.load(f) else: config = {} # update config if 'credentials' not in config: config["credentials"] = {} if 'api.github.com' not in config['credentials']: config["credentials"]["api.github.com"] = {} config["credentials"]["api.github.com"]["username"] = username config["credentials"]["api.github.com"]["token"] = token # write it back out with open(config_file, "w") as f: json.dump(config, f) return True def default_release_date(): """ Returns a date object corresponding to the expected date of the next release: normally, this Tuesday. """ today = date.today() TUESDAY = 2 days_until_tuesday = (TUESDAY - today.isoweekday()) % 7 return today + timedelta(days=days_until_tuesday) def parse_ticket_references(text): """ Given a commit message, return a list of all JIRA ticket references in that message. If there are no ticket references, return an empty list. """ return set(JIRA_RE.findall(text)) class DoesNotExist(Exception): def __init__(self, message, commit, branch): self.message = message self.commit = commit self.branch = branch Exception.__init__(self, message) def get_merge_commit(commit, branch="master"): """ Given a commit that was merged into the given branch, return the merge commit for that event. 
    http://stackoverflow.com/questions/8475448/find-merge-commit-which-include-a-specific-commit
    """
    # Candidate merge commits: commits on the ancestry path from `commit`
    # to `branch`, intersected with the first-parent history of `branch`.
    # The last (oldest, since rev-list is newest-first) such commit is the
    # merge that brought `commit` into `branch`.
    commit_range = "{}..{}".format(commit, branch)
    ancestry_paths = git.rev_list(commit_range, ancestry_path=True).splitlines()
    first_parents = git.rev_list(commit_range, first_parent=True).splitlines()
    both = set(ancestry_paths) & set(first_parents)
    for commit_hash in reversed(ancestry_paths):
        if commit_hash in both:
            return repo.commit(commit_hash)
    # no merge commit!
    msg = "No merge commit for {commit} in {branch}!".format(
        commit=commit, branch=branch,
    )
    raise DoesNotExist(msg, commit, branch)


def get_pr_info(num):
    """
    Returns the info from the GitHub API
    """
    url = "https://api.github.com/repos/edx/edx-platform/pulls/{num}".format(num=num)
    # Only the token is used here (for the Authorization header); the
    # username half of the credentials tuple is unpacked but unused.
    username, token = get_github_creds()
    headers = {
        "Authorization": "token {}".format(token),
        "User-Agent": "edx-release",
    }
    response = requests.get(url, headers=headers)
    result = response.json()
    if not response.ok:
        raise requests.exceptions.RequestException(result["message"])
    return result


def get_merged_prs(start_ref, end_ref):
    """
    Return the set of all pull requests (as integers) that were merged
    between the start_ref and end_ref.
    """
    ensure_pr_fetch()
    # A PR branch merged between the two refs is one that is NOT merged
    # as of start_ref but IS merged as of end_ref.
    start_unmerged_branches = set(
        branch.strip()
        for branch in git.branch(all=True, no_merged=start_ref).splitlines()
    )
    end_merged_branches = set(
        branch.strip()
        for branch in git.branch(all=True, merged=end_ref).splitlines()
    )
    merged_between_refs = start_unmerged_branches & end_merged_branches
    merged_prs = set()
    for branch in merged_between_refs:
        # Extract the numeric PR id from branch names like "edx/pr/1234".
        match = PR_BRANCH_RE.search(branch)
        if match:
            merged_prs.add(int(match.group(1)))
    return merged_prs


@memoized
def prs_by_email(start_ref, end_ref):
    """
    Returns an ordered dictionary of {email: pr_list}
    Email is the email address of the person who merged the pull request
    The dictionary is alphabetically ordered by email address
    The pull request list is ordered by merge date
    """
    username, token = get_github_creds()
    headers = {
        "Authorization": "token {}".format(token),
        "User-Agent": "edx-release",
    }
    # `emails` maps from other_emails to primary email, based on people.yaml.
    emails = {}
    people_resp = requests.get(PEOPLE_YAML, headers=headers)
    people_resp.raise_for_status()
    people = yaml.safe_load(people_resp.text)
    for person in people.itervalues():
        if 'other_emails' in person:
            for other_email in person['other_emails']:
                emails[other_email] = person['email']
    # pr_num -> merge commit pairs, grouped (unordered) by merger's email.
    unordered_data = collections.defaultdict(set)
    for pr_num in get_merged_prs(start_ref, end_ref):
        ref = "refs/remotes/edx/pr/{num}".format(num=pr_num)
        branch = SymbolicReference(repo, ref)
        try:
            merge = get_merge_commit(branch.commit, end_ref)
        except DoesNotExist:
            pass  # this commit will be included in the commits_without_prs table
        else:
            # Normalize to the person's primary email where known.
            email = emails.get(merge.author.email, merge.author.email)
            if email.endswith("@users.noreply.github.com"):
                # A bogus GitHub address, look up their GitHub name in
                # people.yaml
                username = email.split("@")[0]
                try:
                    email = people[username]['email']
                except KeyError:
                    pass
            unordered_data[email].add((pr_num, merge))
    # Emit emails alphabetically; each email's PR list by merge date.
    ordered_data = collections.OrderedDict()
    for email in sorted(unordered_data.keys()):
        ordered = sorted(unordered_data[email], key=lambda pair: pair[1].authored_date)
        ordered_data[email] = [num for num, merge in ordered]
    return ordered_data


def generate_pr_table(start_ref, end_ref):
    """
    Return a UTF-8 string corresponding to a pull request table to embed in Confluence.
    """
    header = "|| Merged By || Author || Title || PR || JIRA || Release Notes? || Verified? ||"
    pr_link = "[#{num}|https://github.com/edx/edx-platform/pull/{num}]"
    user_link = "[@{user}|https://github.com/{user}]"
    rows = [header]
    prbe = prs_by_email(start_ref, end_ref)
    for email, pull_requests in prbe.items():
        for i, pull_request in enumerate(pull_requests):
            try:
                pr_info = get_pr_info(pull_request)
                title = pr_info["title"] or ""
                body = pr_info["body"] or ""
                author = pr_info["user"]["login"]
            except requests.exceptions.RequestException as e:
                # Degrade gracefully: emit a placeholder row rather than
                # aborting the whole table on one failed API call.
                message = (
                    "Warning: could not fetch data for #{num}: "
                    "{message}".format(num=pull_request, message=e.message)
                )
                print(colorize("red", message), file=sys.stderr)
                title = "?"
                body = "?"
                author = ""
            # Escape Confluence wiki-markup metacharacters in the title.
            rows.append("| {merged_by} | {author} | {title} | {pull_request} | {jira} | {release_notes} | {verified} |".format(
                merged_by=email if i == 0 else "",
                author=user_link.format(user=author) if author else "",
                title=title.replace("|", "\|").replace('{', '\{').replace('}', '\}'),
                pull_request=pr_link.format(num=pull_request),
                jira=", ".join(parse_ticket_references(body)),
                release_notes="",
                verified="",
            ))
    return "\n".join(rows).encode("utf8")


@memoized
def get_commits_not_in_prs(start_ref, end_ref):
    """
    Return a tuple of commits that exist between start_ref and end_ref,
    but were not merged to the end_ref. If everyone is following the
    pull request process correctly, this should return an empty tuple.
    """
    return tuple(Commit.iter_items(
        repo,
        "{start}..{end}".format(start=start_ref, end=end_ref),
        first_parent=True,
        no_merges=True,
    ))


def generate_commit_table(start_ref, end_ref):
    """
    Return a string corresponding to a commit table to embed in Confluence.
    The commits in the table should only be commits that are not in the
    pull request table.
    """
    header = "|| Author || Summary || Commit || JIRA || Release Notes? || Verified? ||"
    commit_link = "[commit|https://github.com/edx/edx-platform/commit/{sha}]"
    rows = [header]
    commits = get_commits_not_in_prs(start_ref, end_ref)
    for commit in commits:
        # Escape the Confluence cell separator in commit summaries.
        rows.append("| {author} | {summary} | {commit} | {jira} | {release_notes} | {verified} |".format(
            author=commit.author.email,
            summary=commit.summary.replace("|", "\|"),
            commit=commit_link.format(sha=commit.hexsha),
            jira=", ".join(parse_ticket_references(commit.message)),
            release_notes="",
            verified="",
        ))
    return "\n".join(rows)


def generate_email(start_ref, end_ref, release_date=None):
    """
    Returns a string roughly approximating an email.
    """
    if release_date is None:
        release_date = default_release_date()
    prbe = prs_by_email(start_ref, end_ref)
    # NOTE: this template is runtime output; it is dedented and stripped
    # below, so its leading indentation is cosmetic only.
    email = """
        To: {emails}

        You merged at least one pull request for edx-platform
        that is going out in this upcoming release, and you are
        responsible for verifying those changes on the staging
        servers before the code is released. Please go to the
        release page to do so:

        https://openedx.atlassian.net/wiki/display/ENG/{date}+Release

        The staging server is: https://stage.edx.org

        Note that you are responsible for verifying any pull requests that
        you merged, whether you wrote the code or not. (If you didn't write
        the code, you can and should try to get the person who wrote the
        code to help verify the changes -- but even if you can't, you're
        still responsible!) If you find any bugs, please notify me and
        record the bugs on the release page. Thanks!

        By the way, if you have an @edx.org email address and are having
        trouble logging into stage, you may need to reset your password.

        If you would prefer this email be sent to a different email address
        of yours, send a request to oscm@edx.org with the details.
    """.format(
        emails=", ".join(prbe.keys()),
        date=release_date.isoformat(),
    )
    return textwrap.dedent(email).strip()


def main():
    parser = make_parser()
    args = parser.parse_args()
    if isinstance(args.date, basestring):
        # user passed in a custom date, so we need to parse it
        args.date = parse_datestring(args.date).date()

    # Make sure GitHub API credentials exist before doing any API work.
    ensure_github_creds()

    if args.table:
        # Table-only mode: print the PR table and exit.
        print(generate_pr_table(args.previous, args.current))
        return

    print("Generating stage verification email and its list of recipients. This may take around a minute...")
    print(generate_email(args.previous, args.current, release_date=args.date).encode('UTF-8'))
    print("\n")
    print("Wiki Table:")
    print(
        "Type Ctrl+Shift+D on Confluence to embed the following table "
        "in your release wiki page"
    )
    print("\n")
    print(generate_pr_table(args.previous, args.current))
    commits_without_prs = get_commits_not_in_prs(args.previous, args.current)
    if commits_without_prs:
        # Warn loudly about commits that bypassed the PR process.
        num = len(commits_without_prs)
        plural = num > 1
        print("\n")
        print(
            "There {are} {num} {commits} in this release that did not come in "
            "through pull requests!".format(
                num=num, are="are" if plural else "is",
                commits="commits" if plural else "commit"
            )
        )
        print("\n")
        print(generate_commit_table(args.previous, args.current))


if __name__ == "__main__":
    main()
openshift/openshift-tools
refs/heads/prod
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/openshift_health_checker/test/fluentd_test.py
55
import pytest
import json

from openshift_checks.logging.fluentd import Fluentd, OpenShiftCheckExceptionList, OpenShiftCheckException


def assert_error_in_list(expect_err, errorlist):
    # Membership is checked by exception `name`, not identity, so tests can
    # assert on the symbolic error code regardless of the message text.
    assert any(err.name == expect_err for err in errorlist), "{} in {}".format(str(expect_err), str(errorlist))


# ---- Fixture data: pods and nodes shaped like `oc get ... -o json` items ----

# A healthy fluentd pod scheduled on node1 (container ready, condition Ready).
fluentd_pod_node1 = {
    "metadata": {
        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
        "name": "logging-fluentd-1",
    },
    "spec": {"host": "node1", "nodeName": "node1"},
    "status": {
        "containerStatuses": [{"ready": True}],
        "conditions": [{"status": "True", "type": "Ready"}],
    }
}
# A fluentd pod on node2 whose container is not ready (simulates a down pod).
fluentd_pod_node2_down = {
    "metadata": {
        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
        "name": "logging-fluentd-2",
    },
    "spec": {"host": "node2", "nodeName": "node2"},
    "status": {
        "containerStatuses": [{"ready": False}],
        "conditions": [{"status": "False", "type": "Ready"}],
    }
}
# A node labeled for fluentd whose hostname label matches its name.
fluentd_node1 = {
    "metadata": {
        "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "node1"},
        "name": "node1",
    },
    "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.1"}]},
}
# A node labeled for fluentd but with a hostname label that differs from
# its name ("hostname" vs "node2").
fluentd_node2 = {
    "metadata": {
        "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "hostname"},
        "name": "node2",
    },
    "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.2"}]},
}
# A node with no "logging-infra-fluentd" label at all.
fluentd_node3_unlabeled = {
    "metadata": {
        "labels": {"kubernetes.io/hostname": "hostname"},
        "name": "node3",
    },
    "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.3"}]},
}


def test_get_fluentd_pods():
    # Happy path: one labeled node with exactly one ready fluentd pod
    # produces no check failure (run() returns a falsy result).
    check = Fluentd()
    check.exec_oc = lambda *_: json.dumps(dict(items=[fluentd_node1]))
    check.get_pods_for_component = lambda *_: [fluentd_pod_node1]
    assert not check.run()


@pytest.mark.parametrize('pods, nodes, expect_error', [
    (
        [],
        [],
        'NoNodesDefined',
    ),
    (
        [],
        [fluentd_node3_unlabeled],
        'NoNodesLabeled',
    ),
    (
        [],
        [fluentd_node1, fluentd_node3_unlabeled],
        'NodesUnlabeled',
    ),
    (
        [],
        [fluentd_node2],
        'MissingFluentdPod',
    ),
    (
        [fluentd_pod_node1, fluentd_pod_node1],
        [fluentd_node1],
        'TooManyFluentdPods',
    ),
    (
        [fluentd_pod_node2_down],
        [fluentd_node2],
        'FluentdNotRunning',
    ),
])
def test_get_fluentd_pods_errors(pods, nodes, expect_error):
    # Each misconfiguration raises either a single OpenShiftCheckException
    # or an OpenShiftCheckExceptionList containing the expected error name.
    check = Fluentd()
    check.exec_oc = lambda *_: json.dumps(dict(items=nodes))

    with pytest.raises(OpenShiftCheckException) as excinfo:
        check.check_fluentd(pods)
    if isinstance(excinfo.value, OpenShiftCheckExceptionList):
        assert_error_in_list(expect_error, excinfo.value)
    else:
        assert expect_error == excinfo.value.name


def test_bad_oc_node_list():
    # Non-JSON output from `oc` surfaces as a BadOcNodeList exception.
    check = Fluentd()
    check.exec_oc = lambda *_: "this isn't even json"

    with pytest.raises(OpenShiftCheckException) as excinfo:
        check.get_nodes_by_name()
    assert 'BadOcNodeList' == excinfo.value.name
cyclecomputing/boto
refs/heads/2.27.x
boto/s3/multidelete.py
244
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto import handler
import xml.sax


class Deleted(object):
    """
    A successfully deleted object in a multi-object delete request.

    :ivar key: Key name of the object that was deleted.

    :ivar version_id: Version id of the object that was deleted.

    :ivar delete_marker: If True, indicates the object deleted
        was a DeleteMarker.

    :ivar delete_marker_version_id: Version ID of the delete marker
        deleted.
    """
    def __init__(self, key=None, version_id=None,
                 delete_marker=False, delete_marker_version_id=None):
        self.key = key
        self.version_id = version_id
        self.delete_marker = delete_marker
        self.delete_marker_version_id = delete_marker_version_id

    def __repr__(self):
        if self.version_id:
            return '<Deleted: %s.%s>' % (self.key, self.version_id)
        else:
            return '<Deleted: %s>' % self.key

    def startElement(self, name, attrs, connection):
        # SAX-style parse hook: no nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # SAX-style parse hook: capture the text of known child elements;
        # unknown elements are stored verbatim as attributes.
        if name == 'Key':
            self.key = value
        elif name == 'VersionId':
            self.version_id = value
        elif name == 'DeleteMarker':
            if value.lower() == 'true':
                self.delete_marker = True
        elif name == 'DeleteMarkerVersionId':
            self.delete_marker_version_id = value
        else:
            setattr(self, name, value)


class Error(object):
    """
    An unsuccessful deleted object in a multi-object delete request.

    :ivar key: Key name of the object that was not deleted.

    :ivar version_id: Version id of the object that was not deleted.

    :ivar code: Status code of the failed delete operation.

    :ivar message: Status message of the failed delete operation.
    """
    def __init__(self, key=None, version_id=None,
                 code=None, message=None):
        self.key = key
        self.version_id = version_id
        self.code = code
        self.message = message

    def __repr__(self):
        if self.version_id:
            return '<Error: %s.%s(%s)>' % (self.key, self.version_id,
                                           self.code)
        else:
            return '<Error: %s(%s)>' % (self.key, self.code)

    def startElement(self, name, attrs, connection):
        # SAX-style parse hook: no nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # SAX-style parse hook: capture the text of known child elements;
        # unknown elements are stored verbatim as attributes.
        if name == 'Key':
            self.key = value
        elif name == 'VersionId':
            self.version_id = value
        elif name == 'Code':
            self.code = value
        elif name == 'Message':
            self.message = value
        else:
            setattr(self, name, value)


class MultiDeleteResult(object):
    """
    The status returned from a MultiObject Delete request.

    :ivar deleted: A list of successfully deleted objects.  Note that if
        the quiet flag was specified in the request, this list will
        be empty because only error responses would be returned.

    :ivar errors: A list of unsuccessfully deleted objects.
    """

    def __init__(self, bucket=None):
        # Bug fix: the original assigned ``self.bucket = None``, silently
        # discarding the ``bucket`` argument passed by the caller.
        self.bucket = bucket
        self.deleted = []
        self.errors = []

    def startElement(self, name, attrs, connection):
        # Delegate parsing of <Deleted> / <Error> subtrees to a fresh
        # result object, which is also recorded in the matching list.
        if name == 'Deleted':
            d = Deleted()
            self.deleted.append(d)
            return d
        elif name == 'Error':
            e = Error()
            self.errors.append(e)
            return e
        return None

    def endElement(self, name, value, connection):
        setattr(self, name, value)
ian-garrett/meetMe
refs/heads/master
env/lib/python3.4/site-packages/setuptools/command/egg_info.py
301
"""setuptools.command.egg_info Create a distribution's .egg-info directory and contents""" from distutils.filelist import FileList as _FileList from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys try: from setuptools_svn import svn_utils except ImportError: pass from setuptools import Command from setuptools.command.sdist import sdist from setuptools.compat import basestring, PY3, StringIO from setuptools.command.sdist import walk_revctrl from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from pkg_resources import packaging class egg_info(Command): description = "create a distribution's .egg-info directory" user_options = [ ('egg-base=', 'e', "directory containing .egg-info directories" " (default: top of the source tree)"), ('tag-svn-revision', 'r', "Add subversion revision ID to version number"), ('tag-date', 'd', "Add date stamp (e.g. 
20050528) to version number"), ('tag-build=', 'b', "Specify explicit tag to add to version number"), ('no-svn-revision', 'R', "Don't add subversion revision ID [default]"), ('no-date', 'D', "Don't include date stamp [default]"), ] boolean_options = ['tag-date', 'tag-svn-revision'] negative_opt = {'no-svn-revision': 'tag-svn-revision', 'no-date': 'tag-date'} def initialize_options(self): self.egg_name = None self.egg_version = None self.egg_base = None self.egg_info = None self.tag_build = None self.tag_svn_revision = 0 self.tag_date = 0 self.broken_egg_info = False self.vtags = None def save_version_info(self, filename): from setuptools.command.setopt import edit_config values = dict( egg_info=dict( tag_svn_revision=0, tag_date=0, tag_build=self.tags(), ) ) edit_config(filename, values) def finalize_options(self): self.egg_name = safe_name(self.distribution.get_name()) self.vtags = self.tags() self.egg_version = self.tagged_version() parsed_version = parse_version(self.egg_version) try: is_version = isinstance(parsed_version, packaging.version.Version) spec = ( "%s==%s" if is_version else "%s===%s" ) list( parse_requirements(spec % (self.egg_name, self.egg_version)) ) except ValueError: raise distutils.errors.DistutilsOptionError( "Invalid distribution name or version syntax: %s-%s" % (self.egg_name, self.egg_version) ) if self.egg_base is None: dirs = self.distribution.package_dir self.egg_base = (dirs or {}).get('', os.curdir) self.ensure_dirname('egg_base') self.egg_info = to_filename(self.egg_name) + '.egg-info' if self.egg_base != os.curdir: self.egg_info = os.path.join(self.egg_base, self.egg_info) if '-' in self.egg_name: self.check_broken_egg_info() # Set package version for the benefit of dumber commands # (e.g. sdist, bdist_wininst, etc.) 
# self.distribution.metadata.version = self.egg_version # If we bootstrapped around the lack of a PKG-INFO, as might be the # case in a fresh checkout, make sure that any special tags get added # to the version info # pd = self.distribution._patched_dist if pd is not None and pd.key == self.egg_name.lower(): pd._version = self.egg_version pd._parsed_version = parse_version(self.egg_version) self.distribution._patched_dist = None def write_or_delete_file(self, what, filename, data, force=False): """Write `data` to `filename` or delete if empty If `data` is non-empty, this routine is the same as ``write_file()``. If `data` is empty but not ``None``, this is the same as calling ``delete_file(filename)`. If `data` is ``None``, then this is a no-op unless `filename` exists, in which case a warning is issued about the orphaned file (if `force` is false), or deleted (if `force` is true). """ if data: self.write_file(what, filename, data) elif os.path.exists(filename): if data is None and not force: log.warn( "%s not set in setup(), but %s exists", what, filename ) return else: self.delete_file(filename) def write_file(self, what, filename, data): """Write `data` to `filename` (if not a dry run) after announcing it `what` is used in a log message to identify what is being written to the file. """ log.info("writing %s to %s", what, filename) if PY3: data = data.encode("utf-8") if not self.dry_run: f = open(filename, 'wb') f.write(data) f.close() def delete_file(self, filename): """Delete `filename` (if not a dry run) after announcing it""" log.info("deleting %s", filename) if not self.dry_run: os.unlink(filename) def tagged_version(self): version = self.distribution.get_version() # egg_info may be called more than once for a distribution, # in which case the version string already contains all tags. 
if self.vtags and version.endswith(self.vtags): return safe_version(version) return safe_version(version + self.vtags) def run(self): self.mkpath(self.egg_info) installer = self.distribution.fetch_build_egg for ep in iter_entry_points('egg_info.writers'): ep.require(installer=installer) writer = ep.resolve() writer(self, ep.name, os.path.join(self.egg_info, ep.name)) # Get rid of native_libs.txt if it was put there by older bdist_egg nl = os.path.join(self.egg_info, "native_libs.txt") if os.path.exists(nl): self.delete_file(nl) self.find_sources() def tags(self): version = '' if self.tag_build: version += self.tag_build if self.tag_svn_revision: rev = self.get_svn_revision() if rev: # is 0 if it's not an svn working copy version += '-r%s' % rev if self.tag_date: import time version += time.strftime("-%Y%m%d") return version @staticmethod def get_svn_revision(): if 'svn_utils' not in globals(): return "0" return str(svn_utils.SvnInfo.load(os.curdir).get_revision()) def find_sources(self): """Generate SOURCES.txt manifest file""" manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") mm = manifest_maker(self.distribution) mm.manifest = manifest_filename mm.run() self.filelist = mm.filelist def check_broken_egg_info(self): bei = self.egg_name + '.egg-info' if self.egg_base != os.curdir: bei = os.path.join(self.egg_base, bei) if os.path.exists(bei): log.warn( "-" * 78 + '\n' "Note: Your current .egg-info directory has a '-' in its name;" '\nthis will not work correctly with "setup.py develop".\n\n' 'Please rename %s to %s to correct this problem.\n' + '-' * 78, bei, self.egg_info ) self.broken_egg_info = self.egg_info self.egg_info = bei # make it work for now class FileList(_FileList): """File list that accepts only existing, platform-independent paths""" def append(self, item): if item.endswith('\r'): # Fix older sdists built on Windows item = item[:-1] path = convert_path(item) if self._safe_path(path): self.files.append(path) def extend(self, paths): 
self.files.extend(filter(self._safe_path, paths)) def _repair(self): """ Replace self.files with only safe paths Because some owners of FileList manipulate the underlying ``files`` attribute directly, this method must be called to repair those paths. """ self.files = list(filter(self._safe_path, self.files)) def _safe_path(self, path): enc_warn = "'%s' not %s encodable -- skipping" # To avoid accidental trans-codings errors, first to unicode u_path = unicode_utils.filesys_decode(path) if u_path is None: log.warn("'%s' in unexpected encoding -- skipping" % path) return False # Must ensure utf-8 encodability utf8_path = unicode_utils.try_encode(u_path, "utf-8") if utf8_path is None: log.warn(enc_warn, path, 'utf-8') return False try: # accept is either way checks out if os.path.exists(u_path) or os.path.exists(utf8_path): return True # this will catch any encode errors decoding u_path except UnicodeEncodeError: log.warn(enc_warn, path, sys.getfilesystemencoding()) class manifest_maker(sdist): template = "MANIFEST.in" def initialize_options(self): self.use_defaults = 1 self.prune = 1 self.manifest_only = 1 self.force_manifest = 1 def finalize_options(self): pass def run(self): self.filelist = FileList() if not os.path.exists(self.manifest): self.write_manifest() # it must exist so it'll get in the list self.filelist.findall() self.add_defaults() if os.path.exists(self.template): self.read_template() self.prune_file_list() self.filelist.sort() self.filelist.remove_duplicates() self.write_manifest() def _manifest_normalize(self, path): path = unicode_utils.filesys_decode(path) return path.replace(os.sep, '/') def write_manifest(self): """ Write the file list in 'self.filelist' to the manifest file named by 'self.manifest'. 
""" self.filelist._repair() # Now _repairs should encodability, but not unicode files = [self._manifest_normalize(f) for f in self.filelist.files] msg = "writing manifest file '%s'" % self.manifest self.execute(write_file, (self.manifest, files), msg) def warn(self, msg): # suppress missing-file warnings from sdist if not msg.startswith("standard file not found:"): sdist.warn(self, msg) def add_defaults(self): sdist.add_defaults(self) self.filelist.append(self.template) self.filelist.append(self.manifest) rcfiles = list(walk_revctrl()) if rcfiles: self.filelist.extend(rcfiles) elif os.path.exists(self.manifest): self.read_manifest() ei_cmd = self.get_finalized_command('egg_info') self._add_egg_info(cmd=ei_cmd) self.filelist.include_pattern("*", prefix=ei_cmd.egg_info) def _add_egg_info(self, cmd): """ Add paths for egg-info files for an external egg-base. The egg-info files are written to egg-base. If egg-base is outside the current working directory, this method searchs the egg-base directory for files to include in the manifest. Uses distutils.filelist.findall (which is really the version monkeypatched in by setuptools/__init__.py) to perform the search. Since findall records relative paths, prefix the returned paths with cmd.egg_base, so add_default's include_pattern call (which is looking for the absolute cmd.egg_info) will match them. 
""" if cmd.egg_base == os.curdir: # egg-info files were already added by something else return discovered = distutils.filelist.findall(cmd.egg_base) resolved = (os.path.join(cmd.egg_base, path) for path in discovered) self.filelist.allfiles.extend(resolved) def prune_file_list(self): build = self.get_finalized_command('build') base_dir = self.distribution.get_fullname() self.filelist.exclude_pattern(None, prefix=build.build_base) self.filelist.exclude_pattern(None, prefix=base_dir) sep = re.escape(os.sep) self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep, is_regex=1) def write_file(filename, contents): """Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it. """ contents = "\n".join(contents) # assuming the contents has been vetted for utf-8 encoding contents = contents.encode("utf-8") with open(filename, "wb") as f: # always write POSIX-style manifest f.write(contents) def write_pkg_info(cmd, basename, filename): log.info("writing %s", filename) if not cmd.dry_run: metadata = cmd.distribution.metadata metadata.version, oldver = cmd.egg_version, metadata.version metadata.name, oldname = cmd.egg_name, metadata.name try: # write unescaped data to PKG-INFO, so older pkg_resources # can still parse it metadata.write_pkg_info(cmd.egg_info) finally: metadata.name, metadata.version = oldname, oldver safe = getattr(cmd.distribution, 'zip_safe', None) from setuptools.command import bdist_egg bdist_egg.write_safety_flag(cmd.egg_info, safe) def warn_depends_obsolete(cmd, basename, filename): if os.path.exists(filename): log.warn( "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" "Use the install_requires/extras_require setup() args instead." 
) def _write_requirements(stream, reqs): lines = yield_lines(reqs or ()) append_cr = lambda line: line + '\n' lines = map(append_cr, lines) stream.writelines(lines) def write_requirements(cmd, basename, filename): dist = cmd.distribution data = StringIO() _write_requirements(data, dist.install_requires) extras_require = dist.extras_require or {} for extra in sorted(extras_require): data.write('\n[{extra}]\n'.format(**vars())) _write_requirements(data, extras_require[extra]) cmd.write_or_delete_file("requirements", filename, data.getvalue()) def write_setup_requirements(cmd, basename, filename): data = StringIO() _write_requirements(data, cmd.distribution.setup_requires) cmd.write_or_delete_file("setup-requirements", filename, data.getvalue()) def write_toplevel_names(cmd, basename, filename): pkgs = dict.fromkeys( [ k.split('.', 1)[0] for k in cmd.distribution.iter_distribution_names() ] ) cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n') def overwrite_arg(cmd, basename, filename): write_arg(cmd, basename, filename, True) def write_arg(cmd, basename, filename, force=False): argname = os.path.splitext(basename)[0] value = getattr(cmd.distribution, argname, None) if value is not None: value = '\n'.join(value) + '\n' cmd.write_or_delete_file(argname, filename, value, force) def write_entries(cmd, basename, filename): ep = cmd.distribution.entry_points if isinstance(ep, basestring) or ep is None: data = ep elif ep is not None: data = [] for section, contents in sorted(ep.items()): if not isinstance(contents, basestring): contents = EntryPoint.parse_group(section, contents) contents = '\n'.join(sorted(map(str, contents.values()))) data.append('[%s]\n%s\n\n' % (section, contents)) data = ''.join(data) cmd.write_or_delete_file('entry points', filename, data, True) def get_pkg_info_revision(): # See if we can get a -r### off of PKG-INFO, in case this is an sdist of # a subversion revision # if os.path.exists('PKG-INFO'): f = open('PKG-INFO', 'rU') 
for line in f: match = re.match(r"Version:.*-r(\d+)\s*$", line) if match: return int(match.group(1)) f.close() return 0
sbellem/django
refs/heads/master
tests/contenttypes_tests/tests.py
81
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime from django.apps.registry import Apps, apps from django.contrib.contenttypes import management from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site from django.core import checks from django.db import connections, models from django.test import TestCase, override_settings from django.test.utils import captured_stdout from django.utils.encoding import force_str, force_text from .models import Article, Author, SchemeIncludedURL @override_settings(ROOT_URLCONF='contenttypes_tests.urls') class ContentTypesViewsTests(TestCase): @classmethod def setUpTestData(cls): # don't use the manager because we want to ensure the site exists # with pk=1, regardless of whether or not it already exists. cls.site1 = Site(pk=1, domain='testserver', name='testserver') cls.site1.save() cls.author1 = Author.objects.create(name='Boris') cls.article1 = Article.objects.create( title='Old Article', slug='old_article', author=cls.author1, date_created=datetime.datetime(2001, 1, 1, 21, 22, 23) ) cls.article2 = Article.objects.create( title='Current Article', slug='current_article', author=cls.author1, date_created=datetime.datetime(2007, 9, 17, 21, 22, 23) ) cls.article3 = Article.objects.create( title='Future Article', slug='future_article', author=cls.author1, date_created=datetime.datetime(3000, 1, 1, 21, 22, 23) ) cls.scheme1 = SchemeIncludedURL.objects.create(url='http://test_scheme_included_http/') cls.scheme2 = SchemeIncludedURL.objects.create(url='https://test_scheme_included_https/') cls.scheme3 = SchemeIncludedURL.objects.create(url='//test_default_scheme_kept/') def test_shortcut_with_absolute_url(self): "Can view a shortcut for an Author object that has a get_absolute_url method" for obj in Author.objects.all(): short_url = '/shortcut/%s/%s/' % 
(ContentType.objects.get_for_model(Author).id, obj.pk) response = self.client.get(short_url) self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(), status_code=302, target_status_code=404) def test_shortcut_with_absolute_url_including_scheme(self): """ Can view a shortcut when object's get_absolute_url returns a full URL the tested URLs are: "http://...", "https://..." and "//..." """ for obj in SchemeIncludedURL.objects.all(): short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(SchemeIncludedURL).id, obj.pk) response = self.client.get(short_url) self.assertRedirects(response, obj.get_absolute_url(), status_code=302, fetch_redirect_response=False) def test_shortcut_no_absolute_url(self): "Shortcuts for an object that has no get_absolute_url method raises 404" for obj in Article.objects.all(): short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk) response = self.client.get(short_url) self.assertEqual(response.status_code, 404) def test_wrong_type_pk(self): short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects') response = self.client.get(short_url) self.assertEqual(response.status_code, 404) def test_shortcut_bad_pk(self): short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242') response = self.client.get(short_url) self.assertEqual(response.status_code, 404) def test_nonint_content_type(self): an_author = Author.objects.all()[0] short_url = '/shortcut/%s/%s/' % ('spam', an_author.pk) response = self.client.get(short_url) self.assertEqual(response.status_code, 404) def test_bad_content_type(self): an_author = Author.objects.all()[0] short_url = '/shortcut/%s/%s/' % (42424242, an_author.pk) response = self.client.get(short_url) self.assertEqual(response.status_code, 404) def test_create_contenttype_on_the_spot(self): """ Make sure ContentTypeManager.get_for_model creates the corresponding content type if it doesn't 
exist in the database (for some reason). """ class ModelCreatedOnTheFly(models.Model): name = models.CharField() class Meta: verbose_name = 'a model created on the fly' app_label = 'my_great_app' apps = Apps() ct = ContentType.objects.get_for_model(ModelCreatedOnTheFly) self.assertEqual(ct.app_label, 'my_great_app') self.assertEqual(ct.model, 'modelcreatedonthefly') self.assertEqual(force_text(ct), 'modelcreatedonthefly') class IsolatedModelsTestCase(TestCase): def setUp(self): # The unmanaged models need to be removed after the test in order to # prevent bad interactions with the flush operation in other tests. self._old_models = apps.app_configs['contenttypes_tests'].models.copy() def tearDown(self): apps.app_configs['contenttypes_tests'].models = self._old_models apps.all_models['contenttypes_tests'] = self._old_models apps.clear_cache() @override_settings(SILENCED_SYSTEM_CHECKS=['fields.W342']) # ForeignKey(unique=True) class GenericForeignKeyTests(IsolatedModelsTestCase): def test_str(self): class Model(models.Model): field = GenericForeignKey() expected = "contenttypes_tests.Model.field" actual = force_str(Model.field) self.assertEqual(expected, actual) def test_missing_content_type_field(self): class TaggedItem(models.Model): # no content_type field object_id = models.PositiveIntegerField() content_object = GenericForeignKey() errors = TaggedItem.content_object.check() expected = [ checks.Error( "The GenericForeignKey content type references the non-existent field 'TaggedItem.content_type'.", hint=None, obj=TaggedItem.content_object, id='contenttypes.E002', ) ] self.assertEqual(errors, expected) def test_invalid_content_type_field(self): class Model(models.Model): content_type = models.IntegerField() # should be ForeignKey object_id = models.PositiveIntegerField() content_object = GenericForeignKey( 'content_type', 'object_id') errors = Model.content_object.check() expected = [ checks.Error( "'Model.content_type' is not a ForeignKey.", 
hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.", obj=Model.content_object, id='contenttypes.E003', ) ] self.assertEqual(errors, expected) def test_content_type_field_pointing_to_wrong_model(self): class Model(models.Model): content_type = models.ForeignKey('self', models.CASCADE) # should point to ContentType object_id = models.PositiveIntegerField() content_object = GenericForeignKey( 'content_type', 'object_id') errors = Model.content_object.check() expected = [ checks.Error( "'Model.content_type' is not a ForeignKey to 'contenttypes.ContentType'.", hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.", obj=Model.content_object, id='contenttypes.E004', ) ] self.assertEqual(errors, expected) def test_missing_object_id_field(self): class TaggedItem(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) # missing object_id field content_object = GenericForeignKey() errors = TaggedItem.content_object.check() expected = [ checks.Error( "The GenericForeignKey object ID references the non-existent field 'object_id'.", hint=None, obj=TaggedItem.content_object, id='contenttypes.E001', ) ] self.assertEqual(errors, expected) def test_field_name_ending_with_underscore(self): class Model(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object_ = GenericForeignKey( 'content_type', 'object_id') errors = Model.content_object_.check() expected = [ checks.Error( 'Field names must not end with an underscore.', hint=None, obj=Model.content_object_, id='fields.E001', ) ] self.assertEqual(errors, expected) @override_settings(INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'contenttypes_tests']) def test_generic_foreign_key_checks_are_performed(self): class MyGenericForeignKey(GenericForeignKey): def check(self, **kwargs): return ['performed!'] class 
Model(models.Model): content_object = MyGenericForeignKey() errors = checks.run_checks() self.assertEqual(errors, ['performed!']) class GenericRelationshipTests(IsolatedModelsTestCase): def test_valid_generic_relationship(self): class TaggedItem(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() class Bookmark(models.Model): tags = GenericRelation('TaggedItem') errors = Bookmark.tags.field.check() self.assertEqual(errors, []) def test_valid_generic_relationship_with_explicit_fields(self): class TaggedItem(models.Model): custom_content_type = models.ForeignKey(ContentType, models.CASCADE) custom_object_id = models.PositiveIntegerField() content_object = GenericForeignKey( 'custom_content_type', 'custom_object_id') class Bookmark(models.Model): tags = GenericRelation('TaggedItem', content_type_field='custom_content_type', object_id_field='custom_object_id') errors = Bookmark.tags.field.check() self.assertEqual(errors, []) def test_pointing_to_missing_model(self): class Model(models.Model): rel = GenericRelation('MissingModel') errors = Model.rel.field.check() expected = [ checks.Error( ("Field defines a relation with model 'MissingModel', " "which is either not installed, or is abstract."), hint=None, obj=Model.rel.field, id='fields.E300', ) ] self.assertEqual(errors, expected) def test_valid_self_referential_generic_relationship(self): class Model(models.Model): rel = GenericRelation('Model') content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey( 'content_type', 'object_id') errors = Model.rel.field.check() self.assertEqual(errors, []) def test_missing_generic_foreign_key(self): class TaggedItem(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() class Bookmark(models.Model): tags = GenericRelation('TaggedItem') 
errors = Bookmark.tags.field.check() expected = [ checks.Error( ("The GenericRelation defines a relation with the model " "'contenttypes_tests.TaggedItem', but that model does not have a " "GenericForeignKey."), hint=None, obj=Bookmark.tags.field, id='contenttypes.E004', ) ] self.assertEqual(errors, expected) @override_settings(TEST_SWAPPED_MODEL='contenttypes_tests.Replacement') def test_pointing_to_swapped_model(self): class Replacement(models.Model): pass class SwappedModel(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() class Meta: swappable = 'TEST_SWAPPED_MODEL' class Model(models.Model): rel = GenericRelation('SwappedModel') errors = Model.rel.field.check() expected = [ checks.Error( ("Field defines a relation with the model " "'contenttypes_tests.SwappedModel', " "which has been swapped out."), hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.", obj=Model.rel.field, id='fields.E301', ) ] self.assertEqual(errors, expected) def test_field_name_ending_with_underscore(self): class TaggedItem(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() class InvalidBookmark(models.Model): tags_ = GenericRelation('TaggedItem') errors = InvalidBookmark.tags_.field.check() expected = [ checks.Error( 'Field names must not end with an underscore.', hint=None, obj=InvalidBookmark.tags_.field, id='fields.E001', ) ] self.assertEqual(errors, expected) class UpdateContentTypesTests(TestCase): def setUp(self): self.before_count = ContentType.objects.count() ContentType.objects.create(app_label='contenttypes_tests', model='Fake') self.app_config = apps.get_app_config('contenttypes_tests') def test_interactive_true(self): """ interactive mode of update_contenttypes() (the default) should delete stale contenttypes. 
""" management.input = lambda x: force_str("yes") with captured_stdout() as stdout: management.update_contenttypes(self.app_config) self.assertIn("Deleting stale content type", stdout.getvalue()) self.assertEqual(ContentType.objects.count(), self.before_count) def test_interactive_false(self): """ non-interactive mode of update_contenttypes() shouldn't delete stale content types. """ with captured_stdout() as stdout: management.update_contenttypes(self.app_config, interactive=False) self.assertIn("Stale content types remain.", stdout.getvalue()) self.assertEqual(ContentType.objects.count(), self.before_count + 1) class TestRouter(object): def db_for_read(self, model, **hints): return 'other' def db_for_write(self, model, **hints): return 'default' @override_settings(DATABASE_ROUTERS=[TestRouter()]) class ContentTypesMultidbTestCase(TestCase): def setUp(self): # Whenever a test starts executing, only the "default" database is # connected. We explicitly connect to the "other" database here. If we # don't do it, then it will be implicitly connected later when we query # it, but in that case some database backends may automatically perform # extra queries upon connecting (notably mysql executes # "SET SQL_AUTO_IS_NULL = 0"), which will affect assertNumQueries(). connections['other'].ensure_connection() def test_multidb(self): """ Test that, when using multiple databases, we use the db_for_read (see #20401). """ ContentType.objects.clear_cache() with self.assertNumQueries(0, using='default'), \ self.assertNumQueries(1, using='other'): ContentType.objects.get_for_model(Author)
openstack/trove
refs/heads/master
trove/tests/scenario/groups/instance_actions_group.py
1
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.instance_actions_group" class InstanceActionsRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'instance_actions_runners' _runner_cls = 'InstanceActionsRunner' @test(depends_on_groups=[groups.INST_LOG], groups=[GROUP, groups.INST_ACTIONS]) class InstanceActionsGroup(TestGroup): """Test Instance Actions functionality.""" def __init__(self): super(InstanceActionsGroup, self).__init__( InstanceActionsRunnerFactory.instance()) @test def add_test_data(self): """Add test data.""" self.test_runner.run_add_test_data() @test(depends_on=[add_test_data]) def verify_test_data(self): """Verify test data.""" self.test_runner.run_verify_test_data() @test(runs_after=[verify_test_data]) def instance_restart(self): """Restart an existing instance.""" self.test_runner.run_instance_restart() @test(depends_on=[verify_test_data, instance_restart]) def verify_test_data_after_restart(self): """Verify test data after restart.""" self.test_runner.run_verify_test_data() @test(depends_on=[instance_restart], runs_after=[verify_test_data_after_restart]) def instance_resize_volume(self): """Resize attached volume.""" self.test_runner.run_instance_resize_volume() @test(depends_on=[verify_test_data, 
instance_resize_volume]) def verify_test_data_after_volume_resize(self): """Verify test data after volume resize.""" self.test_runner.run_verify_test_data() @test(depends_on=[add_test_data], runs_after=[verify_test_data_after_volume_resize]) def remove_test_data(self): """Remove test data.""" self.test_runner.run_remove_test_data() @test(depends_on_classes=[InstanceActionsGroup], groups=[GROUP, groups.INST_ACTIONS_RESIZE]) class InstanceActionsResizeGroup(TestGroup): """Test Instance Actions Resize functionality.""" def __init__(self): super(InstanceActionsResizeGroup, self).__init__( InstanceActionsRunnerFactory.instance()) @test def add_test_data(self): """Add test data.""" self.test_runner.run_add_test_data() @test(depends_on=[add_test_data]) def verify_test_data(self): """Verify test data.""" self.test_runner.run_verify_test_data() @test(runs_after=[verify_test_data]) def instance_resize_flavor(self): """Resize instance flavor.""" self.test_runner.run_instance_resize_flavor() @test(depends_on_classes=[InstanceActionsResizeGroup], groups=[GROUP, groups.INST_ACTIONS_RESIZE_WAIT]) class InstanceActionsResizeWaitGroup(TestGroup): """Test that Instance Actions Resize Completes.""" def __init__(self): super(InstanceActionsResizeWaitGroup, self).__init__( InstanceActionsRunnerFactory.instance()) @test def wait_for_instance_resize_flavor(self): """Wait for resize instance flavor to complete.""" self.test_runner.run_wait_for_instance_resize_flavor() @test(depends_on=[wait_for_instance_resize_flavor]) def verify_test_data_after_flavor_resize(self): """Verify test data after flavor resize.""" self.test_runner.run_verify_test_data() @test(runs_after=[verify_test_data_after_flavor_resize]) def remove_test_data(self): """Remove test data.""" self.test_runner.run_remove_test_data()
aeischeid/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/util.py
497
# Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """WebSocket utilities. """ import array import errno # Import hash classes from a module available and recommended for each Python # version and re-export those symbol. Use sha and md5 module in Python 2.4, and # hashlib module in Python 2.6. 
try: import hashlib md5_hash = hashlib.md5 sha1_hash = hashlib.sha1 except ImportError: import md5 import sha md5_hash = md5.md5 sha1_hash = sha.sha import StringIO import logging import os import re import socket import traceback import zlib try: from mod_pywebsocket import fast_masking except ImportError: pass def get_stack_trace(): """Get the current stack trace as string. This is needed to support Python 2.3. TODO: Remove this when we only support Python 2.4 and above. Use traceback.format_exc instead. """ out = StringIO.StringIO() traceback.print_exc(file=out) return out.getvalue() def prepend_message_to_exception(message, exc): """Prepend message to the exception.""" exc.args = (message + str(exc),) return def __translate_interp(interp, cygwin_path): """Translate interp program path for Win32 python to run cygwin program (e.g. perl). Note that it doesn't support path that contains space, which is typically true for Unix, where #!-script is written. For Win32 python, cygwin_path is a directory of cygwin binaries. Args: interp: interp command line cygwin_path: directory name of cygwin binary, or None Returns: translated interp command line. """ if not cygwin_path: return interp m = re.match('^[^ ]*/([^ ]+)( .*)?', interp) if m: cmd = os.path.join(cygwin_path, m.group(1)) return cmd + m.group(2) return interp def get_script_interp(script_path, cygwin_path=None): """Gets #!-interpreter command line from the script. It also fixes command path. When Cygwin Python is used, e.g. in WebKit, it could run "/usr/bin/perl -wT hello.pl". When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix "/usr/bin/perl" to "<cygwin_path>\perl.exe". Args: script_path: pathname of the script cygwin_path: directory name of cygwin binary, or None Returns: #!-interpreter command line, or None if it is not #!-script. 
""" fp = open(script_path) line = fp.readline() fp.close() m = re.match('^#!(.*)', line) if m: return __translate_interp(m.group(1), cygwin_path) return None def wrap_popen3_for_win(cygwin_path): """Wrap popen3 to support #!-script on Windows. Args: cygwin_path: path for cygwin binary if command path is needed to be translated. None if no translation required. """ __orig_popen3 = os.popen3 def __wrap_popen3(cmd, mode='t', bufsize=-1): cmdline = cmd.split(' ') interp = get_script_interp(cmdline[0], cygwin_path) if interp: cmd = interp + ' ' + cmd return __orig_popen3(cmd, mode, bufsize) os.popen3 = __wrap_popen3 def hexify(s): return ' '.join(map(lambda x: '%02x' % ord(x), s)) def get_class_logger(o): return logging.getLogger( '%s.%s' % (o.__class__.__module__, o.__class__.__name__)) class NoopMasker(object): """A masking object that has the same interface as RepeatedXorMasker but just returns the string passed in without making any change. """ def __init__(self): pass def mask(self, s): return s class RepeatedXorMasker(object): """A masking object that applies XOR on the string given to mask method with the masking bytes given to the constructor repeatedly. This object remembers the position in the masking bytes the last mask method call ended and resumes from that point on the next mask method call. 
""" def __init__(self, masking_key): self._masking_key = masking_key self._masking_key_index = 0 def _mask_using_swig(self, s): masked_data = fast_masking.mask( s, self._masking_key, self._masking_key_index) self._masking_key_index = ( (self._masking_key_index + len(s)) % len(self._masking_key)) return masked_data def _mask_using_array(self, s): result = array.array('B') result.fromstring(s) # Use temporary local variables to eliminate the cost to access # attributes masking_key = map(ord, self._masking_key) masking_key_size = len(masking_key) masking_key_index = self._masking_key_index for i in xrange(len(result)): result[i] ^= masking_key[masking_key_index] masking_key_index = (masking_key_index + 1) % masking_key_size self._masking_key_index = masking_key_index return result.tostring() if 'fast_masking' in globals(): mask = _mask_using_swig else: mask = _mask_using_array # By making wbits option negative, we can suppress CMF/FLG (2 octet) and # ADLER32 (4 octet) fields of zlib so that we can use zlib module just as # deflate library. DICTID won't be added as far as we don't set dictionary. # LZ77 window of 32K will be used for both compression and decompression. # For decompression, we can just use 32K to cover any windows size. For # compression, we use 32K so receivers must use 32K. # # Compression level is Z_DEFAULT_COMPRESSION. We don't have to match level # to decode. # # See zconf.h, deflate.cc, inflate.cc of zlib library, and zlibmodule.c of # Python. See also RFC1950 (ZLIB 3.3). 
class _Deflater(object): def __init__(self, window_bits): self._logger = get_class_logger(self) self._compress = zlib.compressobj( zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits) def compress(self, bytes): compressed_bytes = self._compress.compress(bytes) self._logger.debug('Compress input %r', bytes) self._logger.debug('Compress result %r', compressed_bytes) return compressed_bytes def compress_and_flush(self, bytes): compressed_bytes = self._compress.compress(bytes) compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH) self._logger.debug('Compress input %r', bytes) self._logger.debug('Compress result %r', compressed_bytes) return compressed_bytes def compress_and_finish(self, bytes): compressed_bytes = self._compress.compress(bytes) compressed_bytes += self._compress.flush(zlib.Z_FINISH) self._logger.debug('Compress input %r', bytes) self._logger.debug('Compress result %r', compressed_bytes) return compressed_bytes class _Inflater(object): def __init__(self, window_bits): self._logger = get_class_logger(self) self._window_bits = window_bits self._unconsumed = '' self.reset() def decompress(self, size): if not (size == -1 or size > 0): raise Exception('size must be -1 or positive') data = '' while True: if size == -1: data += self._decompress.decompress(self._unconsumed) # See Python bug http://bugs.python.org/issue12050 to # understand why the same code cannot be used for updating # self._unconsumed for here and else block. self._unconsumed = '' else: data += self._decompress.decompress( self._unconsumed, size - len(data)) self._unconsumed = self._decompress.unconsumed_tail if self._decompress.unused_data: # Encountered a last block (i.e. a block with BFINAL = 1) and # found a new stream (unused_data). We cannot use the same # zlib.Decompress object for the new stream. Create a new # Decompress object to decompress the new one. # # It's fine to ignore unconsumed_tail if unused_data is not # empty. 
self._unconsumed = self._decompress.unused_data self.reset() if size >= 0 and len(data) == size: # data is filled. Don't call decompress again. break else: # Re-invoke Decompress.decompress to try to decompress all # available bytes before invoking read which blocks until # any new byte is available. continue else: # Here, since unused_data is empty, even if unconsumed_tail is # not empty, bytes of requested length are already in data. We # don't have to "continue" here. break if data: self._logger.debug('Decompressed %r', data) return data def append(self, data): self._logger.debug('Appended %r', data) self._unconsumed += data def reset(self): self._logger.debug('Reset') self._decompress = zlib.decompressobj(-self._window_bits) # Compresses/decompresses given octets using the method introduced in RFC1979. class _RFC1979Deflater(object): """A compressor class that applies DEFLATE to given byte sequence and flushes using the algorithm described in the RFC1979 section 2.1. """ def __init__(self, window_bits, no_context_takeover): self._deflater = None if window_bits is None: window_bits = zlib.MAX_WBITS self._window_bits = window_bits self._no_context_takeover = no_context_takeover def filter(self, bytes, end=True, bfinal=False): if self._deflater is None: self._deflater = _Deflater(self._window_bits) if bfinal: result = self._deflater.compress_and_finish(bytes) # Add a padding block with BFINAL = 0 and BTYPE = 0. result = result + chr(0) self._deflater = None return result result = self._deflater.compress_and_flush(bytes) if end: # Strip last 4 octets which is LEN and NLEN field of a # non-compressed block added for Z_SYNC_FLUSH. result = result[:-4] if self._no_context_takeover and end: self._deflater = None return result class _RFC1979Inflater(object): """A decompressor class for byte sequence compressed and flushed following the algorithm described in the RFC1979 section 2.1. 
""" def __init__(self, window_bits=zlib.MAX_WBITS): self._inflater = _Inflater(window_bits) def filter(self, bytes): # Restore stripped LEN and NLEN field of a non-compressed block added # for Z_SYNC_FLUSH. self._inflater.append(bytes + '\x00\x00\xff\xff') return self._inflater.decompress(-1) class DeflateSocket(object): """A wrapper class for socket object to intercept send and recv to perform deflate compression and decompression transparently. """ # Size of the buffer passed to recv to receive compressed data. _RECV_SIZE = 4096 def __init__(self, socket): self._socket = socket self._logger = get_class_logger(self) self._deflater = _Deflater(zlib.MAX_WBITS) self._inflater = _Inflater(zlib.MAX_WBITS) def recv(self, size): """Receives data from the socket specified on the construction up to the specified size. Once any data is available, returns it even if it's smaller than the specified size. """ # TODO(tyoshino): Allow call with size=0. It should block until any # decompressed data is available. if size <= 0: raise Exception('Non-positive size passed') while True: data = self._inflater.decompress(size) if len(data) != 0: return data read_data = self._socket.recv(DeflateSocket._RECV_SIZE) if not read_data: return '' self._inflater.append(read_data) def sendall(self, bytes): self.send(bytes) def send(self, bytes): self._socket.sendall(self._deflater.compress_and_flush(bytes)) return len(bytes) # vi:sts=4 sw=4 et
grap/OCB
refs/heads/7.0
addons/mrp_operations/report/mrp_workorder_analysis.py
53
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields,osv from openerp import tools import openerp.addons.decimal_precision as dp class mrp_workorder(osv.osv): _name = "mrp.workorder" _description = "Work Order Report" _auto = False _columns = { 'year': fields.char('Year', size=64, readonly=True), 'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True), 'day': fields.char('Day', size=64, readonly=True), 'nbr': fields.integer('# of Lines', readonly=True), 'date': fields.date('Date', readonly=True), 'product_id': fields.many2one('product.product', 'Product', readonly=True), 'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), readonly=True), 'state': fields.selection([('draft','Draft'),('startworking', 'In Progress'),('pause','Pause'),('cancel','Cancelled'),('done','Finished')],'Status', readonly=True), 'total_hours': 
fields.float('Total Hours', readonly=True), 'total_cycles': fields.float('Total Cycles', readonly=True), 'delay': fields.float('Delay', readonly=True), 'production_id': fields.many2one('mrp.production', 'Production', readonly=True), 'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', readonly=True) } def init(self, cr): tools.drop_view_if_exists(cr, 'mrp_workorder') cr.execute(""" create or replace view mrp_workorder as ( select to_date(to_char(wl.date_planned, 'MM-dd-YYYY'),'MM-dd-YYYY') as date, to_char(wl.date_planned, 'YYYY') as year, to_char(wl.date_planned, 'MM') as month, to_char(wl.date_planned, 'YYYY-MM-DD') as day, min(wl.id) as id, mp.product_id as product_id, sum(wl.hour) as total_hours, avg(wl.delay) as delay, (w.costs_hour*sum(wl.hour)) as total_cost, wl.production_id as production_id, wl.workcenter_id as workcenter_id, sum(wl.cycle) as total_cycles, count(*) as nbr, sum(mp.product_qty) as product_qty, wl.state as state from mrp_production_workcenter_line wl left join mrp_workcenter w on (w.id = wl.workcenter_id) left join mrp_production mp on (mp.id = wl.production_id) group by w.costs_hour, mp.product_id, mp.name, wl.state, wl.date_planned, wl.production_id, wl.workcenter_id )""") mrp_workorder() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Alexoner/mooc
refs/heads/master
coursera/nlpintro-001/Assignment1/code/providedcode/transitionparser.py
1
# Code modified from the Natural Language Toolkit
# Original author: Long Duong <longdt219@gmail.com>

import tempfile
import pickle
import os
import copy
import operator

import scipy.sparse as sparse
import numpy as np


class Configuration(object):
    """
    Class for holding configuration which is the partial analysis of the
    input sentence.  The transition based parser aims at finding a set of
    operators that transfer the initial configuration to the terminal
    configuration.

    The configuration includes:
        - Stack: for storing partially processed words
        - Buffer: for storing remaining input words
        - Set of arcs: for storing partially built dependency tree

    This class also provides a method to represent a configuration as a
    list of features.
    """

    def __init__(self, dep_graph, feature_extractor):
        """
        :param dep_graph: the representation of an input in the form of
            dependency graph.
        :type dep_graph: DependencyGraph where the dependencies are not
            specified.
        :param feature_extractor: a function which operates on tokens, the
            stack, the buffer and returns a list of string features
        """
        # dep_graph.nodes contain the list of tokens for a sentence
        self.stack = [0]  # The root element
        # The rest is in the buffer.  Wrapped in list() so the buffer is
        # mutable under Python 3 as well: there range() is an immutable
        # lazy sequence, but the transition operations pop from the buffer.
        self.buffer = list(range(1, len(dep_graph.nodes)))
        self.arcs = []  # empty set of arcs
        self._tokens = dep_graph.nodes
        self._max_address = len(self.buffer)
        self._user_feature_extractor = feature_extractor

    def __str__(self):
        return 'Stack : ' + \
            str(self.stack) + ' Buffer : ' + \
            str(self.buffer) + ' Arcs : ' + str(self.arcs)

    def extract_features(self):
        """
        Extracts features from the configuration

        :return: list(str)
        """
        return self._user_feature_extractor(self._tokens, self.buffer,
                                            self.stack, self.arcs)


class TransitionParser(object):
    """An arc-eager transition parser."""

    def __init__(self, transition, feature_extractor):
        self._dictionary = {}        # feature string -> feature id
        self._transition = {}        # transition key -> class label (int)
        self._match_transition = {}  # class label -> transition key
        self._model = None
        self._user_feature_extractor = feature_extractor
        self.transitions = transition

    def _get_dep_relation(self, idx_parent, idx_child, depgraph):
        """Return the relation label if idx_parent heads idx_child in the
        gold graph, else None."""
        p_node = depgraph.nodes[idx_parent]
        c_node = depgraph.nodes[idx_child]

        if c_node['word'] is None:
            return None  # Root word
        if c_node['head'] == p_node['address']:
            return c_node['rel']
        return None

    def _convert_to_binary_features(self, features):
        """
        Converts a feature list into libsvm format, registering any
        previously unseen feature in the feature dictionary.

        :param features: list of feature strings which need to be converted
            to binary features
        :type features: list(str)
        :return: string of binary features in libsvm format, which is
            'featureID:value' pairs
        """
        unsorted_result = []
        for feature in features:
            self._dictionary.setdefault(feature, len(self._dictionary))
            unsorted_result.append(self._dictionary[feature])

        # Default value of each feature is 1.0
        return ' '.join(
            str(featureID) + ':1.0' for featureID in sorted(unsorted_result))

    @staticmethod
    def _is_projective(depgraph):
        """
        Checks if a dependency graph is projective, i.e. whether no two
        arcs cross each other.
        """
        arc_list = set()
        for key in depgraph.nodes:
            node = depgraph.nodes[key]
            if 'head' in node:
                childIdx = node['address']
                parentIdx = node['head']
                arc_list.add((parentIdx, childIdx))

        for (parentIdx, childIdx) in arc_list:
            # Ensure that childIdx < parentIdx
            if childIdx > parentIdx:
                temp = childIdx
                childIdx = parentIdx
                parentIdx = temp
            for k in range(childIdx + 1, parentIdx):
                for m in range(len(depgraph.nodes)):
                    if (m < childIdx) or (m > parentIdx):
                        if (k, m) in arc_list:
                            return False
                        if (m, k) in arc_list:
                            return False
        return True

    def _write_to_file(self, key, binary_features, input_file):
        """
        Write the binary features to the input file and update the
        transition dictionary.
        """
        self._transition.setdefault(key, len(self._transition) + 1)
        self._match_transition[self._transition[key]] = key

        input_str = str(self._transition[key]) + ' ' + binary_features + '\n'
        input_file.write(input_str.encode('utf-8'))

    def _create_training_examples_arc_eager(self, depgraphs, input_file):
        """
        Create the training examples in the libsvm format and write them to
        the input_file.

        Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing'
        by Yoav Goldberg and Joakim Nivre
        """
        training_seq = []
        # Non-projective sentences cannot be produced by an arc-eager
        # oracle, so they are filtered out of the training data.
        projective_dependency_graphs = [
            dg for dg in depgraphs if TransitionParser._is_projective(dg)]
        countProj = len(projective_dependency_graphs)

        for depgraph in projective_dependency_graphs:
            conf = Configuration(
                depgraph, self._user_feature_extractor.extract_features)
            while conf.buffer:
                b0 = conf.buffer[0]
                features = conf.extract_features()
                binary_features = self._convert_to_binary_features(features)

                if conf.stack:
                    s0 = conf.stack[-1]

                    # Left-arc operation
                    rel = self._get_dep_relation(b0, s0, depgraph)
                    if rel is not None:
                        key = self.transitions.LEFT_ARC + ':' + rel
                        self._write_to_file(key, binary_features, input_file)
                        self.transitions.left_arc(conf, rel)
                        training_seq.append(key)
                        continue

                    # Right-arc operation
                    rel = self._get_dep_relation(s0, b0, depgraph)
                    if rel is not None:
                        key = self.transitions.RIGHT_ARC + ':' + rel
                        self._write_to_file(key, binary_features, input_file)
                        self.transitions.right_arc(conf, rel)
                        training_seq.append(key)
                        continue

                    # Reduce operation: legal when the front of the buffer
                    # still has a gold dependency with some word whose
                    # address is below the top of the stack.
                    flag = False
                    for k in range(s0):
                        if self._get_dep_relation(k, b0, depgraph) is not None:
                            flag = True
                        if self._get_dep_relation(b0, k, depgraph) is not None:
                            flag = True
                    if flag:
                        key = self.transitions.REDUCE
                        self._write_to_file(key, binary_features, input_file)
                        self.transitions.reduce(conf)
                        training_seq.append(key)
                        continue

                # Shift operation as the default
                key = self.transitions.SHIFT
                self._write_to_file(key, binary_features, input_file)
                self.transitions.shift(conf)
                training_seq.append(key)

        print(" Number of training examples : {}".format(len(depgraphs)))
        print(" Number of valid (projective) examples : {}".format(countProj))
        return training_seq

    def train(self, depgraphs):
        """
        Train the SVM classifier from gold dependency graphs.

        :param depgraphs: list of DependencyGraph as the training data
        :type depgraphs: list(DependencyGraph)
        """
        # Imported lazily so the module can be used without scikit-learn
        # installed (e.g. for projectivity checks or feature encoding).
        from sklearn.datasets import load_svmlight_file
        from sklearn import svm

        try:
            input_file = tempfile.NamedTemporaryFile(
                prefix='transition_parse.train',
                dir=tempfile.gettempdir(),
                delete=False)
            self._create_training_examples_arc_eager(depgraphs, input_file)
            input_file.close()

            # Using the temporary file to train the libsvm classifier
            x_train, y_train = load_svmlight_file(input_file.name)
            # The parameter is set according to the paper:
            # Algorithms for Deterministic Incremental Dependency Parsing
            # by Joakim Nivre.  Note: this is very slow.
            self._model = svm.SVC(
                kernel='poly',
                degree=2,
                coef0=0,
                gamma=0.2,
                C=0.5,
                verbose=False,
                probability=True)

            print('Training support vector machine from input data file{}...'.format(
                input_file.name))
            self._model.fit(x_train, y_train)
            print('done!')
        finally:
            # The training file is deliberately kept on disk (useful for
            # debugging); uncomment to clean it up instead.
            # os.remove(input_file.name)
            pass

    def parse(self, depgraphs):
        """
        Parse a list of test sentences with the trained model.

        :param depgraphs: the list of test sentences, each represented as a
            dependency graph where the 'head' information is dummy
        :type depgraphs: list(DependencyGraph)
        :return: list(DependencyGraph) with the 'head' and 'rel' information
        :raises ValueError: if no model has been trained or loaded
        """
        result = []
        if not self._model:
            raise ValueError('No model trained!')

        for depgraph in depgraphs:
            conf = Configuration(
                depgraph, self._user_feature_extractor.extract_features)
            while conf.buffer:
                features = conf.extract_features()
                col = []
                row = []
                data = []
                for feature in features:
                    if feature in self._dictionary:
                        col.append(self._dictionary[feature])
                        row.append(0)
                        data.append(1.0)
                np_col = np.array(sorted(col))  # NB : index must be sorted
                np_row = np.array(row)
                np_data = np.array(data)
                x_test = sparse.csr_matrix(
                    (np_data, (np_row, np_col)),
                    shape=(1, len(self._dictionary)))

                # Try the transitions from most to least probable.
                pred_prob = self._model.predict_proba(x_test)[0]
                sorted_predicted_values = [
                    self._model.classes_[x[0]]
                    for x in sorted(enumerate(pred_prob),
                                    key=operator.itemgetter(1),
                                    reverse=True)]

                # Note that SHIFT is always a valid operation
                for y_pred in sorted_predicted_values:
                    if y_pred in self._match_transition:
                        strTransition = self._match_transition[y_pred]
                        try:
                            baseTransition, relation = strTransition.split(":")
                        except ValueError:
                            # Transition without a relation label (e.g.
                            # SHIFT/REDUCE).  relation is reset explicitly
                            # so a stale value from a previous iteration
                            # can never be applied by mistake.
                            baseTransition = strTransition
                            relation = None

                        if baseTransition == self.transitions.LEFT_ARC:
                            if self.transitions.left_arc(conf, relation) != -1:
                                break
                        elif baseTransition == self.transitions.RIGHT_ARC:
                            if self.transitions.right_arc(conf, relation) != -1:
                                break
                        elif baseTransition == self.transitions.REDUCE:
                            if self.transitions.reduce(conf) != -1:
                                break
                        elif baseTransition == self.transitions.SHIFT:
                            if self.transitions.shift(conf) != -1:
                                break
                    else:
                        raise ValueError(
                            "The predicted transition is not recognized, expected errors")

            # Finished with the operations; build the dependency graph from
            # conf.arcs
            new_depgraph = copy.deepcopy(depgraph)
            for key in new_depgraph.nodes:
                node = new_depgraph.nodes[key]
                node['rel'] = ''
                # With the default, all the tokens depend on the Root
                node['head'] = 0
            for (head, rel, child) in conf.arcs:
                c_node = new_depgraph.nodes[child]
                c_node['head'] = head
                c_node['rel'] = rel
            result.append(new_depgraph)

        return result

    def save(self, filepath):
        """Save the parser (including the trained model) with pickle."""
        with open(filepath, 'wb') as f:
            pickle.dump(self, f)

    @staticmethod
    def load(filepath):
        """
        Load a previously saved parser.

        The file is opened in binary mode: pickle data is binary, so a
        text-mode open fails under Python 3 (and can corrupt the stream on
        Windows under Python 2).
        """
        with open(filepath, 'rb') as f:
            return pickle.load(f)
Voxel8/OctoPrint
refs/heads/master
src/octoprint/plugins/softwareupdate/version_checks/git_commit.py
32
# coding=utf-8 from __future__ import absolute_import __author__ = "Gina Häußge <osd@foosel.net>" __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' __copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License" import errno import subprocess import sys import logging from ..exceptions import ConfigurationInvalid def _get_git_executables(): GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] return GITS def _git(args, cwd, hide_stderr=False): commands = _get_git_executables() for c in commands: try: p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue return None, None else: return None, None stdout = p.communicate()[0].strip() if sys.version >= '3': stdout = stdout.decode() if p.returncode != 0: return p.returncode, None return p.returncode, stdout def get_latest(target, check): if not "checkout_folder" in check: raise ConfigurationInvalid("Update configuration for %s needs checkout_folder" % target) checkout_folder = check["checkout_folder"] returncode, _ = _git(["fetch"], checkout_folder) if returncode != 0: return None, True returncode, local_commit = _git(["rev-parse", "@{0}"], checkout_folder) if returncode != 0: return None, True returncode, remote_commit = _git(["rev-parse", "@{u}"], checkout_folder) if returncode != 0: return None, True return_code, base = _git(["merge-base", "@{0}", "@{u}"], checkout_folder) if returncode != 0: return None, True if local_commit == remote_commit or remote_commit == base: information = dict( local=dict(name="Commit %s" % local_commit, value=local_commit), remote=dict(name="Commit %s" % local_commit, value=local_commit) ) is_current = True else: information = dict( local=dict(name="Commit %s" % local_commit, value=local_commit), remote=dict(name="Commit %s" % remote_commit, 
value=remote_commit) ) is_current = local_commit == remote_commit logger = logging.getLogger("octoprint.plugins.softwareupdate.version_checks.git_commit") logger.debug("Target: %s, local: %s, remote: %s" % (target, local_commit, remote_commit)) return information, is_current
AndrewGrossman/django
refs/heads/master
django/contrib/staticfiles/management/commands/runserver.py
248
from django.conf import settings
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core.management.commands.runserver import \
    Command as RunserverCommand


class Command(RunserverCommand):
    help = "Starts a lightweight Web server for development and also serves static files."

    def add_arguments(self, parser):
        """Register the staticfiles-specific options on top of the base
        runserver options."""
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--nostatic', action="store_false", dest='use_static_handler',
            default=True,
            help='Tells Django to NOT automatically serve static files at STATIC_URL.')
        parser.add_argument(
            '--insecure', action="store_true", dest='insecure_serving',
            default=False,
            help='Allows serving static files even if DEBUG is False.')

    def get_handler(self, *args, **options):
        """
        Returns the default handler, wrapped in a StaticFilesHandler when
        static file serving is both requested (no --nostatic) and permitted
        (DEBUG on, or --insecure given).  Otherwise just returns the
        default handler.
        """
        base_handler = super(Command, self).get_handler(*args, **options)
        serve_static = options.get('use_static_handler', True)
        allow_insecure = options.get('insecure_serving', False)
        if not serve_static:
            return base_handler
        if settings.DEBUG or allow_insecure:
            return StaticFilesHandler(base_handler)
        return base_handler
Kast0rTr0y/ansible
refs/heads/devel
lib/ansible/modules/web_infrastructure/jenkins_plugin.py
21
#!/usr/bin/python # encoding: utf-8 # (c) 2016, Jiri Tyr <jiri.tyr@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: jenkins_plugin author: Jiri Tyr (@jtyr) version_added: '2.2' short_description: Add or remove Jenkins plugin description: - Ansible module which helps to manage Jenkins plugins. options: group: required: false default: jenkins description: - Name of the Jenkins group on the OS. jenkins_home: required: false default: /var/lib/jenkins description: - Home directory of the Jenkins user. mode: required: false default: '0664' description: - File mode applied on versioned plugins. name: required: true description: - Plugin name. owner: required: false default: jenkins description: - Name of the Jenkins user on the OS. params: required: false default: null description: - Option used to allow the user to overwrite any of the other options. To remove an option, set the value of the option to C(null). state: required: false choices: [absent, present, pinned, unpinned, enabled, disabled, latest] default: present description: - Desired plugin state. - If the C(latest) is set, the check for new version will be performed every time. This is suitable to keep the plugin up-to-date. 
timeout: required: false default: 30 description: - Server connection timeout in secs. updates_expiration: required: false default: 86400 description: - Number of seconds after which a new copy of the I(update-center.json) file is downloaded. This is used to avoid the need to download the plugin to calculate its checksum when C(latest) is specified. - Set it to C(0) if no cache file should be used. In that case, the plugin file will always be downloaded to calculate its checksum when C(latest) is specified. updates_url: required: false default: https://updates.jenkins-ci.org description: - URL of the Update Centre. - Used as the base URL to download the plugins and the I(update-center.json) JSON file. url: required: false default: http://localhost:8080 description: - URL of the Jenkins server. version: required: false default: null description: - Plugin version number. - If this option is specified, all plugin dependencies must be installed manually. - It might take longer to verify that the correct version is installed. This is especially true if a specific version number is specified. - Quote the version to prevent the value to be interpreted as float. For example if C(1.20) would be unquoted, it would become C(1.2). with_dependencies: required: false choices: ['yes', 'no'] default: 'yes' description: - Defines whether to install plugin dependencies. - This option takes effect only if the I(version) is not defined. notes: - Plugin installation should be run under root or the same user which owns the plugin files on the disk. Only if the plugin is not installed yet and no version is specified, the API installation is performed which requires only the Web UI credentials. - It's necessary to notify the handler or call the I(service) module to restart the Jenkins service after a new plugin was installed. - Pinning works only if the plugin is installed and Jenkis service was successfully restarted after the plugin installation. 
- It is not possible to run the module remotely by changing the I(url) parameter to point to the Jenkins server. The module must be used on the host where Jenkins runs as it needs direct access to the plugin files. ''' EXAMPLES = ''' - name: Install plugin jenkins_plugin: name: build-pipeline-plugin - name: Install plugin without its dependencies jenkins_plugin: name: build-pipeline-plugin with_dependencies: no - name: Make sure the plugin is always up-to-date jenkins_plugin: name: token-macro state: latest - name: Install specific version of the plugin jenkins_plugin: name: token-macro version: "1.15" - name: Pin the plugin jenkins_plugin: name: token-macro state: pinned - name: Unpin the plugin jenkins_plugin: name: token-macro state: unpinned - name: Enable the plugin jenkins_plugin: name: token-macro state: enabled - name: Disable the plugin jenkins_plugin: name: token-macro state: disabled - name: Uninstall plugin jenkins_plugin: name: build-pipeline-plugin state: absent # # Example of how to use the params # # Define a variable and specify all default parameters you want to use across # all jenkins_plugin calls: # # my_jenkins_params: # url_username: admin # url_password: p4ssw0rd # url: http://localhost:8888 # - name: Install plugin jenkins_plugin: name: build-pipeline-plugin params: "{{ my_jenkins_params }}" # # Example of a Play which handles Jenkins restarts during the state changes # - name: Jenkins Master play hosts: jenkins-master vars: my_jenkins_plugins: token-macro: enabled: yes build-pipeline-plugin: version: "1.4.9" pinned: no enabled: yes tasks: - name: Install plugins without a specific version jenkins_plugin: name: "{{ item.key }}" register: my_jenkins_plugin_unversioned when: > 'version' not in item.value with_dict: "{{ my_jenkins_plugins }}" - name: Install plugins with a specific version jenkins_plugin: name: "{{ item.key }}" version: "{{ item.value['version'] }}" register: my_jenkins_plugin_versioned when: > 'version' in item.value 
with_dict: "{{ my_jenkins_plugins }}" - name: Initiate the fact set_fact: jenkins_restart_required: no - name: Check if restart is required by any of the versioned plugins set_fact: jenkins_restart_required: yes when: item.changed with_items: "{{ my_jenkins_plugin_versioned.results }}" - name: Check if restart is required by any of the unversioned plugins set_fact: jenkins_restart_required: yes when: item.changed with_items: "{{ my_jenkins_plugin_unversioned.results }}" - name: Restart Jenkins if required service: name: jenkins state: restarted when: jenkins_restart_required - name: Wait for Jenkins to start up uri: url: http://localhost:8080 status_code: 200 timeout: 5 register: jenkins_service_status # Keep trying for 5 mins in 5 sec intervals retries: 60 delay: 5 until: > 'status' in jenkins_service_status and jenkins_service_status['status'] == 200 when: jenkins_restart_required - name: Reset the fact set_fact: jenkins_restart_required: no when: jenkins_restart_required - name: Plugin pinning jenkins_plugin: name: "{{ item.key }}" state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}" when: > 'pinned' in item.value with_dict: "{{ my_jenkins_plugins }}" - name: Plugin enabling jenkins_plugin: name: "{{ item.key }}" state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}" when: > 'enabled' in item.value with_dict: "{{ my_jenkins_plugins }}" ''' RETURN = ''' plugin: description: plugin name returned: success type: string sample: build-pipeline-plugin state: description: state of the target, after execution returned: success type: string sample: "present" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception from ansible.module_utils.urls import fetch_url, url_argument_spec from ansible.module_utils._text import to_native import base64 import hashlib import json import os import tempfile import time import urllib class JenkinsPlugin(object): def __init__(self, module): # To be able to call 
fail_json self.module = module # Shortcuts for the params self.params = self.module.params self.url = self.params['url'] self.timeout = self.params['timeout'] # Crumb self.crumb = {} if self._csrf_enabled(): self.crumb = self._get_crumb() # Get list of installed plugins self._get_installed_plugins() def _csrf_enabled(self): csrf_data = self._get_json_data( "%s/%s" % (self.url, "api/json"), 'CSRF') return csrf_data["useCrumbs"] def _get_json_data(self, url, what, **kwargs): # Get the JSON data r = self._get_url_data(url, what, **kwargs) # Parse the JSON data try: json_data = json.load(r) except Exception: e = get_exception() self.module.fail_json( msg="Cannot parse %s JSON data." % what, details=e.message) return json_data def _get_url_data( self, url, what=None, msg_status=None, msg_exception=None, **kwargs): # Compose default messages if msg_status is None: msg_status = "Cannot get %s" % what if msg_exception is None: msg_exception = "Retrieval of %s failed." % what # Get the URL data try: response, info = fetch_url( self.module, url, timeout=self.timeout, **kwargs) if info['status'] != 200: self.module.fail_json(msg=msg_status, details=info['msg']) except Exception: e = get_exception() self.module.fail_json(msg=msg_exception, details=e.message) return response def _get_crumb(self): crumb_data = self._get_json_data( "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb') if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data: ret = { crumb_data['crumbRequestField']: crumb_data['crumb'] } else: self.module.fail_json( msg="Required fields not found in the Crum response.", details=crumb_data) return ret def _get_installed_plugins(self): plugins_data = self._get_json_data( "%s/%s" % (self.url, "pluginManager/api/json?depth=1"), 'list of plugins') # Check if we got valid data if 'plugins' not in plugins_data: self.module.fail_json(msg="No valid plugin data found.") # Create final list of installed/pined plugins self.is_installed = False self.is_pinned = False 
self.is_enabled = False for p in plugins_data['plugins']: if p['shortName'] == self.params['name']: self.is_installed = True if p['pinned']: self.is_pinned = True if p['enabled']: self.is_enabled = True break def install(self): changed = False plugin_file = ( '%s/plugins/%s.jpi' % ( self.params['jenkins_home'], self.params['name'])) if not self.is_installed and self.params['version'] is None: if not self.module.check_mode: # Install the plugin (with dependencies) install_script = ( 'd = Jenkins.instance.updateCenter.getPlugin("%s")' '.deploy(); d.get();' % self.params['name']) if self.params['with_dependencies']: install_script = ( 'Jenkins.instance.updateCenter.getPlugin("%s")' '.getNeededDependencies().each{it.deploy()}; %s' % ( self.params['name'], install_script)) script_data = { 'script': install_script } script_data.update(self.crumb) data = urllib.urlencode(script_data) # Send the installation request r = self._get_url_data( "%s/scriptText" % self.url, msg_status="Cannot install plugin.", msg_exception="Plugin installation has failed.", data=data) hpi_file = '%s/plugins/%s.hpi' % ( self.params['jenkins_home'], self.params['name']) if os.path.isfile(hpi_file): os.remove(hpi_file) changed = True else: # Check if the plugin directory exists if not os.path.isdir(self.params['jenkins_home']): self.module.fail_json( msg="Jenkins home directory doesn't exist.") md5sum_old = None if os.path.isfile(plugin_file): # Make the checksum of the currently installed plugin md5sum_old = hashlib.md5( open(plugin_file, 'rb').read()).hexdigest() if self.params['version'] in [None, 'latest']: # Take latest version plugin_url = ( "%s/latest/%s.hpi" % ( self.params['updates_url'], self.params['name'])) else: # Take specific version plugin_url = ( "{0}/download/plugins/" "{1}/{2}/{1}.hpi".format( self.params['updates_url'], self.params['name'], self.params['version'])) if ( self.params['updates_expiration'] == 0 or self.params['version'] not in [None, 'latest'] or md5sum_old is 
None): # Download the plugin file directly r = self._download_plugin(plugin_url) # Write downloaded plugin into file if checksums don't match if md5sum_old is None: # No previously installed plugin if not self.module.check_mode: self._write_file(plugin_file, r) changed = True else: # Get data for the MD5 data = r.read() # Make new checksum md5sum_new = hashlib.md5(data).hexdigest() # If the checksum is different from the currently installed # plugin, store the new plugin if md5sum_old != md5sum_new: if not self.module.check_mode: self._write_file(plugin_file, data) changed = True else: # Check for update from the updates JSON file plugin_data = self._download_updates() try: sha1_old = hashlib.sha1(open(plugin_file, 'rb').read()) except Exception: e = get_exception() self.module.fail_json( msg="Cannot calculate SHA1 of the old plugin.", details=e.message) sha1sum_old = base64.b64encode(sha1_old.digest()) # If the latest version changed, download it if sha1sum_old != plugin_data['sha1']: if not self.module.check_mode: r = self._download_plugin(plugin_url) self._write_file(plugin_file, r) changed = True # Change file attributes if needed if os.path.isfile(plugin_file): params = { 'dest': plugin_file } params.update(self.params) file_args = self.module.load_file_common_arguments(params) if not self.module.check_mode: # Not sure how to run this in the check mode changed = self.module.set_fs_attributes_if_different( file_args, changed) else: # See the comment above changed = True return changed def _download_updates(self): updates_filename = 'jenkins-plugin-cache.json' updates_dir = os.path.expanduser('~/.ansible/tmp') updates_file = "%s/%s" % (updates_dir, updates_filename) download_updates = True # Check if we need to download new updates file if os.path.isfile(updates_file): # Get timestamp when the file was changed last time ts_file = os.stat(updates_file).st_mtime ts_now = time.time() if ts_now - ts_file < self.params['updates_expiration']: download_updates = False 
updates_file_orig = updates_file # Download the updates file if needed if download_updates: url = "%s/update-center.json" % self.params['updates_url'] # Get the data r = self._get_url_data( url, msg_status="Remote updates not found.", msg_exception="Updates download failed.") # Write the updates file update_fd, updates_file = tempfile.mkstemp() os.write(update_fd, r.read()) try: os.close(update_fd) except IOError: e = get_exception() self.module.fail_json( msg="Cannot close the tmp updates file %s." % updates_file, details=to_native(e)) # Open the updates file try: f = open(updates_file) except IOError: e = get_exception() self.module.fail_json( msg="Cannot open temporal updates file.", details=to_native(e)) i = 0 for line in f: # Read only the second line if i == 1: try: data = json.loads(line) except Exception: e = get_exception() self.module.fail_json( msg="Cannot load JSON data from the tmp updates file.", details=e.message) break i += 1 # Move the updates file to the right place if we could read it if download_updates: # Make sure the destination directory exists if not os.path.isdir(updates_dir): try: os.makedirs(updates_dir, int('0700', 8)) except OSError: e = get_exception() self.module.fail_json( msg="Cannot create temporal directory.", details=e.message) self.module.atomic_move(updates_file, updates_file_orig) # Check if we have the plugin data available if 'plugins' not in data or self.params['name'] not in data['plugins']: self.module.fail_json( msg="Cannot find plugin data in the updates file.") return data['plugins'][self.params['name']] def _download_plugin(self, plugin_url): # Download the plugin r = self._get_url_data( plugin_url, msg_status="Plugin not found.", msg_exception="Plugin download failed.") return r def _write_file(self, f, data): # Store the plugin into a temp file and then move it tmp_f_fd, tmp_f = tempfile.mkstemp() if isinstance(data, str): os.write(tmp_f_fd, data) else: os.write(tmp_f_fd, data.read()) try: os.close(tmp_f_fd) except 
IOError: e = get_exception() self.module.fail_json( msg='Cannot close the temporal plugin file %s.' % tmp_f, details=to_native(e)) # Move the file onto the right place self.module.atomic_move(tmp_f, f) def uninstall(self): changed = False # Perform the action if self.is_installed: if not self.module.check_mode: self._pm_query('doUninstall', 'Uninstallation') changed = True return changed def pin(self): return self._pinning('pin') def unpin(self): return self._pinning('unpin') def _pinning(self, action): changed = False # Check if the plugin is pinned/unpinned if ( action == 'pin' and not self.is_pinned or action == 'unpin' and self.is_pinned): # Perform the action if not self.module.check_mode: self._pm_query(action, "%sning" % action.capitalize()) changed = True return changed def enable(self): return self._enabling('enable') def disable(self): return self._enabling('disable') def _enabling(self, action): changed = False # Check if the plugin is pinned/unpinned if ( action == 'enable' and not self.is_enabled or action == 'disable' and self.is_enabled): # Perform the action if not self.module.check_mode: self._pm_query( "make%sd" % action.capitalize(), "%sing" % action[:-1].capitalize()) changed = True return changed def _pm_query(self, action, msg): url = "%s/pluginManager/plugin/%s/%s" % ( self.params['url'], self.params['name'], action) data = urllib.urlencode(self.crumb) # Send the request self._get_url_data( url, msg_status="Plugin not found. %s" % url, msg_exception="%s has failed." 
            % msg, data=data)


def main():
    """Module entry point: build the argument spec, normalize parameters
    and dispatch the requested state change to a JenkinsPlugin instance."""
    # Module arguments: start from the shared URL argument spec (provides
    # url_username, url_password, validate_certs, ...) and add the plugin
    # specific options on top.
    argument_spec = url_argument_spec()
    argument_spec.update(
        group=dict(default='jenkins'),
        jenkins_home=dict(default='/var/lib/jenkins'),
        # NOTE(review): DOCUMENTATION above states a default mode of
        # '0664' while the code default here is '0644' — confirm which
        # one is intended.
        mode=dict(default='0644', type='raw'),
        name=dict(required=True),
        owner=dict(default='jenkins'),
        params=dict(type='dict'),
        state=dict(
            choices=[
                'present',
                'absent',
                'pinned',
                'unpinned',
                'enabled',
                'disabled',
                'latest'],
            default='present'),
        timeout=dict(default=30, type="int"),
        updates_expiration=dict(default=86400, type="int"),
        updates_url=dict(default='https://updates.jenkins-ci.org'),
        url=dict(default='http://localhost:8080'),
        url_password=dict(no_log=True),
        version=dict(),
        with_dependencies=dict(default=True, type='bool'),
    )
    # Module settings
    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        supports_check_mode=True,
    )

    # Update module parameters by the user's 'params' dict if defined;
    # this lets a single variable override any of the other options.
    if 'params' in module.params and isinstance(module.params['params'], dict):
        module.params.update(module.params['params'])

        # Remove the params themselves so they are not treated as an option
        module.params.pop('params', None)

    # Force basic authentication (Jenkins expects it for the API calls)
    module.params['force_basic_auth'] = True

    # Convert timeout to float for the fetch_url calls
    try:
        module.params['timeout'] = float(module.params['timeout'])
    except ValueError:
        e = get_exception()
        module.fail_json(
            msg='Cannot convert %s to float.' % module.params['timeout'],
            details=to_native(e))

    # Set version to latest if state is latest: 'latest' is implemented as
    # 'present' with the special version marker.
    if module.params['state'] == 'latest':
        module.params['state'] = 'present'
        module.params['version'] = 'latest'

    # Create some shortcuts
    name = module.params['name']
    state = module.params['state']

    # Initial change state of the task
    changed = False

    # Instantiate the JenkinsPlugin object (queries the server for CSRF
    # status and the installed plugin list)
    jp = JenkinsPlugin(module)

    # Perform the action depending on the requested state
    if state == 'present':
        changed = jp.install()
    elif state == 'absent':
        changed = jp.uninstall()
    elif state == 'pinned':
        changed = jp.pin()
    elif state == 'unpinned':
        changed = jp.unpin()
    elif state == 'enabled':
        changed = jp.enable()
    elif state == 'disabled':
        changed = jp.disable()

    # Print status of the change
    module.exit_json(changed=changed, plugin=name, state=state)


if __name__ == '__main__':
    main()
takeshineshiro/nova
refs/heads/master
nova/tests/unit/keymgr/fake.py
110
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Implementation of a fake key manager."""


from nova.keymgr import mock_key_mgr


def fake_api():
    """Construct and return the mock key manager used as the fake backend."""
    manager = mock_key_mgr.MockKeyManager()
    return manager
hakancelik96/coogger
refs/heads/master
core/cooggerapp/views/utopic/commit.py
1
from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.auth.models import User from django.contrib.messages.views import SuccessMessageMixin from django.http import Http404 from django.shortcuts import get_object_or_404 from django.urls import reverse from django.views.generic import ListView, TemplateView, UpdateView from ....threaded_comment.forms import ReplyForm from ...models import Commit, UTopic from ...views.generic.detail import CommonDetailView from ..utils import get_current_user class Commits(ListView): template_name = "users/topic/detail/commits.html" paginate_by = 10 http_method_names = ["get"] commits = Commit.objects.approved_commits def get_queryset(self): self.user = get_object_or_404(User, username=self.kwargs.get("username")) self.utopic = UTopic.objects.get( user=self.user, permlink=self.kwargs.get("topic_permlink") ) filter_by_username = self.request.GET.get("username", None) if filter_by_username: return self.commits.filter( user__username=filter_by_username, utopic=self.utopic ) else: return self.commits.filter(utopic=self.utopic) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context["current_user"] = get_current_user(self.user) context["utopic"] = self.utopic return context class CommitDetail(CommonDetailView, TemplateView): template_name = "users/topic/commit/commit.html" model = Commit model_name = "commit" form_class = ReplyForm def get_object(self, username, topic_permlink, hash): obj = Commit.objects.get(user__is_active=True, hash=hash) if obj.status != "approved": # NOTE when commit it is a contribute self.template_name = "users/topic/commit/contribution.html" return obj def get_context_data(self, username, topic_permlink, hash, **kwargs): context = super().get_context_data( **dict(username=username, topic_permlink=topic_permlink, hash=hash) ) queryset = context.get("queryset") context["current_user"] = get_current_user( get_object_or_404(User, username=username) ) 
context["nameoflist"] = queryset.utopic context["commit"] = queryset return context class CommitUpdate(LoginRequiredMixin, SuccessMessageMixin, UpdateView): fields = ["body", "msg"] template_name = "forms/create.html" success_message = "Your commit updated" def get_object(self, queryset=None): hash = self.kwargs.get("hash") obj = get_object_or_404(Commit, hash=hash) if obj.user == self.request.user: if obj.status == "approved": raise Http404("Your commit's approved, thus we can not update.") else: raise Http404("Permission denied.") return obj def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) object = context.get("object") context["current_user"] = get_current_user(object.utopic.user) context["utopic"] = object.utopic return context def get_success_url(self): commit = Commit.objects.get(hash=self.kwargs.get("hash")) return reverse( "commit", kwargs=dict( username=commit.user, topic_permlink=commit.utopic.permlink, hash=commit.hash, ), )
SilverCory/CloudBot
refs/heads/master
plugins/karma.py
2
import time import re from sqlalchemy import Table, Column, Integer, String, PrimaryKeyConstraint from sqlalchemy import select from cloudbot import hook from cloudbot.util import timeformat, formatting, database CAN_DOWNVOTE = False TIME_LIMIT = 300.0 karma_table = Table( 'karma', database.metadata, Column('nick_vote', String(25)), Column('up_karma', Integer, default=0), Column('down_karma', Integer, default=0), Column('total_karma', Integer, default=0), # used for querying PrimaryKeyConstraint('nick_vote') ) voters = {} def up(db, nick_vote): """ gives one karma to a user """ db.execute("""INSERT or IGNORE INTO karma( nick_vote, up_karma, down_karma, total_karma) values(:nick,0,0,0)""", {'nick': nick_vote.lower()}) query = karma_table.update().values( up_karma=karma_table.c.up_karma + 1, total_karma=karma_table.c.total_karma + 1 ).where(karma_table.c.nick_vote == nick_vote.lower()) db.execute(query) db.commit() def down(db, nick_vote): """ takes one karma away from a user """ db.execute("""INSERT or IGNORE INTO karma( nick_vote, up_karma, down_karma, total_karma) values(:nick,0,0,0)""", {'nick': nick_vote.lower()}) query = karma_table.update().values( down_karma=karma_table.c.down_karma + 1, total_karma=karma_table.c.total_karma - 1 ).where(karma_table.c.nick_vote == nick_vote.lower()) db.execute(query) db.commit() def allowed(uid): """ checks if a user is allowed to vote, and keeps track of voters """ global voters # clear expired voters for _uid, _timestamp in voters.items(): if (time.time() - _timestamp) >= TIME_LIMIT: del voters[_uid] if uid in voters: last_voted = voters[uid] return False, timeformat.time_until(last_voted, now=time.time() - TIME_LIMIT) else: voters[uid] = time.time() return True, 0 karma_re = re.compile('^([a-z0-9_\-\[\]\\^{}|`]{3,})(\+\+|\-\-)$', re.I) @hook.regex(karma_re) def karma_add(match, nick, conn, db, notice): nick_vote = match.group(1).strip() if nick.lower() == nick_vote.lower(): notice("You can't vote on yourself!") return uid = 
":".join([conn.name, nick, nick_vote]).lower() vote_allowed, when = allowed(uid) if vote_allowed: if match.group(2) == '++': up(db, nick_vote) notice("Gave {} 1 karma!".format(nick_vote)) if match.group(2) == '--' and CAN_DOWNVOTE: down(db, nick_vote) notice("Took away 1 karma from {}.".format(nick_vote)) else: return else: notice("You are trying to vote too often. You can vote on this user again in {}!".format(when)) @hook.command('karma', 'k') def karma(text, db): """k/karma <nick> -- returns karma stats for <nick>""" query = db.execute( select([karma_table]) .where(karma_table.c.nick_vote == text.lower()) ).fetchone() if not query: return "That user has no karma :(" else: karma_text = formatting.pluralize(query['up_karma'] - query['down_karma'], '\x02karma') return "{} has \x02{}\x02 karma!".format(text, query['up_karma'] - query['down_karma']) @hook.command('loved') def loved(text, db): """loved -- Shows the users with the most karma!""" query = db.execute( select([karma_table]) .order_by(karma_table.c.total_karma.desc()) .limit(5) ).fetchall() if not query: return "??" else: return query