| repo_name (string, 5–100 chars) | ref (string, 12–67) | path (string, 4–244) | copies (string, 1–8) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
leighpauls/k2cro4
|
refs/heads/master
|
tools/gyp/test/win/gyptest-macro-vcinstalldir.py
|
344
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure macro expansion of $(VCInstallDir) is handled, and specifically that
it is always /-terminated for compatibility.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'vs-macros'
test.run_gyp('vcinstalldir.gyp', chdir=CHDIR)
# This fails on VS because the trailing slash escapes the trailing quote.
test.build('vcinstalldir.gyp', 'test_slash_trailing', chdir=CHDIR, status=1)
test.build('vcinstalldir.gyp', 'test_slash_dir', chdir=CHDIR)
test.pass_test()
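# Why the trailing slash matters, sketched with a hypothetical define (not
# taken from vcinstalldir.gyp itself): given 'defines': ['PATH="$(VCInstallDir)"'],
# the macro expands with its trailing slash, so the compiler sees something
# like /DPATH="C:\...\VC\" where the final slash escapes the closing quote --
# the failure that test_slash_trailing expects (status=1).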
|
WoLpH/jedi
|
refs/heads/master
|
test/completion/definition.py
|
27
|
"""
Fallback to callee definition when definition not found.
- https://github.com/davidhalter/jedi/issues/131
- https://github.com/davidhalter/jedi/pull/149
"""
"""Parenthesis closed at next line."""
#? isinstance
isinstance(
)
#? isinstance
isinstance(
)
#? isinstance
isinstance(None,
)
#? isinstance
isinstance(None,
)
"""Parenthesis closed at same line."""
# Note: len('isinstance(') == 11
#? 11 isinstance
isinstance()
# Note: len('isinstance(None,') == 16
##? 16 isinstance
isinstance(None,)
# Note: len('isinstance(None,') == 16
##? 16 isinstance
isinstance(None, )
# Note: len('isinstance(None, ') == 17
##? 17 isinstance
isinstance(None, )
# Note: len('isinstance( ') == 12
##? 12 isinstance
isinstance( )
"""Unclosed parenthesis."""
#? isinstance
isinstance(
def x(): pass # acts like EOF
##? isinstance
isinstance(
def x(): pass # acts like EOF
#? isinstance
isinstance(None,
def x(): pass # acts like EOF
##? isinstance
isinstance(None,
|
chainer/chainer
|
refs/heads/master
|
tests/chainer_tests/training_tests/test_trainer.py
|
5
|
import time
import traceback
import unittest
from chainer import testing
from chainer import training
class DummyExtension(training.extension.Extension):
def __init__(self, test_case):
self.is_called = False
self.is_finalized = False
self._test_case = test_case
def __call__(self, trainer):
self._test_case.assertTrue(trainer.is_initialized)
self.is_called = True
def finalize(self):
self.is_finalized = True
def initialize(self, trainer):
trainer.is_initialized = True
class ErrorHandlingExtension(training.extension.Extension):
def __init__(self):
self.is_error_handled = False
def __call__(self, trainer):
pass
def on_error(self, trainer, exception, tb):
traceback.print_tb(tb)
self.is_error_handled = True
def finalize(self):
pass
def initialize(self, trainer):
pass
class TheOnlyError(Exception):
pass
class DummyCallableClass(object):
def __init__(self, test_case):
self.name = 'DummyCallableClass'
self.is_called = False
self.is_finalized = False
self._test_case = test_case
def __call__(self, trainer):
self._test_case.assertTrue(trainer.is_initialized)
self.is_called = True
def finalize(self):
self.is_finalized = True
def initialize(self, trainer):
trainer.is_initialized = True
class DummyClass(object):
def __init__(self):
self.is_touched = False
def touch(self):
self.is_touched = True
class TestTrainer(unittest.TestCase):
def setUp(self):
self.trainer = self._create_mock_trainer(10)
self.trainer.is_initialized = False
def _create_mock_trainer(self, iterations):
trainer = testing.get_trainer_with_mock_updater(
(iterations, 'iteration'))
trainer.updater.update_core = lambda: time.sleep(0.001)
return trainer
def test_elapsed_time(self):
with self.assertRaises(RuntimeError):
self.trainer.elapsed_time
self.trainer.run()
self.assertGreater(self.trainer.elapsed_time, 0)
def test_elapsed_time_serialization(self):
self.trainer.run()
serialized_time = self.trainer.elapsed_time
new_trainer = self._create_mock_trainer(5)
testing.save_and_load_npz(self.trainer, new_trainer)
new_trainer.run()
self.assertGreater(new_trainer.elapsed_time, serialized_time)
def test_add_inherit_class_extension(self):
dummy_extension = DummyExtension(self)
self.trainer.extend(dummy_extension)
self.trainer.run()
self.assertTrue(dummy_extension.is_called)
self.assertTrue(dummy_extension.is_finalized)
def test_add_callable_class_extension(self):
dummy_callable_class = DummyCallableClass(self)
self.trainer.extend(dummy_callable_class)
self.trainer.run()
self.assertTrue(dummy_callable_class.is_called)
self.assertTrue(dummy_callable_class.is_finalized)
def test_add_called_before_training_extension(self):
class MyDummyCallableClass(DummyCallableClass):
def __init__(self, test_case):
super(MyDummyCallableClass, self).__init__(test_case)
self.is_called_before_training = False
def __call__(self, trainer):
if trainer.is_before_training:
self.is_called_before_training = True
return super(MyDummyCallableClass, self).__call__(trainer)
dummy_callable_class = MyDummyCallableClass(self)
self.trainer.extend(dummy_callable_class, call_before_training=True)
self.trainer.run()
self.assertTrue(dummy_callable_class.is_called)
self.assertTrue(dummy_callable_class.is_called_before_training)
self.assertTrue(dummy_callable_class.is_finalized)
def test_add_lambda_extension(self):
dummy_class = DummyClass()
self.trainer.extend(lambda x: dummy_class.touch())
self.trainer.run()
self.assertTrue(dummy_class.is_touched)
def test_add_make_extension(self):
self.is_called = False
@training.make_extension()
def dummy_extension(trainer):
self.is_called = True
self.trainer.extend(dummy_extension)
self.trainer.run()
self.assertTrue(self.is_called)
def test_add_make_extension_with_initializer(self):
self.is_called = False
def initializer(trainer):
trainer.is_initialized = True
@training.make_extension(initializer=initializer)
def dummy_extension(trainer):
self.assertTrue(trainer.is_initialized)
self.is_called = True
self.trainer.extend(dummy_extension)
self.trainer.run()
self.assertTrue(self.is_called)
def test_add_function_extension(self):
self.is_called = False
def dummy_function(trainer):
self.is_called = True
self.trainer.extend(dummy_function)
self.trainer.run()
self.assertTrue(self.is_called)
def test_add_two_extensions_default_priority(self):
self.called_order = []
@training.make_extension(trigger=(1, 'epoch'))
def dummy_extension_1(trainer):
self.called_order.append(1)
@training.make_extension(trigger=(1, 'epoch'))
def dummy_extension_2(trainer):
self.called_order.append(2)
self.trainer.extend(dummy_extension_1)
self.trainer.extend(dummy_extension_2)
self.trainer.run()
self.assertEqual(self.called_order, [1, 2])
def test_add_two_extensions_specific_priority(self):
self.called_order = []
@training.make_extension(trigger=(1, 'epoch'), priority=50)
def dummy_extension_1(trainer):
self.called_order.append(1)
@training.make_extension(trigger=(1, 'epoch'), priority=100)
def dummy_extension_2(trainer):
self.called_order.append(2)
self.trainer.extend(dummy_extension_1)
self.trainer.extend(dummy_extension_2)
self.trainer.run()
self.assertEqual(self.called_order, [2, 1])
def test_exception_handler(self):
ext = ErrorHandlingExtension()
self.trainer.extend(ext, trigger=(1, 'iteration'), priority=1)
self.assertFalse(ext.is_error_handled)
d = {}
def exception_handler(trainer, exp, tb):
d['called'] = True
@training.make_extension(trigger=(1, 'iteration'), priority=100,
on_error=exception_handler)
def exception_raiser(trainer):
raise TheOnlyError()
self.trainer.extend(exception_raiser)
dummy_extension = DummyExtension(self)
self.trainer.extend(dummy_extension)
with self.assertRaises(TheOnlyError):
self.trainer.run()
self.assertTrue(d['called'])
self.assertTrue(ext.is_error_handled)
self.assertTrue(dummy_extension.is_finalized)
def test_exception_in_exception_handler(self):
ext = ErrorHandlingExtension()
self.trainer.extend(ext, trigger=(1, 'iteration'), priority=1)
self.assertFalse(ext.is_error_handled)
def exception_handler(trainer, exp, tb):
raise ValueError('hogehoge from exception handler')
@training.make_extension(trigger=(1, 'iteration'), priority=100,
on_error=exception_handler)
def exception_raiser(trainer):
raise TheOnlyError()
self.trainer.extend(exception_raiser)
dummy_extension = DummyExtension(self)
self.trainer.extend(dummy_extension)
with self.assertRaises(TheOnlyError):
self.trainer.run()
self.assertTrue(ext.is_error_handled)
self.assertTrue(dummy_extension.is_finalized)
testing.run_module(__name__, __file__)
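# A minimal standalone sketch of the extension API exercised above (the
# registration line is commented out because it assumes an already configured
# trainer). Extensions with a higher `priority` run first within the same
# trigger, as test_add_two_extensions_specific_priority asserts.
@training.make_extension(trigger=(1, 'epoch'), priority=100)
def report_epoch(trainer):
    # Runs once per epoch, before lower-priority extensions.
    print('finished epoch', trainer.updater.epoch)
# trainer.extend(report_epoch)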
|
philjjoon/Big-Data-Benchmark-for-Big-Bench
|
refs/heads/master
|
engines/flink/queries/q04/variants/q4_reducer2.py
|
3
|
#"INTEL CONFIDENTIAL"
#Copyright 2015 Intel Corporation All Rights Reserved.
#
#The source code contained or described herein and all documents related to the source code ("Material") are owned by Intel Corporation or its suppliers or licensors. Title to the Material remains with Intel Corporation or its suppliers and licensors. The Material contains trade secrets and proprietary and confidential information of Intel or its suppliers and licensors. The Material is protected by worldwide copyright and trade secret laws and treaty provisions. No part of the Material may be used, copied, reproduced, modified, published, uploaded, posted, transmitted, distributed, or disclosed in any way without Intel's prior express written permission.
#
#No license under any patent, copyright, trade secret or other intellectual property right is granted to or conferred upon you by disclosure or delivery of the Materials, either expressly, by implication, inducement, estoppel or otherwise. Any license under such intellectual property rights must be express and approved by Intel in writing.
import sys
import logging
import traceback
import os
import time
from time import strftime
def filterShoppingCharts(vals):
#vals[i][0] wptype:string
#vals[i][1] tstamp:long
#vals[i][2] sessionid:string
last_order = -1
last_dynamic = -1
cnt = len(vals)
for i in range(cnt):
if vals[i][0] == 'order':
last_order = i
if vals[i][0] == 'dynamic':
last_dynamic = i
#is this an abandoned shopping cart?
if last_dynamic > last_order and (last_order == -1 or vals[last_dynamic][1] >= vals[last_order][1]):
#print sessionid ("<usersk>_<sessionCounter>") and pagecount
print "%s\t%s" % (vals[0][2], str(cnt))
if __name__ == "__main__":
#lines are expected to be grouped by sessionid and presorted by timestamp
line = ''
vals = []
try:
# handle the first line outside the loop to avoid an extra current_key check inside it
line = raw_input()
wptype, tstamp_str, sessionid = line.strip().split("\t")
tstamp = long(tstamp_str)
current_key = sessionid
vals.append((wptype,tstamp, sessionid))
for line in sys.stdin:
wptype, tstamp_str, sessionid = line.strip().split("\t")
tstamp = long(tstamp_str)
if current_key == sessionid:
vals.append((wptype,tstamp, sessionid))
else:
filterShoppingCharts(vals)
vals = []
vals.append((wptype,tstamp, sessionid))
current_key = sessionid
filterShoppingCharts(vals)
except:
## should only happen if input format is not correct
logging.basicConfig(level=logging.DEBUG, filename=strftime("/tmp/bigbench_q4_reducer2.py_%Y%m%d-%H%M%S.log"))
logging.info("line from hive: \"" + line + "\"")
logging.exception("Oops:")
sys.exit(1)
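# A hypothetical worked example of the stream this reducer expects
# (tab-separated, grouped by sessionid, presorted by timestamp):
#   order    100  u1_1
#   dynamic  200  u1_1
# After grouping, vals == [('order', 100, 'u1_1'), ('dynamic', 200, 'u1_1')].
# The last 'dynamic' row comes after the last 'order' row, so the session
# counts as an abandoned cart and "u1_1\t2" is printed.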
|
klihub/intel-iot-refkit
|
refs/heads/master
|
meta-iotqa/lib/oeqa/runtime/connectivity/wifi/wifi_mnode.py
|
6
|
import time
import os
import string
from oeqa.runtime.wifi import wifi
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.helper import shell_cmd_timeout
ssid_config = ConfigParser.ConfigParser()
config_path = os.path.join(os.path.dirname(__file__), "files/config.ini")
ssid_config.readfp(open(config_path))
class CommWiFiMNode(oeRuntimeTest):
"""
@class CommWiFiMNode
"""
@classmethod
def setUpClass(cls):
''' initialize wifi class
@fn setUpClass
@param cls
@return
'''
wifi1 = wifi.WiFiFunction(cls.tc.targets[0])
wifi2 = wifi.WiFiFunction(cls.tc.targets[1])
# Connect to same WiFi AP
ap_type = "hidden"
ssid = ssid_config.get("Connect","ssid_80211n")
pwd = ssid_config.get("Connect","passwd_80211n")
wifi1.execute_connection(ap_type, ssid, pwd)
wifi2.execute_connection(ap_type, ssid, pwd)
@classmethod
def tearDownClass(cls):
'''disable wifi, since it would block the ethernet connection when rebooting
@fn tearDownClass
@param cls
@return
'''
wifi1 = wifi.WiFiFunction(cls.tc.targets[0])
wifi2 = wifi.WiFiFunction(cls.tc.targets[1])
wifi1.disable_wifi()
wifi2.disable_wifi()
def setUp(self):
''' init wifi1 and wifi2
@fn setUp
@param self
@return
'''
# init wifi1 and wifi2
self.wifi1 = wifi.WiFiFunction(self.targets[0])
self.wifi2 = wifi.WiFiFunction(self.targets[1])
def test_wifi_ssh(self):
'''One device ssh to another via WiFi
@fn test_wifi_ssh
@param self
@return
'''
# Check wifi1 to ssh to wifi2
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
def test_wifi_scp_file(self):
'''One device scp a file to another device via WiFi
@fn test_wifi_scp_file
@param self
@return
'''
# Check wifi1 to scp /etc/os-release to wifi2
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
file_path = "/etc/os-release"
self.wifi1.scp_to(file_path, self.wifi2.get_wifi_ipv4())
# Compare md5sum
(status, md5sum1) = self.wifi1.target.run('md5sum %s' % file_path)
(status, md5sum2) = self.wifi2.target.run('md5sum /tmp/%s' % file_path.split('/')[-1])
# md5sum prints "<hash> <path>"; compare only the hash fields
if md5sum1.split()[0] == md5sum2.split()[0]:
pass
else:
self.assertEqual(0, 1, msg="md5sum checking fail: original %s, remote is %s" % (md5sum1.split()[0], md5sum2.split()[0]))
def test_wifi_scp_multiple_files(self):
'''Stability: one device scp thousands of small files
to another
@fn test_wifi_scp_multiple_files
@param self
@return
'''
# clean files on both sides
self.wifi2.target.run('rm -f /home/root/*')
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
# create 1000 files under /tmp/1000/ on target1
script = os.path.join(os.path.dirname(__file__), "files/create_1000_files.sh")
self.wifi1.target.copy_to(script, "/tmp/")
self.wifi1.target.run('sh /tmp/create_1000_files.sh')
# scp them to target2 /tmp/ folder
(status, file_number_old) = self.wifi2.target.run('ls /home/root/ | wc -l')
file_path = '/tmp/1000/*'
self.wifi1.scp_to(file_path, self.wifi2.get_wifi_ipv4())
# check if /tmp/ files number increase 1000 on target2
(status, file_number_new) = self.wifi2.target.run('ls /home/root/ | wc -l')
if int(file_number_new) - int(file_number_old) == 1000:
pass
else:
self.assertEqual(0, 1, msg="1000 file scp fail: original number %s, new number %s" % (file_number_old, file_number_new))
def test_wifi_scp_big_file(self):
'''Stability: one device scp 500M size file to another
@fn test_wifi_scp_big_file
@param self
@return
'''
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
file_path = '/home/root/big_file'
# create a big file, size is 500M
(status, partition) = self.wifi1.target.run('mount | grep " \/ "')
self.wifi1.target.run('dd if=%s of=%s bs=1M count=500' % (partition.split()[0], file_path))
# scp it to target2 /home/root/ folder
self.wifi2.target.run('rm -f /home/root/*')
self.wifi1.scp_to(file_path, self.wifi2.get_wifi_ipv4())
# check if md5sum is consistent
(status, md5sum1) = self.wifi1.target.run('md5sum %s' % file_path)
(status, md5sum2) = self.wifi2.target.run('md5sum /home/root/%s' % file_path.split('/')[-1])
if md5sum1.split()[0] == md5sum2.split()[0]:
pass
else:
self.assertEqual(0, 1, msg="md5sum checking fail: original %s, remote is %s" % (md5sum1.split()[0], md5sum2.split()[0]))
def test_wifi_available_after_longtime_idle(self):
'''Stability: check if wifi is still workable after a long time idle
@fn test_wifi_available_after_longtime_idle
@param self
@return
'''
# Re-connect wifi although setUpClass already did it
ap_type = "hidden"
ssid = ssid_config.get("Connect","ssid_80211n")
pwd = ssid_config.get("Connect","passwd_80211n")
self.wifi1.execute_connection(ap_type, ssid, pwd)
self.wifi2.execute_connection(ap_type, ssid, pwd)
# idle for half hour, then check basic ssh_to function
time.sleep(1800)
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
|
Maethorin/concept2
|
refs/heads/master
|
migrations/versions/dda6cfde5752_.py
|
1
|
"""empty message
Revision ID: dda6cfde5752
Revises: 7189463da035
Create Date: 2016-03-12 00:49:53.587621
"""
# revision identifiers, used by Alembic.
revision = 'dda6cfde5752'
down_revision = '7189463da035'
from alembic import op
import app
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('admins',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('nome', sa.String(), nullable=False),
sa.Column('email', sa.String(), nullable=False),
sa.Column('senha_hash', sa.String(length=128), nullable=False),
sa.Column('ativo', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('admins')
### end Alembic commands ###
|
BonexGu/Blik2D-SDK
|
refs/heads/master
|
Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/slim/python/slim/nets/inception_v1.py
|
164
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
"""Defines the Inception V1 base architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
scope: Optional variable_scope.
Returns:
A dictionary from components of the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with variable_scope.variable_scope(scope, 'InceptionV1', [inputs]):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_initializer=trunc_normal(0.01)):
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d], stride=1, padding='SAME'):
end_point = 'Conv2d_1a_7x7'
net = layers.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_2a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Conv2d_2b_1x1'
net = layers.conv2d(net, 64, [1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Conv2d_2c_3x3'
net = layers.conv2d(net, 192, [3, 3], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_3a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_3b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_3c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_4a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4e'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_4f'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_5a_2x2'
net = layers_lib.max_pool2d(net, [2, 2], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_5b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_5c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v1(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1'):
"""Defines the Inception V1 architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with variable_scope.variable_scope(
scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with variable_scope.variable_scope('Logits'):
net = layers_lib.avg_pool2d(
net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v1.default_image_size = 224
def inception_v1_arg_scope(weight_decay=0.00004,
use_batch_norm=True,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV1 arg scope.
Note: Although the original paper didn't use batch_norm, we found it useful.
Args:
weight_decay: The weight decay to use for regularizing the model.
use_batch_norm: "If `True`, batch_norm is applied after each convolution.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v1 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': ops.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
if use_batch_norm:
normalizer_fn = layers_lib.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
# Set weight_decay for weights in Conv and FC layers.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope(
[layers.conv2d],
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params) as sc:
return sc
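# A hypothetical usage sketch of the two entry points above (the batch size is
# an assumption; inception_v1.default_image_size is 224):
#
# import tensorflow as tf
# images = tf.random_normal([8, 224, 224, 3])
# with arg_scope(inception_v1_arg_scope()):
#     logits, end_points = inception_v1(images, num_classes=1000, is_training=False)
# logits has shape [8, 1000] after the spatial squeeze; end_points maps layer
# names such as 'Mixed_4b' to their activations.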
|
Karajlug/karajlug
|
refs/heads/master
|
members/models.py
|
1
|
# -----------------------------------------------------------------------------
# Karajlug.org
# Copyright (C) 2010 Karajlug community
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from django.db import models
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
class Member(models.Model):
"""
Member model of karajlug.
"""
user = models.OneToOneField(
"auth.User", verbose_name=_("User"),
related_name="%(app_label)s_%(class)s_related",
help_text=_("Who is the owner of this profile?"))
link = models.URLField(verbose_name=_("Home Page"),
blank=True, null=True,
help_text=_("Home page link"))
avatar = models.ImageField(
blank=True, null=True,
upload_to="uploads/avarars/",
verbose_name=_("Avatar"),
help_text=_("Please use your real face avatar. ") +
_("Size: 128x128 DO NOT UPLOAD BIG FILES !!!"))
weight = models.IntegerField(default=40, verbose_name=_("Item Weight"),
help_text=_("This field is not important"))
desc = models.TextField(verbose_name=_("Description"),
blank=True, null=True)
creator = models.ForeignKey("auth.User", verbose_name=_("Creator"),
editable=False)
def __unicode__(self):
return self.user.get_full_name()
def fullname(self):
return self.__unicode__()
def get_absolute_url(self):
return "/members/%i/" % self.id
def safe_email(self):
"""
use js to hide email.
"""
template = """
<SCRIPT LANGUAGE="JavaScript">
user = '$$username$$';
site = '$$domain$$';
document.write('<a href=\"mailto:' + user + '@' + site + '\">');
document.write(user + '@' + site + '</a>');
</SCRIPT>
"""
if self.user.email:
username, domain = self.user.email.split("@")
result = template.replace("$$username$$", username).replace(
"$$domain$$", domain)
return result
else:
return ""
def full_path(self):
from django.conf import settings
site = getattr(settings, "URL", "www.karajlug.org")
return "%s%s" % (site, self.get_absolute_url())
def irc_repr(self, logentry):
if logentry.is_addition():
return ["New member added by %s - %s" % (
self.user,
self.full_path())]
phrase = ""
if logentry.is_change():
phrase = "change"
elif logentry.is_delete():
phrase = "delete"
return ["%s %s a member: %s" % (
self.user,
phrase,
self.full_path())]
class Meta:
verbose_name = _("Member")
verbose_name_plural = _("Members")
permissions = (
("member_admin", _("Can Add new members and details.")),
)
class MemberDetail(models.Model):
"""
Details of each member.
"""
LANGUAGES = [
["0", "en-us"],
["1", "fa"],
]
language = models.CharField(
choices=LANGUAGES,
default="0",
max_length=1,
verbose_name=_("Language"),
help_text=_("Site language (en-us at this time)"))
member = models.ForeignKey(
Member, verbose_name=_("Member"),
help_text=_("Who is the owner of this property?"))
field_name = models.CharField(max_length=64,
verbose_name=_("Field Name"),
help_text=_("Profile property name"))
field_value = models.CharField(max_length=256)
weight = models.IntegerField(
default=40, verbose_name=_("Item Weight"),
help_text=_("Properties with lower weight will appear sooner."))
user = models.ForeignKey("auth.User", verbose_name=_("Creator"),
editable=False)
def __unicode__(self):
return "%s - %s" % (self.field_name,
self.field_value)
def irc_repr(self, logentry):
if logentry.is_addition():
return ["Some details added for %s" % (self.member.user)]
phrase = ""
if logentry.is_change():
phrase = "changed"
elif logentry.is_delete():
phrase = "deleted"
return ["A profile detail for %s %s" % (
self.member.user,
phrase)]
class Meta:
verbose_name = _("Member Detail")
verbose_name_plural = _("Member Details")
|
nemesiscodex/JukyOS-sugar
|
refs/heads/juky
|
extensions/deviceicon/volume.py
|
1
|
# Copyright (C) 2008 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import gobject
import gio
import gtk
import gconf
from sugar.graphics.tray import TrayIcon
from sugar.graphics.xocolor import XoColor
from jarabe.journal import journalactivity
from jarabe.view.palettes import VolumePalette
from jarabe.frame.frameinvoker import FrameWidgetInvoker
_icons = {}
class DeviceView(TrayIcon):
FRAME_POSITION_RELATIVE = 500
def __init__(self, mount):
self._mount = mount
icon_name = None
icon_theme = gtk.icon_theme_get_default()
for icon_name in self._mount.get_icon().props.names:
icon_info = icon_theme.lookup_icon(icon_name,
gtk.ICON_SIZE_LARGE_TOOLBAR, 0)
if icon_info is not None:
break
if icon_name is None:
icon_name = 'drive'
# TODO: retrieve the colors from the owner of the device
client = gconf.client_get_default()
color = XoColor(client.get_string('/desktop/sugar/user/color'))
TrayIcon.__init__(self, icon_name=icon_name, xo_color=color)
self.set_palette_invoker(FrameWidgetInvoker(self))
self.connect('button-release-event', self.__button_release_event_cb)
def create_palette(self):
palette = VolumePalette(self._mount)
palette.set_group_id('frame')
return palette
def __button_release_event_cb(self, widget, event):
journal = journalactivity.get_journal()
journal.set_active_volume(self._mount)
journal.reveal()
return True
def setup(tray):
gobject.idle_add(_setup_volumes, tray)
def _setup_volumes(tray):
volume_monitor = gio.volume_monitor_get()
for volume in volume_monitor.get_volumes():
_mount(volume, tray)
for mount in volume_monitor.get_mounts():
_add_device(mount, tray)
volume_monitor.connect('volume-added', _volume_added_cb, tray)
volume_monitor.connect('mount-added', _mount_added_cb, tray)
volume_monitor.connect('mount-removed', _mount_removed_cb, tray)
def _volume_added_cb(volume_monitor, volume, tray):
_mount(volume, tray)
def _mount(volume, tray):
# Follow Nautilus behaviour here
# since it has the same issue with removable device
# and it would be good to not invent our own workflow
if hasattr(volume, 'should_automount') and not volume.should_automount():
return
#TODO: should be done by some other process, like gvfs-hal-volume-monitor
#TODO: use volume.should_automount() when it gets into pygtk
if volume.get_mount() is None and volume.can_mount():
#TODO: pass None as mount_operation, or better, SugarMountOperation
volume.mount(gtk.MountOperation(tray.get_toplevel()), _mount_cb)
def _mount_cb(volume, result):
logging.debug('_mount_cb %r %r', volume, result)
volume.mount_finish(result)
def _mount_added_cb(volume_monitor, mount, tray):
_add_device(mount, tray)
def _mount_removed_cb(volume_monitor, mount, tray):
icon = _icons[mount]
tray.remove_device(icon)
del _icons[mount]
def _add_device(mount, tray):
icon = DeviceView(mount)
_icons[mount] = icon
tray.add_device(icon)
|
hronoses/vispy
|
refs/heads/master
|
vispy/visuals/tests/test_polygon.py
|
1
|
# -*- coding: utf-8 -*-
"""
Tests for PolygonVisual
All images are of size (100,100) to keep a small file size
"""
import numpy as np
from vispy.scene import visuals, transforms
from vispy.testing import (requires_application, requires_scipy, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved
@requires_application()
@requires_scipy()
def test_square_draw():
"""Test drawing squares without transforms using PolygonVisual"""
pos = np.array([[-0.5, 0.5, 0],
[0.5, 0.5, 0],
[0.5, -0.5, 0],
[-0.5, -0.5, 0]])
with TestingCanvas() as c:
polygon = visuals.Polygon(pos=pos, color=(1, 0, 0, 1))
polygon.transform = transforms.STTransform(scale=(50, 50),
translate=(50, 50))
c.draw_visual(polygon)
assert_image_approved("screenshot", 'visuals/square1.png')
polygon = visuals.Polygon(pos=pos, color=(1, 0, 0, 1),
border_color=(1, 1, 1, 1))
polygon.transform = transforms.STTransform(scale=(50, 50),
translate=(50, 50))
c.draw_visual(polygon)
assert_image_approved("screenshot", 'visuals/square2.png')
polygon = visuals.Polygon(pos=pos, border_color=(1, 1, 1, 1))
polygon.transform = transforms.STTransform(scale=(50, 50),
translate=(50, 50))
c.draw_visual(polygon)
assert_image_approved("screenshot", 'visuals/square3.png',
min_corr=0.45)
@requires_application()
@requires_scipy()
def test_rectangle_draw():
"""Test drawing rectangles with transforms using PolygonVisual"""
pos = np.array([[-0.1, 0.5, 0],
[0.1, 0.5, 0],
[0.1, -0.5, 0],
[-0.1, -0.5, 0]])
with TestingCanvas() as c:
polygon = visuals.Polygon(pos=pos, color=(1, 1, 0, 1))
polygon.transform = transforms.STTransform(scale=(200.0, 25),
translate=(50, 50))
c.draw_visual(polygon)
assert_image_approved("screenshot", 'visuals/rectangle1.png')
polygon = visuals.Polygon(pos=pos, color=(1, 1, 0, 1),
border_color=(1, 0, 0, 1))
polygon.transform = transforms.STTransform(scale=(200.0, 25),
translate=(50, 50))
c.draw_visual(polygon)
assert_image_approved("screenshot", 'visuals/rectangle2.png')
polygon = visuals.Polygon(pos=pos, border_color=(1, 0, 0, 1),
border_width=1)
polygon.transform = transforms.STTransform(scale=(200.0, 25),
translate=(50, 49))
c.draw_visual(polygon)
assert_image_approved("screenshot", 'visuals/rectangle3.png',
min_corr=0.7)
@requires_application()
@requires_scipy()
def test_reactive_draw():
"""Test reactive polygon attributes"""
pos = np.array([[-0.1, 0.5, 0],
[0.1, 0.5, 0],
[0.1, -0.5, 0],
[-0.1, -0.5, 0]])
with TestingCanvas() as c:
polygon = visuals.Polygon(pos=pos, color='yellow')
polygon.transform = transforms.STTransform(scale=(50, 50),
translate=(50, 50))
c.draw_visual(polygon)
polygon.pos += [0.1, -0.1, 0]
c.draw_visual(polygon)
assert_image_approved("screenshot", 'visuals/reactive_polygon1.png')
polygon.color = 'red'
c.draw_visual(polygon)
assert_image_approved("screenshot", 'visuals/reactive_polygon2.png')
polygon.border_color = 'yellow'
c.draw_visual(polygon)
assert_image_approved("screenshot", 'visuals/reactive_polygon3.png',
min_corr=0.8)
run_tests_if_main()
|
whip112/Whip112
|
refs/heads/master
|
vendor/packages/translate/storage/xml_extract/generate.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
import lxml.etree as etree
from translate.storage import base
from translate.storage.xml_extract import extract, misc, unit_tree
from translate.storage.xml_name import XmlNamer
def _get_tag_arrays(dom_node):
"""Return a dictionary indexed by child tag names, where each tag is associated with an array
of all the child nodes matching the tag name, in the order in which they appear as children
of dom_node.
>>> xml = '<a><b></b><c></c><b></b><d/></a>'
>>> element = etree.fromstring(xml)
>>> get_tag_arrays(element)
{'b': [<Element a at 84df144>, <Element a at 84df148>], 'c': [<Element a at 84df120>], 'd': [<Element a at 84df152>]}
"""
child_dict = {}
for child in dom_node:
if child.tag not in child_dict:
child_dict[child.tag] = []
child_dict[child.tag].append(child)
return child_dict
def apply_translations(dom_node, unit_node, do_translate):
tag_array = _get_tag_arrays(dom_node)
for unit_child_index, unit_child in unit_node.children.iteritems():
tag, index = unit_child_index
try:
dom_child = tag_array[XmlNamer(dom_node).name(tag)][index]
apply_translations(dom_child, unit_child, do_translate)
# Raised if tag is not in tag_array. We might want to complain to the
# user in the future.
except KeyError:
pass
# Raised if index is not in tag_array[tag]. We might want to complain to
# the user in the future
except IndexError:
pass
# If there is a translation unit associated with this unit_node...
if unit_node.unit is not None:
# Then invoke do_translate on the dom_node and the unit; do_translate
# should replace the text in dom_node with the text in unit_node.
do_translate(dom_node, unit_node.unit)
def reduce_dom_tree(f, dom_node, *state):
return misc.reduce_tree(f, dom_node, dom_node, lambda dom_node: dom_node, *state)
def find_dom_root(parent_dom_node, dom_node):
"""
.. seealso:: :meth:`find_placeable_dom_tree_roots`
"""
if dom_node is None or parent_dom_node is None:
return None
if dom_node.getparent() == parent_dom_node:
return dom_node
elif dom_node.getparent() is None:
return None
else:
return find_dom_root(parent_dom_node, dom_node.getparent())
def find_placeable_dom_tree_roots(unit_node):
"""For an inline placeable, find the root DOM node for the placeable in its
parent.
Consider the diagram. In this pseudo-ODF example, there is an inline span
element. However, the span is contained in other tags (which we never process).
When splicing the template DOM tree (that is, the DOM which comes from
the XML document we're using to generate a translated XML document), we'll
need to move DOM sub-trees around and we need the roots of these sub-trees::
<p> This is text \/ <- Paragraph containing an inline placeable
<blah> <- Inline placeable's root (which we want to find)
... <- Any number of intermediate DOM nodes
<span> bold text <- The inline placeable's Translatable
holds a reference to this DOM node
"""
def set_dom_root_for_unit_node(parent_unit_node, unit_node, dom_tree_roots):
dom_tree_roots[unit_node] = find_dom_root(parent_unit_node.dom_node, unit_node.dom_node)
return dom_tree_roots
return extract.reduce_unit_tree(set_dom_root_for_unit_node, unit_node, {})
def _map_source_dom_to_doc_dom(unit_node, source_dom_node):
"""Creating a mapping from the DOM nodes in source_dom_node which correspond to
placeables, with DOM nodes in the XML document template (this information is obtained
from unit_node). We are interested in DOM nodes in the XML document template which
are the roots of placeables. See the diagram below, as well as
:meth:`find_placeable_dom_tree_roots`.
XLIFF Source (below)::
<source>This is text <g> bold text</g> and a footnote<x/></source>
/ \________
/ \
<p>This is text<blah>...<span> bold text</span>...</blah> and <note>...</note></p>
Input XML document used as a template (above)
In the above diagram, the XLIFF source DOM node <g> is associated with the XML
document DOM node <blah>, whereas the XLIFF source DOM node <x> is associated with
the XML document DOM node <note>.
"""
dom_tree_roots = find_placeable_dom_tree_roots(unit_node)
source_dom_to_doc_dom = {}
def loop(unit_node, source_dom_node):
for child_unit_node, child_source_dom in zip(unit_node.placeables, source_dom_node):
source_dom_to_doc_dom[child_source_dom] = dom_tree_roots[child_unit_node]
loop(child_unit_node, child_source_dom)
loop(unit_node, source_dom_node)
return source_dom_to_doc_dom
def _map_target_dom_to_source_dom(source_dom_node, target_dom_node):
"""Associate placeables in source_dom_node and target_dom_node which
have the same 'id' attributes.
We're using XLIFF placeables. The XLIFF standard requires that
placeables have unique ids. The id of a placeable is never modified,
which means that even if placeables are moved around in a translation,
we can easily associate placeables from the source text with placeables
in the target text.
This function does exactly that.
"""
def map_id_to_dom_node(parent_node, node, id_to_dom_node):
# If this DOM node has an 'id' attribute, then add an id -> node
# mapping to 'id_to_dom_node'.
if u'id' in node.attrib:
id_to_dom_node[node.attrib[u'id']] = node
return id_to_dom_node
# Build a mapping of id attributes to the DOM nodes which have these ids.
id_to_dom_node = reduce_dom_tree(map_id_to_dom_node, target_dom_node, {})
def map_target_dom_to_source_dom_aux(parent_node, node, target_dom_to_source_dom):
# If this source node's id also appears in the target tree, map that target node back to this source node.
if u'id' in node.attrib and node.attrib[u'id'] in id_to_dom_node:
target_dom_to_source_dom[id_to_dom_node[node.attrib[u'id']]] = node
return target_dom_to_source_dom
# For each node in the DOM tree rooted at source_dom_node:
# 1. Check whether the node has an 'id' attribute.
# 2. If so, check whether there is a mapping of this id to a target DOM node
# in id_to_dom_node.
# 3. If so, associate this source DOM node with the target DOM node.
return reduce_dom_tree(map_target_dom_to_source_dom_aux, source_dom_node, {})
def _build_target_dom_to_doc_dom(unit_node, source_dom, target_dom):
source_dom_to_doc_dom = _map_source_dom_to_doc_dom(unit_node, source_dom)
target_dom_to_source_dom = _map_target_dom_to_source_dom(source_dom, target_dom)
return misc.compose_mappings(target_dom_to_source_dom, source_dom_to_doc_dom)
def _get_translated_node(target_node, target_dom_to_doc_dom):
"""Convenience function to get node corresponding to 'target_node'
and to assign the tail text of 'target_node' to this node."""
dom_node = target_dom_to_doc_dom[target_node]
dom_node.tail = target_node.tail
return dom_node
def _build_translated_dom(dom_node, target_node, target_dom_to_doc_dom):
"""Use the "shape" of 'target_node' (which is a DOM tree) to insert nodes
into the DOM tree rooted at 'dom_node'.
The mapping 'target_dom_to_doc_dom' is used to map nodes from 'target_node'
to nodes which must be inserted into dom_node.
"""
dom_node.text = target_node.text
# 1. Find all child nodes of target_node.
# 2. Filter out the children which map to None.
# 3. Call _get_translated_node on the remaining children; this maps a node in
# 'target_node' to a node in 'dom_node' and assigns the tail text of 'target_node'
# to the mapped node.
# 4. Add all of these mapped nodes to 'dom_node'
dom_node.extend(_get_translated_node(child, target_dom_to_doc_dom) for child in target_node
if target_dom_to_doc_dom[child] is not None)
# Recursively call this function on pairs of matched children in
# dom_node and target_node.
for dom_child, target_child in zip(dom_node, target_node):
_build_translated_dom(dom_child, target_child, target_dom_to_doc_dom)
def replace_dom_text(make_parse_state):
"""Return a function::
action: etree_Element x base.TranslationUnit -> None
which takes a dom_node and a translation unit. The dom_node is rearranged
according to rearrangement of placeables in unit.target (relative to their
positions in unit.source).
"""
def action(dom_node, unit):
"""Use the unit's target (or source in the case where there is no translation)
to update the text in the dom_node and at the tails of its children."""
source_dom = unit.source_dom
if unit.target_dom is not None:
target_dom = unit.target_dom
else:
target_dom = unit.source_dom
# Build a tree of (non-DOM) nodes which correspond to the translatable DOM nodes in 'dom_node'.
# Pass in a fresh parse_state every time, so as to avoid working with stale parse state info.
unit_node = extract.find_translatable_dom_nodes(dom_node, make_parse_state())[0]
target_dom_to_doc_dom = _build_target_dom_to_doc_dom(unit_node, source_dom, target_dom)
# Before we start reconstructing the sub-tree rooted at dom_node, we must clear out its children
dom_node[:] = []
_build_translated_dom(dom_node, target_dom, target_dom_to_doc_dom)
return action
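# The composition step in _build_target_dom_to_doc_dom chains two dicts; a
# minimal stand-in for misc.compose_mappings (hypothetical -- the real helper
# may differ) behaves like:
#
# def compose_mappings(first, second):
#     # Follow first, then second; drop keys whose image is missing.
#     return {k: second[v] for k, v in first.items() if v in second}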
|
maelnor/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/259_placeholder.py
|
200
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
def downgrade(migrate_engine):
pass
|
Jericho25/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/exporter/image.py
|
203
|
import os
from .. import constants, logger
from . import base_classes, io, api
class Image(base_classes.BaseNode):
"""Class the wraps an image node. This is the node that
represent that actual file on disk.
"""
def __init__(self, node, parent):
logger.debug("Image().__init__(%s)", node)
base_classes.BaseNode.__init__(self, node, parent, constants.IMAGE)
texture_folder = self.scene.options.get(constants.TEXTURE_FOLDER, "")
self[constants.URL] = os.path.join(texture_folder, api.image.file_name(self.node))
@property
def destination(self):
"""
:return: full destination path (when copied)
"""
dirname = os.path.dirname(self.scene.filepath)
return os.path.join(dirname, self[constants.URL])
@property
def filepath(self):
"""
:return: source file path
"""
return api.image.file_path(self.node)
def copy_texture(self, func=io.copy):
"""Copy the texture.
self.filepath > self.destination
:param func: Optional function override (Default value = io.copy)
arguments are (<source>, <destination>)
:return: path the texture was copied to
"""
logger.debug("Image().copy_texture()")
func(self.filepath, self.destination)
return self.destination
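# Because copy_texture takes the copy function as a parameter, the I/O can be
# swapped out. A hypothetical dry-run override, assuming img is an Image
# instance:
#
# planned = []
# img.copy_texture(func=lambda src, dst: planned.append((src, dst)))
# # planned now holds the (filepath, destination) pair without touching disk.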
|
BehavioralInsightsTeam/edx-platform
|
refs/heads/release-bit
|
lms/djangoapps/grades/migrations/0004_visibleblocks_course_id.py
|
17
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from opaque_keys.edx.django.models import CourseKeyField
from opaque_keys.edx.keys import CourseKey
class Migration(migrations.Migration):
dependencies = [
('grades', '0003_coursepersistentgradesflag_persistentgradesenabledflag'),
]
operations = [
migrations.AddField(
model_name='visibleblocks',
name='course_id',
field=CourseKeyField(default=CourseKey.from_string('edX/BerylMonkeys/TNL-5458'), max_length=255, db_index=True),
preserve_default=False,
),
]
|
Arafatk/sympy
|
refs/heads/master
|
sympy/physics/quantum/matrixcache.py
|
124
|
"""A cache for storing small matrices in multiple formats."""
from __future__ import print_function, division
from sympy import Matrix, I, Pow, Rational, exp, pi
from sympy.physics.quantum.matrixutils import (
to_sympy, to_numpy, to_scipy_sparse
)
class MatrixCache(object):
"""A cache for small matrices in different formats.
This class takes small matrices in the standard ``sympy.Matrix`` format,
and then converts these to both ``numpy.matrix`` and
``scipy.sparse.csr_matrix`` matrices. These matrices are then stored for
future recovery.
"""
def __init__(self, dtype='complex'):
self._cache = {}
self.dtype = dtype
def cache_matrix(self, name, m):
"""Cache a matrix by its name.
Parameters
----------
name : str
A descriptive name for the matrix, like "identity2".
m : Matrix
The matrix to cache, given as a sympy Matrix.
"""
try:
self._sympy_matrix(name, m)
except ImportError:
pass
try:
self._numpy_matrix(name, m)
except ImportError:
pass
try:
self._scipy_sparse_matrix(name, m)
except ImportError:
pass
def get_matrix(self, name, format):
"""Get a cached matrix by name and format.
Parameters
----------
name : str
A descriptive name for the matrix, like "identity2".
format : str
The format desired ('sympy', 'numpy', 'scipy.sparse')
"""
m = self._cache.get((name, format))
if m is not None:
return m
raise NotImplementedError(
'Matrix with name %s and format %s is not available.' %
(name, format)
)
def _store_matrix(self, name, format, m):
self._cache[(name, format)] = m
def _sympy_matrix(self, name, m):
self._store_matrix(name, 'sympy', to_sympy(m))
def _numpy_matrix(self, name, m):
m = to_numpy(m, dtype=self.dtype)
self._store_matrix(name, 'numpy', m)
def _scipy_sparse_matrix(self, name, m):
# TODO: explore different sparse formats. But sparse.kron will use
# coo in most cases, so we use that here.
m = to_scipy_sparse(m, dtype=self.dtype)
self._store_matrix(name, 'scipy.sparse', m)
sqrt2_inv = Pow(2, Rational(-1, 2), evaluate=False)
# Save the common matrices that we will need
matrix_cache = MatrixCache()
matrix_cache.cache_matrix('eye2', Matrix([[1, 0], [0, 1]]))
matrix_cache.cache_matrix('op11', Matrix([[0, 0], [0, 1]])) # |1><1|
matrix_cache.cache_matrix('op00', Matrix([[1, 0], [0, 0]])) # |0><0|
matrix_cache.cache_matrix('op10', Matrix([[0, 0], [1, 0]])) # |1><0|
matrix_cache.cache_matrix('op01', Matrix([[0, 1], [0, 0]])) # |0><1|
matrix_cache.cache_matrix('X', Matrix([[0, 1], [1, 0]]))
matrix_cache.cache_matrix('Y', Matrix([[0, -I], [I, 0]]))
matrix_cache.cache_matrix('Z', Matrix([[1, 0], [0, -1]]))
matrix_cache.cache_matrix('S', Matrix([[1, 0], [0, I]]))
matrix_cache.cache_matrix('T', Matrix([[1, 0], [0, exp(I*pi/4)]]))
matrix_cache.cache_matrix('H', sqrt2_inv*Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('Hsqrt2', Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix(
'SWAP', Matrix([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
matrix_cache.cache_matrix('ZX', sqrt2_inv*Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('ZY', Matrix([[I, 0], [0, -I]]))
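# Retrieval sketch: any cached matrix can be fetched back by name and format.
# The 'numpy' and 'scipy.sparse' variants exist only if those libraries were
# importable when the cache was populated.
#
#   X = matrix_cache.get_matrix('X', 'sympy')          # Matrix([[0, 1], [1, 0]])
#   Xs = matrix_cache.get_matrix('X', 'scipy.sparse')  # NotImplementedError if scipy is absent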
|
camagenta/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/twitter.py
|
89
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
float_or_none,
unescapeHTML,
)
class TwitterCardIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?twitter\.com/i/cards/tfw/v1/(?P<id>\d+)'
_TEST = {
'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
'md5': 'a74f50b310c83170319ba16de6955192',
'info_dict': {
'id': '560070183650213889',
'ext': 'mp4',
'title': 'TwitterCard',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 30.033,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
# Different formats served for different User-Agents
USER_AGENTS = [
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)', # mp4
'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0', # webm
]
config = None
formats = []
for user_agent in USER_AGENTS:
request = compat_urllib_request.Request(url)
request.add_header('User-Agent', user_agent)
webpage = self._download_webpage(request, video_id)
config = self._parse_json(
unescapeHTML(self._search_regex(
r'data-player-config="([^"]+)"', webpage, 'data player config')),
video_id)
video_url = config['playlist'][0]['source']
f = {
'url': video_url,
}
m = re.search(r'/(?P<width>\d+)x(?P<height>\d+)/', video_url)
if m:
f.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
formats.append(f)
self._sort_formats(formats)
thumbnail = config.get('posterImageUrl')
duration = float_or_none(config.get('duration'))
return {
'id': video_id,
'title': 'TwitterCard',
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
|
rajendrauppal/coding-interview
|
refs/heads/master
|
programming_languages/Python/decorator.py
|
1
|
#!/usr/bin/env python
from functools import wraps
def decorator(func):
"""Decorator which authenticates the incoming API call.
"""
@wraps(func)
# wraps, preserves metadata (__name__ and __doc__) of the wrapping function.
def wrapper(*args, **kwargs):
# we send all positional arguments and keyword arguments because
# we don't know what all arguments are going to be in func.
# perform any actual authentication here and return the func as is.
print("in decorator() wrapper function")
return func(*args, **kwargs)
return wrapper
@decorator
def do():
print("in do() function")
do()
# MRO (method resolution order)
# hasattr
# class inheritance
#
import time
def time_it(func):
@wraps(func)
def wrapper():
start = time.time()
func()
end = time.time()
return str(end - start)
return wrapper
@time_it
def long_operation():
"""Performs some time taking operation."""
numbers = []
for num in range(0, 100000):
numbers.append(num * 2)
print("Sum = " + str(sum(numbers)))
print(long_operation())
print(long_operation.__name__) # see the result with wraps or without wraps
print(long_operation.__doc__) # see the result with wraps or without wraps
print(long_operation.__module__) # ?? check
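# With @wraps, the three prints above show 'long_operation', its docstring and
# its defining module; without @wraps they would show 'wrapper' and None for
# the name and docstring. __module__ is also copied by wraps, but since both
# functions live in this same module it looks unchanged either way.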
# Decorating class methods
def make_uppercase(func):
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
self.name = self.name.upper()
self.family = self.family.upper()
return func(*args, **kwargs)
return wrapper
class Person:
def __init__(self):
self.name = "John"
self.family = "Doe"
@make_uppercase
def fullname(self):
return self.name + " " + self.family
p = Person()
print(p.fullname())
|
kkrizanovic/NanoMark
|
refs/heads/master
|
wrappers/wrapper_miniasm.py
|
2
|
#! /usr/bin/python
import os
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
import sys
sys.path.append(SCRIPT_PATH + '/../src/')
import subprocess
import multiprocessing
from time import gmtime, strftime
from dataspec import *
ASSEMBLER_TYPE = 'nonhybrid'; # hybrid or nonhybrid
try:
import basicdefines
MODULE_BASICDEFINES = True;
ASSEMBLERS_PATH_ROOT_ABS = basicdefines.ASSEMBLERS_PATH_ROOT_ABS;
TOOLS_ROOT = basicdefines.TOOLS_ROOT;
CGMEMTIME_PATH = basicdefines.CGMEMTIME_PATH;
CGMEMTIME_FILE = basicdefines.CGMEMTIME_FILE;
TOOLS_ROOT_ABS = basicdefines.TOOLS_ROOT_ABS;
except ImportError:
MODULE_BASICDEFINES = False;
ASSEMBLERS_PATH_ROOT_ABS = os.path.join(SCRIPT_PATH, 'assemblers/');
TOOLS_ROOT = '%s' % (SCRIPT_PATH);
CGMEMTIME_PATH = os.path.join(SCRIPT_PATH, 'tools/cgmemtime/');
CGMEMTIME_FILE = CGMEMTIME_PATH + '/cgmemtime';
TOOLS_ROOT_ABS = '%s/tools/' % (SCRIPT_PATH);
ASSEMBLER_URL = 'https://github.com/lh3/miniasm.git'
ASSEMBLER_PATH = os.path.join(ASSEMBLERS_PATH_ROOT_ABS, 'miniasm')
ASSEMBLER_BIN = os.path.join(ASSEMBLER_PATH,'miniasm/miniasm')
ASSEMBLER_NAME = 'Miniasm'
# ASSEMBLER_RESULTS = 'out/9-terminator/asm.ctg.fasta'
ASSEMBLY_UNPOLISHED = 'benchmark-unpolished_assembly.fasta'
ASSEMBLY_POLISHED = 'benchmark-polished_assembly.fasta'
CREATE_OUTPUT_FOLDER = True
# DRY_RUN = True;
DRY_RUN = False;
### Logs messages to STDERR and an output log file if provided (opened elsewhere).
def log(message, fp_log):
timestamp = strftime("%Y/%m/%d %H:%M:%S", gmtime());
sys.stderr.write('[%s wrapper %s] %s\n' % (ASSEMBLER_NAME, timestamp, message))
sys.stderr.flush();
if (fp_log != None):
fp_log.write('[%s wrapper %s] %s\n' % (ASSEMBLER_NAME, timestamp, message))
fp_log.flush();
import traceback;
def execute_command(command, fp_log, dry_run=True):
if (dry_run == True):
log('Executing (dryrun): "%s".' % (command), fp_log);
log('\n', fp_log);
return 0;
if (dry_run == False):
log('Executing: "%s".' % (command), fp_log);
rc = subprocess.call(command, shell=True);
if (rc != 0):
log('ERROR: subprocess call returned error code: %d.' % (rc), fp_log);
log('Traceback:', fp_log);
traceback.print_stack(fp_log);
exit(1);
return rc;
# def execute_command(command, fp_log, dry_run=True):
# sys.stderr.write('Executing command: "%s"\n' % command);
# if (dry_run == True):
# log('Executing (dryrun): "%s".' % (command), fp_log);
# if (dry_run == False):
# log('Executing: "%s".' % (command), fp_log);
# p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE);
# [output, err] = p.communicate()
# rc = p.returncode
# sys.stderr.write('\n');
# if (rc != 0):
# log('ERROR: subprocess call returned error code: %d.\n' % (rc), fp_log);
# traceback.print_stack(fp_log);
# exit(1);
# return [rc, output, err];
def measure_command(measure_file):
if (MODULE_BASICDEFINES == True and os.path.exists(CGMEMTIME_FILE)):
return basicdefines.measure_command(measure_file);
else:
return '/usr/bin/time --format "Command line: %%C\\nReal time: %%e s\\nCPU time: -1.0 s\\nUser time: %%U s\\nSystem time: %%S s\\nMaximum RSS: %%M kB\\nExit status: %%x" --quiet -o %s ' % measure_file;
def peek(fp, num_chars):
data = fp.read(num_chars);
if len(data) == 0:
return '';
fp.seek(num_chars * -1, 1);
return data;
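# Example: with fp positioned at the start of '>read1\n...', peek(fp, 1)
# returns '>' and seeks back, leaving the file position unchanged.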
# Returns a single read from the given FASTA/FASTQ file.
# Parameter header contains only the header of the read.
# Parameter lines contains all lines of the read, which include:
# - header
# - seq
# - '+' if FASTQ
# - quals if FASTQ
# Parameter lines is an array of strings, each for one component.
# Please note that multiline FASTA/FASTQ entries (e.g. sequence line)
# will be merged into one single line.
def get_single_read(fp):
lines = [];
line = fp.readline();
header = line.rstrip();
header_leading_char = '';
if (len(header) > 0):
sequence_separator = header[0];
header_leading_char = header[0];
header = header[1:]; # Strip the '>' or '@' sign from the beginning.
else:
return ['', []];
next_char = peek(fp, 1);
line_string = '';
lines.append(header_leading_char + header);
num_lines = 1;
#while len(next_char) > 0 and next_char != sequence_separator or (next_char == '@' and num_lines < 4):
while (len(next_char) > 0 and (next_char != sequence_separator or (next_char == '@' and num_lines < 4))):
line = fp.readline();
if (line.rstrip() == '+' or line.rstrip() == ('+' + header)):
#if (line.rstrip()[0] == '+'):
lines.append(line_string);
lines.append(line.rstrip());
line_string = '';
else:
line_string += line.rstrip();
next_char = peek(fp, 1);
num_lines += 1;
lines.append(line_string);
return [header, lines];
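# Usage sketch (path is hypothetical):
#   fp = open('reads.fastq', 'r');
#   while (True):
#       [header, read_lines] = get_single_read(fp);
#       if (len(header) == 0): break;   # empty header signals EOF
#       # read_lines == ['@header', 'ACGT...', '+', 'IIII...'] for FASTQ input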
##############################################################
##############################################################
##############################################################
def parse_memtime(memtime_path):
cmdline = '';
realtime = 0;
cputime = 0;
usertime = 0;
systemtime = 0;
maxrss = 0;
rsscache = 0;
time_unit = '';
mem_unit = '';
try:
fp = open(memtime_path, 'r');
lines = [line.strip() for line in fp.readlines() if (len(line.strip()) > 0)];
fp.close();
except Exception, e:
sys.stderr.write('Could not find memory and time statistics in file "%s".\n' % (memtime_path));
return [cmdline, realtime, cputime, usertime, systemtime, maxrss, time_unit, mem_unit];
for line in lines:
if (line.startswith('Command line:')):
cmdline = line.split(':')[1].strip();
elif (line.startswith('Real time:')):
split_line = line.split(':')[1].strip().split(' ');
realtime = float(split_line[0].strip());
time_unit = split_line[1].strip();
elif (line.startswith('CPU time:')):
split_line = line.split(':')[1].strip().split(' ');
cputime = float(split_line[0].strip());
time_unit = split_line[1].strip();
elif (line.startswith('User time:')):
split_line = line.split(':')[1].strip().split(' ');
usertime = float(split_line[0].strip());
time_unit = split_line[1].strip();
elif (line.startswith('System time:')):
split_line = line.split(':')[1].strip().split(' ');
systemtime = float(split_line[0].strip());
time_unit = split_line[1].strip();
elif (line.startswith('Maximum RSS:')):
split_line = line.split(':')[1].strip().split(' ');
maxrss = float(split_line[0].strip());
mem_unit = split_line[1].strip();
# elif (line.startswith('')):
# split_line = line.split(':')[1].strip().split(' ');
# rsscache = float(split_line[0].strip());
# mem_unit = split_line[1].strip();
return [cmdline, realtime, cputime, usertime, systemtime, maxrss, time_unit, mem_unit];
def parse_memtime_files_and_accumulate(memtime_files, final_memtime_file):
final_command_line = '';
final_real_time = 0.0;
final_cpu_time = 0.0;
final_user_time = 0.0;
final_system_time = 0.0;
final_time_unit = '';
final_max_rss = 0;
final_mem_unit = '';
i = 0;
for memtime_file in memtime_files:
i += 1;
sys.stderr.write('Parsing memtime file "%s"...\n' % (memtime_file));
[cmdline, realtime, cputime, usertime, systemtime, maxrss, time_unit, mem_unit] = parse_memtime(memtime_file);
if (i == 1):
final_command_line = cmdline;
final_real_time = realtime;
final_cpu_time = cputime;
final_user_time = usertime;
final_system_time = systemtime;
final_max_rss = maxrss;
final_time_unit = time_unit;
final_mem_unit = mem_unit;
else:
if (time_unit == final_time_unit and mem_unit == final_mem_unit):
final_command_line += '; ' + cmdline;
final_real_time += realtime;
final_cpu_time += cputime;
final_user_time += usertime;
final_system_time += systemtime;
final_max_rss = max(final_max_rss, maxrss);
else:
sys.stderr.write('Memory or time units not the same in all files! Instead of handling this, we decided to be lazy and just give up.\n');
break;
try:
fp = open(final_memtime_file, 'w');
except Exception, e:
sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % (final_memtime_file));
return;
if (final_cpu_time <= 0.0):
final_cpu_time = final_user_time + final_system_time;
fp.write('Command line: %s\n' % (final_command_line));
fp.write('Real time: %f %s\n' % (final_real_time, final_time_unit));
fp.write('CPU time: %f %s\n' % (final_cpu_time, final_time_unit));
fp.write('User time: %f %s\n' % (final_user_time, final_time_unit));
fp.write('System time: %f %s\n' % (final_system_time, final_time_unit));
fp.write('Maximum RSS: %f %s\n' % (final_max_rss, final_mem_unit));
fp.close();
##############################################################
##############################################################
##############################################################
# Function 'run' should provide a standard interface for running an assembler. Given the input datasets, it should run the
# assembly process and leave the resulting contigs (FASTA) in the output folder.
# datasets               List of Dataset objects describing the input reads (FASTA/FASTQ) and their sequencing technology.
# output_path            Folder to which the output will be placed. Filenames are automatically generated according to the name of the assembler being run.
# approx_genome_len      Approximate length of the genome to be assembled. Unused by this wrapper; kept for interface compatibility.
# move_exiting_out_path  If True, an existing output folder is backed up (renamed with a timestamp) before the run.
# def run(reads_file, reference_file, machine_name, output_path, output_suffix=''):
def run(datasets, output_path, approx_genome_len=0, move_exiting_out_path=True):
##################################################################################
### Sanity check for input datasets.
##################################################################################
machine_name = None
reads_file = None;
for dataset in datasets:
if (machine_name != None and dataset.type != machine_name):
sys.stderr.write('ERROR: %s is not a hybrid assembler, but datasets from disparate technologies are specified! Exiting.\n' % (ASSEMBLER_NAME));
exit(1);
machine_name = dataset.type;
reads_file = dataset.reads_path;
if (machine_name == None):
sys.stderr.write('ERROR: Input datasets not specified correctly! Exiting.\n');
exit(1);
# if (len(datasets) > 1):
# sys.stderr.write('ERROR: More than one input dataset specified. Only one is expected. Exiting.\n');
# exit(1);
##################################################################################
### Simple variable definitions.
##################################################################################
### Reference for running a local job instead of using an SGE cluster:
### http://seqanswers.com/forums/showthread.php?t=50937
num_threads = multiprocessing.cpu_count() / 2;
output_path = os.path.abspath(output_path);
##################################################################################
### Backup old assembly results, and create the new output folder.
##################################################################################
if (move_exiting_out_path == True):
if (os.path.exists(output_path)):
timestamp = strftime("%Y_%m_%d-%H_%M_%S", gmtime());
os.rename(output_path, '%s.bak_%s' % (output_path, timestamp));
if (not os.path.exists(output_path)):
log('Creating a directory on path "%s".' % (output_path), None);
os.makedirs(output_path);
##################################################################################
### Prepare a log file.
##################################################################################
log_file = '%s/wrapper_log.txt' % (output_path);
try:
fp_log = open(log_file, 'a');
except Exception, e:
log('ERROR: Could not open file "%s" for writing! Using only STDERR for logging.' % (log_file), None);
fp_log = None;
##################################################################################
### Preparing the input datasets.
##################################################################################
reads_file = '%s/all_reads.fastq' % (output_path);
for dataset in datasets:
if (dataset.reads_path.endswith('fasta') or dataset.reads_path.endswith('fa')):
# log('ERROR: Assembler %s expects only FASTQ files for input. Trouble loading "%s". Exiting.\n' % (ASSEMBLER_NAME, dataset.reads_path), fp_log);
converted_reads_path = '%s/%s.fastq' % (output_path, os.path.splitext(os.path.basename(dataset.reads_path))[0]);
log('Converting file "%s" to FASTQ format and aggregating to "%s".\n' % (dataset.reads_path, reads_file), fp_log);
command = 'java -jar %s/convertFastaAndQualToFastq.jar %s >> %s' % (ASSEMBLER_PATH, dataset.reads_path, reads_file);
execute_command(command, fp_log, dry_run=DRY_RUN);
else:
log('Aggregating FASTQ file "%s" to "%s".\n' % (dataset.reads_path, reads_file), fp_log);
command = 'cat %s >> %s' % (dataset.reads_path, reads_file);
execute_command(command, fp_log, dry_run=DRY_RUN);
##################################################################################
### Start the important work.
##################################################################################
memtime_path = os.path.join(output_path, 'total.memtime')
memtime_files_prefix = '%s/%s' % (output_path, ASSEMBLER_NAME);
current_memtime_id = 0;
spec_file = ''
if machine_name == 'pacbio' or machine_name == 'nanopore':
overlaps_file = '%s/overlaps.paf.gz' % (output_path);
current_memtime_id += 1;
command = '%s %s/minimap/minimap -Sw5 -L100 -m0 -t%d %s %s | gzip -1 > %s' % (measure_command('%s-%s.memtime' % (memtime_files_prefix, current_memtime_id)), ASSEMBLER_PATH, num_threads, reads_file, reads_file, overlaps_file);
execute_command(command, fp_log, dry_run=DRY_RUN);
gfa_file = '%s/miniasm.gfa' % (output_path);
current_memtime_id += 1;
command = '%s %s -f %s %s > %s' % (measure_command('%s-%s.memtime' % (memtime_files_prefix, current_memtime_id)), ASSEMBLER_BIN, reads_file, overlaps_file, gfa_file);
execute_command(command, fp_log, dry_run=DRY_RUN);
command = 'awk \'$1 ~/S/ {print ">"$2"\\n"$3}\' %s > %s/%s' % (gfa_file, output_path, ASSEMBLY_UNPOLISHED);
execute_command(command, fp_log, dry_run=DRY_RUN);
elif machine_name == 'illumina':
log('\nMachine name "%s" not implemented for %s.\n' % (machine_name, ASSEMBLER_NAME), fp_log);
log('Skipping ....\n', fp_log)
return;
else:
log('\nMachine name "%s" not implemented for %s.\n' % (machine_name, ASSEMBLER_NAME), fp_log);
log('Skipping ....\n', fp_log)
return;
if (fp_log != None):
fp_log.close();
all_memtimes = ['%s-%s.memtime' % (memtime_files_prefix, value) for value in xrange(1, current_memtime_id + 1)];  # +1 so the last memtime file is included
parse_memtime_files_and_accumulate(all_memtimes, memtime_path);
# A placeholder for a function that runs quast on assembly results
# This is the same for all wrappers at the moment and is done in the main program
def run_quast():
pass
# A function that gets the results of cgmemtime for a wrapper
# Since some aligners are run multiple times and create multiple measurements
# this is specific for each wrapper
# Returns relevant real time, user time and maximum memory reserved
def get_memtime():
pass
# This is a standard interface for setting up an assembler. It should assume that the assembler
# is not present locally, but needs to be retrieved, unpacked, compiled and set up, without requiring
# root privileges.
def download_and_install():
if os.path.exists(ASSEMBLER_BIN):
sys.stderr.write('[%s wrapper] Bin found at %s. Skipping installation ...\n' % (ASSEMBLER_NAME, ASSEMBLER_BIN))
else:
sys.stderr.write('[%s wrapper] Started installation of %s.\n' % (ASSEMBLER_NAME, ASSEMBLER_NAME))
if (not os.path.exists(ASSEMBLER_PATH)):
log('Creating a directory on path "%s".' % (ASSEMBLER_PATH), None);
os.makedirs(ASSEMBLER_PATH);
command = 'cd %s; git clone https://github.com/lh3/minimap && (cd minimap && make)' % (ASSEMBLER_PATH)
execute_command(command, None, dry_run=DRY_RUN);
command = 'cd %s; git clone %s && (cd miniasm && make)' % (ASSEMBLER_PATH, ASSEMBLER_URL)
execute_command(command, None, dry_run=DRY_RUN);
command = 'cd %s; wget http://www.cbcb.umd.edu/software/PBcR/data/convertFastaAndQualToFastq.jar' % (ASSEMBLER_PATH);
execute_command(command, None, dry_run=DRY_RUN);
# if os.path.exists(CGMEMTIME_PATH + '/' + CGMEMTIME_BIN):
# sys.stderr.write('Cgmemtime already installed. Skipping...\n')
# else:
# command = 'mkdir -p %s; cd %s; git clone https://github.com/isovic/cgmemtime.git' % (TOOLS_ROOT_ABS, TOOLS_ROOT_ABS)
# execute_command(command, None, dry_run=DRY_RUN);
def verbose_usage_and_exit():
sys.stderr.write('Usage:\n')
sys.stderr.write('\t%s mode [<output_path> approx_genome_len dataset1 [dataset2 ...]]\n' % sys.argv[0])
sys.stderr.write('\n')
sys.stderr.write('\t- mode - either "run" or "install". If "install" other parameters can be omitted.\n')
    sys.stderr.write('\t- dataset - specification of a dataset in the form: reads_type,<reads_path>[,<reads_path_b>,frag_len,frag_stddev] .\n');
    sys.stderr.write('\t  Reads_type can be nanopore/pacbio/single/paired/mate. If reads_type is not "paired" or "mate", the last three parameters can be omitted.\n');
sys.stderr.write('\t If reads_type == "paired" or "mate", other end of the pair needs to be in another file provided by reads_path_b.\n');
    sys.stderr.write('\t- approx_genome_len - approximate length of the genome to be assembled. This parameter is necessary for PBcR.\n');
sys.stderr.write('\n');
sys.stderr.write('Example:\n');
sys.stderr.write('\t%s run results/%s - nanopore,datasets/reads.fastq\n' % (os.path.basename(sys.argv[0]), ASSEMBLER_NAME));
sys.stderr.write('\n');
exit(0)
if __name__ == "__main__":
if (len(sys.argv) < 2):
verbose_usage_and_exit()
if (sys.argv[1] == 'install'):
download_and_install()
elif (sys.argv[1] == 'run'):
if (len(sys.argv) < 5):
verbose_usage_and_exit()
output_path = sys.argv[2]
approx_genome_len = 0 if (sys.argv[3] == '-') else int(sys.argv[3]);
datasets = [];
for arg in sys.argv[4:]:
dataset = Dataset(arg);
datasets.append(dataset);
run(datasets, output_path, approx_genome_len);
else:
verbose_usage_and_exit()
|
pwoodworth/intellij-community
|
refs/heads/master
|
python/testData/refactoring/introduceVariable/substringFromFormatSingleValue.py
|
83
|
print("<selection>Hello</selection> %s" % "World")
|
bvamanan/ns3
|
refs/heads/master
|
src/topology-read/bindings/callbacks_list.py
|
664
|
callback_classes = [
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
ibinti/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyPep8NamingInspection/importConstant.py
|
74
|
from x import TEST as <weak_warning descr="Constant variable imported as non constant">test</weak_warning>
|
eicher31/compassion-modules
|
refs/heads/dev
|
intervention_compassion/__manifest__.py
|
3
|
# -*- coding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2016-2017 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# pylint: disable=C8101
{
'name': 'Compassion Interventions',
'version': '10.0.1.3.1',
'category': 'Other',
'author': 'Compassion CH',
'license': 'AGPL-3',
'website': 'http://www.compassion.ch',
'depends': ['child_compassion'],
'external_dependencies': {},
'data': [
'data/compassion.intervention.category.csv',
'data/compassion.intervention.subcategory.csv',
'data/compassion.intervention.deliverable.csv',
'data/install_category_rel.xml',
'data/intervention_server_actions.xml',
'data/intervention_action_rules.xml',
'data/gmc_action.xml',
'security/intervention_groups.xml',
'security/ir.model.access.csv',
'views/compassion_intervention_view.xml',
'views/global_intervention_view.xml',
'views/intervention_search_view.xml',
'views/project_view.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
|
bleepbloop/Pivy
|
refs/heads/master
|
examples/contrib/iv2pov.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###
# Copyright (c) 2005 Øystein Handegard <handegar@sim.no>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
##
## A simple Inventor-2-POV converter
##
## TODO:
## * Handle transformation of light position
## * Handle textures
## * Better lightsource converter (esp. for spotlights)
## * Native support for POV primitives (Spheres/Cones/Cylinder)
## * Better camera support
## * Search graph for lights or cameras BEFORE processing. Add
## if missing.
## * Better material handling
## * Make it into a library?
##
import sys
from pivy.coin import *
from pivy.sogui import *
##############################################################
lightfound = False
##############################################################
def printMaterial(action, idx=0):
(ambient, diffuse, specular, emissive, shininess, transparency) = action.getMaterial(idx)
print " texture { // Material"
if transparency != 0:
print " pigment { color rgbt <%f, %f, %f, %f> }" % (diffuse[0], diffuse[1], diffuse[2], transparency)
else:
print " pigment { color rgb <%f, %f, %f> }" % (diffuse[0], diffuse[1], diffuse[2])
ambientfactor = SbVec3f(ambient[0], ambient[1], ambient[2]).length() / 3.0
diffusefactor = SbVec3f(specular[0], specular[1], specular[2]).length() / 3.0
print " finish { diffuse 0.8 ambient %f specular %f reflection %f }" % (ambientfactor, diffusefactor, shininess)
print " }"
##############################################################
def cameraCallback(userdata, action, camera):
print "\ncamera {"
print " perspective"
print " up <0, 1, 0>"
## FIXME: This is not really needed (?) (20050819 handegar)
##print " right <%f, 0, 0>" % camera.aspectRatio.getValue()
print " direction <0, 0, -1>"
campos = camera.position.getValue()
print " location <%f, %f, %f>" % (campos[0], campos[1], campos[2])
print " angle %f" % (camera.heightAngle.getValue() * (360.0 / (2*M_PI)))
lookat = camera.getViewVolume().getSightPoint(10)
print " look_at <%f, %f, %f>" % (lookat[0], lookat[1], lookat[2])
print "}\n"
def lightCallback(userdata, action, light):
global lightfound
lightfound = True
print "\nlight_source {"
position = SbVec3f()
if light.isOfType(SoDirectionalLight.getClassTypeId()) :
position = light.direction.getValue() ## Not exactly correct
else :
position = light.location.getValue()
print " <%f, %f, %f>" % (position[0], position[1], position[2])
color = light.color.getValue()
print " rgb <%f, %f, %f>" % (color[0], color[1], color[2])
if light.isOfType(SoDirectionalLight.getClassTypeId()) :
print " parallel"
print " point_at <0, 0, 0>" ## I'm not sure if this is correct (but it looks OK)
if light.isOfType(SoSpotLight.getClassTypeId()):
target = position + light.direction.getValue()
print " spotlight"
print " radius %f" % (2*(light.cutOffAngle.getValue() * 360) / (2*M_PI))
print " point_at <%f, %f, %f>" % (target[0], target[1], target[2])
print "}\n"
def preShapeCallback(userdata, action, node):
print "\n// Mesh start"
if node.getName().getString() != "":
print "// name: '%s'" % node.getName().getString()
print "mesh {"
return SoCallbackAction.CONTINUE
def postShapeCallback(userdata, action, node):
printMaterial(action)
print "} // Mesh end\n"
return SoCallbackAction.CONTINUE
def triangleCallback(userdata, action, v1, v2, v3):
matrix = action.getModelMatrix()
revmatrix = matrix.inverse().transpose()
p1 = matrix.multVecMatrix(v1.getPoint())
p2 = matrix.multVecMatrix(v2.getPoint())
p3 = matrix.multVecMatrix(v3.getPoint())
n1 = revmatrix.multVecMatrix(v1.getNormal())
n2 = revmatrix.multVecMatrix(v2.getNormal())
n3 = revmatrix.multVecMatrix(v3.getNormal())
## FIXME: There must be a better way to detect if normals
## are invalid than this (20050819 handegar)
if n1[0] < 2.0: ## Substitute for +NaN as the normal is always normalized
print " smooth_triangle {"
print " <%f, %f, %f>,<%f, %f, %f>, <%f, %f, %f>,<%f, %f, %f>, <%f, %f, %f>,<%f, %f, %f>" % \
(p1[0], p1[1], p1[2], n1[0], n1[1], n1[2], p2[0], p2[1], p2[2], n2[0], n2[1], n2[2], p3[0], p3[1], p3[2], n3[0], n3[1], n3[2])
else:
print " triangle {"
print " <%f, %f, %f>, <%f, %f, %f>, <%f, %f, %f>" % (p1[0], p1[1], p1[2], p2[0], p2[1], p2[2], p3[0], p3[1], p3[2])
if (action.getMaterialBinding() == SoMaterialBinding.PER_FACE) or \
(action.getMaterialBinding() == SoMaterialBinding.PER_FACE_INDEXED):
print " texture { T%d }" % (v1.getMaterialIndex())
print " }"
return SoCallbackAction.CONTINUE
def materialCallback(userdata, action, node):
print "// Material declarations"
nr = node.diffuseColor.getNum()
for i in range(0, nr):
d = node.diffuseColor[i]
ambientfactor = SbVec3f(node.ambientColor[i][0], node.ambientColor[i][1], node.ambientColor[i][2]).length() / 3.0
specularfactor = SbVec3f(node.specularColor[i][0], node.specularColor[i][1], node.specularColor[i][2]).length() / 3.0
transparency = node.transparency[i]
shininess = node.shininess[i]
if node.ambientColor.getNum() < nr: ambientfactor = 0.2
if node.specularColor.getNum() < nr: specularfactor = 0
if node.transparency.getNum() < nr: transparency = 0
if node.shininess.getNum() < nr: shininess = 0
print "#declare T%d=" % (i)
print "texture { pigment { color rgbt <%f, %f, %f, %f> }" % (d[0], d[1], d[2], transparency)
print " finish { diffuse 0.8 ambient %f specular %f reflection %f } }" \
% (ambientfactor, specularfactor, shininess)
print "// end"
def convert(root):
print """
/*
iv2pov.py - An Inventor to Persistence of Vision converter
Version 0.01 alpha
Øystein Handegard, <handegar@sim.no>
*/"""
callbackAction = SoCallbackAction()
callbackAction.addPreCallback(SoPerspectiveCamera.getClassTypeId(), cameraCallback, None)
callbackAction.addPreCallback(SoMaterial.getClassTypeId(), materialCallback, None)
callbackAction.addPreCallback(SoLight.getClassTypeId(), lightCallback, None)
callbackAction.addPreCallback(SoShape.getClassTypeId(), preShapeCallback, None)
callbackAction.addPostCallback(SoShape.getClassTypeId(), postShapeCallback, None)
callbackAction.addTriangleCallback(SoShape.getClassTypeId(), triangleCallback, None)
callbackAction.apply(root)
##############################################################
def main():
myWindow = SoGui.init(sys.argv[0])
if myWindow == None: sys.exit(1)
sys.stderr.write("Inventor => POV converter - Version 0.01 alpha\n")
if len(sys.argv) != 2:
sys.stderr.write(" Usage: iv2pov.py [iv file] > out.pov\n")
sys.exit(1)
myInput = SoInput()
if not myInput.openFile(sys.argv[1]):
sys.stderr.write(" Could not open specified file.\n")
sys.exit(1)
root = SoDB.readAll(myInput)
sys.stderr.write("* Select a camera angle and press 'q'\n")
# setup viewer
myViewer = SoGuiExaminerViewer(myWindow)
myViewer.setSceneGraph(root)
myViewer.setTitle("Inventor to POV converter")
myViewer.viewAll()
myViewer.show()
SoGui.show(myWindow)
SoGui.mainLoop()
sys.stderr.write("* Processing...\n")
cam = myViewer.getCamera()
root.insertChild(cam, 0)
convert(root)
sys.stderr.write("* ...finished\n")
# Add a default headlight if no lights were processed (or else
# the scene gets completely dark...)
global lightfound
if not lightfound:
sys.stderr.write("* Scene contained no lights. Added a headlight to the camera.\n")
pos = cam.position.getValue()
print "// Default headlight"
print "light_source { <%f, %f, %f>, rgb <1, 1, 1> }" % (pos[0], pos[1], pos[2])
return 0
if __name__ == "__main__":
sys.exit(main())
|
n-west/gnuradio-volk
|
refs/heads/master
|
gnuradio-runtime/python/gnuradio/gru/mathmisc.py
|
78
|
#
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
def gcd(a,b):
while b:
a,b = b, a % b
return a
def lcm(a,b):
return a * b / gcd(a, b)
def log2(x):
return math.log(x)/math.log(2)
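# Quick examples (doctest-style sketch):
#   gcd(12, 18) -> 6
#   lcm(4, 6)   -> 12  (integer arithmetic under Python 2 division)
#   log2(8)     -> ~3.0 (floating point)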
|
liang42hao/bokeh
|
refs/heads/master
|
bokeh/server/views/deps.py
|
13
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from . import backbone, main, plugins, statics
# this just shuts up pyflakes
backbone, main, plugins, statics
|
clobrano/personfinder
|
refs/heads/master
|
app/pytz/zoneinfo/Etc/GMT_plus_12.py
|
9
|
'''tzinfo timezone information for Etc/GMT_plus_12.'''
from pytz.tzinfo import StaticTzInfo
from pytz.tzinfo import memorized_timedelta as timedelta
class GMT_plus_12(StaticTzInfo):
'''Etc/GMT_plus_12 timezone definition. See datetime.tzinfo for details'''
zone = 'Etc/GMT_plus_12'
_utcoffset = timedelta(seconds=-43200)
_tzname = 'GMT+12'
GMT_plus_12 = GMT_plus_12()
|
QingChenmsft/azure-cli
|
refs/heads/master
|
src/command_modules/azure-cli-extension/azure/cli/command_modules/extension/_help.py
|
3
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
helps['extension'] = """
type: group
short-summary: Manage and update CLI extensions.
"""
helps['extension add'] = """
type: command
short-summary: Add an extension.
"""
helps['extension list'] = """
type: command
short-summary: List the installed extensions.
"""
helps['extension list-available'] = """
type: command
short-summary: List publicly available extensions.
"""
helps['extension show'] = """
type: command
short-summary: Show an extension.
"""
helps['extension remove'] = """
type: command
short-summary: Remove an extension.
"""
|
steritecit/pythonTag
|
refs/heads/master
|
pycomm/__init__.py
|
8
|
__author__ = 'agostino'
|
grow/grow
|
refs/heads/dependabot/npm_and_yarn/grow/ui/postcss-7.0.36
|
grow/common/structures.py
|
1
|
"""Custom structures for Grow."""
from bisect import bisect_left
from bisect import bisect_right
class AttributeDict(dict):
"""Allows using a dictionary to reference keys as attributes."""
def __getattr__(self, attr):
try:
return self.__getitem__(attr)
except KeyError:
raise AttributeError('Object does not have attribute: {}'.format(attr))
__setattr__ = dict.__setitem__
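# Sketch: AttributeDict({'title': 'Home'}).title returns 'Home'; a missing
# attribute raises AttributeError (wrapping the underlying KeyError).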
class DeepReferenceDict(dict):
"""Deep reference dictionary using a delimited key."""
def __getitem__(self, key):
"""Handle the ability to do a delimited key."""
try:
return super(DeepReferenceDict, self).__getitem__(key)
except KeyError:
data = None
for sub_key in key.split('.'):
if data is None:
data = self.get(sub_key)
continue
if sub_key in data:
data = data[sub_key]
else:
raise
return data
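# Sketch: DeepReferenceDict({'meta': {'title': 'Home'}})['meta.title']
# returns 'Home'; a missing sub key re-raises the original KeyError.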
class SafeDict(dict):
"""Keeps the unmatched format params in place."""
def __missing__(self, key):
return '{' + key + '}'
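# Sketch: string.Formatter().vformat('{greeting} {name}', (), SafeDict(greeting='Hi'))
# yields 'Hi {name}' -- the unmatched {name} placeholder survives for a later pass.
# (str.format_map(SafeDict(...)) behaves the same on Python 3.)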
class SortedCollection(object):
"""Sequence sorted by a key function.
SortedCollection() is much easier to work with than using bisect() directly.
It supports key functions like those use in sorted(), min(), and max().
The result of the key function call is saved so that keys can be searched
efficiently.
Instead of returning an insertion-point which can be hard to interpret, the
five find-methods return a specific item in the sequence. They can scan for
exact matches, the last item less-than-or-equal to a key, or the first item
greater-than-or-equal to a key.
Once found, an item's ordinal position can be located with the index() method.
New items can be added with the insert() and insert_right() methods.
Old items can be deleted with the remove() method.
The usual sequence methods are provided to support indexing, slicing,
length lookup, clearing, copying, forward and reverse iteration, contains
checking, item counts, item removal, and a nice looking repr.
Finding and indexing are O(log n) operations while iteration and insertion
are O(n). The initial sort is O(n log n).
The key function is stored in the 'key' attribute for easy introspection or
so that you can assign a new key function (triggering an automatic re-sort).
In short, the class was designed to handle all of the common use cases for
bisect but with a simpler API and support for key functions.
>>> from pprint import pprint
>>> from operator import itemgetter
>>> s = SortedCollection(key=itemgetter(2))
>>> for record in [
... ('roger', 'young', 30),
... ('angela', 'jones', 28),
... ('bill', 'smith', 22),
... ('david', 'thomas', 32)]:
... s.insert(record)
>>> pprint(list(s)) # show records sorted by age
[('bill', 'smith', 22),
('angela', 'jones', 28),
('roger', 'young', 30),
('david', 'thomas', 32)]
>>> s.find_le(29) # find oldest person aged 29 or younger
('angela', 'jones', 28)
>>> s.find_lt(28) # find oldest person under 28
('bill', 'smith', 22)
>>> s.find_gt(28) # find youngest person over 28
('roger', 'young', 30)
>>> r = s.find_ge(32) # find youngest person aged 32 or older
>>> s.index(r) # get the index of their record
3
>>> s[3] # fetch the record at that index
('david', 'thomas', 32)
>>> s.key = itemgetter(0) # now sort by first name
>>> pprint(list(s))
[('angela', 'jones', 28),
('bill', 'smith', 22),
('david', 'thomas', 32),
('roger', 'young', 30)]
"""
def __init__(self, iterable=(), key=None, default=None):
self._given_key = key
key = (lambda x: x) if key is None else key
decorated = sorted((key(item), item) for item in iterable)
self._keys = [k for k, item in decorated]
self._items = [item for k, item in decorated]
self._key = key
self._default = default
def _getkey(self):
return self._key
def _setkey(self, key):
if key is not self._key:
self.__init__(self._items, key=key)
def _delkey(self):
self._setkey(None)
key = property(_getkey, _setkey, _delkey, 'key function')
def clear(self):
self.__init__([], self._key)
def copy(self):
return self.__class__(self, self._key)
def __len__(self):
return len(self._items)
def __getitem__(self, i):
return self._items[i]
def __iter__(self):
return iter(self._items)
def __reversed__(self):
return reversed(self._items)
def __repr__(self):
return '%s(%r, key=%s)' % (
self.__class__.__name__,
self._items,
getattr(self._given_key, '__name__', repr(self._given_key))
)
def __reduce__(self):
return self.__class__, (self._items, self._given_key)
def __contains__(self, item):
k = self._key(item)
k = k if k is not None else self._default
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return item in self._items[i:j]
def index(self, item):
'Find the position of an item. Raise ValueError if not found.'
k = self._key(item)
k = k if k is not None else self._default
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].index(item) + i
def count(self, item):
'Return number of occurrences of item'
k = self._key(item)
k = k if k is not None else self._default
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].count(item)
def insert(self, item):
'Insert a new item. If equal keys are found, add to the left'
k = self._key(item)
k = k if k is not None else self._default
i = bisect_left(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def insert_right(self, item):
'Insert a new item. If equal keys are found, add to the right'
k = self._key(item)
k = k if k is not None else self._default
i = bisect_right(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def remove(self, item):
'Remove first occurrence of item. Raise ValueError if not found'
i = self.index(item)
del self._keys[i]
del self._items[i]
def find(self, k):
'Return first item with a key == k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i != len(self) and self._keys[i] == k:
return self._items[i]
raise ValueError('No item found with key equal to: %r' % (k,))
def find_le(self, k):
'Return last item with a key <= k. Raise ValueError if not found.'
i = bisect_right(self._keys, k)
if i:
return self._items[i-1]
raise ValueError('No item found with key at or below: %r' % (k,))
def find_lt(self, k):
'Return last item with a key < k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i:
return self._items[i-1]
raise ValueError('No item found with key below: %r' % (k,))
def find_ge(self, k):
'Return first item with a key >= k. Raise ValueError if not found'
i = bisect_left(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key at or above: %r' % (k,))
def find_gt(self, k):
'Return first item with a key > k. Raise ValueError if not found'
i = bisect_right(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key above: %r' % (k,))
|
rjschof/gem5
|
refs/heads/master
|
ext/ply/test/yacc_badtok.py
|
174
|
# -----------------------------------------------------------------------------
# yacc_badtok.py
#
# A grammar, but tokens is a bad datatype
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
tokens = "Hello"
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
|
bdeepak77/Network-project
|
refs/heads/master
|
Network project/Network/download.py
|
2
|
#!/usr/bin/python
#status 5 "reset passwaord"
# 0 "registered" and 1 "send mail" 2 "mail send"
import mysql.connector
from mysql.connector import errorcode
import threading
import os
import subprocess
import time
config = {
'user': 'root',
'password': '',
'host': '127.0.0.1',
'database': 'network',
'raise_on_warnings': True,
}
try:
cnx = mysql.connector.connect(**config)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exists")
else:
print(err)
cursor = cnx.cursor()
def call_download(url, filename,size,email):
os.system("IDMan.lnk /n /p C:\wamp\www\Network\data /d %s"%(url))
print "IN download"
while(1):
print "checking for the file"
time.sleep(5)
if os.path.exists("C:\wamp\www\Network\data\%s"%(filename)):
break
#size /= 1024;
#size /= 1024;
cnx.commit()
readsql = "SELECT * FROM user WHERE email ='%s' "%(email)
cursor.execute(readsql)
toread = cursor.fetchone()
cnx.commit()
usage = toread[3];
if(usage>0):
usage = usage - size;
updatesql = "UPDATE `user` SET `usage`='%s' WHERE `email` = '%s' "%((usage),(email))
print(updatesql)
cursor.execute(updatesql)
cnx.commit()
updatesql = "UPDATE `downloads` SET `status`=9,`priority`=201 WHERE `url` = '%s' "%(url)
print(updatesql)
cursor.execute(updatesql)
cnx.commit()
#threading.exit()
# prepare a cursor object using cursor() method
while 1:
while 1 :
# Prepare SQL query to INSERT a record into the database.
readsql = "SELECT * FROM status WHERE kindof = 0 "
cursor.execute(readsql)
toread = cursor.fetchone()
cnx.commit()
if toread[1] == 0:
time.sleep(5)
print (toread[1])
print " Woke up"
continue;
#sleep for some time
else :
if toread[1] == -1:
time.sleep(10)
print "Again Woke up"
continue
else:
break
sql = "SELECT * FROM downloads WHERE priority < 201"
print '-- Checking for downloads'
try:
#os.system("idman.lnk /n /d http://www.fanpop.com/clubs/one-piece/images/16074133/title/luffy-ace-photo")
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists.
results = cursor.fetchall()
#a = "http://intranet.iith.ac.in/software/scientific/scilab/scilab-5.2.1.exe"
for row in results:
#downcmd = "IDMan.lnk /n /p C:\wamp\www\Network\data /f {} /d {} /n".format(name, url)
#print ("IDMan.lnk /n /p C:\wamp\www\Network\data /d %s"%(row[1]))
t=threading.Thread(target=call_download, args=(row[1],row[0],row[3],row[4]))
t.start()
print "Download thread started"
# os.system("IDMan.lnk /n /p C:\wamp\www\Network\data /d %s"%(row[1]))
# updatesql = "UPDATE `downloads` SET `status`=1,`priority`=201 WHERE `url` = '%s' "%(row[1])
# print(updatesql)
# cursor.execute(updatesql)
# cnx.commit()
except:
print "Error: unable to fecth data"
while threading.activeCount()>2:
print threading.activeCount()
time.sleep(10)
print "Downloads Completed."
updatesql = "UPDATE `status` SET `value`=0 WHERE `kindof` = 0"
print(updatesql)
cursor.execute(updatesql)
cnx.commit()
# disconnect from server
cnx.close()
|
echanna/EdxNotAFork
|
refs/heads/master
|
common/test/acceptance/pages/lms/find_courses.py
|
96
|
"""
Find courses page (main page of the LMS).
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import BrokenPromise
from . import BASE_URL
class FindCoursesPage(PageObject):
"""
Find courses page (main page of the LMS).
"""
url = BASE_URL
def is_browser_on_page(self):
return "edX" in self.browser.title
@property
def course_id_list(self):
"""
Retrieve the list of available course IDs
on the page.
"""
return self.q(css='article.course').attrs('id')
|
Metaswitch/calico-neutron
|
refs/heads/calico-readme
|
neutron/api/v2/resource.py
|
8
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers redux
"""
import sys
import netaddr
from oslo import i18n
import six
import webob.dec
import webob.exc
from neutron.common import exceptions
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import policy as common_policy
from neutron import wsgi
LOG = logging.getLogger(__name__)
class Request(wsgi.Request):
pass
def Resource(controller, faults=None, deserializers=None, serializers=None):
"""Represents an API entity resource and the associated serialization and
deserialization logic
"""
default_deserializers = {'application/json': wsgi.JSONDeserializer()}
default_serializers = {'application/json': wsgi.JSONDictSerializer()}
format_types = {'json': 'application/json'}
action_status = dict(create=201, delete=204)
default_deserializers.update(deserializers or {})
default_serializers.update(serializers or {})
deserializers = default_deserializers
serializers = default_serializers
faults = faults or {}
@webob.dec.wsgify(RequestClass=Request)
def resource(request):
route_args = request.environ.get('wsgiorg.routing_args')
if route_args:
args = route_args[1].copy()
else:
args = {}
# NOTE(jkoelker) by now the controller is already found, remove
# it from the args if it is in the matchdict
args.pop('controller', None)
fmt = args.pop('format', None)
action = args.pop('action', None)
content_type = format_types.get(fmt,
request.best_match_content_type())
language = request.best_match_language()
deserializer = deserializers.get(content_type)
serializer = serializers.get(content_type)
try:
if request.body:
args['body'] = deserializer.deserialize(request.body)['body']
method = getattr(controller, action)
result = method(request=request, **args)
except (exceptions.NeutronException,
netaddr.AddrFormatError,
common_policy.PolicyNotAuthorized) as e:
for fault in faults:
if isinstance(e, fault):
mapped_exc = faults[fault]
break
else:
mapped_exc = webob.exc.HTTPInternalServerError
if 400 <= mapped_exc.code < 500:
LOG.info(_LI('%(action)s failed (client error): %(exc)s'),
{'action': action, 'exc': e})
else:
LOG.exception(_LE('%s failed'), action)
e = translate(e, language)
body = serializer.serialize(
{'NeutronError': get_exception_data(e)})
kwargs = {'body': body, 'content_type': content_type}
raise mapped_exc(**kwargs)
except webob.exc.HTTPException as e:
type_, value, tb = sys.exc_info()
LOG.exception(_LE('%s failed'), action)
translate(e, language)
value.body = serializer.serialize(
{'NeutronError': get_exception_data(e)})
value.content_type = content_type
six.reraise(type_, value, tb)
except NotImplementedError as e:
e = translate(e, language)
# NOTE(armando-migliaccio): from a client standpoint
# it makes sense to receive these errors, because
# extensions may or may not be implemented by
# the underlying plugin. So if something goes south,
# because a plugin does not implement a feature,
# returning 500 is definitely confusing.
body = serializer.serialize(
{'NotImplementedError': get_exception_data(e)})
kwargs = {'body': body, 'content_type': content_type}
raise webob.exc.HTTPNotImplemented(**kwargs)
except Exception:
# NOTE(jkoelker) Everything else is 500
LOG.exception(_LE('%s failed'), action)
# Do not expose details of 500 error to clients.
msg = _('Request Failed: internal server error while '
'processing your request.')
msg = translate(msg, language)
body = serializer.serialize(
{'NeutronError': get_exception_data(
webob.exc.HTTPInternalServerError(msg))})
kwargs = {'body': body, 'content_type': content_type}
raise webob.exc.HTTPInternalServerError(**kwargs)
status = action_status.get(action, 200)
body = serializer.serialize(result)
# NOTE(jkoelker) Comply with RFC2616 section 9.7
if status == 204:
content_type = ''
body = None
return webob.Response(request=request, status=status,
content_type=content_type,
body=body)
return resource
def get_exception_data(e):
"""Extract the information about an exception.
Neutron client for the v2 API expects exceptions to have 'type', 'message'
and 'detail' attributes. This information is extracted and converted into a
dictionary.
:param e: the exception to be reraised
:returns: a structured dict with the exception data
"""
err_data = {'type': e.__class__.__name__,
'message': e, 'detail': ''}
return err_data
def translate(translatable, locale):
"""Translates the object to the given locale.
If the object is an exception its translatable elements are translated
in place, if the object is a translatable string it is translated and
returned. Otherwise, the object is returned as-is.
:param translatable: the object to be translated
:param locale: the locale to translate to
:returns: the translated object, or the object as-is if it
was not translated
"""
localize = i18n.translate
if isinstance(translatable, exceptions.NeutronException):
translatable.msg = localize(translatable.msg, locale)
elif isinstance(translatable, webob.exc.HTTPError):
translatable.detail = localize(translatable.detail, locale)
elif isinstance(translatable, Exception):
translatable.message = localize(translatable.message, locale)
else:
return localize(translatable, locale)
return translatable
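# Sketch: translate(_('Request Failed...'), language) returns the localized
# string, while translate(neutron_exc, language) localizes the exception's
# message in place and returns the exception object itself.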
|
rlr/kitsune
|
refs/heads/master
|
kitsune/motidings/urls.py
|
18
|
from django.conf.urls import patterns, url
# Note: This overrides tidings' own tidings.unsubscribe url pattern, so
# we need to keep the name exactly as it is.
urlpatterns = patterns(
'kitsune.motidings.views',
url(r'^unsubscribe/(?P<watch_id>\d+)$',
'unsubscribe',
name='tidings.unsubscribe')
)
|
edmundoa/CheckApp
|
refs/heads/develop
|
checkapp/profiles/helpers/data_checker.py
|
1
|
# coding=utf8
#
# Copyright (C) 2011 Edmundo Álvarez Jiménez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Edmundo Álvarez Jiménez <e.alvarezj@gmail.com>
import re
from checkapp.profiles.models import Application, Category, CheckApp, \
Comment, Platform, Profile
class DataError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
def __unicode__(self):
return repr(self.msg)
class DataChecker:
TEXT_RE = re.compile(r'^[\w\d\-\.]+$')
EMAIL_RE = re.compile(r'^[\w\-\.]+@[\w\-]+\.[\w\.]{2,}$')
URL_RE = re.compile(r'^http(s)?:\/\/[\w\d](.)*$')
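    # e.g. TEXT_RE accepts "john_doe-2.0", EMAIL_RE accepts "a.b@example.com",
    # and URL_RE accepts "https://example.com/path"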
@staticmethod
def defined(string):
return (string is not None) and (string != "")
@staticmethod
def user_exists(uname):
user = Profile.objects.filter(username=uname)
if len(user) > 0:
raise DataError('User already exists')
@staticmethod
def check_username(uname):
if not DataChecker.defined(uname):
raise DataError("Username cannot be empty")
if len(uname) < 3:
raise DataError('Username must have at least 3 characters')
if not DataChecker.TEXT_RE.match(uname):
raise DataError("Invalid username format")
@staticmethod
def check_first_name(fname):
if not DataChecker.defined(fname):
raise DataError("First name cannot be empty")
@staticmethod
def check_last_name(lname):
if not DataChecker.defined(lname):
raise DataError("Last name cannot be empty")
@staticmethod
def check_email(email):
if not DataChecker.defined(email):
raise DataError("E-Mail cannot be empty")
if not DataChecker.EMAIL_RE.match(email):
raise DataError("Invalid E-Mail format")
@staticmethod
def check_password(password, confirmation):
if not DataChecker.defined(password):
raise DataError("Password cannot be empty")
if len(password) < 6:
raise DataError("Password has to have at least 6 characters")
if (password != confirmation):
raise DataError("Passwords don't match")
@staticmethod
def check_short_name(sname):
if not DataChecker.defined(sname):
raise DataError("Short name cannot be empty")
if len(sname) < 3:
raise DataError('Short name must have at least 3 characters')
if not DataChecker.TEXT_RE.match(sname):
raise DataError("Invalid short name format")
@staticmethod
def check_name(name):
if not DataChecker.defined(name):
raise DataError("Name cannot be empty")
@staticmethod
def check_category(category):
try:
Category.objects.get(name=category)
        except Category.DoesNotExist:
raise DataError("Category '%s' doesn't exist" % category)
@staticmethod
def check_platform(platform):
try:
Platform.objects.get(name=platform)
        except Platform.DoesNotExist:
raise DataError("Platform '%s' doesn't exist" % platform)
@staticmethod
def check_url(url):
if not DataChecker.defined(url):
return
if not DataChecker.URL_RE.match(url):
raise DataError("%s is not a valid URL" % url)
@staticmethod
def check_comment(comment):
if not DataChecker.defined(comment):
raise DataError("Comment cannot be empty")
if len(comment) > Comment.COMMENT_LENGTH:
raise DataError("Comment is too long (max. %s characters)" %\
(Comment.COMMENT_LENGTH))
@staticmethod
def check_ca_comment(ca_comment):
if len(ca_comment) > CheckApp.COMMENT_LENGTH:
raise DataError("Comment is too long (max. %s characters)" %\
(CheckApp.COMMENT_LENGTH))
@staticmethod
def check_description(description):
if not DataChecker.defined(description):
raise DataError("Description cannot be empty")
if len(description) > Application.DESC_LENGTH:
raise DataError("Description is too long (max. %s characters)" %\
(Application.DESC_LENGTH))
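# Usage sketch (hypothetical input), e.g. from a registration view:
#
#     try:
#         DataChecker.check_username("jo")
#     except DataError as e:
#         print(e.msg)  # -> "Username must have at least 3 characters"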
|
mtagle/airflow
|
refs/heads/master
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
2
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Base operator for SQL to GCS operators.
"""
import abc
import json
import warnings
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class BaseSQLToGCSOperator(BaseOperator, metaclass=abc.ABCMeta):
"""
:param sql: The SQL to execute.
:type sql: str
:param bucket: The bucket to upload to.
:type bucket: str
:param filename: The filename to use as the object name when uploading
to Google Cloud Storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: str
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from the database.
:type schema_filename: str
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:type approx_max_file_size_bytes: long
:param export_format: Desired format of files to be exported.
:type export_format: str
:param field_delimiter: The delimiter to be used for CSV files.
:type field_delimiter: str
:param gzip: Option to compress file for upload (does not apply to schemas).
:type gzip: bool
:param schema: The schema to use, if any. Should be a list of dict or
a str. Pass a string if using Jinja template, otherwise, pass a list of
        dict. Examples can be seen at: https://cloud.google.com/bigquery/docs
        /schemas#specifying_a_json_schema_file
:type schema: str or list
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:param parameters: a parameters dict that is substituted at query runtime.
:type parameters: dict
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename', 'schema', 'parameters')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
export_format='json',
field_delimiter=',',
gzip=False,
schema=None,
parameters=None,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
delegate_to=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.export_format = export_format.lower()
self.field_delimiter = field_delimiter
self.gzip = gzip
self.schema = schema
self.parameters = parameters
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
def execute(self, context):
cursor = self.query()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.append(self._write_local_schema_file(cursor))
# Flush all files before uploading
for tmp_file in files_to_upload:
tmp_file['file_handle'].flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for tmp_file in files_to_upload:
tmp_file['file_handle'].close()
def convert_types(self, schema, col_type_dict, row):
"""Convert values from DBAPI to output-friendly formats."""
return [
self.convert_type(value, col_type_dict.get(name))
for name, value in zip(schema, row)
]
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
        :return: A list of dictionaries, one per file split, where
            'file_name' is the object name to use in GCS, 'file_handle' is a
            handle to the local temp file holding that split's data, and
            'file_mime_type' is its MIME type.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if self.export_format == 'csv':
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
}]
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats.
# Convert binary type object to string encoded with base64.
row = self.convert_types(schema, col_type_dict, row)
if self.export_format == 'csv':
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
tmp_file_handle.write(
json.dumps(row_dict, sort_keys=True, ensure_ascii=False).encode("utf-8")
)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
})
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
@abc.abstractmethod
def query(self):
"""Execute DBAPI query."""
@abc.abstractmethod
def field_to_bigquery(self, field):
"""Convert a DBAPI field to BigQuery schema format."""
@abc.abstractmethod
def convert_type(self, value, schema_type):
"""Convert a value from DBAPI to output-friendly formats."""
def _get_col_type_dict(self):
"""
Return a dict of column name and column type based on self.schema if not None.
"""
schema = []
if isinstance(self.schema, str):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
            self.log.warning('Using default schema due to unexpected type. '
                             'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warning('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
'#specifying_a_json_schema_file')
return col_type_dict
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
        :return: A dictionary where 'file_name' is the object name to use in
            GCS, 'file_handle' is a handle to the local temp file holding the
            BigQuery schema fields in .json format, and 'file_mime_type' is
            its MIME type.
"""
schema = [self.field_to_bigquery(field) for field in cursor.description]
self.log.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
tmp_schema_file_handle.write(json.dumps(schema, sort_keys=True).encode('utf-8'))
schema_file_to_upload = {
'file_name': self.schema_filename,
'file_handle': tmp_schema_file_handle,
'file_mime_type': 'application/json',
}
return schema_file_to_upload
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google Cloud Storage.
"""
hook = GCSHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
for tmp_file in files_to_upload:
hook.upload(self.bucket, tmp_file.get('file_name'),
tmp_file.get('file_handle').name,
mime_type=tmp_file.get('file_mime_type'),
gzip=self.gzip if tmp_file.get('file_name') == self.schema_filename else False)
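# A minimal concrete subclass sketch (hypothetical; not part of this
# provider). It shows the three abstract hooks a real operator (e.g. a MySQL
# or Postgres variant) must supply; the caller-injected DBAPI cursor is an
# assumption made for illustration.
class _ExampleSQLToGCSOperator(BaseSQLToGCSOperator):
    """Illustrative only: serves rows from a caller-supplied DBAPI cursor."""

    @apply_defaults
    def __init__(self, cursor=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._cursor = cursor

    def query(self):
        # Run the (possibly templated) SQL and hand the cursor back.
        self._cursor.execute(self.sql, self.parameters or ())
        return self._cursor

    def field_to_bigquery(self, field):
        # cursor.description tuples start with the column name; real operators
        # map DB types to BigQuery types, this sketch defaults to STRING.
        return {'name': field[0], 'type': 'STRING', 'mode': 'NULLABLE'}

    def convert_type(self, value, schema_type):
        # Pass values through unchanged in this sketch.
        return value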
|
clobrano/personfinder
|
refs/heads/master
|
app/pytz/zoneinfo/Europe/Andorra.py
|
9
|
'''tzinfo timezone information for Europe/Andorra.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Andorra(DstTzInfo):
'''Europe/Andorra timezone definition. See datetime.tzinfo for details'''
zone = 'Europe/Andorra'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1946,9,30,0,0,0),
d(1985,3,31,1,0,0),
d(1985,9,29,1,0,0),
d(1986,3,30,1,0,0),
d(1986,9,28,1,0,0),
d(1987,3,29,1,0,0),
d(1987,9,27,1,0,0),
d(1988,3,27,1,0,0),
d(1988,9,25,1,0,0),
d(1989,3,26,1,0,0),
d(1989,9,24,1,0,0),
d(1990,3,25,1,0,0),
d(1990,9,30,1,0,0),
d(1991,3,31,1,0,0),
d(1991,9,29,1,0,0),
d(1992,3,29,1,0,0),
d(1992,9,27,1,0,0),
d(1993,3,28,1,0,0),
d(1993,9,26,1,0,0),
d(1994,3,27,1,0,0),
d(1994,9,25,1,0,0),
d(1995,3,26,1,0,0),
d(1995,9,24,1,0,0),
d(1996,3,31,1,0,0),
d(1996,10,27,1,0,0),
d(1997,3,30,1,0,0),
d(1997,10,26,1,0,0),
d(1998,3,29,1,0,0),
d(1998,10,25,1,0,0),
d(1999,3,28,1,0,0),
d(1999,10,31,1,0,0),
d(2000,3,26,1,0,0),
d(2000,10,29,1,0,0),
d(2001,3,25,1,0,0),
d(2001,10,28,1,0,0),
d(2002,3,31,1,0,0),
d(2002,10,27,1,0,0),
d(2003,3,30,1,0,0),
d(2003,10,26,1,0,0),
d(2004,3,28,1,0,0),
d(2004,10,31,1,0,0),
d(2005,3,27,1,0,0),
d(2005,10,30,1,0,0),
d(2006,3,26,1,0,0),
d(2006,10,29,1,0,0),
d(2007,3,25,1,0,0),
d(2007,10,28,1,0,0),
d(2008,3,30,1,0,0),
d(2008,10,26,1,0,0),
d(2009,3,29,1,0,0),
d(2009,10,25,1,0,0),
d(2010,3,28,1,0,0),
d(2010,10,31,1,0,0),
d(2011,3,27,1,0,0),
d(2011,10,30,1,0,0),
d(2012,3,25,1,0,0),
d(2012,10,28,1,0,0),
d(2013,3,31,1,0,0),
d(2013,10,27,1,0,0),
d(2014,3,30,1,0,0),
d(2014,10,26,1,0,0),
d(2015,3,29,1,0,0),
d(2015,10,25,1,0,0),
d(2016,3,27,1,0,0),
d(2016,10,30,1,0,0),
d(2017,3,26,1,0,0),
d(2017,10,29,1,0,0),
d(2018,3,25,1,0,0),
d(2018,10,28,1,0,0),
d(2019,3,31,1,0,0),
d(2019,10,27,1,0,0),
d(2020,3,29,1,0,0),
d(2020,10,25,1,0,0),
d(2021,3,28,1,0,0),
d(2021,10,31,1,0,0),
d(2022,3,27,1,0,0),
d(2022,10,30,1,0,0),
d(2023,3,26,1,0,0),
d(2023,10,29,1,0,0),
d(2024,3,31,1,0,0),
d(2024,10,27,1,0,0),
d(2025,3,30,1,0,0),
d(2025,10,26,1,0,0),
d(2026,3,29,1,0,0),
d(2026,10,25,1,0,0),
d(2027,3,28,1,0,0),
d(2027,10,31,1,0,0),
d(2028,3,26,1,0,0),
d(2028,10,29,1,0,0),
d(2029,3,25,1,0,0),
d(2029,10,28,1,0,0),
d(2030,3,31,1,0,0),
d(2030,10,27,1,0,0),
d(2031,3,30,1,0,0),
d(2031,10,26,1,0,0),
d(2032,3,28,1,0,0),
d(2032,10,31,1,0,0),
d(2033,3,27,1,0,0),
d(2033,10,30,1,0,0),
d(2034,3,26,1,0,0),
d(2034,10,29,1,0,0),
d(2035,3,25,1,0,0),
d(2035,10,28,1,0,0),
d(2036,3,30,1,0,0),
d(2036,10,26,1,0,0),
d(2037,3,29,1,0,0),
d(2037,10,25,1,0,0),
]
_transition_info = [
i(0,0,'WET'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
]
Andorra = Andorra()
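# Usage sketch: pytz zone objects localize naive datetimes, e.g.
#
#     from datetime import datetime
#     aware = Andorra.localize(datetime(2009, 6, 1, 12, 0))  # CEST on this date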
|
AlperSaltabas/OR_Tools_Google_API
|
refs/heads/master
|
examples/python/data/nonogram_regular/nonogram_soccer_player.py
|
74
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Nonogram problem from Wikipedia, soccer player
# http://en.wikipedia.org/wiki/Nonogram
# Also see http://en.wikipedia.org/wiki/Image:Paint_by_numbers_Animation.gif
#
rows = 20
row_rule_len = 5
row_rules = [
[0,0,0,0,3],
[0,0,0,0,5],
[0,0,0,3,1],
[0,0,0,2,1],
[0,0,3,3,4],
[0,0,2,2,7],
[0,0,6,1,1],
[0,0,4,2,2],
[0,0,0,1,1],
[0,0,0,3,1],
[0,0,0,0,6],
[0,0,0,2,7],
[0,0,6,3,1],
[1,2,2,1,1],
[0,4,1,1,3],
[0,0,4,2,2],
[0,0,3,3,1],
[0,0,0,3,3],
[0,0,0,0,3],
[0,0,0,2,1]
]
cols = 20
col_rule_len = 5
col_rules = [
[0,0,0,0,2],
[0,0,0,1,2],
[0,0,0,2,3],
[0,0,0,2,3],
[0,0,3,1,1],
[0,0,2,1,1],
[1,1,1,2,2],
[1,1,3,1,3],
[0,0,2,6,4],
[0,3,3,9,1],
[0,0,5,3,2],
[0,3,1,2,2],
[0,0,2,1,7],
[0,0,3,3,2],
[0,0,0,2,4],
[0,0,2,1,2],
[0,0,2,2,1],
[0,0,0,2,2],
[0,0,0,0,1],
[0,0,0,0,1]
]
|
WillGuan105/django
|
refs/heads/master
|
tests/generic_relations/models.py
|
188
|
"""
Generic relations
Generic relations let an object have a foreign key to any object through a
content-type/object-id field. A ``GenericForeignKey`` field can point to any
object, be it animal, vegetable, or mineral.
The canonical example is tags (although this example implementation is *far*
from complete).
"""
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TaggedItem(models.Model):
"""A tag on an item."""
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["tag", "content_type__model"]
def __str__(self):
return self.tag
class ValuableTaggedItem(TaggedItem):
value = models.PositiveIntegerField()
class AbstractComparison(models.Model):
comparative = models.CharField(max_length=50)
content_type1 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative1_set")
object_id1 = models.PositiveIntegerField()
first_obj = GenericForeignKey(ct_field="content_type1", fk_field="object_id1")
@python_2_unicode_compatible
class Comparison(AbstractComparison):
"""
A model that tests having multiple GenericForeignKeys. One is defined
through an inherited abstract model and one defined directly on this class.
"""
content_type2 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative2_set")
object_id2 = models.PositiveIntegerField()
other_obj = GenericForeignKey(ct_field="content_type2", fk_field="object_id2")
def __str__(self):
return "%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj)
@python_2_unicode_compatible
class Animal(models.Model):
common_name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
tags = GenericRelation(TaggedItem, related_query_name='animal')
comparisons = GenericRelation(Comparison,
object_id_field="object_id1",
content_type_field="content_type1")
def __str__(self):
return self.common_name
@python_2_unicode_compatible
class Vegetable(models.Model):
name = models.CharField(max_length=150)
is_yucky = models.BooleanField(default=True)
tags = GenericRelation(TaggedItem)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Mineral(models.Model):
name = models.CharField(max_length=150)
hardness = models.PositiveSmallIntegerField()
# note the lack of an explicit GenericRelation here...
def __str__(self):
return self.name
class GeckoManager(models.Manager):
def get_queryset(self):
return super(GeckoManager, self).get_queryset().filter(has_tail=True)
class Gecko(models.Model):
has_tail = models.BooleanField(default=False)
objects = GeckoManager()
# To test fix for #11263
class Rock(Mineral):
tags = GenericRelation(TaggedItem)
class ManualPK(models.Model):
id = models.IntegerField(primary_key=True)
tags = GenericRelation(TaggedItem, related_query_name='manualpk')
class ForProxyModelModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
obj = GenericForeignKey(for_concrete_model=False)
title = models.CharField(max_length=255, null=True)
class ForConcreteModelModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
obj = GenericForeignKey()
class ConcreteRelatedModel(models.Model):
bases = GenericRelation(ForProxyModelModel, for_concrete_model=False)
class ProxyRelatedModel(ConcreteRelatedModel):
class Meta:
proxy = True
# To test fix for #7551
class AllowsNullGFK(models.Model):
content_type = models.ForeignKey(ContentType, models.SET_NULL, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey()
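# Usage sketch (hypothetical data; mirrors how the test suite exercises these
# models):
#
#     bacon = Vegetable.objects.create(name="bacon", is_yucky=False)
#     bacon.tags.create(tag="fatty")
#     TaggedItem.objects.filter(animal__isnull=False)  # via related_query_name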
|
bleib1dj/boto
|
refs/heads/develop
|
boto/support/__init__.py
|
145
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the Amazon Support service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.support.layer1 import SupportConnection
return get_regions('support', connection_cls=SupportConnection)
def connect_to_region(region_name, **kw_params):
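    """
    Given a valid region name, return a
    :class:`boto.support.layer1.SupportConnection`.

    :type region_name: str
    :param region_name: The name of the region to connect to.

    :rtype: :class:`boto.support.layer1.SupportConnection` or ``None``
    :return: A connection to the given region, or None if an invalid region
        name is given
    """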
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
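# Usage sketch (hypothetical region name; assumes AWS credentials are
# configured in the environment or boto config):
#
#     conn = connect_to_region('us-east-1')
#     if conn is None:
#         raise ValueError('unknown region name')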
|
reid-vollett/Software_Quality_Project
|
refs/heads/master
|
SpaceGame.py
|
1
|
# SpaceGame.py
# Software Quality Project - Refactoring / Unit Tests
# Game Author:
# Name: Isaiah Smith
# Github: https://github.com/Technostalgic/Asteroids-Too
import os
import sys
import time
import pygame
from Alien import alien
from Asteroid import asteroid
from Basher import basher
from Camera import camera
from Enemy import enemy
from EnemyBullet import enemyBullet
from Item import item
from MotherCow import motherCow
from Player import player
from Poly import poly
from Projectile import projectile
from gameFunctions import *
import GlobalVariables
# used for compiling with pyinstaller
fpath = '.'
if getattr(sys, 'frozen', False):
fpath = os.path.abspath(os.curdir)
os.chdir(sys._MEIPASS)
def handleInput():
'''handles receiving input from the keyboard'''
# pump() must be called before you attempt to get the keyboard state
    # this is due to the get_pressed function being called after the SDL_GetKeyState() in the
    # pygame engine, which pump() resets
pygame.event.pump()
# stops the function if the game does not have focus
if (not pygame.key.get_focused()):
return
#global activecontr
#global lastactivec
itr = 0
for pressed in GlobalVariables.activecontr:
GlobalVariables.lastactivec[itr] = pressed
itr += 1
keyspressed = pygame.key.get_pressed()
GlobalVariables.activecontr[0] = keyspressed[GlobalVariables.controls[0]]
GlobalVariables.activecontr[1] = keyspressed[GlobalVariables.controls[1]]
GlobalVariables.activecontr[2] = keyspressed[GlobalVariables.controls[2]]
GlobalVariables.activecontr[3] = keyspressed[GlobalVariables.controls[3]]
GlobalVariables.activecontr[4] = keyspressed[GlobalVariables.controls[4]]
GlobalVariables.activecontr[5] = keyspressed[GlobalVariables.controls[5]] or keyspressed[pygame.K_RETURN]
handleGlobalControls(keyspressed)
def getTappedKeys():
    '''returns the keys that are newly pressed on this frame'''
#global lastkeyspr
if (GlobalVariables.lastkeyspr == None):
# if lastkeyspr is not defined, it sets and returns it to avoid errors
GlobalVariables.lastkeyspr = pygame.key.get_pressed()
return GlobalVariables.lastkeyspr
r = list()
keyspr = pygame.key.get_pressed()
itr = 0
for key in keyspr:
        # compares the new keypress list to the keys that were pressed last
        # frame and stores new keypresses in the return list
if (key and not GlobalVariables.lastkeyspr[itr]):
r.append(itr)
itr += 1
GlobalVariables.lastkeyspr = keyspr
return r
def loadHiscore():
'''loads the highscore from the score file into the global hi variable'''
#global hi
    with open(os.path.join(fpath, 'Scores/scores'), 'r') as file:
        scs = file.read()
GlobalVariables.hi = int(scs.split('\n')[0].split(':')[1])
def init():
'''initializes the program'''
print("initializing...", end="")
#global screen
global font
global tinyfont
# initializes pygame:
pygame.mixer.pre_init(44100, -16, 2, 512)
pygame.init()
pygame.display.set_icon(pygame.image.load(compilePath(os.path.join("Graphics", "icon.png"))))
pygame.display.set_caption("SPACEGAME.EXE")
pygame.mouse.set_visible(False)
GlobalVariables.screen = pygame.display.set_mode(GlobalVariables.size)
font = pygame.font.Font(compilePath("font.ttf"), 32)
tinyfont = pygame.font.Font(compilePath("font.ttf"), 16)
loadSprites()
loadSounds()
loadHiscore()
gotoMode(0)
print("Done!")
def startGame():
'''starts a new round'''
#global maincam
#global p1
#global mode
#global score
#global scoredrop
#global scoredropper
#global enemies
#global stars
#global projectiles
#global items
#global particles
#global enemyspawndelay
#global cowspawndelay
GlobalVariables.enemies = list()
GlobalVariables.stars = list()
GlobalVariables.projectiles = list()
GlobalVariables.items = list()
GlobalVariables.particles = list()
gotoMode(1)
GlobalVariables.score = 0
GlobalVariables.scoredrop = 500
GlobalVariables.enemyspawndelay = 0
GlobalVariables.cowspawndelay = 0
GlobalVariables.scoredropper = None
GlobalVariables.maincam = camera()
GlobalVariables.p1 = player()
# testpow = item((0, 40), 2)
# items.append(testpow)
# testbas = basher((100,100))
# enemies.append(testbas)
# testmtc = motherCow((100,100))
# enemies.append(testmtc)
# fills in the stars
for i in range(200):
GlobalVariables.stars.append(randPoint(500))
def scoreDrops():
#global scoredropper
if (GlobalVariables.scoredropper == None):
return
ppos = multPoint(xyComponent(GlobalVariables.p1.angle), 100)
ppos = addPoints(ppos, GlobalVariables.p1.pos)
if (GlobalVariables.scoredropper <= 0):
GlobalVariables.scoredropper = None
print(ppos)
bonus = item.randItem(ppos)
GlobalVariables.items.append(bonus)
else:
col = (255, 255, 255)
if (GlobalVariables.scoredropper % 8 > 3):
col = (100, 100, 100)
pdraw = poly.circleGon(8, 15)
pdraw.thickness = 3
pdraw.color = col
pdraw.pos = ppos
GlobalVariables.maincam.toDraw(pdraw)
GlobalVariables.scoredropper -= 1
def lose():
'''goes to the endgame screen'''
gotoMode(2)
def loadSprites():
'''loads the item icon sprites'''
GlobalVariables.powersprites.append(pygame.image.load(compilePath(os.path.join("Graphics", "power_spread.png"))))
GlobalVariables.powersprites.append(pygame.image.load(compilePath(os.path.join("Graphics", "power_ioncannon.png"))))
GlobalVariables.powersprites.append(pygame.image.load(compilePath(os.path.join("Graphics", "power_rapid.png"))))
GlobalVariables.powersprites.append(pygame.image.load(compilePath(os.path.join("Graphics", "power_missiles.png"))))
GlobalVariables.powersprites.append(pygame.image.load(compilePath(os.path.join("Graphics", "power_overshield.png"))))
GlobalVariables.powersprites.append(pygame.image.load(compilePath(os.path.join("Graphics", "power_deflectorshield.png"))))
GlobalVariables.powersprites.append(pygame.image.load(compilePath(os.path.join("Graphics", "power_quadshooter.png"))))
def loadSounds():
'''loads the sound files'''
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "MenuNavigate.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "MenuSelect.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Shoot_Default.wav"))) # 2) weapon firing
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Shoot_RapidGun.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Shoot_IonCannon.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Shoot_SpreadGun.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Shoot_MissileLauncher.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "PowerUp.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Death_LargeAsteroid.wav"))) # 8) deaths
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Death_SmallAsteroid.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Death_Alien.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Death_MotherCow.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Hit_Default.wav"))) # 12) weapon hits
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Hit_MissileLauncher.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Hit_RapidGun.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Attack_Alien.wav"))) # 15) enemy attacks
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "Attack_MotherCow.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "TakeDamage.wav")))
GlobalVariables.sounds.append(pygame.mixer.Sound(os.path.join("Sounds", "ShieldDeflect.wav")))
def handleGlobalControls(keys):
'''handles the global controls that work anywhere in the gameloop'''
if (keys[pygame.K_ESCAPE]):
pygame.quit()
def handleMenuControls():
'''handles GUI navigation in the menu screens'''
#global selection
    if (GlobalVariables.activecontr[5] and (not GlobalVariables.lastactivec[5])):
        select(GlobalVariables.selection)
        GlobalVariables.sounds[1].play()
if (GlobalVariables.activecontr[1] and (not GlobalVariables.lastactivec[1])):
GlobalVariables.selection += 1
GlobalVariables.sounds[0].play()
if (GlobalVariables.activecontr[0] and (not GlobalVariables.lastactivec[0])):
GlobalVariables.selection -= 1
GlobalVariables.sounds[0].play()
if (GlobalVariables.selection < 0):
GlobalVariables.selection = 0
def select(selection):
'''performs an action depending on the currently selected option in the menu'''
if (GlobalVariables.mode == 0):
if (selection == 0):
startGame()
if (selection == 1):
gotoMode(3)
        if (selection == 2):
            pygame.mixer.stop()
            pygame.quit()
            sys.exit()
elif (GlobalVariables.mode == 3):
if (selection == 0):
startGame()
if (selection == 1):
gotoMode(0)
def saveScore(name, points):
'''saves a score under a specified name to the scoreboard'''
scores = loadScoreboard()
newscores = list()
itr = 0
pt = points
for scor in scores:
if (pt >= scor[1]):
newscores.append((name, pt))
pt = 0
newscores.append(scor)
itr += 1
while (True):
try:
newscores.remove(newscores[5])
        except IndexError:
break
    with open(os.path.join(fpath, 'Scores/scores'), 'w') as sbfile:
        for scor in newscores:
            sbfile.write(scor[0] + ':' + str(scor[1]) + '\n')
def gotoMode(modenum):
'''goes to a specified mode and performs necessary actions before and after'''
#global mode
#global iteration
#global selection
if (modenum == 0):
loadTitlesprite()
if (GlobalVariables.mode == 0):
disposeTitlesprite()
GlobalVariables.selection = 0
GlobalVariables.mode = modenum
GlobalVariables.iteration = 0
def loadTitlesprite():
'''loads the title screen background image'''
#global titlesprite
GlobalVariables.titlesprite = pygame.image.load(compilePath(os.path.join("Graphics", "title.png")))
def disposeTitlesprite():
    '''disposes the title screen background image'''
    GlobalVariables.titlesprite = None
def loop():
'''defines the call order of the game loop'''
#global GlobalVariables.lagcatch
    start = time.time()  # stores the time at the beginning of the step
# lagcatch dissipates over time
if (GlobalVariables.lagcatch > 0):
GlobalVariables.lagcatch -= 0.01
elif (GlobalVariables.lagcatch < 0):
GlobalVariables.lagcatch = 0
handleInput()
update()
draw()
elapsed = time.time() - start # compares the time at the beginning of the step to now
sltime = GlobalVariables.framerate - elapsed # calculates how much time is left before the next step is called
if (sltime >= 0): # if that time is above zero, it suspends the thread until the next step is to be called
time.sleep(sltime)
    else:  # if that time is below zero, a lag has occurred; this is where the lag is handled
# print("lag" + str(sltime) + "s")
GlobalVariables.lagcatch -= sltime
handleLag()
def update():
'''main game logic is handled here'''
#global iteration
updateMode()
GlobalVariables.iteration += 1
def updateNoCol():
#global iteration
updateModeNoCol()
GlobalVariables.iteration += 1
def updateMode():
'''handles update logic based on the current game mode'''
if (GlobalVariables.mode == 0): # main menu
updateMenu()
elif (GlobalVariables.mode == 1): # gameplay
updateGameplay()
elif (GlobalVariables.mode == 2): # name entry
updateNameEntry()
elif (GlobalVariables.mode == 3): # scoreboard
updateScoreboard()
def updateModeNoCol():
'''handles update logic based on the current game mode'''
if (GlobalVariables.mode == 0): # main menu
updateMenu()
elif (GlobalVariables.mode == 1): # gameplay
updateGameplayNoCol()
elif (GlobalVariables.mode == 2): # name entry
updateNameEntry()
elif (GlobalVariables.mode == 3): # scoreboard
updateScoreboard()
def updateMenu():
'''handles update logic for the main menu'''
#global selection
handleMenuControls()
if (GlobalVariables.selection > 2):
GlobalVariables.selection = 2
def updateGameplay():
'''handles the update logic for in-game'''
for part in GlobalVariables.particles:
part.update() # updates all the particles
for proj in GlobalVariables.projectiles:
proj.update() # updates all the projectiles
for en in GlobalVariables.enemies:
en.update() # updates all the enemies
for power in GlobalVariables.items:
power.update() # updates all the items
GlobalVariables.p1.update() # updates the player
handleCollisions()
scoreDrops()
GlobalVariables.maincam.orient(GlobalVariables.p1.pos, GlobalVariables.p1.angle + math.pi / 2) # orients the camera to follow the player
# removes necessary projectiles. they are not removed as the projectile list is updating because it causes an iteration skip which results in some projectiles not getting updated
for proj in GlobalVariables.projectiles:
proj.removeCheck()
spawnEnemies() # spawns/despawns enemies into the world
spawnStars() # spawns/despawns stars around the player as they move
    # acts as a makeshift timer to transition to the end screen after the
    # player has been dead for a second
    if (GlobalVariables.p1.health is None and GlobalVariables.iteration > 60):
        lose()
def updateGameplayNoCol():
for part in GlobalVariables.particles:
part.update() # updates all the particles
for proj in GlobalVariables.projectiles:
proj.update() # updates all the projectiles
for en in GlobalVariables.enemies:
en.update() # updates all the enemies
for power in GlobalVariables.items:
power.update() # updates all the items
GlobalVariables.p1.update() # updates the player
# handleCollisions()
scoreDrops()
GlobalVariables.maincam.orient(GlobalVariables.p1.pos, GlobalVariables.p1.angle + math.pi / 2) # orients the camera to follow the player
# removes necessary projectiles. they are not removed as the projectile list is updating because it causes an iteration skip which results in some projectiles not getting updated
for proj in GlobalVariables.projectiles:
proj.removeCheck()
spawnEnemies() # spawns/despawns enemies into the world
spawnStars() # spawns/despawns stars around the player as they move
    if (GlobalVariables.p1.health is None and GlobalVariables.iteration > 60):  # acts as a makeshift timer to transition to the end screen after the player has been dead for a second
lose()
def updateNameEntry():
'''updates the name entry screen'''
#global p1
    # reuses the p1 global as the string buffer for the scoreboard name entry
    # so we don't have to create a new variable
if (not type(GlobalVariables.p1) is str):
GlobalVariables.scores = loadScoreboard()
if (GlobalVariables.score < GlobalVariables.scores[4][1]):
gotoMode(3)
return
GlobalVariables.p1 = ""
tkeys = getTappedKeys()
    # parses the keyboard input into ascii characters
for k in tkeys:
if (k == pygame.K_BACKSPACE):
GlobalVariables.p1 = GlobalVariables.p1[:len(GlobalVariables.p1) - 1]
if (k == pygame.K_SPACE):
GlobalVariables.p1 += ' '
if (k >= 48 and k <= 57):
num = "0123456789"
GlobalVariables.p1 += num[k - 48]
if (k >= 97 and k <= 122):
alph = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
GlobalVariables.p1 += alph[k - 97]
# limits the name length to 12 characters
if (len(GlobalVariables.p1) > 12):
GlobalVariables.p1 = GlobalVariables.p1[:12]
# if enter is pressed, finish name entry
if (pygame.K_RETURN in tkeys):
saveScore(GlobalVariables.p1, GlobalVariables.score)
gotoMode(3)
def updateScoreboard():
'''updates the scoreboard menu screen'''
#global selection
handleMenuControls()
if (GlobalVariables.selection > 1):
GlobalVariables.selection = 1
def draw():
'''rendering logic handled here'''
if (GlobalVariables.screen == None):
print("Cannot draw to screen because screen is not initialized!")
return
GlobalVariables.screen.fill((0, 0, 0))
drawMode()
pygame.display.flip()
def drawMode():
'''renders certain ways depending on the game mode'''
if (GlobalVariables.mode == 0): # mainmenu
drawMenu()
elif (GlobalVariables.mode == 1): # in game
drawGameplay()
elif (GlobalVariables.mode == 2): # name entry
drawNameEntry()
elif (GlobalVariables.mode == 3): # score board
drawScoreboard()
def drawMenu():
'''draws the main title screen'''
buttonpos = (200, 450)
selcol = (0, 255, 0)
    if (GlobalVariables.iteration % 10 >= 5):  # selcol flashes between green and dark green every 5 frames
selcol = (0, 100, 0)
    # creates the selectable buttons; they flash if they are selected
if (GlobalVariables.selection == 0):
startButton = font.render("START GAME", False, selcol)
else:
startButton = font.render("START GAME", False, (255, 255, 255))
if (GlobalVariables.selection == 1):
scoresButton = font.render("SCOREBOARD", False, selcol)
else:
scoresButton = font.render("SCOREBOARD", False, (255, 255, 255))
if (GlobalVariables.selection == 2):
exitButton = font.render("EXIT", False, selcol)
else:
exitButton = font.render("EXIT", False, (255, 255, 255))
selector = font.render(">", False, (255, 255, 255))
# renders the background
GlobalVariables.screen.blit(GlobalVariables.titlesprite, (0, 0))
GlobalVariables.screen.blit(selector, addPoints(buttonpos, (-15, GlobalVariables.selection * 40)))
GlobalVariables.screen.blit(startButton, addPoints(buttonpos, (0, 0)))
GlobalVariables.screen.blit(scoresButton, addPoints(buttonpos, (0, 40)))
GlobalVariables.screen.blit(exitButton, addPoints(buttonpos, (0, 80)))
def drawGameplay():
'''draws the gameplay'''
drawStars()
for part in GlobalVariables.particles:
part.draw(poly) # draws all the particles
for power in GlobalVariables.items:
power.draw() # draws all the items
for proj in GlobalVariables.projectiles:
proj.draw() # draws all the projectiles
for en in GlobalVariables.enemies:
en.draw() # draws all the enemies
GlobalVariables.p1.draw() # renders the player
    GlobalVariables.maincam.render()  # renders the world objects through the camera's point of view
drawScore()
##
# col0 = circ(200)
# col0.pos = p1.pos
# col0.thickness = 1
# col1 = circ(300)
# col1.pos = p1.pos
# col1.thickness = 1
# maincam.renderCirc(col0)
# maincam.renderCirc(col1)
##
def drawNameEntry():
'''draws the name entry screen'''
#global p1
if (not type(GlobalVariables.p1) is str):
return
sccol = (255, 255, 0)
ncol = (0, 255, 255)
if (GlobalVariables.iteration % 10 >= 5):
sccol = (255, 150, 0)
ncol = (0, 100, 255)
title1 = font.render("YOU PLACED IN THE ", False, (255, 255, 255))
title2 = font.render("SCOREBOARD!", False, sccol)
title3 = font.render("ENTER YOUR NAME BELOW:", False, (255, 255, 255))
GlobalVariables.screen.blit(title1, (10, 300))
GlobalVariables.screen.blit(title2, (350, 300))
GlobalVariables.screen.blit(title3, (85, 340))
ntex = GlobalVariables.p1
if (len(GlobalVariables.p1) < 12):
ntex += '_'
nametext = font.render(ntex, False, ncol)
GlobalVariables.screen.blit(nametext, (180, 400))
def drawScoreboard():
    '''draws the scoreboard screen'''
buttonpos = (200, 450)
scores = loadScoreboard()
col = (255, 150, 0)
selcol = (0, 255, 0)
ycol = (0, 255, 255)
if (GlobalVariables.iteration % 10 >= 5):
col = (255, 255, 0)
selcol = (0, 100, 0)
ycol = (0, 100, 255)
title = font.render("---SCOREBOARD---", False, col)
scoretext = font.render("YOU: " + str(GlobalVariables.score), False, ycol)
if (GlobalVariables.selection == 0):
startButton = font.render("START GAME", False, selcol)
else:
startButton = font.render("START GAME", False, (255, 255, 255))
if (GlobalVariables.selection == 1):
menuButton = font.render("MAIN MENU", False, selcol)
else:
menuButton = font.render("MAIN MENU", False, (255, 255, 255))
selector = font.render(">", False, (255, 255, 255))
itr = 0
for scor in scores:
scoreentry = font.render(str(itr + 1) + '. ' + scor[0], False, (0, 255, 0))
GlobalVariables.screen.blit(scoreentry, (100, 150 + itr * 40))
scoreentry = font.render(str(scor[1]), False, (0, 255, 0))
GlobalVariables.screen.blit(scoreentry, (450, 150 + itr * 40))
itr += 1
GlobalVariables.screen.blit(title, (150, 50))
GlobalVariables.screen.blit(selector, addPoints(buttonpos, (-15, GlobalVariables.selection * 40)))
if (GlobalVariables.score > 0):
GlobalVariables.screen.blit(scoretext, (150, 380))
GlobalVariables.screen.blit(startButton, addPoints(buttonpos, (0, 0)))
GlobalVariables.screen.blit(menuButton, addPoints(buttonpos, (0, 40)))
def drawScore():
'''draws the score and high score in the upper left of the screen'''
text = font.render("SCORE: " + str(int(GlobalVariables.score)), False, (255, 255, 255))
hitext = tinyfont.render("HI: " + str(GlobalVariables.hi), False, (200, 200, 200))
GlobalVariables.screen.blit(text, (4, 4))
GlobalVariables.screen.blit(hitext, (4, 40))
def loadScoreboard():
'''loads the scores from the score file'''
loadHiscore()
r = list()
    with open(os.path.join(fpath, 'Scores/scores'), 'r') as file:
        dat = file.read()
spldat = dat.split('\n')
for scor in spldat:
if (scor == ""):
continue
splscor = scor.split(':')
r.append((splscor[0], int(splscor[1])))
return r
def handleLag():
'''handles the lag by removing non essential game assets'''
if (GlobalVariables.mode != 1):
return
catchup()
partsr = 0
asterr = 0
for part in GlobalVariables.particles:
if (randChance(50)):
part.life = 0
partsr += 1
for en in GlobalVariables.enemies:
if (type(en) is asteroid):
if (en.radius <= 10):
if (distance(en.pos, GlobalVariables.p1.pos) > 400):
GlobalVariables.enemies.remove(en)
asterr += 1
# print("cleaned up " + str(partsr) + " particles, " +str(asterr) + " asteroids")
def catchup():
'''updates the game without rendering to save time'''
#global lagcatch
itr = 0
while (GlobalVariables.lagcatch >= GlobalVariables.framerate):
GlobalVariables.lagcatch -= GlobalVariables.framerate
start = time.time()
updateNoCol()
elapsed = time.time() - start
GlobalVariables.lagcatch += elapsed
itr += 1
GlobalVariables.maincam.drawQuery = list()
# print("caught up " + str(itr) + " frames")
def handleCollisions():
handleColLists()
itr = 0
for op in GlobalVariables.colcheck0:
typ = projectile
if (baseIs(op, enemy)):
typ = enemy
opCheckColList(op, GlobalVariables.colcheck0, itr, typ)
if (collision(GlobalVariables.p1, op)):
if (baseIs(op, projectile)):
if (not op.friendly):
GlobalVariables.p1.damage(1)
GlobalVariables.p1.powerEvent(0, op)
op.hit(GlobalVariables.p1)
else:
GlobalVariables.p1.damage(1)
GlobalVariables.p1.powerEvent(0, op)
op.hit(GlobalVariables.p1)
itr += 1
itr = 0
for op in GlobalVariables.colcheck1:
typ = projectile
if (baseIs(op, enemy)):
typ = enemy
opCheckColList(op, GlobalVariables.colcheck1, itr, typ)
itr += 1
itr = 0
for op in GlobalVariables.colcheck2:
typ = projectile
if (baseIs(op, enemy)):
typ = enemy
opCheckColList(op, GlobalVariables.colcheck2, itr, typ)
itr += 1
collectBodies()
def opCheckColList(op, clist, itr, optype):
if (op.cck or op.dead()):
return
op.cck = True
for i in range(itr + 1, len(clist)):
if (clist[i].dead()):
continue
if (baseIs(clist[i], optype)):
if (optype is projectile):
if (clist[i].friendly == op.friendly):
continue
else:
continue
if (collision(op, clist[i])):
op.hit(clist[i])
if (type(clist[i]) is enemyBullet):
clist[i].hit(op)
def collectBodies():
itr = 0
while (itr < len(GlobalVariables.enemies)):
if (GlobalVariables.enemies[itr].dead()):
del GlobalVariables.enemies[itr]
continue
itr += 1
itr = 0
while (itr < len(GlobalVariables.projectiles)):
if (GlobalVariables.projectiles[itr].dead()):
del GlobalVariables.projectiles[itr]
continue
itr += 1
def handleColLists():
#global colcheck0
#global colcheck1
#global colcheck2
GlobalVariables.colcheck0 = list()
GlobalVariables.colcheck1 = list()
GlobalVariables.colcheck2 = list()
for en in GlobalVariables.enemies:
sortToColList(en)
for proj in GlobalVariables.projectiles:
sortToColList(proj)
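# sortToColList buckets each object into up to two of three concentric
# distance bands around the player (<=200, 200-300, >300 units); an object
# whose radius straddles a band edge is added to both neighbouring lists so
# cross-band collisions are still checked.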
def sortToColList(obj):
dist = distance(obj.pos, GlobalVariables.p1.pos)
if (dist <= 200):
GlobalVariables.colcheck0.append(obj)
if (dist + obj.radius > 200):
GlobalVariables.colcheck1.append(obj)
elif (dist <= 300):
GlobalVariables.colcheck1.append(obj)
if (dist - obj.radius <= 200):
GlobalVariables.colcheck0.append(obj)
if (dist + obj.radius > 300):
GlobalVariables.colcheck2.append(obj)
else:
GlobalVariables.colcheck2.append(obj)
if (dist - obj.radius <= 300):
GlobalVariables.colcheck1.append(obj)
def spawnStars():
'''spawns/despawns the stars around the player'''
for star in GlobalVariables.stars:
if (distance(subtractPoints(star, GlobalVariables.p1.pos)) > 405):
GlobalVariables.stars.remove(star)
while (len(GlobalVariables.stars) < 200):
GlobalVariables.stars.append(addPoints(multPoint(xyComponent(random.random() * math.pi * 2), 400), GlobalVariables.p1.pos))
def spawnEnemies():
# spawns/despawns the enemies around the player
#global enemyspawndelay
#global cowspawndelay
for en in GlobalVariables.enemies:
if (distance(subtractPoints(GlobalVariables.p1.pos, en.pos)) > 800):
GlobalVariables.enemies.remove(en)
# astsize = iteration / 10000
astsize = 1
aliencount = (GlobalVariables.iteration - 1800) / 1800 + 1
bashercount = (GlobalVariables.iteration - 2000) / 2500 + 1
if (aliencount < 0):
aliencount = 0
if (GlobalVariables.enemyspawndelay <= 0):
GlobalVariables.enemyspawndelay = 300
for i in range(3):
en = asteroid(addPoints(randCirc(500), GlobalVariables.p1.pos), randRange(randRange(randRange(120 * astsize + 10))) + 8)
en.vel = randPoint(3)
GlobalVariables.enemies.append(en)
for i in range(int(aliencount)):
en = alien(addPoints(randCirc(500), GlobalVariables.p1.pos))
GlobalVariables.enemies.append(en)
if (int(bashercount) > 0):
for i in range(random.randrange(int(bashercount))):
bs = basher(addPoints(randCirc(randRange(600, 500)), GlobalVariables.p1.pos))
GlobalVariables.enemies.append(bs)
GlobalVariables.enemyspawndelay -= 1
#testing 3500 -> 350
if (GlobalVariables.iteration > 3500):
GlobalVariables.cowspawndelay -= 1
if (GlobalVariables.cowspawndelay <= 0):
#testing 1200 -> 200
GlobalVariables.cowspawndelay = 1200
cow = motherCow(addPoints(randCirc(500), GlobalVariables.p1.pos))
GlobalVariables.enemies.append(cow)
def compilePath(path):
'''ignore, used for compiling to a standalone exe with pyinstaller, ended up not doing it'''
if hasattr(sys, "_MEIPASS"):
return os.path.join(sys._MEIPASS, path)
return os.path.join(os.path.abspath('.'), path)
def drawStars():
'''draws the stars'''
itr = 0
for star in GlobalVariables.stars:
pol = poly()
pol.color = (140, 140, 140)
pol.verts = [(0, 0)]
pol.verts.append((0, 1))
pol.pos = star
pol.thickness = 2
verts = GlobalVariables.maincam.renderPoly(pol)
itr += 1
def main():
'''main entry point of the gameloop'''
while (True):
loop()
init()
# raise Exception()
main()
|
craynot/django
|
refs/heads/master
|
tests/test_client/auth_backends.py
|
315
|
from django.contrib.auth.backends import ModelBackend
class TestClientBackend(ModelBackend):
pass
|
jordiclariana/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_users_facts.py
|
15
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_users_facts
short_description: Retrieve facts about one or more oVirt users
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt users."
notes:
- "This module creates a new top-level C(ovirt_users) fact, which
contains a list of users."
options:
pattern:
description:
            - "Search term which is accepted by oVirt search backend."
            - "For example, to search for user X use the following pattern: name=X"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all users whose first names start with C(john):
- ovirt_users_facts:
pattern: name=john*
- debug:
var: ovirt_users
'''
RETURN = '''
ovirt_users:
    description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys,
        all user attributes can be found at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/user."
returned: On success.
type: list
'''
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
connection = create_connection(module.params.pop('auth'))
users_service = connection.system_service().users_service()
users = users_service.list(search=module.params['pattern'])
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_users=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in users
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == '__main__':
main()
|
nitely/django-djconfig
|
refs/heads/master
|
tests/urls.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls)
]
|
disqus/graphite-web
|
refs/heads/master
|
examples/example-client.py
|
73
|
#!/usr/bin/python
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import sys
import time
import os
import platform
import subprocess
from socket import socket
CARBON_SERVER = '127.0.0.1'
CARBON_PORT = 2003
delay = 60
if len(sys.argv) > 1:
delay = int( sys.argv[1] )
def get_loadavg():
# For more details, "man proc" and "man uptime"
if platform.system() == "Linux":
return open('/proc/loadavg').read().strip().split()[:3]
else:
command = "uptime"
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
os.waitpid(process.pid, 0)
output = process.stdout.read().replace(',', ' ').strip().split()
length = len(output)
return output[length - 3:length]
sock = socket()
try:
sock.connect( (CARBON_SERVER,CARBON_PORT) )
except Exception:
print "Couldn't connect to %(server)s on port %(port)d, is carbon-agent.py running?" % { 'server':CARBON_SERVER, 'port':CARBON_PORT }
sys.exit(1)
while True:
now = int( time.time() )
lines = []
#We're gonna report all three loadavg values
loadavg = get_loadavg()
lines.append("system.loadavg_1min %s %d" % (loadavg[0],now))
lines.append("system.loadavg_5min %s %d" % (loadavg[1],now))
lines.append("system.loadavg_15min %s %d" % (loadavg[2],now))
message = '\n'.join(lines) + '\n' #all lines must end in a newline
print "sending message\n"
print '-' * 80
print message
print
sock.sendall(message)
time.sleep(delay)
|
abzaloid/maps
|
refs/heads/master
|
django-project/lib/python2.7/site-packages/pip/_vendor/distlib/version.py
|
426
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-386,
distribute-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$')
num_re = re.compile(r'^\d+(\.\d+)*$')
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
m = self.dist_re.match(s)
if not m:
raise ValueError('Not valid: %r' % s)
groups = m.groups('')
self.name = groups[0].strip()
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if groups[2]:
constraints = [c.strip() for c in groups[2].split(',')]
for c in constraints:
m = self.comp_re.match(c)
if not m:
raise ValueError('Invalid %r in %r' % (c, s))
groups = m.groups()
op = groups[0] or '~='
s = groups[1]
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
if not self.num_re.match(vn):
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
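# Sanity-check sketch (an illustrative addition, not in the original file):
# the sentinel values chosen above make dev releases sort before
# pre-releases, pre-releases before finals, and finals before post releases:
#   _pep_440_key('1.0.dev1') < _pep_440_key('1.0a1') < _pep_440_key('1.0')
#   _pep_440_key('1.0') < _pep_440_key('1.0.post1')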
_normalized_key = _pep_440_key
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
    1            # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
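# For example, _match_prefix('1.4.5', '1.4') is True, while
# _match_prefix('1.45', '1.4') is False: the prefix must end at a '.' boundary.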
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),   # remove leading r(ev)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009 (4287 of them):
    - 2312 (53.93%) match NormalizedVersion without change
    - 3474 (81.04%) match when using the automatic suggestion from this method
    @param s {str} An irrational version string.
    @returns A rational version string, or None if one couldn't be determined.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
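# Worked example (an illustrative addition): _legacy_key('1.0a1') yields
# ('00000001', '*a', '00000001', '*final'). Numeric parts are zero-padded so
# string comparison behaves numerically, trailing zero components are dropped
# before a pre-release tag, and '*final' terminates the key.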
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
        # by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
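# --- Usage sketch (an illustrative addition, not part of the original file) ---
# A minimal demonstration of the scheme/matcher API defined above, using only
# names defined in this module.
if __name__ == '__main__':
    scheme = get_scheme('default')               # the "normalized" (PEP 440) scheme
    matcher = scheme.matcher('foo (>= 1.0, < 2.0)')
    print(matcher.match('1.5'))                  # True: satisfies both constraints
    print(matcher.match('2.0'))                  # False: excluded by "< 2.0"
    print(scheme.is_valid_version('1.0.post1'))  # True
    print(scheme.suggest('v1.0-final'))          # '1.0' (strips 'v' and '-final')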
|
studybuffalo/studybuffalo
|
refs/heads/master
|
study_buffalo/drug_price_calculator/admin.py
|
1
|
from django.contrib import admin
from . import models
class ClientsInlineAdmin(admin.TabularInline):
model = models.Clients
class CoverageCriteriaInlineAdmin(admin.TabularInline):
model = models.CoverageCriteria
extra = 1
class SpecialInlineAdmin(admin.TabularInline):
model = models.Price.special_authorizations.through
extra = 1
@admin.register(models.Drug)
class DrugAdmin(admin.ModelAdmin):
"""Admin for the Drug (and related) models."""
model = models.Drug
list_display = ('din', 'brand_name', 'generic_name', 'strength', 'dosage_form')
ordering = ('generic_name', 'strength', 'dosage_form')
search_fields = ('din', 'brand_name', 'generic_name')
fields = (
'din', 'brand_name', 'strength', 'route', 'dosage_form',
'generic_name', 'manufacturer', 'schedule', 'atc', 'ptc',
'generic_product',
)
@admin.register(models.Price)
class PriceAdmin(admin.ModelAdmin):
"""Admin for the Price (and related) models."""
model = models.Price
list_display = ('drug', 'abc_id', 'unit_price', 'lca_price', 'mac_price')
ordering = ('abc_id',)
search_fields = ('drug__brand_name', 'drug__generic_name', 'abc_id')
fields = (
'drug', 'abc_id', 'date_listed', 'unit_price', 'lca_price', 'mac_price',
'mac_text', 'unit_issue', 'interchangeable', 'coverage_status',
)
inlines = (ClientsInlineAdmin, CoverageCriteriaInlineAdmin, SpecialInlineAdmin)
@admin.register(models.ATC)
class ATCAdmin(admin.ModelAdmin):
"""Admin for the ATC model."""
model = models.ATC
list_display = ('id', 'atc_1', 'atc_2', 'atc_3', 'atc_4', 'atc_5')
ordering = ('id',)
search_fields = (
'atc_1', 'atc_1_text', 'atc_2', 'atc_2_text',
'atc_3', 'atc_3_text', 'atc_4', 'atc_4_text',
'atc_5', 'atc_5_text'
)
fields = (
'id', 'atc_1', 'atc_1_text',
'atc_2', 'atc_2_text', 'atc_3', 'atc_3_text',
'atc_4', 'atc_4_text', 'atc_5', 'atc_5_text'
)
@admin.register(models.PTC)
class PTCAdmin(admin.ModelAdmin):
"""Admin for the PTC model."""
model = models.PTC
list_display = ('id', 'ptc_1', 'ptc_2', 'ptc_3', 'ptc_4')
ordering = ('id',)
search_fields = (
'ptc_1', 'ptc_1_text', 'ptc_2', 'ptc_2_text',
'ptc_3', 'ptc_3_text', 'ptc_4', 'ptc_4_text',
)
fields = (
'id', 'ptc_1', 'ptc_1_text', 'ptc_2', 'ptc_2_text',
'ptc_3', 'ptc_3_text', 'ptc_4', 'ptc_4_text',
)
@admin.register(models.SpecialAuthorization)
class SpecialAuthorizationAdmin(admin.ModelAdmin):
"""Admin for the PTC model."""
model = models.SpecialAuthorization
list_display = ('file_name', 'pdf_title')
ordering = ('pdf_title',)
fields = ('file_name', 'pdf_title')
@admin.register(models.SubsBSRF)
class SubsBSRFAdmin(admin.ModelAdmin):
"""Admin for the BSRF substitution model."""
model = models.SubsBSRF
list_display = ('original', 'brand_name', 'strength', 'route', 'dosage_form')
ordering = ('brand_name', 'route', 'dosage_form', 'strength')
fields = ('original', 'brand_name', 'strength', 'route', 'dosage_form')
@admin.register(models.SubsGeneric)
class SubsGenericAdmin(admin.ModelAdmin):
"""Admin for the generic substitution model."""
model = models.SubsGeneric
list_display = ('original', 'correction')
ordering = ('original', 'correction')
fields = ('original', 'correction')
@admin.register(models.SubsManufacturer)
class SubsManufacturerAdmin(admin.ModelAdmin):
"""Admin for the manufacturer substitution model."""
model = models.SubsManufacturer
list_display = ('original', 'correction')
ordering = ('original', 'correction')
fields = ('original', 'correction')
@admin.register(models.SubsUnit)
class SubsUnitAdmin(admin.ModelAdmin):
"""Admin for the unit substitution model."""
model = models.SubsUnit
list_display = ('original', 'correction')
ordering = ('original', 'correction')
fields = ('original', 'correction')
@admin.register(models.PendBSRF)
class PendBSRFAdmin(admin.ModelAdmin):
"""Admin for the pending BSRF substitution model."""
model = models.PendBSRF
list_display = ('original', 'brand_name', 'strength', 'route', 'dosage_form')
ordering = ('brand_name', 'route', 'dosage_form', 'strength')
fields = ('original', 'brand_name', 'strength', 'route', 'dosage_form')
@admin.register(models.PendGeneric)
class PendGenericAdmin(admin.ModelAdmin):
"""Admin for the pending generic substitution model."""
model = models.PendGeneric
list_display = ('original', 'correction')
ordering = ('original', 'correction')
fields = ('original', 'correction')
@admin.register(models.PendManufacturer)
class PendManufacturerAdmin(admin.ModelAdmin):
"""Admin for the pending manufacturer substitution model."""
model = models.PendManufacturer
list_display = ('original', 'correction')
ordering = ('original', 'correction')
fields = ('original', 'correction')
|
bonitadecker77/python-for-android
|
refs/heads/master
|
python3-alpha/extra_modules/gdata/Crypto/PublicKey/RSA.py
|
45
|
#
# RSA.py : RSA encryption/decryption
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: RSA.py,v 1.20 2004/05/06 12:52:54 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util import number
try:
from Crypto.PublicKey import _fastmath
except ImportError:
_fastmath = None
class error (Exception):
pass
def generate(bits, randfunc, progress_func=None):
"""generate(bits:int, randfunc:callable, progress_func:callable)
Generate an RSA key of length 'bits', using 'randfunc' to get
random data and 'progress_func', if present, to display
the progress of the key generation.
"""
obj=RSAobj()
# Generate the prime factors of n
if progress_func:
progress_func('p,q\n')
p = q = 1
while number.size(p*q) < bits:
p = pubkey.getPrime(bits/2, randfunc)
q = pubkey.getPrime(bits/2, randfunc)
# p shall be smaller than q (for calc of u)
if p > q:
(p, q)=(q, p)
obj.p = p
obj.q = q
if progress_func:
progress_func('u\n')
obj.u = pubkey.inverse(obj.p, obj.q)
obj.n = obj.p*obj.q
obj.e = 65537
if progress_func:
progress_func('d\n')
obj.d=pubkey.inverse(obj.e, (obj.p-1)*(obj.q-1))
assert bits <= 1+obj.size(), "Generated key is too small"
return obj
def construct(tuple):
"""construct(tuple:(long,) : RSAobj
Construct an RSA object from a 2-, 3-, 5-, or 6-tuple of numbers.
"""
obj=RSAobj()
if len(tuple) not in [2,3,5,6]:
raise error('argument for construct() wrong length')
for i in range(len(tuple)):
field = obj.keydata[i]
setattr(obj, field, tuple[i])
if len(tuple) >= 5:
# Ensure p is smaller than q
if obj.p>obj.q:
(obj.p, obj.q)=(obj.q, obj.p)
if len(tuple) == 5:
# u not supplied, so we're going to have to compute it.
obj.u=pubkey.inverse(obj.p, obj.q)
return obj
class RSAobj(pubkey.pubkey):
keydata = ['n', 'e', 'd', 'p', 'q', 'u']
def _encrypt(self, plaintext, K=''):
if self.n<=plaintext:
raise error('Plaintext too large')
return (pow(plaintext, self.e, self.n),)
def _decrypt(self, ciphertext):
if (not hasattr(self, 'd')):
raise error('Private key not available in this object')
if self.n<=ciphertext[0]:
raise error('Ciphertext too large')
return pow(ciphertext[0], self.d, self.n)
def _sign(self, M, K=''):
return (self._decrypt((M,)),)
def _verify(self, M, sig):
m2=self._encrypt(sig[0])
if m2[0]==M:
return 1
else: return 0
def _blind(self, M, B):
tmp = pow(B, self.e, self.n)
return (M * tmp) % self.n
def _unblind(self, M, B):
tmp = pubkey.inverse(B, self.n)
return (M * tmp) % self.n
def can_blind (self):
"""can_blind() : bool
Return a Boolean value recording whether this algorithm can
blind data. (This does not imply that this
        particular key object has the private information required
        to blind a message.)
"""
return 1
def size(self):
"""size() : int
Return the maximum number of bits that can be handled by this key.
"""
return number.size(self.n) - 1
def has_private(self):
"""has_private() : bool
Return a Boolean denoting whether the object contains
private components.
"""
if hasattr(self, 'd'):
return 1
else: return 0
def publickey(self):
"""publickey(): RSAobj
Return a new key object containing only the public key information.
"""
return construct((self.n, self.e))
class RSAobj_c(pubkey.pubkey):
keydata = ['n', 'e', 'd', 'p', 'q', 'u']
def __init__(self, key):
self.key = key
def __getattr__(self, attr):
if attr in self.keydata:
return getattr(self.key, attr)
else:
if attr in self.__dict__:
                return self.__dict__[attr]
else:
raise AttributeError('%s instance has no attribute %s' % (self.__class__, attr))
def __getstate__(self):
d = {}
for k in self.keydata:
if hasattr(self.key, k):
d[k]=getattr(self.key, k)
return d
def __setstate__(self, state):
n,e = state['n'], state['e']
if 'd' not in state:
self.key = _fastmath.rsa_construct(n,e)
else:
d = state['d']
if 'q' not in state:
self.key = _fastmath.rsa_construct(n,e,d)
else:
p, q, u = state['p'], state['q'], state['u']
self.key = _fastmath.rsa_construct(n,e,d,p,q,u)
def _encrypt(self, plain, K):
return (self.key._encrypt(plain),)
def _decrypt(self, cipher):
return self.key._decrypt(cipher[0])
def _sign(self, M, K):
return (self.key._sign(M),)
def _verify(self, M, sig):
return self.key._verify(M, sig[0])
def _blind(self, M, B):
return self.key._blind(M, B)
def _unblind(self, M, B):
return self.key._unblind(M, B)
def can_blind (self):
return 1
def size(self):
return self.key.size()
def has_private(self):
return self.key.has_private()
def publickey(self):
return construct_c((self.key.n, self.key.e))
def generate_c(bits, randfunc, progress_func = None):
# Generate the prime factors of n
if progress_func:
progress_func('p,q\n')
p = q = 1
while number.size(p*q) < bits:
p = pubkey.getPrime(bits/2, randfunc)
q = pubkey.getPrime(bits/2, randfunc)
# p shall be smaller than q (for calc of u)
if p > q:
(p, q)=(q, p)
if progress_func:
progress_func('u\n')
u=pubkey.inverse(p, q)
n=p*q
e = 65537
if progress_func:
progress_func('d\n')
d=pubkey.inverse(e, (p-1)*(q-1))
key = _fastmath.rsa_construct(n,e,d,p,q,u)
obj = RSAobj_c(key)
## print p
## print q
## print number.size(p), number.size(q), number.size(q*p),
## print obj.size(), bits
assert bits <= 1+obj.size(), "Generated key is too small"
return obj
def construct_c(tuple):
key = _fastmath.rsa_construct(*tuple)
return RSAobj_c(key)
object = RSAobj
generate_py = generate
construct_py = construct
if _fastmath:
#print "using C version of RSA"
generate = generate_c
construct = construct_c
error = _fastmath.error
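# --- Illustrative sketch (an addition, not part of the original module) ---
# The textbook RSA arithmetic that _encrypt/_decrypt above implement, shown
# with toy primes; pubkey.inverse is the same modular inverse generate() uses.
if __name__ == '__main__':
    p, q = 61, 53                             # toy primes; real keys are far larger
    n = p * q                                 # public modulus
    e = 17                                    # public exponent, coprime to (p-1)*(q-1)
    d = pubkey.inverse(e, (p - 1) * (q - 1))  # private exponent, as in generate()
    m = 42                                    # "plaintext" must be smaller than n
    c = pow(m, e, n)                          # _encrypt: c = m^e mod n
    assert pow(c, d, n) == m                  # _decrypt: m = c^d mod n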
|
XBMC-Addons/plugin.library.node.editor
|
refs/heads/master
|
resources/lib/pluginBrowser.py
|
1
|
# coding=utf-8
import sys
import xbmc, xbmcaddon, xbmcgui
import json
from resources.lib.common import *
def getPluginPath( ltype, location = None ):
listings = []
listingsLabels = []
if location is not None:
# If location given, add 'create' item
listings.append( "::CREATE::" )
listingsLabels.append( LANGUAGE( 30411 ) )
    else:
        # If no location given, build the default root for the library type
        if ltype.startswith( "video" ):
            location = "addons://sources/video"
        else:
            location = "addons://sources/audio"
# Show a waiting dialog, then get the listings for the directory
dialog = xbmcgui.DialogProgress()
dialog.create( ADDONNAME, LANGUAGE( 30410 ) )
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method": "Files.GetDirectory", "params": { "properties": ["title", "file", "thumbnail", "episode", "showtitle", "season", "album", "artist", "imdbnumber", "firstaired", "mpaa", "trailer", "studio", "art"], "directory": "' + location + '", "media": "files" } }')
json_response = json.loads(json_query)
# Add all directories returned by the json query
if json_response.get('result') and json_response['result'].get('files') and json_response['result']['files']:
json_result = json_response['result']['files']
for item in json_result:
if item[ "file" ].startswith( "plugin://" ):
listings.append( item[ "file" ] )
listingsLabels.append( "%s >" %( item[ "label" ] ) )
# Close progress dialog
dialog.close()
selectedItem = xbmcgui.Dialog().select( LANGUAGE( 30309 ), listingsLabels )
if selectedItem == -1:
# User cancelled
return None
selectedAction = listings[ selectedItem ]
if selectedAction == "::CREATE::":
return location
else:
# User has chosen a sub-level to display, add details and re-call this function
return getPluginPath(ltype, selectedAction)
|
dade22/HelloPython
|
refs/heads/master
|
samples/classes/ex_func_decorators.py
|
1
|
""" Decorators """
# In this example `beg` wraps `say`. If say_please is True then it
# will change the returned message.
from functools import wraps
def beg(target_function):
@wraps(target_function)
def wrapper(*args, **kwargs):
msg, say_please = target_function(*args, **kwargs)
if say_please:
return "{} {}".format(msg, "Please! I am poor :(")
return msg
return wrapper
@beg
def say(say_please=False):
msg = "Can you buy me a beer?"
return msg, say_please
print(say()) # Can you buy me a beer?
print(say(say_please=True)) # Can you buy me a beer? Please! I am poor :(
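# A small follow-up sketch: functools.wraps copies the wrapped function's
# metadata onto the wrapper, so introspection still sees `say`.
print(say.__name__)  # "say" (without @wraps this would print "wrapper")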
|
sbunatyan/tavrida
|
refs/heads/master
|
tavrida/service.py
|
1
|
#!/usr/bin/env python
# Copyright (c) 2015 Sergey Bunatyan <sergey.bunatyan@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import copy
import logging
import controller
import dispatcher
import exceptions
import messages
class ServiceController(controller.AbstractController):
"""
    Base service controller. All service controllers should inherit from
    this class.
    This class is responsible for final message processing: it runs the
    incoming middlewares, calls the handler method, runs the outgoing
    middlewares on the result, and finally sends the result to the
    postprocessor.
"""
__metaclass__ = abc.ABCMeta
_dispatcher = None
_discovery = None
def __init__(self, postprocessor):
super(ServiceController, self).__init__()
self.postprocessor = postprocessor
self._incoming_middlewares = []
self._outgoing_middlewares = []
self.log = logging.getLogger(__name__)
@classmethod
def get_discovery(cls):
if not cls._discovery:
raise ValueError("Discovery should be defined for service")
return cls._discovery
@classmethod
def set_discovery(cls, discovery):
cls._discovery = discovery
@classmethod
def get_dispatcher(cls):
if not cls._dispatcher:
cls._dispatcher = dispatcher.Dispatcher()
return cls._dispatcher
def add_incoming_middleware(self, middleware):
"""
        Append an incoming middleware controller
:param middleware: middleware object
:type middleware: middleware.Middleware
"""
self._incoming_middlewares.append(middleware)
def add_outgoing_middleware(self, middleware):
"""
        Append an outgoing middleware controller
:param middleware: middleware object
:type middleware: middleware.Middleware
"""
self._outgoing_middlewares.append(middleware)
def send_heartbeat(self):
self.postprocessor.driver.send_heartbeat_via_reader()
def _run_outgoing_middlewares(self, result):
for mld in self._outgoing_middlewares:
result = mld.process(result)
if not (isinstance(result, messages.Outgoing) and
isinstance(result, messages.Message)):
raise exceptions.IncorrectOutgoingMessage(message=result)
return result
def _send(self, message):
return self.postprocessor.process(message)
def _filter_redundant_parameters(self, method_name, incoming_kwargs):
arg_names = getattr(self, method_name)._arg_names[2:]
incoming_params = incoming_kwargs.keys()
if set(arg_names) - set(incoming_params):
raise ValueError("Wrong incoming parameters (%s) for method '%s'"
% (str(incoming_kwargs), method_name))
else:
return dict((k, v) for (k, v) in incoming_kwargs.iteritems()
if k in arg_names)
def _handle_request(self, method, request, proxy):
"""
        Calls the handler for a Request message, gets the result and handles exceptions
:param request: incoming request
:type request: IncomingRequestCall or IncomingRequestCast
:return: response or None
:rtype: messages.Response, messages.Error, None
"""
try:
filtered_kwargs = self._filter_redundant_parameters(
method, request.payload)
result = getattr(self, method)(request, proxy, **filtered_kwargs)
if isinstance(request, messages.IncomingRequestCall):
return result
except Exception as e:
if isinstance(request, messages.IncomingRequestCall):
self.log.exception(e)
return messages.Error.create_by_request(request, exception=e)
else:
raise
def _process_request(self, method, request, proxy):
"""
        Handles a Request message and sends back the results of the handler
        execution, if there are any.
:param request: incoming request
:type request: IncomingRequestCall or IncomingRequestCast
"""
result = self._handle_request(method, request, proxy)
if result:
if isinstance(result, (messages.Response, messages.Error)):
result = self._run_outgoing_middlewares(result)
self._send(result)
elif isinstance(result, dict):
message = request.make_response(**result)
message = self._run_outgoing_middlewares(message)
self._send(message)
else:
raise exceptions.WrongResponse(response=str(result))
def _process_notification(self, method, notification, proxy):
"""
Handles incoming notification message
"""
getattr(self, method)(notification, proxy, **notification.payload)
def _process_response(self, method, response, proxy):
"""
Handles incoming response message
"""
getattr(self, method)(response, proxy, **response.payload)
def _process_error(self, method, error, proxy):
"""
Handles incoming error message
"""
getattr(self, method)(error, proxy)
def _route_message_by_type(self, method, message, proxy):
message.update_context(copy.copy(message.payload))
if isinstance(message, messages.IncomingRequest):
return self._process_request(method, message, proxy)
if isinstance(message, messages.IncomingResponse):
return self._process_response(method, message, proxy)
if isinstance(message, messages.IncomingNotification):
return self._process_notification(method, message, proxy)
if isinstance(message, messages.IncomingError):
return self._process_error(method, message, proxy)
def _run_incoming_middlewares(self, message):
continue_processing = True
res = message
for mld in self._incoming_middlewares:
try:
res = mld.process(res)
except Exception as e:
if isinstance(message, messages.IncomingRequestCall):
res = messages.Error.create_by_request(message, e)
if not isinstance(res, (messages.Response, messages.Error,
messages.IncomingRequest)):
raise exceptions.IncorrectMessage(message=res)
if isinstance(res, (messages.Response, messages.Error)):
continue_processing = False
if isinstance(message, messages.IncomingRequestCall):
self._send(res)
break
return continue_processing, res
def process(self, method, message, proxy):
"""
        Routes the message to the corresponding handler.
        Before the handler is called, the message is passed through all
        incoming middlewares.
:param method: handler method name
:type method: string
:param message: incoming message
:type message: messages.Message
:param proxy: proxy to make calls to remote services
:type proxy: proxies.RPCProxy
"""
continue_processing, res = self._run_incoming_middlewares(message)
if continue_processing:
self._route_message_by_type(method, res, proxy)
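# --- Hypothetical sketch (an addition, not part of the original module) ---
# The middleware loops above only require an object with a process() hook that
# returns the (possibly transformed) message, so a pass-through middleware can
# be as small as this:
class LoggingMiddleware(object):
    def process(self, message):
        logging.getLogger(__name__).debug("processing message: %r", message)
        return message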
|
USGSDenverPychron/pychron
|
refs/heads/develop
|
pychron/processing/dataset/tests/__init__.py
|
186
|
__author__ = 'ross'
|
MediaSapiens/autonormix
|
refs/heads/master
|
django/conf/urls/defaults.py
|
320
|
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
from django.core.exceptions import ImproperlyConfigured
__all__ = ['handler404', 'handler500', 'include', 'patterns', 'url']
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
def include(arg, namespace=None, app_name=None):
if isinstance(arg, tuple):
# callable returning a namespace hint
if namespace:
raise ImproperlyConfigured('Cannot override the namespace for a dynamic module that provides a namespace')
urlconf_module, app_name, namespace = arg
else:
# No namespace hint - use manually provided namespace
urlconf_module = arg
return (urlconf_module, app_name, namespace)
def patterns(prefix, *args):
pattern_list = []
for t in args:
if isinstance(t, (list, tuple)):
t = url(prefix=prefix, *t)
elif isinstance(t, RegexURLPattern):
t.add_prefix(prefix)
pattern_list.append(t)
return pattern_list
def url(regex, view, kwargs=None, name=None, prefix=''):
if isinstance(view, (list,tuple)):
# For include(...) processing.
urlconf_module, app_name, namespace = view
return RegexURLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
else:
if isinstance(view, basestring):
if not view:
raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
return RegexURLPattern(regex, view, kwargs, name)
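# A minimal usage sketch of the helpers above (old-style Django URLconf; the
# "myapp" and "blog" modules are hypothetical):
#
# urlpatterns = patterns('myapp.views',
#     url(r'^$', 'index', name='index'),
#     url(r'^blog/', include('blog.urls', namespace='blog')),
# )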
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyMethodOverridingInspection/InstanceCheck.py
|
74
|
class MyType1(type):
def __instancecheck__(cls, instance):
return True
class MyType2(type):
def __instancecheck__<warning descr="Signature of method 'MyType2.__instancecheck__()' does not match signature of base method in class 'type'">(cls)</warning>:
return True
class MyType3(type):
def __instancecheck__<warning descr="Signature of method 'MyType3.__instancecheck__()' does not match signature of base method in class 'type'">(cls, foo, bar)</warning>:
return True
|
ypid/series60-remote
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2010 Lukas Hetzenecker <LuHe@gmx.at>
import os
import sys
from distutils.core import setup
VERSION = '0.4.80'
applicationSis_Py14 = "series60-remote-py14.sis"
pythonSis_Py14 = "PythonForS60_1_4_5_3rdEd.sis"
applicationSis_Py20 = "series60-remote-py20.sis"
pythonSis_Py20 = "Python_2.0.0.sis"
textfiles = ['Changelog', 'HACKING', 'INSTALL', 'LICENSE', 'LICENSE.icons-oxygen',
'README.icons-oxygen', 'TODO']
pys60 = 'PythonForS60_1_4_5_3rdEd.sis'
sisfiles = ['mobile/' + applicationSis_Py14, 'mobile/' + applicationSis_Py20, 'mobile/' + pythonSis_Py14, 'mobile/' + pythonSis_Py20]
desktopfile = 'pc/series60-remote.desktop'
extra = {}
src_dir = 'pc'
app_dir = 'series60_remote'
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
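# For example, fullsplit('pc/data/templates') == ['pc', 'data', 'templates']
# on POSIX-style paths.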
# Before distributing the source, call the sibling setup scripts to generate files.
if "sdist" in sys.argv[1:] and os.name == 'posix':
if os.path.exists('mobile/create_package'):
os.system("cd mobile && ./create_package && cd ..")
#if os.path.exists('pc/generate-pro.sh') and os.path.exists('pc/mkpyqt.py'):
# os.system("cd pc && ./generate-pro.sh && ./mkpyqt.py && cd ..")
if os.path.exists('pc/mkpyqt.py'):
os.system("cd pc && ./mkpyqt.py && cd ..")
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk(src_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
s = fullsplit(dirpath)
if s[0] == src_dir:
s[0] = app_dir
packages.append('.'.join(s))
dist = setup(name='series60-remote',
version=VERSION,
author='Lukas Hetzenecker',
author_email='LuHe@gmx.at',
url='http://series60-remote.sf.net',
description='Series60-Remote is an application to manage your S60 mobile phone.',
             long_description="""Series60-Remote is an application for Linux and Windows XP that manages Nokia mobile phones with the S60 operating system. The application provides the following features:
- Message management
- Contact management
- File management
""",
license='GPL2',
packages=packages,
package_dir={app_dir: src_dir},
scripts=['series60-remote']
)
# HACK! Copy extra files
if dist.have_run.get('install'):
install = dist.get_command_obj('install')
# Copy textfiles in site-package directory
for file in textfiles:
install.copy_file(file, os.path.join(install.install_lib, app_dir))
# Copy .sis files on Unix-like systems to /usr/share/series60-remote, on Windows systems
# to PREFIX/site-packages/series60_remote/mobile
if os.name == 'posix':
dest = os.path.join(install.install_data, 'share', install.config_vars['dist_name'])
else:
dest = os.path.join(install.install_lib, app_dir)
install.mkpath(dest + os.sep + 'mobile')
for file in sisfiles:
install.copy_file(file, dest + os.sep + 'mobile')
# Copy export templates too
install.mkpath(dest + os.sep + 'data')
for root, dirs, files in os.walk('pc' + os.sep +'data'):
# Ignore hidden dirs
for dir in dirs:
if dir.startswith("."):
dirs.remove(dir)
datadest = dest + os.sep + os.sep.join(root.split(os.sep)[1:]) # remove pc/ from directory
install.mkpath(datadest) # create target directory
for file in files:
install.copy_file(root + os.sep + file, datadest)
# Install desktop file on Linux
if os.name == 'posix':
dest = os.path.join(install.install_data, 'share', 'applications')
install.mkpath(dest)
install.copy_file(desktopfile, dest)
|
johnlinp/telegram-good-timing-bot
|
refs/heads/master
|
goodtiming/core/request.py
|
1
|
class Request:
def __init__(self, kind, arguments):
self.kind = kind
self.arguments = arguments
|
Edraak/circleci-edx-platform
|
refs/heads/circleci-master
|
lms/djangoapps/certificates/migrations/0005_auto_20151208_0801.py
|
83
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('certificates', '0004_certificategenerationhistory'),
]
operations = [
migrations.AlterField(
model_name='generatedcertificate',
name='verify_uuid',
field=models.CharField(default=b'', max_length=32, db_index=True, blank=True),
),
]
|
ovgu-FINken/paparazzi_pre_merge
|
refs/heads/master
|
sw/ground_segment/python/joystick/arduino_dangerboard.py
|
85
|
#! /usr/bin/env python
import time
import serial # sudo apt-get install python-serial
#open() #open port
#close() #close port immediately
#setBaudrate(baudrate) #change baudrate on an open port
#inWaiting() #return the number of chars in the receive buffer
#read(size=1) #read "size" characters
#write(s) #write the string s to the port
#flushInput() #flush input buffer, discarding all its contents
#flushOutput() #flush output buffer, abort output
#sendBreak() #send break condition
#setRTS(level=1) #set RTS line to specified logic level
#setDTR(level=1) #set DTR line to specified logic level
#getCTS() #return the state of the CTS line
#getDSR() #return the state of the DSR line
#getRI() #return the state of the RI line
#getCD() #return the state of the CD line
class arduino_dangerboard():
def __init__(self, port='/dev/ttyUSB0'):
self.port = serial.Serial(port, 115200)
self.SLIDER_COUNT = 3
self.sliders = [0] * self.SLIDER_COUNT
self.POT_MIN = 0.0
self.POT_MAX = 1023.0
def HandleEvent(self):
pass
    def poll(self):
        while True:
            self.port.write('G')
            foo = self.port.inWaiting()
            if foo == 6:
                a = ord( self.port.read())
                b = ord( self.port.read())
                c = ord( self.port.read())
                d = ord( self.port.read())
                e = ord( self.port.read())
                f = ord( self.port.read())
                # each slider value arrives as two bytes, high byte first
                self.sliders[0] = (a << 8) | b
                self.sliders[1] = (c << 8) | d
                self.sliders[2] = (e << 8) | f
                self.HandleEvent()
            else: # flush queue
                while foo:
                    foo -= 1
                    data = self.port.read()
            time.sleep(0.25)
def main():
try:
foo = arduino_dangerboard()
foo.poll()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
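# --- Hypothetical usage sketch (an addition): react to slider updates by
# overriding the HandleEvent() hook that poll() calls above.
class PrintingBoard(arduino_dangerboard):
    def HandleEvent(self):
        print(self.sliders)  # three raw 10-bit ADC readings (0..1023)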
|
atplanet/ansible-modules-extras
|
refs/heads/devel
|
monitoring/datadog_monitor.py
|
13
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# import module snippets
# Import Datadog
try:
from datadog import initialize, api
HAS_DATADOG = True
except:
HAS_DATADOG = False
DOCUMENTATION = '''
---
module: datadog_monitor
short_description: Manages Datadog monitors
description:
- "Manages monitors within Datadog"
- "Options like described on http://docs.datadoghq.com/api/"
version_added: "2.0"
author: "Sebastian Kornehl (@skornehl)"
notes: []
requirements: [datadog]
options:
api_key:
description: ["Your DataDog API key."]
required: true
app_key:
description: ["Your DataDog app key."]
required: true
state:
description: ["The designated state of the monitor."]
required: true
    choices: ['present', 'absent', 'mute', 'unmute']
type:
description:
- "The type of the monitor."
      - "The 'event alert' type is available starting at Ansible 2.1."
required: false
default: null
choices: ['metric alert', 'service check', 'event alert']
query:
description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
required: false
default: null
name:
description: ["The name of the alert."]
required: true
message:
description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'."]
required: false
default: null
silenced:
description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "]
required: false
default: ""
notify_no_data:
description: ["A boolean indicating whether this monitor will notify when data stops reporting.."]
required: false
default: False
no_data_timeframe:
description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."]
required: false
default: 2x timeframe for metric, 2 minutes for service
timeout_h:
description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."]
required: false
default: null
renotify_interval:
description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."]
required: false
default: null
escalation_message:
description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"]
required: false
default: null
notify_audit:
description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."]
required: false
default: False
thresholds:
description: ["A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have multiple thresholds, we don't define them directly in the query."]
required: false
default: {'ok': 1, 'critical': 1, 'warning': 1}
'''
EXAMPLES = '''
# Create a metric monitor
datadog_monitor:
type: "metric alert"
name: "Test monitor"
state: "present"
query: "datadog.agent.up".over("host:host1").last(2).count_by_status()"
message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Deletes a monitor
datadog_monitor:
name: "Test monitor"
state: "absent"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Mutes a monitor
datadog_monitor:
name: "Test monitor"
state: "mute"
silenced: '{"*":None}'
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Unmutes a monitor
datadog_monitor:
name: "Test monitor"
state: "unmute"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
'''
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True),
app_key=dict(required=True),
            state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
            type=dict(required=False, choices=['metric alert', 'service check', 'event alert']),
name=dict(required=True),
query=dict(required=False),
message=dict(required=False, default=None),
silenced=dict(required=False, default=None, type='dict'),
notify_no_data=dict(required=False, default=False, type='bool'),
no_data_timeframe=dict(required=False, default=None),
timeout_h=dict(required=False, default=None),
renotify_interval=dict(required=False, default=None),
escalation_message=dict(required=False, default=None),
notify_audit=dict(required=False, default=False, type='bool'),
thresholds=dict(required=False, type='dict', default=None),
)
)
# Prepare Datadog
if not HAS_DATADOG:
module.fail_json(msg='datadogpy required for this module')
options = {
'api_key': module.params['api_key'],
'app_key': module.params['app_key']
}
initialize(**options)
if module.params['state'] == 'present':
install_monitor(module)
elif module.params['state'] == 'absent':
delete_monitor(module)
elif module.params['state'] == 'mute':
mute_monitor(module)
elif module.params['state'] == 'unmute':
unmute_monitor(module)
def _fix_template_vars(message):
return message.replace('[[', '{{').replace(']]', '}}')
def _get_monitor(module):
for monitor in api.Monitor.get_all():
if monitor['name'] == module.params['name']:
return monitor
return {}
def _post_monitor(module, options):
try:
msg = api.Monitor.create(type=module.params['type'], query=module.params['query'],
name=module.params['name'], message=_fix_template_vars(module.params['message']),
options=options)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
else:
module.exit_json(changed=True, msg=msg)
except Exception, e:
module.fail_json(msg=str(e))
def _equal_dicts(a, b, ignore_keys):
ka = set(a).difference(ignore_keys)
kb = set(b).difference(ignore_keys)
return ka == kb and all(a[k] == b[k] for k in ka)
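# For example, _equal_dicts({'a': 1, 'b': 2}, {'a': 1, 'b': 9}, ['b']) is True:
# keys named in ignore_keys are excluded from the comparison.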
def _update_monitor(module, monitor, options):
try:
msg = api.Monitor.update(id=monitor['id'], query=module.params['query'],
name=module.params['name'], message=_fix_template_vars(module.params['message']),
options=options)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified']):
module.exit_json(changed=False, msg=msg)
else:
module.exit_json(changed=True, msg=msg)
except Exception, e:
module.fail_json(msg=str(e))
def install_monitor(module):
options = {
"silenced": module.params['silenced'],
"notify_no_data": module.boolean(module.params['notify_no_data']),
"no_data_timeframe": module.params['no_data_timeframe'],
"timeout_h": module.params['timeout_h'],
"renotify_interval": module.params['renotify_interval'],
"escalation_message": module.params['escalation_message'],
"notify_audit": module.boolean(module.params['notify_audit']),
}
if module.params['type'] == "service check":
options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
if module.params['type'] == "metric alert" and module.params['thresholds'] is not None:
options["thresholds"] = module.params['thresholds']
monitor = _get_monitor(module)
if not monitor:
_post_monitor(module, options)
else:
_update_monitor(module, monitor, options)
def delete_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.exit_json(changed=False)
try:
msg = api.Monitor.delete(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception, e:
module.fail_json(msg=str(e))
def mute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif monitor['options']['silenced']:
module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
elif (module.params['silenced'] is not None
and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0):
module.exit_json(changed=False)
try:
if module.params['silenced'] is None or module.params['silenced'] == "":
msg = api.Monitor.mute(id=monitor['id'])
else:
msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
module.exit_json(changed=True, msg=msg)
except Exception, e:
module.fail_json(msg=str(e))
def unmute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif not monitor['options']['silenced']:
module.exit_json(changed=False)
try:
msg = api.Monitor.unmute(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception, e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
mdworks2016/work_development
|
refs/heads/master
|
Python/20_Third_Certification/venv/lib/python3.7/site-packages/chardet/big5freq.py
|
342
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851 / (1 - 0.74851) = 2.98
# Random Distribution Ratio = 512 / (5401 - 512) = 0.105
#
# The typical distribution ratio is about 25% of the ideal one, but still much higher than the RDR
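# A minimal sketch (not part of chardet) of how such a distribution ratio can
# be computed, assuming `counts` maps frequency-order index -> observed count:
#
#     def distribution_ratio(counts, cutoff=512):
#         frequent = sum(c for i, c in counts.items() if i < cutoff)
#         rare = sum(c for i, c in counts.items() if i >= cutoff)
#         return float(frequent) / rare if rare else float('inf')
#
# With the cumulative figures above, the 512 most frequent symbols cover
# 74.851% of text, giving 0.74851 / (1 - 0.74851) ~= 2.98 as the ideal ratio.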
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char-to-FreqOrder table
BIG5_TABLE_SIZE = 5376
BIG5_CHAR_TO_FREQ_ORDER = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376
)
|
tinfoil/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/committers.py
|
121
|
# Copyright (c) 2011, Apple Inc. All rights reserved.
# Copyright (c) 2009, 2011, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for committer and reviewer validation.
import fnmatch
import json
from webkitpy.common.editdistance import edit_distance
from webkitpy.common.memoized import memoized
from webkitpy.common.system.filesystem import FileSystem
# The list of contributors has been moved to contributors.json
class Contributor(object):
def __init__(self, name, email_or_emails, irc_nickname_or_nicknames=None):
assert(name)
assert(email_or_emails)
self.full_name = name
if isinstance(email_or_emails, str):
self.emails = [email_or_emails]
else:
self.emails = email_or_emails
self.emails = map(lambda email: email.lower(), self.emails) # Emails are case-insensitive.
if isinstance(irc_nickname_or_nicknames, str):
self.irc_nicknames = [irc_nickname_or_nicknames]
else:
self.irc_nicknames = irc_nickname_or_nicknames
self.can_commit = False
self.can_review = False
def bugzilla_email(self):
# FIXME: We're assuming the first email is a valid bugzilla email,
# which might not be right.
return self.emails[0]
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return '"%s" <%s>' % (self.full_name, self.emails[0])
def contains_string(self, search_string):
string = search_string.lower()
if string in self.full_name.lower():
return True
if self.irc_nicknames:
for nickname in self.irc_nicknames:
if string in nickname.lower():
return True
for email in self.emails:
if string in email:
return True
return False
def matches_glob(self, glob_string):
if fnmatch.fnmatch(self.full_name, glob_string):
return True
if self.irc_nicknames:
for nickname in self.irc_nicknames:
if fnmatch.fnmatch(nickname, glob_string):
return True
for email in self.emails:
if fnmatch.fnmatch(email, glob_string):
return True
return False
class Committer(Contributor):
def __init__(self, name, email_or_emails, irc_nickname=None):
Contributor.__init__(self, name, email_or_emails, irc_nickname)
self.can_commit = True
class Reviewer(Committer):
def __init__(self, name, email_or_emails, irc_nickname=None):
Committer.__init__(self, name, email_or_emails, irc_nickname)
self.can_review = True
class CommitterList(object):
# Committers and reviewers are passed in to allow easy testing
def __init__(self,
committers=[],
reviewers=[],
contributors=[]):
# FIXME: These arguments only exist for testing. Clean it up.
if not (committers or reviewers or contributors):
loaded_data = self.load_json()
contributors = loaded_data['Contributors']
committers = loaded_data['Committers']
reviewers = loaded_data['Reviewers']
self._contributors = contributors + committers + reviewers
self._committers = committers + reviewers
self._reviewers = reviewers
self._contributors_by_name = {}
self._accounts_by_email = {}
self._accounts_by_login = {}
@staticmethod
@memoized
def load_json():
filesystem = FileSystem()
json_path = filesystem.join(filesystem.dirname(filesystem.path_to_module('webkitpy.common.config')), 'contributors.json')
contributors = json.loads(filesystem.read_text_file(json_path))
return {
'Contributors': [Contributor(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Contributors'].iteritems()],
'Committers': [Committer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Committers'].iteritems()],
'Reviewers': [Reviewer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Reviewers'].iteritems()],
}
def contributors(self):
return self._contributors
def committers(self):
return self._committers
def reviewers(self):
return self._reviewers
def _name_to_contributor_map(self):
if not len(self._contributors_by_name):
for contributor in self._contributors:
assert(contributor.full_name)
assert(contributor.full_name.lower() not in self._contributors_by_name) # We should never have duplicate names.
self._contributors_by_name[contributor.full_name.lower()] = contributor
return self._contributors_by_name
def _email_to_account_map(self):
if not len(self._accounts_by_email):
for account in self._contributors:
for email in account.emails:
assert(email not in self._accounts_by_email) # We should never have duplicate emails.
self._accounts_by_email[email] = account
return self._accounts_by_email
def _login_to_account_map(self):
if not len(self._accounts_by_login):
for account in self._contributors:
if account.emails:
login = account.bugzilla_email()
assert(login not in self._accounts_by_login) # We should never have duplicate emails.
self._accounts_by_login[login] = account
return self._accounts_by_login
def _committer_only(self, record):
if record and not record.can_commit:
return None
return record
def _reviewer_only(self, record):
if record and not record.can_review:
return None
return record
def committer_by_name(self, name):
return self._committer_only(self.contributor_by_name(name))
def contributor_by_irc_nickname(self, irc_nickname):
for contributor in self.contributors():
# FIXME: This should do case-insensitive comparison or assert that all IRC nicknames are in lowercase
if contributor.irc_nicknames and irc_nickname in contributor.irc_nicknames:
return contributor
return None
def contributors_by_search_string(self, string):
glob_matches = filter(lambda contributor: contributor.matches_glob(string), self.contributors())
return glob_matches or filter(lambda contributor: contributor.contains_string(string), self.contributors())
def contributors_by_email_username(self, string):
string = string + '@'
result = []
for contributor in self.contributors():
for email in contributor.emails:
if email.startswith(string):
result.append(contributor)
break
return result
def _contributor_name_shorthands(self, contributor):
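# e.g. for a (hypothetical) contributor named "Dan Bernstein" this returns
# ('Dan', 'Bernstein', 'DanB', 'Dan B').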
if ' ' not in contributor.full_name:
return []
split_fullname = contributor.full_name.split()
first_name = split_fullname[0]
last_name = split_fullname[-1]
return first_name, last_name, first_name + last_name[0], first_name + ' ' + last_name[0]
def _tokenize_contributor_name(self, contributor):
full_name_in_lowercase = contributor.full_name.lower()
tokens = [full_name_in_lowercase] + full_name_in_lowercase.split()
if contributor.irc_nicknames:
return tokens + [nickname.lower() for nickname in contributor.irc_nicknames if len(nickname) > 5]
return tokens
def contributors_by_fuzzy_match(self, string):
string_in_lowercase = string.lower()
# 1. Exact match for fullname, email and irc_nicknames
account = self.contributor_by_name(string_in_lowercase) or self.contributor_by_email(string_in_lowercase) or self.contributor_by_irc_nickname(string_in_lowercase)
if account:
return [account], 0
# 2. Exact match for email username (before @)
accounts = self.contributors_by_email_username(string_in_lowercase)
if accounts and len(accounts) == 1:
return accounts, 0
# 3. Exact match for first name, last name, and first name + initial combinations such as "Dan B" and "Tim H"
accounts = [contributor for contributor in self.contributors() if string in self._contributor_name_shorthands(contributor)]
if accounts and len(accounts) == 1:
return accounts, 0
# 4. Finally, fuzzy-match using edit-distance
string = string_in_lowercase
contributorWithMinDistance = []
minDistance = len(string) / 2 - 1
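# Only accept candidates within roughly half the query's length in edits.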
for contributor in self.contributors():
tokens = self._tokenize_contributor_name(contributor)
editdistances = [edit_distance(token, string) for token in tokens if abs(len(token) - len(string)) <= minDistance]
if not editdistances:
continue
distance = min(editdistances)
if distance == minDistance:
contributorWithMinDistance.append(contributor)
elif distance < minDistance:
contributorWithMinDistance = [contributor]
minDistance = distance
if not len(contributorWithMinDistance):
return [], len(string)
return contributorWithMinDistance, minDistance
def contributor_by_email(self, email):
return self._email_to_account_map().get(email.lower()) if email else None
def contributor_by_name(self, name):
return self._name_to_contributor_map().get(name.lower()) if name else None
def committer_by_email(self, email):
return self._committer_only(self.contributor_by_email(email))
def reviewer_by_email(self, email):
return self._reviewer_only(self.contributor_by_email(email))
|
fldc/CouchPotatoServer
|
refs/heads/custom
|
couchpotato/core/media/_base/providers/torrent/torrentz.py
|
11
|
import re
import traceback
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
import six
log = CPLog(__name__)
class Base(TorrentMagnetProvider, RSS):
urls = {
'detail': 'https://torrentz.eu/%s',
'search': 'https://torrentz.eu/feed?q=%s',
'verified_search': 'https://torrentz.eu/feed_verified?q=%s'
}
http_time_between_calls = 0
def _searchOnTitle(self, title, media, quality, results):
search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search']
# Create search parameters
search_params = self.buildUrl(title, media, quality)
smin = quality.get('size_min')
smax = quality.get('size_max')
if smin and smax:
search_params += ' size %sm - %sm' % (smin, smax)
min_seeds = tryInt(self.conf('minimal_seeds'))
if min_seeds:
search_params += ' seed > %s' % (min_seeds - 1)
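# Torrentz interprets modifiers embedded in the query string itself, e.g.
# "title size 700m - 1400m seed > 4", which is what the two blocks above build.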
rss_data = self.getRSSData(search_url % search_params)
if rss_data:
try:
for result in rss_data:
name = self.getTextElement(result, 'title')
detail_url = self.getTextElement(result, 'link')
description = self.getTextElement(result, 'description')
magnet = splitString(detail_url, '/')[-1]
magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce'))
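# The feed's <description> element is expected to look like
# "Size: 700 MB Seeds: 1,234 Peers: 567", which the regex below parses.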
reg = re.search(r'Size: (?P<size>\d+) MB Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description))
size = reg.group('size')
seeds = reg.group('seeds').replace(',', '')
peers = reg.group('peers').replace(',', '')
results.append({
'id': magnet,
'name': six.text_type(name),
'url': magnet_url,
'detail_url': detail_url,
'size': tryInt(size),
'seeders': tryInt(seeds),
'leechers': tryInt(peers),
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
config = [{
'name': 'torrentz',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'Torrentz',
'description': 'Torrentz is a free, fast and powerful meta-search engine. <a href="https://torrentz.eu/" target="_blank">Torrentz</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAQklEQVQ4y2NgAALjtJn/ycEMlGiGG0IVAxiwAKzOxaKGARcgxgC8YNSAwWoAzuRMjgsIugqfAUR5CZcBRIcHsWEAADSA96Ig020yAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': True
},
{
'name': 'verified_only',
'type': 'bool',
'default': True,
'advanced': True,
'description': 'Only search verified releases',
},
{
'name': 'minimal_seeds',
'type': 'int',
'default': 1,
'advanced': True,
'description': 'Only return releases with minimal X seeds',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
}
]
}]
|
fahhem/mbed-os
|
refs/heads/master
|
tools/test/config_test/test04/test_data.py
|
38
|
# Similar to test1, but this time B2 attempts to define base1_1. Since base1_1
# is already defined in B1 and F derives from both B1 and B2, this results
# in an error. However, when building for B2 instead of F, defining base1_1
# should be OK.
expected_results = {
"f": {
"desc": "attempt to redefine parameter in target inheritance tree",
"exception_msg": "Parameter name 'base1_1' defined in both 'target:b2' and 'target:b1'"
},
"b2": {
"desc": "it should be OK to define parameters with the same name in non-related targets",
"target.base2_1": "v_base2_1_b2",
"target.base2_2": "v_base2_2_b2",
"target.base1_1": "v_base1_1_b2"
}
}
|
Andypsamp/CODfinalJUNIT
|
refs/heads/master
|
test/test_importadded.py
|
25
|
# This file is part of beets.
# Copyright 2015, Stig Inge Lea Bjornsen.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the `importadded` plugin."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
from test._common import unittest
from test.test_importer import ImportHelper, AutotagStub
from beets import importer
from beets import util
from beetsplug.importadded import ImportAddedPlugin
_listeners = ImportAddedPlugin.listeners
def preserve_plugin_listeners():
"""Preserve the initial plugin listeners as they would otherwise be
deleted after the first setup / tear down cycle.
"""
if not ImportAddedPlugin.listeners:
ImportAddedPlugin.listeners = _listeners
def modify_mtimes(paths, offset=-60000):
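"""Shift each path's mtime by offset * its 1-based position (in seconds),
so the files end up with distinct, strictly ordered modification times."""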
for i, path in enumerate(paths, start=1):
mstat = os.stat(path)
os.utime(path, (mstat.st_atime, mstat.st_mtime + offset * i))
class ImportAddedTest(unittest.TestCase, ImportHelper):
# The minimum mtime of the files to be imported
min_mtime = None
def setUp(self):
preserve_plugin_listeners()
self.setup_beets()
self.load_plugins('importadded')
self._create_import_dir(2)
# Different mtimes on the files to be imported in order to test the
# plugin
modify_mtimes((mfile.path for mfile in self.media_files))
self.min_mtime = min(os.path.getmtime(mfile.path)
for mfile in self.media_files)
self.matcher = AutotagStub().install()
self.matcher.matching = AutotagStub.GOOD
self._setup_import_session()
self.importer.add_choice(importer.action.APPLY)
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
self.matcher.restore()
def findMediaFile(self, item):
"""Find the pre-import MediaFile for an Item"""
for m in self.media_files:
if m.title.replace('Tag', 'Applied') == item.title:
return m
raise AssertionError("No MediaFile found for Item " +
util.displayable_path(item.path))
def assertEqualTimes(self, first, second, msg=None):
"""For comparing file modification times at a sufficient precision"""
self.assertAlmostEqual(first, second, places=4, msg=msg)
def assertAlbumImport(self):
self.importer.run()
album = self.lib.albums().get()
self.assertEqual(album.added, self.min_mtime)
for item in album.items():
self.assertEqual(item.added, self.min_mtime)
def test_import_album_with_added_dates(self):
self.assertAlbumImport()
def test_import_album_inplace_with_added_dates(self):
self.config['import']['copy'] = False
self.config['import']['move'] = False
self.config['import']['link'] = False
self.assertAlbumImport()
def test_import_album_with_preserved_mtimes(self):
self.config['importadded']['preserve_mtimes'] = True
self.importer.run()
album = self.lib.albums().get()
self.assertEqual(album.added, self.min_mtime)
for item in album.items():
self.assertEqualTimes(item.added, self.min_mtime)
mediafile_mtime = os.path.getmtime(self.findMediaFile(item).path)
self.assertEqualTimes(item.mtime, mediafile_mtime)
self.assertEqualTimes(os.path.getmtime(item.path),
mediafile_mtime)
def test_reimported_album_skipped(self):
# Import and record the original added dates
self.importer.run()
album = self.lib.albums().get()
album_added_before = album.added
items_added_before = dict((item.path, item.added)
for item in album.items())
# Newer Item path mtimes as if Beets had modified them
modify_mtimes(items_added_before.keys(), offset=10000)
# Reimport
self._setup_import_session(import_dir=album.path)
self.importer.run()
# Verify the reimported items
album = self.lib.albums().get()
self.assertEqualTimes(album.added, album_added_before)
items_added_after = dict((item.path, item.added)
for item in album.items())
for item_path, added_after in items_added_after.iteritems():
self.assertEqualTimes(items_added_before[item_path], added_after,
"reimport modified Item.added for " +
item_path)
def test_import_singletons_with_added_dates(self):
self.config['import']['singletons'] = True
self.importer.run()
for item in self.lib.items():
mfile = self.findMediaFile(item)
self.assertEqualTimes(item.added, os.path.getmtime(mfile.path))
def test_import_singletons_with_preserved_mtimes(self):
self.config['import']['singletons'] = True
self.config['importadded']['preserve_mtimes'] = True
self.importer.run()
for item in self.lib.items():
mediafile_mtime = os.path.getmtime(self.findMediaFile(item).path)
self.assertEqualTimes(item.added, mediafile_mtime)
self.assertEqualTimes(item.mtime, mediafile_mtime)
self.assertEqualTimes(os.path.getmtime(item.path),
mediafile_mtime)
def test_reimported_singletons_skipped(self):
self.config['import']['singletons'] = True
# Import and record the original added dates
self.importer.run()
items_added_before = dict((item.path, item.added)
for item in self.lib.items())
# Newer Item path mtimes as if Beets had modified them
modify_mtimes(items_added_before.keys(), offset=10000)
# Reimport
import_dir = os.path.dirname(items_added_before.keys()[0])
self._setup_import_session(import_dir=import_dir, singletons=True)
self.importer.run()
# Verify the reimported items
items_added_after = dict((item.path, item.added)
for item in self.lib.items())
for item_path, added_after in items_added_after.iteritems():
self.assertEqualTimes(items_added_before[item_path], added_after,
"reimport modified Item.added for " +
item_path)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
|
dlt-rilmta/emLam
|
refs/heads/master
|
emLam/nn/lstm_model.py
|
2
|
"""Generic LSTM language model."""
from emLam.nn.rnn import get_rnn
import tensorflow as tf
class LSTMModel(object):
"""Generic LSTM language model based on the PTB model in tf/models."""
def __init__(self, params, is_training, softmax, need_prediction=False):
self.is_training = is_training
self.params = params
self._data()
outputs = self._build_network()
if need_prediction:
self._cost, self.prediction = softmax(
outputs, self._targets, need_prediction)
else:
self._cost = softmax(outputs, self._targets)
if is_training:
self._optimize()
else:
self._train_op = tf.no_op()
with tf.name_scope('Summaries') as scope:
summaries = []
summaries.append(
tf.summary.scalar(scope + 'Loss', self._cost))
if is_training:
summaries.append(
tf.summary.scalar(scope + 'Learning rate',
self._lr))
self.summaries = tf.summary.merge(summaries)
def _data(self):
"""
Creates the input placeholders. If using an embedding, the input is
a single number per token; if not, it must be one-hot encoded.
"""
dims = [self.params.batch_size, self.params.num_steps]
self._input_data = tf.placeholder(
tf.int32, dims, name='input_placeholder')
self._targets = tf.placeholder(
tf.int32, dims, name='target_placeholder')
def _build_network(self):
# Slightly better results can be obtained with forget gate biases
# initialized to 1, but the hyperparameters of the model would then need
# to be different from those reported in the paper.
# D: Not really...
def _get_layer():
rnn_cell = get_rnn(self.params.rnn_cell, self.params.hidden_size)
if self.is_training and self.params.dropout < 1:
rnn_cell = tf.contrib.rnn.DropoutWrapper(
rnn_cell, output_keep_prob=self.params.dropout)
return rnn_cell
cell = tf.contrib.rnn.MultiRNNCell(
[_get_layer() for _ in range(self.params.num_layers)])
self._initial_state = cell.zero_state(self.params.batch_size,
dtype=self.params.data_type)
if self.params.embedding:
# If using an embedding, the input is a single number per token...
with tf.device("/cpu:0"):
embedding = tf.get_variable(
'embedding', [self.params.vocab_size, self.params.hidden_size],
trainable=self.params.embedding_trainable,
dtype=self.params.data_type)
inputs = tf.nn.embedding_lookup(embedding, self._input_data)
else:
# ..., if not, it must be one-hot encoded.
inputs = tf.one_hot(self._input_data, self.params.vocab_size,
dtype=self.params.data_type)
# tf.unpack(inputs, axis=1) only needed for rnn, not dynamic_rnn
if self.is_training and self.params.dropout < 1:
inputs = tf.nn.dropout(inputs, self.params.dropout)
# Simplified version of tensorflow.models.rnn.rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# inputs = [tf.squeeze(input_, [1])
# for input_ in tf.split(1, num_steps, inputs)]
# outputs, state = tf.nn.rnn(cell, inputs, initial_state=self._initial_state)
# outputs = []
# state = self._initial_state
# with tf.variable_scope("RNN"):
# for time_step in range(num_steps):
# if time_step > 0: tf.get_variable_scope().reuse_variables()
# (cell_output, state) = cell(inputs[:, time_step, :], state)
# outputs.append(cell_output)
# output = tf.reshape(tf.concat(1, outputs), [-1, hidden_size])
# This could be replaced by tf.scan(), see
# http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html
outputs, state = tf.nn.dynamic_rnn(
inputs=inputs, cell=cell, dtype=self.params.data_type,
initial_state=self._initial_state)
self._final_state = state
return outputs
def _optimize(self):
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads = tf.gradients(self.cost, tvars)
if self.params.max_grad_norm:
grads, _ = tf.clip_by_global_norm(grads, self.params.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars))
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
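# Typical usage from a training loop (sketch; `base_lr`, `decay`, `epoch` and
# `start_epoch` are assumed to come from the caller, following the PTB recipe):
#   model.assign_lr(session, base_lr * decay ** max(epoch - start_epoch, 0))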
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
|
uwdata/termite-visualizations
|
refs/heads/master
|
web2py/gluon/contrib/pymysql/util.py
|
95
|
import struct
def byte2int(b):
if isinstance(b, int):
return b
else:
return struct.unpack("!B", b)[0]
def int2byte(i):
return struct.pack("!B", i)
def join_bytes(bs):
if len(bs) == 0:
return ""
else:
rv = bs[0]
for b in bs[1:]:
rv += b
return rv
|
beeftornado/sentry
|
refs/heads/master
|
src/sentry/plugins/interfaces/__init__.py
|
41
|
from __future__ import absolute_import
from .releasehook import * # NOQA
|
benhoff/pypi-parser
|
refs/heads/master
|
src/pypi_parser.py
|
1
|
import re
import time
import asyncio
from collections import OrderedDict
import requests
from simpleyapsy import IPlugin
# NOTE: this module also references helpers that are never imported here
# (lazy_property, dateparse, style, bargraph, DATE_FORMAT, _COLOR_LEN,
# _BOLD_LEN); they appear to be adapted from the pypi-cli project and must
# be supplied for the chart/date code paths to run.
DEFAULT_PYPI = 'https://pypi.python.org/pypi'
PYPI_RE = re.compile('''^(?:(?P<pypi>https?://[^/]+/pypi)/)?
(?P<name>[-A-Za-z0-9_.]+)
(?:/(?P<version>[-A-Za-z0-9.]+))?$''', re.X)
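# PYPI_RE matches a bare name ('requests'), a name with a version
# ('requests/2.7.0'), or a full URL such as
# 'https://pypi.python.org/pypi/requests/2.7.0'.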
# NOTE: not used
# SEARCH_URL = 'https://pypi.python.org/pypi?%3Aaction=search&term={query}'
class PyPiParser(IPlugin):
# TODO: change to Regex Plugin class
def __init__(self):
super().__init__()
self.name = 'pypi-parser'
self.matches = [re.compile('pypi'), PYPI_RE]
def __call__(self, name):
# get_package and Package.data are coroutines, so resolve them here;
# run_in_executor expects a plain callable, not a coroutine function.
package = yield from get_package(name)
yield from package.data()
return package.downloads, package.average_downloads
@asyncio.coroutine
def get_package(name_or_url, client=None):
m = PYPI_RE.match(name_or_url)
if not m:
return None
pypi_url = m.group('pypi') or DEFAULT_PYPI
name = m.group('name')
return Package(name, pypi_url=pypi_url, client=client)
class Package(object):
def __init__(self, name, client=None, pypi_url=DEFAULT_PYPI):
self.client = client or requests.Session()
self.name = name
self.url = '{pypi_url}/{name}/json'.format(pypi_url=pypi_url,
name=name)
@asyncio.coroutine
def data(self):
resp = self.client.get(self.url)
if resp.status_code == 404:
raise Exception('Package not found')
return resp.json()
@lazy_property
def versions(self):
"""Return a list of versions, sorted by release datae."""
return [k for k, v in self.release_info]
@lazy_property
def version_downloads(self):
"""Return a dictionary of version:download_count pairs."""
ret = OrderedDict()
for release, info in self.release_info:
download_count = sum(file_['downloads'] for file_ in info)
ret[release] = download_count
return ret
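# e.g. OrderedDict([('1.0', 1200), ('1.1', 3400)]) (hypothetical counts),
# ordered by the first upload date of each release.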
@property
def release_info(self):
release_info = self.data['releases']
# filter out any versions that have no releases
filtered = [(ver, releases) for ver, releases in release_info.items()
if len(releases) > 0]
# sort by first upload date of each release
return sorted(filtered, key=lambda x: x[1][0]['upload_time'])
@lazy_property
def version_dates(self):
ret = OrderedDict()
for release, info in self.release_info:
if info:
upload_time = dateparse(info[0]['upload_time'])
ret[release] = upload_time
return ret
def chart(self):
def style_version(version):
return style(version, fg='cyan', bold=True)
data = OrderedDict()
for version, dl_count in self.version_downloads.items():
date = self.version_dates.get(version)
date_formatted = ''
if date:
date_formatted = time.strftime(DATE_FORMAT,
self.version_dates[version].timetuple())
key = "{0:20} {1}".format(
style_version(version),
date_formatted
)
data[key] = dl_count
return bargraph(data, max_key_width=20 + _COLOR_LEN + _BOLD_LEN)
@lazy_property
def downloads(self):
"""Total download count.
:return: A tuple of the form (version, n_downloads)
"""
return sum(self.version_downloads.values())
@lazy_property
def max_version(self):
"""Version with the most downloads.
:return: A tuple of the form (version, n_downloads)
"""
data = self.version_downloads
if not data:
return None, 0
return max(data.items(), key=lambda item: item[1])
@lazy_property
def min_version(self):
"""Version with the fewest downloads."""
data = self.version_downloads
if not data:
return (None, 0)
return min(data.items(), key=lambda item: item[1])
@lazy_property
def average_downloads(self):
"""Average number of downloads."""
return int(self.downloads / len(self.versions))
@property
def author(self):
return self.data['info'].get('author')
@property
def description(self):
return self.data['info'].get('description')
@property
def summary(self):
return self.data['info'].get('summary')
@property
def author_email(self):
return self.data['info'].get('author_email')
@property
def maintainer(self):
return self.data['info'].get('maintainer')
@property
def maintainer_email(self):
return self.data['info'].get('maintainer_email')
@property
def license(self):
return self.data['info'].get('license')
@property
def downloads_last_day(self):
return self.data['info']['downloads']['last_day']
@property
def downloads_last_week(self):
return self.data['info']['downloads']['last_week']
@property
def downloads_last_month(self):
return self.data['info']['downloads']['last_month']
@property
def package_url(self):
return self.data['info']['package_url']
@property
def home_page(self):
return self.data['info'].get('home_page')
@property
def docs_url(self):
return self.data['info'].get('docs_url')
def __repr__(self):
return '<Package(name={0!r})>'.format(self.name)
|
windedge/odoo
|
refs/heads/8.0
|
addons/lunch/wizard/lunch_order.py
|
440
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_order_order(osv.TransientModel):
""" lunch order meal """
_name = 'lunch.order.order'
_description = 'Wizard to order a meal'
def order(self, cr, uid, ids, context=None):
return self.pool.get('lunch.order.line').order(cr, uid, ids, context=context)
|
ciraxwe/cherrypy-app-engine
|
refs/heads/master
|
cherrypy/process/plugins.py
|
49
|
"""Site services for use with a Web Site Process Bus."""
import os
import re
import signal as _signal
import sys
import time
import threading
from cherrypy._cpcompat import basestring, get_daemon, get_thread_ident
from cherrypy._cpcompat import ntob, set, Timer, SetDaemonProperty
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file
# has "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
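# Hedged illustration (added): how a relative filename recorded in
# sys.modules is later absolutized against this startup directory, so an
# os.chdir() in the application cannot confuse Autoreload:
#
#   f = os.path.normpath(os.path.join(_module__file__base, 'myapp/web.py'))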
class SimplePlugin(object):
"""Plugin base class which auto-subscribes methods for known channels."""
bus = None
"""A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine.
"""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Register this object as a (multi-channel) listener on the bus."""
for channel in self.bus.listeners:
# Subscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.subscribe(channel, method)
def unsubscribe(self):
"""Unregister this object as a listener on the bus."""
for channel in self.bus.listeners:
# Unsubscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.unsubscribe(channel, method)
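# Hedged usage sketch (added; not part of the original module): a minimal
# SimplePlugin subclass whose start() is auto-subscribed to the bus's
# 'start' channel by subscribe(). Any wspbus-style Bus works here.
def _demo_simple_plugin(bus):
    class HelloPlugin(SimplePlugin):
        def start(self):
            self.bus.log('HelloPlugin started.')
        start.priority = 80  # run after the core services have started
    plugin = HelloPlugin(bus)
    plugin.subscribe()
    return plugin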
class SignalHandler(object):
"""Register bus channels (and listeners) for system signals.
You can modify what signals your application listens for, and what it does
when it receives signals, by modifying :attr:`SignalHandler.handlers`,
a dict of {signal name: callback} pairs. The default set is::
handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
The :func:`SignalHandler.handle_SIGHUP` method calls
:func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>`
if the process is daemonized, but
:func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>`
if the process is attached to a TTY. This is because Unix window
managers tend to send SIGHUP to terminal windows when the user closes them.
Feel free to add signals which are not available on every platform.
The :class:`SignalHandler` will ignore errors raised from attempting
to register handlers for unknown signals.
"""
handlers = {}
"""A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""
signals = {}
"""A map from signal numbers to names."""
for k, v in vars(_signal).items():
if k.startswith('SIG') and not k.startswith('SIG_'):
signals[v] = k
del k, v
def __init__(self, bus):
self.bus = bus
# Set default handlers
self.handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
if sys.platform[:4] == 'java':
del self.handlers['SIGUSR1']
self.handlers['SIGUSR2'] = self.bus.graceful
self.bus.log("SIGUSR1 cannot be set on the JVM platform. "
"Using SIGUSR2 instead.")
self.handlers['SIGINT'] = self._jython_SIGINT_handler
self._previous_handlers = {}
def _jython_SIGINT_handler(self, signum=None, frame=None):
# See http://bugs.jython.org/issue1313
self.bus.log('Keyboard Interrupt: shutting down bus')
self.bus.exit()
def subscribe(self):
"""Subscribe self.handlers to signals."""
for sig, func in self.handlers.items():
try:
self.set_handler(sig, func)
except ValueError:
pass
def unsubscribe(self):
"""Unsubscribe self.handlers from signals."""
for signum, handler in self._previous_handlers.items():
signame = self.signals[signum]
if handler is None:
self.bus.log("Restoring %s handler to SIG_DFL." % signame)
handler = _signal.SIG_DFL
else:
self.bus.log("Restoring %s handler %r." % (signame, handler))
try:
our_handler = _signal.signal(signum, handler)
if our_handler is None:
self.bus.log("Restored old %s handler %r, but our "
"handler was not registered." %
(signame, handler), level=30)
except ValueError:
self.bus.log("Unable to restore %s handler %r." %
(signame, handler), level=40, traceback=True)
def set_handler(self, signal, listener=None):
"""Subscribe a handler for the given signal (number or name).
If the optional 'listener' argument is provided, it will be
subscribed as a listener for the given signal's channel.
If the given signal name or number is not available on the current
platform, ValueError is raised.
"""
if isinstance(signal, basestring):
signum = getattr(_signal, signal, None)
if signum is None:
raise ValueError("No such signal: %r" % signal)
signame = signal
else:
try:
signame = self.signals[signal]
except KeyError:
raise ValueError("No such signal: %r" % signal)
signum = signal
prev = _signal.signal(signum, self._handle_signal)
self._previous_handlers[signum] = prev
if listener is not None:
self.bus.log("Listening for %s." % signame)
self.bus.subscribe(signame, listener)
def _handle_signal(self, signum=None, frame=None):
"""Python signal handler (self.set_handler subscribes it for you)."""
signame = self.signals[signum]
self.bus.log("Caught signal %s." % signame)
self.bus.publish(signame)
def handle_SIGHUP(self):
"""Restart if daemonized, else exit."""
if os.isatty(sys.stdin.fileno()):
# not daemonized (may be foreground or background)
self.bus.log("SIGHUP caught but not daemonized. Exiting.")
self.bus.exit()
else:
self.bus.log("SIGHUP caught while daemonized. Restarting.")
self.bus.restart()
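# Hedged usage sketch (added): overriding the default SIGTERM behavior
# before registering the OS-level handlers; 'engine' stands in for a
# wspbus Bus instance.
#
#   handler = SignalHandler(engine)
#   handler.handlers['SIGTERM'] = engine.graceful  # drain instead of exit
#   handler.subscribe()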
try:
import pwd
import grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
"""Drop privileges. uid/gid arguments not available on Windows.
Special thanks to `Gavin Baker <http://antonym.org/2005/12/dropping-privileges-in-python.html>`_
"""
def __init__(self, bus, umask=None, uid=None, gid=None):
SimplePlugin.__init__(self, bus)
self.finalized = False
self.uid = uid
self.gid = gid
self.umask = umask
def _get_uid(self):
return self._uid
def _set_uid(self, val):
if val is not None:
if pwd is None:
self.bus.log("pwd module not available; ignoring uid.",
level=30)
val = None
elif isinstance(val, basestring):
val = pwd.getpwnam(val)[2]
self._uid = val
uid = property(_get_uid, _set_uid,
doc="The uid under which to run. Availability: Unix.")
def _get_gid(self):
return self._gid
def _set_gid(self, val):
if val is not None:
if grp is None:
self.bus.log("grp module not available; ignoring gid.",
level=30)
val = None
elif isinstance(val, basestring):
val = grp.getgrnam(val)[2]
self._gid = val
gid = property(_get_gid, _set_gid,
doc="The gid under which to run. Availability: Unix.")
def _get_umask(self):
return self._umask
def _set_umask(self, val):
if val is not None:
try:
os.umask
except AttributeError:
self.bus.log("umask function not available; ignoring umask.",
level=30)
val = None
self._umask = val
umask = property(
_get_umask,
_set_umask,
doc="""The default permission mode for newly created files and
directories.
Usually expressed in octal format, for example, ``0644``.
Availability: Unix, Windows.
""")
def start(self):
# uid/gid
def current_ids():
"""Return the current (uid, gid) if available."""
name, group = None, None
if pwd:
name = pwd.getpwuid(os.getuid())[0]
if grp:
group = grp.getgrgid(os.getgid())[0]
return name, group
if self.finalized:
if not (self.uid is None and self.gid is None):
self.bus.log('Already running as uid: %r gid: %r' %
current_ids())
else:
if self.uid is None and self.gid is None:
if pwd or grp:
self.bus.log('uid/gid not set', level=30)
else:
self.bus.log('Started as uid: %r gid: %r' % current_ids())
if self.gid is not None:
os.setgid(self.gid)
os.setgroups([])
if self.uid is not None:
os.setuid(self.uid)
self.bus.log('Running as uid: %r gid: %r' % current_ids())
# umask
if self.finalized:
if self.umask is not None:
self.bus.log('umask already set to: %03o' % self.umask)
else:
if self.umask is None:
self.bus.log('umask not set', level=30)
else:
old_umask = os.umask(self.umask)
self.bus.log('umask old: %03o, new: %03o' %
(old_umask, self.umask))
self.finalized = True
# This is slightly higher than the priority for server.start
# in order to facilitate the most common use: starting on a low
# port (which requires root) and then dropping to another user.
start.priority = 77
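# Hedged usage sketch (added): bind a privileged port first, then drop to
# an unprivileged account; the 'www-data' names and umask are illustrative.
#
#   DropPrivileges(engine, uid='www-data', gid='www-data',
#                  umask=0o022).subscribe()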
class Daemonizer(SimplePlugin):
"""Daemonize the running script.
Use this with a Web Site Process Bus via::
Daemonizer(bus).subscribe()
When this component finishes, the process is completely decoupled from
the parent environment. Please note that when this component is used,
the return code from the parent process will still be 0 if a startup
error occurs in the forked children. Errors in the initial daemonizing
process still return proper exit codes. Therefore, if you use this
plugin to daemonize, don't use the return code as an accurate indicator
of whether the process fully started. In fact, that return code only
indicates if the process successfully finished the first fork.
"""
def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
SimplePlugin.__init__(self, bus)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.finalized = False
def start(self):
if self.finalized:
self.bus.log('Already daemonized.')
# forking has issues with threads:
# http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
# "The general problem with making fork() work in a multi-threaded
# world is what to do with all of the threads..."
# So we check for active threads:
if threading.activeCount() != 1:
self.bus.log('There are %r active threads. '
'Daemonizing now may cause strange failures.' %
threading.enumerate(), level=30)
# See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
# (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
# and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# Finish up with the current stdout/stderr
sys.stdout.flush()
sys.stderr.flush()
# Do first fork.
try:
pid = os.fork()
if pid == 0:
# This is the child process. Continue.
pass
else:
# This is the first parent. Exit, now that we've forked.
self.bus.log('Forking once.')
os._exit(0)
except OSError:
# Python raises OSError rather than returning negative numbers.
exc = sys.exc_info()[1]
sys.exit("%s: fork #1 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.setsid()
# Do second fork
try:
pid = os.fork()
if pid > 0:
self.bus.log('Forking twice.')
os._exit(0) # Exit second parent
except OSError:
exc = sys.exc_info()[1]
sys.exit("%s: fork #2 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.chdir("/")
os.umask(0)
si = open(self.stdin, "r")
so = open(self.stdout, "a+")
se = open(self.stderr, "a+")
# os.dup2(fd, fd2) will close fd2 if necessary,
# so we don't explicitly close stdin/out/err.
# See http://docs.python.org/lib/os-fd-ops.html
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
self.bus.log('Daemonized to PID: %s' % os.getpid())
self.finalized = True
start.priority = 65
class PIDFile(SimplePlugin):
"""Maintain a PID file via a WSPBus."""
def __init__(self, bus, pidfile):
SimplePlugin.__init__(self, bus)
self.pidfile = pidfile
self.finalized = False
def start(self):
pid = os.getpid()
if self.finalized:
self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
else:
open(self.pidfile, "wb").write(ntob("%s\n" % pid, 'utf8'))
self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
self.finalized = True
start.priority = 70
def exit(self):
try:
os.remove(self.pidfile)
self.bus.log('PID file removed: %r.' % self.pidfile)
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
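# Hedged usage sketch (added): maintain a PID file for the lifetime of the
# bus; the path below is illustrative.
#
#   PIDFile(engine, '/var/run/myapp.pid').subscribe()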
class PerpetualTimer(Timer):
"""A responsive subclass of threading.Timer whose run() method repeats.
Use this timer only when you really need a very interruptible timer;
this checks its 'finished' condition up to 20 times a second, which can
result in pretty high CPU usage.
"""
def __init__(self, *args, **kwargs):
"Override parent constructor to allow 'bus' to be provided."
self.bus = kwargs.pop('bus', None)
super(PerpetualTimer, self).__init__(*args, **kwargs)
def run(self):
while True:
self.finished.wait(self.interval)
if self.finished.isSet():
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log(
"Error in perpetual timer thread function %r." %
self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class BackgroundTask(SetDaemonProperty, threading.Thread):
"""A subclass of threading.Thread whose run() method repeats.
Use this class for most repeating tasks. It uses time.sleep() to wait
for each interval, which isn't very responsive; that is, even if you call
self.cancel(), you'll have to wait until the sleep() call finishes before
the thread stops. To compensate, it defaults to being daemonic, which means
it won't delay stopping the whole process.
"""
def __init__(self, interval, function, args=[], kwargs={}, bus=None):
threading.Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.running = False
self.bus = bus
# default to daemonic
self.daemon = True
def cancel(self):
self.running = False
def run(self):
self.running = True
while self.running:
time.sleep(self.interval)
if not self.running:
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log("Error in background task thread function %r."
% self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class Monitor(SimplePlugin):
"""WSPBus listener to periodically run a callback in its own thread."""
callback = None
"""The function to call at intervals."""
frequency = 60
"""The time in seconds between callback runs."""
thread = None
"""A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>`
thread.
"""
def __init__(self, bus, callback, frequency=60, name=None):
SimplePlugin.__init__(self, bus)
self.callback = callback
self.frequency = frequency
self.thread = None
self.name = name
def start(self):
"""Start our callback in its own background thread."""
if self.frequency > 0:
threadname = self.name or self.__class__.__name__
if self.thread is None:
self.thread = BackgroundTask(self.frequency, self.callback,
bus=self.bus)
self.thread.setName(threadname)
self.thread.start()
self.bus.log("Started monitor thread %r." % threadname)
else:
self.bus.log("Monitor thread %r already started." % threadname)
start.priority = 70
def stop(self):
"""Stop our callback's background task thread."""
if self.thread is None:
self.bus.log("No thread running for %s." %
self.name or self.__class__.__name__)
else:
if self.thread is not threading.currentThread():
name = self.thread.getName()
self.thread.cancel()
if not get_daemon(self.thread):
self.bus.log("Joining %r" % name)
self.thread.join()
self.bus.log("Stopped thread %r." % name)
self.thread = None
def graceful(self):
"""Stop the callback's background task thread and restart it."""
self.stop()
self.start()
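# Hedged usage sketch (added; not part of the original module): a Monitor
# that runs a callback every 10 seconds on its own BackgroundTask thread.
def _demo_monitor(bus):
    def heartbeat():
        bus.log('heartbeat')
    mon = Monitor(bus, heartbeat, frequency=10, name='Heartbeat')
    mon.subscribe()  # started and stopped along with the bus
    return mon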
class Autoreloader(Monitor):
"""Monitor which re-executes the process when files change.
This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
if any of the files it monitors changes (or is deleted). By default, the
autoreloader monitors all imported modules; you can add to the
set by adding to ``autoreload.files``::
cherrypy.engine.autoreload.files.add(myFile)
If there are imported files you do *not* wish to monitor, you can
adjust the ``match`` attribute, a regular expression. For example,
to stop monitoring cherrypy itself::
cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'
Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
the autoreload plugin takes a ``frequency`` argument. The default is
1 second; that is, the autoreloader will examine files once each second.
"""
files = None
"""The set of files to poll for modifications."""
frequency = 1
"""The interval in seconds at which to poll for modified files."""
match = '.*'
"""A regular expression by which to match filenames."""
def __init__(self, bus, frequency=1, match='.*'):
self.mtimes = {}
self.files = set()
self.match = match
Monitor.__init__(self, bus, self.run, frequency)
def start(self):
"""Start our own background task thread for self.run."""
if self.thread is None:
self.mtimes = {}
Monitor.start(self)
start.priority = 70
def sysfiles(self):
"""Return a Set of sys.modules filenames to monitor."""
files = set()
for k, m in list(sys.modules.items()):
if re.match(self.match, k):
if (
hasattr(m, '__loader__') and
hasattr(m.__loader__, 'archive')
):
f = m.__loader__.archive
else:
f = getattr(m, '__file__', None)
if f is not None and not os.path.isabs(f):
# ensure absolute paths so a os.chdir() in the app
# doesn't break me
f = os.path.normpath(
os.path.join(_module__file__base, f))
files.add(f)
return files
def run(self):
"""Reload the process if registered files have been modified."""
for filename in self.sysfiles() | self.files:
if filename:
if filename.endswith('.pyc'):
filename = filename[:-1]
oldtime = self.mtimes.get(filename, 0)
if oldtime is None:
# Module with no .py file. Skip it.
continue
try:
mtime = os.stat(filename).st_mtime
except OSError:
# Either a module with no .py file, or it's been deleted.
mtime = None
if filename not in self.mtimes:
# If a module has no .py file, this will be None.
self.mtimes[filename] = mtime
else:
if mtime is None or mtime > oldtime:
# The file has been deleted or modified.
self.bus.log("Restarting because %s changed." %
filename)
self.thread.cancel()
self.bus.log("Stopped thread %r." %
self.thread.getName())
self.bus.restart()
return
class ThreadManager(SimplePlugin):
"""Manager for HTTP request threads.
If you have control over thread creation and destruction, publish to
the 'acquire_thread' and 'release_thread' channels (for each thread).
This will register/unregister the current thread and publish to
'start_thread' and 'stop_thread' listeners in the bus as needed.
If threads are created and destroyed by code you do not control
(e.g., Apache), then, at the beginning of every HTTP request,
publish to 'acquire_thread' only. You should not publish to
'release_thread' in this case, since you do not know whether
the thread will be re-used or not. The bus will call
'stop_thread' listeners for you when it stops.
"""
threads = None
"""A map of {thread ident: index number} pairs."""
def __init__(self, bus):
self.threads = {}
SimplePlugin.__init__(self, bus)
self.bus.listeners.setdefault('acquire_thread', set())
self.bus.listeners.setdefault('start_thread', set())
self.bus.listeners.setdefault('release_thread', set())
self.bus.listeners.setdefault('stop_thread', set())
def acquire_thread(self):
"""Run 'start_thread' listeners for the current thread.
If the current thread has already been seen, any 'start_thread'
listeners will not be run again.
"""
thread_ident = get_thread_ident()
if thread_ident not in self.threads:
# We can't just use get_ident as the thread ID
# because some platforms reuse thread ID's.
i = len(self.threads) + 1
self.threads[thread_ident] = i
self.bus.publish('start_thread', i)
def release_thread(self):
"""Release the current thread and run 'stop_thread' listeners."""
thread_ident = get_thread_ident()
i = self.threads.pop(thread_ident, None)
if i is not None:
self.bus.publish('stop_thread', i)
def stop(self):
"""Release all threads and run all 'stop_thread' listeners."""
for thread_ident, i in self.threads.items():
self.bus.publish('stop_thread', i)
self.threads.clear()
graceful = stop
|
LEPT-Development/android_kernel_lge_msm8916-old
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
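# Example invocations (added for clarity; see the usage string above):
#   perf script -s sctop.py            # all comms, refresh every 3s
#   perf script -s sctop.py 5          # all comms, refresh every 5s
#   perf script -s sctop.py sshd 5     # only syscalls made by 'sshd'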
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
smasala/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py
|
119
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket.http_header_util import quote_if_necessary
_available_processors = {}
_compression_extension_names = []
class ExtensionProcessorInterface(object):
def __init__(self, request):
self._request = request
self._active = True
def request(self):
return self._request
def name(self):
return None
def check_consistency_with_other_processors(self, processors):
pass
def set_active(self, active):
self._active = active
def is_active(self):
return self._active
def _get_extension_response_internal(self):
return None
def get_extension_response(self):
if self._active:
response = self._get_extension_response_internal()
if response is None:
self._active = False
return response
return None
def _setup_stream_options_internal(self, stream_options):
pass
def setup_stream_options(self, stream_options):
if self._active:
self._setup_stream_options_internal(stream_options)
def _log_outgoing_compression_ratio(
logger, original_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
if original_bytes != 0:
ratio = float(filtered_bytes) / original_bytes
logger.debug('Outgoing compression ratio: %f (average: %f)' %
(ratio, average_ratio))
def _log_incoming_compression_ratio(
logger, received_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
if filtered_bytes != 0:
ratio = float(received_bytes) / filtered_bytes
logger.debug('Incoming compression ratio: %f (average: %f)' %
(ratio, average_ratio))
def _parse_window_bits(bits):
"""Return parsed integer value iff the given string conforms to the
grammar of the window bits extension parameters.
"""
if bits is None:
raise ValueError('Value is required')
# For non integer values such as "10.0", ValueError will be raised.
int_bits = int(bits)
# First condition is to drop leading zero case e.g. "08".
if bits != str(int_bits) or int_bits < 8 or int_bits > 15:
raise ValueError('Invalid value: %r' % bits)
return int_bits
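# Examples of the validation above (added for clarity):
#   _parse_window_bits('10')  ->  10
#   _parse_window_bits('08')  ->  ValueError (leading zero)
#   _parse_window_bits('16')  ->  ValueError (outside the 8..15 range)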
class _AverageRatioCalculator(object):
"""Stores total bytes of original and result data, and calculates average
result / original ratio.
"""
def __init__(self):
self._total_original_bytes = 0
self._total_result_bytes = 0
def add_original_bytes(self, value):
self._total_original_bytes += value
def add_result_bytes(self, value):
self._total_result_bytes += value
def get_average_ratio(self):
if self._total_original_bytes != 0:
return (float(self._total_result_bytes) /
self._total_original_bytes)
else:
return float('inf')
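# Illustration (added): after 1000 original bytes were compressed to 400
# result bytes, the running average ratio is 0.4.
#
#   calc = _AverageRatioCalculator()
#   calc.add_original_bytes(1000)
#   calc.add_result_bytes(400)
#   assert calc.get_average_ratio() == 0.4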
class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
"""deflate-frame extension processor.
Specification:
http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
"""
_WINDOW_BITS_PARAM = 'max_window_bits'
_NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
def __init__(self, request):
ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._response_window_bits = None
self._response_no_context_takeover = False
self._bfinal = False
# Calculates
# (Total outgoing bytes supplied to this filter) /
# (Total bytes sent to the network after applying this filter)
self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
# Calculates
# (Total bytes received from the network) /
# (Total incoming bytes obtained after applying this filter)
self._incoming_average_ratio_calculator = _AverageRatioCalculator()
def name(self):
return common.DEFLATE_FRAME_EXTENSION
def _get_extension_response_internal(self):
# Any unknown parameter will be just ignored.
window_bits = None
if self._request.has_parameter(self._WINDOW_BITS_PARAM):
window_bits = self._request.get_parameter_value(
self._WINDOW_BITS_PARAM)
try:
window_bits = _parse_window_bits(window_bits)
except ValueError, e:
return None
no_context_takeover = self._request.has_parameter(
self._NO_CONTEXT_TAKEOVER_PARAM)
if (no_context_takeover and
self._request.get_parameter_value(
self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
return None
self._rfc1979_deflater = util._RFC1979Deflater(
window_bits, no_context_takeover)
self._rfc1979_inflater = util._RFC1979Inflater()
self._compress_outgoing = True
response = common.ExtensionParameter(self._request.name())
if self._response_window_bits is not None:
response.add_parameter(
self._WINDOW_BITS_PARAM, str(self._response_window_bits))
if self._response_no_context_takeover:
response.add_parameter(
self._NO_CONTEXT_TAKEOVER_PARAM, None)
self._logger.debug(
'Enable %s extension ('
'request: window_bits=%s; no_context_takeover=%r, '
'response: window_bits=%s; no_context_takeover=%r)' %
(self._request.name(),
window_bits,
no_context_takeover,
self._response_window_bits,
self._response_no_context_takeover))
return response
def _setup_stream_options_internal(self, stream_options):
class _OutgoingFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._outgoing_filter(frame)
class _IncomingFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._incoming_filter(frame)
stream_options.outgoing_frame_filters.append(
_OutgoingFilter(self))
stream_options.incoming_frame_filters.insert(
0, _IncomingFilter(self))
def set_response_window_bits(self, value):
self._response_window_bits = value
def set_response_no_context_takeover(self, value):
self._response_no_context_takeover = value
def set_bfinal(self, value):
self._bfinal = value
def enable_outgoing_compression(self):
self._compress_outgoing = True
def disable_outgoing_compression(self):
self._compress_outgoing = False
def _outgoing_filter(self, frame):
"""Transform outgoing frames. This method is called only by
an _OutgoingFilter instance.
"""
original_payload_size = len(frame.payload)
self._outgoing_average_ratio_calculator.add_original_bytes(
original_payload_size)
if (not self._compress_outgoing or
common.is_control_opcode(frame.opcode)):
self._outgoing_average_ratio_calculator.add_result_bytes(
original_payload_size)
return
frame.payload = self._rfc1979_deflater.filter(
frame.payload, bfinal=self._bfinal)
frame.rsv1 = 1
filtered_payload_size = len(frame.payload)
self._outgoing_average_ratio_calculator.add_result_bytes(
filtered_payload_size)
_log_outgoing_compression_ratio(
self._logger,
original_payload_size,
filtered_payload_size,
self._outgoing_average_ratio_calculator.get_average_ratio())
def _incoming_filter(self, frame):
"""Transform incoming frames. This method is called only by
an _IncomingFilter instance.
"""
received_payload_size = len(frame.payload)
self._incoming_average_ratio_calculator.add_result_bytes(
received_payload_size)
if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
self._incoming_average_ratio_calculator.add_original_bytes(
received_payload_size)
return
frame.payload = self._rfc1979_inflater.filter(frame.payload)
frame.rsv1 = 0
filtered_payload_size = len(frame.payload)
self._incoming_average_ratio_calculator.add_original_bytes(
filtered_payload_size)
_log_incoming_compression_ratio(
self._logger,
received_payload_size,
filtered_payload_size,
self._incoming_average_ratio_calculator.get_average_ratio())
_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
_compression_extension_names.append(common.DEFLATE_FRAME_EXTENSION)
_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
_compression_extension_names.append(common.X_WEBKIT_DEFLATE_FRAME_EXTENSION)
def _parse_compression_method(data):
"""Parses the value of "method" extension parameter."""
return common.parse_extensions(data, allow_quoted_string=True)
def _create_accepted_method_desc(method_name, method_params):
"""Creates accepted-method-desc from given method name and parameters"""
extension = common.ExtensionParameter(method_name)
for name, value in method_params:
extension.add_parameter(name, value)
return common.format_extension(extension)
class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
"""Base class for perframe-compress and permessage-compress extension."""
_METHOD_PARAM = 'method'
def __init__(self, request):
ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._compression_method_name = None
self._compression_processor = None
self._compression_processor_hook = None
def name(self):
return ''
def _lookup_compression_processor(self, method_desc):
return None
def _get_compression_processor_response(self):
"""Looks up the compression processor based on the self._request and
returns the compression processor's response.
"""
method_list = self._request.get_parameter_value(self._METHOD_PARAM)
if method_list is None:
return None
methods = _parse_compression_method(method_list)
if methods is None:
return None
compression_processor = None
# The current implementation tries only the first method that matches a
# supported algorithm. Subsequent methods aren't tried even if the
# first one is rejected.
# TODO(bashi): Need to clarify this behavior.
for method_desc in methods:
compression_processor = self._lookup_compression_processor(
method_desc)
if compression_processor is not None:
self._compression_method_name = method_desc.name()
break
if compression_processor is None:
return None
if self._compression_processor_hook:
self._compression_processor_hook(compression_processor)
processor_response = compression_processor.get_extension_response()
if processor_response is None:
return None
self._compression_processor = compression_processor
return processor_response
def _get_extension_response_internal(self):
processor_response = self._get_compression_processor_response()
if processor_response is None:
return None
response = common.ExtensionParameter(self._request.name())
accepted_method_desc = _create_accepted_method_desc(
self._compression_method_name,
processor_response.get_parameters())
response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
self._logger.debug(
'Enable %s extension (method: %s)' %
(self._request.name(), self._compression_method_name))
return response
def _setup_stream_options_internal(self, stream_options):
if self._compression_processor is None:
return
self._compression_processor.setup_stream_options(stream_options)
def set_compression_processor_hook(self, hook):
self._compression_processor_hook = hook
def get_compression_processor(self):
return self._compression_processor
class PerFrameCompressExtensionProcessor(CompressionExtensionProcessorBase):
"""perframe-compress processor.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-perframe-compression
"""
_DEFLATE_METHOD = 'deflate'
def __init__(self, request):
CompressionExtensionProcessorBase.__init__(self, request)
def name(self):
return common.PERFRAME_COMPRESSION_EXTENSION
def _lookup_compression_processor(self, method_desc):
if method_desc.name() == self._DEFLATE_METHOD:
return DeflateFrameExtensionProcessor(method_desc)
return None
_available_processors[common.PERFRAME_COMPRESSION_EXTENSION] = (
PerFrameCompressExtensionProcessor)
_compression_extension_names.append(common.PERFRAME_COMPRESSION_EXTENSION)
class PerMessageDeflateExtensionProcessor(ExtensionProcessorInterface):
"""permessage-deflate extension processor. It's also used for
permessage-compress extension when the deflate method is chosen.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-08
"""
_S2C_MAX_WINDOW_BITS_PARAM = 's2c_max_window_bits'
_S2C_NO_CONTEXT_TAKEOVER_PARAM = 's2c_no_context_takeover'
_C2S_MAX_WINDOW_BITS_PARAM = 'c2s_max_window_bits'
_C2S_NO_CONTEXT_TAKEOVER_PARAM = 'c2s_no_context_takeover'
def __init__(self, request, draft08=True):
"""Construct PerMessageDeflateExtensionProcessor
Args:
draft08: Follow the constraints on the parameters that were not
specified for permessage-compress but are specified for
permessage-deflate as on
draft-ietf-hybi-permessage-compression-08.
"""
ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._c2s_max_window_bits = None
self._c2s_no_context_takeover = False
self._draft08 = draft08
def name(self):
return 'deflate'
def _get_extension_response_internal(self):
if self._draft08:
for name in self._request.get_parameter_names():
if name not in [self._S2C_MAX_WINDOW_BITS_PARAM,
self._S2C_NO_CONTEXT_TAKEOVER_PARAM,
self._C2S_MAX_WINDOW_BITS_PARAM]:
self._logger.debug('Unknown parameter: %r', name)
return None
else:
# Any unknown parameter will be just ignored.
pass
s2c_max_window_bits = None
if self._request.has_parameter(self._S2C_MAX_WINDOW_BITS_PARAM):
s2c_max_window_bits = self._request.get_parameter_value(
self._S2C_MAX_WINDOW_BITS_PARAM)
try:
s2c_max_window_bits = _parse_window_bits(s2c_max_window_bits)
except ValueError, e:
self._logger.debug('Bad %s parameter: %r',
self._S2C_MAX_WINDOW_BITS_PARAM,
e)
return None
s2c_no_context_takeover = self._request.has_parameter(
self._S2C_NO_CONTEXT_TAKEOVER_PARAM)
if (s2c_no_context_takeover and
self._request.get_parameter_value(
self._S2C_NO_CONTEXT_TAKEOVER_PARAM) is not None):
self._logger.debug('%s parameter must not have a value: %r',
self._S2C_NO_CONTEXT_TAKEOVER_PARAM,
s2c_no_context_takeover)
return None
c2s_max_window_bits = self._request.has_parameter(
self._C2S_MAX_WINDOW_BITS_PARAM)
if (self._draft08 and
c2s_max_window_bits and
self._request.get_parameter_value(
self._C2S_MAX_WINDOW_BITS_PARAM) is not None):
self._logger.debug('%s parameter must not have a value in a '
'client\'s opening handshake: %r',
self._C2S_MAX_WINDOW_BITS_PARAM,
c2s_max_window_bits)
return None
self._rfc1979_deflater = util._RFC1979Deflater(
s2c_max_window_bits, s2c_no_context_takeover)
self._rfc1979_inflater = util._RFC1979Inflater()
self._framer = _PerMessageDeflateFramer(
s2c_max_window_bits, s2c_no_context_takeover)
self._framer.set_bfinal(False)
self._framer.set_compress_outgoing_enabled(True)
response = common.ExtensionParameter(self._request.name())
if s2c_max_window_bits is not None:
response.add_parameter(
self._S2C_MAX_WINDOW_BITS_PARAM, str(s2c_max_window_bits))
if s2c_no_context_takeover:
response.add_parameter(
self._S2C_NO_CONTEXT_TAKEOVER_PARAM, None)
if self._c2s_max_window_bits is not None:
if self._draft08 and c2s_max_window_bits:
self._logger.debug('Processor is configured to use %s but '
'the client cannot accept it',
self._C2S_MAX_WINDOW_BITS_PARAM)
return None
response.add_parameter(
self._C2S_MAX_WINDOW_BITS_PARAM,
str(self._c2s_max_window_bits))
if self._c2s_no_context_takeover:
response.add_parameter(
self._C2S_NO_CONTEXT_TAKEOVER_PARAM, None)
self._logger.debug(
'Enable %s extension ('
'request: s2c_max_window_bits=%s; s2c_no_context_takeover=%r, '
'response: c2s_max_window_bits=%s; c2s_no_context_takeover=%r)' %
(self._request.name(),
s2c_max_window_bits,
s2c_no_context_takeover,
self._c2s_max_window_bits,
self._c2s_no_context_takeover))
return response
def _setup_stream_options_internal(self, stream_options):
self._framer.setup_stream_options(stream_options)
def set_c2s_max_window_bits(self, value):
"""If this option is specified, this class adds the c2s_max_window_bits
extension parameter to the handshake response, but doesn't reduce the
LZ77 sliding window size of its inflater. I.e., you can use this for
testing a client implementation, but it cannot reduce the memory usage
of this class.
If this method has been called with True and an offer without the
c2s_max_window_bits extension parameter is received,
- (When processing the permessage-deflate extension) this processor
declines the request.
- (When processing the permessage-compress extension) this processor
accepts the request.
"""
self._c2s_max_window_bits = value
def set_c2s_no_context_takeover(self, value):
"""If this option is specified, this class adds the
c2s_no_context_takeover extension parameter to the handshake response,
but doesn't reset the inflater for each message. I.e., you can use this
for testing a client implementation, but it cannot reduce the memory
usage of this class.
"""
self._c2s_no_context_takeover = value
def set_bfinal(self, value):
self._framer.set_bfinal(value)
def enable_outgoing_compression(self):
self._framer.set_compress_outgoing_enabled(True)
def disable_outgoing_compression(self):
self._framer.set_compress_outgoing_enabled(False)
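# Hedged configuration sketch (added): exercising a client's handling of
# the c2s_* parameters via the test-only setters above; 'processor' is a
# PerMessageDeflateExtensionProcessor built from a parsed extension
# request.
#
#   processor.set_c2s_max_window_bits(10)
#   processor.set_c2s_no_context_takeover(True)
#   response = processor.get_extension_response()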
class _PerMessageDeflateFramer(object):
"""A framer for extensions with per-message DEFLATE feature."""
def __init__(self, deflate_max_window_bits, deflate_no_context_takeover):
self._logger = util.get_class_logger(self)
self._rfc1979_deflater = util._RFC1979Deflater(
deflate_max_window_bits, deflate_no_context_takeover)
self._rfc1979_inflater = util._RFC1979Inflater()
self._bfinal = False
self._compress_outgoing_enabled = False
# True if a message is fragmented and compression is ongoing.
self._compress_ongoing = False
# Calculates
# (Total outgoing bytes supplied to this filter) /
# (Total bytes sent to the network after applying this filter)
self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
# Calculates
# (Total bytes received from the network) /
# (Total incoming bytes obtained after applying this filter)
self._incoming_average_ratio_calculator = _AverageRatioCalculator()
def set_bfinal(self, value):
self._bfinal = value
def set_compress_outgoing_enabled(self, value):
self._compress_outgoing_enabled = value
def _process_incoming_message(self, message, decompress):
if not decompress:
return message
received_payload_size = len(message)
self._incoming_average_ratio_calculator.add_result_bytes(
received_payload_size)
message = self._rfc1979_inflater.filter(message)
filtered_payload_size = len(message)
self._incoming_average_ratio_calculator.add_original_bytes(
filtered_payload_size)
_log_incoming_compression_ratio(
self._logger,
received_payload_size,
filtered_payload_size,
self._incoming_average_ratio_calculator.get_average_ratio())
return message
def _process_outgoing_message(self, message, end, binary):
if not binary:
message = message.encode('utf-8')
if not self._compress_outgoing_enabled:
return message
original_payload_size = len(message)
self._outgoing_average_ratio_calculator.add_original_bytes(
original_payload_size)
message = self._rfc1979_deflater.filter(
message, flush=end, bfinal=self._bfinal)
filtered_payload_size = len(message)
self._outgoing_average_ratio_calculator.add_result_bytes(
filtered_payload_size)
_log_outgoing_compression_ratio(
self._logger,
original_payload_size,
filtered_payload_size,
self._outgoing_average_ratio_calculator.get_average_ratio())
if not self._compress_ongoing:
self._outgoing_frame_filter.set_compression_bit()
self._compress_ongoing = not end
return message
def _process_incoming_frame(self, frame):
if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
self._incoming_message_filter.decompress_next_message()
frame.rsv1 = 0
def _process_outgoing_frame(self, frame, compression_bit):
if (not compression_bit or
common.is_control_opcode(frame.opcode)):
return
frame.rsv1 = 1
def setup_stream_options(self, stream_options):
"""Creates filters and sets them to the StreamOptions."""
class _OutgoingMessageFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, message, end=True, binary=False):
return self._parent._process_outgoing_message(
message, end, binary)
class _IncomingMessageFilter(object):
def __init__(self, parent):
self._parent = parent
self._decompress_next_message = False
def decompress_next_message(self):
self._decompress_next_message = True
def filter(self, message):
message = self._parent._process_incoming_message(
message, self._decompress_next_message)
self._decompress_next_message = False
return message
self._outgoing_message_filter = _OutgoingMessageFilter(self)
self._incoming_message_filter = _IncomingMessageFilter(self)
stream_options.outgoing_message_filters.append(
self._outgoing_message_filter)
stream_options.incoming_message_filters.append(
self._incoming_message_filter)
class _OutgoingFrameFilter(object):
def __init__(self, parent):
self._parent = parent
self._set_compression_bit = False
def set_compression_bit(self):
self._set_compression_bit = True
def filter(self, frame):
self._parent._process_outgoing_frame(
frame, self._set_compression_bit)
self._set_compression_bit = False
class _IncomingFrameFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._process_incoming_frame(frame)
self._outgoing_frame_filter = _OutgoingFrameFilter(self)
self._incoming_frame_filter = _IncomingFrameFilter(self)
stream_options.outgoing_frame_filters.append(
self._outgoing_frame_filter)
stream_options.incoming_frame_filters.append(
self._incoming_frame_filter)
stream_options.encode_text_message_to_utf8 = False
_available_processors[common.PERMESSAGE_DEFLATE_EXTENSION] = (
PerMessageDeflateExtensionProcessor)
# TODO(tyoshino): Reorganize class names.
_compression_extension_names.append('deflate')
class PerMessageCompressExtensionProcessor(
CompressionExtensionProcessorBase):
"""permessage-compress extension processor.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
"""
_DEFLATE_METHOD = 'deflate'
def __init__(self, request):
CompressionExtensionProcessorBase.__init__(self, request)
def name(self):
return common.PERMESSAGE_COMPRESSION_EXTENSION
def _lookup_compression_processor(self, method_desc):
if method_desc.name() == self._DEFLATE_METHOD:
return PerMessageDeflateExtensionProcessor(method_desc, False)
return None
_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
PerMessageCompressExtensionProcessor)
_compression_extension_names.append(common.PERMESSAGE_COMPRESSION_EXTENSION)
class MuxExtensionProcessor(ExtensionProcessorInterface):
"""WebSocket multiplexing extension processor."""
_QUOTA_PARAM = 'quota'
def __init__(self, request):
ExtensionProcessorInterface.__init__(self, request)
self._quota = 0
self._extensions = []
def name(self):
return common.MUX_EXTENSION
def check_consistency_with_other_processors(self, processors):
before_mux = True
for processor in processors:
name = processor.name()
if name == self.name():
before_mux = False
continue
if not processor.is_active():
continue
if before_mux:
# Mux extension cannot be used after extensions
# that depend on frame boundary, extension data field, or any
# reserved bits which are attributed to each frame.
if (name == common.PERFRAME_COMPRESSION_EXTENSION or
name == common.DEFLATE_FRAME_EXTENSION or
name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
self.set_active(False)
return
else:
# Mux extension should not be applied before any history-based
# compression extension.
if (name == common.PERFRAME_COMPRESSION_EXTENSION or
name == common.DEFLATE_FRAME_EXTENSION or
name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION or
name == common.PERMESSAGE_COMPRESSION_EXTENSION or
name == common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION):
self.set_active(False)
return
def _get_extension_response_internal(self):
self._active = False
quota = self._request.get_parameter_value(self._QUOTA_PARAM)
if quota is not None:
try:
quota = int(quota)
except ValueError, e:
return None
if quota < 0 or quota >= 2 ** 32:
return None
self._quota = quota
self._active = True
return common.ExtensionParameter(common.MUX_EXTENSION)
def _setup_stream_options_internal(self, stream_options):
pass
def set_quota(self, quota):
self._quota = quota
def quota(self):
return self._quota
def set_extensions(self, extensions):
self._extensions = extensions
def extensions(self):
return self._extensions
_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
def get_extension_processor(extension_request):
processor_class = _available_processors.get(extension_request.name())
if processor_class is None:
return None
return processor_class(extension_request)
def is_compression_extension(extension_name):
return extension_name in _compression_extension_names
# vi:sts=4 sw=4 et
|
shakamunyi/neutron
|
refs/heads/master
|
neutron/tests/unit/agent/linux/test_iptables_manager.py
|
5
|
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import mock
from oslo_config import cfg
import testtools
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_manager
from neutron.common import exceptions as n_exc
from neutron.tests import base
from neutron.tests import tools
IPTABLES_ARG = {'bn': iptables_manager.binary_name,
'snat_out_comment': ic.SNAT_OUT,
'filter_rules': ''}
NAT_TEMPLATE = ('# Generated by iptables_manager\n'
'*nat\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
'[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
'[0:0] -A %(bn)s-snat -j '
'%(bn)s-float-snat\n'
'COMMIT\n'
'# Completed by iptables_manager\n')
NAT_DUMP = NAT_TEMPLATE % IPTABLES_ARG
FILTER_TEMPLATE = ('# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'COMMIT\n'
'# Completed by iptables_manager\n')
FILTER_DUMP = FILTER_TEMPLATE % IPTABLES_ARG
FILTER_WITH_RULES_TEMPLATE = (
'# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-filter - [0:0]\n'
':%(bn)s-local - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'%(filter_rules)s'
'COMMIT\n'
'# Completed by iptables_manager\n')
COMMENTED_NAT_DUMP = (
'# Generated by iptables_manager\n'
'*nat\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
'[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat '
'-m comment --comment "%(snat_out_comment)s"\n'
'[0:0] -A %(bn)s-snat -j '
'%(bn)s-float-snat\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % IPTABLES_ARG)
TRAFFIC_COUNTERS_DUMP = (
'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n'
' pkts bytes target prot opt in out source'
' destination \n'
' 400 65901 chain1 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n'
' 400 65901 chain2 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n')
class IptablesTestCase(base.BaseTestCase):
def test_get_binary_name_in_unittest(self):
# Corresponds to sys.argv content when running python -m unittest class
with mock.patch('sys.argv', ['python -m unittest', 'class']):
binary_name = iptables_manager.get_binary_name()
self.assertEqual('python_-m_unitte', binary_name)
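# Illustration (added, inferred from the assertion above): get_binary_name()
# takes the basename of sys.argv[0], replaces spaces with underscores and
# truncates to 16 characters, so 'python -m unittest' -> 'python_-m_unitte'.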
class IptablesCommentsTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesCommentsTestCase, self).setUp()
cfg.CONF.set_override('comment_iptables_rules', True, 'AGENT')
self.iptables = iptables_manager.IptablesManager()
self.execute = mock.patch.object(self.iptables, "execute").start()
def test_comments_short_enough(self):
for attr in dir(ic):
if not attr.startswith('__') and len(getattr(ic, attr)) > 255:
self.fail("Iptables comment %s is longer than 255 characters."
% attr)
def test_add_filter_rule(self):
iptables_args = {}
iptables_args.update(IPTABLES_ARG)
filter_rules = ('[0:0] -A %(bn)s-filter -j DROP\n'
'[0:0] -A %(bn)s-INPUT -s 0/0 -d 192.168.0.2 -j '
'%(bn)s-filter\n' % iptables_args)
iptables_args['filter_rules'] = filter_rules
filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args
raw_dump = _generate_raw_dump(IPTABLES_ARG)
mangle_dump = _generate_mangle_dump(IPTABLES_ARG)
expected_calls_and_values = [
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(raw_dump + COMMENTED_NAT_DUMP +
mangle_dump + filter_dump_mod),
run_as_root=True),
None),
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(raw_dump + COMMENTED_NAT_DUMP +
mangle_dump + FILTER_DUMP),
run_as_root=True
),
None),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.ipv4['filter'].add_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].add_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter' % IPTABLES_ARG)
self.iptables.apply()
self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].remove_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter'
% IPTABLES_ARG)
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _generate_mangle_dump(iptables_args):
return ('# Generated by iptables_manager\n'
'*mangle\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-mark - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A %(bn)s-PREROUTING -j %(bn)s-mark\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % iptables_args)
def _generate_raw_dump(iptables_args):
return ('# Generated by iptables_manager\n'
'*raw\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % iptables_args)
MANGLE_DUMP = _generate_mangle_dump(IPTABLES_ARG)
RAW_DUMP = _generate_raw_dump(IPTABLES_ARG)
class IptablesManagerStateFulTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesManagerStateFulTestCase, self).setUp()
cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
self.iptables = iptables_manager.IptablesManager()
self.execute = mock.patch.object(self.iptables, "execute").start()
def test_binary_name(self):
expected = os.path.basename(sys.argv[0])[:16]
self.assertEqual(expected, iptables_manager.binary_name)
def test_get_chain_name(self):
name = '0123456789' * 5
# 28 chars is the maximum length of an iptables chain name.
self.assertEqual(iptables_manager.get_chain_name(name, wrap=False),
name[:28])
# 11 chars is the maximum length of an iptables_manager chain name
# once binary_name is prepended.
self.assertEqual(iptables_manager.get_chain_name(name, wrap=True),
name[:11])
def test_defer_apply_with_exception(self):
self.iptables._apply = mock.Mock(side_effect=Exception)
with testtools.ExpectedException(n_exc.IpTablesApplyException):
with self.iptables.defer_apply():
pass
def _extend_with_ip6tables_filter(self, expected_calls, filter_dump):
expected_calls.insert(2, (
mock.call(['ip6tables-save', '-c'],
run_as_root=True),
''))
expected_calls.insert(3, (
mock.call(['ip6tables-restore', '-c'],
process_input=filter_dump,
run_as_root=True),
None))
expected_calls.extend([
(mock.call(['ip6tables-save', '-c'],
run_as_root=True),
''),
(mock.call(['ip6tables-restore', '-c'],
process_input=filter_dump,
run_as_root=True),
None)])
def _test_add_and_remove_chain_custom_binary_name_helper(self, use_ipv6):
bn = ("abcdef" * 5)
self.iptables = iptables_manager.IptablesManager(
binary_name=bn,
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
iptables_args = {'bn': bn[:16], 'filter_rules': ''}
filter_dump = FILTER_WITH_RULES_TEMPLATE % iptables_args
filter_dump_ipv6 = FILTER_TEMPLATE % iptables_args
filter_dump_mod = filter_dump
nat_dump = NAT_TEMPLATE % iptables_args
raw_dump = _generate_raw_dump(iptables_args)
mangle_dump = _generate_mangle_dump(iptables_args)
expected_calls_and_values = [
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(raw_dump + nat_dump + mangle_dump +
filter_dump_mod),
run_as_root=True),
None),
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(raw_dump + nat_dump + mangle_dump +
filter_dump),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(expected_calls_and_values,
filter_dump_ipv6)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.apply()
self.iptables.ipv4['filter'].empty_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_and_remove_chain_custom_binary_name(self):
self._test_add_and_remove_chain_custom_binary_name_helper(False)
def test_add_and_remove_chain_custom_binary_name_with_ipv6(self):
self._test_add_and_remove_chain_custom_binary_name_helper(True)
def _test_empty_chain_custom_binary_name_helper(self, use_ipv6):
bn = ("abcdef" * 5)[:16]
self.iptables = iptables_manager.IptablesManager(
binary_name=bn,
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
iptables_args = {'bn': bn}
filter_dump = FILTER_TEMPLATE % iptables_args
filter_rules = ('[0:0] -A %(bn)s-filter -s 0/0 -d 192.168.0.2\n'
% iptables_args)
iptables_args['filter_rules'] = filter_rules
filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args
nat_dump = NAT_TEMPLATE % iptables_args
raw_dump = _generate_raw_dump(iptables_args)
mangle_dump = _generate_mangle_dump(iptables_args)
expected_calls_and_values = [
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(raw_dump + nat_dump + mangle_dump +
filter_dump_mod),
run_as_root=True),
None),
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(raw_dump + nat_dump + mangle_dump +
filter_dump),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(expected_calls_and_values,
filter_dump)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.ipv4['filter'].add_rule('filter',
'-s 0/0 -d 192.168.0.2')
self.iptables.apply()
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_empty_chain_custom_binary_name(self):
self._test_empty_chain_custom_binary_name_helper(False)
def test_empty_chain_custom_binary_name_with_ipv6(self):
self._test_empty_chain_custom_binary_name_helper(True)
def _test_add_and_remove_chain_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % IPTABLES_ARG
expected_calls_and_values = [
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + NAT_DUMP + MANGLE_DUMP +
filter_dump_mod),
run_as_root=True),
None),
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + NAT_DUMP + MANGLE_DUMP +
FILTER_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(expected_calls_and_values,
FILTER_DUMP)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.apply()
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_and_remove_chain(self):
self._test_add_and_remove_chain_helper(False)
def test_add_and_remove_chain_with_ipv6(self):
self._test_add_and_remove_chain_helper(True)
def _test_add_filter_rule_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
iptables_args = {}
iptables_args.update(IPTABLES_ARG)
filter_rules = ('[0:0] -A %(bn)s-filter -j DROP\n'
'[0:0] -A %(bn)s-INPUT -s 0/0 -d 192.168.0.2 -j '
'%(bn)s-filter\n' % iptables_args)
iptables_args['filter_rules'] = filter_rules
filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args
expected_calls_and_values = [
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + NAT_DUMP + MANGLE_DUMP +
filter_dump_mod),
run_as_root=True),
None),
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + NAT_DUMP + MANGLE_DUMP +
FILTER_DUMP),
run_as_root=True
),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(expected_calls_and_values,
FILTER_DUMP)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.ipv4['filter'].add_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].add_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter' % IPTABLES_ARG)
self.iptables.apply()
self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].remove_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter'
% IPTABLES_ARG)
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_filter_rule(self):
self._test_add_filter_rule_helper(False)
def test_add_filter_rule_with_ipv6(self):
self._test_add_filter_rule_helper(True)
def _test_rule_with_wrap_target_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
name = '0123456789' * 5
wrap = "%s-%s" % (iptables_manager.binary_name,
iptables_manager.get_chain_name(name))
iptables_args = {'bn': iptables_manager.binary_name,
'wrap': wrap}
filter_dump_mod = ('# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(wrap)s - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'[0:0] -A %(bn)s-INPUT -s 0/0 -d 192.168.0.2 -j '
'%(wrap)s\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% iptables_args)
expected_calls_and_values = [
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + NAT_DUMP + MANGLE_DUMP +
filter_dump_mod),
run_as_root=True),
None),
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + NAT_DUMP + MANGLE_DUMP +
FILTER_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(expected_calls_and_values,
FILTER_DUMP)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain(name)
self.iptables.ipv4['filter'].add_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' $%s' % name)
self.iptables.apply()
self.iptables.ipv4['filter'].remove_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' $%s' % name)
self.iptables.ipv4['filter'].remove_chain(name)
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_rule_with_wrap_target(self):
self._test_rule_with_wrap_target_helper(False)
def test_rule_with_wrap_target_with_ipv6(self):
self._test_rule_with_wrap_target_helper(True)
def _test_add_mangle_rule_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
mangle_dump_mod = (
'# Generated by iptables_manager\n'
'*mangle\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-mangle - [0:0]\n'
':%(bn)s-mark - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A %(bn)s-PREROUTING -j %(bn)s-mark\n'
'[0:0] -A %(bn)s-PREROUTING -j MARK --set-xmark 0x1/0xffffffff\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% IPTABLES_ARG)
expected_calls_and_values = [
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + NAT_DUMP + mangle_dump_mod +
FILTER_DUMP),
run_as_root=True),
None),
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + NAT_DUMP + MANGLE_DUMP +
FILTER_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(expected_calls_and_values,
FILTER_DUMP)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['mangle'].add_chain('mangle')
self.iptables.ipv4['mangle'].add_rule(
'PREROUTING',
'-j MARK --set-xmark 0x1/0xffffffff')
self.iptables.apply()
self.iptables.ipv4['mangle'].remove_rule(
'PREROUTING',
'-j MARK --set-xmark 0x1/0xffffffff')
self.iptables.ipv4['mangle'].remove_chain('mangle')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_mangle_rule(self):
self._test_add_mangle_rule_helper(False)
def test_add_mangle_rule_with_ipv6(self):
self._test_add_mangle_rule_helper(True)
def _test_add_nat_rule_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
nat_dump = NAT_TEMPLATE % IPTABLES_ARG
nat_dump_mod = ('# Generated by iptables_manager\n'
'*nat\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-nat - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
'[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
'[0:0] -A %(bn)s-snat -j %(bn)s-float-snat\n'
'[0:0] -A %(bn)s-PREROUTING -d 192.168.0.3 -j '
'%(bn)s-nat\n'
'[0:0] -A %(bn)s-nat -p tcp --dport 8080 -j '
'REDIRECT --to-port 80\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% IPTABLES_ARG)
expected_calls_and_values = [
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + nat_dump_mod + MANGLE_DUMP +
FILTER_DUMP),
run_as_root=True),
None),
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + nat_dump + MANGLE_DUMP +
FILTER_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(expected_calls_and_values,
FILTER_DUMP)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['nat'].add_chain('nat')
self.iptables.ipv4['nat'].add_rule('PREROUTING',
'-d 192.168.0.3 -j '
'%(bn)s-nat' % IPTABLES_ARG)
self.iptables.ipv4['nat'].add_rule('nat',
'-p tcp --dport 8080' +
' -j REDIRECT --to-port 80')
self.iptables.apply()
self.iptables.ipv4['nat'].remove_rule('nat',
'-p tcp --dport 8080 -j'
' REDIRECT --to-port 80')
self.iptables.ipv4['nat'].remove_rule('PREROUTING',
'-d 192.168.0.3 -j '
'%(bn)s-nat' % IPTABLES_ARG)
self.iptables.ipv4['nat'].remove_chain('nat')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_nat_rule(self):
self._test_add_nat_rule_helper(False)
def test_add_nat_rule_with_ipv6(self):
self._test_add_nat_rule_helper(True)
def _test_add_raw_rule_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
raw_dump_mod = ('# Generated by iptables_manager\n'
'*raw\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-raw - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A %(bn)s-PREROUTING -j CT --notrack\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% IPTABLES_ARG)
expected_calls_and_values = [
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(raw_dump_mod + NAT_DUMP + MANGLE_DUMP +
FILTER_DUMP),
run_as_root=True),
None),
(mock.call(['iptables-save', '-c'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-c'],
process_input=(RAW_DUMP + NAT_DUMP + MANGLE_DUMP +
FILTER_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(expected_calls_and_values,
FILTER_DUMP)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['raw'].add_chain('raw')
self.iptables.ipv4['raw'].add_rule('PREROUTING',
'-j CT --notrack')
self.iptables.apply()
self.iptables.ipv4['raw'].remove_rule('PREROUTING',
'-j CT --notrack')
self.iptables.ipv4['raw'].remove_chain('raw')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_raw_rule(self):
self._test_add_raw_rule_helper(False)
def test_add_raw_rule_with_ipv6(self):
self._test_add_raw_rule_helper(True)
def test_add_rule_to_a_nonexistent_chain(self):
self.assertRaises(LookupError, self.iptables.ipv4['filter'].add_rule,
'nonexistent', '-j DROP')
def test_remove_nonexistent_chain(self):
with mock.patch.object(iptables_manager, "LOG") as log:
self.iptables.ipv4['filter'].remove_chain('nonexistent')
log.debug.assert_called_once_with(
'Attempted to remove chain %s which does not exist',
'nonexistent')
def test_remove_nonexistent_rule(self):
with mock.patch.object(iptables_manager, "LOG") as log:
self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP')
log.warn.assert_called_once_with(
'Tried to remove rule that was not there: '
'%(chain)r %(rule)r %(wrap)r %(top)r',
{'wrap': True, 'top': False, 'rule': '-j DROP',
'chain': 'nonexistent'})
def test_iptables_failure_with_no_failing_line_number(self):
with mock.patch.object(iptables_manager, "LOG") as log:
# generate a RuntimeError on iptables-restore calls
def iptables_restore_failer(*args, **kwargs):
if 'iptables-restore' in args[0]:
self.input_lines = kwargs['process_input'].split('\n')
# don't provide a specific failure message so all lines
# are logged
raise RuntimeError()
return FILTER_DUMP
self.execute.side_effect = iptables_restore_failer
# _apply_synchronized calls iptables-restore so it should raise
# a RuntimeError
self.assertRaises(RuntimeError,
self.iptables._apply_synchronized)
# The RuntimeError should have triggered a log of the input to the
# process that it failed to execute. Verify by comparing the log
# call to the 'process_input' arg given to the failed iptables-restore
# call.
# Failure without a specific line number in the error should cause
# all lines to be logged with numbers.
logged = ['%7d. %s' % (n, l)
for n, l in enumerate(self.input_lines, 1)]
log.error.assert_called_once_with(_(
'IPTablesManager.apply failed to apply the '
'following set of iptables rules:\n%s'),
'\n'.join(logged)
)
def test_iptables_failure_on_specific_line(self):
with mock.patch.object(iptables_manager, "LOG") as log:
# generate a RuntimeError on iptables-restore calls
def iptables_restore_failer(*args, **kwargs):
if 'iptables-restore' in args[0]:
self.input_lines = kwargs['process_input'].split('\n')
# pretend line 11 failed
msg = ("Exit code: 1\nStdout: ''\n"
"Stderr: 'iptables-restore: line 11 failed\n'")
raise RuntimeError(msg)
return FILTER_DUMP
self.execute.side_effect = iptables_restore_failer
# _apply_synchronized calls iptables-restore so it should raise
# a RuntimeError
self.assertRaises(RuntimeError,
self.iptables._apply_synchronized)
# The RuntimeError should have triggered a log of the input to the
# process that it failed to execute. Verify by comparing the log
# call to the 'process_input' arg given to the failed iptables-restore
# call.
# Line 11 of the input was marked as failing so lines (11 - context)
# to (11 + context) should be logged
ctx = iptables_manager.IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, 11 - ctx)
log_end = 11 + ctx
logged = ['%7d. %s' % (n, l)
for n, l in enumerate(self.input_lines[log_start:log_end],
log_start + 1)]
log.error.assert_called_once_with(_(
'IPTablesManager.apply failed to apply the '
'following set of iptables rules:\n%s'),
'\n'.join(logged)
)
def test_get_traffic_counters_chain_notexists(self):
with mock.patch.object(iptables_manager, "LOG") as log:
acc = self.iptables.get_traffic_counters('chain1')
self.assertIsNone(acc)
self.assertEqual(0, self.execute.call_count)
log.warn.assert_called_once_with(
'Attempted to get traffic counters of chain %s which '
'does not exist', 'chain1')
def _test_get_traffic_counters_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
exp_packets = 800
exp_bytes = 131802
expected_calls_and_values = [
(mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x'],
run_as_root=True),
TRAFFIC_COUNTERS_DUMP),
(mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n',
'-v', '-x'],
run_as_root=True),
''),
(mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n',
'-v', '-x'],
run_as_root=True),
''),
(mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
'-v', '-x'],
run_as_root=True),
''),
]
if use_ipv6:
expected_calls_and_values.append(
(mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x'],
run_as_root=True),
TRAFFIC_COUNTERS_DUMP))
exp_packets *= 2
exp_bytes *= 2
tools.setup_mock_calls(self.execute, expected_calls_and_values)
acc = self.iptables.get_traffic_counters('OUTPUT')
self.assertEqual(acc['pkts'], exp_packets)
self.assertEqual(acc['bytes'], exp_bytes)
tools.verify_mock_calls(self.execute, expected_calls_and_values,
any_order=True)
def test_get_traffic_counters(self):
self._test_get_traffic_counters_helper(False)
def test_get_traffic_counters_with_ipv6(self):
self._test_get_traffic_counters_helper(True)
def _test_get_traffic_counters_with_zero_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
exp_packets = 800
exp_bytes = 131802
expected_calls_and_values = [
(mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x', '-Z'],
run_as_root=True),
TRAFFIC_COUNTERS_DUMP),
(mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n',
'-v', '-x', '-Z'],
run_as_root=True),
''),
(mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n',
'-v', '-x', '-Z'],
run_as_root=True),
''),
(mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
'-v', '-x', '-Z'],
run_as_root=True),
'')
]
if use_ipv6:
expected_calls_and_values.append(
(mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x', '-Z'],
run_as_root=True),
TRAFFIC_COUNTERS_DUMP))
exp_packets *= 2
exp_bytes *= 2
tools.setup_mock_calls(self.execute, expected_calls_and_values)
acc = self.iptables.get_traffic_counters('OUTPUT', zero=True)
self.assertEqual(acc['pkts'], exp_packets)
self.assertEqual(acc['bytes'], exp_bytes)
tools.verify_mock_calls(self.execute, expected_calls_and_values,
any_order=True)
def test_get_traffic_counters_with_zero(self):
self._test_get_traffic_counters_with_zero_helper(False)
def test_get_traffic_counters_with_zero_with_ipv6(self):
self._test_get_traffic_counters_with_zero_helper(True)
def _test_find_last_entry(self, find_str):
filter_list = [':neutron-filter-top - [0:0]',
':%(bn)s-FORWARD - [0:0]',
':%(bn)s-INPUT - [0:0]',
':%(bn)s-local - [0:0]',
':%(wrap)s - [0:0]',
':%(bn)s-OUTPUT - [0:0]',
'[0:0] -A FORWARD -j neutron-filter-top',
'[0:0] -A OUTPUT -j neutron-filter-top'
% IPTABLES_ARG]
return self.iptables._find_last_entry(filter_list, find_str)
def test_find_last_entry_old_dup(self):
find_str = 'neutron-filter-top'
match_str = '[0:0] -A OUTPUT -j neutron-filter-top'
ret_str = self._test_find_last_entry(find_str)
self.assertEqual(ret_str, match_str)
def test_find_last_entry_none(self):
find_str = 'neutron-filter-NOTFOUND'
ret_str = self._test_find_last_entry(find_str)
self.assertIsNone(ret_str)
class IptablesManagerStateLessTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesManagerStateLessTestCase, self).setUp()
cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
self.iptables = (iptables_manager.IptablesManager(state_less=True))
def test_nat_not_found(self):
self.assertNotIn('nat', self.iptables.ipv4)
def test_mangle_not_found(self):
self.assertNotIn('mangle', self.iptables.ipv4)
|
mahim97/zulip
|
refs/heads/master
|
zerver/lib/email_mirror.py
|
2
|
from typing import Any, Dict, List, Optional, Text
import logging
import re
from email.header import decode_header
import email.message as message
from django.conf import settings
from zerver.lib.actions import decode_email_address, get_email_gateway_message_string_from_address, \
internal_send_message, internal_send_private_message, \
internal_send_stream_message, internal_send_huddle_message
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.queue import queue_json_publish
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.upload import upload_message_image
from zerver.lib.utils import generate_random_token
from zerver.lib.str_utils import force_text
from zerver.lib.send_email import FromAddress
from zerver.models import Stream, Recipient, \
get_user_profile_by_id, get_display_recipient, get_personal_recipient, \
Message, Realm, UserProfile, get_system_bot, get_user
import talon
from talon import quotations
talon.init()
logger = logging.getLogger(__name__)
def redact_stream(error_message: Text) -> Text:
domain = settings.EMAIL_GATEWAY_PATTERN.rsplit('@')[-1]
stream_match = re.search('\\b(.*?)@' + domain, error_message)
if stream_match:
stream_name = stream_match.groups()[0]
return error_message.replace(stream_name, "X" * len(stream_name))
return error_message
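# Illustrative behaviour (EMAIL_GATEWAY_PATTERN assumed to be
# '%s@streams.example.com', so domain == 'streams.example.com'):
#   redact_stream('failed for secret-stream@streams.example.com')
#   -> 'failed for XXXXXXXXXXXXX@streams.example.com'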
def report_to_zulip(error_message: Text) -> None:
if settings.ERROR_BOT is None:
return
error_bot = get_system_bot(settings.ERROR_BOT)
error_stream = Stream.objects.get(name="errors", realm=error_bot.realm)
send_zulip(settings.ERROR_BOT, error_stream, u"email mirror error",
u"""~~~\n%s\n~~~""" % (error_message,))
def log_and_report(email_message: message.Message, error_message: Text, debug_info: Dict[str, Any]) -> None:
scrubbed_error = u"Sender: %s\n%s" % (email_message.get("From"),
redact_stream(error_message))
if "to" in debug_info:
scrubbed_error = u"Stream: %s\n%s" % (redact_stream(debug_info["to"]),
scrubbed_error)
if "stream" in debug_info:
scrubbed_error = u"Realm: %s\n%s" % (debug_info["stream"].realm.string_id,
scrubbed_error)
logger.error(scrubbed_error)
report_to_zulip(scrubbed_error)
# Temporary missed message addresses
redis_client = get_redis_client()
def missed_message_redis_key(token: Text) -> Text:
return 'missed_message:' + token
def is_missed_message_address(address: Text) -> bool:
msg_string = get_email_gateway_message_string_from_address(address)
return is_mm_32_format(msg_string)
def is_mm_32_format(msg_string: Optional[Text]) -> bool:
'''
Missed message strings are formatted with a little "mm" prefix
followed by a randomly generated 32-character string.
'''
return msg_string is not None and msg_string.startswith('mm') and len(msg_string) == 34
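# Illustrative checks (tokens assumed): 'mm' plus a 32-character token is
# accepted, anything else is not:
#   is_mm_32_format('mm' + 'a1' * 16)  # True  (2 + 32 == 34 chars)
#   is_mm_32_format('a1' * 16)         # False (no 'mm' prefix)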
def get_missed_message_token_from_address(address: Text) -> Text:
msg_string = get_email_gateway_message_string_from_address(address)
if msg_string is None:
raise ZulipEmailForwardError('Address not recognized by gateway.')
if not is_mm_32_format(msg_string):
raise ZulipEmailForwardError('Could not parse missed message address')
# strip off the 'mm' before returning the redis key
return msg_string[2:]
def create_missed_message_address(user_profile: UserProfile, message: Message) -> str:
if settings.EMAIL_GATEWAY_PATTERN == '':
logger.warning("EMAIL_GATEWAY_PATTERN is an empty string, using "
"NOREPLY_EMAIL_ADDRESS in the 'from' field.")
return FromAddress.NOREPLY
if message.recipient.type == Recipient.PERSONAL:
# We need to reply to the sender, so look up their personal recipient_id
recipient_id = get_personal_recipient(message.sender_id).id
else:
recipient_id = message.recipient_id
data = {
'user_profile_id': user_profile.id,
'recipient_id': recipient_id,
'subject': message.subject.encode('utf-8'),
}
while True:
token = generate_random_token(32)
key = missed_message_redis_key(token)
if redis_client.hsetnx(key, 'uses_left', 1):
break
with redis_client.pipeline() as pipeline:
pipeline.hmset(key, data)
pipeline.expire(key, 60 * 60 * 24 * 5)
pipeline.execute()
address = 'mm' + token
return settings.EMAIL_GATEWAY_PATTERN % (address,)
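# Illustrative result (pattern assumed as '%s@streams.example.com'): the
# generated address looks like 'mm<32 random chars>@streams.example.com',
# which is exactly what is_mm_32_format() above recognises.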
def mark_missed_message_address_as_used(address: Text) -> None:
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
with redis_client.pipeline() as pipeline:
pipeline.hincrby(key, 'uses_left', -1)
pipeline.expire(key, 60 * 60 * 24 * 5)
new_value = pipeline.execute()[0]
if new_value < 0:
redis_client.delete(key)
raise ZulipEmailForwardError('Missed message address has already been used')
def construct_zulip_body(message: message.Message, realm: Realm) -> Text:
body = extract_body(message)
# Remove null characters, since Zulip will reject them
body = body.replace("\x00", "")
body = filter_footer(body)
body += extract_and_upload_attachments(message, realm)
body = body.strip()
if not body:
body = '(No email body)'
return body
def send_to_missed_message_address(address: Text, message: message.Message) -> None:
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
result = redis_client.hmget(key, 'user_profile_id', 'recipient_id', 'subject')
if not all(val is not None for val in result):
raise ZulipEmailForwardError('Missing missed message address data')
user_profile_id, recipient_id, subject_b = result # type: (bytes, bytes, bytes)
user_profile = get_user_profile_by_id(user_profile_id)
recipient = Recipient.objects.get(id=recipient_id)
display_recipient = get_display_recipient(recipient)
body = construct_zulip_body(message, user_profile.realm)
if recipient.type == Recipient.STREAM:
assert isinstance(display_recipient, str)
recipient_str = display_recipient
internal_send_stream_message(user_profile.realm, user_profile, recipient_str,
subject_b.decode('utf-8'), body)
elif recipient.type == Recipient.PERSONAL:
assert not isinstance(display_recipient, str)
recipient_str = display_recipient[0]['email']
recipient_user = get_user(recipient_str, user_profile.realm)
internal_send_private_message(user_profile.realm, user_profile,
recipient_user, body)
elif recipient.type == Recipient.HUDDLE:
assert not isinstance(display_recipient, str)
emails = [user_dict['email'] for user_dict in display_recipient]
recipient_str = ', '.join(emails)
internal_send_huddle_message(user_profile.realm, user_profile,
emails, body)
else:
raise AssertionError("Invalid recipient type!")
logger.info("Successfully processed email from %s to %s" % (
user_profile.email, recipient_str))
## Sending the Zulip ##
class ZulipEmailForwardError(Exception):
pass
def send_zulip(sender: Text, stream: Stream, topic: Text, content: Text) -> None:
internal_send_message(
stream.realm,
sender,
"stream",
stream.name,
topic[:60],
content[:2000],
email_gateway=True)
def valid_stream(stream_name: Text, token: Text) -> bool:
try:
stream = Stream.objects.get(email_token=token)
return stream.name.lower() == stream_name.lower()
except Stream.DoesNotExist:
return False
def get_message_part_by_type(message: message.Message, content_type: Text) -> Optional[Text]:
charsets = message.get_charsets()
for idx, part in enumerate(message.walk()):
if part.get_content_type() == content_type:
content = part.get_payload(decode=True)
assert isinstance(content, bytes)
if charsets[idx]:
return content.decode(charsets[idx], errors="ignore")
return None
def extract_body(message: message.Message) -> Text:
# If the message contains a plaintext version of the body, use
# that.
plaintext_content = get_message_part_by_type(message, "text/plain")
if plaintext_content:
return quotations.extract_from_plain(plaintext_content)
# If we only have an HTML version, try to make that look nice.
html_content = get_message_part_by_type(message, "text/html")
if html_content:
return convert_html_to_markdown(quotations.extract_from_html(html_content))
raise ZulipEmailForwardError("Unable to find plaintext or HTML message body")
def filter_footer(text: Text) -> Text:
# Try to filter out obvious footers.
possible_footers = [line for line in text.split("\n") if line.strip().startswith("--")]
if len(possible_footers) != 1:
# Be conservative and don't try to scrub content if there
# isn't a trivial footer structure.
return text
return text.partition("--")[0].strip()
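# Illustrative behaviour: a single '--' delimiter strips the trailing
# signature, while several candidates leave the text untouched:
#   filter_footer('body\n--\nsig')    -> 'body'
#   filter_footer('a\n--\nb\n--\nc')  -> unchanged (two '--' candidates)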
def extract_and_upload_attachments(message: message.Message, realm: Realm) -> Text:
user_profile = get_system_bot(settings.EMAIL_GATEWAY_BOT)
attachment_links = []
payload = message.get_payload()
if not isinstance(payload, list):
# This is not a multipart message, so it can't contain attachments.
return ""
for part in payload:
content_type = part.get_content_type()
filename = part.get_filename()
if filename:
attachment = part.get_payload(decode=True)
if isinstance(attachment, bytes):
s3_url = upload_message_image(filename, len(attachment), content_type,
attachment,
user_profile,
target_realm=realm)
formatted_link = u"[%s](%s)" % (filename, s3_url)
attachment_links.append(formatted_link)
else:
logger.warning("Payload is not bytes (invalid attachment %s in message from %s)." %
(filename, message.get("From")))
return u"\n".join(attachment_links)
def extract_and_validate(email: Text) -> Stream:
temp = decode_email_address(email)
if temp is None:
raise ZulipEmailForwardError("Malformed email recipient " + email)
stream_name, token = temp
if not valid_stream(stream_name, token):
raise ZulipEmailForwardError("Bad stream token from email recipient " + email)
return Stream.objects.get(email_token=token)
def find_emailgateway_recipient(message: message.Message) -> Text:
# Delivered-To alone is not reliable; when an X-Gm-Original-To header is
# present it is more accurate, so check the headers below in descending
# priority order to find the most accurate recipient list.
recipient_headers = ["X-Gm-Original-To", "Delivered-To", "To"]
recipients = [] # type: List[Text]
for recipient_header in recipient_headers:
r = message.get_all(recipient_header, None)
if r:
recipients = r
break
pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
match_email_re = re.compile(".*?".join(pattern_parts))
for recipient_email in recipients:
if match_email_re.match(recipient_email):
return recipient_email
raise ZulipEmailForwardError("Missing recipient in mirror email")
def process_stream_message(to: Text, subject: Text, message: message.Message,
debug_info: Dict[str, Any]) -> None:
stream = extract_and_validate(to)
body = construct_zulip_body(message, stream.realm)
debug_info["stream"] = stream
send_zulip(settings.EMAIL_GATEWAY_BOT, stream, subject, body)
logger.info("Successfully processed email to %s (%s)" % (
stream.name, stream.realm.string_id))
def process_missed_message(to: Text, message: message.Message, pre_checked: bool) -> None:
if not pre_checked:
mark_missed_message_address_as_used(to)
send_to_missed_message_address(to, message)
def process_message(message: message.Message, rcpt_to: Optional[Text]=None, pre_checked: bool=False) -> None:
subject_header = message.get("Subject", "(no subject)")
encoded_subject, encoding = decode_header(subject_header)[0]
if encoding is None:
subject = force_text(encoded_subject) # encoded_subject has type str when encoding is None
else:
try:
subject = encoded_subject.decode(encoding)
except (UnicodeDecodeError, LookupError):
subject = u"(unreadable subject)"
debug_info = {}
try:
if rcpt_to is not None:
to = rcpt_to
else:
to = find_emailgateway_recipient(message)
debug_info["to"] = to
if is_missed_message_address(to):
process_missed_message(to, message, pre_checked)
else:
process_stream_message(to, subject, message, debug_info)
except ZulipEmailForwardError as e:
# TODO: notify sender of error, retry if appropriate.
log_and_report(message, str(e), debug_info)
def mirror_email_message(data: Dict[Text, Text]) -> Dict[str, str]:
rcpt_to = data['recipient']
if is_missed_message_address(rcpt_to):
try:
mark_missed_message_address_as_used(rcpt_to)
except ZulipEmailForwardError:
return {
"status": "error",
"msg": "5.1.1 Bad destination mailbox address: "
"Bad or expired missed message address."
}
else:
try:
extract_and_validate(rcpt_to)
except ZulipEmailForwardError:
return {
"status": "error",
"msg": "5.1.1 Bad destination mailbox address: "
"Please use the address specified in your Streams page."
}
queue_json_publish(
"email_mirror",
{
"message": data['msg_text'],
"rcpt_to": rcpt_to
}
)
return {"status": "success"}
|
reeselevine/iTerm2
|
refs/heads/master
|
tools/ply/ply-3.4/test/lex_state3.py
|
174
|
# lex_state3.py
#
# Bad state declaration
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = [
"PLUS",
"MINUS",
"NUMBER",
]
comment = 1
states = ((comment, 'inclusive'),
('example', 'exclusive'))
t_PLUS = r'\+'
t_MINUS = r'-'
t_NUMBER = r'\d+'
# Comments
def t_comment(t):
r'/\*'
t.lexer.begin('comment')
print("Entering comment state")
def t_comment_body_part(t):
r'(.|\n)*\*/'
print("comment body %s" % t)
t.lexer.begin('INITIAL')
def t_error(t):
pass
lex.lex()
|
otron/zenodo
|
refs/heads/master
|
zenodo/demosite/fixtures/accounts.py
|
2
|
# -*- coding: utf-8 -*-
#
## This file is part of Zenodo.
## Copyright (C) 2014 CERN.
##
## Zenodo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Zenodo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
from fixture import DataSet
from invenio.config import CFG_SITE_ADMIN_EMAIL
class UserData(DataSet):
class admin:
id = 1
email = CFG_SITE_ADMIN_EMAIL
password = ''
password_salt = ''
password_scheme = 'sha512_crypt'
note = '1'
nickname = 'admin'
class info:
id = 2
email = 'info@zenodo.org'
password = 'info'
password_salt = ''
password_scheme = 'sha512_crypt'
note = '1'
nickname = 'info'
class usera:
id = 3
email = 'user.a@zenodo.org'
password = 'usera'
password_salt = ''
password_scheme = 'sha512_crypt'
note = '1'
nickname = 'usera'
class userb:
id = 4
email = 'user.b@zenodo.org'
password = 'userb'
password_salt = ''
password_scheme = 'sha512_crypt'
note = '1'
nickname = 'userb'
class user_inactive:
id = 5
email = 'inactive@zenodo.org'
password = 'inactive'
password_salt = ''
password_scheme = 'sha512_crypt'
note = '2' # Email confirm required
nickname = 'inactive'
class user_blocked:
id = 6
email = 'blocked@zenodo.org'
password = 'blocked'
password_salt = ''
password_scheme = 'sha512_crypt'
note = '0' # Administrator approval required
nickname = 'blocked'
|
aasensio/elecciones2015
|
refs/heads/master
|
dump.py
|
2
|
from bs4 import BeautifulSoup
import urllib2
import numpy as np
# wiki = "http://en.wikipedia.org/wiki/Opinion_polling_for_the_Spanish_general_election,_2015"
# header = {'User-Agent': 'Mozilla/5.0'} #Needed to prevent 403 error on Wikipedia
# req = urllib2.Request(wiki,headers=header)
# page = urllib2.urlopen(req)
# result = page.read()
# f = open("page.html", "w")
# f.write(result)
# f.close()
f = open("page.html", "r")
page = f.read()
f.close()
soup = BeautifulSoup(page)
table = soup.find_all("table")[0]
rows = table.findAll('tr')
dat = []
loop = 0
for tr in rows[2:]:
cols = tr.findAll('td')
if (len(cols) == 14):
temp = []
for i in range(14):
temp.append(cols[i].string)
if (len(cols) == 13):
temp = []
for i in range(13):
if (i == 11):
temp.append('')
temp.append(cols[i].string)
if (len(cols) == 12):
temp = []
for i in range(12):
if (i == 10 or i == 11):  # 'or', not bitwise '|', which mis-parses under comparison chaining
temp.append('')
temp.append(cols[i].string)
dat.append(temp)
|
maggienj/ActiveData
|
refs/heads/es5
|
mo_threads/multiprocess.py
|
2
|
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import subprocess
from types import NoneType
from mo_dots import set_default, unwrap, get_module, NullType
from mo_logs import Log, strings
from mo_logs.exceptions import Except
from mo_threads.lock import Lock
from mo_threads.queues import Queue
from mo_threads.signal import Signal
from mo_threads.threads import Thread, THREAD_STOP
DEBUG = False
class Process(object):
def __init__(self, name, params, cwd=None, env=None, debug=False, shell=False, bufsize=-1):
self.name = name
self.service_stopped = Signal("stopped signal for " + strings.quote(name))
self.stdin = Queue("stdin for process " + strings.quote(name), silent=True)
self.stdout = Queue("stdout for process " + strings.quote(name), silent=True)
self.stderr = Queue("stderr for process " + strings.quote(name), silent=True)
try:
self.debug = debug or DEBUG
self.service = service = subprocess.Popen(
params,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=bufsize,
cwd=cwd if isinstance(cwd, (basestring, NullType, NoneType)) else cwd.abspath,
env=unwrap(set_default(env, os.environ)),
shell=shell
)
self.please_stop = Signal()
self.please_stop.on_go(self._kill)
self.thread_locker = Lock()
self.children = [
Thread.run(self.name + " stdin", self._writer, service.stdin, self.stdin, please_stop=self.service_stopped, parent_thread=self),
Thread.run(self.name + " stdout", self._reader, "stdout", service.stdout, self.stdout, please_stop=self.service_stopped, parent_thread=self),
Thread.run(self.name + " stderr", self._reader, "stderr", service.stderr, self.stderr, please_stop=self.service_stopped, parent_thread=self),
Thread.run(self.name + " waiter", self._monitor, parent_thread=self),
]
except Exception as e:
Log.error("Can not call", e)
if self.debug:
Log.note("{{process}} START: {{command}}", process=self.name, command=" ".join(map(strings.quote, params)))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.join(raise_on_error=True)
def stop(self):
self.stdin.add("exit") # ONE MORE SEND
self.please_stop.go()
def join(self, raise_on_error=False):
self.service_stopped.wait()
with self.thread_locker:
child_threads, self.children = self.children, []
for c in child_threads:
c.join()
if raise_on_error and self.returncode != 0:
Log.error(
"{{process}} FAIL: returncode={{code}}\n{{stderr}}",
process=self.name,
code=self.service.returncode,
stderr=list(self.stderr)
)
return self
def remove_child(self, child):
with self.thread_locker:
try:
self.children.remove(child)
except Exception:
pass
@property
def pid(self):
return self.service.pid
@property
def returncode(self):
return self.service.returncode
def _monitor(self, please_stop):
self.service.wait()
if self.debug:
Log.note("{{process}} STOP: returncode={{returncode}}", process=self.name, returncode=self.service.returncode)
self.service_stopped.go()
please_stop.go()
def _reader(self, name, pipe, recieve, please_stop):
try:
line = "dummy"
while not please_stop and self.service.returncode is None and line:
line = pipe.readline().rstrip()
if line:
recieve.add(line)
if self.debug:
Log.note("{{process}} ({{name}}): {{line}}", name=name, process=self.name, line=line)
# GRAB A FEW MORE LINES
max = 100
while max:
try:
line = pipe.readline().rstrip()
if line:
max = 100
recieve.add(line)
if self.debug:
Log.note("{{process}} ({{name}}): {{line}}", name=name, process=self.name, line=line)
else:
max -= 1
except Exception:
break
finally:
pipe.close()
recieve.add(THREAD_STOP)
def _writer(self, pipe, send, please_stop):
while not please_stop:
line = send.pop(till=please_stop)
if line == THREAD_STOP:
please_stop.go()
break
if line:
if self.debug:
Log.note("{{process}} (stdin): {{line}}", process=self.name, line=line.rstrip())
pipe.write(line + b"\n")
pipe.close()
def _kill(self):
try:
self.service.kill()
except Exception as e:
ee = Except.wrap(e)
if 'The operation completed successfully' in ee:
return
if 'No such process' in ee:
return
Log.warning("Failure to kill process {{process|quote}}", process=self.name, cause=ee)
|
HerrSubset/FamilyTreeManager
|
refs/heads/master
|
ftm/__init__.py
|
1
|
import domain as dom
import ui
from sys import argv
pc = dom.ParametersContainer(argv)
fm = dom.FamilyManager(pc)
interface = ui.CLInterface(pc, fm)
|
vpstudios/Codecademy-Exercise-Answers
|
refs/heads/master
|
Language Skills/Python/Unit 3/2-PygLatin/PygLatin PART2/8-Word up.py
|
2
|
pyg = 'ay'
original = raw_input('Enter a word:')
if len(original) > 0 and original.isalpha():
word = original.lower()
first = word[0]
print original
else:
print 'empty'
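# Illustrative run (input assumed): entering 'Hello' sets word = 'hello'
# and first = 'h' before echoing the original; empty or non-alphabetic
# input falls through to the 'empty' branch.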
|
SerCeMan/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyRemoveStatementQuickFixTest/variable_after.py
|
80
|
def foo(r):
"""
:param r:
:return:
"""
def a():
pass
x = 2
|
beni55/django
|
refs/heads/master
|
django/contrib/auth/checks.py
|
374
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.core import checks
def check_user_model(**kwargs):
errors = []
cls = apps.get_model(settings.AUTH_USER_MODEL)
# Check that REQUIRED_FIELDS is a list
if not isinstance(cls.REQUIRED_FIELDS, (list, tuple)):
errors.append(
checks.Error(
"'REQUIRED_FIELDS' must be a list or tuple.",
hint=None,
obj=cls,
id='auth.E001',
)
)
# Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
errors.append(
checks.Error(
("The field named as the 'USERNAME_FIELD' "
"for a custom user model must not be included in 'REQUIRED_FIELDS'."),
hint=None,
obj=cls,
id='auth.E002',
)
)
# Check that the username field is unique
if not cls._meta.get_field(cls.USERNAME_FIELD).unique:
if (settings.AUTHENTICATION_BACKENDS ==
['django.contrib.auth.backends.ModelBackend']):
errors.append(
checks.Error(
"'%s.%s' must be unique because it is named as the 'USERNAME_FIELD'." % (
cls._meta.object_name, cls.USERNAME_FIELD
),
hint=None,
obj=cls,
id='auth.E003',
)
)
else:
errors.append(
checks.Warning(
"'%s.%s' is named as the 'USERNAME_FIELD', but it is not unique." % (
cls._meta.object_name, cls.USERNAME_FIELD
),
hint=('Ensure that your authentication backend(s) can handle '
'non-unique usernames.'),
obj=cls,
id='auth.W004',
)
)
return errors
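# Illustrative failure (model assumed): a custom user model declaring
#   USERNAME_FIELD = 'email'
#   REQUIRED_FIELDS = ['email', 'date_of_birth']
# trips auth.E002 above, because the username field must never appear in
# REQUIRED_FIELDS.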
|
ThePletch/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/rackspace/rax_cbs.py
|
25
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_cbs
short_description: Manipulate Rackspace Cloud Block Storage Volumes
description:
- Manipulate Rackspace Cloud Block Storage Volumes
version_added: 1.6
options:
description:
description:
- Description to give the volume being created
default: null
image:
description:
- image to use for bootable volumes. Can be an C(id), C(human_id) or
C(name). This option requires C(pyrax>=1.9.3)
default: null
version_added: 1.9
meta:
description:
- A hash of metadata to associate with the volume
default: null
name:
description:
- Name to give the volume being created
default: null
required: true
size:
description:
- Size of the volume to create in Gigabytes
default: 100
required: true
snapshot_id:
description:
- The id of the snapshot to create the volume from
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
volume_type:
description:
- Type of the volume being created
choices:
- SATA
- SSD
default: SATA
required: true
wait:
description:
- wait for the volume to be in state 'available' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume create request
local_action:
module: rax_cbs
credentials: ~/.raxpub
name: my-volume
description: My Volume
volume_type: SSD
size: 150
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_volume
'''
from distutils.version import LooseVersion
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image):
changed = False
volume = None
instance = {}
cbs = pyrax.cloud_blockstorage
if cbs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if image:
# pyrax<1.9.3 did not have support for specifying an image when
# creating a volume which is required for bootable volumes
if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
module.fail_json(msg='Creating a bootable volume requires '
'pyrax>=1.9.3')
image = rax_find_image(module, pyrax, image)
volume = rax_find_volume(module, pyrax, name)
if state == 'present':
if not volume:
kwargs = dict()
if image:
kwargs['image'] = image
try:
volume = cbs.create(name, size=size, volume_type=volume_type,
description=description,
metadata=meta,
snapshot_id=snapshot_id, **kwargs)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
else:
if wait:
attempts = wait_timeout / 5
pyrax.utils.wait_for_build(volume, interval=5,
attempts=attempts)
volume.get()
instance = rax_to_dict(volume)
result = dict(changed=changed, volume=instance)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
elif wait and volume.status not in VOLUME_STATUS:
result['msg'] = 'Timeout waiting on %s' % volume.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
if volume:
instance = rax_to_dict(volume)
try:
volume.delete()
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, volume=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
description=dict(type='str'),
image=dict(type='str'),
meta=dict(type='dict', default={}),
name=dict(required=True),
size=dict(type='int', default=100),
snapshot_id=dict(),
state=dict(default='present', choices=['present', 'absent']),
volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
description = module.params.get('description')
image = module.params.get('image')
meta = module.params.get('meta')
name = module.params.get('name')
size = module.params.get('size')
snapshot_id = module.params.get('snapshot_id')
state = module.params.get('state')
volume_type = module.params.get('volume_type')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
if __name__ == '__main__':
main()
|
paisely65/Assignments--IS-362
|
refs/heads/master
|
IS362_1.py
|
1
|
## Dealing with duplicate entries. Last value wins!
dict = {'Name': 'Alice', 'Age': 47, 'Name': 'Manni'}
print("dict['Name']: ", dict['Name'])
## Applying Function and Methods
breakfast = {'ham': 'roll', 'egg': 'scramble'}
lunch = {'burger': 'well', 'fries': 'yes', 'salad': 'yes'}
print("Length : %d" % len (lunch))
## seq -- the sequence of values to use as the new dictionary's keys.
## value -- if provided, every key is mapped to this value.
seq = ('name', 'height', 'sex')
dict = dict.fromkeys(seq)
print("New Dictionary : %s" % str(dict))
dict = dict.fromkeys(seq, 10,)
print ("New Dictionary : %s" % str(dict))
|
srikantbmandal/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/atomic/atomic_host.py
|
50
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: atomic_host
short_description: Manage the atomic host platform
description:
- Manage the atomic host platform
- Rebooting of the Atomic host platform should be done outside this module
version_added: "2.2"
author: "Saravanan KR @krsacme"
notes:
- Host should be an atomic platform (verified by existence of '/run/ostree-booted' file)
requirements:
- atomic
- "python >= 2.6"
options:
revision:
description:
- The version number of the atomic host to be deployed. Providing C(latest) will upgrade to the latest available version.
required: false
default: latest
aliases: ["version"]
'''
EXAMPLES = '''
# Upgrade the atomic host platform to the latest version (atomic host upgrade)
- atomic_host:
revision: latest
# Deploy a specific revision as the atomic host (atomic host deploy 23.130)
- atomic_host:
revision: 23.130
'''
RETURN = '''
msg:
description: The command standard output
returned: always
type: string
sample: 'Already on latest'
'''
# import module snippets
from ansible.module_utils.basic import AnsibleModule, os
def core(module):
revision = module.params['revision']
args = []
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
if revision == 'latest':
args = ['atomic', 'host', 'upgrade']
else:
args = ['atomic', 'host', 'deploy', revision]
out = {}
err = {}
rc = 0
rc, out, err = module.run_command(args, check_rc=False)
if rc == 77 and revision == 'latest':
module.exit_json(msg="Already on latest", changed=False)
elif rc != 0:
module.fail_json(rc=rc, msg=err)
else:
module.exit_json(msg=out, changed=True)
def main():
module = AnsibleModule(
argument_spec=dict(
revision=dict(default='latest', required=False, aliases=["version"]),
),
)
# Verify that the platform is atomic host
if not os.path.exists("/run/ostree-booted"):
module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
try:
core(module)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
gangadhar-kadam/laganfrappe
|
refs/heads/master
|
frappe/core/report/subscription_details/subscription_details.py
|
2
|
from __future__ import unicode_literals
import frappe
def get_columns():
return ["Site Name:220", "Site Created on:Date:120",
"Customer Name:120","Country:80","Administrator Email Id:250","Subscription Plan:200"]
def execute(filters=None):
    columns = get_columns()
    qry = "select name, DATE(creation), client_name, country, email_id__if_administrator as `Administrator Email`, subscription_plan from `tabSite Master` order by creation asc"
    data = frappe.db.sql(qry, as_list=1)
    return columns, data
def get_columns():
return [
"Site Name:Data:220",
"Site Created on:Date:120",
"Customer Name:Data:120",
"Country:Data:80",
"Administrator Email Id:Data:250",
"Subscription Plan:200"
]
|
chengjf/database-interface-doc-management
|
refs/heads/master
|
flask-demo/flask/Lib/site-packages/pip/_vendor/requests/packages/chardet/compat.py
|
2942
|
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
|
phektus/Django-Google-AppEngine-OpenId-Auth
|
refs/heads/master
|
django/shortcuts/__init__.py
|
254
|
"""
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.template import loader, RequestContext
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
def render_to_response(*args, **kwargs):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
httpresponse_kwargs = {'mimetype': kwargs.pop('mimetype', None)}
return HttpResponse(loader.render_to_string(*args, **kwargs), **httpresponse_kwargs)
def render(request, *args, **kwargs):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
httpresponse_kwargs = {
'content_type': kwargs.pop('content_type', None),
'status': kwargs.pop('status', None),
}
if 'context_instance' in kwargs:
context_instance = kwargs.pop('context_instance')
if kwargs.get('current_app', None):
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
current_app = kwargs.pop('current_app', None)
context_instance = RequestContext(request, current_app=current_app)
kwargs['context_instance'] = context_instance
return HttpResponse(loader.render_to_string(*args, **kwargs),
**httpresponse_kwargs)
def redirect(to, *args, **kwargs):
"""
    Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
    By default issues a temporary redirect; pass permanent=True to issue a
    permanent redirect.
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return redirect_class(to.get_absolute_url())
# Next try a reverse URL resolution.
try:
return redirect_class(urlresolvers.reverse(to, args=args, kwargs=kwargs))
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return redirect_class(to)
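# Illustrative calls (added as a sketch; the names below are hypothetical):
#
#   redirect(post)                          # model with get_absolute_url()
#   redirect('news-detail', pk=42)          # reverse() a named view
#   redirect('/archive/', permanent=True)   # plain URL, permanent redirect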
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
else:
manager = klass._default_manager
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
    Uses get() to return an object, or raises an Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
    Note: Like with get(), a MultipleObjectsReturned exception will be raised
    if more than one object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
"""
    Uses filter() to return a list of objects, or raises an Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
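# Usage sketch (added; ``Article`` is a hypothetical model):
#
#   article = get_object_or_404(Article, pk=1)            # 404 if missing
#   articles = get_list_or_404(Article, published=True)   # 404 if empty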
|
jvkops/django
|
refs/heads/master
|
django/utils/translation/__init__.py
|
108
|
"""
Internationalization support.
"""
from __future__ import unicode_literals
import re
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.encoding import force_text
from django.utils.functional import lazy
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
    USE_I18N will have no effect on which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no longer needed, so remove it from the namespace.
del Trans
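# Illustration (added): the first attribute access resolves and caches the
# real function on the instance, so later lookups bypass __getattr__.
#
#   _trans.gettext   # first access: picks trans_real or trans_null, setattr()s it
#   _trans.gettext   # later access: plain instance attribute, no dispatch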
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ugettext(message):
return _trans.ugettext(message)
def ungettext(singular, plural, number):
return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, six.integer_types):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
class NumberAwareString(resultclass):
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
try:
number_value = rhs[number]
except KeyError:
raise KeyError('Your dictionary lacks key \'%s\'. '
'Please provide it, because it is required to '
'determine whether string is singular or plural.'
% number)
else:
number_value = rhs
kwargs['number'] = number_value
translated = func(**kwargs)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
return proxy
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
def ungettext_lazy(singular, plural, number=None):
return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
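# Usage sketch (added; not part of the original module): when ``number`` is a
# string key, the plural form is chosen from the mapping used for formatting.
#
#   msg = ungettext_lazy('%(num)d apple', '%(num)d apples', 'num')
#   msg % {'num': 1}   # singular form
#   msg % {'num': 3}   # plural form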
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.deactivate:
deactivate()
else:
activate(self.old_language)
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, origin=None):
return _trans.templatize(src, origin)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
return ''.join(force_text(s) for s in strings)
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if 'fallback' in lang_info and 'name' not in lang_info:
info = get_language_info(lang_info['fallback'][0])
else:
info = lang_info
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
info = LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
if info:
info['name_translated'] = ugettext_lazy(info['name'])
return info
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
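# Example (added): newline-adjacent whitespace collapses to single spaces.
#
#   trim_whitespace('hello \n   world\n')  # -> 'hello world'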
|
bprodoehl/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/optparser_unittest.py
|
124
|
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for parser.py."""
import unittest2 as unittest
from webkitpy.common.system.logtesting import LoggingTestCase
from webkitpy.style.optparser import ArgumentParser
from webkitpy.style.optparser import ArgumentPrinter
from webkitpy.style.optparser import CommandOptionValues as ProcessorOptions
from webkitpy.style.optparser import DefaultCommandOptionValues
class ArgumentPrinterTest(unittest.TestCase):
"""Tests the ArgumentPrinter class."""
_printer = ArgumentPrinter()
def _create_options(self,
output_format='emacs',
min_confidence=3,
filter_rules=[],
git_commit=None):
return ProcessorOptions(filter_rules=filter_rules,
git_commit=git_commit,
min_confidence=min_confidence,
output_format=output_format)
def test_to_flag_string(self):
options = self._create_options('vs7', 5, ['+foo', '-bar'], 'git')
self.assertEqual('--filter=+foo,-bar --git-commit=git '
'--min-confidence=5 --output=vs7',
self._printer.to_flag_string(options))
# This is to check that --filter and --git-commit do not
# show up when not user-specified.
options = self._create_options()
self.assertEqual('--min-confidence=3 --output=emacs',
self._printer.to_flag_string(options))
class ArgumentParserTest(LoggingTestCase):
"""Test the ArgumentParser class."""
class _MockStdErr(object):
def write(self, message):
# We do not want the usage string or style categories
# to print during unit tests, so print nothing.
return
def _parse(self, args):
"""Call a test parser.parse()."""
parser = self._create_parser()
return parser.parse(args)
def _create_defaults(self):
"""Return a DefaultCommandOptionValues instance for testing."""
base_filter_rules = ["-", "+whitespace"]
return DefaultCommandOptionValues(min_confidence=3,
output_format="vs7")
def _create_parser(self):
"""Return an ArgumentParser instance for testing."""
default_options = self._create_defaults()
        all_categories = ["build", "whitespace"]
mock_stderr = self._MockStdErr()
return ArgumentParser(all_categories=all_categories,
base_filter_rules=[],
default_options=default_options,
mock_stderr=mock_stderr,
usage="test usage")
def test_parse_documentation(self):
parse = self._parse
# FIXME: Test both the printing of the usage string and the
# filter categories help.
# Request the usage string.
self.assertRaises(SystemExit, parse, ['--help'])
# Request default filter rules and available style categories.
self.assertRaises(SystemExit, parse, ['--filter='])
def test_parse_bad_values(self):
parse = self._parse
# Pass an unsupported argument.
self.assertRaises(SystemExit, parse, ['--bad'])
self.assertLog(['ERROR: no such option: --bad\n'])
self.assertRaises(SystemExit, parse, ['--min-confidence=bad'])
self.assertLog(['ERROR: option --min-confidence: '
"invalid integer value: 'bad'\n"])
self.assertRaises(SystemExit, parse, ['--min-confidence=0'])
self.assertLog(['ERROR: option --min-confidence: invalid integer: 0: '
'value must be between 1 and 5\n'])
self.assertRaises(SystemExit, parse, ['--min-confidence=6'])
self.assertLog(['ERROR: option --min-confidence: invalid integer: 6: '
'value must be between 1 and 5\n'])
parse(['--min-confidence=1']) # works
parse(['--min-confidence=5']) # works
self.assertRaises(SystemExit, parse, ['--output=bad'])
self.assertLog(['ERROR: option --output-format: invalid choice: '
"'bad' (choose from 'emacs', 'vs7')\n"])
parse(['--output=vs7']) # works
# Pass a filter rule not beginning with + or -.
self.assertRaises(SystemExit, parse, ['--filter=build'])
self.assertLog(['ERROR: Invalid filter rule "build": '
'every rule must start with + or -.\n'])
parse(['--filter=+build']) # works
def test_parse_default_arguments(self):
parse = self._parse
(files, options) = parse([])
self.assertEqual(files, [])
self.assertEqual(options.filter_rules, [])
self.assertIsNone(options.git_commit)
self.assertFalse(options.diff_files)
self.assertFalse(options.is_verbose)
self.assertEqual(options.min_confidence, 3)
self.assertEqual(options.output_format, 'vs7')
def test_parse_explicit_arguments(self):
parse = self._parse
# Pass non-default explicit values.
(files, options) = parse(['--min-confidence=4'])
self.assertEqual(options.min_confidence, 4)
(files, options) = parse(['--output=emacs'])
self.assertEqual(options.output_format, 'emacs')
(files, options) = parse(['-g', 'commit'])
self.assertEqual(options.git_commit, 'commit')
(files, options) = parse(['--git-commit=commit'])
self.assertEqual(options.git_commit, 'commit')
(files, options) = parse(['--git-diff=commit'])
self.assertEqual(options.git_commit, 'commit')
(files, options) = parse(['--verbose'])
self.assertTrue(options.is_verbose)
(files, options) = parse(['--diff-files', 'file.txt'])
self.assertTrue(options.diff_files)
# Pass user_rules.
(files, options) = parse(['--filter=+build,-whitespace'])
self.assertEqual(options.filter_rules,
["+build", "-whitespace"])
# Pass spurious white space in user rules.
(files, options) = parse(['--filter=+build, -whitespace'])
self.assertEqual(options.filter_rules,
["+build", "-whitespace"])
def test_parse_files(self):
parse = self._parse
(files, options) = parse(['foo.cpp'])
self.assertEqual(files, ['foo.cpp'])
# Pass multiple files.
(files, options) = parse(['--output=emacs', 'foo.cpp', 'bar.cpp'])
self.assertEqual(files, ['foo.cpp', 'bar.cpp'])
class CommandOptionValuesTest(unittest.TestCase):
"""Tests CommandOptionValues class."""
def test_init(self):
"""Test __init__ constructor."""
# Check default parameters.
options = ProcessorOptions()
self.assertEqual(options.filter_rules, [])
self.assertIsNone(options.git_commit)
self.assertFalse(options.is_verbose)
self.assertEqual(options.min_confidence, 1)
self.assertEqual(options.output_format, "emacs")
# Check argument validation.
self.assertRaises(ValueError, ProcessorOptions, output_format="bad")
ProcessorOptions(output_format="emacs") # No ValueError: works
ProcessorOptions(output_format="vs7") # works
self.assertRaises(ValueError, ProcessorOptions, min_confidence=0)
self.assertRaises(ValueError, ProcessorOptions, min_confidence=6)
ProcessorOptions(min_confidence=1) # works
ProcessorOptions(min_confidence=5) # works
# Check attributes.
options = ProcessorOptions(filter_rules=["+"],
git_commit="commit",
is_verbose=True,
min_confidence=3,
output_format="vs7")
self.assertEqual(options.filter_rules, ["+"])
self.assertEqual(options.git_commit, "commit")
self.assertTrue(options.is_verbose)
self.assertEqual(options.min_confidence, 3)
self.assertEqual(options.output_format, "vs7")
def test_eq(self):
"""Test __eq__ equality function."""
self.assertTrue(ProcessorOptions().__eq__(ProcessorOptions()))
# Also verify that a difference in any argument causes equality to fail.
# Explicitly create a ProcessorOptions instance with all default
# values. We do this to be sure we are assuming the right default
# values in our self.assertFalse() calls below.
options = ProcessorOptions(filter_rules=[],
git_commit=None,
is_verbose=False,
min_confidence=1,
output_format="emacs")
# Verify that we created options correctly.
self.assertTrue(options.__eq__(ProcessorOptions()))
self.assertFalse(options.__eq__(ProcessorOptions(filter_rules=["+"])))
self.assertFalse(options.__eq__(ProcessorOptions(git_commit="commit")))
self.assertFalse(options.__eq__(ProcessorOptions(is_verbose=True)))
self.assertFalse(options.__eq__(ProcessorOptions(min_confidence=2)))
self.assertFalse(options.__eq__(ProcessorOptions(output_format="vs7")))
def test_ne(self):
"""Test __ne__ inequality function."""
# By default, __ne__ always returns true on different objects.
# Thus, just check the distinguishing case to verify that the
# code defines __ne__.
self.assertFalse(ProcessorOptions().__ne__(ProcessorOptions()))
|
nathanial/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/contrib/localflavor/es/forms.py
|
309
|
# -*- coding: utf-8 -*-
"""
Spanish-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
import re
class ESPostalCodeField(RegexField):
"""
    A form field that validates its input as a Spanish postal code.
    Spanish postal codes are five-digit strings whose first two digits,
    between 01 and 52, identify the province.
"""
default_error_messages = {
'invalid': _('Enter a valid postal code in the range and format 01XXX - 52XXX.'),
}
def __init__(self, *args, **kwargs):
super(ESPostalCodeField, self).__init__(
r'^(0[1-9]|[1-4][0-9]|5[0-2])\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
class ESPhoneNumberField(RegexField):
"""
    A form field that validates its input as a Spanish phone number.
    Information numbers are omitted.
    Spanish phone numbers are nine-digit numbers, where the first digit is 6
    (for cell phones), 8 (for special phones), or 9 (for landlines and special
    phones).
TODO: accept and strip characters like dot, hyphen... in phone number
"""
default_error_messages = {
'invalid': _('Enter a valid phone number in one of the formats 6XXXXXXXX, 8XXXXXXXX or 9XXXXXXXX.'),
}
def __init__(self, *args, **kwargs):
super(ESPhoneNumberField, self).__init__(r'^(6|8|9)\d{8}$',
max_length=None, min_length=None, *args, **kwargs)
class ESIdentityCardNumberField(RegexField):
"""
Spanish NIF/NIE/CIF (Fiscal Identification Number) code.
    Validates three different formats:
NIF (individuals): 12345678A
CIF (companies): A12345678
NIE (foreigners): X12345678A
according to a couple of simple checksum algorithms.
Value can include a space or hyphen separator between number and letters.
    The number length is not checked for NIF (or NIE): old values start with
    a 1, and future values can contain digits greater than 8. The CIF control
    digit can be a number or a letter depending on the company type. The
    algorithm is not public, and different authors disagree on which company
    types allow letters, so both validations are accepted for all types.
"""
default_error_messages = {
'invalid': _('Please enter a valid NIF, NIE, or CIF.'),
'invalid_only_nif': _('Please enter a valid NIF or NIE.'),
'invalid_nif': _('Invalid checksum for NIF.'),
'invalid_nie': _('Invalid checksum for NIE.'),
'invalid_cif': _('Invalid checksum for CIF.'),
}
def __init__(self, only_nif=False, *args, **kwargs):
self.only_nif = only_nif
self.nif_control = 'TRWAGMYFPDXBNJZSQVHLCKE'
self.cif_control = 'JABCDEFGHI'
self.cif_types = 'ABCDEFGHKLMNPQS'
self.nie_types = 'XT'
id_card_re = re.compile(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), re.IGNORECASE)
super(ESIdentityCardNumberField, self).__init__(id_card_re, max_length=None, min_length=None,
error_message=self.default_error_messages['invalid%s' % (self.only_nif and '_only_nif' or '')],
*args, **kwargs)
def clean(self, value):
super(ESIdentityCardNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
nif_get_checksum = lambda d: self.nif_control[int(d)%23]
value = value.upper().replace(' ', '').replace('-', '')
m = re.match(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), value)
letter1, number, letter2 = m.groups()
if not letter1 and letter2:
# NIF
if letter2 == nif_get_checksum(number):
return value
else:
raise ValidationError(self.error_messages['invalid_nif'])
elif letter1 in self.nie_types and letter2:
# NIE
if letter2 == nif_get_checksum(number):
return value
else:
raise ValidationError(self.error_messages['invalid_nie'])
elif not self.only_nif and letter1 in self.cif_types and len(number) in [7, 8]:
# CIF
if not letter2:
number, letter2 = number[:-1], int(number[-1])
checksum = cif_get_checksum(number)
if letter2 in (checksum, self.cif_control[checksum]):
return value
else:
raise ValidationError(self.error_messages['invalid_cif'])
else:
raise ValidationError(self.error_messages['invalid'])
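# Worked example (added for illustration): for the classic test NIF 12345678Z,
# nif_get_checksum('12345678') is self.nif_control[12345678 % 23], i.e. the
# letter at index 14, which is 'Z', so the value validates.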
class ESCCCField(RegexField):
"""
A form field that validates its input as a Spanish bank account or CCC
(Codigo Cuenta Cliente).
Spanish CCC is in format EEEE-OOOO-CC-AAAAAAAAAA where:
E = entity
O = office
C = checksum
A = account
It's also valid to use a space as delimiter, or to use no delimiter.
    The first checksum digit validates the entity and office; the last one
    validates the account. Validation multiplies every digit of the 10-digit
    value (zero-padded on the left if necessary) by the weight at its position
    in the sequence 1, 2, 4, 8, 5, 10, 9, 7, 3, 6, sums the products, and
    subtracts the sum modulo 11 from 11. The result is the checksum digit,
    except that 10 maps to 1 and 11 maps to 0.
TODO: allow IBAN validation too
"""
default_error_messages = {
'invalid': _('Please enter a valid bank account number in format XXXX-XXXX-XX-XXXXXXXXXX.'),
'checksum': _('Invalid checksum for bank account number.'),
}
def __init__(self, *args, **kwargs):
super(ESCCCField, self).__init__(r'^\d{4}[ -]?\d{4}[ -]?\d{2}[ -]?\d{10}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self, value):
super(ESCCCField, self).clean(value)
if value in EMPTY_VALUES:
return u''
control_str = [1, 2, 4, 8, 5, 10, 9, 7, 3, 6]
m = re.match(r'^(\d{4})[ -]?(\d{4})[ -]?(\d{2})[ -]?(\d{10})$', value)
entity, office, checksum, account = m.groups()
get_checksum = lambda d: str(11 - sum([int(digit) * int(control) for digit, control in zip(d, control_str)]) % 11).replace('10', '1').replace('11', '0')
if get_checksum('00' + entity + office) + get_checksum(account) == checksum:
return value
else:
raise ValidationError(self.error_messages['checksum'])
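# Worked example (added for illustration): for entity 2100, office 0418 and
# account 0200051332, get_checksum('0021000418') is '4' and
# get_checksum('0200051332') is '5', so the only valid checksum pair is '45'.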
class ESRegionSelect(Select):
"""
    A Select widget that uses a list of Spanish regions as its choices.
"""
def __init__(self, attrs=None):
from es_regions import REGION_CHOICES
super(ESRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class ESProvinceSelect(Select):
"""
    A Select widget that uses a list of Spanish provinces as its choices.
"""
def __init__(self, attrs=None):
from es_provinces import PROVINCE_CHOICES
super(ESProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
def cif_get_checksum(number):
s1 = sum([int(digit) for pos, digit in enumerate(number) if int(pos) % 2])
s2 = sum([sum([int(unit) for unit in str(int(digit) * 2)]) for pos, digit in enumerate(number) if not int(pos) % 2])
return (10 - ((s1 + s2) % 10)) % 10
|
mrlinux777/vkbottsone
|
refs/heads/master
|
plugins/site_screenshot.py
|
1
|
import aiohttp
from plugin_system import Plugin
plugin = Plugin("Скриншот сайта",
usage=["скрин [адрес сайта] - сделать скриншот сайта [адрес сайта]"])
# It's recommended to list the main command first (it will appear in the command list)
@plugin.on_command('скрин')
async def screen(msg, args):
if not args:
        return await msg.answer('Вы не указали сайт!')
async with aiohttp.ClientSession() as sess:
async with sess.get("http://mini.s-shot.ru/1024x768/1024/png/?" + args.pop()) as resp:
result = await msg.vk.upload_photo(await resp.read())
return await msg.answer('Держи', attachment=str(result))
|
YueHao/PyEPIC
|
refs/heads/master
|
epicIO.py
|
1
|
import argparse
import ply.lex as lex
import ply.yacc as yacc
import copy
import re
import numpy
class EpicLexer(object):
literals = "=,+-*/:&()"
reserved = {'beam1':'KWB1','beam2':'KWB2','end':'KWEND', 'global':'KWGLOBAL'}
tokens = ['NUMBER' , 'STRING', 'RET']+list(reserved.values())
def t_NUMBER(self, t):
#r'([+-]?[0-9]*\.?[0-9]+|[0-9]+\.[0-9]*)(([eE][-+]?[0-9]+)?)'
r'([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)'
t.value = float(t.value)
return t
t_ignore = ' \t'
def t_STRING(self, t):
r'[A-Za-z][A-Za-z0-9_\.]*'
t.type = EpicLexer.reserved.get(t.value, 'STRING')
return t
def t_QUOTE(self, t):
r'\"\''
pass
def t_COMMENT(self, t):
r'\#.*'
pass
def t_RET(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
t.value = 'RET'
return t
def t_EOF (self, t):
r'<<EOF>>'
t.value = 'EOF'
return t
def t_error(self, t):
print("Illegal character {}".format(t.value[0]));
t.lexer.skip(1)
def __init__(self,**kwargs):
self.lexer = lex.lex(module=self, reflags=re.IGNORECASE, **kwargs)
self.continuetor = 0
def lexit(self ,data):
self.lexer.input(data)
while True:
tok = self.lexer.token()
if not tok: break
#print tok
class EpicParser(object):
#starting = "statement"
tokens=EpicLexer.tokens
def __init__(self, **kwargs):
self.which_beam = 0
self.temp_dict = {}
self.nlist = []
self.parser = yacc.yacc(module=self, debug=0, **kwargs)
def p_statment(self, p):
'''statment : '&' KWB1 RET
| '&' KWB2 RET
| '&' KWEND RET
| '&' KWGLOBAL RET
| RET
| STRING '=' numlist RET
| STRING '=' STRING RET
'''
if len(p) == 4:
if p[2] == 'beam1':
self.which_beam = 1
elif p[2] == 'beam2':
self.which_beam = 2
elif p[2] == 'global':
self.which_beam = -1
else:
self.which_beam = 0
if len(p) == 5:
if type(p[3]) is str:
self.temp_dict[p[1]] = p[3]
else:
if len(self.nlist) > 1:
self.temp_dict[p[1]] = numpy.array(copy.deepcopy(self.nlist))
else:
self.temp_dict[p[1]] = self.nlist[0]
self.nlist = []
def p_numlist(self, p):
'''numlist : numbers
| numlist ',' numbers'''
if len(p) == 2:
self.nlist = [p[1],]
if len(p) == 4:
self.nlist.append(p[3])
def p_numbers(self, p):
'''numbers : NUMBER
| NUMBER '+' NUMBER
| NUMBER '-' NUMBER
| NUMBER '*' NUMBER
| NUMBER '/' NUMBER'''
if len(p) == 2:
p[0] = p[1]
if len(p) == 4:
if p[2] == '+':
p[0] = p[1] + p[3]
elif p[2] == '-':
p[0] = p[1] - p[3]
elif p[2] == '*':
p[0] = p[1] * p[3]
elif p[2] == '/':
p[0] = p[1] / p[3]
def p_error(self, p):
raise Exception(p)
def parse_file(file_stream):
beam1_dict = {}
beam2_dict = {}
global_dict = {}
which_beam = 0
for line in file_stream:
m = EpicLexer()
m.lexit(line)
p = EpicParser()
p.parser.parse(line,lexer=m.lexer)
        if p.which_beam != 0:
which_beam = p.which_beam
if which_beam == 0:
print("ERROR in the file, no beam is specified")
exit(-1)
if which_beam == 1:
beam1_dict.update(p.temp_dict)
elif which_beam == 2:
beam2_dict.update(p.temp_dict)
else:
global_dict.update(p.temp_dict)
return global_dict,beam1_dict,beam2_dict
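# Input sketch (added; the keywords come from the grammar above):
#
#   &beam1
#   energy = 10.0
#   rms_size = 0.1, 0.2
#   &end
#
# parse_file() iterates its argument line by line, so pass an open file
# object (or any iterable of lines), not a bare path string.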
if __name__ == '__main__':
    # parse_file() expects an iterable of lines, so open the file first.
    with open('testinput_example.in') as stream:
        parse_file(stream)
else:
epic_parser = argparse.ArgumentParser()
epic_parser.add_argument('-i', '--input_file', help="input file of beam parameter")
epic_parser.add_argument('-sw', '--strong_weak', help='treat beam 1 strong', action='store_true', default=True)
|
michaelgallacher/intellij-community
|
refs/heads/master
|
python/testData/refactoring/changeSignature/fixGoogleDocStringRemoveMultiple.before.py
|
45
|
def f(a, b, c, d):
"""
Parameters:
a : foo
b : bar
c : baz
d : quux
"""
|
isandlaTech/cohorte-demos
|
refs/heads/dev
|
led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-1.0.0-20141201.234602-19-python-distribution/repo/requests/packages/urllib3/util/retry.py
|
155
|
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.MAX_BACKOFF`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
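    # Illustration (added): with backoff_factor=0.5 the successive sleeps are
    # 0 (first observed error), 0.5 * 2**1 = 1.0s, 0.5 * 2**2 = 2.0s, ...,
    # capped at BACKOFF_MAX.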
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/response retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
|
Surbias/surbias.github.io
|
refs/heads/master
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/_stan_builtins.py
|
292
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._stan_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    This file contains the names of functions for Stan used by
    ``pygments.lexers.math.StanLexer``.
:copyright: Copyright 2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
KEYWORDS = ['else', 'for', 'if', 'in', 'lower', 'lp__', 'print', 'upper', 'while']
TYPES = [ 'corr_matrix',
'cov_matrix',
'int',
'matrix',
'ordered',
'positive_ordered',
'real',
'row_vector',
'simplex',
'unit_vector',
'vector']
FUNCTIONS = [ 'Phi',
'Phi_approx',
'abs',
'acos',
'acosh',
'asin',
'asinh',
'atan',
'atan2',
'atanh',
'bernoulli_cdf',
'bernoulli_log',
'bernoulli_logit_log',
'bernoulli_rng',
'beta_binomial_cdf',
'beta_binomial_log',
'beta_binomial_rng',
'beta_cdf',
'beta_log',
'beta_rng',
'binary_log_loss',
'binomial_cdf',
'binomial_coefficient_log',
'binomial_log',
'binomial_logit_log',
'binomial_rng',
'block',
'categorical_log',
'categorical_rng',
'cauchy_cdf',
'cauchy_log',
'cauchy_rng',
'cbrt',
'ceil',
'chi_square_log',
'chi_square_rng',
'cholesky_decompose',
'col',
'cols',
'cos',
'cosh',
'crossprod',
'cumulative_sum',
'determinant',
'diag_matrix',
'diag_post_multiply',
'diag_pre_multiply',
'diagonal',
'dims',
'dirichlet_log',
'dirichlet_rng',
'dot_product',
'dot_self',
'double_exponential_log',
'double_exponential_rng',
'e',
'eigenvalues_sym',
'eigenvectors_sym',
'epsilon',
'erf',
'erfc',
'exp',
'exp2',
'exp_mod_normal_cdf',
'exp_mod_normal_log',
'exp_mod_normal_rng',
'expm1',
'exponential_cdf',
'exponential_log',
'exponential_rng',
'fabs',
'fdim',
'floor',
'fma',
'fmax',
'fmin',
'fmod',
'gamma_log',
'gamma_rng',
'gumbel_cdf',
'gumbel_log',
'gumbel_rng',
'hypergeometric_log',
'hypergeometric_rng',
'hypot',
'if_else',
'int_step',
'inv_chi_square_cdf',
'inv_chi_square_log',
'inv_chi_square_rng',
'inv_cloglog',
'inv_gamma_cdf',
'inv_gamma_log',
'inv_gamma_rng',
'inv_logit',
'inv_wishart_log',
'inv_wishart_rng',
'inverse',
'lbeta',
'lgamma',
'lkj_corr_cholesky_log',
'lkj_corr_cholesky_rng',
'lkj_corr_log',
'lkj_corr_rng',
'lkj_cov_log',
'lmgamma',
'log',
'log10',
'log1m',
'log1m_inv_logit',
'log1p',
'log1p_exp',
'log2',
'log_determinant',
'log_inv_logit',
'log_sum_exp',
'logistic_cdf',
'logistic_log',
'logistic_rng',
'logit',
'lognormal_cdf',
'lognormal_log',
'lognormal_rng',
'max',
'mdivide_left_tri_low',
'mdivide_right_tri_low',
'mean',
'min',
'multi_normal_cholesky_log',
'multi_normal_log',
'multi_normal_prec_log',
'multi_normal_rng',
'multi_student_t_log',
'multi_student_t_rng',
'multinomial_cdf',
'multinomial_log',
'multinomial_rng',
'multiply_log',
'multiply_lower_tri_self_transpose',
'neg_binomial_cdf',
'neg_binomial_log',
'neg_binomial_rng',
'negative_epsilon',
'negative_infinity',
'normal_cdf',
'normal_log',
'normal_rng',
'not_a_number',
'ordered_logistic_log',
'ordered_logistic_rng',
'owens_t',
'pareto_cdf',
'pareto_log',
'pareto_rng',
'pi',
'poisson_cdf',
'poisson_log',
'poisson_log_log',
'poisson_rng',
'positive_infinity',
'pow',
'prod',
'rep_array',
'rep_matrix',
'rep_row_vector',
'rep_vector',
'round',
'row',
'rows',
'scaled_inv_chi_square_cdf',
'scaled_inv_chi_square_log',
'scaled_inv_chi_square_rng',
'sd',
'sin',
'singular_values',
'sinh',
'size',
'skew_normal_cdf',
'skew_normal_log',
'skew_normal_rng',
'softmax',
'sqrt',
'sqrt2',
'square',
'step',
'student_t_cdf',
'student_t_log',
'student_t_rng',
'sum',
'tan',
'tanh',
'tcrossprod',
'tgamma',
'trace',
'trunc',
'uniform_log',
'uniform_rng',
'variance',
'weibull_cdf',
'weibull_log',
'weibull_rng',
'wishart_log',
'wishart_rng']
DISTRIBUTIONS = [ 'bernoulli',
'bernoulli_logit',
'beta',
'beta_binomial',
'binomial',
'binomial_coefficient',
'binomial_logit',
'categorical',
'cauchy',
'chi_square',
'dirichlet',
'double_exponential',
'exp_mod_normal',
'exponential',
'gamma',
'gumbel',
'hypergeometric',
'inv_chi_square',
'inv_gamma',
'inv_wishart',
'lkj_corr',
'lkj_corr_cholesky',
'lkj_cov',
'logistic',
'lognormal',
'multi_normal',
'multi_normal_cholesky',
'multi_normal_prec',
'multi_student_t',
'multinomial',
'multiply',
'neg_binomial',
'normal',
'ordered_logistic',
'pareto',
'poisson',
'poisson_log',
'scaled_inv_chi_square',
'skew_normal',
'student_t',
'uniform',
'weibull',
'wishart']
RESERVED = [ 'alignas',
'alignof',
'and',
'and_eq',
'asm',
'auto',
'bitand',
'bitor',
'bool',
'break',
'case',
'catch',
'char',
'char16_t',
'char32_t',
'class',
'compl',
'const',
'const_cast',
'constexpr',
'continue',
'decltype',
'default',
'delete',
'do',
'double',
'dynamic_cast',
'enum',
'explicit',
'export',
'extern',
'false',
'false',
'float',
'friend',
'goto',
'inline',
'int',
'long',
'mutable',
'namespace',
'new',
'noexcept',
'not',
'not_eq',
'nullptr',
'operator',
'or',
'or_eq',
'private',
'protected',
'public',
'register',
'reinterpret_cast',
'repeat',
'return',
'short',
'signed',
'sizeof',
'static',
'static_assert',
'static_cast',
'struct',
'switch',
'template',
'then',
'this',
'thread_local',
'throw',
'true',
'true',
'try',
'typedef',
'typeid',
'typename',
'union',
'unsigned',
'until',
'using',
'virtual',
'void',
'volatile',
'wchar_t',
'xor',
'xor_eq']
|
gurch101/rosalind
|
refs/heads/master
|
iev.py
|
1
|
AAAA, AAAa, AAaa, AaAa, Aaaa, aaaa = map(int, raw_input().split())
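# Rosalind IEV: each couple has two offspring, and the probability that an
# offspring shows the dominant phenotype is 1 for the AA-AA, AA-Aa and AA-aa
# couples, 0.75 for Aa-Aa, 0.5 for Aa-aa (hence 2 * 0.5 = 1 * Aaaa below),
# and 0 for aa-aa.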
print (
2 * AAAA +
2 * AAAa +
2 * AAaa +
2 * 0.75 * AaAa +
Aaaa
)
|
neilisaac/luigi
|
refs/heads/master
|
examples/elasticsearch_index.py
|
14
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import json
import luigi
from luigi.contrib.esindex import CopyToIndex
class FakeDocuments(luigi.Task):
"""
Generates a local file containing 5 elements of data in JSON format.
"""
#: the date parameter.
date = luigi.DateParameter(default=datetime.date.today())
def run(self):
"""
Writes data in JSON format into the task's output target.
The data objects have the following attributes:
* `_id` is the default Elasticsearch id field,
* `text`: the text,
* `date`: the day when the data was created.
"""
today = datetime.date.today()
with self.output().open('w') as output:
for i in range(5):
output.write(json.dumps({'_id': i, 'text': 'Hi %s' % i,
'date': str(today)}))
output.write('\n')
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file on the local filesystem.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget(path='/tmp/_docs-%s.ldj' % self.date)
class IndexDocuments(CopyToIndex):
"""
This task loads JSON data contained in a :py:class:`luigi.target.Target` into an ElasticSearch index.
    This task's input will be the target returned by :py:meth:`~.FakeDocuments.output`.
This class uses :py:meth:`luigi.contrib.esindex.CopyToIndex.run`.
After running this task you can run:
.. code-block:: console
$ curl "localhost:9200/example_index/_search?pretty"
to see the indexed documents.
To see the update log, run
.. code-block:: console
$ curl "localhost:9200/update_log/_search?q=target_index:example_index&pretty"
To cleanup both indexes run:
.. code-block:: console
$ curl -XDELETE "localhost:9200/example_index"
$ curl -XDELETE "localhost:9200/update_log/_query?q=target_index:example_index"
"""
#: date task parameter (default = today)
date = luigi.DateParameter(default=datetime.date.today())
#: the name of the index in ElasticSearch to be updated.
index = 'example_index'
#: the name of the document type.
doc_type = 'greetings'
#: the host running the ElasticSearch service.
host = 'localhost'
#: the port used by the ElasticSearch service.
port = 9200
def requires(self):
"""
This task's dependencies:
* :py:class:`~.FakeDocuments`
:return: object (:py:class:`luigi.task.Task`)
"""
return FakeDocuments()
if __name__ == "__main__":
luigi.run(['--task', 'IndexDocuments'], use_optparse=True)
|
deshipu/micropython
|
refs/heads/master
|
tests/import/import_pkg8.py
|
46
|
# import with no __init__.py files
import pkg8.mod
|
SG345/autopep8
|
refs/heads/master
|
test/suite/W19.py
|
12
|
#: W191
if False:
print # indented with 1 tab
#:
#: W191
y = x == 2 \
or x == 3
#: E101 W191
if (
x == (
3
) or
y == 4):
pass
#: E101 W191
if x == 2 \
or y > 1 \
or x == 3:
pass
#: E101 W191
if x == 2 \
or y > 1 \
or x == 3:
pass
#:
#: E101 W191
if (foo == bar and
baz == frop):
pass
#: E101 W191
if (
foo == bar and
baz == frop
):
pass
#:
#: E101 E101 W191 W191
if start[1] > end_col and not (
over_indent == 4 and indent_next):
return(0, "E121 continuation line over-"
"indented for visual indent")
#:
#: E101 W191
def long_function_name(
var_one, var_two, var_three,
var_four):
print(var_one)
#: E101 W191
if ((row < 0 or self.moduleCount <= row or
col < 0 or self.moduleCount <= col)):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
#: E101 E101 E101 E101 W191 W191 W191 W191 W191 W191
if bar:
return(
start, 'E121 lines starting with a '
'closing bracket should be indented '
"to match that of the opening "
"bracket's line"
)
#
#: E101 W191
# you want vertical alignment, so use a parens
if ((foo.bar("baz") and
foo.bar("frop")
)):
print "yes"
#: E101 W191
# also ok, but starting to look like LISP
if ((foo.bar("baz") and
foo.bar("frop"))):
print "yes"
#: E101 W191
if (a == 2 or
b == "abc def ghi"
"jkl mno"):
return True
#: E101 W191
if (a == 2 or
b == """abc def ghi
jkl mno"""):
return True
#: E101 W191
if length > options.max_line_length:
return options.max_line_length, \
"E501 line too long (%d characters)" % length
#
#: E101 W191 W191
if os.path.exists(os.path.join(path, PEP8_BIN)):
cmd = ([os.path.join(path, PEP8_BIN)] +
self._pep8_options(targetfile))
#: W191
'''
multiline string with tab in it'''
#: E101 W191
'''multiline string
with tabs
and spaces
'''
#: Okay
'''sometimes, you just need to go nuts in a multiline string
and allow all sorts of crap
like mixed tabs and spaces
or trailing whitespace
or long long long long long long long long long long long long long long long long long lines
''' # nopep8
#: Okay
'''this one
will get no warning
even though the noqa comment is not immediately after the string
''' + foo # noqa
#
#: E101 W191
if foo is None and bar is "frop" and \
blah == 'yeah':
blah = 'yeahnah'
#
#: W191 W191 W191
if True:
foo(
1,
2)
#: W191 W191 W191 W191 W191
def test_keys(self):
"""areas.json - All regions are accounted for."""
expected = set([
u'Norrbotten',
u'V\xe4sterbotten',
])
#: W191
x = [
'abc'
]
#:
|