| repo_name (string, length 5-100) | ref (string, length 12-67) | path (string, length 4-244) | copies (string, length 1-8) | content (string, length 0-1.05M, nullable ⌀) |
|---|---|---|---|---|
| richardliaw/ray | refs/heads/master | python/ray/internal/internal_api.py | 2 |
import ray.worker
from ray import profiling
__all__ = ["free", "global_gc"]
def global_gc():
"""Trigger gc.collect() on all workers in the cluster."""
worker = ray.worker.global_worker
worker.core_worker.global_gc()
def memory_summary():
"""Returns a formatted string describing memory usage in the cluster."""
import grpc
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
# We can ask any Raylet for the global memory info.
raylet = ray.nodes()[0]
    raylet_address = "{}:{}".format(raylet["NodeManagerAddress"],
                                    raylet["NodeManagerPort"])
channel = grpc.insecure_channel(raylet_address)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
reply = stub.FormatGlobalMemoryInfo(
node_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0)
return reply.memory_summary
def free(object_refs, local_only=False, delete_creating_tasks=False):
"""Free a list of IDs from the in-process and plasma object stores.
This function is a low-level API which should be used in restricted
scenarios.
    If local_only is false, the request is sent to all object stores.
    This method does not return a value indicating whether the deletion was
    successful. It is an instruction to the object store: if some of the
    objects are still in use, the object stores will delete them later, once
    their reference counts drop to 0.
Examples:
>>> x_id = f.remote()
>>> ray.get(x_id) # wait for x to be created first
>>> free([x_id]) # unpin & delete x globally
Args:
object_refs (List[ObjectRef]): List of object refs to delete.
        local_only (bool): Whether to delete the objects only from the local
            object store, or from all object stores.
        delete_creating_tasks (bool): Whether to also delete the tasks that
            created these objects.
"""
worker = ray.worker.global_worker
if isinstance(object_refs, ray.ObjectRef):
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise TypeError("free() expects a list of ObjectRef, got {}".format(
type(object_refs)))
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"Attempting to call `free` on the value {}, "
"which is not an ray.ObjectRef.".format(object_ref))
worker.check_connected()
with profiling.profile("ray.free"):
if len(object_refs) == 0:
return
worker.core_worker.free_objects(object_refs, local_only,
delete_creating_tasks)
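

# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes Ray is installed and that a cluster can be started locally, and only
# exercises the free() and memory_summary() helpers defined above.
if __name__ == "__main__":
    import ray
    ray.init()
    ref = ray.put("some large object")   # pin an object in the object store
    print(ray.get(ref))                  # confirm the object is retrievable
    free([ref])                          # unpin and delete it globally
    print(memory_summary())              # cluster-wide memory report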
| henry808/euler | refs/heads/master | 035/test_035.py | 1 |
from eul035 import are_rotations_primes
def test_are_rotations_primes():
# fails on 5 by design
inputs_true = [3, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97]
inputs_false = [4, 6, 8, 9, 10, 19, 23, 29]
for i in inputs_true:
assert(are_rotations_primes(i) is True)
for i in inputs_false:
assert(are_rotations_primes(i) is False)
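

# Editor's note: eul035.are_rotations_primes is not included in this dump.
# The sketch below is one plausible implementation consistent with the test
# above (a number passes only if every rotation of its digits is prime); the
# "fails on 5 by design" caveat of the real module is not modelled here.
def _is_prime(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True


def _are_rotations_primes_sketch(n):
    digits = str(n)
    rotations = (int(digits[i:] + digits[:i]) for i in range(len(digits)))
    return all(_is_prime(r) for r in rotations)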
| hckiang/DBLite | refs/heads/master | boost_1_54_0/libs/python/test/args.py | 46 |
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from args_ext import *
>>> raw(3, 4, foo = 'bar', baz = 42)
((3, 4), {'foo': 'bar', 'baz': 42})
Prove that we can handle empty keywords and non-keywords
>>> raw(3, 4)
((3, 4), {})
>>> raw(foo = 'bar')
((), {'foo': 'bar'})
>>> f(x= 1, y = 3, z = 'hello')
(1, 3.0, 'hello')
>>> f(z = 'hello', x = 3, y = 2.5)
(3, 2.5, 'hello')
>>> f(1, z = 'hi', y = 3)
(1, 3.0, 'hi')
>>> try: f(1, 2, 'hello', bar = 'baz')
... except TypeError: pass
... else: print 'expected an exception: unknown keyword'
Exercise the functions using default stubs
>>> f1(z = 'nix', y = .125, x = 2)
(2, 0.125, 'nix')
>>> f1(y = .125, x = 2)
(2, 0.125, 'wow')
>>> f1(x = 2)
(2, 4.25, 'wow')
>>> f1()
(1, 4.25, 'wow')
>>> f2(z = 'nix', y = .125, x = 2)
(2, 0.125, 'nix')
>>> f2(y = .125, x = 2)
(2, 0.125, 'wow')
>>> f2(x = 2)
(2, 4.25, 'wow')
>>> f2()
(1, 4.25, 'wow')
>>> f3(z = 'nix', y = .125, x = 2)
(2, 0.125, 'nix')
>>> f3(y = .125, x = 2)
(2, 0.125, 'wow')
>>> f3(x = 2)
(2, 4.25, 'wow')
>>> f3()
(1, 4.25, 'wow')
Member function tests
>>> q = X()
>>> q.f(x= 1, y = 3, z = 'hello')
(1, 3.0, 'hello')
>>> q.f(z = 'hello', x = 3, y = 2.5)
(3, 2.5, 'hello')
>>> q.f(1, z = 'hi', y = 3)
(1, 3.0, 'hi')
>>> try: q.f(1, 2, 'hello', bar = 'baz')
... except TypeError: pass
... else: print 'expected an exception: unknown keyword'
Exercise member functions using default stubs
>>> q.f1(z = 'nix', y = .125, x = 2)
(2, 0.125, 'nix')
>>> q.f1(y = .125, x = 2)
(2, 0.125, 'wow')
>>> q.f1(x = 2)
(2, 4.25, 'wow')
>>> q.f1()
(1, 4.25, 'wow')
>>> q.f2.__doc__.splitlines()[1]
'f2( (X)self [, (int)x [, (float)y [, (str)z]]]) -> tuple :'
>>> q.f2.__doc__.splitlines()[2]
" f2's docstring"
>>> X.f.__doc__.splitlines()[1:5]
['f( (X)self, (int)x, (float)y, (str)z) -> tuple :', " This is X.f's docstring", '', ' C++ signature :']
>>> xfuncs = (X.inner0, X.inner1, X.inner2, X.inner3, X.inner4, X.inner5)
>>> for f in xfuncs:
... print f(q,1).value(),
... print f(q, n = 1).value(),
... print f(q, n = 0).value(),
... print f.__doc__.splitlines()[1:5]
1 1 0 ['inner0( (X)self, (bool)n) -> Y :', ' docstring', '', ' C++ signature :']
1 1 0 ['inner1( (X)self, (bool)n) -> Y :', ' docstring', '', ' C++ signature :']
1 1 0 ['inner2( (X)self, (bool)n) -> Y :', ' docstring', '', ' C++ signature :']
1 1 0 ['inner3( (X)self, (bool)n) -> Y :', ' docstring', '', ' C++ signature :']
1 1 0 ['inner4( (X)self, (bool)n) -> Y :', ' docstring', '', ' C++ signature :']
1 1 0 ['inner5( (X)self, (bool)n) -> Y :', ' docstring', '', ' C++ signature :']
>>> x = X(a1 = 44, a0 = 22)
>>> x.inner0(0).value()
22
>>> x.inner0(1).value()
44
>>> x = X(a0 = 7)
>>> x.inner0(0).value()
7
>>> x.inner0(1).value()
1
>>> inner(n = 1, self = q).value()
1
>>> y = Y(value = 33)
>>> y.raw(this = 1, that = 'the other')[1]
{'this': 1, 'that': 'the other'}
"""
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
import args_ext
help(args_ext)
sys.exit(status)
| mpeters/ansible | refs/heads/devel | v2/ansible/playbook/vars.py | 7690 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
| YoungKwonJo/mlxtend | refs/heads/master | tests/tests_regression/test_linear_regression.py | 1 |
from mlxtend.regression import LinearRegression
from mlxtend.data import iris_data
import numpy as np
from numpy.testing import assert_almost_equal
def test_univariate_normal_equation():
X = np.array([ 1, 2, 3, 4, 5])[:, np.newaxis]
y = np.array([ 1, 2, 3, 4, 5])
ne_lr = LinearRegression(solver='normal_equation')
ne_lr.fit(X, y)
assert_almost_equal(ne_lr.w_, np.array([0.00, 1.00]), decimal=7)
def test_univariate_gradient_descent():
X = np.array([ 1, 2, 3, 4, 5])[:, np.newaxis]
y = np.array([ 1, 2, 3, 4, 5])
X_std = (X - np.mean(X)) / X.std()
y_std = (y - np.mean(y)) / y.std()
gd_lr = LinearRegression(solver='gd', eta=0.1, epochs=50, random_seed=0)
gd_lr.fit(X_std, y_std)
assert_almost_equal(gd_lr.w_, np.array([0.00, 1.00]), decimal=2)
def test_univariate_stochastic_gradient_descent():
X = np.array([ 1, 2, 3, 4, 5])[:, np.newaxis]
y = np.array([ 1, 2, 3, 4, 5])
X_std = (X - np.mean(X)) / X.std()
y_std = (y - np.mean(y)) / y.std()
sgd_lr = LinearRegression(solver='sgd', eta=0.1, epochs=10, random_seed=0)
sgd_lr.fit(X_std, y_std)
assert_almost_equal(sgd_lr.w_, np.array([0.00, 1.00]), decimal=2)
def test_multivariate_normal_equation():
X = np.array([[1, 2], [2, 3], [4, 5], [6, 7], [7, 8]])
y = np.array([ 1, 2, 3, 4, 5])
X_std = (X - np.mean(X)) / X.std()
y_std = (y - np.mean(y)) / y.std()
ne_lr = LinearRegression(solver='normal_equation')
ne_lr.fit(X_std, y_std)
assert_almost_equal(ne_lr.predict(X_std), np.array(
[ -1.3054, -0.87029, 0.0000, 0.8703, 1.3054]),
decimal=4)
def test_multivariate_gradient_descent():
X = np.array([[1, 2], [2, 3], [4, 5], [6, 7], [7, 8]])
y = np.array([ 1, 2, 3, 4, 5])
X_std = (X - np.mean(X)) / X.std()
y_std = (y - np.mean(y)) / y.std()
gd_lr = LinearRegression(solver='gd', eta=0.01, epochs=100, random_seed=0)
gd_lr.fit(X_std, y_std)
assert_almost_equal(gd_lr.predict(X_std), np.array(
[ -1.3054, -0.87029, 0.0000, 0.8703, 1.3054]),
decimal=2)
def test_multivariate_stochastic_gradient_descent():
X = np.array([[1, 2], [2, 3], [4, 5], [6, 7], [7, 8]])
y = np.array([ 1, 2, 3, 4, 5])
X_std = (X - np.mean(X)) / X.std()
y_std = (y - np.mean(y)) / y.std()
sgd_lr = LinearRegression(solver='sgd', eta=0.001, epochs=1000, random_seed=0)
sgd_lr.fit(X_std, y_std)
assert_almost_equal(sgd_lr.predict(X_std), np.array(
[ -1.3054, -0.87029, 0.0000, 0.8703, 1.3054]),
decimal=2)
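

# Editor's note: a reference check added by the editor, not part of the
# original test file. It reproduces the closed-form normal-equation fit
# w = (X'X)^-1 X'y (with a prepended bias column) that the
# solver='normal_equation' cases above are expected to match.
def test_normal_equation_reference_sketch():
    X = np.array([1, 2, 3, 4, 5])[:, np.newaxis]
    y = np.array([1, 2, 3, 4, 5])
    Xb = np.hstack((np.ones((X.shape[0], 1)), X))    # add a bias column
    w = np.linalg.solve(Xb.T.dot(Xb), Xb.T.dot(y))   # solve (X'X) w = X'y
    assert_almost_equal(w, np.array([0.0, 1.0]), decimal=7)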
| illfelder/compute-image-packages | refs/heads/master | packages/python-google-compute-engine/google_compute_engine/instance_setup/instance_config.py | 6 |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library used to set up the instance's default configurations file.
Note that the configurations in
/etc/default/instance_configs.cfg.template override the values set in
/etc/default/instance_configs.cfg. The system instance_configs.cfg may be
overridden during package upgrade.
"""
import logging
import os
from google_compute_engine import config_manager
from google_compute_engine import constants
from google_compute_engine.compat import parser
from google_compute_engine.compat import stringio
class InstanceConfig(config_manager.ConfigManager):
"""Creates a defaults config file for instance configuration."""
instance_config = constants.SYSCONFDIR + '/instance_configs.cfg'
instance_config_distro = '%s.distro' % instance_config
instance_config_template = '%s.template' % instance_config
instance_config_script = os.path.abspath(__file__)
instance_config_header = (
'This file is automatically created at boot time by the %s script. Do '
'not edit this file directly. If you need to add items to this file, '
'create or edit %s instead and then run '
'/usr/bin/google_instance_setup.')
instance_config_options = {
'Accounts': {
'deprovision_remove': 'false',
'groups': 'adm,dip,docker,lxd,plugdev,video',
# The encrypted password is set to '*' for SSH on Linux systems
# without PAM.
#
# SSH uses '!' as its locked account token:
# https://github.com/openssh/openssh-portable/blob/master/configure.ac
#
# When the token is specified, SSH denies login:
# https://github.com/openssh/openssh-portable/blob/master/auth.c
#
# To solve the issue, make the password '*' which is also recognized
# as locked but does not prevent SSH login.
'gpasswd_add_cmd': 'gpasswd -a {user} {group}',
'gpasswd_remove_cmd': 'gpasswd -d {user} {group}',
'groupadd_cmd': 'groupadd {group}',
'useradd_cmd': 'useradd -m -s /bin/bash -p * {user}',
'userdel_cmd': 'userdel -r {user}',
'usermod_cmd': 'usermod -G {groups} {user}',
},
'Daemons': {
'accounts_daemon': 'true',
'clock_skew_daemon': 'true',
'ip_forwarding_daemon': 'true', # Deprecated.
'network_daemon': 'true',
},
'Instance': {
'instance_id': '0',
},
'InstanceSetup': {
'host_key_types': 'ecdsa,ed25519,rsa',
'optimize_local_ssd': 'true',
'network_enabled': 'true',
# WARNING: Do not change the value of 'set_boto_config' without first
# consulting the gsutil team (GoogleCloudPlatform/gsutil).
'set_boto_config': 'true',
'set_host_keys': 'true',
'set_multiqueue': 'true',
},
'IpForwarding': {
'ethernet_proto_id': '66',
'ip_aliases': 'true',
'target_instance_ips': 'true',
},
'MetadataScripts': {
'run_dir': '',
'startup': 'true',
'shutdown': 'true',
'default_shell': '/bin/bash',
},
'NetworkInterfaces': {
'setup': 'true',
'ip_forwarding': 'true',
'dhcp_command': '',
'dhclient_script': '/sbin/google-dhclient-script',
},
}
def __init__(self, logger=logging, instance_config_metadata=None):
"""Constructor.
Inherit from the ConfigManager class. Read the template for instance
defaults and write new sections and options. This prevents package
updates from overriding user set defaults.
Args:
logger: logger object, used to write to SysLog and serial port.
instance_config_metadata: string, a config file specified in metadata.
"""
self.logger = logger
self.instance_config_metadata = instance_config_metadata
self.instance_config_header %= (
self.instance_config_script, self.instance_config_template)
# User provided instance configs should always take precedence.
super(InstanceConfig, self).__init__(
config_file=self.instance_config_template,
config_header=self.instance_config_header)
# Use the instance config settings from metadata if specified. Then use
# settings in an instance config file if one exists. If a config
# file does not already exist, try to use the distro provided defaults. If
# no file exists, use the default configuration settings.
config_files = [self.instance_config, self.instance_config_distro]
config_defaults = []
if self.instance_config_metadata:
config = parser.Parser()
try:
config.read_file(stringio.StringIO(self.instance_config_metadata))
except parser.Error as e:
self.logger.error('Error parsing metadata configs: %s', str(e))
else:
config_defaults.append(
dict((s, dict(config.items(s))) for s in config.sections()))
for config_file in config_files:
if os.path.exists(config_file):
config = parser.Parser()
try:
config.read(config_file)
except parser.Error as e:
self.logger.error('Error parsing config file: %s', str(e))
else:
config_defaults.append(
dict((s, dict(config.items(s))) for s in config.sections()))
config_defaults.append(self.instance_config_options)
for defaults in config_defaults:
for section, options in sorted(defaults.items()):
for option, value in sorted(options.items()):
super(InstanceConfig, self).SetOption(
section, option, value, overwrite=False)
def WriteConfig(self):
"""Write the config values to the instance defaults file."""
super(InstanceConfig, self).WriteConfig(config_file=self.instance_config)
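

# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes the google_compute_engine package is installed and that the process
# may write under constants.SYSCONFDIR; the inline metadata config below is
# hypothetical.
if __name__ == '__main__':
  sample_metadata_config = '[InstanceSetup]\nset_host_keys = false\n'
  # Metadata values take precedence over distro and built-in defaults.
  InstanceConfig(instance_config_metadata=sample_metadata_config).WriteConfig()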
| ssh-odoo/scrapy | refs/heads/master | scrapy/core/spidermw.py | 80 |
"""
Spider Middleware manager
See documentation in docs/topics/spider-middleware.rst
"""
import six
from twisted.python.failure import Failure
from scrapy.middleware import MiddlewareManager
from scrapy.utils.defer import mustbe_deferred
from scrapy.utils.conf import build_component_list
def _isiterable(possible_iterator):
return hasattr(possible_iterator, '__iter__')
class SpiderMiddlewareManager(MiddlewareManager):
component_name = 'spider middleware'
@classmethod
def _get_mwlist_from_settings(cls, settings):
return build_component_list(settings['SPIDER_MIDDLEWARES_BASE'], \
settings['SPIDER_MIDDLEWARES'])
def _add_middleware(self, mw):
super(SpiderMiddlewareManager, self)._add_middleware(mw)
if hasattr(mw, 'process_spider_input'):
self.methods['process_spider_input'].append(mw.process_spider_input)
if hasattr(mw, 'process_spider_output'):
self.methods['process_spider_output'].insert(0, mw.process_spider_output)
if hasattr(mw, 'process_spider_exception'):
self.methods['process_spider_exception'].insert(0, mw.process_spider_exception)
if hasattr(mw, 'process_start_requests'):
self.methods['process_start_requests'].insert(0, mw.process_start_requests)
def scrape_response(self, scrape_func, response, request, spider):
fname = lambda f:'%s.%s' % (
six.get_method_self(f).__class__.__name__,
six.get_method_function(f).__name__)
def process_spider_input(response):
for method in self.methods['process_spider_input']:
try:
result = method(response=response, spider=spider)
assert result is None, \
                            'Middleware %s must return None or ' \
'raise an exception, got %s ' \
% (fname(method), type(result))
except:
return scrape_func(Failure(), request, spider)
return scrape_func(response, request, spider)
def process_spider_exception(_failure):
exception = _failure.value
for method in self.methods['process_spider_exception']:
result = method(response=response, exception=exception, spider=spider)
assert result is None or _isiterable(result), \
                    'Middleware %s must return None or an iterable object, got %s ' % \
(fname(method), type(result))
if result is not None:
return result
return _failure
def process_spider_output(result):
for method in self.methods['process_spider_output']:
result = method(response=response, result=result, spider=spider)
assert _isiterable(result), \
                    'Middleware %s must return an iterable object, got %s ' % \
(fname(method), type(result))
return result
dfd = mustbe_deferred(process_spider_input, response)
dfd.addErrback(process_spider_exception)
dfd.addCallback(process_spider_output)
return dfd
def process_start_requests(self, start_requests, spider):
return self._process_chain('process_start_requests', start_requests, spider)
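

# Editor's note: a minimal spider-middleware sketch, not part of the original
# module. A component like this, enabled through the SPIDER_MIDDLEWARES
# setting, exposes exactly the hook names that _add_middleware() above looks
# for; the class name is hypothetical.
class ExampleSpiderMiddleware(object):

    def process_spider_input(self, response, spider):
        # Must return None or raise an exception (see process_spider_input above).
        return None

    def process_spider_output(self, response, result, spider):
        # Must return an iterable of Request/item objects.
        for request_or_item in result:
            yield request_or_item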
| jbreitbart/fast-lib | refs/heads/master | vendor/yaml-cpp/test/gmock-1.7.0/gtest/test/gtest_xml_output_unittest.py | 1815 |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
    self.assertTrue(
        match,
        'XML datetime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
      # p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| recursion-cn/recursion | refs/heads/master | bin/activate_this.py | 1076 |
"""By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
try:
__file__
except NameError:
raise AssertionError(
"You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os
old_os_path = os.environ.get('PATH', '')
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if sys.platform == 'win32':
site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
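

# Editor's note: a usage sketch, not part of the original script, showing how
# another interpreter would typically run this file; the default path below is
# hypothetical. exec() is the Python 3 replacement for the execfile() call
# mentioned in the module docstring.
def _activate_virtualenv_sketch(activate_this='/path/to/venv/bin/activate_this.py'):
    with open(activate_this) as f:
        exec(f.read(), dict(__file__=activate_this))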
| jeremiahsavage/cwltool | refs/heads/master | cwltool/workflow.py | 1 |
from . import job
from . import draft2tool
from .utils import aslist
from .process import Process, get_feature, empty_subtree, shortname, uniquename
from .errors import WorkflowException
import copy
import logging
import random
import os
from collections import namedtuple
import functools
import schema_salad.validate as validate
import urlparse
import tempfile
import shutil
import json
import schema_salad
from . import expression
from .load_tool import load_tool
from typing import Any, Callable, cast, Dict, Generator, Iterable, List, Text, Union
_logger = logging.getLogger("cwltool")
WorkflowStateItem = namedtuple('WorkflowStateItem', ['parameter', 'value'])
def defaultMakeTool(toolpath_object, **kwargs):
# type: (Dict[Text, Any], **Any) -> Process
if not isinstance(toolpath_object, dict):
raise WorkflowException(u"Not a dict: `%s`" % toolpath_object)
if "class" in toolpath_object:
if toolpath_object["class"] == "CommandLineTool":
return draft2tool.CommandLineTool(toolpath_object, **kwargs)
elif toolpath_object["class"] == "ExpressionTool":
return draft2tool.ExpressionTool(toolpath_object, **kwargs)
elif toolpath_object["class"] == "Workflow":
return Workflow(toolpath_object, **kwargs)
raise WorkflowException(u"Missing or invalid 'class' field in %s, expecting one of: CommandLineTool, ExpressionTool, Workflow" % toolpath_object["id"])
def findfiles(wo, fn=None): # type: (Any, List) -> List[Dict[Text, Any]]
if fn is None:
fn = []
if isinstance(wo, dict):
if wo.get("class") == "File":
fn.append(wo)
findfiles(wo.get("secondaryFiles", None), fn)
else:
for w in wo.values():
findfiles(w, fn)
elif isinstance(wo, list):
for w in wo:
findfiles(w, fn)
return fn
def match_types(sinktype, src, iid, inputobj, linkMerge, valueFrom):
# type: (Union[List[Text],Text], WorkflowStateItem, Text, Dict[Text, Any], Text, Text) -> bool
if isinstance(sinktype, list):
# Sink is union type
for st in sinktype:
if match_types(st, src, iid, inputobj, linkMerge, valueFrom):
return True
elif isinstance(src.parameter["type"], list):
# Source is union type
# Check that every source type is compatible with the sink.
for st in src.parameter["type"]:
srccopy = copy.deepcopy(src)
srccopy.parameter["type"] = st
if not match_types(st, srccopy, iid, inputobj, linkMerge, valueFrom):
return False
return True
elif linkMerge:
if iid not in inputobj:
inputobj[iid] = []
if linkMerge == "merge_nested":
inputobj[iid].append(src.value)
elif linkMerge == "merge_flattened":
if isinstance(src.value, list):
inputobj[iid].extend(src.value)
else:
inputobj[iid].append(src.value)
else:
raise WorkflowException(u"Unrecognized linkMerge enum '%s'" % linkMerge)
return True
elif valueFrom is not None or can_assign_src_to_sink(src.parameter["type"], sinktype) or sinktype == "Any":
# simply assign the value from state to input
inputobj[iid] = copy.deepcopy(src.value)
return True
return False
def can_assign_src_to_sink(src, sink): # type: (Any, Any) -> bool
"""Check for identical type specifications, ignoring extra keys like inputBinding.
"""
if sink == "Any":
return True
if isinstance(src, dict) and isinstance(sink, dict):
if src["type"] == "array" and sink["type"] == "array":
return can_assign_src_to_sink(src["items"], sink["items"])
elif src["type"] == "record" and sink["type"] == "record":
return _compare_records(src, sink)
elif isinstance(src, list):
for t in src:
if can_assign_src_to_sink(t, sink):
return True
elif isinstance(sink, list):
for t in sink:
if can_assign_src_to_sink(src, t):
return True
else:
return src == sink
return False
def _compare_records(src, sink):
# type: (Dict[Text, Any], Dict[Text, Any]) -> bool
"""Compare two records, ensuring they have compatible fields.
This handles normalizing record names, which will be relative to workflow
step, so that they can be compared.
"""
def _rec_fields(rec): # type: (Dict[Text, Any]) -> Dict[Text, Any]
out = {}
for field in rec["fields"]:
name = shortname(field["name"])
out[name] = field["type"]
return out
srcfields = _rec_fields(src)
sinkfields = _rec_fields(sink)
for key in sinkfields.iterkeys():
if (not can_assign_src_to_sink(
srcfields.get(key, "null"), sinkfields.get(key, "null"))
and sinkfields.get(key) is not None):
_logger.info("Record comparison failure for %s and %s\n"
"Did not match fields for %s: %s and %s" %
(src["name"], sink["name"], key, srcfields.get(key),
sinkfields.get(key)))
return False
return True
def object_from_state(state, parms, frag_only, supportsMultipleInput, sourceField):
# type: (Dict[Text, WorkflowStateItem], List[Dict[Text, Any]], bool, bool, Text) -> Dict[Text, Any]
inputobj = {} # type: Dict[Text, Any]
for inp in parms:
iid = inp["id"]
if frag_only:
iid = shortname(iid)
if sourceField in inp:
if (isinstance(inp[sourceField], list) and not
supportsMultipleInput):
raise WorkflowException(
"Workflow contains multiple inbound links to a single "
"parameter but MultipleInputFeatureRequirement is not "
"declared.")
connections = aslist(inp[sourceField])
for src in connections:
if src in state and state[src] is not None:
if not match_types(
inp["type"], state[src], iid, inputobj,
inp.get("linkMerge", ("merge_nested"
if len(connections) > 1 else None)),
valueFrom=inp.get("valueFrom")):
raise WorkflowException(
u"Type mismatch between source '%s' (%s) and "
"sink '%s' (%s)" % (src,
state[src].parameter["type"], inp["id"],
inp["type"]))
elif src not in state:
raise WorkflowException(
u"Connect source '%s' on parameter '%s' does not "
"exist" % (src, inp["id"]))
else:
return None
elif "default" in inp:
inputobj[iid] = inp["default"]
elif "valueFrom" in inp:
inputobj[iid] = None
else:
raise WorkflowException(u"Value for %s not specified" % (inp["id"]))
return inputobj
class WorkflowJobStep(object):
def __init__(self, step): # type: (Any) -> None
self.step = step
self.tool = step.tool
self.id = step.id
self.submitted = False
self.completed = False
self.iterable = None # type: Iterable
self.name = uniquename(u"step %s" % shortname(self.id))
def job(self, joborder, output_callback, **kwargs):
# type: (Dict[Text, Text], functools.partial[None], **Any) -> Generator
kwargs["part_of"] = self.name
kwargs["name"] = shortname(self.id)
for j in self.step.job(joborder, output_callback, **kwargs):
yield j
class WorkflowJob(object):
def __init__(self, workflow, **kwargs):
# type: (Workflow, **Any) -> None
self.workflow = workflow
self.tool = workflow.tool
self.steps = [WorkflowJobStep(s) for s in workflow.steps]
self.state = None # type: Dict[Text, WorkflowStateItem]
self.processStatus = None # type: Text
if "outdir" in kwargs:
self.outdir = kwargs["outdir"]
elif "tmp_outdir_prefix" in kwargs:
self.outdir = tempfile.mkdtemp(prefix=kwargs["tmp_outdir_prefix"])
else:
# tmp_outdir_prefix defaults to tmp, so this is unlikely to be used
self.outdir = tempfile.mkdtemp()
self.name = uniquename(u"workflow %s" % kwargs.get("name", shortname(self.workflow.tool.get("id", "embedded"))))
_logger.debug(u"[%s] initialized from %s", self.name, self.tool.get("id", "workflow embedded in %s" % kwargs.get("part_of")))
def receive_output(self, step, outputparms, jobout, processStatus):
# type: (WorkflowJobStep, List[Dict[Text,Text]], Dict[Text,Text], Text) -> None
for i in outputparms:
if "id" in i:
if i["id"] in jobout:
self.state[i["id"]] = WorkflowStateItem(i, jobout[i["id"]])
else:
_logger.error(u"Output is missing expected field %s" % i["id"])
processStatus = "permanentFail"
_logger.debug(u"[%s] produced output %s", step.name, json.dumps(jobout, indent=4))
if processStatus != "success":
if self.processStatus != "permanentFail":
self.processStatus = processStatus
_logger.warn(u"[%s] completion status is %s", step.name, processStatus)
else:
_logger.info(u"[%s] completion status is %s", step.name, processStatus)
step.completed = True
def try_make_job(self, step, **kwargs):
# type: (WorkflowJobStep, **Any) -> Generator
inputparms = step.tool["inputs"]
outputparms = step.tool["outputs"]
supportsMultipleInput = bool(self.workflow.get_requirement(
"MultipleInputFeatureRequirement")[0])
try:
inputobj = object_from_state(
self.state, inputparms, False, supportsMultipleInput, "source")
if inputobj is None:
_logger.debug(u"[%s] job step %s not ready", self.name, step.id)
return
if step.submitted:
return
_logger.debug(u"[%s] starting %s", self.name, step.name)
callback = functools.partial(self.receive_output, step, outputparms)
valueFrom = {
i["id"]: i["valueFrom"] for i in step.tool["inputs"]
if "valueFrom" in i}
if len(valueFrom) > 0 and not bool(self.workflow.get_requirement("StepInputExpressionRequirement")[0]):
raise WorkflowException("Workflow step contains valueFrom but StepInputExpressionRequirement not in requirements")
vfinputs = {shortname(k): v for k,v in inputobj.iteritems()}
def postScatterEval(io):
# type: (Dict[Text, Any]) -> Dict[Text, Any]
shortio = {shortname(k): v for k,v in io.iteritems()}
def valueFromFunc(k, v): # type: (Any, Any) -> Any
if k in valueFrom:
return expression.do_eval(
valueFrom[k], shortio, self.workflow.requirements,
None, None, {}, context=v)
else:
return v
return {k: valueFromFunc(k, v) for k,v in io.items()}
if "scatter" in step.tool:
scatter = aslist(step.tool["scatter"])
method = step.tool.get("scatterMethod")
if method is None and len(scatter) != 1:
raise WorkflowException("Must specify scatterMethod when scattering over multiple inputs")
kwargs["postScatterEval"] = postScatterEval
if method == "dotproduct" or method is None:
jobs = dotproduct_scatter(step, inputobj, scatter,
cast( # known bug with mypy
# https://github.com/python/mypy/issues/797
Callable[[Any], Any],callback), **kwargs)
elif method == "nested_crossproduct":
jobs = nested_crossproduct_scatter(step, inputobj,
scatter, cast(Callable[[Any], Any], callback),
# known bug in mypy
# https://github.com/python/mypy/issues/797
**kwargs)
elif method == "flat_crossproduct":
jobs = cast(Generator,
flat_crossproduct_scatter(step, inputobj,
scatter,
cast(Callable[[Any], Any],
# known bug in mypy
# https://github.com/python/mypy/issues/797
callback), 0, **kwargs))
else:
_logger.debug(u"[job %s] job input %s", step.name, json.dumps(inputobj, indent=4))
inputobj = postScatterEval(inputobj)
_logger.debug(u"[job %s] evaluated job input to %s", step.name, json.dumps(inputobj, indent=4))
jobs = step.job(inputobj, callback, **kwargs)
step.submitted = True
for j in jobs:
yield j
except WorkflowException:
raise
except Exception:
_logger.exception("Unhandled exception")
self.processStatus = "permanentFail"
step.completed = True
def run(self, **kwargs):
_logger.debug(u"[%s] workflow starting", self.name)
def job(self, joborder, output_callback, **kwargs):
# type: (Dict[Text, Any], Callable[[Any, Any], Any], **Any) -> Generator
self.state = {}
self.processStatus = "success"
if "outdir" in kwargs:
del kwargs["outdir"]
for i in self.tool["inputs"]:
iid = shortname(i["id"])
if iid in joborder:
self.state[i["id"]] = WorkflowStateItem(i, copy.deepcopy(joborder[iid]))
elif "default" in i:
self.state[i["id"]] = WorkflowStateItem(i, copy.deepcopy(i["default"]))
else:
raise WorkflowException(u"Input '%s' not in input object and does not have a default value." % (i["id"]))
for s in self.steps:
for out in s.tool["outputs"]:
self.state[out["id"]] = None
completed = 0
while completed < len(self.steps) and self.processStatus == "success":
made_progress = False
for step in self.steps:
if kwargs.get("on_error", "stop") == "stop" and self.processStatus != "success":
break
if not step.submitted:
step.iterable = self.try_make_job(step, **kwargs)
if step.iterable:
for newjob in step.iterable:
if kwargs.get("on_error", "stop") == "stop" and self.processStatus != "success":
break
if newjob:
made_progress = True
yield newjob
else:
break
completed = sum(1 for s in self.steps if s.completed)
if not made_progress and completed < len(self.steps):
yield None
supportsMultipleInput = bool(self.workflow.get_requirement("MultipleInputFeatureRequirement")[0])
wo = object_from_state(self.state, self.tool["outputs"], True, supportsMultipleInput, "outputSource")
if wo is None:
raise WorkflowException("Output for workflow not available")
_logger.info(u"[%s] outdir is %s", self.name, self.outdir)
output_callback(wo, self.processStatus)
class Workflow(Process):
def __init__(self, toolpath_object, **kwargs):
# type: (Dict[Text, Any], **Any) -> None
super(Workflow, self).__init__(toolpath_object, **kwargs)
kwargs["requirements"] = self.requirements
kwargs["hints"] = self.hints
makeTool = kwargs.get("makeTool")
self.steps = [WorkflowStep(step, n, **kwargs) for n,step in enumerate(self.tool.get("steps", []))]
random.shuffle(self.steps)
# TODO: statically validate data links instead of doing it at runtime.
def job(self, joborder, output_callback, **kwargs):
# type: (Dict[Text, Text], Callable[[Any, Any], Any], **Any) -> Generator
builder = self._init_job(joborder, **kwargs)
wj = WorkflowJob(self, **kwargs)
yield wj
kwargs["part_of"] = u"workflow %s" % wj.name
for w in wj.job(builder.job, output_callback, **kwargs):
yield w
def visit(self, op):
op(self.tool)
for s in self.steps:
s.visit(op)
class WorkflowStep(Process):
def __init__(self, toolpath_object, pos, **kwargs):
# type: (Dict[Text, Any], int, **Any) -> None
if "id" in toolpath_object:
self.id = toolpath_object["id"]
else:
self.id = "#step" + Text(pos)
kwargs["requirements"] = kwargs.get("requirements", []) + toolpath_object.get("requirements", [])
kwargs["hints"] = kwargs.get("hints", []) + toolpath_object.get("hints", [])
try:
if isinstance(toolpath_object["run"], dict):
self.embedded_tool = kwargs.get("makeTool")(toolpath_object["run"], **kwargs)
else:
self.embedded_tool = load_tool(
toolpath_object["run"], kwargs.get("makeTool"), kwargs,
enable_dev=kwargs.get("enable_dev"),
strict=kwargs.get("strict"))
except validate.ValidationException as v:
raise WorkflowException(
u"Tool definition %s failed validation:\n%s" %
(toolpath_object["run"], validate.indent(str(v))))
self.tool = toolpath_object = copy.deepcopy(toolpath_object)
for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
toolpath_object[toolfield] = []
for step_entry in toolpath_object[stepfield]:
if isinstance(step_entry, (str, unicode)):
param = {} # type: Dict[Text, Any]
inputid = step_entry
else:
param = step_entry.copy()
inputid = step_entry["id"]
shortinputid = shortname(inputid)
found = False
for tool_entry in self.embedded_tool.tool[toolfield]:
frag = shortname(tool_entry["id"])
if frag == shortinputid:
param.update(tool_entry)
found = True
break
if not found:
if stepfield == "in":
param["type"] = "Any"
else:
raise WorkflowException("[%s] Workflow step output '%s' not found in the outputs of the tool (expected one of '%s')" % (
self.id, shortname(step_entry), "', '".join([shortname(tool_entry["id"]) for tool_entry in self.embedded_tool.tool[toolfield]])))
param["id"] = inputid
toolpath_object[toolfield].append(param)
super(WorkflowStep, self).__init__(toolpath_object, **kwargs)
if self.embedded_tool.tool["class"] == "Workflow":
(feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
if not feature:
raise WorkflowException("Workflow contains embedded workflow but SubworkflowFeatureRequirement not in requirements")
if "scatter" in self.tool:
(feature, _) = self.get_requirement("ScatterFeatureRequirement")
if not feature:
raise WorkflowException("Workflow contains scatter but ScatterFeatureRequirement not in requirements")
inputparms = copy.deepcopy(self.tool["inputs"])
outputparms = copy.deepcopy(self.tool["outputs"])
scatter = aslist(self.tool["scatter"])
method = self.tool.get("scatterMethod")
if method is None and len(scatter) != 1:
raise WorkflowException("Must specify scatterMethod when scattering over multiple inputs")
inp_map = {i["id"]: i for i in inputparms}
for s in scatter:
if s not in inp_map:
raise WorkflowException(u"Scatter parameter '%s' does not correspond to an input parameter of this step, inputs are %s" % (s, inp_map.keys()))
inp_map[s]["type"] = {"type": "array", "items": inp_map[s]["type"]}
if self.tool.get("scatterMethod") == "nested_crossproduct":
nesting = len(scatter)
else:
nesting = 1
for r in xrange(0, nesting):
for i in outputparms:
i["type"] = {"type": "array", "items": i["type"]}
self.tool["inputs"] = inputparms
self.tool["outputs"] = outputparms
def receive_output(self, output_callback, jobout, processStatus):
# type: (Callable[...,Any], Dict[Text, Text], Text) -> None
#_logger.debug("WorkflowStep output from run is %s", jobout)
output = {}
for i in self.tool["outputs"]:
field = shortname(i["id"])
if field in jobout:
output[i["id"]] = jobout[field]
else:
processStatus = "permanentFail"
output_callback(output, processStatus)
def job(self, joborder, output_callback, **kwargs):
# type: (Dict[Text, Any], Callable[...,Any], **Any) -> Generator
for i in self.tool["inputs"]:
p = i["id"]
field = shortname(p)
joborder[field] = joborder[i["id"]]
del joborder[i["id"]]
try:
for t in self.embedded_tool.job(joborder,
functools.partial(self.receive_output, output_callback),
**kwargs):
yield t
except WorkflowException:
_logger.error(u"Exception on step '%s'", kwargs.get("name"))
raise
except Exception as e:
_logger.exception("Unexpected exception")
raise WorkflowException(Text(e))
def visit(self, op):
self.embedded_tool.visit(op)
class ReceiveScatterOutput(object):
def __init__(self, output_callback, dest):
# type: (Callable[..., Any], Dict[Text,List[Text]]) -> None
self.dest = dest
self.completed = 0
self.processStatus = u"success"
self.total = None # type: int
self.output_callback = output_callback
def receive_scatter_output(self, index, jobout, processStatus):
# type: (int, Dict[Text, Text], Text) -> None
for k,v in jobout.items():
self.dest[k][index] = v
if processStatus != "success":
if self.processStatus != "permanentFail":
self.processStatus = processStatus
self.completed += 1
if self.completed == self.total:
self.output_callback(self.dest, self.processStatus)
def setTotal(self, total): # type: (int) -> None
self.total = total
if self.completed == self.total:
self.output_callback(self.dest, self.processStatus)
def parallel_steps(steps, rc, kwargs): # type: (List[Generator], ReceiveScatterOutput, Dict[str, Any]) -> Generator
while rc.completed < rc.total:
made_progress = False
for step in steps:
if kwargs.get("on_error", "stop") == "stop" and rc.processStatus != "success":
break
for j in step:
if kwargs.get("on_error", "stop") == "stop" and rc.processStatus != "success":
break
if j:
made_progress = True
yield j
else:
break
if not made_progress and rc.completed < rc.total:
yield None
def dotproduct_scatter(process, joborder, scatter_keys, output_callback, **kwargs):
# type: (WorkflowJobStep, Dict[Text, Any], List[Text], Callable[..., Any], **Any) -> Generator
l = None
for s in scatter_keys:
if l is None:
l = len(joborder[s])
elif l != len(joborder[s]):
raise WorkflowException("Length of input arrays must be equal when performing dotproduct scatter.")
output = {} # type: Dict[Text,List[Text]]
for i in process.tool["outputs"]:
output[i["id"]] = [None] * l
rc = ReceiveScatterOutput(output_callback, output)
steps = []
for n in range(0, l):
jo = copy.copy(joborder)
for s in scatter_keys:
jo[s] = joborder[s][n]
jo = kwargs["postScatterEval"](jo)
steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, n), **kwargs))
rc.setTotal(l)
return parallel_steps(steps, rc, kwargs)
def nested_crossproduct_scatter(process, joborder, scatter_keys, output_callback, **kwargs):
# type: (WorkflowJobStep, Dict[Text, Any], List[Text], Callable[..., Any], **Any) -> Generator
scatter_key = scatter_keys[0]
l = len(joborder[scatter_key])
output = {} # type: Dict[Text,List[Text]]
for i in process.tool["outputs"]:
output[i["id"]] = [None] * l
rc = ReceiveScatterOutput(output_callback, output)
steps = []
for n in range(0, l):
jo = copy.copy(joborder)
jo[scatter_key] = joborder[scatter_key][n]
if len(scatter_keys) == 1:
jo = kwargs["postScatterEval"](jo)
steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, n), **kwargs))
else:
steps.append(nested_crossproduct_scatter(process, jo,
scatter_keys[1:], cast( # known bug with mypy
                # https://github.com/python/mypy/issues/797
Callable[[Any], Any],
functools.partial(rc.receive_scatter_output, n)), **kwargs))
rc.setTotal(l)
return parallel_steps(steps, rc, kwargs)
def crossproduct_size(joborder, scatter_keys):
# type: (Dict[Text, Any], List[Text]) -> int
scatter_key = scatter_keys[0]
if len(scatter_keys) == 1:
sum = len(joborder[scatter_key])
else:
sum = 0
for n in range(0, len(joborder[scatter_key])):
jo = copy.copy(joborder)
jo[scatter_key] = joborder[scatter_key][n]
sum += crossproduct_size(joborder, scatter_keys[1:])
return sum
def flat_crossproduct_scatter(process, joborder, scatter_keys, output_callback, startindex, **kwargs):
# type: (WorkflowJobStep, Dict[Text, Any], List[Text], Union[ReceiveScatterOutput,Callable[..., Any]], int, **Any) -> Union[List[Generator], Generator]
scatter_key = scatter_keys[0]
l = len(joborder[scatter_key])
rc = None # type: ReceiveScatterOutput
if startindex == 0 and not isinstance(output_callback, ReceiveScatterOutput):
output = {} # type: Dict[Text,List[Text]]
for i in process.tool["outputs"]:
output[i["id"]] = [None] * crossproduct_size(joborder, scatter_keys)
rc = ReceiveScatterOutput(output_callback, output)
elif isinstance(output_callback, ReceiveScatterOutput):
rc = output_callback
else:
raise Exception("Unhandled code path. Please report this.")
steps = []
put = startindex
for n in range(0, l):
jo = copy.copy(joborder)
jo[scatter_key] = joborder[scatter_key][n]
if len(scatter_keys) == 1:
jo = kwargs["postScatterEval"](jo)
steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, put), **kwargs))
put += 1
else:
add = flat_crossproduct_scatter(process, jo, scatter_keys[1:], rc, put, **kwargs)
put += len(cast(List[Generator], add))
steps.extend(add)
if startindex == 0 and not isinstance(output_callback, ReceiveScatterOutput):
rc.setTotal(put)
return parallel_steps(steps, rc, kwargs)
else:
return steps
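

# Editor's note: a small worked example, not part of the original module. For
# a hypothetical job order with two scattered inputs of lengths 2 and 3, a
# flat or nested cross product produces 2 * 3 = 6 jobs (dotproduct scatter
# would instead require the two arrays to have equal length).
if __name__ == "__main__":
    example_joborder = {"a": [1, 2], "b": ["x", "y", "z"]}
    print(crossproduct_size(example_joborder, ["a", "b"]))  # prints 6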
| hogarthj/ansible | refs/heads/devel | test/units/modules/remote_management/oneview/test_oneview_fcoe_network_facts.py | 77 |
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.compat.tests import unittest
from oneview_module_loader import FcoeNetworkFactsModule
from hpe_test_utils import FactsParamsTestCase
ERROR_MSG = 'Fake message error'
PARAMS_GET_ALL = dict(
config='config.json',
name=None
)
PARAMS_GET_BY_NAME = dict(
config='config.json',
name="Test FCoE Networks"
)
PRESENT_NETWORKS = [{
"name": "Test FCoE Networks",
"uri": "/rest/fcoe-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5"
}]
class FcoeNetworkFactsSpec(unittest.TestCase, FactsParamsTestCase):
def setUp(self):
self.configure_mocks(self, FcoeNetworkFactsModule)
self.fcoe_networks = self.mock_ov_client.fcoe_networks
FactsParamsTestCase.configure_client_mock(self, self.fcoe_networks)
def test_should_get_all_fcoe_network(self):
self.fcoe_networks.get_all.return_value = PRESENT_NETWORKS
self.mock_ansible_module.params = PARAMS_GET_ALL
FcoeNetworkFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(fcoe_networks=PRESENT_NETWORKS)
)
def test_should_get_fcoe_network_by_name(self):
self.fcoe_networks.get_by.return_value = PRESENT_NETWORKS
self.mock_ansible_module.params = PARAMS_GET_BY_NAME
FcoeNetworkFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(fcoe_networks=PRESENT_NETWORKS)
)
if __name__ == '__main__':
unittest.main()
| ptrendx/mxnet | refs/heads/master | python/mxnet/initializer.py | 1 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Weight initializer."""
from __future__ import absolute_import, print_function
import re
import logging
import warnings
import json
from math import sqrt
import numpy as np
from .base import string_types
from .ndarray import NDArray, load
from . import random
from . import registry
from . import ndarray
# inherit str for backward compatibility
class InitDesc(str):
"""
Descriptor for the initialization pattern.
Parameters
----------
name : str
Name of variable.
attrs : dict of str to str
Attributes of this variable taken from ``Symbol.attr_dict``.
global_init : Initializer
Global initializer to fallback to.
"""
def __new__(cls, name, attrs=None, global_init=None):
ret = super(InitDesc, cls).__new__(cls, name)
ret.attrs = attrs or {}
ret.global_init = global_init
return ret
class Initializer(object):
"""The base class of an initializer."""
def __init__(self, **kwargs):
self._kwargs = kwargs
self._verbose = False
self._print_func = None
def set_verbosity(self, verbose=False, print_func=None):
"""Switch on/off verbose mode
Parameters
----------
verbose : bool
switch on/off verbose mode
print_func : function
A function that computes statistics of initialized arrays.
            Takes an `NDArray` and returns a `str`. Defaults to the root-mean-square
            value, str((norm(x)/sqrt(x.size)).asscalar()).
"""
self._verbose = verbose
if print_func is None:
def asum_stat(x):
"""returns |x|/size(x), async execution."""
return str((ndarray.norm(x)/sqrt(x.size)).asscalar())
print_func = asum_stat
self._print_func = print_func
return self
def _verbose_print(self, desc, init, arr):
"""Internal verbose print function
Parameters
----------
desc : InitDesc or str
name of the array
init : str
initializer pattern
arr : NDArray
initialized array
"""
if self._verbose and self._print_func:
logging.info('Initialized %s as %s: %s', desc, init, self._print_func(arr))
def dumps(self):
"""Saves the initializer to string
Returns
-------
str
JSON formatted string that describes the initializer.
Examples
--------
>>> # Create initializer and retrieve its parameters
...
>>> init = mx.init.Normal(0.5)
>>> init.dumps()
'["normal", {"sigma": 0.5}]'
>>> init = mx.init.Xavier(factor_type="in", magnitude=2.34)
>>> init.dumps()
'["xavier", {"rnd_type": "uniform", "magnitude": 2.34, "factor_type": "in"}]'
"""
return json.dumps([self.__class__.__name__.lower(), self._kwargs])
def __call__(self, desc, arr):
"""Initialize an array
Parameters
----------
desc : InitDesc
Initialization pattern descriptor.
arr : NDArray
The array to be initialized.
"""
if not isinstance(desc, InitDesc):
self._legacy_init(desc, arr)
return
if desc.global_init is None:
desc.global_init = self
init = desc.attrs.get('__init__', "")
if init:
# when calling Variable initializer
create(init)._init_weight(desc, arr)
self._verbose_print(desc, init, arr)
else:
# register nnvm::FSetInputVariableAttrs in the backend for new patterns
# don't add new cases here.
if desc.endswith('weight'):
self._init_weight(desc, arr)
self._verbose_print(desc, 'weight', arr)
elif desc.endswith('bias'):
self._init_bias(desc, arr)
self._verbose_print(desc, 'bias', arr)
elif desc.endswith('gamma'):
self._init_gamma(desc, arr)
self._verbose_print(desc, 'gamma', arr)
elif desc.endswith('beta'):
self._init_beta(desc, arr)
self._verbose_print(desc, 'beta', arr)
elif desc.endswith('min'):
self._init_zero(desc, arr)
self._verbose_print(desc, 'min', arr)
elif desc.endswith('max'):
self._init_one(desc, arr)
self._verbose_print(desc, 'max', arr)
else:
self._init_default(desc, arr)
def _legacy_init(self, name, arr):
"""Legacy initialization method.
Parameters
----------
name : str
Name of corresponding NDArray.
arr : NDArray
NDArray to be initialized.
"""
warnings.warn(
"\033[91mCalling initializer with init(str, NDArray) has been deprecated." \
"please use init(mx.init.InitDesc(...), NDArray) instead.\033[0m",
DeprecationWarning, stacklevel=3)
if not isinstance(name, string_types):
raise TypeError('name must be string')
if not isinstance(arr, NDArray):
raise TypeError('arr must be NDArray')
if name.startswith('upsampling'):
self._init_bilinear(name, arr)
elif name.startswith('stn_loc') and name.endswith('weight'):
self._init_zero(name, arr)
elif name.startswith('stn_loc') and name.endswith('bias'):
self._init_loc_bias(name, arr)
elif name.endswith('bias'):
self._init_bias(name, arr)
elif name.endswith('gamma'):
self._init_gamma(name, arr)
elif name.endswith('beta'):
self._init_beta(name, arr)
elif name.endswith('weight'):
self._init_weight(name, arr)
elif name.endswith("moving_mean"):
self._init_zero(name, arr)
elif name.endswith("moving_var"):
self._init_one(name, arr)
elif name.endswith("moving_inv_var"):
self._init_zero(name, arr)
elif name.endswith("moving_avg"):
self._init_zero(name, arr)
elif name.endswith('min'):
self._init_zero(name, arr)
elif name.endswith('max'):
self._init_one(name, arr)
else:
self._init_default(name, arr)
def _init_bilinear(self, _, arr):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
f = np.ceil(shape[3] / 2.)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
def _init_loc_bias(self, _, arr):
shape = arr.shape
assert(shape[0] == 6)
arr[:] = np.array([1.0, 0, 0, 0, 1.0, 0])
def _init_zero(self, _, arr):
arr[:] = 0.0
def _init_one(self, _, arr):
arr[:] = 1.0
def _init_bias(self, _, arr):
arr[:] = 0.0
def _init_gamma(self, _, arr):
arr[:] = 1.0
def _init_beta(self, _, arr):
arr[:] = 0.0
def _init_weight(self, name, arr):
"""Abstract method to Initialize weight."""
raise NotImplementedError("Must override it")
def _init_default(self, name, _):
raise ValueError(
'Unknown initialization pattern for %s. ' \
'Default initialization is now limited to '\
'"weight", "bias", "gamma" (1.0), and "beta" (0.0).' \
'Please use mx.sym.Variable(init=mx.init.*) to set initialization pattern' % name)
# pylint: disable=invalid-name
_register = registry.get_register_func(Initializer, 'initializer')
alias = registry.get_alias_func(Initializer, 'initializer')
create = registry.get_create_func(Initializer, 'initializer')
# pylint: enable=invalid-name
def register(klass):
"""Registers a custom initializer.
Custom initializers can be created by extending `mx.init.Initializer` and implementing the
required functions like `_init_weight` and `_init_bias`. The created initializer must be
registered using `mx.init.register` before it can be called by name.
Parameters
----------
klass : class
A subclass of `mx.init.Initializer` that needs to be registered as a custom initializer.
Example
-------
>>> # Create and register a custom initializer that
... # initializes weights to 0.1 and biases to 1.
...
>>> @mx.init.register
... @alias('myinit')
... class CustomInit(mx.init.Initializer):
... def __init__(self):
... super(CustomInit, self).__init__()
... def _init_weight(self, _, arr):
... arr[:] = 0.1
... def _init_bias(self, _, arr):
... arr[:] = 1
...
>>> # Module is an instance of 'mxnet.module.Module'
...
>>> module.init_params("custominit")
>>> # module.init_params("myinit")
>>> # module.init_params(CustomInit())
"""
return _register(klass)
class Load(object):
"""Initializes variables by loading data from file or dict.
    **Note** Load will drop the ``arg:`` or ``aux:`` prefix from each name and
    initialize the variables whose remaining names match.
Parameters
----------
param: str or dict of str->`NDArray`
Parameter file or dict mapping name to NDArray.
default_init: Initializer
Default initializer when name is not found in `param`.
verbose: bool
Flag for enabling logging of source when initializing.
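    Example
    -------
    >>> # A minimal sketch, assuming 'module' is an instance of 'mxnet.module.Module'
    ... # and 'net.params' is a parameter file saved earlier (both names are assumptions):
    ... # load matching variables from the file and fall back to Uniform for the rest.
    ...
    >>> init = mx.init.Load('net.params', default_init=mx.init.Uniform(0.1), verbose=True)
    >>> module.init_params(init)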
"""
def __init__(self, param, default_init=None, verbose=False):
if isinstance(param, str):
param = load(param)
assert isinstance(param, dict)
self.param = {}
for name, arr in param.items():
if name.startswith('arg:') or name.startswith('aux:'):
self.param[name[4:]] = arr
else:
self.param[name] = arr
self.default_init = default_init
self.verbose = verbose
def __call__(self, name, arr):
if name in self.param:
assert arr.shape == self.param[name].shape, \
'Parameter %s cannot be initialized from loading. '%name + \
'Shape mismatch, target %s vs loaded %s'%(str(arr.shape),
self.param[name].shape)
arr[:] = self.param[name]
if self.verbose:
logging.info('Initialized %s by loading', name)
else:
assert self.default_init is not None, \
"Cannot Initialize %s. Not found in loaded param "%name + \
"and no default Initializer is provided."
self.default_init(name, arr)
if self.verbose:
logging.info('Initialized %s by default', name)
class Mixed(object):
"""Initialize parameters using multiple initializers.
Parameters
----------
patterns: list of str
List of regular expressions matching parameter names.
initializers: list of Initializer
List of initializers corresponding to `patterns`.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize biases to zero
... # and every other parameter to random values with uniform distribution.
...
>>> init = mx.initializer.Mixed(['bias', '.*'], [mx.init.Zero(), mx.init.Uniform(0.1)])
>>> module.init_params(init)
>>>
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected1_weight
[[ 0.0097627 0.01856892 0.04303787]]
fullyconnected1_bias
[ 0.]
"""
def __init__(self, patterns, initializers):
assert len(patterns) == len(initializers)
self.map = list(zip([re.compile(p) for p in patterns], initializers))
def __call__(self, name, arr):
for prog, init in self.map:
if prog.match(name):
init(name, arr)
return
        raise ValueError('Parameter name %s did not match any pattern. Consider '
                         'adding a ".*" pattern at the end with a default Initializer.' % name)
@register
@alias("zeros")
class Zero(Initializer):
"""Initializes weights to zero.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights to zero.
...
>>> init = mx.initializer.Zero()
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 0. 0. 0.]]
"""
def __init__(self):
super(Zero, self).__init__()
def _init_weight(self, _, arr):
arr[:] = 0
@register
@alias("ones")
class One(Initializer):
"""Initializes weights to one.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights to one.
...
>>> init = mx.initializer.One()
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 1. 1. 1.]]
"""
def __init__(self):
super(One, self).__init__()
def _init_weight(self, _, arr):
arr[:] = 1
@register
class Constant(Initializer):
"""Initializes the weights to a given value.
    The value passed in can be a scalar or an NDArray that matches the shape
of the parameter to be set.
Parameters
----------
value : float, NDArray
Value to set.
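    Example
    -------
    >>> # A minimal sketch, assuming 'module' is an instance of 'mxnet.module.Module':
    ... # initialize every weight to the constant value 0.3.
    ...
    >>> init = mx.init.Constant(0.3)
    >>> module.init_params(init)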
"""
def __init__(self, value):
super(Constant, self).__init__(value=value)
self.value = value
def _init_weight(self, _, arr):
arr[:] = self.value
@register
class Uniform(Initializer):
"""Initializes weights with random values uniformly sampled from a given range.
Parameters
----------
scale : float, optional
The bound on the range of the generated random values.
Values are generated from the range [-`scale`, `scale`].
Default scale is 0.07.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights
>>> # to random values uniformly sampled between -0.1 and 0.1.
...
>>> init = mx.init.Uniform(0.1)
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 0.01360891 -0.02144304 0.08511933]]
"""
def __init__(self, scale=0.07):
super(Uniform, self).__init__(scale=scale)
self.scale = scale
def _init_weight(self, _, arr):
random.uniform(-self.scale, self.scale, out=arr)
@register
class Normal(Initializer):
"""Initializes weights with random values sampled from a normal distribution
with a mean of zero and standard deviation of `sigma`.
Parameters
----------
sigma : float, optional
Standard deviation of the normal distribution.
Default standard deviation is 0.01.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights
>>> # to random values sampled from a normal distribution.
...
>>> init = mx.init.Normal(0.5)
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[-0.3214761 -0.12660924 0.53789419]]
"""
def __init__(self, sigma=0.01):
super(Normal, self).__init__(sigma=sigma)
self.sigma = sigma
def _init_weight(self, _, arr):
random.normal(0, self.sigma, out=arr)
@register
class Orthogonal(Initializer):
"""Initialize weight as orthogonal matrix.
This initializer implements *Exact solutions to the nonlinear dynamics of
learning in deep linear neural networks*, available at
https://arxiv.org/abs/1312.6120.
Parameters
----------
scale : float optional
Scaling factor of weight.
rand_type: string optional
Use "uniform" or "normal" random number to initialize weight.
"""
def __init__(self, scale=1.414, rand_type="uniform"):
super(Orthogonal, self).__init__(scale=scale, rand_type=rand_type)
self.scale = scale
self.rand_type = rand_type
def _init_weight(self, _, arr):
nout = arr.shape[0]
nin = np.prod(arr.shape[1:])
if self.rand_type == "uniform":
tmp = random.uniform(-1.0, 1.0, shape=(nout, nin)).asnumpy()
        elif self.rand_type == "normal":
            tmp = random.normal(0.0, 1.0, shape=(nout, nin)).asnumpy()
        else:
            raise ValueError("Unknown rand_type %s" % self.rand_type)
u, _, v = np.linalg.svd(tmp, full_matrices=False) # pylint: disable=invalid-name
if u.shape == tmp.shape:
res = u
else:
res = v
res = self.scale * res.reshape(arr.shape)
arr[:] = res
@register
class Xavier(Initializer):
"""Returns an initializer performing "Xavier" initialization for weights.
This initializer is designed to keep the scale of gradients roughly the same
in all layers.
By default, `rnd_type` is ``'uniform'`` and `factor_type` is ``'avg'``,
the initializer fills the weights with random numbers in the range
of :math:`[-c, c]`, where :math:`c = \\sqrt{\\frac{3.}{0.5 * (n_{in} + n_{out})}}`.
:math:`n_{in}` is the number of neurons feeding into weights, and :math:`n_{out}` is
the number of neurons the result is fed to.
If `rnd_type` is ``'uniform'`` and `factor_type` is ``'in'``,
the :math:`c = \\sqrt{\\frac{3.}{n_{in}}}`.
Similarly when `factor_type` is ``'out'``, the :math:`c = \\sqrt{\\frac{3.}{n_{out}}}`.
If `rnd_type` is ``'gaussian'`` and `factor_type` is ``'avg'``,
the initializer fills the weights with numbers from normal distribution with
a standard deviation of :math:`\\sqrt{\\frac{3.}{0.5 * (n_{in} + n_{out})}}`.
Parameters
----------
rnd_type: str, optional
Random generator type, can be ``'gaussian'`` or ``'uniform'``.
factor_type: str, optional
Can be ``'avg'``, ``'in'``, or ``'out'``.
magnitude: float, optional
Scale of random number.
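    Example
    -------
    >>> # A minimal sketch, assuming 'module' is an instance of 'mxnet.module.Module':
    ... # uniform "Xavier" initialization in [-c, c] with c = sqrt(2.34 / n_in).
    ...
    >>> init = mx.init.Xavier(rnd_type="uniform", factor_type="in", magnitude=2.34)
    >>> module.init_params(init)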
"""
def __init__(self, rnd_type="uniform", factor_type="avg", magnitude=3):
super(Xavier, self).__init__(rnd_type=rnd_type, factor_type=factor_type,
magnitude=magnitude)
self.rnd_type = rnd_type
self.factor_type = factor_type
self.magnitude = float(magnitude)
def _init_weight(self, name, arr):
shape = arr.shape
hw_scale = 1.
if len(shape) < 2:
raise ValueError('Xavier initializer cannot be applied to vector {0}. It requires at'
' least 2D.'.format(name))
if len(shape) > 2:
hw_scale = np.prod(shape[2:])
fan_in, fan_out = shape[1] * hw_scale, shape[0] * hw_scale
factor = 1.
if self.factor_type == "avg":
factor = (fan_in + fan_out) / 2.0
elif self.factor_type == "in":
factor = fan_in
elif self.factor_type == "out":
factor = fan_out
else:
raise ValueError("Incorrect factor type")
scale = np.sqrt(self.magnitude / factor)
if self.rnd_type == "uniform":
random.uniform(-scale, scale, out=arr)
elif self.rnd_type == "gaussian":
random.normal(0, scale, out=arr)
else:
raise ValueError("Unknown random type")
@register
class MSRAPrelu(Xavier):
"""Initialize the weight according to a MSRA paper.
This initializer implements *Delving Deep into Rectifiers: Surpassing
Human-Level Performance on ImageNet Classification*, available at
https://arxiv.org/abs/1502.01852.
    This initializer is proposed for initialization related to ReLU activations;
    it makes some changes on top of the Xavier method.
Parameters
----------
factor_type: str, optional
Can be ``'avg'``, ``'in'``, or ``'out'``.
slope: float, optional
initial slope of any PReLU (or similar) nonlinearities.
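    Example
    -------
    >>> # A minimal sketch, assuming 'module' is an instance of 'mxnet.module.Module':
    ... # for slope=0.25 the magnitude works out to 2 / (1 + 0.25**2) ~= 1.88.
    ...
    >>> init = mx.init.MSRAPrelu(factor_type="in", slope=0.25)
    >>> module.init_params(init)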
"""
def __init__(self, factor_type="avg", slope=0.25):
magnitude = 2. / (1 + slope ** 2)
super(MSRAPrelu, self).__init__("gaussian", factor_type, magnitude)
self._kwargs = {'factor_type': factor_type, 'slope': slope}
@register
class Bilinear(Initializer):
"""Initialize weight for upsampling layers."""
def __init__(self):
super(Bilinear, self).__init__()
def _init_weight(self, _, arr):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
f = np.ceil(shape[3] / 2.)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
@register
class LSTMBias(Initializer):
"""Initialize all biases of an LSTMCell to 0.0 except for
the forget gate whose bias is set to custom value.
Parameters
----------
forget_bias: float, default 1.0
bias for the forget gate. Jozefowicz et al. 2015 recommends
setting this to 1.0.
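    Example
    -------
    >>> # A minimal sketch, assuming 'module' wraps a network containing LSTM cells:
    ... # all bias terms start at 0.0 except the forget-gate slice, which is set to 1.0.
    ...
    >>> init = mx.init.LSTMBias(forget_bias=1.0)
    >>> module.init_params(init)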
"""
def __init__(self, forget_bias=1.0):
super(LSTMBias, self).__init__(forget_bias=forget_bias)
self.forget_bias = forget_bias
def _init_weight(self, name, arr):
arr[:] = 0.0
# in the case of LSTMCell the forget gate is the second
        # gate of the 4 LSTM gates, so we modify the corresponding slice.
num_hidden = int(arr.shape[0] / 4)
arr[num_hidden:2*num_hidden] = self.forget_bias
@register
class FusedRNN(Initializer):
"""Initialize parameters for fused rnn layers.
Parameters
----------
init : Initializer
        Initializer applied to unpacked weights. Falls back to the global
        initializer if None.
    num_hidden : int
        should match the corresponding argument passed to FusedRNNCell.
    num_layers : int
        should match the corresponding argument passed to FusedRNNCell.
    mode : str
        should match the corresponding argument passed to FusedRNNCell.
    bidirectional : bool
        should match the corresponding argument passed to FusedRNNCell.
    forget_bias : float
        should match the corresponding argument passed to FusedRNNCell.
"""
def __init__(self, init, num_hidden, num_layers, mode, bidirectional=False, forget_bias=1.0):
if isinstance(init, string_types):
klass, kwargs = json.loads(init)
init = registry._REGISTRY[klass.lower()](**kwargs)
super(FusedRNN, self).__init__(init=init.dumps() if init is not None else None,
num_hidden=num_hidden, num_layers=num_layers, mode=mode,
bidirectional=bidirectional, forget_bias=forget_bias)
self._init = init
self._num_hidden = num_hidden
self._num_layers = num_layers
self._mode = mode
self._bidirectional = bidirectional
self._forget_bias = forget_bias
def _init_weight(self, desc, arr): # pylint: disable=arguments-differ
from .rnn import rnn_cell
cell = rnn_cell.FusedRNNCell(self._num_hidden, self._num_layers,
self._mode, self._bidirectional,
forget_bias=self._forget_bias, prefix='')
args = cell.unpack_weights({'parameters': arr})
for name in args:
arg_desc = InitDesc(name, global_init=desc.global_init)
# for lstm bias, we use a custom initializer
# which adds a bias to the forget gate
if self._mode == 'lstm' and name.endswith("_f_bias"):
args[name][:] = self._forget_bias
elif self._init is None:
desc.global_init(arg_desc, args[name])
else:
self._init(arg_desc, args[name])
arr[:] = cell.pack_weights(args)['parameters']
|
go-bears/nupic
|
refs/heads/master
|
src/nupic/frameworks/opf/common_models/cluster_params.py
|
15
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import numpy as np
import os
from pkg_resources import resource_stream
def getScalarMetricWithTimeOfDayAnomalyParams(metricData,
minVal=None,
maxVal=None,
minResolution=None):
"""
Return a dict that can be used to create an anomaly model via OPF's
ModelFactory.
:param metricData: numpy array of metric data. Used to calculate minVal
and maxVal if either is unspecified
  :param minVal: minimum value of metric. Used to set up encoders. If None,
    it will be derived from metricData.
  :param maxVal: maximum value of metric. Used to set up input encoders. If
    None, it will be derived from metricData.
:param minResolution: minimum resolution of metric. Used to set up
encoders. If None, will use default value of 0.001.
:returns: a dict containing "modelConfig" and "inferenceArgs" top-level
properties. The value of the "modelConfig" property is for passing to
the OPF `ModelFactory.create()` method as the `modelConfig` parameter. The
"inferenceArgs" property is for passing to the resulting model's
`enableInference()` method as the inferenceArgs parameter. NOTE: the
timestamp field corresponds to input "c0"; the predicted field corresponds
to input "c1".
:rtype: dict
Example:
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.common_models.cluster_params import (
getScalarMetricWithTimeOfDayAnomalyParams)
params = getScalarMetricWithTimeOfDayAnomalyParams(
metricData=[0],
minVal=0.0,
maxVal=100.0)
model = ModelFactory.create(modelConfig=params["modelConfig"])
model.enableLearning()
model.enableInference(params["inferenceArgs"])
"""
# Default values
if minResolution is None:
minResolution = 0.001
# Compute min and/or max from the data if not specified
if minVal is None or maxVal is None:
compMinVal, compMaxVal = _rangeGen(metricData)
if minVal is None:
minVal = compMinVal
if maxVal is None:
maxVal = compMaxVal
# Handle the corner case where the incoming min and max are the same
if minVal == maxVal:
maxVal = minVal + 1
# Load model parameters and update encoder params
paramFileRelativePath = os.path.join(
"anomaly_params_random_encoder",
"best_single_metric_anomaly_params.json")
with resource_stream(__name__, paramFileRelativePath) as infile:
paramSet = json.load(infile)
_fixupRandomEncoderParams(paramSet, minVal, maxVal, minResolution)
return paramSet
def _rangeGen(data, std=1):
"""
Return reasonable min/max values to use given the data.
"""
dataStd = np.std(data)
if dataStd == 0:
dataStd = 1
minval = np.min(data) - std * dataStd
maxval = np.max(data) + std * dataStd
return minval, maxval
def _fixupRandomEncoderParams(params, minVal, maxVal, minResolution):
"""
Given model params, figure out the correct parameters for the
RandomDistributed encoder. Modifies params in place.
"""
encodersDict = (
params["modelConfig"]["modelParams"]["sensorParams"]["encoders"]
)
for encoder in encodersDict.itervalues():
if encoder is not None:
if encoder["type"] == "RandomDistributedScalarEncoder":
resolution = max(minResolution,
(maxVal - minVal) / encoder.pop("numBuckets")
)
encodersDict["c1"]["resolution"] = resolution
|
williamHuang5468/StockServer
|
refs/heads/master
|
stockserver/views.py
|
1
|
from flask import jsonify, render_template, request, Response, session
from stockserver import app
import uuid
import hashlib
import sqlcommand as sql
# register done
# login
# logout
@app.route("/")
def index():
user = {'nickname': 'Guest'}
return render_template('index.html', title="Home", user=user)
# duplicate usernames are not yet handled
@app.route('/register', methods=['GET'])
def register():
data = {
'status' : 'register',
}
resp = jsonify(data)
resp.status_code = 200
return resp
@app.route('/api/register', methods=['POST'])
def api_register():
username = request.form['username']
password = request.form['password']
salt = create_salt()
hash_password = hash(password, salt)
sql.create_user(username, hash_password, salt)
message = {
'username':username,
'password':hash_password,
'status': True
}
return jsonify(message)
# Check whether the supplied credentials are valid and log the user in
@app.route('/api/login', methods=['POST'])
def api_login():
username = request.form['username']
hash_password, salt = sql.filter(username)
password = request.form['password']
if check(hash_password, password, salt):
status = True
session['username'] = username
else:
status = False
message = {
        'session':session.get('username'),
'login status':status
}
return jsonify(message)
@app.route('/login', methods=['GET'])
def login():
data = {
'status' : 'login',
}
resp = jsonify(data)
resp.status_code = 200
return resp
@app.route('/logout/<string:username>', methods=['GET'])
def logout(username):
message = {
'username':username,
'status' :'logout'
}
resp = jsonify(message)
resp.status_code = 200
return resp
@app.route('/api/logout', methods=['POST'])
def api_logout():
session.pop('username', None)
message = {
'session':'pop username',
'status' :'logout'
}
resp = jsonify(message)
resp.status_code = 200
return resp
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 404
return resp
def create_salt():
return uuid.uuid4().hex
def hash(word, salt):
return (hashlib.md5((word + salt).encode()).hexdigest())
def check(hash_password, user_password, salt):
return hash_password == hash(user_password, salt)
# test
def test_hash():
salt = "c33d6b6f45864925bb86cbba04a308b9"
hash_password = "6569880915e9ec7cbebf80b6fba4bbeb"
assert check(hash_password, "123", salt)
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
|
mrquim/repository.mrquim
|
refs/heads/master
|
script.module.livestreamer/lib/livestreamer/stream/rtmpdump.py
|
37
|
import re
from time import sleep
from .streamprocess import StreamProcess
from ..compat import str
from ..exceptions import StreamError
from ..packages import pbs as sh
from ..utils import rtmpparse
class RTMPStream(StreamProcess):
"""RTMP stream using rtmpdump.
*Attributes:*
- :attr:`params` A :class:`dict` containing parameters passed to rtmpdump
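    *Example* (a minimal sketch; the URL and the ``session`` object are assumptions):
      stream = RTMPStream(session, {"rtmp": "rtmp://example.com/app/playpath", "live": True})
      fd = stream.open()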
"""
__shortname__ = "rtmp"
def __init__(self, session, params, redirect=False):
StreamProcess.__init__(self, session, params)
self.cmd = self.session.options.get("rtmp-rtmpdump")
self.timeout = self.session.options.get("rtmp-timeout")
self.redirect = redirect
self.logger = session.logger.new_module("stream.rtmp")
def __repr__(self):
return ("<RTMPStream({0!r}, redirect={1!r}>").format(self.params,
self.redirect)
def __json__(self):
return dict(type=RTMPStream.shortname(), params=self.params)
def open(self):
if self.session.options.get("rtmp-proxy"):
if not self._supports_param("socks"):
raise StreamError("Installed rtmpdump does not support --socks argument")
self.params["socks"] = self.session.options.get("rtmp-proxy")
if "jtv" in self.params and not self._supports_param("jtv"):
raise StreamError("Installed rtmpdump does not support --jtv argument")
if "weeb" in self.params and not self._supports_param("weeb"):
raise StreamError("Installed rtmpdump does not support --weeb argument")
if self.redirect:
self._check_redirect()
self.params["flv"] = "-"
return StreamProcess.open(self)
def _check_redirect(self, timeout=20):
cmd = self._check_cmd()
params = self.params.copy()
params["verbose"] = True
params["_bg"] = True
self.logger.debug("Attempting to find tcURL redirect")
stream = cmd(**params)
elapsed = 0
process_alive = True
while elapsed < timeout and process_alive:
stream.process.poll()
process_alive = stream.process.returncode is None
sleep(0.25)
elapsed += 0.25
if process_alive:
try:
stream.process.kill()
except Exception:
pass
stream.process.wait()
try:
stderr = stream.stderr()
except sh.ErrorReturnCode as err:
self._update_redirect(err.stderr)
def _update_redirect(self, stderr):
tcurl, redirect = None, None
stderr = str(stderr, "utf8")
m = re.search("DEBUG: Property: <Name:\s+redirect,\s+STRING:\s+(\w+://.+?)>", stderr)
if m:
redirect = m.group(1)
if redirect:
self.logger.debug("Found redirect tcUrl: {0}", redirect)
if "rtmp" in self.params:
tcurl, playpath = rtmpparse(self.params["rtmp"])
rtmp = "{redirect}/{playpath}".format(**locals())
self.params["rtmp"] = rtmp
if "tcUrl" in self.params:
self.params["tcUrl"] = redirect
def _supports_param(self, param):
cmd = self._check_cmd()
try:
help = cmd(help=True, _err_to_out=True)
except sh.ErrorReturnCode as err:
err = str(err.stdout, "ascii")
raise StreamError("Error while checking rtmpdump compatibility: {0}".format(err))
for line in help.splitlines():
m = re.match("^--(\w+)", line)
if not m:
continue
if m.group(1) == param:
return True
return False
@classmethod
def is_usable(cls, session):
cmd = session.options.get("rtmp-rtmpdump")
return StreamProcess.is_usable(cmd)
|
hurrinico/stock-logistics-workflow
|
refs/heads/8.0
|
__unported__/stock_filter_none_zero_qty/product.py
|
23
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2010-2012, OPENTIA Group (<http://opentia.com>)
# The word "OPENTIA" is an European Community Trademark property
# of the Opentia Group
#
# @author: Opentia "Happy Hacking" Team
# @e-mail: consultoria@opentia·es
#
# This module is an improvement of the module "zero_stock_filter"
# This is base on this module and improved by Julius Network Solutions
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
from osv import fields, orm
from tools.translate import _
import decimal_precision as dp
class product_product(orm.Model):
_inherit = "product.product"
def _product_available(self, cr, uid, ids, field_names=None, arg=False,
context=None):
return super(product_product, self)._product_available(
cr, uid, ids,
field_names=field_names,
arg=arg,
context=context)
def _qty_available_search(self, cr, uid, obj, name, args, context=None):
ops = ['>', ]
prod_ids = ()
if not len(args):
return []
prod_ids = []
for a in args:
operator = a[1]
if operator not in ops:
raise orm.except_orm(_('Error !'),
                                     _('Operator %s not supported in '
'searches for qty_available '
'(product.product).' % operator))
if operator == '>':
todos = self.search(cr, uid, [], context=context)
ids = self.read(cr, uid, todos, ['qty_available'],
context=context)
for d in ids:
if d['qty_available'] > 0:
prod_ids.append(d['id'])
return [('id', 'in', tuple(prod_ids))]
_columns = {
'qty_available': fields.function(
_product_available,
fnct_search=_qty_available_search,
method=True,
multi='qty_available',
type='float',
digits_compute=dp.get_precision('Product '
'Unit of Measure'),
string='Quantity On Hand',
help="""Current quantity of products.
In a context with a
single Stock Location, this includes goods stored at this
Location, or any of its children.
In a context with a single Warehouse, this includes goods stored in
the Stock Location of this Warehouse, or any of its children.
In a context with a single Shop, this includes goods stored in the
Stock Location of the Warehouse of this Shop, or any of its
children.
Otherwise, this includes goods stored in any Stock
Location with 'internal' type."""
),
}
|
Thoshh/wapad
|
refs/heads/master
|
lib/python2.7/site-packages/pip/commands/show.py
|
142
|
from __future__ import absolute_import
from email.parser import FeedParser
import logging
import os
from pip.basecommand import Command
from pip.status_codes import SUCCESS, ERROR
from pip._vendor import pkg_resources
logger = logging.getLogger(__name__)
class ShowCommand(Command):
"""Show information about one or more installed packages."""
name = 'show'
usage = """
%prog [options] <package> ..."""
summary = 'Show information about installed packages.'
def __init__(self, *args, **kw):
super(ShowCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-f', '--files',
dest='files',
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
logger.warning('ERROR: Please provide a package name or names.')
return ERROR
query = args
results = search_packages_info(query)
if not print_results(results, options.files):
return ERROR
return SUCCESS
def search_packages_info(query):
"""
Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Listing installed files requires a
    pip-generated 'installed-files.txt' in the distribution's '.egg-info'
    directory.
"""
installed = dict(
[(p.project_name.lower(), p) for p in pkg_resources.working_set])
query_names = [name.lower() for name in query]
for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
}
file_list = None
metadata = None
if isinstance(dist, pkg_resources.DistInfoDistribution):
# RECORDs should be part of .dist-info metadatas
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
paths = [os.path.join(dist.location, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('METADATA'):
metadata = dist.get_metadata('METADATA')
else:
# Otherwise use pip's log for .egg-info's
if dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
paths = [os.path.join(dist.egg_info, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('PKG-INFO'):
metadata = dist.get_metadata('PKG-INFO')
if dist.has_metadata('entry_points.txt'):
entry_points = dist.get_metadata_lines('entry_points.txt')
package['entry_points'] = entry_points
installer = None
if dist.has_metadata('INSTALLER'):
for line in dist.get_metadata_lines('INSTALLER'):
if line.strip():
installer = line.strip()
break
package['installer'] = installer
# @todo: Should pkg_resources.Distribution have a
# `get_pkg_info` method?
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
for key in ('metadata-version', 'summary',
'home-page', 'author', 'author-email', 'license'):
package[key] = pkg_info_dict.get(key)
# It looks like FeedParser can not deal with repeated headers
classifiers = []
for line in metadata.splitlines():
if not line:
break
# Classifier: License :: OSI Approved :: MIT License
if line.startswith('Classifier: '):
classifiers.append(line[len('Classifier: '):])
package['classifiers'] = classifiers
if file_list:
package['files'] = sorted(file_list)
yield package
def print_results(distributions, list_all_files):
"""
    Print the information from the installed distributions that were found.
"""
results_printed = False
for dist in distributions:
results_printed = True
logger.info("---")
logger.info("Metadata-Version: %s", dist.get('metadata-version'))
logger.info("Name: %s", dist['name'])
logger.info("Version: %s", dist['version'])
logger.info("Summary: %s", dist.get('summary'))
logger.info("Home-page: %s", dist.get('home-page'))
logger.info("Author: %s", dist.get('author'))
logger.info("Author-email: %s", dist.get('author-email'))
if dist['installer'] is not None:
logger.info("Installer: %s", dist['installer'])
logger.info("License: %s", dist.get('license'))
logger.info("Location: %s", dist['location'])
logger.info("Requires: %s", ', '.join(dist['requires']))
logger.info("Classifiers:")
for classifier in dist['classifiers']:
logger.info(" %s", classifier)
if list_all_files:
logger.info("Files:")
if 'files' in dist:
for line in dist['files']:
logger.info(" %s", line.strip())
else:
logger.info("Cannot locate installed-files.txt")
if 'entry_points' in dist:
logger.info("Entry-points:")
for line in dist['entry_points']:
logger.info(" %s", line.strip())
return results_printed
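# A hypothetical usage sketch (not part of pip itself): iterate over the generator
# returned by search_packages_info to inspect metadata of installed packages.
#
#     for dist in search_packages_info(['pip']):
#         print("%s %s (%s)" % (dist['name'], dist['version'], dist['location']))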
|
swoop-inc/graflux
|
refs/heads/master
|
graflux/metric_index.py
|
1
|
import json
import weakref
import re
GRAPHITE_GLOB_REGEX = re.compile('\*|{')
# TODO there are some dumb things done in here, could be faster
class MetricNode(object):
__slots__ = ['parent', 'children', '__weakref__']
def __init__(self, parent):
# weakref here as python's circular reference finder doesn't seem to be able to understand this structure
self.parent = weakref.ref(parent) if parent else parent
self.children = {}
def is_leaf(self):
return len(self.children) == 0
def insert(self, path):
if len(path) == 0: return
child_name = path.pop(0)
if child_name in self.children:
target_child = self.children[child_name]
else:
target_child = MetricNode(self)
self.children[child_name] = target_child
target_child.insert(path)
def to_array(self):
return [[name, node.to_array()] for name, node in self.children.items()]
@staticmethod
def from_array(parent, array):
metric = MetricNode(parent)
for child_name, child_array in array:
child = MetricNode.from_array(metric, child_array)
metric.children[child_name] = child
return metric
class MetricIndex(object):
__slots__ = ['index']
def __init__(self):
self.index = MetricNode(None)
def insert(self, metric_name):
path = metric_name.split('.')
self.index.insert(path)
def clear(self):
self.index.children = {}
def query(self, query):
search_path = [MetricIndex.search_instruction(token) for token in query.split('.')]
result = self.search(self.index, search_path, [])
return [{'metric': '.'.join(path), 'is_leaf': node.is_leaf()} for path, node in result]
def search(self, node, query_path, path):
instruction, arg = query_path[0]
if instruction == 'EXACT':
matched_children = [(arg, node.children[arg])] if arg in node.children else []
elif instruction == 'ALL':
matched_children = node.children.items()
elif instruction == 'REGEX':
matched_children = [(key, value) for key, value in node.children.items() if arg.match(key)]
else:
            raise ValueError('Unknown search instruction: ' + instruction)
# recursively construct results building the path to the final node along the way
result = []
for child_name, child_node in matched_children:
child_path = list(path)
child_path.append(child_name)
child_query = query_path[1:]
if len(child_query) != 0:
for sub in self.search(child_node, child_query, child_path):
result.append(sub)
else:
result.append([child_path, child_node])
return result
def to_json(self):
return json.dumps(self.to_array())
def to_array(self):
return self.index.to_array()
@staticmethod
def search_instruction(token):
if token == '*':
return 'ALL', None
elif GRAPHITE_GLOB_REGEX.search(token):
# Convert graphite glob expression token into a regex
regex = re.compile(token.replace('*', '.*').replace('{', '(').replace(',', '|').replace('}', ')') + '$')
return 'REGEX', regex
else:
return 'EXACT', token
@staticmethod
def from_array(model):
metric_index = MetricIndex()
metric_index.index = MetricNode.from_array(None, model)
return metric_index
@staticmethod
def from_json(data):
model = json.loads(data)
return MetricIndex.from_array(model)
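# A hypothetical usage sketch (not part of the original module; the metric names
# below are made up): build a small index and run a Graphite-style glob query.
if __name__ == '__main__':
    index = MetricIndex()
    for metric in ('servers.web1.cpu', 'servers.web2.cpu', 'servers.web1.mem'):
        index.insert(metric)
    # Expected to list servers.web1.cpu and servers.web2.cpu, both marked as leaves.
    print(index.query('servers.*.cpu'))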
|
raskolnikova/infomaps
|
refs/heads/master
|
node_modules/node-gyp/gyp/tools/pretty_sln.py
|
1831
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  # check that we were given at least one parameter (the .sln path).
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
|
mydongistiny/external_chromium_org
|
refs/heads/benzo
|
third_party/protobuf/python/google/protobuf/internal/enum_type_wrapper.py
|
292
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple wrapper around enum types to expose utility functions.
Instances are created as properties with the same name as the enum they wrap
on proto classes. For usage, see:
reflection_test.py
"""
__author__ = 'rabsatt@google.com (Kevin Rabsatt)'
class EnumTypeWrapper(object):
"""A utility for finding the names of enum values."""
DESCRIPTOR = None
def __init__(self, enum_type):
"""Inits EnumTypeWrapper with an EnumDescriptor."""
self._enum_type = enum_type
    self.DESCRIPTOR = enum_type
def Name(self, number):
"""Returns a string containing the name of an enum value."""
if number in self._enum_type.values_by_number:
return self._enum_type.values_by_number[number].name
raise ValueError('Enum %s has no name defined for value %d' % (
self._enum_type.name, number))
def Value(self, name):
"""Returns the value coresponding to the given enum name."""
if name in self._enum_type.values_by_name:
return self._enum_type.values_by_name[name].number
raise ValueError('Enum %s has no value defined for name %s' % (
self._enum_type.name, name))
def keys(self):
"""Return a list of the string names in the enum.
These are returned in the order they were defined in the .proto file.
"""
return [value_descriptor.name
for value_descriptor in self._enum_type.values]
def values(self):
"""Return a list of the integer values in the enum.
These are returned in the order they were defined in the .proto file.
"""
return [value_descriptor.number
for value_descriptor in self._enum_type.values]
def items(self):
"""Return a list of the (name, value) pairs of the enum.
These are returned in the order they were defined in the .proto file.
"""
return [(value_descriptor.name, value_descriptor.number)
for value_descriptor in self._enum_type.values]
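# A hypothetical usage sketch (comments only; 'my_pb2.Color' is an assumed
# generated enum, not something defined in this module):
#
#     color = my_pb2.Color     # an EnumTypeWrapper property on the generated class
#     color.Name(0)            # name of the value 0, e.g. 'RED'
#     color.Value('RED')       # numeric value for the name 'RED', e.g. 0
#     color.items()            # [(name, number), ...] in .proto definition order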
|
tranlyvu/find-link
|
refs/heads/master
|
wikilink/__init__.py
|
1
|
"""
wikilink
~~~~~~~~
wikilink is a multiprocessing web-scraping application that scrapes wiki pages and
finds the minimum number of links between two given wiki pages.
:copyright: (c) 2016 - 2019 by Tran Ly VU. All Rights Reserved.
:license: Apache License 2.0.
"""
from .wiki_link import WikiLink
name = "wikilink"
__all__ = ["WikiLink"]
__author__ = "Tran Ly Vu (vutransingapore@gmail.com)"
__copyright__ = "Copyright (c) 2016 - 2019 Tran Ly Vu. All Rights Reserved."
__credits__ = ["Tranlyvu"]
__license__ = "Apache License 2.0"
__maintainer__ = "Tran Ly Vu"
__email__ = "vutransingapore@gmail.com"
__status__ = "Beta"
|
RachaelT/UTDchess-RospyXbee
|
refs/heads/master
|
src/FullChess/src/ROS_Templates/PID_Template.py
|
1
|
#!/usr/bin/env python
import rospy
import tf
import math
from geometry_msgs.msg import PointStamped
from chessbot.msg import RobCMD
import time
import struct
import binascii
import thread
# this gives the transform listener the correct frame subscription for every node
NAME = rospy.get_namespace()
NAME = NAME[1:(len(NAME)-1)]
print "Bot: %s" % NAME
point = PointStamped()
t = 0
def get_point():
global NAME
global point
global t
pt = rospy.get_param("/%s_point" % NAME)
point.point.x = pt[0]
point.point.y = pt[1]
point.point.z = 0
return point
def get_target():
#transforms the target to the robot's coordinate frame for the PI controller
global NAME
point = get_point()
point.header.frame_id = '/world'
rate = rospy.Rate(1000)
while not rospy.is_shutdown():
try:
target = listener.transformPoint('/%s' % NAME, point)
return target
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rate.sleep()
continue
def build_vector(target, current):
#builds a vector between two points
vect = [0, 0]
vect[0] = calc_angle(target, current)
vect[1] = calc_mag(target, current)
return vect
def _clockwise(vect):
#Tells the robot whether it needs to turn clockwise or counterclockwise
#For the fastest path to the correct heading
if math.sin( (vect[0] * math.pi)/180 ) > 0:
return 0
else:
return 1
def calc_angle(target, current):
#calculates the direction of a vector relative to a coordinate frame (origin given by current)
deltaX = target[0] - current[0]
deltaY = target[1] - current[1]
angle = math.atan2(deltaY, deltaX) * 180 / math.pi
return angle
def calc_mag(target, current):
#calculates a vector's magnitude relative to a coordinate frame (origin given by current)
    mag = (target[1] - current[1])**2 + (target[0] - current[0])**2
mag = math.sqrt(mag)
return mag
def proportion_controller():
#Initializing the command publisher
global point
cmd_pub = rospy.Publisher("cmd_hex", RobCMD, queue_size=100)
rate = rospy.Rate(10.0)
#Setting the PI constants
kp = 1.2
ki = 0.1
integral_ang = 0
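    # Descriptive note (not in the original code): this loop implements a PI
    # controller on the heading error: turn_speed = kp * angle + integral_ang,
    # where integral_ang accumulates ki * angle each pass and both terms are
    # clamped to the motor command range [-255, 255].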
t = 0
try:
while not rospy.is_shutdown():
point = get_target()
command = RobCMD()
t = t + .005
path = build_vector([point.point.x * 1000, point.point.y * 1000], [0,0])
print "Path vector: %r " % path
angle = path[0]
mag = path[1]
proportional_ang = (kp * angle)
integral_ang += (ki * angle)
#Setting the turn speed
if integral_ang >= 255:
integral_ang = 255
if integral_ang <= -255:
integral_ang = -255
turn_speed = proportional_ang + integral_ang
if turn_speed >= 255:
turn_speed = 255
if turn_speed <= -255:
turn_speed = -255
#Setting the turn to clockwise or counterclockwise
turn_direction = _clockwise(path)
rospy.loginfo(NAME)
rospy.loginfo("Turning Speed: %r" %turn_speed)
rospy.loginfo("Turning Direction: %r" %turn_direction)
command.code = 4
command.turn = turn_direction
command.accel = int(abs(turn_speed))
command.magnitude = 0
command.direction = 0
#The robot moves forward when it is facing the target
if abs(angle) < 40:
command.magnitude = 100
#The robot stops once it reaches the target
if abs(mag) < 50:
command.magnitude = 0
command.direction = 0
command.turn = 0
command.accel = 0
cmd_pub.publish(command)
#while get_point() == point:
#time.sleep(.5)
cmd_pub.publish(command)
rate.sleep()
rate.sleep()
except KeyboardInterrupt:
raise
if __name__ == '__main__':
try:
rospy.init_node('tf_listener', anonymous=False)
listener = tf.TransformListener()
get_point()
proportion_controller()
    except (rospy.ROSInterruptException, KeyboardInterrupt):
raise
|
CopeX/odoo
|
refs/heads/8.0
|
openerp/cli/deploy.py
|
369
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import requests
import sys
import tempfile
import zipfile
from . import Command
class Deploy(Command):
"""Deploy a module on an Odoo instance"""
def __init__(self):
super(Deploy, self).__init__()
self.session = requests.session()
def deploy_module(self, module_path, url, login, password, db='', force=False):
url = url.rstrip('/')
self.authenticate(url, login, password, db)
module_file = self.zip_module(module_path)
try:
return self.upload_module(url, module_file, force=force)
finally:
os.remove(module_file)
def upload_module(self, server, module_file, force=False):
print("Uploading module file...")
url = server + '/base_import_module/upload'
files = dict(mod_file=open(module_file, 'rb'))
force = '1' if force else ''
res = self.session.post(url, files=files, data=dict(force=force))
if res.status_code != 200:
raise Exception("Could not authenticate on server '%s'" % server)
return res.text
def authenticate(self, server, login, password, db=''):
print("Authenticating on server '%s' ..." % server)
# Fixate session with a given db if any
self.session.get(server + '/web/login', params=dict(db=db))
args = dict(login=login, password=password, db=db)
res = self.session.post(server + '/base_import_module/login', args)
if res.status_code == 404:
raise Exception("The server '%s' does not have the 'base_import_module' installed." % server)
elif res.status_code != 200:
raise Exception(res.text)
def zip_module(self, path):
path = os.path.abspath(path)
if not os.path.isdir(path):
raise Exception("Could not find module directory '%s'" % path)
container, module_name = os.path.split(path)
temp = tempfile.mktemp(suffix='.zip')
try:
print("Zipping module directory...")
with zipfile.ZipFile(temp, 'w') as zfile:
for root, dirs, files in os.walk(path):
for file in files:
file_path = os.path.join(root, file)
zfile.write(file_path, file_path.split(container).pop())
return temp
except Exception:
os.remove(temp)
raise
def run(self, cmdargs):
parser = argparse.ArgumentParser(
prog="%s deploy" % sys.argv[0].split(os.path.sep)[-1],
description=self.__doc__
)
parser.add_argument('path', help="Path of the module to deploy")
parser.add_argument('url', nargs='?', help='Url of the server (default=http://localhost:8069)', default="http://localhost:8069")
parser.add_argument('--db', dest='db', help='Database to use if server does not use db-filter.')
parser.add_argument('--login', dest='login', default="admin", help='Login (default=admin)')
parser.add_argument('--password', dest='password', default="admin", help='Password (default=admin)')
parser.add_argument('--verify-ssl', action='store_true', help='Verify SSL certificate')
parser.add_argument('--force', action='store_true', help='Force init even if module is already installed. (will update `noupdate="1"` records)')
if not cmdargs:
sys.exit(parser.print_help())
args = parser.parse_args(args=cmdargs)
if not args.verify_ssl:
self.session.verify = False
try:
if not args.url.startswith(('http://', 'https://')):
args.url = 'https://%s' % args.url
result = self.deploy_module(args.path, args.url, args.login, args.password, args.db, force=args.force)
print(result)
except Exception, e:
sys.exit("ERROR: %s" % e)
|
studio666/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/generator-output/gyptest-rules.py
|
198
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using rules.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])
test.writable(test.workpath('rules'), False)
test.run_gyp('rules.gyp',
'--generator-output=' + test.workpath('gypfiles'),
chdir='rules')
test.writable(test.workpath('rules'), True)
test.relocate('rules', 'relocate/rules')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/rules'), False)
test.writable(test.workpath('relocate/rules/build'), True)
test.writable(test.workpath('relocate/rules/subdir1/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True)
test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles')
expect = """\
Hello from program.c
Hello from function1.in1
Hello from function2.in1
Hello from define3.in0
Hello from define4.in0
"""
if test.format == 'xcode':
chdir = 'relocate/rules/subdir1'
else:
chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/rules/subdir2/rules-out/file1.out',
"Hello from file1.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file2.out',
"Hello from file2.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file3.out',
"Hello from file3.in1\n")
test.must_match('relocate/rules/subdir2/rules-out/file4.out',
"Hello from file4.in1\n")
test.pass_test()
|
overtherain/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/django_1_3/django/contrib/gis/db/backends/mysql/operations.py
|
312
|
from django.db.backends.mysql.base import DatabaseOperations
from django.contrib.gis.db.backends.adapter import WKTAdapter
from django.contrib.gis.db.backends.base import BaseSpatialOperations
class MySQLOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
mysql = True
name = 'mysql'
select = 'AsText(%s)'
from_wkb = 'GeomFromWKB'
from_text = 'GeomFromText'
Adapter = WKTAdapter
Adaptor = Adapter # Backwards-compatibility alias.
geometry_functions = {
'bbcontains' : 'MBRContains', # For consistency w/PostGIS API
'bboverlaps' : 'MBROverlaps', # .. ..
'contained' : 'MBRWithin', # .. ..
'contains' : 'MBRContains',
'disjoint' : 'MBRDisjoint',
'equals' : 'MBREqual',
'exact' : 'MBREqual',
'intersects' : 'MBRIntersects',
'overlaps' : 'MBROverlaps',
'same_as' : 'MBREqual',
'touches' : 'MBRTouches',
'within' : 'MBRWithin',
}
gis_terms = dict([(term, None) for term in geometry_functions.keys() + ['isnull']])
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, value, srid):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'expression'):
placeholder = '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
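    # Illustrative note (not in the original source): for a plain geometry
    # value the placeholder above resolves to 'GeomFromText(%s)', so a bound
    # WKT parameter ends up as GeomFromText('POINT(1 1)') in the final SQL.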
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
alias, col, db_type = lvalue
geo_col = '%s.%s' % (qn(alias), qn(col))
lookup_info = self.geometry_functions.get(lookup_type, False)
if lookup_info:
return "%s(%s, %s)" % (lookup_info, geo_col,
self.get_geom_placeholder(value, field.srid))
# TODO: Is this really necessary? MySQL can't handle NULL geometries
# in its spatial indexes anyways.
if lookup_type == 'isnull':
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
|
Sbalbp/DIRAC
|
refs/heads/integration
|
WorkloadManagementSystem/DB/__init__.py
|
18
|
############################################################
# $HeadURL$
############################################################
"""
DIRAC.WorkloadManagementSystem.DB package
"""
__RCSID__ = "$Id$"
|
t2m-foxfone/android_kernel_msm
|
refs/heads/foxfone-one-lollipop
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
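#
# Example invocations (illustrative, derived from the argument handling below):
#   perf script -s sctop.py              # all comms, refresh every 3 seconds
#   perf script -s sctop.py 5            # all comms, refresh every 5 seconds
#   perf script -s sctop.py firefox 5    # only syscalls made by 'firefox'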
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
nwokeo/supysonic
|
refs/heads/master
|
venv/lib/python2.7/site-packages/PIL/XpmImagePlugin.py
|
16
|
#
# The Python Imaging Library.
# $Id$
#
# XPM File handling
#
# History:
# 1996-12-29 fl Created
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
#
# Copyright (c) Secret Labs AB 1997-2001.
# Copyright (c) Fredrik Lundh 1996-2001.
#
# See the README file for information on usage and redistribution.
#
import re
from . import Image, ImageFile, ImagePalette
from ._binary import i8, o8
__version__ = "0.2"
# XPM header
xpm_head = re.compile(b"\"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)")
def _accept(prefix):
return prefix[:9] == b"/* XPM */"
##
# Image plugin for X11 pixel maps.
class XpmImageFile(ImageFile.ImageFile):
format = "XPM"
format_description = "X11 Pixel Map"
def _open(self):
if not _accept(self.fp.read(9)):
raise SyntaxError("not an XPM file")
# skip forward to next string
while True:
s = self.fp.readline()
if not s:
raise SyntaxError("broken XPM file")
m = xpm_head.match(s)
if m:
break
self.size = int(m.group(1)), int(m.group(2))
pal = int(m.group(3))
bpp = int(m.group(4))
if pal > 256 or bpp != 1:
raise ValueError("cannot read this XPM file")
#
# load palette description
palette = [b"\0\0\0"] * 256
for i in range(pal):
s = self.fp.readline()
if s[-2:] == b'\r\n':
s = s[:-2]
elif s[-1:] in b'\r\n':
s = s[:-1]
c = i8(s[1])
s = s[2:-2].split()
for i in range(0, len(s), 2):
if s[i] == b"c":
# process colour key
rgb = s[i+1]
if rgb == b"None":
self.info["transparency"] = c
elif rgb[0:1] == b"#":
# FIXME: handle colour names (see ImagePalette.py)
rgb = int(rgb[1:], 16)
palette[c] = (o8((rgb >> 16) & 255) +
o8((rgb >> 8) & 255) +
o8(rgb & 255))
else:
# unknown colour
raise ValueError("cannot read this XPM file")
break
else:
# missing colour key
raise ValueError("cannot read this XPM file")
self.mode = "P"
self.palette = ImagePalette.raw("RGB", b"".join(palette))
self.tile = [("raw", (0, 0)+self.size, self.fp.tell(), ("P", 0, 1))]
def load_read(self, bytes):
#
# load all image data in one chunk
xsize, ysize = self.size
s = [None] * ysize
for i in range(ysize):
s[i] = self.fp.readline()[1:xsize+1].ljust(xsize)
return b"".join(s)
#
# Registry
Image.register_open(XpmImageFile.format, XpmImageFile, _accept)
Image.register_extension(XpmImageFile.format, ".xpm")
Image.register_mime(XpmImageFile.format, "image/xpm")
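# Illustrative usage sketch (not part of the upstream plugin): once the
# registrations above have run, Pillow dispatches XPM files through the
# normal open() path.
#
#     from PIL import Image
#     im = Image.open("icon.xpm")   # hypothetical path to an XPM file
#     rgb = im.convert("RGB")       # the plugin yields a palette ("P") image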
|
blablack/deteriorate-lv2
|
refs/heads/master
|
waflib/extras/run_r_script.py
|
51
|
#!/usr/bin/env python
# encoding: utf-8
# Hans-Martin von Gaudecker, 2012
"""
Run an R script in the directory specified by **ctx.bldnode**.
For error-catching purposes, keep its own log file that is removed if the
task finishes without error. Otherwise, it will show up as rscript_[index].log
in the bldnode directory.
Usage::
ctx(features='run_r_script',
source='some_script.r',
target=['some_table.tex', 'some_figure.eps'],
deps='some_data.csv')
"""
import os, sys
from waflib import Task, TaskGen, Logs
R_COMMANDS = ['RTerm', 'R', 'r']
def configure(ctx):
ctx.find_program(R_COMMANDS, var='RCMD', errmsg = """\n
No R executable found!\n\n
If R is needed:\n
1) Check the settings of your system path.
2) Note we are looking for R executables called: %s
If yours has a different name, please report to hmgaudecker [at] gmail\n
Else:\n
Do not load the 'run_r_script' tool in the main wscript.\n\n""" % R_COMMANDS)
ctx.env.RFLAGS = 'CMD BATCH --slave'
class run_r_script_base(Task.Task):
"""Run a R script."""
run_str = '"${RCMD}" ${RFLAGS} "${SRC[0].abspath()}" "${LOGFILEPATH}"'
shell = True
class run_r_script(run_r_script_base):
"""Erase the R overall log file if everything went okay, else raise an
error and print its 10 last lines.
"""
def run(self):
ret = run_r_script_base.run(self)
logfile = self.env.LOGFILEPATH
if ret:
mode = 'r'
if sys.version_info.major >= 3:
mode = 'rb'
with open(logfile, mode=mode) as f:
tail = f.readlines()[-10:]
Logs.error("""Running R on %r returned the error %r\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""",
self.inputs[0], ret, logfile, '\n'.join(tail))
else:
os.remove(logfile)
return ret
@TaskGen.feature('run_r_script')
@TaskGen.before_method('process_source')
def apply_run_r_script(tg):
"""Task generator customising the options etc. to call R in batch
mode for running a R script.
"""
# Convert sources and targets to nodes
src_node = tg.path.find_resource(tg.source)
tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]
tsk = tg.create_task('run_r_script', src=src_node, tgt=tgt_nodes)
tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s_%d.log' % (os.path.splitext(src_node.name)[0], tg.idx))
# dependencies (if the attribute 'deps' changes, trigger a recompilation)
for x in tg.to_list(getattr(tg, 'deps', [])):
node = tg.path.find_resource(x)
if not node:
tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath()))
tsk.dep_nodes.append(node)
Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath())
# Bypass the execution of process_source by setting the source to an empty list
tg.source = []
|
infyponics/infyponics
|
refs/heads/master
|
bson/__init__.py
|
14
|
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BSON (Binary JSON) encoding and decoding.
"""
import calendar
import collections
import datetime
import itertools
import re
import struct
import sys
import uuid
from codecs import (utf_8_decode as _utf_8_decode,
utf_8_encode as _utf_8_encode)
from bson.binary import (Binary, OLD_UUID_SUBTYPE,
JAVA_LEGACY, CSHARP_LEGACY,
UUIDLegacy)
from bson.code import Code
from bson.codec_options import (
CodecOptions, DEFAULT_CODEC_OPTIONS, _raw_document_class)
from bson.dbref import DBRef
from bson.errors import (InvalidBSON,
InvalidDocument,
InvalidStringData)
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.py3compat import (b,
PY3,
iteritems,
text_type,
string_type,
reraise)
from bson.regex import Regex
from bson.son import SON, RE_TYPE
from bson.timestamp import Timestamp
from bson.tz_util import utc
try:
from bson import _cbson
_USE_C = True
except ImportError:
_USE_C = False
EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc)
EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0)
BSONNUM = b"\x01" # Floating point
BSONSTR = b"\x02" # UTF-8 string
BSONOBJ = b"\x03" # Embedded document
BSONARR = b"\x04" # Array
BSONBIN = b"\x05" # Binary
BSONUND = b"\x06" # Undefined
BSONOID = b"\x07" # ObjectId
BSONBOO = b"\x08" # Boolean
BSONDAT = b"\x09" # UTC Datetime
BSONNUL = b"\x0A" # Null
BSONRGX = b"\x0B" # Regex
BSONREF = b"\x0C" # DBRef
BSONCOD = b"\x0D" # Javascript code
BSONSYM = b"\x0E" # Symbol
BSONCWS = b"\x0F" # Javascript code with scope
BSONINT = b"\x10" # 32bit int
BSONTIM = b"\x11" # Timestamp
BSONLON = b"\x12" # 64bit int
BSONMIN = b"\xFF" # Min key
BSONMAX = b"\x7F" # Max key
_UNPACK_FLOAT = struct.Struct("<d").unpack
_UNPACK_INT = struct.Struct("<i").unpack
_UNPACK_LENGTH_SUBTYPE = struct.Struct("<iB").unpack
_UNPACK_LONG = struct.Struct("<q").unpack
_UNPACK_TIMESTAMP = struct.Struct("<II").unpack
def _raise_unknown_type(element_type, element_name):
"""Unknown type helper."""
raise InvalidBSON("Detected unknown BSON type %r for fieldname '%s'. Are "
"you using the latest driver version?" % (
element_type, element_name))
def _get_int(data, position, dummy0, dummy1, dummy2):
"""Decode a BSON int32 to python int."""
end = position + 4
return _UNPACK_INT(data[position:end])[0], end
def _get_c_string(data, position, opts):
"""Decode a BSON 'C' string to python unicode string."""
end = data.index(b"\x00", position)
return _utf_8_decode(data[position:end],
opts.unicode_decode_error_handler, True)[0], end + 1
def _get_float(data, position, dummy0, dummy1, dummy2):
"""Decode a BSON double to python float."""
end = position + 8
return _UNPACK_FLOAT(data[position:end])[0], end
def _get_string(data, position, obj_end, opts, dummy):
"""Decode a BSON string to python unicode string."""
length = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length < 1 or obj_end - position < length:
raise InvalidBSON("invalid string length")
end = position + length - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("invalid end of string")
return _utf_8_decode(data[position:end],
opts.unicode_decode_error_handler, True)[0], end + 1
def _get_object(data, position, obj_end, opts, dummy):
"""Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef."""
obj_size = _UNPACK_INT(data[position:position + 4])[0]
end = position + obj_size - 1
if data[end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
if end >= obj_end:
raise InvalidBSON("invalid object length")
if _raw_document_class(opts.document_class):
return (opts.document_class(data[position:end + 1], opts),
position + obj_size)
obj = _elements_to_dict(data, position + 4, end, opts)
position += obj_size
if "$ref" in obj:
return (DBRef(obj.pop("$ref"), obj.pop("$id", None),
obj.pop("$db", None), obj), position)
return obj, position
def _get_array(data, position, obj_end, opts, element_name):
"""Decode a BSON array to python list."""
size = _UNPACK_INT(data[position:position + 4])[0]
end = position + size - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("bad eoo")
position += 4
end -= 1
result = []
    # Avoid doing global and attribute lookups in the loop.
append = result.append
index = data.index
getter = _ELEMENT_GETTER
while position < end:
element_type = data[position:position + 1]
# Just skip the keys.
position = index(b'\x00', position) + 1
try:
value, position = getter[element_type](
data, position, obj_end, opts, element_name)
except KeyError:
_raise_unknown_type(element_type, element_name)
append(value)
return result, position + 1
def _get_binary(data, position, dummy0, opts, dummy1):
"""Decode a BSON binary to bson.binary.Binary or python UUID."""
length, subtype = _UNPACK_LENGTH_SUBTYPE(data[position:position + 5])
position += 5
if subtype == 2:
length2 = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length2 != length - 4:
raise InvalidBSON("invalid binary (st 2) - lengths don't match!")
length = length2
end = position + length
if subtype in (3, 4):
# Java Legacy
uuid_representation = opts.uuid_representation
if uuid_representation == JAVA_LEGACY:
java = data[position:end]
value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1])
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
value = uuid.UUID(bytes_le=data[position:end])
# Python
else:
value = uuid.UUID(bytes=data[position:end])
return value, end
# Python3 special case. Decode subtype 0 to 'bytes'.
if PY3 and subtype == 0:
value = data[position:end]
else:
value = Binary(data[position:end], subtype)
return value, end
def _get_oid(data, position, dummy0, dummy1, dummy2):
"""Decode a BSON ObjectId to bson.objectid.ObjectId."""
end = position + 12
return ObjectId(data[position:end]), end
def _get_boolean(data, position, dummy0, dummy1, dummy2):
"""Decode a BSON true/false to python True/False."""
end = position + 1
return data[position:end] == b"\x01", end
def _get_date(data, position, dummy0, opts, dummy1):
"""Decode a BSON datetime to python datetime.datetime."""
end = position + 8
millis = _UNPACK_LONG(data[position:end])[0]
diff = ((millis % 1000) + 1000) % 1000
seconds = (millis - diff) / 1000
micros = diff * 1000
if opts.tz_aware:
dt = EPOCH_AWARE + datetime.timedelta(
seconds=seconds, microseconds=micros)
if opts.tzinfo:
dt = dt.astimezone(opts.tzinfo)
else:
dt = EPOCH_NAIVE + datetime.timedelta(
seconds=seconds, microseconds=micros)
return dt, end
def _get_code(data, position, obj_end, opts, element_name):
"""Decode a BSON code to bson.code.Code."""
code, position = _get_string(data, position, obj_end, opts, element_name)
return Code(code), position
def _get_code_w_scope(data, position, obj_end, opts, element_name):
"""Decode a BSON code_w_scope to bson.code.Code."""
code, position = _get_string(
data, position + 4, obj_end, opts, element_name)
scope, position = _get_object(data, position, obj_end, opts, element_name)
return Code(code, scope), position
def _get_regex(data, position, dummy0, opts, dummy1):
"""Decode a BSON regex to bson.regex.Regex or a python pattern object."""
pattern, position = _get_c_string(data, position, opts)
bson_flags, position = _get_c_string(data, position, opts)
bson_re = Regex(pattern, bson_flags)
return bson_re, position
def _get_ref(data, position, obj_end, opts, element_name):
"""Decode (deprecated) BSON DBPointer to bson.dbref.DBRef."""
collection, position = _get_string(
data, position, obj_end, opts, element_name)
oid, position = _get_oid(data, position, obj_end, opts, element_name)
return DBRef(collection, oid), position
def _get_timestamp(data, position, dummy0, dummy1, dummy2):
"""Decode a BSON timestamp to bson.timestamp.Timestamp."""
end = position + 8
inc, timestamp = _UNPACK_TIMESTAMP(data[position:end])
return Timestamp(timestamp, inc), end
def _get_int64(data, position, dummy0, dummy1, dummy2):
"""Decode a BSON int64 to bson.int64.Int64."""
end = position + 8
return Int64(_UNPACK_LONG(data[position:end])[0]), end
# Each decoder function's signature is:
# - data: bytes
# - position: int, beginning of object in 'data' to decode
# - obj_end: int, end of object to decode in 'data' if variable-length type
# - opts: a CodecOptions
_ELEMENT_GETTER = {
BSONNUM: _get_float,
BSONSTR: _get_string,
BSONOBJ: _get_object,
BSONARR: _get_array,
BSONBIN: _get_binary,
BSONUND: lambda v, w, x, y, z: (None, w), # Deprecated undefined
BSONOID: _get_oid,
BSONBOO: _get_boolean,
BSONDAT: _get_date,
BSONNUL: lambda v, w, x, y, z: (None, w),
BSONRGX: _get_regex,
BSONREF: _get_ref, # Deprecated DBPointer
BSONCOD: _get_code,
BSONSYM: _get_string, # Deprecated symbol
BSONCWS: _get_code_w_scope,
BSONINT: _get_int,
BSONTIM: _get_timestamp,
BSONLON: _get_int64,
BSONMIN: lambda v, w, x, y, z: (MinKey(), w),
BSONMAX: lambda v, w, x, y, z: (MaxKey(), w)}
def _element_to_dict(data, position, obj_end, opts):
"""Decode a single key, value pair."""
element_type = data[position:position + 1]
position += 1
element_name, position = _get_c_string(data, position, opts)
try:
value, position = _ELEMENT_GETTER[element_type](data, position,
obj_end, opts,
element_name)
except KeyError:
_raise_unknown_type(element_type, element_name)
return element_name, value, position
if _USE_C:
_element_to_dict = _cbson._element_to_dict
def _iterate_elements(data, position, obj_end, opts):
end = obj_end - 1
while position < end:
(key, value, position) = _element_to_dict(data, position, obj_end, opts)
yield key, value
def _elements_to_dict(data, position, obj_end, opts):
"""Decode a BSON document."""
result = opts.document_class()
for key, value in _iterate_elements(data, position, obj_end, opts):
result[key] = value
return result
def _bson_to_dict(data, opts):
"""Decode a BSON string to document_class."""
try:
obj_size = _UNPACK_INT(data[:4])[0]
except struct.error as exc:
raise InvalidBSON(str(exc))
if obj_size != len(data):
raise InvalidBSON("invalid object size")
if data[obj_size - 1:obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
try:
if _raw_document_class(opts.document_class):
return opts.document_class(data, opts)
return _elements_to_dict(data, 4, obj_size - 1, opts)
except InvalidBSON:
raise
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
_, exc_value, exc_tb = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb)
if _USE_C:
_bson_to_dict = _cbson._bson_to_dict
_PACK_FLOAT = struct.Struct("<d").pack
_PACK_INT = struct.Struct("<i").pack
_PACK_LENGTH_SUBTYPE = struct.Struct("<iB").pack
_PACK_LONG = struct.Struct("<q").pack
_PACK_TIMESTAMP = struct.Struct("<II").pack
_LIST_NAMES = tuple(b(str(i)) + b"\x00" for i in range(1000))
def gen_list_name():
"""Generate "keys" for encoded lists in the sequence
b"0\x00", b"1\x00", b"2\x00", ...
The first 1000 keys are returned from a pre-built cache. All
subsequent keys are generated on the fly.
"""
for name in _LIST_NAMES:
yield name
counter = itertools.count(1000)
while True:
yield b(str(next(counter))) + b"\x00"
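# Illustrative sketch (not part of the upstream module): the generator above
# yields BSON array "keys" in document order, e.g.
#
#     names = gen_list_name()
#     next(names)   # b"0\x00"  (served from the _LIST_NAMES cache)
#     next(names)   # b"1\x00"
#
# and falls back to b(str(counter)) + b"\x00" once the first 1000 cached
# names are exhausted.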
def _make_c_string_check(string):
"""Make a 'C' string, checking for embedded NUL characters."""
if isinstance(string, bytes):
if b"\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
try:
_utf_8_decode(string, None, True)
return string + b"\x00"
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % string)
else:
if "\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
return _utf_8_encode(string)[0] + b"\x00"
def _make_c_string(string):
"""Make a 'C' string."""
if isinstance(string, bytes):
try:
_utf_8_decode(string, None, True)
return string + b"\x00"
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % string)
else:
return _utf_8_encode(string)[0] + b"\x00"
if PY3:
def _make_name(string):
"""Make a 'C' string suitable for a BSON key."""
# Keys can only be text in python 3.
if "\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
return _utf_8_encode(string)[0] + b"\x00"
else:
# Keys can be unicode or bytes in python 2.
_make_name = _make_c_string_check
def _encode_float(name, value, dummy0, dummy1):
"""Encode a float."""
return b"\x01" + name + _PACK_FLOAT(value)
if PY3:
def _encode_bytes(name, value, dummy0, dummy1):
"""Encode a python bytes."""
# Python3 special case. Store 'bytes' as BSON binary subtype 0.
return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value
else:
def _encode_bytes(name, value, dummy0, dummy1):
"""Encode a python str (python 2.x)."""
try:
_utf_8_decode(value, None, True)
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % (value,))
return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00"
def _encode_mapping(name, value, check_keys, opts):
"""Encode a mapping type."""
if _raw_document_class(value):
return b'\x03' + name + value.raw
data = b"".join([_element_to_bson(key, val, check_keys, opts)
for key, val in iteritems(value)])
return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
def _encode_dbref(name, value, check_keys, opts):
"""Encode bson.dbref.DBRef."""
buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00")
begin = len(buf) - 4
buf += _name_value_to_bson(b"$ref\x00",
value.collection, check_keys, opts)
buf += _name_value_to_bson(b"$id\x00",
value.id, check_keys, opts)
if value.database is not None:
buf += _name_value_to_bson(
b"$db\x00", value.database, check_keys, opts)
for key, val in iteritems(value._DBRef__kwargs):
buf += _element_to_bson(key, val, check_keys, opts)
buf += b"\x00"
buf[begin:begin + 4] = _PACK_INT(len(buf) - begin)
return bytes(buf)
def _encode_list(name, value, check_keys, opts):
"""Encode a list/tuple."""
lname = gen_list_name()
data = b"".join([_name_value_to_bson(next(lname), item,
check_keys, opts)
for item in value])
return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
def _encode_text(name, value, dummy0, dummy1):
"""Encode a python unicode (python 2.x) / str (python 3.x)."""
value = _utf_8_encode(value)[0]
return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00"
def _encode_binary(name, value, dummy0, dummy1):
"""Encode bson.binary.Binary."""
subtype = value.subtype
if subtype == 2:
value = _PACK_INT(len(value)) + value
return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value
def _encode_uuid(name, value, dummy, opts):
"""Encode uuid.UUID."""
uuid_representation = opts.uuid_representation
# Python Legacy Common Case
if uuid_representation == OLD_UUID_SUBTYPE:
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes
# Java Legacy
elif uuid_representation == JAVA_LEGACY:
from_uuid = value.bytes
data = from_uuid[0:8][::-1] + from_uuid[8:16][::-1]
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + data
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
# Microsoft GUID representation.
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes_le
# New
else:
return b"\x05" + name + b'\x10\x00\x00\x00\x04' + value.bytes
def _encode_objectid(name, value, dummy0, dummy1):
"""Encode bson.objectid.ObjectId."""
return b"\x07" + name + value.binary
def _encode_bool(name, value, dummy0, dummy1):
"""Encode a python boolean (True/False)."""
return b"\x08" + name + (value and b"\x01" or b"\x00")
def _encode_datetime(name, value, dummy0, dummy1):
"""Encode datetime.datetime."""
if value.utcoffset() is not None:
value = value - value.utcoffset()
millis = int(calendar.timegm(value.timetuple()) * 1000 +
value.microsecond / 1000)
return b"\x09" + name + _PACK_LONG(millis)
def _encode_none(name, dummy0, dummy1, dummy2):
"""Encode python None."""
return b"\x0A" + name
def _encode_regex(name, value, dummy0, dummy1):
"""Encode a python regex or bson.regex.Regex."""
flags = value.flags
# Python 2 common case
if flags == 0:
return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00"
# Python 3 common case
elif flags == re.UNICODE:
return b"\x0B" + name + _make_c_string_check(value.pattern) + b"u\x00"
else:
sflags = b""
if flags & re.IGNORECASE:
sflags += b"i"
if flags & re.LOCALE:
sflags += b"l"
if flags & re.MULTILINE:
sflags += b"m"
if flags & re.DOTALL:
sflags += b"s"
if flags & re.UNICODE:
sflags += b"u"
if flags & re.VERBOSE:
sflags += b"x"
sflags += b"\x00"
return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags
def _encode_code(name, value, dummy, opts):
"""Encode bson.code.Code."""
cstring = _make_c_string(value)
cstrlen = len(cstring)
if not value.scope:
return b"\x0D" + name + _PACK_INT(cstrlen) + cstring
scope = _dict_to_bson(value.scope, False, opts, False)
full_length = _PACK_INT(8 + cstrlen + len(scope))
return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope
def _encode_int(name, value, dummy0, dummy1):
"""Encode a python int."""
if -2147483648 <= value <= 2147483647:
return b"\x10" + name + _PACK_INT(value)
else:
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints")
def _encode_timestamp(name, value, dummy0, dummy1):
"""Encode bson.timestamp.Timestamp."""
return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time)
def _encode_long(name, value, dummy0, dummy1):
"""Encode a python long (python 2.x)"""
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints")
def _encode_minkey(name, dummy0, dummy1, dummy2):
"""Encode bson.min_key.MinKey."""
return b"\xFF" + name
def _encode_maxkey(name, dummy0, dummy1, dummy2):
"""Encode bson.max_key.MaxKey."""
return b"\x7F" + name
# Each encoder function's signature is:
# - name: utf-8 bytes
# - value: a Python data type, e.g. a Python int for _encode_int
# - check_keys: bool, whether to check for invalid names
# - opts: a CodecOptions
_ENCODERS = {
bool: _encode_bool,
bytes: _encode_bytes,
datetime.datetime: _encode_datetime,
dict: _encode_mapping,
float: _encode_float,
int: _encode_int,
list: _encode_list,
# unicode in py2, str in py3
text_type: _encode_text,
tuple: _encode_list,
type(None): _encode_none,
uuid.UUID: _encode_uuid,
Binary: _encode_binary,
Int64: _encode_long,
Code: _encode_code,
DBRef: _encode_dbref,
MaxKey: _encode_maxkey,
MinKey: _encode_minkey,
ObjectId: _encode_objectid,
Regex: _encode_regex,
RE_TYPE: _encode_regex,
SON: _encode_mapping,
Timestamp: _encode_timestamp,
UUIDLegacy: _encode_binary,
# Special case. This will never be looked up directly.
collections.Mapping: _encode_mapping,
}
_MARKERS = {
5: _encode_binary,
7: _encode_objectid,
11: _encode_regex,
13: _encode_code,
17: _encode_timestamp,
18: _encode_long,
100: _encode_dbref,
127: _encode_maxkey,
255: _encode_minkey,
}
if not PY3:
_ENCODERS[long] = _encode_long
def _name_value_to_bson(name, value, check_keys, opts):
"""Encode a single name, value pair."""
# First see if the type is already cached. KeyError will only ever
# happen once per subtype.
try:
return _ENCODERS[type(value)](name, value, check_keys, opts)
except KeyError:
pass
# Second, fall back to trying _type_marker. This has to be done
# before the loop below since users could subclass one of our
# custom types that subclasses a python built-in (e.g. Binary)
marker = getattr(value, "_type_marker", None)
if isinstance(marker, int) and marker in _MARKERS:
func = _MARKERS[marker]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
# If all else fails test each base type. This will only happen once for
# a subtype of a supported base type.
for base in _ENCODERS:
if isinstance(value, base):
func = _ENCODERS[base]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
raise InvalidDocument("cannot convert value of type %s to bson" %
type(value))
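# Illustrative note (not part of the upstream module): the three-step lookup
# above means a user-defined subclass is resolved once and then cached, e.g.
#
#     class MyInt(int):
#         pass
#
#     _name_value_to_bson(b"n\x00", MyInt(7), False, DEFAULT_CODEC_OPTIONS)
#     # misses the direct type() lookup, has no _type_marker, matches the
#     # isinstance(value, int) fallback, and caches _ENCODERS[MyInt].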
def _element_to_bson(key, value, check_keys, opts):
"""Encode a single key, value pair."""
if not isinstance(key, string_type):
raise InvalidDocument("documents must have only string keys, "
"key was %r" % (key,))
if check_keys:
if key.startswith("$"):
raise InvalidDocument("key %r must not start with '$'" % (key,))
if "." in key:
raise InvalidDocument("key %r must not contain '.'" % (key,))
name = _make_name(key)
return _name_value_to_bson(name, value, check_keys, opts)
def _dict_to_bson(doc, check_keys, opts, top_level=True):
"""Encode a document to BSON."""
if _raw_document_class(doc):
return doc.raw
try:
elements = []
if top_level and "_id" in doc:
elements.append(_name_value_to_bson(b"_id\x00", doc["_id"],
check_keys, opts))
for (key, value) in iteritems(doc):
if not top_level or key != "_id":
elements.append(_element_to_bson(key, value,
check_keys, opts))
except AttributeError:
raise TypeError("encoder expected a mapping type but got: %r" % (doc,))
encoded = b"".join(elements)
return _PACK_INT(len(encoded) + 5) + encoded + b"\x00"
if _USE_C:
_dict_to_bson = _cbson._dict_to_bson
_CODEC_OPTIONS_TYPE_ERROR = TypeError(
"codec_options must be an instance of CodecOptions")
def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode BSON data to multiple documents.
`data` must be a string of concatenated, valid, BSON-encoded
documents.
:Parameters:
- `data`: BSON data
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Removed `compile_re` option: PyMongo now always represents BSON regular
expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
docs = []
position = 0
end = len(data) - 1
use_raw = _raw_document_class(codec_options.document_class)
try:
while position < end:
obj_size = _UNPACK_INT(data[position:position + 4])[0]
if len(data) - position < obj_size:
raise InvalidBSON("invalid object size")
obj_end = position + obj_size - 1
if data[obj_end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
if use_raw:
docs.append(
codec_options.document_class(
data[position:obj_end + 1], codec_options))
else:
docs.append(_elements_to_dict(data,
position + 4,
obj_end,
codec_options))
position += obj_size
return docs
except InvalidBSON:
raise
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
_, exc_value, exc_tb = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb)
if _USE_C:
decode_all = _cbson.decode_all
def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode BSON data to multiple documents as a generator.
Works similarly to the decode_all function, but yields one document at a
time.
`data` must be a string of concatenated, valid, BSON-encoded
documents.
:Parameters:
- `data`: BSON data
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionadded:: 2.8
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
position = 0
end = len(data) - 1
while position < end:
obj_size = _UNPACK_INT(data[position:position + 4])[0]
elements = data[position:position + obj_size]
position += obj_size
yield _bson_to_dict(elements, codec_options)
def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode bson data from a file to multiple documents as a generator.
Works similarly to the decode_all function, but reads from the file object
in chunks and parses bson in chunks, yielding one document at a time.
:Parameters:
- `file_obj`: A file object containing BSON data.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionadded:: 2.8
"""
while True:
# Read size of next object.
size_data = file_obj.read(4)
if len(size_data) == 0:
            break  # Finished with file normally.
elif len(size_data) != 4:
raise InvalidBSON("cut off in middle of objsize")
obj_size = _UNPACK_INT(size_data)[0] - 4
elements = size_data + file_obj.read(obj_size)
yield _bson_to_dict(elements, codec_options)
def is_valid(bson):
"""Check that the given string represents valid :class:`BSON` data.
Raises :class:`TypeError` if `bson` is not an instance of
:class:`str` (:class:`bytes` in python 3). Returns ``True``
if `bson` is valid :class:`BSON`, ``False`` otherwise.
:Parameters:
- `bson`: the data to be validated
"""
if not isinstance(bson, bytes):
raise TypeError("BSON data must be an instance of a subclass of bytes")
try:
_bson_to_dict(bson, DEFAULT_CODEC_OPTIONS)
return True
except Exception:
return False
class BSON(bytes):
"""BSON (Binary JSON) data.
"""
@classmethod
def encode(cls, document, check_keys=False,
codec_options=DEFAULT_CODEC_OPTIONS):
"""Encode a document to a new :class:`BSON` instance.
A document can be any mapping type (like :class:`dict`).
Raises :class:`TypeError` if `document` is not a mapping type,
or contains keys that are not instances of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~bson.errors.InvalidDocument` if `document` cannot be
converted to :class:`BSON`.
:Parameters:
- `document`: mapping type representing a document
- `check_keys` (optional): check if keys start with '$' or
contain '.', raising :class:`~bson.errors.InvalidDocument` in
either case
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `uuid_subtype` option with `codec_options`.
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
return cls(_dict_to_bson(document, check_keys, codec_options))
def decode(self, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode this BSON data.
By default, returns a BSON document represented as a Python
:class:`dict`. To use a different :class:`MutableMapping` class,
configure a :class:`~bson.codec_options.CodecOptions`::
>>> import collections # From Python standard library.
>>> import bson
>>> from bson.codec_options import CodecOptions
>>> data = bson.BSON.encode({'a': 1})
            >>> decoded_doc = bson.BSON.decode(data)
            >>> type(decoded_doc)
            <type 'dict'>
>>> options = CodecOptions(document_class=collections.OrderedDict)
>>> decoded_doc = bson.BSON.decode(data, codec_options=options)
>>> type(decoded_doc)
<class 'collections.OrderedDict'>
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Removed `compile_re` option: PyMongo now always represents BSON
regular expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
return _bson_to_dict(self, codec_options)
def has_c():
"""Is the C extension installed?
"""
return _USE_C
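# Hedged usage sketch (illustrative, not part of the upstream module): a quick
# round trip through the public helpers defined above.
if __name__ == "__main__":
    _doc = {"hello": "world", "n": 42}
    _raw = BSON.encode(_doc)            # BSON is a bytes subclass
    assert _raw.decode() == _doc        # decode back into a dict
    assert decode_all(_raw) == [_doc]   # handles concatenated documents too
    assert is_valid(_raw)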
|
inares/edx-platform
|
refs/heads/inares_sass
|
lms/djangoapps/courseware/tests/test_group_access.py
|
77
|
"""
This module defines tests for courseware.access that are specific to group
access control rules.
"""
import ddt
from nose.plugins.attrib import attr
from stevedore.extension import Extension, ExtensionManager
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition, USER_PARTITION_SCHEME_NAMESPACE
from xmodule.modulestore.django import modulestore
import courseware.access as access
from courseware.tests.factories import StaffFactory, UserFactory
class MemoryUserPartitionScheme(object):
"""
In-memory partition scheme for testing.
"""
name = "memory"
def __init__(self):
self.current_group = {}
def set_group_for_user(self, user, user_partition, group):
"""
Link this user to this group in this partition, in memory.
"""
self.current_group.setdefault(user.id, {})[user_partition.id] = group
def get_group_for_user(self, course_id, user, user_partition, track_function=None): # pylint: disable=unused-argument
"""
Fetch the group to which this user is linked in this partition, or None.
"""
return self.current_group.get(user.id, {}).get(user_partition.id)
def resolve_attrs(test_method):
"""
Helper function used with ddt. It allows passing strings to test methods
via @ddt.data, which are the names of instance attributes on `self`, and
replaces them with the resolved values of those attributes in the method
call.
"""
def _wrapper(self, *args): # pylint: disable=missing-docstring
new_args = [getattr(self, arg) for arg in args]
return test_method(self, *new_args)
return _wrapper
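# Illustrative note (not part of the original test module): combined with ddt,
#
#     @ddt.data(('chapter_location', 'component_location'))
#     @ddt.unpack
#     @resolve_attrs
#     def test_example(self, block_specified, block_accessed):
#         ...
#
# the test body receives self.chapter_location / self.component_location
# rather than the raw attribute-name strings passed to @ddt.data.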
@attr('shard_1')
@ddt.ddt
class GroupAccessTestCase(ModuleStoreTestCase):
"""
Tests to ensure that has_access() correctly enforces the visibility
restrictions specified in the `group_access` field of XBlocks.
"""
def set_user_group(self, user, partition, group):
"""
Internal DRY / shorthand.
"""
partition.scheme.set_group_for_user(user, partition, group)
def set_group_access(self, block_location, access_dict):
"""
Set group_access on block specified by location.
"""
block = modulestore().get_item(block_location)
block.group_access = access_dict
modulestore().update_item(block, 1)
def set_user_partitions(self, block_location, partitions):
"""
Sets the user_partitions on block specified by location.
"""
block = modulestore().get_item(block_location)
block.user_partitions = partitions
modulestore().update_item(block, 1)
def setUp(self):
super(GroupAccessTestCase, self).setUp()
UserPartition.scheme_extensions = ExtensionManager.make_test_instance(
[
Extension(
"memory",
USER_PARTITION_SCHEME_NAMESPACE,
MemoryUserPartitionScheme(),
None
),
Extension(
"random",
USER_PARTITION_SCHEME_NAMESPACE,
MemoryUserPartitionScheme(),
None
)
],
namespace=USER_PARTITION_SCHEME_NAMESPACE
)
self.cat_group = Group(10, 'cats')
self.dog_group = Group(20, 'dogs')
self.worm_group = Group(30, 'worms')
self.animal_partition = UserPartition(
0,
'Pet Partition',
'which animal are you?',
[self.cat_group, self.dog_group, self.worm_group],
scheme=UserPartition.get_scheme("memory"),
)
self.red_group = Group(1000, 'red')
self.blue_group = Group(2000, 'blue')
self.gray_group = Group(3000, 'gray')
self.color_partition = UserPartition(
100,
'Color Partition',
'what color are you?',
[self.red_group, self.blue_group, self.gray_group],
scheme=UserPartition.get_scheme("memory"),
)
self.course = CourseFactory.create(
user_partitions=[self.animal_partition, self.color_partition],
)
with self.store.bulk_operations(self.course.id, emit_signals=False):
chapter = ItemFactory.create(category='chapter', parent=self.course)
section = ItemFactory.create(category='sequential', parent=chapter)
vertical = ItemFactory.create(category='vertical', parent=section)
component = ItemFactory.create(category='problem', parent=vertical)
self.chapter_location = chapter.location
self.section_location = section.location
self.vertical_location = vertical.location
self.component_location = component.location
self.red_cat = UserFactory() # student in red and cat groups
self.set_user_group(self.red_cat, self.animal_partition, self.cat_group)
self.set_user_group(self.red_cat, self.color_partition, self.red_group)
self.blue_dog = UserFactory() # student in blue and dog groups
self.set_user_group(self.blue_dog, self.animal_partition, self.dog_group)
self.set_user_group(self.blue_dog, self.color_partition, self.blue_group)
self.white_mouse = UserFactory() # student in no group
self.gray_worm = UserFactory() # student in deleted group
self.set_user_group(self.gray_worm, self.animal_partition, self.worm_group)
self.set_user_group(self.gray_worm, self.color_partition, self.gray_group)
# delete the gray/worm groups from the partitions now so we can test scenarios
        # for a user whose group is missing.
self.animal_partition.groups.pop()
self.color_partition.groups.pop()
# add a staff user, whose access will be unconditional in spite of group access.
self.staff = StaffFactory.create(course_key=self.course.id)
# avoid repeatedly declaring the same sequence for ddt in all the test cases.
PARENT_CHILD_PAIRS = (
('chapter_location', 'chapter_location'),
('chapter_location', 'section_location'),
('chapter_location', 'vertical_location'),
('chapter_location', 'component_location'),
('section_location', 'section_location'),
('section_location', 'vertical_location'),
('section_location', 'component_location'),
('vertical_location', 'vertical_location'),
('vertical_location', 'component_location'),
)
def tearDown(self):
"""
Clear out the stevedore extension points on UserPartition to avoid
side-effects in other tests.
"""
UserPartition.scheme_extensions = None
super(GroupAccessTestCase, self).tearDown()
def check_access(self, user, block_location, is_accessible):
"""
DRY helper.
"""
self.assertIs(
bool(access.has_access(user, 'load', modulestore().get_item(block_location), self.course.id)),
is_accessible
)
def ensure_staff_access(self, block_location):
"""
Another DRY helper.
"""
block = modulestore().get_item(block_location)
self.assertTrue(access.has_access(self.staff, 'load', block, self.course.id))
# NOTE: in all the tests that follow, `block_specified` and
# `block_accessed` designate the place where group_access rules are
# specified, and where access is being checked in the test, respectively.
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_single_partition_single_group(self, block_specified, block_accessed):
"""
Access checks are correctly enforced on the block when a single group
is specified for a single partition.
"""
self.set_group_access(
block_specified,
{self.animal_partition.id: [self.cat_group.id]},
)
self.check_access(self.red_cat, block_accessed, True)
self.check_access(self.blue_dog, block_accessed, False)
self.check_access(self.white_mouse, block_accessed, False)
self.check_access(self.gray_worm, block_accessed, False)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_single_partition_two_groups(self, block_specified, block_accessed):
"""
Access checks are correctly enforced on the block when multiple groups
are specified for a single partition.
"""
self.set_group_access(
block_specified,
{self.animal_partition.id: [self.cat_group.id, self.dog_group.id]},
)
self.check_access(self.red_cat, block_accessed, True)
self.check_access(self.blue_dog, block_accessed, True)
self.check_access(self.white_mouse, block_accessed, False)
self.check_access(self.gray_worm, block_accessed, False)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_single_partition_disjoint_groups(self, block_specified, block_accessed):
"""
When the parent's and child's group specifications do not intersect,
access is denied to the child regardless of the user's groups.
"""
if block_specified == block_accessed:
# this test isn't valid unless block_accessed is a descendant of
# block_specified.
return
self.set_group_access(
block_specified,
{self.animal_partition.id: [self.dog_group.id]},
)
self.set_group_access(
block_accessed,
{self.animal_partition.id: [self.cat_group.id]},
)
self.check_access(self.red_cat, block_accessed, False)
self.check_access(self.blue_dog, block_accessed, False)
self.check_access(self.white_mouse, block_accessed, False)
self.check_access(self.gray_worm, block_accessed, False)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_single_empty_partition(self, block_specified, block_accessed):
"""
No group access checks are enforced on the block when group_access
declares a partition but does not specify any groups.
"""
self.set_group_access(block_specified, {self.animal_partition.id: []})
self.check_access(self.red_cat, block_accessed, True)
self.check_access(self.blue_dog, block_accessed, True)
self.check_access(self.white_mouse, block_accessed, True)
self.check_access(self.gray_worm, block_accessed, True)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_empty_dict(self, block_specified, block_accessed):
"""
No group access checks are enforced on the block when group_access is an
empty dictionary.
"""
self.set_group_access(block_specified, {})
self.check_access(self.red_cat, block_accessed, True)
self.check_access(self.blue_dog, block_accessed, True)
self.check_access(self.white_mouse, block_accessed, True)
self.check_access(self.gray_worm, block_accessed, True)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_none(self, block_specified, block_accessed):
"""
No group access checks are enforced on the block when group_access is None.
"""
self.set_group_access(block_specified, None)
self.check_access(self.red_cat, block_accessed, True)
self.check_access(self.blue_dog, block_accessed, True)
self.check_access(self.white_mouse, block_accessed, True)
self.check_access(self.gray_worm, block_accessed, True)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_single_partition_group_none(self, block_specified, block_accessed):
"""
No group access checks are enforced on the block when group_access
specifies a partition but its value is None.
"""
self.set_group_access(block_specified, {self.animal_partition.id: None})
self.check_access(self.red_cat, block_accessed, True)
self.check_access(self.blue_dog, block_accessed, True)
self.check_access(self.white_mouse, block_accessed, True)
self.check_access(self.gray_worm, block_accessed, True)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_single_partition_group_empty_list(self, block_specified, block_accessed):
"""
No group access checks are enforced on the block when group_access
specifies a partition but its value is an empty list.
"""
self.set_group_access(block_specified, {self.animal_partition.id: []})
self.check_access(self.red_cat, block_accessed, True)
self.check_access(self.blue_dog, block_accessed, True)
self.check_access(self.white_mouse, block_accessed, True)
self.check_access(self.gray_worm, block_accessed, True)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_nonexistent_nonempty_partition(self, block_specified, block_accessed):
"""
Access will be denied to the block when group_access specifies a
nonempty partition that does not exist in course.user_partitions.
"""
self.set_group_access(block_specified, {9: [99]})
self.check_access(self.red_cat, block_accessed, False)
self.check_access(self.blue_dog, block_accessed, False)
self.check_access(self.white_mouse, block_accessed, False)
self.check_access(self.gray_worm, block_accessed, False)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_has_access_nonexistent_group(self, block_specified, block_accessed):
"""
Access will be denied to the block when group_access contains a group
id that does not exist in its referenced partition.
"""
self.set_group_access(block_specified, {self.animal_partition.id: [99]})
self.check_access(self.red_cat, block_accessed, False)
self.check_access(self.blue_dog, block_accessed, False)
self.check_access(self.white_mouse, block_accessed, False)
self.check_access(self.gray_worm, block_accessed, False)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_multiple_partitions(self, block_specified, block_accessed):
"""
Group access restrictions are correctly enforced when multiple partition
/ group rules are defined.
"""
self.set_group_access(
block_specified,
{
self.animal_partition.id: [self.cat_group.id],
self.color_partition.id: [self.red_group.id],
},
)
self.check_access(self.red_cat, block_accessed, True)
self.check_access(self.blue_dog, block_accessed, False)
self.check_access(self.white_mouse, block_accessed, False)
self.check_access(self.gray_worm, block_accessed, False)
self.ensure_staff_access(block_accessed)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
@resolve_attrs
def test_multiple_partitions_deny_access(self, block_specified, block_accessed):
"""
Group access restrictions correctly deny access even when some (but not
all) group_access rules are satisfied.
"""
self.set_group_access(
block_specified,
{
self.animal_partition.id: [self.cat_group.id],
self.color_partition.id: [self.blue_group.id],
},
)
self.check_access(self.red_cat, block_accessed, False)
self.check_access(self.blue_dog, block_accessed, False)
self.check_access(self.gray_worm, block_accessed, False)
self.ensure_staff_access(block_accessed)
def test_group_access_short_circuits(self):
"""
Test that the group_access check short-circuits if there are no user_partitions defined
except user_partitions in use by the split_test module.
"""
# Initially, "red_cat" user can't view the vertical.
self.set_group_access(self.chapter_location, {self.animal_partition.id: [self.dog_group.id]})
self.check_access(self.red_cat, self.vertical_location, False)
# Change the vertical's user_partitions value to the empty list. Now red_cat can view the vertical.
self.set_user_partitions(self.vertical_location, [])
self.check_access(self.red_cat, self.vertical_location, True)
# Change the vertical's user_partitions value to include only "split_test" partitions.
split_test_partition = UserPartition(
199,
'split_test partition',
'nothing to look at here',
[Group(2, 'random group')],
scheme=UserPartition.get_scheme("random"),
)
self.set_user_partitions(self.vertical_location, [split_test_partition])
self.check_access(self.red_cat, self.vertical_location, True)
# Finally, add back in a cohort user_partition
self.set_user_partitions(self.vertical_location, [split_test_partition, self.animal_partition])
self.check_access(self.red_cat, self.vertical_location, False)
|
openhatch/oh-mainline
|
refs/heads/master
|
vendor/packages/django-extensions/django_extensions/utils/text.py
|
46
|
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
def truncate_letters(s, num):
""" truncates a string to a number of letters, similar to truncate_words """
s = force_unicode(s)
length = int(num)
if len(s) > length:
s = s[:length]
if not s.endswith('...'):
s += '...'
return s
truncate_letters = allow_lazy(truncate_letters, unicode)
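# Illustrative behaviour sketch (not part of the upstream helper):
#
#     truncate_letters(u'Django extensions', 6)   # -> u'Django...'
#     truncate_letters(u'short', 10)              # -> u'short' (unchanged)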
|
ericvandenbergfb/spark
|
refs/heads/master
|
examples/src/main/python/ml/index_to_string_example.py
|
123
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import IndexToString, StringIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("IndexToStringExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame(
[(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
["id", "category"])
indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
model = indexer.fit(df)
indexed = model.transform(df)
print("Transformed string column '%s' to indexed column '%s'"
% (indexer.getInputCol(), indexer.getOutputCol()))
indexed.show()
print("StringIndexer will store labels in output column metadata\n")
converter = IndexToString(inputCol="categoryIndex", outputCol="originalCategory")
converted = converter.transform(indexed)
print("Transformed indexed column '%s' back to original string column '%s' using "
"labels in metadata" % (converter.getInputCol(), converter.getOutputCol()))
converted.select("id", "categoryIndex", "originalCategory").show()
# $example off$
spark.stop()
|
matrixise/odoo
|
refs/heads/8.0
|
addons/note/tests/test_note.py
|
427
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestNote(common.TransactionCase):
def test_bug_lp_1156215(self):
"""ensure any users can create new users"""
cr, uid = self.cr, self.uid
IMD = self.registry('ir.model.data')
Users = self.registry('res.users')
_, demo_user = IMD.get_object_reference(cr, uid, 'base', 'user_demo')
_, group_id = IMD.get_object_reference(cr, uid, 'base', 'group_erp_manager')
Users.write(cr, uid, [demo_user], {
'groups_id': [(4, group_id)],
})
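        # Illustrative note (not part of the original test): the (4, group_id)
        # tuple is the ORM "link" command, i.e. it adds the demo user to the
        # group_erp_manager (Access Rights) group before the create() call below.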
# must not fail
Users.create(cr, demo_user, {
'name': 'test bug lp:1156215',
'login': 'lp_1156215',
})
|
2ndQuadrant/ansible
|
refs/heads/master
|
test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py
|
298
|
data = 'spam3'
|
abhinavsingh/proxy.py
|
refs/heads/develop
|
proxy/testing/test_case.py
|
1
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import contextlib
import time
import unittest
from typing import Optional, List, Generator, Any
from ..proxy import Proxy
from ..common.constants import DEFAULT_TIMEOUT
from ..common.utils import get_available_port, new_socket_connection
from ..plugin import CacheResponsesPlugin
class TestCase(unittest.TestCase):
"""Base TestCase class that automatically setup and teardown proxy.py."""
DEFAULT_PROXY_PY_STARTUP_FLAGS = [
'--num-workers', '1',
'--threadless',
]
PROXY_PORT: int = 8899
PROXY: Optional[Proxy] = None
INPUT_ARGS: Optional[List[str]] = None
@classmethod
def setUpClass(cls) -> None:
cls.PROXY_PORT = get_available_port()
cls.INPUT_ARGS = getattr(cls, 'PROXY_PY_STARTUP_FLAGS') \
if hasattr(cls, 'PROXY_PY_STARTUP_FLAGS') \
else cls.DEFAULT_PROXY_PY_STARTUP_FLAGS
cls.INPUT_ARGS.append('--hostname')
cls.INPUT_ARGS.append('0.0.0.0')
cls.INPUT_ARGS.append('--port')
cls.INPUT_ARGS.append(str(cls.PROXY_PORT))
cls.PROXY = Proxy(input_args=cls.INPUT_ARGS)
cls.PROXY.flags.plugins[b'HttpProxyBasePlugin'].append(
CacheResponsesPlugin)
cls.PROXY.__enter__()
cls.wait_for_server(cls.PROXY_PORT)
@staticmethod
def wait_for_server(proxy_port: int,
wait_for_seconds: int = DEFAULT_TIMEOUT) -> None:
"""Wait for proxy.py server to come up."""
start_time = time.time()
while True:
try:
conn = new_socket_connection(
('localhost', proxy_port))
conn.close()
break
except ConnectionRefusedError:
time.sleep(0.1)
if time.time() - start_time > wait_for_seconds:
raise TimeoutError(
'Timed out while waiting for proxy.py to start...')
@classmethod
def tearDownClass(cls) -> None:
assert cls.PROXY
cls.PROXY.__exit__(None, None, None)
cls.PROXY_PORT = 8899
cls.PROXY = None
cls.INPUT_ARGS = None
@contextlib.contextmanager
def vcr(self) -> Generator[None, None, None]:
try:
CacheResponsesPlugin.ENABLED.set()
yield
finally:
CacheResponsesPlugin.ENABLED.clear()
def run(self, result: Optional[unittest.TestResult] = None) -> Any:
super().run(result)
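# Illustrative usage sketch (not part of the original module): a hypothetical
# subclass relying on the proxy instance that setUpClass starts automatically.
# The class name and test body are made up for the example.
class TestProxySmokeExample(TestCase):
    def test_proxy_accepts_connections(self) -> None:
        with self.vcr():
            conn = new_socket_connection(('localhost', self.PROXY_PORT))
            conn.close()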
|
motion2015/edx-platform
|
refs/heads/master
|
lms/djangoapps/branding/models.py
|
84
|
"""
Model used by Video module for Branding configuration.
Includes:
BrandingInfoConfig: A ConfigurationModel for managing how Video Module will
use Branding.
"""
import json
from django.db.models import TextField
from django.core.exceptions import ValidationError
from config_models.models import ConfigurationModel
class BrandingInfoConfig(ConfigurationModel):
"""
Configuration for Branding.
Example of configuration that must be stored:
{
"CN": {
"url": "http://www.xuetangx.com",
"logo_src": "http://www.xuetangx.com/static/images/logo.png",
"logo_tag": "Video hosted by XuetangX.com"
}
}
"""
configuration = TextField(
help_text="JSON data of Configuration for Video Branding."
)
def clean(self):
"""
Validates configuration text field.
"""
try:
json.loads(self.configuration)
except ValueError:
raise ValidationError('Must be valid JSON string.')
@classmethod
def get_config(cls):
"""
Get the Video Branding Configuration.
"""
info = cls.current()
return json.loads(info.configuration) if info.enabled else {}
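    # Illustrative note (not part of the original module): assuming the
    # docstring configuration above is stored and the model is enabled,
    # get_config() returns the parsed dict, e.g.
    # {"CN": {"url": "...", "logo_src": "...", "logo_tag": "..."}};
    # when no enabled configuration exists it returns {}.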
class BrandingApiConfig(ConfigurationModel):
"""Configure Branding api's
Enable or disable api's functionality.
When this flag is disabled, the api will return 404.
When the flag is enabled, the api will returns the valid reponse.
"""
pass
|
yencarnacion/jaikuengine
|
refs/heads/master
|
.google_appengine/lib/django-0.96/django/db/models/related.py
|
49
|
class BoundRelatedObject(object):
def __init__(self, related_object, field_mapping, original):
self.relation = related_object
self.field_mappings = field_mapping[related_object.name]
def template_name(self):
raise NotImplementedError
def __repr__(self):
return repr(self.__dict__)
class RelatedObject(object):
def __init__(self, parent_model, model, field):
self.parent_model = parent_model
self.model = model
self.opts = model._meta
self.field = field
self.edit_inline = field.rel.edit_inline
self.name = '%s:%s' % (self.opts.app_label, self.opts.module_name)
self.var_name = self.opts.object_name.lower()
def flatten_data(self, follow, obj=None):
new_data = {}
rel_instances = self.get_list(obj)
for i, rel_instance in enumerate(rel_instances):
instance_data = {}
for f in self.opts.fields + self.opts.many_to_many:
# TODO: Fix for recursive manipulators.
fol = follow.get(f.name, None)
if fol:
field_data = f.flatten_data(fol, rel_instance)
for name, value in field_data.items():
instance_data['%s.%d.%s' % (self.var_name, i, name)] = value
new_data.update(instance_data)
return new_data
def extract_data(self, data):
"""
Pull out the data meant for inline objects of this class,
i.e. anything starting with our module name.
"""
return data # TODO
def get_list(self, parent_instance=None):
"Get the list of this type of object from an instance of the parent class."
if parent_instance is not None:
attr = getattr(parent_instance, self.get_accessor_name())
if self.field.rel.multiple:
# For many-to-many relationships, return a list of objects
# corresponding to the xxx_num_in_admin options of the field
objects = list(attr.all())
count = len(objects) + self.field.rel.num_extra_on_change
if self.field.rel.min_num_in_admin:
count = max(count, self.field.rel.min_num_in_admin)
if self.field.rel.max_num_in_admin:
count = min(count, self.field.rel.max_num_in_admin)
change = count - len(objects)
if change > 0:
return objects + [None] * change
if change < 0:
return objects[:change]
else: # Just right
return objects
else:
# A one-to-one relationship, so just return the single related
# object
return [attr]
else:
if self.field.rel.min_num_in_admin:
return [None] * max(self.field.rel.num_in_admin, self.field.rel.min_num_in_admin)
else:
return [None] * self.field.rel.num_in_admin
def get_db_prep_lookup(self, lookup_type, value):
# Defer to the actual field definition for db prep
return self.field.get_db_prep_lookup(lookup_type, value)
def editable_fields(self):
"Get the fields in this class that should be edited inline."
return [f for f in self.opts.fields + self.opts.many_to_many if f.editable and f != self.field]
def get_follow(self, override=None):
if isinstance(override, bool):
if override:
over = {}
else:
return None
else:
if override:
over = override.copy()
elif self.edit_inline:
over = {}
else:
return None
over[self.field.name] = False
return self.opts.get_follow(over)
def get_manipulator_fields(self, opts, manipulator, change, follow):
if self.field.rel.multiple:
if change:
attr = getattr(manipulator.original_object, self.get_accessor_name())
count = attr.count()
count += self.field.rel.num_extra_on_change
else:
count = self.field.rel.num_in_admin
if self.field.rel.min_num_in_admin:
count = max(count, self.field.rel.min_num_in_admin)
if self.field.rel.max_num_in_admin:
count = min(count, self.field.rel.max_num_in_admin)
else:
count = 1
fields = []
for i in range(count):
for f in self.opts.fields + self.opts.many_to_many:
if follow.get(f.name, False):
prefix = '%s.%d.' % (self.var_name, i)
fields.extend(f.get_manipulator_fields(self.opts, manipulator, change,
name_prefix=prefix, rel=True))
return fields
def __repr__(self):
return "<RelatedObject: %s related to %s>" % (self.name, self.field.name)
def bind(self, field_mapping, original, bound_related_object_class=BoundRelatedObject):
return bound_related_object_class(self, field_mapping, original)
def get_accessor_name(self):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lower-cased object_name + "_set",
# but this can be overridden with the "related_name" option.
if self.field.rel.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if getattr(self.field.rel, 'symmetrical', False) and self.model == self.parent_model:
return None
return self.field.rel.related_name or (self.opts.object_name.lower() + '_set')
else:
return self.field.rel.related_name or (self.opts.object_name.lower())
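    # Illustrative note (not part of the original code): for a non-symmetrical
    # relation from a model named Article, the default reverse accessor would
    # be 'article_set' (or just 'article' for a one-to-one relation) unless
    # the field defines related_name.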
|
kongseokhwan/kulcloud-iitp-neutron
|
refs/heads/master
|
neutron/api/rpc/handlers/metadata_rpc.py
|
58
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oslo_messaging
from neutron.common import constants
from neutron import manager
class MetadataRpcCallback(object):
"""Metadata agent RPC callback in plugin implementations.
This class implements the server side of an rpc interface used by the
metadata service to make calls back into the Neutron plugin. The client
side is defined in neutron.agent.metadata.agent.MetadataPluginAPI. For
more information about changing rpc interfaces, see
doc/source/devref/rpc_api.rst.
"""
# 1.0 MetadataPluginAPI BASE_RPC_API_VERSION
target = oslo_messaging.Target(version='1.0',
namespace=constants.RPC_NAMESPACE_METADATA)
@property
def plugin(self):
if not hasattr(self, '_plugin'):
self._plugin = manager.NeutronManager.get_plugin()
return self._plugin
def get_ports(self, context, filters):
return self.plugin.get_ports(context, filters=filters)
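    # Illustrative note (not part of the original module): get_ports simply
    # proxies to the core plugin; the metadata agent is expected to pass port
    # filters (for example by network id or fixed IP address) and receives the
    # matching port dicts back over RPC.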
|
cleder/parsewkt
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
import sys, os
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
version = '0.1'
long_description = (
read('README.rst')
+ '\n' +
'Download\n'
'********\n')
setup(name='parsewkt',
version=version,
description="WKT Parser",
long_description=long_description,
classifiers=[
"Topic :: Scientific/Engineering :: GIS",
"Programming Language :: Python",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='WKT GIS Spatial',
author='Christian Ledermann',
author_email='christian.ledermann@gmail.com',
url='https://github.com/cleder/parsewkt',
license='BSD',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
'grako',
],
tests_require=[
'pytest',
'pygeoif',
],
cmdclass = {'test': PyTest},
entry_points="""
# -*- Entry points: -*-
""",
)
|
Limags/MissionPlanner
|
refs/heads/master
|
Lib/site-packages/numpy/lib/shape_base.py
|
76
|
__all__ = ['column_stack','row_stack', 'dstack','array_split','split','hsplit',
'vsplit','dsplit','apply_over_axes','expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, zeros, newaxis, outer, \
concatenate, isscalar, array, asanyarray
from numpy.core.fromnumeric import product, reshape
from numpy.core import hstack, vstack, atleast_3d
def apply_along_axis(func1d,axis,arr,*args):
"""
Apply a function to 1-D slices along the given axis.
Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
is a 1-D slice of `arr` along `axis`.
Parameters
----------
func1d : function
This function should accept 1-D arrays. It is applied to 1-D
slices of `arr` along the specified axis.
axis : integer
Axis along which `arr` is sliced.
arr : ndarray
Input array.
args : any
Additional arguments to `func1d`.
Returns
-------
outarr : ndarray
The output array. The shape of `outarr` is identical to the shape of
`arr`, except along the `axis` dimension, where the length of `outarr`
is equal to the size of the return value of `func1d`. If `func1d`
    returns a scalar, `outarr` will have one fewer dimension than `arr`.
See Also
--------
apply_over_axes : Apply a function repeatedly over multiple axes.
Examples
--------
>>> def my_func(a):
... \"\"\"Average first and last element of a 1-D array\"\"\"
... return (a[0] + a[-1]) * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(my_func, 0, b)
array([ 4., 5., 6.])
>>> np.apply_along_axis(my_func, 1, b)
array([ 2., 5., 8.])
For a function that doesn't return a scalar, the number of dimensions in
`outarr` is the same as `arr`.
>>> def new_func(a):
... \"\"\"Divide elements of a by 2.\"\"\"
... return a * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(new_func, 0, b)
array([[ 0.5, 1. , 1.5],
[ 2. , 2.5, 3. ],
[ 3.5, 4. , 4.5]])
"""
arr = asarray(arr)
nd = arr.ndim
if axis < 0:
axis += nd
if (axis >= nd):
raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
% (axis,nd))
ind = [0]*(nd-1)
i = zeros(nd,'O')
indlist = range(nd)
indlist.remove(axis)
i[axis] = slice(None,None)
outshape = asarray(arr.shape).take(indlist)
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())],*args)
# if res is a number, then we have a smaller output array
if isscalar(res):
outarr = zeros(outshape,asarray(res).dtype)
outarr[tuple(ind)] = res
Ntot = product(outshape)
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist,ind)
res = func1d(arr[tuple(i.tolist())],*args)
outarr[tuple(ind)] = res
k += 1
return outarr
else:
Ntot = product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = len(res)
outarr = zeros(outshape,asarray(res).dtype)
outarr[tuple(i.tolist())] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())],*args)
outarr[tuple(i.tolist())] = res
k += 1
return outarr
def apply_over_axes(func, a, axes):
"""
Apply a function repeatedly over multiple axes.
`func` is called as `res = func(a, axis)`, where `axis` is the first
element of `axes`. The result `res` of the function call must have
either the same dimensions as `a` or one less dimension. If `res`
has one less dimension than `a`, a dimension is inserted before
`axis`. The call to `func` is then repeated for each axis in `axes`,
with `res` as the first argument.
Parameters
----------
func : function
This function must take two arguments, `func(a, axis)`.
a : array_like
Input array.
axes : array_like
Axes over which `func` is applied; the elements must be integers.
Returns
-------
val : ndarray
The output array. The number of dimensions is the same as `a`,
but the shape can be different. This depends on whether `func`
changes the shape of its output with respect to its input.
See Also
--------
apply_along_axis :
Apply a function to 1-D slices of an array along the given axis.
Examples
--------
>>> a = np.arange(24).reshape(2,3,4)
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
Sum over axes 0 and 2. The result has same number of dimensions
as the original array:
>>> np.apply_over_axes(np.sum, a, [0,2])
array([[[ 60],
[ 92],
[124]]])
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0: axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = expand_dims(res,axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError, "function is not returning"\
" an array of correct shape"
return val
def expand_dims(a, axis):
"""
Expand the shape of an array.
Insert a new axis, corresponding to a given position in the array shape.
Parameters
----------
a : array_like
Input array.
axis : int
Position (amongst axes) where new axis is to be inserted.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
See Also
--------
doc.indexing, atleast_1d, atleast_2d, atleast_3d
Examples
--------
>>> x = np.array([1,2])
>>> x.shape
(2,)
The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:
>>> y = np.expand_dims(x, axis=0)
>>> y
array([[1, 2]])
>>> y.shape
(1, 2)
>>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis]
>>> y
array([[1],
[2]])
>>> y.shape
(2, 1)
Note that some examples may use ``None`` instead of ``np.newaxis``. These
are the same objects:
>>> np.newaxis is None
True
"""
a = asarray(a)
shape = a.shape
if axis < 0:
axis = axis + len(shape) + 1
return a.reshape(shape[:axis] + (1,) + shape[axis:])
row_stack = vstack
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
hstack, vstack, concatenate
Notes
-----
This function is equivalent to ``np.vstack(tup).T``.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = []
for v in tup:
arr = array(v,copy=False,subok=True)
if arr.ndim < 2:
arr = array(arr,copy=False,subok=True,ndmin=2).T
arrays.append(arr)
return _nx.concatenate(arrays,1)
def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
Takes a sequence of arrays and stack them along the third axis
to make a single array. Rebuilds arrays divided by `dsplit`.
This is a simple way to stack 2D arrays (images) into a single
3D array for processing.
Parameters
----------
tup : sequence of arrays
Arrays to stack. All of them must have the same shape along all
but the third axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
vstack : Stack along first axis.
hstack : Stack along second axis.
concatenate : Join arrays.
dsplit : Split array along third axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=2)``.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _nx.concatenate(map(atleast_3d,tup),2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if len(_nx.shape(sub_arys[i])) == 0:
sub_arys[i] = _nx.array([])
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]),0)):
sub_arys[i] = _nx.array([])
return sub_arys
def array_split(ary,indices_or_sections,axis = 0):
"""
Split an array into multiple sub-arrays of equal or near-equal size.
Please refer to the ``split`` documentation. The only difference
between these functions is that ``array_split`` allows
`indices_or_sections` to be an integer that does *not* equally
divide the axis.
See Also
--------
split : Split array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])]
"""
try:
Ntotal = ary.shape[axis]
except AttributeError:
Ntotal = len(ary)
try: # handle scalar case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
except TypeError: #indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
raise ValueError, 'number sections must be larger than 0.'
Neach_section,extras = divmod(Ntotal,Nsections)
section_sizes = [0] + \
extras * [Neach_section+1] + \
(Nsections-extras) * [Neach_section]
div_points = _nx.array(section_sizes).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary,axis,0)
for i in range(Nsections):
st = div_points[i]; end = div_points[i+1]
sub_arys.append(_nx.swapaxes(sary[st:end],axis,0))
    # there is a weird issue with array slicing that allows
    # 0x10 arrays and other such things. The following kludge is needed
    # to get around this issue.
    sub_arys = _replace_zero_by_x_arrays(sub_arys)
    # end kludge.
return sub_arys
def split(ary,indices_or_sections,axis=0):
"""
Split an array into multiple sub-arrays of equal size.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D array
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
See Also
--------
array_split : Split an array into multiple sub-arrays of equal or
near-equal size. Does not raise an exception if
an equal division cannot be made.
hsplit : Split array into multiple sub-arrays horizontally (column-wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
concatenate : Join arrays together.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
Examples
--------
>>> x = np.arange(9.0)
>>> np.split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])]
>>> x = np.arange(8.0)
>>> np.split(x, [3, 5, 6, 10])
[array([ 0., 1., 2.]),
array([ 3., 4.]),
array([ 5.]),
array([ 6., 7.]),
array([], dtype=float64)]
"""
try: len(indices_or_sections)
except TypeError:
sections = indices_or_sections
N = ary.shape[axis]
if N % sections:
raise ValueError, 'array split does not result in an equal division'
res = array_split(ary,indices_or_sections,axis)
return res
def hsplit(ary,indices_or_sections):
"""
Split an array into multiple sub-arrays horizontally (column-wise).
Please refer to the `split` documentation. `hsplit` is equivalent
to `split` with ``axis=1``, the array is always split along the second
axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[ 12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[ 10., 11.],
[ 14., 15.]])]
>>> np.hsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[ 12., 13., 14.]]),
array([[ 3.],
[ 7.],
[ 11.],
[ 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
"""
if len(_nx.shape(ary)) == 0:
raise ValueError, 'hsplit only works on arrays of 1 or more dimensions'
if len(ary.shape) > 1:
return split(ary,indices_or_sections,1)
else:
return split(ary,indices_or_sections,0)
def vsplit(ary,indices_or_sections):
"""
Split an array into multiple sub-arrays vertically (row-wise).
Please refer to the ``split`` documentation. ``vsplit`` is equivalent
to ``split`` with `axis=0` (default), the array is always split along the
first axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]]),
array([[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])]
>>> np.vsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
array([[ 12., 13., 14., 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[ 0., 1.],
[ 2., 3.]]]),
array([[[ 4., 5.],
[ 6., 7.]]])]
"""
if len(_nx.shape(ary)) < 2:
raise ValueError, 'vsplit only works on arrays of 2 or more dimensions'
return split(ary,indices_or_sections,0)
def dsplit(ary,indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[ 12., 13.]]]),
array([[[ 2., 3.],
[ 6., 7.]],
[[ 10., 11.],
[ 14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[ 12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[ 11.],
[ 15.]]]),
array([], dtype=float64)]
"""
if len(_nx.shape(ary)) < 3:
        raise ValueError, 'dsplit only works on arrays of 3 or more dimensions'
return split(ary,indices_or_sections,2)
def get_array_prepare(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = [(getattr(x, '__array_priority__', 0), -i,
x.__array_prepare__) for i, x in enumerate(args)
if hasattr(x, '__array_prepare__')]
wrappers.sort()
if wrappers:
return wrappers[-1][-1]
return None
def get_array_wrap(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = [(getattr(x, '__array_priority__', 0), -i,
x.__array_wrap__) for i, x in enumerate(args)
if hasattr(x, '__array_wrap__')]
wrappers.sort()
if wrappers:
return wrappers[-1][-1]
return None
def kron(a,b):
"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : array_like
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
    The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
    the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
array([[ 1., 1., 0., 0.],
[ 1., 1., 0., 0.],
[ 0., 0., 1., 1.],
[ 0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
>>> c = np.kron(a,b)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1,3,0,2)
>>> J = (0,2,1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
>>> c[K] == a[I]*b[J]
True
"""
b = asanyarray(b)
a = array(a,copy=False,subok=True,ndmin=b.ndim)
ndb, nda = b.ndim, a.ndim
if (nda == 0 or ndb == 0):
return _nx.multiply(a,b)
as_ = a.shape
bs = b.shape
if not a.flags.contiguous:
a = reshape(a, as_)
if not b.flags.contiguous:
b = reshape(b, bs)
nd = ndb
if (ndb != nda):
if (ndb > nda):
as_ = (1,)*(ndb-nda) + as_
else:
bs = (1,)*(nda-ndb) + bs
nd = nda
result = outer(a,b).reshape(as_+bs)
axis = nd-1
for _ in xrange(nd):
result = concatenate(result, axis=axis)
wrapper = get_array_prepare(a, b)
if wrapper is not None:
result = wrapper(result)
wrapper = get_array_wrap(a, b)
if wrapper is not None:
result = wrapper(result)
return result
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
See Also
--------
repeat : Repeat elements of an array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
c = _nx.array(A,copy=False,subok=True,ndmin=d)
shape = list(c.shape)
n = max(c.size,1)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
for i, nrep in enumerate(tup):
if nrep!=1:
c = c.reshape(-1,n).repeat(nrep,0)
dim_in = shape[i]
dim_out = dim_in*nrep
shape[i] = dim_out
n /= max(dim_in,1)
return c.reshape(shape)
|
adamdmcbride/Nufront_linux_kernel
|
refs/heads/master
|
tools/perf/python/twatch.py
|
3213
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
divio/django
|
refs/heads/master
|
django/db/models/fields/files.py
|
129
|
import datetime
import os
import warnings
from django import forms
from django.core import checks
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import default_storage
from django.db.models import signals
from django.db.models.fields import Field
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str, force_text
from django.utils.inspect import func_supports_parameter
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatible.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
if func_supports_parameter(self.storage.save, 'max_length'):
self.name = self.storage.save(name, content, max_length=self.field.max_length)
else:
warnings.warn(
'Backwards compatibility for storage backends without '
'support for the `max_length` argument in '
'Storage.save() will be removed in Django 1.10.',
RemovedInDjango110Warning, stacklevel=2
)
self.name = self.storage.save(name, content)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = content.size
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> with open('/tmp/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
# This is slightly complicated, so worth an explanation.
        # instance.file needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self._unique_set_explicitly = 'unique' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FileField, self).check(**kwargs)
errors.extend(self._check_unique())
errors.extend(self._check_primary_key())
return errors
def _check_unique(self):
if self._unique_set_explicitly:
return [
checks.Error(
"'unique' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E200',
)
]
else:
return []
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
"'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E201',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'name'):
value = value.name
return super(FileField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = super(FileField, self).get_prep_value(value)
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name, **kwargs):
super(FileField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
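    # Illustrative note (not part of the original code): get_directory_name
    # resolves strftime placeholders in upload_to against the current date, so
    # e.g. upload_to='photos/%Y/%m' would yield something like 'photos/2015/06'.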
def get_filename(self, filename):
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
# If upload_to is a callable, make sure that the path it returns is
# passed through get_valid_name() of the underlying storage.
if callable(self.upload_to):
directory_name, filename = os.path.split(self.upload_to(instance, filename))
filename = self.storage.get_valid_name(filename)
return os.path.normpath(os.path.join(directory_name, filename))
return os.path.join(self.get_directory_name(), self.get_filename(filename))
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(ImageFileDescriptor, self).__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install Pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ImageField, self).deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super(ImageField, self).contribute_to_class(cls, name, **kwargs)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
|
eLBati/odoo
|
refs/heads/master
|
addons/l10n_hn/__init__.py
|
411
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009-2010 Salvatore J. Trimarchi <salvatore@trimarchi.co.cc>
# (http://salvatoreweb.co.cc)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
spasovski/zamboni
|
refs/heads/master
|
mkt/stats/api.py
|
2
|
from django import http
import commonware
import requests
from rest_framework.exceptions import ParseError
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from lib.metrics import get_monolith_client
import amo
from stats.models import Contribution
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.authorization import AllowAppOwner, AnyOf, GroupPermission
from mkt.api.base import CORSMixin, SlugOrIdMixin
from mkt.api.exceptions import ServiceUnavailable
from mkt.webapps.models import Webapp
from .forms import StatsForm
log = commonware.log.getLogger('z.stats')
# Map of URL metric name to monolith metric name.
#
# The 'dimensions' key is optional query string arguments with defaults that is
# passed to the monolith client and used in the facet filters. If the default
# is `None`, the dimension is excluded unless specified via the API.
#
# The 'lines' key is optional and used for multi-line charts. The format is:
# {'<name>': {'<dimension-key>': '<dimension-value>'}}
# where <name> is what's returned in the JSON output and the dimension
# key/value is what's sent to Monolith similar to the 'dimensions' above.
#
# The 'coerce' key is optional and used to coerce data types returned from
# monolith to other types. Provide the name of the key in the data you want to
# coerce with a callback for how you want the data coerced. E.g.:
# {'count': str}
lines = lambda name, vals: dict((val, {name: val}) for val in vals)
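# Illustrative note (not part of the original module): for example,
# lines('package_type', ['hosted', 'packaged']) produces
# {'hosted': {'package_type': 'hosted'}, 'packaged': {'package_type': 'packaged'}}.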
STATS = {
'apps_added_by_package': {
'metric': 'apps_added_package_count',
'dimensions': {'region': 'us'},
'lines': lines('package_type', amo.ADDON_WEBAPP_TYPES.values()),
},
'apps_added_by_premium': {
'metric': 'apps_added_premium_count',
'dimensions': {'region': 'us'},
'lines': lines('premium_type', amo.ADDON_PREMIUM_API.values()),
},
'apps_available_by_package': {
'metric': 'apps_available_package_count',
'dimensions': {'region': 'us'},
'lines': lines('package_type', amo.ADDON_WEBAPP_TYPES.values()),
},
'apps_available_by_premium': {
'metric': 'apps_available_premium_count',
'dimensions': {'region': 'us'},
'lines': lines('premium_type', amo.ADDON_PREMIUM_API.values()),
},
'apps_installed': {
'metric': 'app_installs',
# TODO: Add back when we track regions (bug 941666)
# 'dimensions': {'region': None},
},
'total_developers': {
'metric': 'total_dev_count'
},
'total_visits': {
'metric': 'visits'
},
'revenue': {
'metric': 'gross_revenue',
# Counts are floats. Let's convert them to strings with 2 decimals.
'coerce': {'count': lambda d: '{0:.2f}'.format(d)},
},
}
APP_STATS = {
'installs': {
'metric': 'app_installs',
# TODO: Add back when we track regions (bug 941666)
# 'dimensions': {'region': None},
},
'visits': {
'metric': 'app_visits',
},
'revenue': {
'metric': 'gross_revenue',
# Counts are floats. Let's convert them to strings with 2 decimals.
'coerce': {'count': lambda d: '{0:.2f}'.format(d)},
},
}
STATS_TOTAL = {
'installs': {
'metric': 'app_installs',
},
# TODO: Add more metrics here as needed. The total API will iterate over
# them and return statistical totals information on them all.
}
APP_STATS_TOTAL = {
'installs': {
'metric': 'app_installs',
},
# TODO: Add more metrics here as needed. The total API will iterate over
# them and return statistical totals information on them all.
}
def _get_monolith_data(stat, start, end, interval, dimensions):
# If stat has a 'lines' attribute, it's a multi-line graph. Do a
# request for each item in 'lines' and compose them in a single
# response.
try:
client = get_monolith_client()
except requests.ConnectionError as e:
log.info('Monolith connection error: {0}'.format(e))
raise ServiceUnavailable
def _coerce(data):
for key, coerce in stat.get('coerce', {}).items():
if data.get(key):
data[key] = coerce(data[key])
return data
try:
data = {}
if 'lines' in stat:
for line_name, line_dimension in stat['lines'].items():
dimensions.update(line_dimension)
data[line_name] = map(_coerce,
client(stat['metric'], start, end,
interval, **dimensions))
else:
data['objects'] = map(_coerce,
client(stat['metric'], start, end, interval,
**dimensions))
except ValueError as e:
# This occurs if monolith doesn't have our metric and we get an
# elasticsearch SearchPhaseExecutionException error.
log.info('Monolith ValueError for metric {0}: {1}'.format(
stat['metric'], e))
raise ParseError('Invalid metric at this time. Try again later.')
return data
class GlobalStats(CORSMixin, APIView):
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [GroupPermission('Stats', 'View')]
def get(self, request, metric):
if metric not in STATS:
raise http.Http404('No metric by that name.')
stat = STATS[metric]
# Perform form validation.
form = StatsForm(request.GET)
if not form.is_valid():
raise ParseError(dict(form.errors.items()))
qs = form.cleaned_data
dimensions = {}
if 'dimensions' in stat:
for key, default in stat['dimensions'].items():
val = request.GET.get(key, default)
if val is not None:
# Avoid passing kwargs to the monolith client when the
# dimension is None to avoid facet filters being applied.
dimensions[key] = request.GET.get(key, default)
return Response(_get_monolith_data(stat, qs.get('start'),
qs.get('end'), qs.get('interval'),
dimensions))
class AppStats(CORSMixin, SlugOrIdMixin, ListAPIView):
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [AnyOf(AllowAppOwner,
GroupPermission('Stats', 'View'))]
queryset = Webapp.objects.all()
slug_field = 'app_slug'
def get(self, request, pk, metric):
if metric not in APP_STATS:
raise http.Http404('No metric by that name.')
app = self.get_object()
stat = APP_STATS[metric]
# Perform form validation.
form = StatsForm(request.GET)
if not form.is_valid():
raise ParseError(dict(form.errors.items()))
qs = form.cleaned_data
dimensions = {'app-id': app.id}
if 'dimensions' in stat:
for key, default in stat['dimensions'].items():
val = request.GET.get(key, default)
if val is not None:
# Avoid passing kwargs to the monolith client when the
# dimension is None to avoid facet filters being applied.
dimensions[key] = request.GET.get(key, default)
return Response(_get_monolith_data(stat, qs.get('start'),
qs.get('end'), qs.get('interval'),
dimensions))
class StatsTotalBase(object):
"""
A place for a few helper methods for totals stats API.
"""
def get_client(self):
try:
client = get_monolith_client()
except requests.ConnectionError as e:
log.info('Monolith connection error: {0}'.format(e))
raise ServiceUnavailable
return client
def get_query(self, metric, field, app_id=None):
query = {
'query': {
'match_all': {}
},
'facets': {
metric: {
'statistical': {
'field': field
}
}
},
'size': 0
}
# If this is per-app, add the facet_filter.
if app_id:
query['facets'][metric]['facet_filter'] = {
'term': {
'app-id': app_id
}
}
return query
def process_response(self, resp, data):
for metric, facet in resp.get('facets', {}).items():
count = facet.get('count', 0)
# We filter out facets with count=0 to avoid returning things
# like `'max': u'-Infinity'`.
if count > 0:
for field in ('max', 'mean', 'min', 'std_deviation',
'sum_of_squares', 'total', 'variance'):
value = facet.get(field)
if value is not None:
data[metric][field] = value
class GlobalStatsTotal(CORSMixin, APIView, StatsTotalBase):
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [GroupPermission('Stats', 'View')]
slug_field = 'app_slug'
def get(self, request):
client = self.get_client()
# Note: We have to do this as separate requests so that if one fails
# the rest can still be returned.
data = {}
for metric, stat in STATS_TOTAL.items():
data[metric] = {}
query = self.get_query(metric, stat['metric'])
try:
resp = client.raw(query)
except ValueError as e:
log.info('Received value error from monolith client: %s' % e)
continue
self.process_response(resp, data)
return Response(data)
class AppStatsTotal(CORSMixin, SlugOrIdMixin, ListAPIView, StatsTotalBase):
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [AnyOf(AllowAppOwner,
GroupPermission('Stats', 'View'))]
queryset = Webapp.objects.all()
slug_field = 'app_slug'
def get(self, request, pk):
app = self.get_object()
client = self.get_client()
# Note: We have to do this as separate requests so that if one fails
# the rest can still be returned.
data = {}
for metric, stat in APP_STATS_TOTAL.items():
data[metric] = {}
query = self.get_query(metric, stat['metric'], app.id)
try:
resp = client.raw(query)
except ValueError as e:
log.info('Received value error from monolith client: %s' % e)
continue
self.process_response(resp, data)
return Response(data)
class TransactionAPI(CORSMixin, APIView):
"""
API to query by transaction ID.
Note: This is intended for Monolith to be able to associate a Solitude
transaction with an app and price tier amount in USD.
"""
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [GroupPermission('RevenueStats', 'View')]
def get(self, request, transaction_id):
try:
contrib = (Contribution.objects.select_related('price_tier').
get(transaction_id=transaction_id))
except Contribution.DoesNotExist:
raise http.Http404('No transaction by that ID.')
data = {
'id': transaction_id,
'app_id': contrib.addon_id,
'amount_USD': contrib.price_tier.price,
'type': amo.CONTRIB_TYPES[contrib.type],
}
return Response(data)
|
FlaPer87/django-nonrel
|
refs/heads/master
|
tests/regressiontests/templates/templatetags/custom.py
|
56
|
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
def trim(value, num):
return value[:num]
trim = stringfilter(trim)
register.filter(trim)
|
nesaro/driza
|
refs/heads/master
|
pyrqt/iuqt4/operaciones/woperaciones.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
#Copyright (C) 2007-2008 Néstor Arocha Rodríguez
#This file is part of pyrqt.
#
#pyrqt is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#pyrqt is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with pyrqt; if not, write to the Free Software
#Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""woperaciones.py: Todos los widgets de operaciones"""
from PyQt4 import QtCore,QtGui
class WidgetOperacion(QtGui.QWidget):
"""Clase base de todos los widgets de operaciones"""
def __init__(self, name):
if isinstance(name, str):
name = name.encode("latin1")
if not name:
self.setName("WidgetOperacion")
QtGui.QWidget.__init__(self)#,None,name,0)
self._rejilla = QtGui.QGridLayout(self)#,1,1,11,6,"rejilla")
self.languageChange()
self.resize(QtCore.QSize(600,480).expandedTo(self.minimumSizeHint()))
        #TODO Qt4 portability pending
#self.clearWState(Qt.WState_Polished)
def languageChange(self):
        #TODO Qt4 portability pending
#self.setCaption(self.__tr("Form1"))
pass
def __tr(self,s,c = None):
        return QtGui.qApp.translate("Form1",s,c)
def seleccion(self):
"""Devuelve los elementos seleccionados en el selector"""
#Debe devolver una lista de str
seleccion = self._wseleccion.seleccion
seleccion2 = map(str, seleccion)
return seleccion2
def opciones(self):
"""Devuelve las opciones escogidas en todos los widgets de opciones"""
return self._wopciones.opciones()
class WidgetOperacionSelectorOpcion(WidgetOperacion):
"""WidgetOperacionSelectorOpcion es un widget de operaciones que tiene un solo selector
y un solo widget de opciones"""
def __init__(self, name, objetowidgetselector, opcioneswidgetopciones,
interfazdatos):
WidgetOperacion.__init__(self, name)
#
        # #Conditions to test
# from wopciones import WOpciones
# from seleccion import SelectorElementosEstudio
# assert(isinstance(widgetselector,SelectorElementosEstudio))
# assert(isinstance(widgetopciones,WOpciones))
#
        #Widget initialization
from pyrqt.iuqt4.operaciones.wopciones import WOpciones
self._wseleccion = objetowidgetselector(interfazdatos)
self._wopciones = WOpciones(opcioneswidgetopciones)
zonastacksuperior = QtGui.QStackedWidget(self)
zonastacksuperior.setMinimumSize(QtCore.QSize(200, 200))
zonastacksuperior.addWidget(self._wseleccion)
zonastacksuperior.setCurrentWidget(self._wseleccion)
zonastackinferior = QtGui.QStackedWidget(self)
zonastackinferior.setMinimumSize(QtCore.QSize(200,200))
zonastackinferior.addWidget(self._wopciones)
zonastackinferior.setCurrentWidget(self._wopciones)
self._rejilla.addWidget(zonastacksuperior,0,0)
self._rejilla.addWidget(zonastackinferior,1,0)
|
dgouldin/myspaceid-python-sdk
|
refs/heads/master
|
src/myspace/config/HTTPResponse.py
|
1
|
class HTTPResponse(object):
headers = None
status = None
body = None
final_url = None
def __init__(self, final_url=None, status=None, headers=None, body=None):
self.final_url = final_url
self.status = status
self.headers = headers
self.body = body
def __repr__(self):
return "[HTTP Status Code: %r --- Request URL: %s --- Response: %s" % (self.status, self.final_url, self.body)
|
inventivehack/yaus
|
refs/heads/master
|
links/urls.py
|
1
|
from django.conf.urls import url
from .views import (
URLRedirectView
)
urlpatterns = [
url(r'^(?P<url>[-\w]+)/$', URLRedirectView.as_view(), name='redirect'),
]
|
Boussadia/weboob
|
refs/heads/master
|
modules/lolix/test.py
|
3
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class LolixTest(BackendTest):
BACKEND = 'lolix'
def test_lolix_advanced_search(self):
l = list(self.backend.advanced_search_job())
assert len(l)
advert = self.backend.get_job_advert(l[0].id, l[0])
self.assertTrue(advert.url, 'URL for announce "%s" not found: %s' % (advert.id, advert.url))
|
erikriver/eduIntelligent-cynin
|
refs/heads/master
|
src/ubify.coretypes/ubify/__init__.py
|
25
|
###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at support@cynapse.com with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# legal@cynapse.com
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
GDG-JSS-NOIDA/programmr
|
refs/heads/master
|
programmr/app/migrations/0012_auto_20170118_1214.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-18 06:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0011_merge_20170117_1701'),
]
operations = [
migrations.AddField(
model_name='question',
name='correct_submissions',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='question',
name='total_submissions',
field=models.IntegerField(default=0),
),
]
|
wavesoft/LiveQ
|
refs/heads/master
|
liveq-common/liveq/utils/ascii85.py
|
1
|
#Kabopan - Readable Algorithms. Public Domain, 2007-2009
"""
Ascii85 / Base85
base coder
Paul E. Rutter
"""
def char_range(start, end):
return "".join(chr(i) for i in range(ord(start), ord(end) + 1))
DIGITS = char_range("0", "9")
ASCII = char_range("\x00", "\xff")
ALPHABET = char_range("A", "Z")
ALPHABET_LOWERCASE = char_range("a", "z")
ASCII85_1924 = DIGITS + ALPHABET + ALPHABET_LOWERCASE + "!#$%&()*+-;<=>?@^_`{|}~" # for reference only, for RFC 1924
ASCII85 = char_range(chr(33), chr(33 + 85 - 1))
def merge(number_list, base):
result = 0
for i in number_list:
result = result * base + i
return result
def split(number, base, max_digits=0):
result = list()
digits = 0
remainder = number
while remainder != 0:
number = remainder % base
remainder /= base
digits += 1
result = [number] + result
if digits < max_digits:
result = [0 for i in range (max_digits - digits)] + result
return result
def b85encode(source):
# todo : starter/ender and y/z belong to the format, not the algo
tetragrams = (source[i:i + 4] for i in range(0, len(source), 4))
result = ""
result += "<~"
for l in tetragrams:
if l == " ":
result += "y" # instead of "+<VdL"
elif l == "\x00\x00\x00\x00":
result += "z" # instead of "!!!!!"
else:
length = len(l)
if length < 4:
l = l + '\x00' * (4 - length)
value = merge ([ASCII.index(i) for i in l], len(ASCII))
l = split(value, len(ASCII85), 5)[:length + 1]
result += "".join(ASCII85[i] for i in l)
result = result + "~>"
return result
def b85decode(target):
target = target[2:-2]
target = target.replace("y", "+<VdL")
target = target.replace("z", "!!!!!")
result = str()
pentagrams = (target[i:i + 5] for i in range(0, len(target), 5))
for l in pentagrams:
length = len(l)
if length < 5:
l += "u" * (5 - length)
value = merge((ASCII85.index(i) for i in l), len(ASCII85))
encoded_char = 4
l = split(value, len(ASCII), 4)[:length - 1]
result += "".join((ASCII[i] for i in l))
return result
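# Illustrative round-trip using the two functions above (a minimal sketch):
#   encoded = b85encode("hello, world")    # -> "<~...~>" framed Ascii85 text
#   assert b85decode(encoded) == "hello, world"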
if __name__ == "__main__":
import kbp.test.ascii85_test
|
EvanK/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/ingate/common.py
|
21
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ingate Systems AB
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from ingate import ingatesdk
HAS_INGATESDK = True
except ImportError:
HAS_INGATESDK = False
def ingate_argument_spec(**kwargs):
client_options = dict(
version=dict(choices=['v1'], default='v1'),
scheme=dict(choices=['http', 'https'], required=True),
address=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
port=dict(type='int'),
timeout=dict(type='int'),
verify_ssl=dict(default=True, type='bool'),
)
argument_spec = dict(
client=dict(type='dict', required=True,
options=client_options),
)
argument_spec.update(kwargs)
return argument_spec
def ingate_create_client(**kwargs):
api_client = ingate_create_client_noauth(**kwargs)
# Authenticate and get hold of a security token.
api_client.authenticate()
# Return the client.
return api_client
def ingate_create_client_noauth(**kwargs):
client_params = kwargs['client']
# Create API client.
api_client = ingatesdk.Client(client_params['version'],
client_params['scheme'],
client_params['address'],
client_params['username'],
client_params['password'],
port=client_params['port'],
timeout=client_params['timeout'])
# Check if we should skip SSL Certificate verification.
verify_ssl = client_params.get('verify_ssl')
if not verify_ssl:
api_client.skip_verify_certificate()
# Return the client.
return api_client
def is_ingatesdk_installed(module):
if not HAS_INGATESDK:
module.fail_json(msg="The Ingate Python SDK module is required for this module.")
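# Illustrative usage from an Ansible module (a minimal sketch; AnsibleModule
# and the parameter values are hypothetical and not part of this file):
#   module = AnsibleModule(argument_spec=ingate_argument_spec())
#   is_ingatesdk_installed(module)
#   api_client = ingate_create_client(**module.params)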
|
spasticVerbalizer/-tg-station
|
refs/heads/master
|
tools/mapmerge/maintloot.py
|
118
|
#!/usr/bin/env python3
import argparse
import collections
import re
from map_helpers import parse_map
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument("mapfile")
args = parser.parse_args()
M = parse_map(args.mapfile)
# Format of this map parsing
# dict(coordinates->mapkey)
grid = M["grid"]
# dict(mapkey->tilecontents)
dictionary = M["dictionary"]
# tilecontents are a list of atoms, path is first
lootdrop_path = "/obj/effect/spawner/lootdrop/maintenance"
area_path = "/area"
follow_up = set()
for key, atoms in dictionary.items():
#atom is a string
for atom in atoms:
if atom.startswith(lootdrop_path):
if(key in follow_up):
print("Hey, '{}' has multiple maintlootdrops...")
follow_up.add(key)
# Count the number of times each map key appears
appears = collections.Counter()
for coord, key in grid.items():
if key in follow_up:
appears[key] += 1
tally = collections.Counter()
for key in follow_up:
# Because I am a terrible person, and don't actually care about
# building a proper parser for this "object notation" that byond
# uses, I'm just going to cheat.
area = None
count = 0
for atom in dictionary[key]:
if atom.startswith(lootdrop_path):
amount = 1
mo = re.search(r'lootcount = (\d+)', atom)
if mo is not None:
amount = int(mo.group(1))
count += amount
elif atom.startswith(area_path):
area = atom
# Multiply by the number of times this model is used
tally[area] += (count * appears[key])
for area, total in tally.items():
print("{}: {}".format(area, total))
print("TOTAL: {}".format(sum(tally.values())))
|
Omegaphora/external_chromium_org
|
refs/heads/lp5.1
|
tools/telemetry/third_party/pyserial/serial/urlhandler/protocol_rfc2217.py
|
167
|
#! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see ../__init__.py
#
# This is a thin wrapper to load the rfc2271 implementation.
#
# (C) 2011 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
from serial.rfc2217 import Serial
|
GaretJax/pop-utils
|
refs/heads/master
|
poputils/conf.py
|
1
|
import os
from lxml import etree
def schema(name):
"""
Retrieves a schema by name from the 'schemata' directory and wraps it in an
lxml.etree.XMLSchema object.
"""
schema = os.path.join(os.path.dirname(__file__), 'schemata', name + '.xsd')
document = etree.parse(schema) # Will raise if schema does not exist
return etree.XMLSchema(document)
|
TeachAtTUM/edx-platform
|
refs/heads/master
|
lms/djangoapps/bulk_email/migrations/0001_initial.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CourseAuthorization',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('course_id', CourseKeyField(unique=True, max_length=255, db_index=True)),
('email_enabled', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='CourseEmail',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slug', models.CharField(max_length=128, db_index=True)),
('subject', models.CharField(max_length=128, blank=True)),
('html_message', models.TextField(null=True, blank=True)),
('text_message', models.TextField(null=True, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('course_id', CourseKeyField(max_length=255, db_index=True)),
('to_option', models.CharField(default=b'myself', max_length=64, choices=[(b'myself', b'Myself'), (b'staff', b'Staff and instructors'), (b'all', b'All')])),
('template_name', models.CharField(max_length=255, null=True)),
('from_addr', models.CharField(max_length=255, null=True)),
('sender', models.ForeignKey(default=1, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='CourseEmailTemplate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('html_template', models.TextField(null=True, blank=True)),
('plain_template', models.TextField(null=True, blank=True)),
('name', models.CharField(max_length=255, unique=True, null=True, blank=True)),
],
),
migrations.CreateModel(
name='Optout',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('course_id', CourseKeyField(max_length=255, db_index=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.AlterUniqueTogether(
name='optout',
unique_together=set([('user', 'course_id')]),
),
]
|
nijinashok/sos
|
refs/heads/master
|
sos/plugins/insights.py
|
1
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin
class RedHatInsights(Plugin, RedHatPlugin):
'''Collect config and log for Red Hat Insights
'''
plugin_name = 'insights'
packages = ['redhat-access-insights']
profiles = ('system', 'sysmgmt')
conf_file = '/etc/redhat-access-insights/redhat-access-insights.conf'
def setup(self):
self.add_copy_spec(self.conf_file)
if self.get_option("all_logs"):
self.add_copy_spec("/var/log/redhat-access-insights/*.log*")
else:
self.add_copy_spec("/var/log/redhat-access-insights/*.log")
def postproc(self):
self.do_file_sub(
self.conf_file,
r'(password[\t\ ]*=[\t\ ]*)(.+)',
r'\1********'
)
self.do_file_sub(
self.conf_file,
r'(proxy[\t\ ]*=.*)(:)(.*)(@.*)',
r'\1\2********\4'
)
|
nkgilley/home-assistant
|
refs/heads/dev
|
tests/testing_config/custom_components/test/alarm_control_panel.py
|
18
|
"""
Provide a mock alarm_control_panel platform.
Call init before using it in your tests to ensure clean test data.
"""
from homeassistant.components.alarm_control_panel import AlarmControlPanelEntity
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
SUPPORT_ALARM_TRIGGER,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from tests.common import MockEntity
ENTITIES = {}
def init(empty=False):
"""Initialize the platform with entities."""
global ENTITIES
ENTITIES = (
{}
if empty
else {
"arm_code": MockAlarm(
name="Alarm arm code",
code_arm_required=True,
unique_id="unique_arm_code",
),
"no_arm_code": MockAlarm(
name="Alarm no arm code",
code_arm_required=False,
unique_id="unique_no_arm_code",
),
}
)
async def async_setup_platform(
hass, config, async_add_entities_callback, discovery_info=None
):
"""Return mock entities."""
async_add_entities_callback(list(ENTITIES.values()))
class MockAlarm(MockEntity, AlarmControlPanelEntity):
"""Mock Alarm control panel class."""
def __init__(self, **values):
"""Init the Mock Alarm Control Panel."""
self._state = None
MockEntity.__init__(self, **values)
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
return self._handle("code_arm_required")
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return (
SUPPORT_ALARM_ARM_HOME
| SUPPORT_ALARM_ARM_AWAY
| SUPPORT_ALARM_ARM_NIGHT
| SUPPORT_ALARM_TRIGGER
)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self._state = STATE_ALARM_ARMED_AWAY
self.async_write_ha_state()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self._state = STATE_ALARM_ARMED_HOME
self.async_write_ha_state()
def alarm_arm_night(self, code=None):
"""Send arm night command."""
self._state = STATE_ALARM_ARMED_NIGHT
self.async_write_ha_state()
def alarm_disarm(self, code=None):
"""Send disarm command."""
if code == "1234":
self._state = STATE_ALARM_DISARMED
self.async_write_ha_state()
def alarm_trigger(self, code=None):
"""Send alarm trigger command."""
self._state = STATE_ALARM_TRIGGERED
self.async_write_ha_state()
|
georgemarshall/django
|
refs/heads/master
|
django/contrib/gis/sitemaps/kml.py
|
101
|
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.sitemaps import Sitemap
from django.db import models
from django.urls import reverse
class KMLSitemap(Sitemap):
"""
A minimal hook to produce KML sitemaps.
"""
geo_format = 'kml'
def __init__(self, locations=None):
# If no locations specified, then we try to build for
# every model in installed applications.
self.locations = self._build_kml_sources(locations)
def _build_kml_sources(self, sources):
"""
Go through the given sources and return a 3-tuple of the application
label, module name, and field name of every GeometryField encountered
in the sources.
        If no sources are provided, then all models are used.
"""
kml_sources = []
if sources is None:
sources = apps.get_models()
for source in sources:
if isinstance(source, models.base.ModelBase):
for field in source._meta.fields:
if isinstance(field, GeometryField):
kml_sources.append((source._meta.app_label,
source._meta.model_name,
field.name))
elif isinstance(source, (list, tuple)):
if len(source) != 3:
raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')
kml_sources.append(source)
else:
raise TypeError('KML Sources must be a model or a 3-tuple.')
return kml_sources
def get_urls(self, page=1, site=None, protocol=None):
"""
This method is overridden so the appropriate `geo_format` attribute
is placed on each URL element.
"""
urls = Sitemap.get_urls(self, page=page, site=site, protocol=protocol)
for url in urls:
url['geo_format'] = self.geo_format
return urls
def items(self):
return self.locations
def location(self, obj):
return reverse(
'django.contrib.gis.sitemaps.views.%s' % self.geo_format,
kwargs={
'label': obj[0],
'model': obj[1],
'field_name': obj[2],
},
)
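# Illustrative usage (a minimal sketch; 'myapp', 'place' and 'point' are a
# hypothetical app label, model name and geometry field name):
#   sitemaps = {'kml': KMLSitemap([('myapp', 'place', 'point')])}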
class KMZSitemap(KMLSitemap):
geo_format = 'kmz'
|
bayusantoso/final-assignment-web-ontology
|
refs/heads/master
|
IMPLEMENTATION/Application/SourceCode/GOApps/flask/Lib/functools.py
|
68
|
"""functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>,
# Raymond Hettinger <python at rcn.com>,
# and Łukasz Langa <lukasz at langa.pl>.
# Copyright (C) 2006-2013 Python Software Foundation.
# See C source code for _functools credits/copyright
__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial',
'partialmethod', 'singledispatch']
try:
from _functools import reduce
except ImportError:
pass
from abc import get_cache_token
from collections import namedtuple
from types import MappingProxyType
from weakref import WeakKeyDictionary
try:
from _thread import RLock
except:
class RLock:
'Dummy reentrant lock for builds without threads'
def __enter__(self): pass
def __exit__(self, exctype, excinst, exctb): pass
################################################################################
### update_wrapper() and wraps() decorator
################################################################################
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
'__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Issue #17482: set __wrapped__ last so we don't inadvertently copy it
# from the wrapped function when updating __dict__
wrapper.__wrapped__ = wrapped
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
################################################################################
### total_ordering class decorator
################################################################################
# The correct way to indicate that a comparison operation doesn't
# recognise the other type is to return NotImplemented and let the
# interpreter handle raising TypeError if both operands return
# NotImplemented from their respective comparison methods
#
# This makes the implementation of total_ordering more complicated, since
# we need to be careful not to trigger infinite recursion when two
# different types that both use this decorator encounter each other.
#
# For example, if a type implements __lt__, it's natural to define
# __gt__ as something like:
#
# lambda self, other: not self < other and not self == other
#
# However, using the operator syntax like that ends up invoking the full
# type checking machinery again and means we can end up bouncing back and
# forth between the two operands until we run out of stack space.
#
# The solution is to define helper functions that invoke the appropriate
# magic methods directly, ensuring we only try each operand once, and
# return NotImplemented immediately if it is returned from the
# underlying user provided method. Using this scheme, the __gt__ derived
# from a user provided __lt__ becomes:
#
# lambda self, other: _not_op_and_not_eq(self.__lt__, self, other))
def _not_op(op, other):
# "not a < b" handles "a >= b"
# "not a <= b" handles "a > b"
# "not a >= b" handles "a < b"
# "not a > b" handles "a <= b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result
def _op_or_eq(op, self, other):
# "a < b or a == b" handles "a <= b"
# "a > b or a == b" handles "a >= b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return op_result or self == other
def _not_op_and_not_eq(op, self, other):
# "not (a < b or a == b)" handles "a > b"
# "not a < b and a != b" is equivalent
# "not (a > b or a == b)" handles "a < b"
# "not a > b and a != b" is equivalent
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result and self != other
def _not_op_or_eq(op, self, other):
# "not a <= b or a == b" handles "a >= b"
# "not a >= b or a == b" handles "a <= b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result or self == other
def _op_and_not_eq(op, self, other):
# "a <= b and not a == b" handles "a < b"
# "a >= b and not a == b" handles "a > b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return op_result and self != other
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: _not_op_and_not_eq(self.__lt__, self, other)),
('__le__', lambda self, other: _op_or_eq(self.__lt__, self, other)),
('__ge__', lambda self, other: _not_op(self.__lt__, other))],
'__le__': [('__ge__', lambda self, other: _not_op_or_eq(self.__le__, self, other)),
('__lt__', lambda self, other: _op_and_not_eq(self.__le__, self, other)),
('__gt__', lambda self, other: _not_op(self.__le__, other))],
'__gt__': [('__lt__', lambda self, other: _not_op_and_not_eq(self.__gt__, self, other)),
('__ge__', lambda self, other: _op_or_eq(self.__gt__, self, other)),
('__le__', lambda self, other: _not_op(self.__gt__, other))],
'__ge__': [('__le__', lambda self, other: _not_op_or_eq(self.__ge__, self, other)),
('__gt__', lambda self, other: _op_and_not_eq(self.__ge__, self, other)),
('__lt__', lambda self, other: _not_op(self.__ge__, other))]
}
# Find user-defined comparisons (not those inherited from object).
roots = [op for op in convert if getattr(cls, op, None) is not getattr(object, op, None)]
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
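# Illustrative usage of total_ordering (a minimal sketch): defining __eq__ and
# __lt__ is enough, the remaining comparison methods are derived.
#   @total_ordering
#   class Version:
#       def __init__(self, n): self.n = n
#       def __eq__(self, other): return self.n == other.n
#       def __lt__(self, other): return self.n < other.n
#   Version(1) <= Version(2)   # True, __le__ is filled in from __lt__/__eq__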
################################################################################
### cmp_to_key() function converter
################################################################################
def cmp_to_key(mycmp):
"""Convert a cmp= function into a key= function"""
class K(object):
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
__hash__ = None
return K
try:
from _functools import cmp_to_key
except ImportError:
pass
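# Illustrative usage of cmp_to_key (a minimal sketch):
#   sorted([3, 1, 2], key=cmp_to_key(lambda a, b: a - b))   # -> [1, 2, 3]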
################################################################################
### partial() argument application
################################################################################
# Purely functional, no descriptor behaviour
def partial(func, *args, **keywords):
"""New function with partial application of the given arguments
and keywords.
"""
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
try:
from _functools import partial
except ImportError:
pass
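# Illustrative usage of partial (a minimal sketch):
#   int_from_binary = partial(int, base=2)
#   int_from_binary('1010')   # -> 10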
# Descriptor version
class partialmethod(object):
"""Method descriptor with partial application of the given arguments
and keywords.
Supports wrapping existing descriptors and handles non-descriptor
callables as instance methods.
"""
def __init__(self, func, *args, **keywords):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError("{!r} is not callable or a descriptor"
.format(func))
# func could be a descriptor like classmethod which isn't callable,
# so we can't inherit from partial (it verifies func is callable)
if isinstance(func, partialmethod):
# flattening is mandatory in order to place cls/self before all
# other arguments
# it's also more efficient since only one function will be called
self.func = func.func
self.args = func.args + args
self.keywords = func.keywords.copy()
self.keywords.update(keywords)
else:
self.func = func
self.args = args
self.keywords = keywords
def __repr__(self):
args = ", ".join(map(repr, self.args))
keywords = ", ".join("{}={!r}".format(k, v)
for k, v in self.keywords.items())
format_string = "{module}.{cls}({func}, {args}, {keywords})"
return format_string.format(module=self.__class__.__module__,
cls=self.__class__.__name__,
func=self.func,
args=args,
keywords=keywords)
def _make_unbound_method(self):
def _method(*args, **keywords):
call_keywords = self.keywords.copy()
call_keywords.update(keywords)
cls_or_self, *rest = args
call_args = (cls_or_self,) + self.args + tuple(rest)
return self.func(*call_args, **call_keywords)
_method.__isabstractmethod__ = self.__isabstractmethod__
_method._partialmethod = self
return _method
def __get__(self, obj, cls):
get = getattr(self.func, "__get__", None)
result = None
if get is not None:
new_func = get(obj, cls)
if new_func is not self.func:
# Assume __get__ returning something new indicates the
# creation of an appropriate callable
result = partial(new_func, *self.args, **self.keywords)
try:
result.__self__ = new_func.__self__
except AttributeError:
pass
if result is None:
# If the underlying descriptor didn't do anything, treat this
# like an instance method
result = self._make_unbound_method().__get__(obj, cls)
return result
@property
def __isabstractmethod__(self):
return getattr(self.func, "__isabstractmethod__", False)
################################################################################
### LRU Cache function decorator
################################################################################
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
""" This class guarantees that hash() will be called no more than once
per element. This is important because the lru_cache() will hash
the key multiple times on a cache miss.
"""
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
"""Make a cache key from optionally typed positional and keyword arguments
    The key is constructed in a way that is as flat as possible rather than
as a nested structure that would take more memory.
If there is only a single argument and its data type is known to cache
its hash value, then that argument is returned without a wrapper. This
saves space and improves lookup speed.
"""
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=128, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
# Early detection of an erroneous call to @lru_cache without any arguments
# resulting in the inner function being passed to maxsize instead of an
# integer or None.
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
# Constants shared by all lru cache instances:
sentinel = object() # unique object used to signal cache misses
make_key = _make_key # build a key from the function arguments
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
def decorating_function(user_function):
cache = {}
hits = misses = 0
full = False
cache_get = cache.get # bound method to lookup a key or return None
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
if maxsize == 0:
def wrapper(*args, **kwds):
# No caching -- just a statistics update after a successful call
nonlocal misses
result = user_function(*args, **kwds)
misses += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
nonlocal hits, misses
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
misses += 1
return result
else:
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
nonlocal root, hits, misses, full
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# Move the link to the front of the circular queue
link_prev, link_next, _key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
hits += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full:
# Use the old root to store the new key and result.
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
# update. That will prevent potentially arbitrary object
# clean-up code (i.e. __del__) from running while we're
# still adjusting the links.
root = oldroot[NEXT]
oldkey = root[KEY]
oldresult = root[RESULT]
root[KEY] = root[RESULT] = None
# Now update the cache dictionary.
del cache[oldkey]
# Save the potentially reentrant cache[key] assignment
# for last, after the root and links have been put in
# a consistent state.
cache[key] = oldroot
else:
# Put result in a new link at the front of the queue.
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
full = (len(cache) >= maxsize)
misses += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits, misses, maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
nonlocal hits, misses, full
with lock:
cache.clear()
root[:] = [root, root, None, None]
hits = misses = 0
full = False
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
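# Illustrative usage of lru_cache (a minimal sketch):
#   @lru_cache(maxsize=32)
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#   fib(30)
#   fib.cache_info()   # -> CacheInfo(hits=28, misses=31, maxsize=32, currsize=31)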
################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################
def _c3_merge(sequences):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if not candidate:
raise RuntimeError("Inconsistent hierarchy")
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0]
def _c3_mro(cls, abcs=None):
"""Computes the method resolution order using extended C3 linearization.
If no *abcs* are given, the algorithm works exactly like the built-in C3
linearization used for method resolution.
If given, *abcs* is a list of abstract base classes that should be inserted
into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
result. The algorithm inserts ABCs where their functionality is introduced,
i.e. issubclass(cls, abc) returns True for the class itself but returns
False for all its direct base classes. Implicit ABCs for a given class
(either registered or inferred from the presence of a special method like
__len__) are inserted directly after the last ABC explicitly listed in the
MRO of said class. If two implicit ABCs end up next to each other in the
resulting MRO, their ordering depends on the order of types in *abcs*.
"""
for i, base in enumerate(reversed(cls.__bases__)):
if hasattr(base, '__abstractmethods__'):
boundary = len(cls.__bases__) - i
break # Bases up to the last explicit ABC are considered first.
else:
boundary = 0
abcs = list(abcs) if abcs else []
explicit_bases = list(cls.__bases__[:boundary])
abstract_bases = []
other_bases = list(cls.__bases__[boundary:])
for base in abcs:
if issubclass(cls, base) and not any(
issubclass(b, base) for b in cls.__bases__
):
# If *cls* is the class that introduces behaviour described by
# an ABC *base*, insert said ABC to its MRO.
abstract_bases.append(base)
for base in abstract_bases:
abcs.remove(base)
explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
return _c3_merge(
[[cls]] +
explicit_c3_mros + abstract_c3_mros + other_c3_mros +
[explicit_bases] + [abstract_bases] + [other_bases]
)
def _compose_mro(cls, types):
"""Calculates the method resolution order for a given class *cls*.
Includes relevant abstract base classes (with their respective bases) from
the *types* iterable. Uses a modified C3 linearization algorithm.
"""
bases = set(cls.__mro__)
# Remove entries which are already present in the __mro__ or unrelated.
def is_related(typ):
return (typ not in bases and hasattr(typ, '__mro__')
and issubclass(cls, typ))
types = [n for n in types if is_related(n)]
# Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway).
def is_strict_base(typ):
for other in types:
if typ != other and typ in other.__mro__:
return True
return False
types = [n for n in types if not is_strict_base(n)]
# Subclasses of the ABCs in *types* which are also implemented by
# *cls* can be used to stabilize ABC ordering.
type_set = set(types)
mro = []
for typ in types:
found = []
for sub in typ.__subclasses__():
if sub not in bases and issubclass(cls, sub):
found.append([s for s in sub.__mro__ if s in type_set])
if not found:
mro.append(typ)
continue
# Favor subclasses with the biggest number of useful bases
found.sort(key=len, reverse=True)
for sub in found:
for subcls in sub:
if subcls not in mro:
mro.append(subcls)
return _c3_mro(cls, abcs=mro)
def _find_impl(cls, registry):
"""Returns the best matching implementation from *registry* for type *cls*.
Where there is no registered implementation for a specific type, its method
resolution order is used to find a more generic implementation.
Note: if *registry* does not contain an implementation for the base
*object* type, this function may return None.
"""
mro = _compose_mro(cls, registry.keys())
match = None
for t in mro:
if match is not None:
# If *match* is an implicit ABC but there is another unrelated,
# equally matching implicit ABC, refuse the temptation to guess.
if (t in registry and t not in cls.__mro__
and match not in cls.__mro__
and not issubclass(match, t)):
raise RuntimeError("Ambiguous dispatch: {} or {}".format(
match, t))
break
if t in registry:
match = t
return registry.get(match)
def singledispatch(func):
"""Single-dispatch generic function decorator.
Transforms a function into a generic function, which can have different
behaviours depending upon the type of its first argument. The decorated
function acts as the default implementation, and additional
implementations can be registered using the register() attribute of the
generic function.
"""
registry = {}
dispatch_cache = WeakKeyDictionary()
cache_token = None
def dispatch(cls):
"""generic_func.dispatch(cls) -> <function implementation>
Runs the dispatch algorithm to return the best available implementation
for the given *cls* registered on *generic_func*.
"""
nonlocal cache_token
if cache_token is not None:
current_token = get_cache_token()
if cache_token != current_token:
dispatch_cache.clear()
cache_token = current_token
try:
impl = dispatch_cache[cls]
except KeyError:
try:
impl = registry[cls]
except KeyError:
impl = _find_impl(cls, registry)
dispatch_cache[cls] = impl
return impl
def register(cls, func=None):
"""generic_func.register(cls, func) -> func
Registers a new implementation for the given *cls* on a *generic_func*.
"""
nonlocal cache_token
if func is None:
return lambda f: register(cls, f)
registry[cls] = func
if cache_token is None and hasattr(cls, '__abstractmethods__'):
cache_token = get_cache_token()
dispatch_cache.clear()
return func
def wrapper(*args, **kw):
return dispatch(args[0].__class__)(*args, **kw)
registry[object] = func
wrapper.register = register
wrapper.dispatch = dispatch
wrapper.registry = MappingProxyType(registry)
wrapper._clear_cache = dispatch_cache.clear
update_wrapper(wrapper, func)
return wrapper
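# Illustrative usage of singledispatch (a minimal sketch):
#   @singledispatch
#   def describe(arg):
#       return 'something else'
#   @describe.register(int)
#   def _(arg):
#       return 'an int'
#   describe(3)      # -> 'an int'
#   describe('hi')   # -> 'something else'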
|
int19h/PTVS
|
refs/heads/master
|
Python/Tests/TestData/TestDiscoverer/SyntaxErrorPytest/test_syntax_error.py
|
3
|
def test_with_syntax_error():
for (]
|
FreeAgent/djangoappengine-starter
|
refs/heads/master
|
django/conf/urls/static.py
|
156
|
import re
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ImproperlyConfigured
def static(prefix, view='django.views.static.serve', **kwargs):
"""
Helper function to return a URL pattern for serving files in debug mode.
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = patterns('',
# ... the rest of your URLconf goes here ...
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"""
    # No-op if not in debug mode or a non-local prefix
if not settings.DEBUG or (prefix and '://' in prefix):
return []
elif not prefix:
raise ImproperlyConfigured("Empty static prefix not permitted")
return patterns('',
url(r'^%s(?P<path>.*)$' % re.escape(prefix.lstrip('/')), view, kwargs=kwargs),
)
|
rickerc/keystone_audit
|
refs/heads/cis-havana-staging
|
keystone/openstack/common/fixture/moxstubout.py
|
14
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mox
class MoxStubout(fixtures.Fixture):
"""Deal with code around mox and stubout as a fixture."""
def setUp(self):
super(MoxStubout, self).setUp()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = self.mox.stubs
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.mox.VerifyAll)
|
zjutjsj1004/third
|
refs/heads/master
|
boost/tools/build/test/double_loading.py
|
6
|
#!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester()
# Regression test for double loading of the same Jamfile.
t.write("jamroot.jam", "")
t.write("jamfile.jam", "build-project subdir ;")
t.write("subdir/jamfile.jam", 'ECHO "Loaded subdir" ;')
t.run_build_system(subdir="subdir")
t.expect_output_lines("Loaded subdir")
# Regression test for a more contrived case. The top-level Jamfile refers to
# subdir via use-project, while subdir's Jamfile is being loaded. The
# motivation why use-project referring to subprojects is useful can be found
# at: http://article.gmane.org/gmane.comp.lib.boost.build/3906
t.write("jamroot.jam", "")
t.write("jamfile.jam", "use-project /subdir : subdir ;")
t.write("subdir/jamfile.jam", "project subdir ;")
t.run_build_system(subdir="subdir");
t.cleanup()
|
rowillia/buck
|
refs/heads/master
|
third-party/py/pex/pex/interpreter.py
|
52
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""pex support for interacting with interpreters."""
from __future__ import absolute_import
import os
import re
import subprocess
import sys
from collections import defaultdict
from pkg_resources import Distribution, Requirement, find_distributions
from .base import maybe_requirement
from .compatibility import string
from .tracer import TRACER
try:
from numbers import Integral
except ImportError:
Integral = (int, long)
# Determine in the most platform-compatible way possible the identity of the interpreter
# and its known packages.
ID_PY = b"""
import sys
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
print("%s %s %s %s" % (
subversion,
sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
setuptools_path = None
try:
import pkg_resources
except ImportError:
sys.exit(0)
requirements = {}
for item in sys.path:
for dist in pkg_resources.find_distributions(item):
requirements[str(dist.as_requirement())] = dist.location
for requirement_str, location in requirements.items():
rs = requirement_str.split('==', 2)
if len(rs) == 2:
print('%s %s %s' % (rs[0], rs[1], location))
"""
class PythonIdentity(object):
class Error(Exception): pass
class InvalidError(Error): pass
class UnknownRequirement(Error): pass
# TODO(wickman) Support interpreter-specific versions, e.g. PyPy-2.2.1
HASHBANGS = {
'CPython': 'python%(major)d.%(minor)d',
'Jython': 'jython',
'PyPy': 'pypy',
}
@classmethod
def get_subversion(cls):
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
return subversion
@classmethod
def get(cls):
return cls(cls.get_subversion(), sys.version_info[0], sys.version_info[1], sys.version_info[2])
@classmethod
def from_id_string(cls, id_string):
values = id_string.split()
if len(values) != 4:
raise cls.InvalidError("Invalid id string: %s" % id_string)
return cls(str(values[0]), int(values[1]), int(values[2]), int(values[3]))
@classmethod
def from_path(cls, dirname):
interp, version = dirname.split('-')
major, minor, patch = version.split('.')
return cls(str(interp), int(major), int(minor), int(patch))
def __init__(self, interpreter, major, minor, patch):
for var in (major, minor, patch):
assert isinstance(var, Integral)
self._interpreter = interpreter
self._version = (major, minor, patch)
@property
def interpreter(self):
return self._interpreter
@property
def version(self):
return self._version
@property
def requirement(self):
return self.distribution.as_requirement()
@property
def distribution(self):
return Distribution(project_name=self._interpreter, version='.'.join(map(str, self._version)))
@classmethod
def parse_requirement(cls, requirement, default_interpreter='CPython'):
if isinstance(requirement, Requirement):
return requirement
elif isinstance(requirement, string):
try:
requirement = Requirement.parse(requirement)
except ValueError:
try:
requirement = Requirement.parse('%s%s' % (default_interpreter, requirement))
except ValueError:
raise ValueError('Unknown requirement string: %s' % requirement)
return requirement
else:
raise ValueError('Unknown requirement type: %r' % (requirement,))
def matches(self, requirement):
"""Given a Requirement, check if this interpreter matches."""
try:
requirement = self.parse_requirement(requirement, self._interpreter)
except ValueError as e:
raise self.UnknownRequirement(str(e))
return self.distribution in requirement
def hashbang(self):
hashbang_string = self.HASHBANGS.get(self.interpreter, 'CPython') % {
'major': self._version[0],
'minor': self._version[1],
'patch': self._version[2],
}
return '#!/usr/bin/env %s' % hashbang_string
@property
def python(self):
# return the python version in the format of the 'python' key for distributions
# specifically, '2.6', '2.7', '3.2', etc.
return '%d.%d' % (self.version[0:2])
def __str__(self):
return '%s-%s.%s.%s' % (self._interpreter,
self._version[0], self._version[1], self._version[2])
def __repr__(self):
return 'PythonIdentity(%r, %s, %s, %s)' % (
self._interpreter, self._version[0], self._version[1], self._version[2])
def __eq__(self, other):
return all([isinstance(other, PythonIdentity),
self.interpreter == other.interpreter,
self.version == other.version])
def __hash__(self):
return hash((self._interpreter, self._version))
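# Illustrative usage (not part of the original module; values are hypothetical):
#   ident = PythonIdentity.from_id_string('CPython 2 7 6')
#   str(ident)               # 'CPython-2.7.6'
#   ident.hashbang()         # '#!/usr/bin/env python2.7'
#   ident.matches('>=2.6')   # True (parsed as 'CPython>=2.6')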
class PythonInterpreter(object):
REGEXEN = (
re.compile(r'jython$'),
# NB: OSX ships python binaries named Python so we allow for capital-P.
re.compile(r'[Pp]ython$'),
    re.compile(r'python[23]\.[0-9]$'),
    re.compile(r'pypy$'),
    re.compile(r'pypy-1\.[0-9]$'),
)
CACHE = {} # memoize executable => PythonInterpreter
try:
# Versions of distribute prior to the setuptools merge would automatically replace
# 'setuptools' requirements with 'distribute'. It provided the 'replacement' kwarg
# to toggle this, but it was removed post-merge.
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0', replacement=False)
except TypeError:
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0')
class Error(Exception): pass
class IdentificationError(Error): pass
class InterpreterNotFound(Error): pass
@classmethod
def get(cls):
return cls.from_binary(sys.executable)
@classmethod
def all(cls, paths=None):
if paths is None:
paths = os.getenv('PATH', '').split(':')
return cls.filter(cls.find(paths))
@classmethod
def _parse_extras(cls, output_lines):
def iter_lines():
for line in output_lines:
try:
dist_name, dist_version, location = line.split()
except ValueError:
raise cls.IdentificationError('Could not identify requirement: %s' % line)
yield ((dist_name, dist_version), location)
return dict(iter_lines())
@classmethod
def _from_binary_internal(cls, path_extras):
def iter_extras():
for item in sys.path + list(path_extras):
for dist in find_distributions(item):
if dist.version:
yield ((dist.key, dist.version), dist.location)
return cls(sys.executable, PythonIdentity.get(), dict(iter_extras()))
@classmethod
def _from_binary_external(cls, binary, path_extras):
environ = cls.sanitized_environment()
environ['PYTHONPATH'] = ':'.join(path_extras)
po = subprocess.Popen(
[binary],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environ)
so, _ = po.communicate(ID_PY)
output = so.decode('utf8').splitlines()
if len(output) == 0:
raise cls.IdentificationError('Could not establish identity of %s' % binary)
identity, extras = output[0], output[1:]
return cls(
binary,
PythonIdentity.from_id_string(identity),
extras=cls._parse_extras(extras))
@classmethod
def expand_path(cls, path):
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
return [os.path.join(path, fn) for fn in os.listdir(path)]
return []
@classmethod
def from_env(cls, hashbang):
"""Resolve a PythonInterpreter as /usr/bin/env would.
:param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH.
"""
paths = os.getenv('PATH', '').split(':')
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if hashbang == basefile:
try:
return cls.from_binary(fn)
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
@classmethod
def from_binary(cls, binary, path_extras=None):
path_extras = path_extras or ()
if binary not in cls.CACHE:
if binary == sys.executable:
cls.CACHE[binary] = cls._from_binary_internal(path_extras)
else:
cls.CACHE[binary] = cls._from_binary_external(binary, path_extras)
return cls.CACHE[binary]
@classmethod
def find(cls, paths):
"""
Given a list of files or directories, try to detect python interpreters amongst them.
Returns a list of PythonInterpreter objects.
"""
pythons = []
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):
try:
pythons.append(cls.from_binary(fn))
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
continue
return pythons
@classmethod
def filter(cls, pythons):
"""
Given a map of python interpreters in the format provided by PythonInterpreter.find(),
filter out duplicate versions and versions we would prefer not to use.
Returns a map in the same format as find.
"""
good = []
MAJOR, MINOR, SUBMINOR = range(3)
def version_filter(version):
return (version[MAJOR] == 2 and version[MINOR] >= 6 or
version[MAJOR] == 3 and version[MINOR] >= 2)
all_versions = set(interpreter.identity.version for interpreter in pythons)
good_versions = filter(version_filter, all_versions)
for version in good_versions:
# For each candidate, use the latest version we find on the filesystem.
candidates = defaultdict(list)
for interp in pythons:
if interp.identity.version == version:
candidates[interp.identity.interpreter].append(interp)
for interp_class in candidates:
candidates[interp_class].sort(
key=lambda interp: os.path.getmtime(interp.binary), reverse=True)
good.append(candidates[interp_class].pop(0))
return good
@classmethod
def sanitized_environment(cls):
# N.B. This is merely a hack because sysconfig.py on the default OS X
# installation of 2.6/2.7 breaks.
env_copy = os.environ.copy()
env_copy.pop('MACOSX_DEPLOYMENT_TARGET', None)
return env_copy
@classmethod
def replace(cls, requirement):
self = cls.get()
if self.identity.matches(requirement):
return False
for pi in cls.all():
if pi.identity.matches(requirement):
break
else:
raise cls.InterpreterNotFound('Could not find interpreter matching filter!')
os.execve(pi.binary, [pi.binary] + sys.argv, cls.sanitized_environment())
def __init__(self, binary, identity, extras=None):
"""Construct a PythonInterpreter.
    You should probably use PythonInterpreter.from_binary instead.
:param binary: The full path of the python binary.
:param identity: The :class:`PythonIdentity` of the PythonInterpreter.
:param extras: A mapping from (dist.key, dist.version) to dist.location
of the extras associated with this interpreter.
"""
self._binary = os.path.realpath(binary)
self._extras = extras or {}
self._identity = identity
def with_extra(self, key, version, location):
extras = self._extras.copy()
extras[(key, version)] = location
return self.__class__(self._binary, self._identity, extras)
@property
def extras(self):
return self._extras.copy()
@property
def binary(self):
return self._binary
@property
def identity(self):
return self._identity
@property
def python(self):
return self._identity.python
@property
def version(self):
return self._identity.version
@property
def version_string(self):
return str(self._identity)
def satisfies(self, capability):
if not isinstance(capability, list):
raise TypeError('Capability must be a list, got %s' % type(capability))
return not any(self.get_location(req) is None for req in capability)
def get_location(self, req):
req = maybe_requirement(req)
for dist, location in self.extras.items():
dist_name, dist_version = dist
if req.key == dist_name and dist_version in req:
return location
def __hash__(self):
return hash((self._binary, self._identity))
def __eq__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return (self._binary, self._identity) == (other._binary, other._identity)
def __lt__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return self.version < other.version
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self._binary, self._identity, self._extras)
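# Illustrative usage (not part of the original module):
#   interp = PythonInterpreter.get()   # the interpreter currently running this code
#   interp.version_string              # e.g. 'CPython-2.7.6'
#   PythonInterpreter.all()            # 2.6+/3.2+ interpreters found on $PATH (see filter())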
|
JetBrains/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyStringFormatInspection/PercentStringWithFormatStringReplacementSymbols.py
|
29
|
var = "if (1 == x)\n{\n%s;\n}" % "return 0"
"%s" % <warning descr="Too few arguments for format string">()</warning>
|
pierreg/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/scan_ops_test.py
|
21
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def numpy_reverse(x, axis):
length = len(x.shape)
if axis < 0:
axis = length + axis
ix = [slice(None, None, -1)
if i == axis else slice(None) for i in range(length)]
return x[ix]
def handle_options(func, x, axis, exclusive, reverse):
"""Adds tf options to numpy scan ops."""
length = len(x.shape)
if axis < 0:
axis = length + axis
if reverse:
x = numpy_reverse(x, axis)
if exclusive:
ix_head = [slice(0, 1) if i == axis else slice(None)
for i in range(length)]
ix_init = [slice(0, -1) if i == axis else slice(None)
for i in range(length)]
if func == np.cumsum:
init = np.zeros_like(x[ix_head])
elif func == np.cumprod:
init = np.ones_like(x[ix_head])
else:
raise ValueError("Unknown scan function.")
x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
else:
x = func(x, axis=axis)
if reverse:
x = numpy_reverse(x, axis)
return x
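# Illustrative example (not part of the original test): for x = np.array([1, 2, 3]),
# handle_options(np.cumsum, x, 0, exclusive=True, reverse=False) returns [0, 1, 3],
# which is the result tf.cumsum(x, 0, exclusive=True) is expected to match.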
class CumsumTest(tf.test.TestCase):
valid_dtypes = [np.int32, np.int64, np.float16, np.float32,
np.float64, np.complex64, np.complex128]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
with self.test_session(use_gpu=True):
tf_out = tf.cumsum(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = tf.convert_to_tensor(x)
with self.test_session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
tf.cumsum(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
tf.cumsum(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
tf.cumsum(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(0, 50).reshape(shape).astype(np.float64)
with self.test_session(use_gpu=True):
t = tf.convert_to_tensor(x)
result = tf.cumsum(t, axis, exclusive, reverse)
jacob_t, jacob_n = tf.test.compute_gradient(t,
shape,
result,
shape,
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, False)
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, True)
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, False)
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, True)
def testGradient2D(self):
for axis in (-1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([5, 10], axis, exclusive, reverse)
class CumprodTest(tf.test.TestCase):
valid_dtypes = [np.int32, np.int64, np.float16, np.float32,
np.float64, np.complex64, np.complex128]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
with self.test_session(use_gpu=True):
tf_out = tf.cumprod(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = tf.convert_to_tensor(x)
with self.test_session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
tf.cumprod(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
tf.cumprod(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
tf.cumprod(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(1, 9).reshape(shape).astype(np.float64)
with self.test_session(use_gpu=True):
t = tf.convert_to_tensor(x)
result = tf.cumprod(t, axis, exclusive, reverse)
jacob_t, jacob_n = tf.test.compute_gradient(t,
shape,
result,
shape,
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, False)
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, True)
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, False)
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, True)
def testGradient2D(self):
for axis in (-2, -1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([2, 4], axis, exclusive, reverse)
if __name__ == "__main__":
tf.test.main()
|
rohitwaghchaure/erpnext_smart
|
refs/heads/develop
|
erpnext/support/doctype/maintenance_schedule_detail/maintenance_schedule_detail.py
|
41
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class MaintenanceScheduleDetail(Document):
pass
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/cloud/orchestration/airflow/service/v1/airflow-service-v1-py/tests/__init__.py
|
951
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
trachelr/mne-python
|
refs/heads/master
|
mne/io/bti/read.py
|
10
|
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# simplified BSD-3 license
import struct
import numpy as np
from ...externals.six import b
def _unpack_matrix(fid, fmt, rows, cols, dtype):
""" Aux Function """
out = np.zeros((rows, cols), dtype=dtype)
bsize = struct.calcsize(fmt)
string = fid.read(bsize)
data = struct.unpack(fmt, string)
iter_mat = [(r, c) for r in range(rows) for c in range(cols)]
for idx, (row, col) in enumerate(iter_mat):
out[row, col] = data[idx]
return out
def _unpack_simple(fid, fmt, count):
""" Aux Function """
bsize = struct.calcsize(fmt)
string = fid.read(bsize)
data = list(struct.unpack(fmt, string))
out = data if count < 2 else list(data)
if len(out) > 0:
out = out[0]
return out
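# Illustrative note (not part of the original module): each reader below builds a
# big-endian struct format string, e.g. read_uint16(fid) uses '>H' (2 bytes,
# unsigned 16-bit) and read_double(fid) uses '>d' (8 bytes, IEEE 754 double).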
def read_str(fid, count=1):
""" Read string """
fmt = '>' + ('c' * count)
data = list(struct.unpack(fmt, fid.read(struct.calcsize(fmt))))
bytestr = b('').join(data[0:data.index(b('\x00')) if b('\x00') in data else
count])
return str(bytestr.decode('ascii')) # Return native str type for Py2/3
def read_char(fid, count=1):
" Read character from bti file """
return _unpack_simple(fid, '>' + ('c' * count), count)
def read_bool(fid, count=1):
""" Read bool value from bti file """
return _unpack_simple(fid, '>' + ('?' * count), count)
def read_uint8(fid, count=1):
""" Read unsigned 8bit integer from bti file """
return _unpack_simple(fid, '>' + ('B' * count), count)
def read_int8(fid, count=1):
""" Read 8bit integer from bti file """
return _unpack_simple(fid, '>' + ('b' * count), count)
def read_uint16(fid, count=1):
""" Read unsigned 16bit integer from bti file """
return _unpack_simple(fid, '>' + ('H' * count), count)
def read_int16(fid, count=1):
""" Read 16bit integer from bti file """
    return _unpack_simple(fid, '>' + ('h' * count), count)
def read_uint32(fid, count=1):
""" Read unsigned 32bit integer from bti file """
return _unpack_simple(fid, '>' + ('I' * count), count)
def read_int32(fid, count=1):
""" Read 32bit integer from bti file """
return _unpack_simple(fid, '>' + ('i' * count), count)
def read_uint64(fid, count=1):
""" Read unsigned 64bit integer from bti file """
return _unpack_simple(fid, '>' + ('Q' * count), count)
def read_int64(fid, count=1):
""" Read 64bit integer from bti file """
return _unpack_simple(fid, '>' + ('q' * count), count)
def read_float(fid, count=1):
""" Read 32bit float from bti file """
return _unpack_simple(fid, '>' + ('f' * count), count)
def read_double(fid, count=1):
""" Read 64bit float from bti file """
return _unpack_simple(fid, '>' + ('d' * count), count)
def read_int16_matrix(fid, rows, cols):
""" Read 16bit integer matrix from bti file """
fmt = '>' + ('h' * rows * cols)
return _unpack_matrix(fid, fmt, rows, cols, np.int16)
def read_float_matrix(fid, rows, cols):
""" Read 32bit float matrix from bti file """
fmt = '>' + ('f' * rows * cols)
return _unpack_matrix(fid, fmt, rows, cols, 'f4')
def read_double_matrix(fid, rows, cols):
""" Read 64bit float matrix from bti file """
fmt = '>' + ('d' * rows * cols)
return _unpack_matrix(fid, fmt, rows, cols, 'f8')
def read_transform(fid):
""" Read 64bit float matrix transform from bti file """
fmt = '>' + ('d' * 4 * 4)
return _unpack_matrix(fid, fmt, 4, 4, 'f8')
|
rtucker-mozilla/mozilla_inventory
|
refs/heads/master
|
vendor-local/src/django-extensions/django_extensions/management/utils.py
|
16
|
from django.conf import settings
import os
import sys
def get_project_root():
""" get the project root directory """
settings_mod = __import__(settings.SETTINGS_MODULE, {}, {}, [''])
return os.path.dirname(os.path.abspath(settings_mod.__file__))
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
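# Illustrative example (not part of the original module): for a read-only file with
# mode 0444 (r--r--r--), stat.S_IMODE(st.st_mode) | stat.S_IWUSR gives 0644
# (rw-r--r--), so the owner regains write access.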
|
fhe-odoo/odoo
|
refs/heads/8.0
|
addons/mass_mailing/tests/test_mail.py
|
388
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
class test_message_compose(TestMail):
def test_OO_mail_mail_tracking(self):
""" Tests designed for mail_mail tracking (opened, replied, bounced) """
pass
|
GetSomeBlocks/Score_Soccer
|
refs/heads/master
|
resources/lib/IMDbPY/imdb/parser/common/__init__.py
|
5
|
"""
parser.common package (imdb package).
This package provides some modules containing code shared amongst
different parsers.
Copyright 2005 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
|
FinnStutzenstein/OpenSlides
|
refs/heads/master
|
server/openslides/topics/__init__.py
|
9
|
default_app_config = "openslides.topics.apps.TopicsAppConfig"
|
crosswalk-project/blink-crosswalk
|
refs/heads/master
|
Source/bindings/scripts/interface_dependency_resolver.py
|
12
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Resolve interface dependencies, producing a merged IdlDefinitions object.
This library computes interface dependencies (partial interfaces and
implements), reads the dependency files, and merges them to the IdlDefinitions
for the main IDL file, producing an IdlDefinitions object representing the
entire interface.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler#TOC-Dependency-resolution
"""
import os.path
from utilities import idl_filename_to_component, is_valid_component_dependency
# The following extended attributes can be applied to a dependency interface,
# and are then applied to the individual members when merging.
# Note that this moves the extended attribute from the interface to the member,
# which changes the semantics and yields different code than the same extended
# attribute on the main interface.
DEPENDENCY_EXTENDED_ATTRIBUTES = frozenset([
'Conditional',
'RuntimeEnabled',
'TypeChecking',
])
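# Illustrative example (hypothetical IDL, not from the original file): given a
# dependency interface
#   [RuntimeEnabled=FeatureX] partial interface Window { attribute long foo; };
# the merge copies [RuntimeEnabled=FeatureX] onto the "foo" member itself, which is
# the per-member (rather than per-interface) semantics described above.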
class InterfaceDependencyResolver(object):
def __init__(self, interfaces_info, reader):
"""Initialize dependency resolver.
Args:
interfaces_info:
dict of interfaces information, from compute_dependencies.py
reader:
IdlReader, used for reading dependency files
"""
self.interfaces_info = interfaces_info
self.reader = reader
def resolve_dependencies(self, definitions, component):
"""Resolve dependencies, merging them into IDL definitions of main file.
Dependencies consist of 'partial interface' for the same interface as
in the main file, and other interfaces that this interface 'implements'.
These are merged into the main IdlInterface, as the main IdlInterface
implements all these members.
Referenced interfaces are added to IdlDefinitions, but not merged into
the main IdlInterface, as these are only referenced (their members are
introspected, but not implemented in this interface).
Inherited extended attributes are also added to the main IdlInterface.
Modifies definitions in place by adding parsed dependencies.
Args:
definitions: IdlDefinitions object, modified in place
component:
string, describing where the above definitions are defined,
'core' or 'modules'. See KNOWN_COMPONENTS in utilities.py
Returns:
A dictionary whose key is component and value is IdlDefinitions
object whose dependency is resolved.
Raises:
Exception:
A given IdlDefinitions object doesn't have any interfaces,
or a given IdlDefinitions object has incorrect referenced
interfaces.
"""
# FIXME: we need to resolve dependency when we implement partial
# dictionary.
if not definitions.interfaces:
raise Exception('No need to resolve any dependencies of '
'this definition: %s, because this should '
'have a dictionary.' % definitions.idl_name)
target_interface = next(definitions.interfaces.itervalues())
interface_name = target_interface.name
interface_info = self.interfaces_info[interface_name]
if 'inherited_extended_attributes' in interface_info:
target_interface.extended_attributes.update(
interface_info['inherited_extended_attributes'])
resolved_definitions = merge_interface_dependencies(
definitions,
component,
target_interface,
interface_info['dependencies_full_paths'] +
interface_info['dependencies_other_component_full_paths'],
self.reader)
for referenced_interface_name in interface_info['referenced_interfaces']:
referenced_definitions = self.reader.read_idl_definitions(
self.interfaces_info[referenced_interface_name]['full_path'])
for referenced_component in referenced_definitions:
if not is_valid_component_dependency(component, referenced_component):
raise Exception('This definitions: %s is defined in %s '
'but reference interface:%s is defined '
'in %s' % (definitions.idl_name,
component,
referenced_interface_name,
referenced_component))
resolved_definitions[component].update(referenced_definitions[component])
return resolved_definitions
def merge_interface_dependencies(definitions, component, target_interface, dependency_idl_filenames, reader):
"""Merge dependencies ('partial interface' and 'implements') in dependency_idl_filenames into target_interface.
Args:
definitions: IdlDefinitions object, modified in place
component:
string, describing where the above definitions are defined,
'core' or 'modules'. See KNOWN_COMPONENTS in utilities.py
target_interface: IdlInterface object, modified in place
dependency_idl_filenames:
Idl filenames which depend on the above definitions.
reader: IdlReader object.
Returns:
A dictionary whose key is component and value is IdlDefinitions
object whose dependency is resolved.
"""
resolved_definitions = {component: definitions}
    # Sort so the order is consistent and output can be compared from run to run.
for dependency_idl_filename in sorted(dependency_idl_filenames):
dependency_definitions = reader.read_idl_file(dependency_idl_filename)
dependency_component = idl_filename_to_component(dependency_idl_filename)
dependency_interface = next(dependency_definitions.interfaces.itervalues())
dependency_interface_basename, _ = os.path.splitext(os.path.basename(dependency_idl_filename))
transfer_extended_attributes(dependency_interface,
dependency_interface_basename)
# We need to use different checkdeps here for partial interface and
# inheritance.
if dependency_interface.is_partial:
# Case: dependency_interface is a partial interface of
# target_interface.
# So,
# - A partial interface defined in modules can update
# the original interface defined in core.
# However,
# - A partial interface defined in core cannot update
# the original interface defined in modules.
if not is_valid_component_dependency(dependency_component, component):
raise Exception('The partial interface:%s in %s cannot update '
'the original interface:%s in %s' % (dependency_interface.name,
dependency_component,
target_interface.name,
component))
if dependency_component in resolved_definitions:
                # When merging a new partial interface, we should not overwrite
                # ImplementedAs extended attributes already present in the merged
                # partial interface.
                # See also the comment on the "if 'ImplementedAs' not in ..." line
                # below.
dependency_interface.extended_attributes.pop('ImplementedAs', None)
resolved_definitions[dependency_component].update(dependency_definitions)
continue
dependency_interface.extended_attributes.update(target_interface.extended_attributes)
assert target_interface == definitions.interfaces[dependency_interface.name]
# A partial interface should use its original interface's
# ImplementedAs. If the original interface doesn't have,
# remove ImplementedAs defined in the partial interface.
# Because partial interface needs the original interface's
# cpp class to obtain partial interface's cpp class.
            # e.g., V8WindowPartial.cpp:
# DOMWindow* impl = V8Window::toImpl(holder);
# RawPtr<...> cppValue(DOMWindowQuota::webkitStorageInfo(impl));
# TODO(tasak): remove ImplementedAs extended attributes
# from all partial interfaces. Instead, rename all cpp/header
# files correctly. ImplementedAs should not be allowed in
# partial interfaces.
if 'ImplementedAs' not in target_interface.extended_attributes:
dependency_interface.extended_attributes.pop('ImplementedAs', None)
dependency_interface.original_interface = target_interface
target_interface.partial_interfaces.append(dependency_interface)
resolved_definitions[dependency_component] = dependency_definitions
else:
# Case: target_interface implements dependency_interface.
# So,
# - An interface defined in modules can implement some interface
# defined in core.
# In this case, we need "NoInterfaceObject" extended attribute.
# However,
# - An interface defined in core cannot implement any interface
# defined in modules.
if not is_valid_component_dependency(component, dependency_component):
raise Exception('The interface:%s in %s cannot implement '
'the interface:%s in %s.' % (dependency_interface.name,
dependency_component,
target_interface.name,
component))
if component != dependency_component and 'NoInterfaceObject' not in dependency_interface.extended_attributes:
raise Exception('The interface:%s in %s cannot implement '
'the interface:%s in %s because of '
'missing NoInterfaceObject.' % (dependency_interface.name,
dependency_component,
target_interface.name,
component))
resolved_definitions[component].update(dependency_definitions) # merges partial interfaces
# Implemented interfaces (non-partial dependencies) are also merged
# into the target interface, so Code Generator can just iterate
# over one list (and not need to handle 'implements' itself).
target_interface.merge(dependency_interface)
return resolved_definitions
def transfer_extended_attributes(dependency_interface, dependency_interface_basename):
"""Transfer extended attributes from dependency interface onto members.
Merging consists of storing certain interface-level data in extended
attributes of the *members* (because there is no separate dependency
interface post-merging).
The data storing consists of:
* applying certain extended attributes from the dependency interface
to its members
* storing the C++ class of the implementation in an internal
extended attribute of each member, [PartialInterfaceImplementedAs]
No return: modifies dependency_interface in place.
"""
merged_extended_attributes = dict(
(key, value)
for key, value in dependency_interface.extended_attributes.iteritems()
if key in DEPENDENCY_EXTENDED_ATTRIBUTES)
# A partial interface's members are implemented as static member functions
# in a separate C++ class. This class name is stored in
# [PartialInterfaceImplementedAs] which defaults to the basename of
# dependency IDL file.
# This class name can be overridden by [ImplementedAs] on the partial
# interface definition.
#
# Note that implemented interfaces do *not* need [ImplementedAs], since
# they are implemented on the C++ object |impl| itself, just like members of
# the main interface definition, so the bindings do not need to know in
# which class implemented interfaces are implemented.
#
# Currently [LegacyTreatAsPartialInterface] can be used to have partial
# interface behavior on implemented interfaces, but this is being removed
# as legacy cruft:
# FIXME: Remove [LegacyTreatAsPartialInterface]
# http://crbug.com/360435
#
# Note that [ImplementedAs] is used with different meanings on interfaces
# and members:
# for Blink class name and function name (or constant name), respectively.
# Thus we do not want to copy this from the interface to the member, but
# instead extract it and handle it separately.
if (dependency_interface.is_partial or
'LegacyTreatAsPartialInterface' in dependency_interface.extended_attributes):
merged_extended_attributes['PartialInterfaceImplementedAs'] = (
dependency_interface.extended_attributes.get(
'ImplementedAs', dependency_interface_basename))
def update_attributes(attributes, extras):
for key, value in extras.items():
if key not in attributes:
attributes[key] = value
for attribute in dependency_interface.attributes:
update_attributes(attribute.extended_attributes, merged_extended_attributes)
for constant in dependency_interface.constants:
update_attributes(constant.extended_attributes, merged_extended_attributes)
for operation in dependency_interface.operations:
update_attributes(operation.extended_attributes, merged_extended_attributes)
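# Illustrative example (hypothetical, not from the original file): a partial
# interface parsed from FooPartial.idl with no [ImplementedAs] gets
# [PartialInterfaceImplementedAs=FooPartial] on every attribute, constant and
# operation, so the generated bindings call static members of the FooPartial C++
# class instead of methods on the main interface's |impl| object.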
|
mattcongy/itshop
|
refs/heads/master
|
docker-images/taigav2/taiga-back/taiga/base/utils/dicts.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
def dict_sum(*args):
result = collections.Counter()
for arg in args:
assert isinstance(arg, dict)
result += collections.Counter(arg)
return result
def into_namedtuple(dictionary):
return collections.namedtuple('GenericDict', dictionary.keys())(**dictionary)
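# Illustrative usage (not part of the original module):
#   dict_sum({"bugs": 2, "tasks": 1}, {"bugs": 3})   # Counter({'bugs': 5, 'tasks': 1})
#   into_namedtuple({"x": 1, "y": 2}).x              # 1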
|
tavaresdong/courses-notes
|
refs/heads/master
|
ucb_cs61A/homework/hw08/tests/derive-exp.py
|
3
|
test = {
'name': 'derive-exp',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
scm> (derive x^2 'x)
(* 2 x)
scm> (derive x^3 'x)
(* 3 (^ x 2))
scm> (derive (make-sum x^3 x^2) 'x)
(+ (* 3 (^ x 2)) (* 2 x))
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
scm> (load 'hw08)
scm> (define x^2 (make-exp 'x 2))
scm> (define x^3 (make-exp 'x 3))
""",
'teardown': '',
'type': 'scheme'
}
]
}
|
prampey/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py
|
235
|
def test_quick(setup):
pass
|
arborh/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/ragged/ragged_string_ops.py
|
4
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ragged operations for working with string Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import compat as util_compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("strings.bytes_split")
def string_bytes_split(input, name=None): # pylint: disable=redefined-builtin
"""Split string elements of `input` into bytes.
Examples:
>>> tf.strings.bytes_split('hello').numpy()
array([b'h', b'e', b'l', b'l', b'o'], dtype=object)
>>> tf.strings.bytes_split(['hello', '123'])
<tf.RaggedTensor [[b'h', b'e', b'l', b'l', b'o'], [b'1', b'2', b'3']]>
Note that this op splits strings into bytes, not unicode characters. To
split strings into unicode characters, use `tf.strings.unicode_split`.
See also: `tf.io.decode_raw`, `tf.strings.split`, `tf.strings.unicode_split`.
Args:
input: A string `Tensor` or `RaggedTensor`: the strings to split. Must
have a statically known rank (`N`).
name: A name for the operation (optional).
Returns:
A `RaggedTensor` of rank `N+1`: the bytes that make up the source strings.
"""
with ops.name_scope(name, "StringsByteSplit", [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input,
name="input")
if isinstance(input, ragged_tensor.RaggedTensor):
return input.with_flat_values(string_bytes_split(input.flat_values))
rank = input.shape.ndims
if rank is None:
raise ValueError("input must have a statically-known rank.")
if rank == 0:
return string_bytes_split(array_ops.stack([input]))[0]
elif rank == 1:
indices, values, shape = gen_string_ops.string_split(
input, delimiter="", skip_empty=False)
return ragged_tensor.RaggedTensor.from_value_rowids(
values=values, value_rowids=indices[:, 0], nrows=shape[0],
validate=False)
else:
return string_bytes_split(ragged_tensor.RaggedTensor.from_tensor(input))
# pylint: disable=redefined-builtin
@tf_export("strings.unicode_encode")
def unicode_encode(input,
output_encoding,
errors="replace",
replacement_char=65533,
name=None):
r"""Encodes each sequence of Unicode code points in `input` into a string.
`result[i1...iN]` is the string formed by concatenating the Unicode
  codepoints `input[i1...iN, :]`, encoded using `output_encoding`.
Args:
input: An `N+1` dimensional potentially ragged integer tensor with shape
`[D1...DN, num_chars]`.
output_encoding: Unicode encoding that should be used to encode each
codepoint sequence. Can be `"UTF-8"`, `"UTF-16-BE"`, or `"UTF-32-BE"`.
errors: Specifies the response when an invalid codepoint is encountered
(optional). One of:
* `'replace'`: Replace invalid codepoint with the
`replacement_char`. (default)
* `'ignore'`: Skip invalid codepoints.
* `'strict'`: Raise an exception for any invalid codepoint.
replacement_char: The replacement character codepoint to be used in place of
any invalid input when `errors='replace'`. Any valid unicode codepoint may
be used. The default value is the default unicode replacement character
      which is 0xFFFD (decimal 65533).
name: A name for the operation (optional).
Returns:
A `N` dimensional `string` tensor with shape `[D1...DN]`.
#### Example:
>>> input = tf.ragged.constant(
... [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
>>> print(unicode_encode(input, 'UTF-8'))
tf.Tensor([b'G\xc3\xb6\xc3\xb6dnight' b'\xf0\x9f\x98\x8a'],
shape=(2,), dtype=string)
"""
with ops.name_scope(name, "UnicodeEncode", [input]):
input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(input)
if input_tensor.shape.ndims is None:
raise ValueError("Rank of input_tensor must be statically known.")
if ragged_tensor.is_ragged(input_tensor):
if input_tensor.flat_values.shape.ndims > 1:
# If the flat_values of our ragged tensor is multi-dimensional, we can
# process it separately and our output will have the same nested splits
# as our input.
return input_tensor.with_flat_values(
unicode_encode(input_tensor.flat_values, output_encoding, errors,
replacement_char))
elif input_tensor.ragged_rank > 1:
# Recursively process the values of the ragged tensor.
return input_tensor.with_values(
unicode_encode(input_tensor.values, output_encoding, errors,
replacement_char))
else:
# Our ragged tensor is of the correct shape (rank 1 flat_values tensor
# with ragged_rank of 1) so we can process it as normal.
return gen_string_ops.unicode_encode(
input_values=input_tensor.values,
input_splits=input_tensor.row_splits,
output_encoding=output_encoding,
errors=errors,
replacement_char=replacement_char)
else:
if input_tensor.shape.ndims == 2:
# The input tensor is of the correct 2-D shape, it's just not ragged.
return unicode_encode(
ragged_tensor.RaggedTensor.from_tensor(input_tensor),
output_encoding, errors, replacement_char)
elif input_tensor.shape.ndims > 2:
# We need to initially flatten the input tensor to 2-D, and then can
# reshape the output of our processed flattened tensor.
flat_input_tensor = array_ops.reshape(
input_tensor,
array_ops.stack([-1, array_ops.shape(input_tensor)[-1]]))
flat_output_tensor = unicode_encode(flat_input_tensor, output_encoding,
errors, replacement_char)
return array_ops.reshape(flat_output_tensor, input_tensor.shape[:-1])
elif input_tensor.shape.ndims == 0:
raise ValueError("input_tensor's rank must be at least 1.")
else:
# Our input tensor is rank 1, so we create a ragged tensor with an added
# dimension to create the correct input shape & type, and then remove
# the additional dimension from the output and return the string scalar.
ragged_input_tensor = ragged_tensor.RaggedTensor.from_row_splits(
input_tensor,
array_ops.stack(
[0, array_ops.shape(input_tensor, out_type=dtypes.int32)[0]]),
validate=False)
output_tensor = unicode_encode(ragged_input_tensor, output_encoding,
errors, replacement_char)
return array_ops.reshape(output_tensor, [])
# pylint: disable=redefined-builtin
@tf_export("strings.unicode_decode")
def unicode_decode(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
replace_control_characters=False,
name=None):
r"""Decodes each string in `input` into a sequence of Unicode code points.
`result[i1...iN, j]` is the Unicode codepoint for the `j`th character in
`input[i1...iN]`, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`; and in place of C0 control
characters in `input` when `replace_control_characters=True`.
replace_control_characters: Whether to replace the C0 control characters
`(U+0000 - U+001F)` with the `replacement_char`.
name: A name for the operation (optional).
Returns:
A `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensor is a `tf.Tensor` if `input` is a scalar, or a
`tf.RaggedTensor` otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> tf.strings.unicode_decode(input, 'UTF-8').to_list()
[[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
"""
with ops.name_scope(name, "UnicodeDecode", [input]):
return _unicode_decode(input, input_encoding, errors, replacement_char,
replace_control_characters, with_offsets=False)
@tf_export("strings.unicode_decode_with_offsets")
def unicode_decode_with_offsets(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
replace_control_characters=False,
name=None):
r"""Decodes each string into a sequence of code points with start offsets.
  This op is similar to `tf.strings.unicode_decode(...)`, but it also returns the
start offset for each character in its respective string. This information
can be used to align the characters with the original byte sequence.
Returns a tuple `(codepoints, start_offsets)` where:
* `codepoints[i1...iN, j]` is the Unicode codepoint for the `j`th character
in `input[i1...iN]`, when decoded using `input_encoding`.
* `start_offsets[i1...iN, j]` is the start byte offset for the `j`th
character in `input[i1...iN]`, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`; and in place of C0 control
characters in `input` when `replace_control_characters=True`.
replace_control_characters: Whether to replace the C0 control characters
`(U+0000 - U+001F)` with the `replacement_char`.
name: A name for the operation (optional).
Returns:
A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`.
* `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`.
* `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensors are `tf.Tensor`s if `input` is a scalar, or
`tf.RaggedTensor`s otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> result = tf.strings.unicode_decode_with_offsets(input, 'UTF-8')
>>> result[0].to_list() # codepoints
[[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
>>> result[1].to_list() # offsets
[[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
"""
with ops.name_scope(name, "UnicodeDecodeWithOffsets", [input]):
return _unicode_decode(input, input_encoding, errors, replacement_char,
replace_control_characters, with_offsets=True)
@tf_export("strings.unicode_split")
def unicode_split(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
name=None):
r"""Splits each string in `input` into a sequence of Unicode code points.
`result[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its
`j`th character, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`.
name: A name for the operation (optional).
Returns:
    A `N+1` dimensional `string` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensor is a `tf.Tensor` if `input` is a scalar, or a
`tf.RaggedTensor` otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> tf.strings.unicode_split(input, 'UTF-8').to_list()
[[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'],
[b'\xf0\x9f\x98\x8a']]
"""
with ops.name_scope(name, "UnicodeSplit", [input]):
codepoints = _unicode_decode(input, input_encoding, errors,
replacement_char, False, with_offsets=False)
return unicode_encode(
ragged_array_ops.expand_dims(codepoints, -1),
output_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char)
@tf_export("strings.unicode_split_with_offsets")
def unicode_split_with_offsets(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
name=None):
r"""Splits each string into a sequence of code points with start offsets.
  This op is similar to `tf.strings.unicode_split(...)`, but it also returns the
start offset for each character in its respective string. This information
can be used to align the characters with the original byte sequence.
Returns a tuple `(chars, start_offsets)` where:
* `chars[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its
`j`th character, when decoded using `input_encoding`.
* `start_offsets[i1...iN, j]` is the start byte offset for the `j`th
character in `input[i1...iN]`, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`.
name: A name for the operation (optional).
Returns:
    A tuple of `N+1` dimensional tensors `(chars, start_offsets)`.
    * `chars` is a `string` tensor with shape `[D1...DN, (num_chars)]`.
* `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensors are `tf.Tensor`s if `input` is a scalar, or
`tf.RaggedTensor`s otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> result = tf.strings.unicode_split_with_offsets(input, 'UTF-8')
>>> result[0].to_list() # character substrings
[[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'],
[b'\xf0\x9f\x98\x8a']]
>>> result[1].to_list() # offsets
[[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
"""
with ops.name_scope(name, "UnicodeSplitWithOffsets", [input]):
codepoints, offsets = _unicode_decode(input, input_encoding, errors,
replacement_char, False,
with_offsets=True)
chars = unicode_encode(
ragged_array_ops.expand_dims(codepoints, -1),
output_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char)
return chars, offsets
def _unicode_decode(input, input_encoding, errors, replacement_char,
replace_control_characters, with_offsets):
"""Decodes each string into a sequence of codepoints."""
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name="input")
input_ndims = input.shape.ndims
if input_ndims is None:
raise ValueError("Rank of `input` must be statically known.")
if input_ndims > 1:
# Convert to a ragged tensor with ragged_rank = input_ndims - 1.
if not ragged_tensor.is_ragged(input):
input = ragged_tensor.RaggedTensor.from_tensor(
input, ragged_rank=input_ndims - 1)
elif input.ragged_rank < input_ndims - 1:
input = input.with_flat_values(
ragged_tensor.RaggedTensor.from_tensor(
input.flat_values,
ragged_rank=input_ndims - input.ragged_rank + 1))
# Reshape the input to a flat vector, and apply the gen_string_ops op.
if ragged_tensor.is_ragged(input):
flat_input = array_ops.reshape(input.flat_values, [-1])
else:
flat_input = array_ops.reshape(input, [-1])
if with_offsets:
decode_op = gen_string_ops.unicode_decode_with_offsets
else:
decode_op = gen_string_ops.unicode_decode
flat_result = decode_op(
input=flat_input,
input_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char,
replace_control_characters=replace_control_characters)
if input_ndims == 0:
codepoints = flat_result.char_values
if with_offsets:
offsets = flat_result.char_to_byte_starts
else:
codepoints = ragged_tensor.RaggedTensor.from_row_splits(
flat_result.char_values, flat_result.row_splits, validate=False)
if input_ndims > 1:
codepoints = input.with_flat_values(codepoints)
if with_offsets:
offsets = ragged_tensor.RaggedTensor.from_row_splits(
flat_result.char_to_byte_starts, flat_result.row_splits,
validate=False)
if input_ndims > 1:
offsets = input.with_flat_values(offsets)
if with_offsets:
return codepoints, offsets
else:
return codepoints
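# Illustrative note (not part of the original module): for a rank-2 dense input such
# as [['hi', 'bye'], ['yo', '!']], _unicode_decode converts it to a RaggedTensor
# with ragged_rank=1, decodes the flattened strings with a single gen_string_ops
# call, and rebuilds the result with the original row partitions, so each input
# string maps to its own row of codepoints.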
@tf_export("strings.split", v1=[])
def string_split_v2(input, sep=None, maxsplit=-1, name=None): # pylint: disable=redefined-builtin
"""Split elements of `input` based on `sep` into a `RaggedTensor`.
Let N be the size of `input` (typically N will be the batch size). Split each
element of `input` based on `sep` and return a `SparseTensor` or
`RaggedTensor` containing the split tokens. Empty tokens are ignored.
Example:
>>> tf.strings.split('hello world').numpy()
array([b'hello', b'world'], dtype=object)
>>> tf.strings.split(['hello world', 'a b c'])
<tf.RaggedTensor [[b'hello', b'world'], [b'a', b'b', b'c']]>
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and
`sep` of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
string, consecutive whitespace are regarded as a single separator, and the
result will contain no empty strings at the start or end if the string has
leading or trailing whitespace.
Note that the above mentioned behavior matches python's str.split.
Args:
input: A string `Tensor` of rank `N`, the strings to split. If
`rank(input)` is not known statically, then it is assumed to be `1`.
sep: `0-D` string `Tensor`, the delimiter string.
    maxsplit: An `int`. If `maxsplit > 0`, at most `maxsplit` splits are
      performed on each element.
name: A name for the operation (optional).
Raises:
ValueError: If sep is not a string.
Returns:
A `RaggedTensor` of rank `N+1`, the strings split according to the
delimiter.
"""
with ops.name_scope(name, "StringSplit", [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
input, dtype=dtypes.string, name="input")
if isinstance(input, ragged_tensor.RaggedTensor):
return input.with_flat_values(
string_split_v2(input.flat_values, sep, maxsplit))
rank = input.shape.ndims
if rank == 0:
return string_split_v2(array_ops.stack([input]), sep, maxsplit)[0]
elif rank == 1 or rank is None:
sparse_result = string_ops.string_split_v2(
input, sep=sep, maxsplit=maxsplit)
return ragged_tensor.RaggedTensor.from_value_rowids(
values=sparse_result.values,
value_rowids=sparse_result.indices[:, 0],
nrows=sparse_result.dense_shape[0],
validate=False)
else:
return string_split_v2(
ragged_tensor.RaggedTensor.from_tensor(input), sep, maxsplit)
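# Usage sketch (illustrative, assuming eager execution): the empty-string
# behaviour documented above can be seen with an explicit `sep`, e.g.
#
#   >>> tf.strings.split(["1<>2<><>3"], sep="<>").to_list()
#   [[b'1', b'2', b'', b'3']]
#
# whereas `sep=None` collapses runs of whitespace and drops leading/trailing
# empty tokens.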
@tf_export(v1=["string_split"])
@deprecation.deprecated_args(None,
"delimiter is deprecated, please use sep instead.",
"delimiter")
def string_split(source, sep=None, skip_empty=True, delimiter=None,
result_type="SparseTensor", name=None): # pylint: disable=invalid-name
"""Split elements of `source` based on `delimiter`.
Let N be the size of `source` (typically N will be the batch size). Split each
element of `source` based on `delimiter` and return a `SparseTensor`
or `RaggedTensor` containing the split tokens. Empty tokens are ignored.
If `sep` is an empty string, each element of the `source` is split
into individual strings, each containing one byte. (This includes splitting
multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is
treated as a set of delimiters with each considered a potential split point.
Examples:
>>> print(tf.compat.v1.string_split(['hello world', 'a b c']))
SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...),
values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...),
dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64))
>>> print(tf.compat.v1.string_split(['hello world', 'a b c'],
... result_type="RaggedTensor"))
<tf.RaggedTensor [[b'hello', b'world'], [b'a', b'b', b'c']]>
Args:
source: `1-D` string `Tensor`, the strings to split.
    sep: `0-D` string `Tensor`, the delimiter string; it should have length
      0 or 1. Default is ' '.
skip_empty: A `bool`. If `True`, skip the empty strings from the result.
delimiter: deprecated alias for `sep`.
result_type: The tensor type for the result: one of `"RaggedTensor"` or
`"SparseTensor"`.
name: A name for the operation (optional).
Raises:
ValueError: If delimiter is not a string.
Returns:
A `SparseTensor` or `RaggedTensor` of rank `2`, the strings split according
to the delimiter. The first column of the indices corresponds to the row
in `source` and the second column corresponds to the index of the split
component in this row.
"""
with ops.name_scope(name, "StringSplit", [source]):
sparse_result = string_ops.string_split(
source, sep=sep, skip_empty=skip_empty, delimiter=delimiter)
if result_type == "SparseTensor":
return sparse_result
elif result_type == "RaggedTensor":
return ragged_tensor.RaggedTensor.from_value_rowids(
values=sparse_result.values,
value_rowids=sparse_result.indices[:, 0],
nrows=sparse_result.dense_shape[0],
validate=False)
else:
raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.")
# In TensorFlow 1.x, "tf.strings.split" uses the new signature (with maxsplit),
# but we need to add the result_type argument.
@tf_export(v1=["strings.split"])
def strings_split_v1(input=None, sep=None, maxsplit=-1, # pylint: disable=redefined-builtin
result_type="SparseTensor", source=None, name=None):
"""Split elements of `input` based on `sep`.
Let N be the size of `input` (typically N will be the batch size). Split each
element of `input` based on `sep` and return a `SparseTensor` or
`RaggedTensor` containing the split tokens. Empty tokens are ignored.
Examples:
>>> print(tf.compat.v1.strings.split(['hello world', 'a b c']))
SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...),
values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...),
dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64))
>>> print(tf.compat.v1.strings.split(['hello world', 'a b c'],
... result_type="RaggedTensor"))
<tf.RaggedTensor [[b'hello', b'world'], [b'a', b'b', b'c']]>
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and
`sep` of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
  string, consecutive whitespace is regarded as a single separator, and the
result will contain no empty strings at the start or end if the string has
leading or trailing whitespace.
  Note that the above-mentioned behavior matches Python's str.split.
Args:
input: A string `Tensor` of rank `N`, the strings to split. If
`rank(input)` is not known statically, then it is assumed to be `1`.
sep: `0-D` string `Tensor`, the delimiter character.
    maxsplit: An `int`. If `maxsplit > 0`, at most `maxsplit` splits are
      performed on each element.
result_type: The tensor type for the result: one of `"RaggedTensor"` or
`"SparseTensor"`.
source: alias for "input" argument.
name: A name for the operation (optional).
Raises:
ValueError: If sep is not a string.
Returns:
A `SparseTensor` or `RaggedTensor` of rank `N+1`, the strings split
according to the delimiter.
"""
input = deprecation.deprecated_argument_lookup(
"input", input, "source", source)
with ops.name_scope(name, "StringSplit", [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
input, dtype=dtypes.string, name="input")
if input.shape.rank == 0:
input = gen_array_ops.expand_dims(input, 0)
if result_type == "SparseTensor":
if input.shape.rank == 1:
return string_ops.string_split_v2(input, sep=sep, maxsplit=maxsplit)
else:
return string_split_v2(input, sep=sep, maxsplit=maxsplit).to_sparse()
elif result_type == "RaggedTensor":
return string_split_v2(input, sep=sep, maxsplit=maxsplit)
else:
raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.")
def reduce_join(inputs, axis=None, keepdims=None, separator="", name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return ragged_math_ops.ragged_reduce_aggregate(
string_ops.reduce_join, string_ops.unsorted_segment_join, inputs, axis,
keepdims, separator, name or "RaggedSegmentJoin")
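# Illustrative only: for a ragged input, reducing over the innermost axis
# joins each row with `separator`. Assuming eager execution and that
# `tf.strings.reduce_join` dispatches to the ragged aggregate above, one
# would expect roughly:
#
#   >>> rt = tf.ragged.constant([["a", "b"], ["c"]])
#   >>> tf.strings.reduce_join(rt, axis=-1, separator=" ").numpy()
#   array([b'a b', b'c'], dtype=object)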
@tf_export("strings.ngrams")
def ngrams(data,
ngram_width,
separator=" ",
pad_values=None,
padding_width=None,
preserve_short_sequences=False,
name=None):
"""Create a tensor of n-grams based on `data`.
  Creates a tensor of n-grams based on `data`. The n-grams are created by
  joining windows of `ngram_width` adjacent strings from the inner axis of
  `data` using `separator`.
The input data can be padded on both the start and end of the sequence, if
desired, using the `pad_values` argument. If set, `pad_values` should contain
either a tuple of strings or a single string; the 0th element of the tuple
will be used to pad the left side of the sequence and the 1st element of the
tuple will be used to pad the right side of the sequence. The `padding_width`
arg controls how many padding values are added to each side; it defaults to
`ngram_width-1`.
If this op is configured to not have padding, or if it is configured to add
padding with `padding_width` set to less than ngram_width-1, it is possible
that a sequence, or a sequence plus padding, is smaller than the ngram
width. In that case, no ngrams will be generated for that sequence. This can
be prevented by setting `preserve_short_sequences`, which will cause the op
to always generate at least one ngram per non-empty sequence.
Args:
data: A Tensor or RaggedTensor containing the source data for the ngrams.
ngram_width: The width(s) of the ngrams to create. If this is a list or
tuple, the op will return ngrams of all specified arities in list order.
Values must be non-Tensor integers greater than 0.
separator: The separator string used between ngram elements. Must be a
string constant, not a Tensor.
pad_values: A tuple of (left_pad_value, right_pad_value), a single string,
or None. If None, no padding will be added; if a single string, then that
string will be used for both left and right padding. Values must be Python
strings.
padding_width: If set, `padding_width` pad values will be added to both
sides of each sequence. Defaults to `ngram_width`-1. Must be greater than
0. (Note that 1-grams are never padded, regardless of this value.)
preserve_short_sequences: If true, then ensure that at least one ngram is
generated for each input sequence. In particular, if an input sequence is
      shorter than `min(ngram_width) + 2*padding_width`, then generate a single
ngram containing the entire sequence. If false, then no ngrams are
generated for these short input sequences.
name: The op name.
Returns:
A RaggedTensor of ngrams. If `data.shape=[D1...DN, S]`, then
`output.shape=[D1...DN, NUM_NGRAMS]`, where
`NUM_NGRAMS=S-ngram_width+1+2*padding_width`.
Raises:
TypeError: if `pad_values` is set to an invalid type.
ValueError: if `pad_values`, `padding_width`, or `ngram_width` is set to an
invalid value.
"""
with ops.name_scope(name, "StringNGrams", [data]):
if pad_values is None:
left_pad = ""
right_pad = ""
elif isinstance(pad_values, (list, tuple)):
if (not isinstance(pad_values[0], util_compat.bytes_or_text_types) or
not isinstance(pad_values[1], util_compat.bytes_or_text_types)):
raise TypeError(
"pad_values must be a string, tuple of strings, or None.")
left_pad = pad_values[0]
right_pad = pad_values[1]
else:
if not isinstance(pad_values, util_compat.bytes_or_text_types):
raise TypeError(
"pad_values must be a string, tuple of strings, or None.")
left_pad = pad_values
right_pad = pad_values
if padding_width is not None and padding_width < 1:
raise ValueError("padding_width must be greater than 0.")
if padding_width is not None and pad_values is None:
raise ValueError("pad_values must be provided if padding_width is set.")
data = ragged_tensor.convert_to_tensor_or_ragged_tensor(
data, name="data", dtype=dtypes.string)
# preserve the shape of the data if it is a tensor
to_tensor = False
if isinstance(data, ops.Tensor):
dense_shape = array_ops.concat([array_ops.shape(data)[:-1], [-1]], axis=0)
to_tensor = True
if not isinstance(data, ragged_tensor.RaggedTensor):
if data.shape.ndims is None:
raise ValueError("Rank of data must be known.")
elif data.shape.ndims == 0:
raise ValueError("Data must have rank>0")
elif data.shape.ndims == 1:
rt = ragged_tensor.RaggedTensor.from_row_starts(
data, [0], validate=False)
return ngrams(rt, ngram_width, separator, pad_values, padding_width,
preserve_short_sequences, name)[0]
else:
data = ragged_tensor.RaggedTensor.from_tensor(
data, ragged_rank=data.shape.ndims - 1)
if data.ragged_rank > 1:
output = data.with_values(
ngrams(data.values, ngram_width, separator, pad_values, padding_width,
preserve_short_sequences, name))
return array_ops.reshape(output.flat_values,
dense_shape) if to_tensor else output
if pad_values is None:
padding_width = 0
if pad_values is not None and padding_width is None:
padding_width = -1
if not isinstance(ngram_width, (list, tuple)):
ngram_widths = [ngram_width]
else:
ngram_widths = ngram_width
for width in ngram_widths:
if width < 1:
raise ValueError("All ngram_widths must be greater than 0. Got %s" %
ngram_width)
output, output_splits = gen_string_ops.string_n_grams(
data=data.flat_values,
data_splits=data.row_splits,
separator=separator,
ngram_widths=ngram_widths,
left_pad=left_pad,
right_pad=right_pad,
pad_width=padding_width,
preserve_short_sequences=preserve_short_sequences)
    # If the input is a dense Tensor, the output should also be a dense Tensor.
output = ragged_tensor.RaggedTensor.from_row_splits(
values=output, row_splits=output_splits, validate=False)
return array_ops.reshape(output.flat_values,
dense_shape) if to_tensor else output
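# Usage sketch (illustrative, assuming eager execution):
#
#   >>> tf.strings.ngrams(tf.ragged.constant([["a", "b", "c"]]), 2).to_list()
#   [[b'a b', b'b c']]
#
# With `pad_values=("<", ">")` and the default `padding_width` of
# `ngram_width - 1`, each row would additionally be padded on both sides
# before the windows are joined.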
|
Doveps/mono
|
refs/heads/master
|
bassist/bassist/parser/json_file/lib.py
|
2
|
# Copyright (c) 2015 Kurt Yoder
# See the file LICENSE for copying permission.
import logging
import os
import importlib
module_logger = logging.getLogger(__name__)
def get_parser(path):
    '''For a given json file path, e.g. "/path/to/facts.json", look for a
    matching parser module next to this file (in the same package). If no
    such parser module is found, ignore the json file and return None.'''
# ./facts.json -> facts
parser_name = os.path.splitext(os.path.basename(path))[0]
module_logger.debug('importlib: %s', '..'+parser_name)
try:
parser_module = importlib.import_module('..'+parser_name, __name__)
except ImportError:
module_logger.debug('failed parser import for %s; skipping', path)
return None
except KeyError:
module_logger.debug('malformed json file %s; skipping', path)
return None
# facts -> FactsJSON
module_name = ''.join([w.capitalize() for w in parser_name.split('_')]) + 'JSON'
module_logger.debug('module: %s', module_name)
module_logger.debug('loaded')
return getattr(parser_module, module_name)(path)
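# Illustrative usage (hypothetical paths): given a sibling module ``facts.py``
# in this package defining ``FactsJSON``, calling
# ``get_parser('/path/to/facts.json')`` imports that module and returns
# ``FactsJSON('/path/to/facts.json')``; a json file with no matching parser
# module yields ``None`` instead.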
|
donami/overflow
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
enovance/numeter
|
refs/heads/master
|
web-app/numeter_webapp/multiviews/views/customize/event.py
|
2
|
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from core.utils.decorators import login_required
from core.utils import make_page
from multiviews.models import Event
from multiviews.forms.event import Small_Event_Form as Event_Form
from json import dumps as jdumps
@login_required()
def index(request):
q = request.GET.get('q','')
events = Event.objects.user_web_filter(q, request.user)
events = make_page(events, int(request.GET.get('page',1)), 30)
return render(request, 'customize/event/index.html', {
'Events': events,
'q':q,
})
@login_required()
def list(request):
q = request.GET.get('q','')
events = Event.objects.user_web_filter(q, request.user)
events = make_page(events, int(request.GET.get('page',1)), 30)
return render(request, 'customize/event/list.html', {
'Events': events,
'q':q,
})
@login_required()
def add(request):
return render(request, 'customize/event/event.html', {
'Event_Form': Event_Form(user=request.user),
})
@login_required()
def edit(request, event_id):
E = get_object_or_404(Event.objects.filter(id=event_id))
F = Event_Form(instance=E, user=request.user)
return render(request, 'customize/event/event.html', {
'Event_Form': F,
})
|
jostep/tensorflow
|
refs/heads/master
|
tensorflow/contrib/tensor_forest/python/kernel_tests/scatter_add_ndim_op_test.py
|
114
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.scatter_add_ndim_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class ScatterAddNdimTest(test_util.TensorFlowTestCase):
def test1dim(self):
input_data = variables.Variable(
[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.])
indices = [[1], [10]]
updates = [100., 200.]
with self.test_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(
[1., 102., 3., 4., 5., 6., 7., 8., 9., 10., 211., 12.],
input_data.eval())
def test3dim(self):
input_data = variables.Variable([[[1., 2., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 12.]]])
indices = [[0, 0, 1], [1, 1, 2]]
updates = [100., 200.]
with self.test_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual([[[1., 102., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 212.]]], input_data.eval())
def testNoUpdates(self):
init_val = [[[1., 2., 3.], [4., 5., 6.]], [[7., 8., 9.], [10., 11., 12.]]]
input_data = variables.Variable(init_val)
indices = []
updates = []
with self.test_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(init_val, input_data.eval())
def testBadInput(self):
init_val = [[[1., 2., 3.], [4., 5., 6.]], [[7., 8., 9.], [10., 11., 12.]]]
input_data = variables.Variable(init_val)
indices = [[0, 0, 1], [1, 1, 2]]
updates = [100.]
with self.test_session():
variables.global_variables_initializer().run()
with self.assertRaisesOpError(
'Number of updates should be same as number of indices.'):
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(init_val, input_data.eval())
def testIncompleteIndices(self):
input_data = variables.Variable([[[1., 2., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 12.]]])
indices = [[0, 0], [1, 1]]
updates = [[100., 200., 300.], [400., 500., 600.]]
with self.test_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual([[[101., 202., 303.], [4., 5., 6.]],
[[7., 8., 9.], [410., 511., 612.]]],
input_data.eval())
if __name__ == '__main__':
googletest.main()
|
encbladexp/ansible
|
refs/heads/devel
|
lib/ansible/plugins/callback/__init__.py
|
8
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import difflib
import json
import sys
from copy import deepcopy
from ansible import constants as C
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_text
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins import AnsiblePlugin, get_plugin_class
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
if PY3:
# OrderedDict is needed for a backwards compat shim on Python3.x only
# https://github.com/ansible/ansible/pull/49512
from collections import OrderedDict
else:
OrderedDict = None
global_display = Display()
__all__ = ["CallbackBase"]
_DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations'))
class CallbackBase(AnsiblePlugin):
'''
This is a base ansible callback class that does nothing. New callbacks should
use this class as a base and override any callback methods they wish to execute
custom actions.
'''
def __init__(self, display=None, options=None):
if display:
self._display = display
else:
self._display = global_display
if self._display.verbosity >= 4:
name = getattr(self, 'CALLBACK_NAME', 'unnamed')
ctype = getattr(self, 'CALLBACK_TYPE', 'old')
version = getattr(self, 'CALLBACK_VERSION', '1.0')
self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__))
self.disabled = False
self.wants_implicit_tasks = False
self._plugin_options = {}
if options is not None:
self.set_options(options)
self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason')
''' helper for callbacks, so they don't all have to include deepcopy '''
_copy_result = deepcopy
def set_option(self, k, v):
self._plugin_options[k] = v
def get_option(self, k):
return self._plugin_options[k]
def set_options(self, task_keys=None, var_options=None, direct=None):
''' This is different than the normal plugin method as callbacks get called early and really don't accept keywords.
Also _options was already taken for CLI args and callbacks use _plugin_options instead.
'''
# load from config
self._plugin_options = C.config.get_plugin_options(get_plugin_class(self), self._load_name, keys=task_keys, variables=var_options, direct=direct)
@staticmethod
def host_label(result):
"""Return label for the hostname (& delegated hostname) of a task
result.
"""
label = "%s" % result._host.get_name()
if result._task.delegate_to and result._task.delegate_to != result._host.get_name():
# show delegated host
label += " -> %s" % result._task.delegate_to
# in case we have 'extra resolution'
ahost = result._result.get('_ansible_delegated_vars', {}).get('ansible_host', result._task.delegate_to)
if result._task.delegate_to != ahost:
label += "(%s)" % ahost
return label
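    # Illustrative only (hypothetical values): for a task on host "web01"
    # delegated to "db01" whose delegated vars resolve `ansible_host` to
    # "10.0.0.5", host_label() would return "web01 -> db01(10.0.0.5)".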
def _run_is_verbose(self, result, verbosity=0):
return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True)
and result._result.get('_ansible_verbose_override', False) is False)
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
indent = 4
        # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
abridged_result = strip_internal_keys(module_response_deepcopy(result))
# remove invocation unless specifically wanting it
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
# remove diff information from screen output
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
# remove exception from screen output
if 'exception' in abridged_result:
del abridged_result['exception']
try:
jsonified_results = json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
except TypeError:
# Python3 bug: throws an exception when keys are non-homogenous types:
# https://bugs.python.org/issue25457
# sort into an OrderedDict and then json.dumps() that instead
if not OrderedDict:
raise
jsonified_results = json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)),
cls=AnsibleJSONEncoder, indent=indent,
ensure_ascii=False, sort_keys=False)
return jsonified_results
def _handle_warnings(self, res):
''' display warnings, if enabled and any exist in the result '''
if C.ACTION_WARNINGS:
if 'warnings' in res and res['warnings']:
for warning in res['warnings']:
self._display.warning(warning)
del res['warnings']
if 'deprecations' in res and res['deprecations']:
for warning in res['deprecations']:
self._display.deprecated(**warning)
del res['deprecations']
def _handle_exception(self, result, use_stderr=False):
if 'exception' in result:
msg = "An exception occurred during task execution. "
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result['exception'].strip().split('\n')[-1]
msg += "To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "The full traceback is:\n" + result['exception']
del result['exception']
self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr)
def _serialize_diff(self, diff):
return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'
def _get_diff(self, difflist):
if not isinstance(difflist, list):
difflist = [difflist]
ret = []
for diff in difflist:
if 'dst_binary' in diff:
ret.append(u"diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append(u"diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append(u"diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append(u"diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
# format complex structures into 'files'
for x in ['before', 'after']:
if isinstance(diff[x], MutableMapping):
diff[x] = self._serialize_diff(diff[x])
elif diff[x] is None:
diff[x] = ''
if 'before_header' in diff:
before_header = u"before: %s" % diff['before_header']
else:
before_header = u'before'
if 'after_header' in diff:
after_header = u"after: %s" % diff['after_header']
else:
after_header = u'after'
before_lines = diff['before'].splitlines(True)
after_lines = diff['after'].splitlines(True)
if before_lines and not before_lines[-1].endswith(u'\n'):
before_lines[-1] += u'\n\\ No newline at end of file\n'
if after_lines and not after_lines[-1].endswith('\n'):
after_lines[-1] += u'\n\\ No newline at end of file\n'
differ = difflib.unified_diff(before_lines,
after_lines,
fromfile=before_header,
tofile=after_header,
fromfiledate=u'',
tofiledate=u'',
n=C.DIFF_CONTEXT)
difflines = list(differ)
if len(difflines) >= 3 and sys.version_info[:2] == (2, 6):
# difflib in Python 2.6 adds trailing spaces after
# filenames in the -- before/++ after headers.
difflines[0] = difflines[0].replace(u' \n', u'\n')
difflines[1] = difflines[1].replace(u' \n', u'\n')
# it also treats empty files differently
difflines[2] = difflines[2].replace(u'-1,0', u'-0,0').replace(u'+1,0', u'+0,0')
has_diff = False
for line in difflines:
has_diff = True
if line.startswith(u'+'):
line = stringc(line, C.COLOR_DIFF_ADD)
elif line.startswith(u'-'):
line = stringc(line, C.COLOR_DIFF_REMOVE)
elif line.startswith(u'@@'):
line = stringc(line, C.COLOR_DIFF_LINES)
ret.append(line)
if has_diff:
ret.append('\n')
if 'prepared' in diff:
ret.append(diff['prepared'])
return u''.join(ret)
def _get_item_label(self, result):
''' retrieves the value to be displayed as a label for an item entry from a result object'''
if result.get('_ansible_no_log', False):
item = "(censored due to no_log)"
else:
item = result.get('_ansible_item_label', result.get('item'))
return item
def _process_items(self, result):
# just remove them as now they get handled by individual callbacks
del result._result['results']
def _clean_results(self, result, task_name):
''' removes data from results for display '''
# mostly controls that debug only outputs what it was meant to
if task_name in C._ACTION_DEBUG:
if 'msg' in result:
# msg should be alone
for key in list(result.keys()):
if key not in _DEBUG_ALLOWED_KEYS and not key.startswith('_'):
result.pop(key)
else:
# 'var' value as field, so eliminate others and what is left should be varname
for hidme in self._hide_in_debug:
result.pop(hidme, None)
def _print_task_path(self, task, color=C.COLOR_DEBUG):
path = task.get_path()
if path:
self._display.display(u"task path: %s" % path, color=color)
def set_play_context(self, play_context):
pass
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
pass
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, name):
pass
def playbook_on_stats(self, stats):
pass
def on_file_diff(self, host, diff):
pass
# V2 METHODS, by default they call v1 counterparts if possible
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
def v2_runner_on_failed(self, result, ignore_errors=False):
host = result._host.get_name()
self.runner_on_failed(host, result._result, ignore_errors)
def v2_runner_on_ok(self, result):
host = result._host.get_name()
self.runner_on_ok(host, result._result)
def v2_runner_on_skipped(self, result):
if C.DISPLAY_SKIPPED_HOSTS:
host = result._host.get_name()
self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 'results', {})))
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
self.runner_on_unreachable(host, result._result)
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
# FIXME, get real clock
clock = 0
self.runner_on_async_poll(host, result._result, jid, clock)
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_ok(host, result._result, jid)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
# Attempt to get the async job ID. If the job does not finish before the
# async timeout value, the ID may be within the unparsed 'async_result' dict.
jid = result._result.get('ansible_job_id')
if not jid and 'async_result' in result._result:
jid = result._result['async_result'].get('ansible_job_id')
self.runner_on_async_failed(host, result._result, jid)
def v2_playbook_on_start(self, playbook):
self.playbook_on_start()
def v2_playbook_on_notify(self, handler, host):
self.playbook_on_notify(host, handler)
def v2_playbook_on_no_hosts_matched(self):
self.playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
self.playbook_on_no_hosts_remaining()
def v2_playbook_on_task_start(self, task, is_conditional):
self.playbook_on_task_start(task.name, is_conditional)
# FIXME: not called
def v2_playbook_on_cleanup_task_start(self, task):
pass # no v1 correspondence
def v2_playbook_on_handler_task_start(self, task):
pass # no v1 correspondence
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
# FIXME: not called
def v2_playbook_on_import_for_host(self, result, imported_file):
host = result._host.get_name()
self.playbook_on_import_for_host(host, imported_file)
# FIXME: not called
def v2_playbook_on_not_import_for_host(self, result, missing_file):
host = result._host.get_name()
self.playbook_on_not_import_for_host(host, missing_file)
def v2_playbook_on_play_start(self, play):
self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
self.playbook_on_stats(stats)
def v2_on_file_diff(self, result):
if 'diff' in result._result:
host = result._host.get_name()
self.on_file_diff(host, result._result['diff'])
def v2_playbook_on_include(self, included_file):
pass # no v1 correspondence
def v2_runner_item_on_ok(self, result):
pass
def v2_runner_item_on_failed(self, result):
pass
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_retry(self, result):
pass
def v2_runner_on_start(self, host, task):
"""Event used when host begins execution of a task
.. versionadded:: 2.8
"""
pass
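# A minimal custom callback plugin might subclass CallbackBase roughly as
# follows (illustrative sketch only, not part of this module; the name
# "timer_example" is hypothetical):
#
#   from ansible.plugins.callback import CallbackBase
#
#   class CallbackModule(CallbackBase):
#       CALLBACK_VERSION = 2.0
#       CALLBACK_TYPE = 'notification'
#       CALLBACK_NAME = 'timer_example'
#
#       def v2_playbook_on_stats(self, stats):
#           self._display.display('playbook finished')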
|
ayumilong/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/sanitize-rule-names/gyptest-sanitize-rule-names.py
|
344
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure rule names with non-"normal" characters in them don't cause
broken build files. This test was originally causing broken .ninja files.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('sanitize-rule-names.gyp')
test.build('sanitize-rule-names.gyp', test.ALL)
test.pass_test()
|
frankk00/realtor
|
refs/heads/master
|
gdata/Crypto/PublicKey/__init__.py
|
273
|
"""Public-key encryption and signature algorithms.
Public-key encryption uses two different keys, one for encryption and
one for decryption. The encryption key can be made public, and the
decryption key is kept private. Many public-key algorithms can also
be used to sign messages, and some can *only* be used for signatures.
Crypto.PublicKey.DSA Digital Signature Algorithm. (Signature only)
Crypto.PublicKey.ElGamal (Signing and encryption)
Crypto.PublicKey.RSA (Signing, encryption, and blinding)
Crypto.PublicKey.qNEW (Signature only)
"""
__all__ = ['RSA', 'DSA', 'ElGamal', 'qNEW']
__revision__ = "$Id: __init__.py,v 1.4 2003/04/03 20:27:13 akuchling Exp $"
|
brianmay/karaage
|
refs/heads/master
|
karaage/people/emails.py
|
2
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.contrib.auth.tokens import default_token_generator
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from karaage.common import log
from karaage.common.emails import CONTEXT, send_mail
def render_email(name, context):
subject = render_to_string(
['karaage/emails/%s_subject.txt' % name,
'karaage/people/emails/%s_subject.txt' % name],
context)
subject = subject.replace('\n', '')
body = render_to_string(
['karaage/emails/%s_body.txt' % name,
'karaage/people/emails/%s_body.txt' % name],
context)
return subject, body
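# Illustrative only: render_email('reset_password', context) renders the first
# template found out of 'karaage/emails/reset_password_subject.txt' and
# 'karaage/people/emails/reset_password_subject.txt' (plus the matching
# *_body.txt templates), returning the (subject, body) pair used below.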
def send_bounced_warning(person, leader_list):
"""Sends an email to each project leader for person
informing them that person's email has bounced"""
context = CONTEXT.copy()
context['person'] = person
for lp in leader_list:
leader = lp['leader']
context['project'] = lp['project']
context['receiver'] = leader
to_email = leader.email
subject = render_to_string(
'karaage/people/emails/bounced_email_subject.txt', context)
body = render_to_string(
'karaage/people/emails/bounced_email_body.txt', context)
send_mail(
subject.replace('\n', ''), body,
settings.ACCOUNTS_EMAIL, [to_email])
log.change(
leader,
'Sent email about bounced emails from %s' % person)
def send_reset_password_email(person):
"""Sends an email to user allowing them to set their password."""
uid = urlsafe_base64_encode(force_bytes(person.pk))
token = default_token_generator.make_token(person)
url = '%s/persons/reset/%s/%s/' % (
settings.REGISTRATION_BASE_URL, uid, token)
context = CONTEXT.copy()
context.update({
'url': url,
'receiver': person,
})
to_email = person.email
subject, body = render_email('reset_password', context)
send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])
def send_confirm_password_email(person):
"""Sends an email to user allowing them to confirm their password."""
url = '%s/profile/login/%s/' % (
settings.REGISTRATION_BASE_URL, person.username)
context = CONTEXT.copy()
context.update({
'url': url,
'receiver': person,
})
to_email = person.email
subject, body = render_email('confirm_password', context)
send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])
|
openhatch/new-mini-tasks
|
refs/heads/master
|
vendor/packages/Django/django/contrib/gis/tests/geoapp/feeds.py
|
308
|
from __future__ import absolute_import
from django.contrib.gis import feeds
from .models import City
class TestGeoRSS1(feeds.Feed):
link = '/city/'
title = 'Test GeoDjango Cities'
def items(self):
return City.objects.all()
def item_link(self, item):
return '/city/%s/' % item.pk
def item_geometry(self, item):
return item.point
class TestGeoRSS2(TestGeoRSS1):
def geometry(self, obj):
        # This should attach a <georss:box> element for the extent of
        # the cities in the database. This tuple came from
# calling `City.objects.extent()` -- we can't do that call here
# because `extent` is not implemented for MySQL/Oracle.
return (-123.30, -41.32, 174.78, 48.46)
def item_geometry(self, item):
# Returning a simple tuple for the geometry.
return item.point.x, item.point.y
class TestGeoAtom1(TestGeoRSS1):
feed_type = feeds.GeoAtom1Feed
class TestGeoAtom2(TestGeoRSS2):
feed_type = feeds.GeoAtom1Feed
def geometry(self, obj):
# This time we'll use a 2-tuple of coordinates for the box.
return ((-123.30, -41.32), (174.78, 48.46))
class TestW3CGeo1(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
# The following feeds are invalid, and will raise exceptions.
class TestW3CGeo2(TestGeoRSS2):
feed_type = feeds.W3CGeoFeed
class TestW3CGeo3(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
def item_geometry(self, item):
from django.contrib.gis.geos import Polygon
return Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# The feed dictionary to use for URLs.
feed_dict = {
'rss1' : TestGeoRSS1,
'rss2' : TestGeoRSS2,
'atom1' : TestGeoAtom1,
'atom2' : TestGeoAtom2,
'w3cgeo1' : TestW3CGeo1,
'w3cgeo2' : TestW3CGeo2,
'w3cgeo3' : TestW3CGeo3,
}
|
Fusion-Rom/android_external_chromium_org
|
refs/heads/lp5.1
|
tools/telemetry/telemetry/core/platform/linux_platform_backend_unittest.py
|
27
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import decorators
from telemetry.core import util
from telemetry.core.platform import linux_platform_backend
class TestBackend(
linux_platform_backend.LinuxPlatformBackend):
def __init__(self):
super(TestBackend, self).__init__()
self._mock_files = {}
def SetMockFile(self, filename, output):
self._mock_files[filename] = output
def GetFileContents(self, filename):
return self._mock_files[filename]
def StartRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def StopRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def GetRawDisplayFrameRateMeasurements(self):
raise NotImplementedError()
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
raise NotImplementedError()
def StopVideoCapture(self):
raise NotImplementedError()
def StartVideoCapture(self, min_bitrate_mbps):
raise NotImplementedError()
def GetSystemTotalPhysicalMemory(self):
raise NotImplementedError()
class LinuxPlatformBackendTest(unittest.TestCase):
@decorators.Enabled('linux')
def testGetOSVersionNameSaucy(self):
backend = TestBackend()
path = os.path.join(util.GetUnittestDataDir(), 'ubuntu-saucy-lsb-release')
with open(path) as f:
backend.SetMockFile('/etc/lsb-release', f.read())
self.assertEqual(backend.GetOSVersionName(), 'saucy')
@decorators.Enabled('linux')
def testGetOSVersionNameArch(self):
backend = TestBackend()
path = os.path.join(util.GetUnittestDataDir(), 'arch-lsb-release')
with open(path) as f:
backend.SetMockFile('/etc/lsb-release', f.read())
# a distribution may not have a codename or a release number. We just check
# that GetOSVersionName doesn't raise an exception
backend.GetOSVersionName()
|
ekivemark/my_device
|
refs/heads/master
|
bbp/bbp/settings.py
|
1
|
"""
Django settings for bbp_oa project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
APPS_DIR = os.path.join(BASE_DIR, 'bbp/apps')
sys.path.insert(0, APPS_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Use this to review Settings at run time
DEBUG_SETTINGS = True
APPLICATION_TITLE="MedYear:Device"
if DEBUG_SETTINGS:
print "Application: %s" % APPLICATION_TITLE
print ""
print "BASE_DIR:%s " % BASE_DIR
print "APPS_DIR:%s " % APPS_DIR
ALLOWED_HOSTS = []
ADMINS = (
('Mark Scrimshire', 'mark@ekivemark.com'),
)
MANAGERS = ADMINS
# Application definition
INSTALLED_APPS = (
# add admin_bootstrapped items before django.contrib.admin
'django_admin_bootstrapped.bootstrap3',
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'bootstrap_themes',
# https://django-oauth2-provider.readthedocs.org/en/latest/getting_started.html
#'provider',
#'provider.oauth2',
# http://django-oauth-toolkit.readthedocs.org/en/latest/tutorial/tutorial_01.html
'oauth2_provider',
'corsheaders',
'rest_framework',
'device',
'bbp.member',
'bbp.member.vutils',
)
AUTHENTICATION_BACKENDS = (
'oauth2_provider.backends.OAuth2Backend',
# Uncomment following if you want to access the admin
'django.contrib.auth.backends.ModelBackend',
#'...',
)
# https://docs.djangoproject.com/en/1.7/topics/auth/customizing/#a-full-example
#AUTH_USER_MODEL = 'member.MyUser'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
)
# http://django-oauth-toolkit.readthedocs.org/en/latest/tutorial/tutorial_01.html
# Allow CORS requests from all domains (just for the scope of this tutorial):
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'bbp.urls'
WSGI_APPLICATION = 'bbp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DBPATH = os.path.join(BASE_DIR, 'db/db.db')
if DEBUG_SETTINGS:
print "DBPATH:",DBPATH
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': DBPATH, # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
# STATIC_ROOT = ''
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
if DEBUG_SETTINGS:
print "STATIC_ROOT:%s" % STATIC_ROOT
ADMIN_MEDIA_PREFIX = '/static/admin'
MAIN_STATIC_ROOT = os.path.join(BASE_DIR, 'mainstatic')
if DEBUG_SETTINGS:
print "MAIN_STATIC_ROOT:%s" % MAIN_STATIC_ROOT
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
MAIN_STATIC_ROOT,
# '/Users/mark/PycharmProjects/virtualenv/rb/rainbowbutton/static',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'bbp/templates'),
)
TEMPLATE_VISIBLE_SETTINGS = {
# Put Strings here that you want to be visible in the templates
# then add settings_context_processor
'DEBUG',
'TEMPLATE_DEBUG',
'APPLICATION_TITLE',
}
TEMPLATE_MODULES = {
# Put the names of custom modules in this section
# This will be used by home.index to display a list of modules
# that can be called
'privacy',
'about',
'contact',
'terms',
'faq',
'admin',
'member/view',
'member/get_id',
'accounts/logout',
'accounts/login',
}
TEMPLATE_CONTEXT_PROCESSORS = (
# Use a context processor to enable frequently used settings variables
# to be used in templates
'django.contrib.auth.context_processors.auth',
'bbp.settings_context_processor.settings',
)
# Default settings for bootstrap 3
BOOTSTRAP3 = {
# The URL to the jQuery JavaScript file
'jquery_url': '//code.jquery.com/jquery.min.js',
# The Bootstrap base URL
'base_url': '//netdna.bootstrapcdn.com/bootstrap/3.2.0/',
# The complete URL to the Bootstrap CSS file (None means derive it from base_url)
'css_url': None,
# The complete URL to the Bootstrap CSS file (None means no theme)
'theme_url': None,
# The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
'javascript_url': None,
# Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
'javascript_in_head': False,
# Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
'include_jquery': False,
# Label class to use in horizontal forms
'horizontal_label_class': 'col-md-2',
# Field class to use in horizontal forms
'horizontal_field_class': 'col-md-4',
# Set HTML required attribute on required fields
'set_required': True,
# Set placeholder attributes to label if no placeholder is provided
'set_placeholder': True,
# Class to indicate required (better to set this in your Django form)
'required_css_class': '',
# Class to indicate error (better to set this in your Django form)
'error_css_class': 'has-error',
# Class to indicate success, meaning the field has valid input (better to set this in your Django form)
'success_css_class': 'has-success',
# Renderers (only set these if you have studied the source and understand the inner workings)
'formset_renderers':{
'default': 'bootstrap3.renderers.FormsetRenderer',
},
'form_renderers': {
'default': 'bootstrap3.renderers.FormRenderer',
},
'field_renderers': {
'default': 'bootstrap3.renderers.FieldRenderer',
'inline': 'bootstrap3.renderers.InlineFieldRenderer',
},
}
# http://django-oauth-toolkit.readthedocs.org/en/latest/rest-framework/getting_started.html
OAUTH2_PROVIDER = {
# this is the list of available scopes
'SCOPES': {'read': 'Read scope', 'write': 'Write scope', 'groups': 'Access to your groups'}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.ext.rest_framework.OAuth2Authentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
}
# @login_required defaults to using settings.LOGIN_URL
# if login_url= is not defined
#LOGIN_URL='/member/login'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Validic Device Integration
# Organization ID and Token are set in local_settings.py
# V_Secure = True of False. Used to determine http: or https: prefix
V_SECURE = True
V_ORG_ID = 'fake_value'
V_ACCESS_TOKEN = 'fake_token'
V_SERVER = "api.validic.com"
# Optional port number
# V_PORT = 443
VALIDIC_API = "https://api.validic.com/v1/"
# Make this unique, and don't share it with anybody.
# A fake value is set here; it is overwritten by the value in local_settings.py
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fake_value'
# Load local settings that you want to keep private.
# Make sure local_settings.py is excluded from Git.
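# A typical local_settings.py (illustrative sketch only; the names mirror the
# placeholders defined above) would simply redefine the private values, e.g.:
#   SECRET_KEY = 'a long random string'
#   V_ORG_ID = 'your Validic organization id'
#   V_ACCESS_TOKEN = 'your Validic access token'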
try:
from local_settings import *
except Exception as e:
pass
if DEBUG_SETTINGS:
print "SECRET_KEY:%s" % SECRET_KEY
print "Validic_Org_ID:%s" % V_ORG_ID
print "================================================================"
# SECURITY WARNING: keep the secret key used in production secret!
|
koniiiik/django
|
refs/heads/master
|
django/contrib/messages/storage/cookie.py
|
116
|
import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import SimpleCookie
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.safestring import SafeData, mark_safe
class MessageEncoder(json.JSONEncoder):
"""
Compactly serializes instances of the ``Message`` class as JSON.
"""
message_key = '__json_message'
def default(self, obj):
if isinstance(obj, Message):
# Using 0/1 here instead of False/True to produce more compact json
is_safedata = 1 if isinstance(obj.message, SafeData) else 0
message = [self.message_key, is_safedata, obj.level, obj.message]
if obj.extra_tags:
message.append(obj.extra_tags)
return message
return super(MessageEncoder, self).default(obj)
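# Illustrative example (not part of the original module): with the compact
# separators used below in CookieStorage._encode, a Message with level 20
# (INFO), text "Hello" and extra_tags "tag" serializes roughly as
# '["__json_message",0,20,"Hello","tag"]'.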
class MessageDecoder(json.JSONDecoder):
"""
Decodes JSON that includes serialized ``Message`` instances.
"""
def process_messages(self, obj):
if isinstance(obj, list) and obj:
if obj[0] == MessageEncoder.message_key:
if len(obj) == 3:
# Compatibility with previously-encoded messages
return Message(*obj[1:])
if obj[1]:
obj[3] = mark_safe(obj[3])
return Message(*obj[2:])
return [self.process_messages(item) for item in obj]
if isinstance(obj, dict):
return {key: self.process_messages(value)
for key, value in six.iteritems(obj)}
return obj
def decode(self, s, **kwargs):
decoded = super(MessageDecoder, self).decode(s, **kwargs)
return self.process_messages(decoded)
class CookieStorage(BaseStorage):
"""
Stores messages in a cookie.
"""
cookie_name = 'messages'
# uwsgi's default configuration enforces a maximum size of 4kb for all the
# HTTP headers. In order to leave some room for other cookies and headers,
# restrict the session cookie to 1/2 of 4kb. See #18781.
max_cookie_size = 2048
not_finished = '__messagesnotfinished__'
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either sets the cookie with the encoded data if there is any data to
store, or deletes the cookie.
"""
if encoded_data:
response.set_cookie(
self.cookie_name, encoded_data,
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
)
else:
response.delete_cookie(self.cookie_name, domain=settings.SESSION_COOKIE_DOMAIN)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Stores the messages to a cookie, returning a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, removes
messages until the data fits (these are the messages which are
        returned), and adds the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by SimpleCookie, which
# adds its own overhead, which we must account for.
cookie = SimpleCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages
def _hash(self, value):
"""
Creates an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest()
def _encode(self, messages, encode_empty=False):
"""
Returns an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
return '%s$%s' % (self._hash(value), value)
def _decode(self, data):
"""
Safely decodes an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, ``None`` is returned.
"""
if not data:
return None
bits = data.split('$', 1)
if len(bits) == 2:
hash, value = bits
if constant_time_compare(hash, self._hash(value)):
try:
# If we get here (and the JSON decode works), everything is
# good. In any other case, drop back and return None.
return json.loads(value, cls=MessageDecoder)
except ValueError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
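# Shape of a stored cookie value (illustration only, hash shortened): the HMAC
# hex digest and the compact JSON payload are joined by '$', e.g.
# 'f2a1...$[["__json_message",0,25,"Profile saved."]]'; _decode() recomputes
# the hash over the JSON part and compares it in constant time before parsing.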
|
fzimmermann89/pyload
|
refs/heads/stable
|
module/plugins/accounts/DebridItaliaCom.py
|
1
|
# -*- coding: utf-8 -*-
import re
import time
from module.plugins.internal.MultiAccount import MultiAccount
class DebridItaliaCom(MultiAccount):
__name__ = "DebridItaliaCom"
__type__ = "account"
__version__ = "0.18"
__status__ = "testing"
__config__ = [("mh_mode" , "all;listed;unlisted", "Filter hosters to use" , "all"),
("mh_list" , "str" , "Hoster list (comma separated)", "" ),
("mh_interval", "int" , "Reload interval in minutes" , 60 )]
__description__ = """Debriditalia.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it"),
("Walter Purcaro", "vuolter@gmail.com")]
WALID_UNTIL_PATTERN = r'Premium valid till: (.+?) \|'
def grab_hosters(self, user, password, data):
return self.load("http://debriditalia.com/api.php", get={'hosts': ""}).replace('"', '').split(',')
def grab_info(self, user, password, data):
info = {'premium': False, 'validuntil': None, 'trafficleft': None}
html = self.load("http://debriditalia.com/")
if 'Account premium not activated' not in html:
m = re.search(self.WALID_UNTIL_PATTERN, html)
if m is not None:
validuntil = time.mktime(time.strptime(m.group(1), "%d/%m/%Y %H:%M"))
info = {'premium': True, 'validuntil': validuntil, 'trafficleft': -1}
else:
self.log_error(_("Unable to retrieve account information"))
return info
def signin(self, user, password, data):
html = self.load("https://debriditalia.com/login.php",
get={'u': user,
'p': password})
if 'NO' in html:
self.fail_login()
|
KingPsychopath/namebench
|
refs/heads/master
|
nb_third_party/dns/rdtypes/ANY/NSEC3PARAM.py
|
248
|
# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.rdata
class NSEC3PARAM(dns.rdata.Rdata):
"""NSEC3PARAM record
@ivar algorithm: the hash algorithm number
@type algorithm: int
@ivar flags: the flags
@type flags: int
@ivar iterations: the number of iterations
@type iterations: int
@ivar salt: the salt
@type salt: string"""
__slots__ = ['algorithm', 'flags', 'iterations', 'salt']
def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
super(NSEC3PARAM, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.flags = flags
self.iterations = iterations
self.salt = salt
def to_text(self, origin=None, relativize=True, **kw):
if self.salt == '':
salt = '-'
else:
salt = self.salt.encode('hex-codec')
return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations, salt)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
algorithm = tok.get_uint8()
flags = tok.get_uint8()
iterations = tok.get_uint16()
salt = tok.get_string()
if salt == '-':
salt = ''
else:
salt = salt.decode('hex-codec')
return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.salt)
file.write(struct.pack("!BBHB", self.algorithm, self.flags,
self.iterations, l))
file.write(self.salt)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
wire[current : current + 5])
current += 5
rdlen -= 5
salt = wire[current : current + slen]
current += slen
rdlen -= slen
if rdlen != 0:
raise dns.exception.FormError
return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
from_wire = classmethod(from_wire)
def _cmp(self, other):
b1 = cStringIO.StringIO()
self.to_wire(b1)
b2 = cStringIO.StringIO()
other.to_wire(b2)
return cmp(b1.getvalue(), b2.getvalue())
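# Presentation-format example (illustrative, not from the original file):
# algorithm=1 (SHA-1), flags=0, iterations=10 and a salt of '\xaa\xbb\xcc'
# render via to_text() as "1 0 10 aabbcc"; an empty salt renders as "-".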
|
xxsergzzxx/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/spread/ui/tkutil.py
|
81
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Utilities for building L{PB<twisted.spread.pb>} clients with L{Tkinter}.
"""
from Tkinter import *
from tkSimpleDialog import _QueryString
from tkFileDialog import _Dialog
from twisted.spread import pb
from twisted.internet import reactor
from twisted import copyright
import string
#normalFont = Font("-adobe-courier-medium-r-normal-*-*-120-*-*-m-*-iso8859-1")
#boldFont = Font("-adobe-courier-bold-r-normal-*-*-120-*-*-m-*-iso8859-1")
#errorFont = Font("-adobe-courier-medium-o-normal-*-*-120-*-*-m-*-iso8859-1")
class _QueryPassword(_QueryString):
def body(self, master):
w = Label(master, text=self.prompt, justify=LEFT)
w.grid(row=0, padx=5, sticky=W)
self.entry = Entry(master, name="entry",show="*")
self.entry.grid(row=1, padx=5, sticky=W+E)
if self.initialvalue:
self.entry.insert(0, self.initialvalue)
self.entry.select_range(0, END)
return self.entry
def askpassword(title, prompt, **kw):
'''get a password from the user
@param title: the dialog title
@param prompt: the label text
@param **kw: see L{SimpleDialog} class
@returns: a string
'''
d = apply(_QueryPassword, (title, prompt), kw)
return d.result
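# Usage sketch for the prompt above (illustrative only; requires an existing
# Tk root window):
#   root = Tk(); root.withdraw()
#   secret = askpassword("Login", "Password:")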
def grid_setexpand(widget):
cols,rows=widget.grid_size()
for i in range(cols):
widget.columnconfigure(i,weight=1)
for i in range(rows):
widget.rowconfigure(i,weight=1)
class CList(Frame):
def __init__(self,parent,labels,disablesorting=0,**kw):
Frame.__init__(self,parent)
self.labels=labels
self.lists=[]
self.disablesorting=disablesorting
kw["exportselection"]=0
for i in range(len(labels)):
b=Button(self,text=labels[i],anchor=W,height=1,pady=0)
b.config(command=lambda s=self,i=i:s.setSort(i))
b.grid(column=i,row=0,sticky=N+E+W)
box=apply(Listbox,(self,),kw)
box.grid(column=i,row=1,sticky=N+E+S+W)
self.lists.append(box)
grid_setexpand(self)
self.rowconfigure(0,weight=0)
self._callall("bind",'<Button-1>',self.Button1)
self._callall("bind",'<B1-Motion>',self.Button1)
self.bind('<Up>',self.UpKey)
self.bind('<Down>',self.DownKey)
self.sort=None
def _callall(self,funcname,*args,**kw):
rets=[]
for l in self.lists:
func=getattr(l,funcname)
ret=apply(func,args,kw)
if ret!=None: rets.append(ret)
if rets: return rets
def Button1(self,e):
index=self.nearest(e.y)
self.select_clear(0,END)
self.select_set(index)
self.activate(index)
return "break"
def UpKey(self,e):
index=self.index(ACTIVE)
if index:
self.select_clear(0,END)
self.select_set(index-1)
return "break"
def DownKey(self,e):
index=self.index(ACTIVE)
if index!=self.size()-1:
self.select_clear(0,END)
self.select_set(index+1)
return "break"
def setSort(self,index):
if self.sort==None:
self.sort=[index,1]
elif self.sort[0]==index:
self.sort[1]=-self.sort[1]
else:
self.sort=[index,1]
self._sort()
def _sort(self):
if self.disablesorting:
return
if self.sort==None:
return
ind,direc=self.sort
li=list(self.get(0,END))
li.sort(lambda x,y,i=ind,d=direc:d*cmp(x[i],y[i]))
self.delete(0,END)
for l in li:
self._insert(END,l)
def activate(self,index):
self._callall("activate",index)
# def bbox(self,index):
# return self._callall("bbox",index)
def curselection(self):
return self.lists[0].curselection()
def delete(self,*args):
apply(self._callall,("delete",)+args)
def get(self,*args):
bad=apply(self._callall,("get",)+args)
if len(args)==1:
return bad
ret=[]
for i in range(len(bad[0])):
r=[]
for j in range(len(bad)):
r.append(bad[j][i])
ret.append(r)
return ret
def index(self,index):
return self.lists[0].index(index)
def insert(self,index,items):
self._insert(index,items)
self._sort()
def _insert(self,index,items):
for i in range(len(items)):
self.lists[i].insert(index,items[i])
def nearest(self,y):
return self.lists[0].nearest(y)
def see(self,index):
self._callall("see",index)
def size(self):
return self.lists[0].size()
def selection_anchor(self,index):
self._callall("selection_anchor",index)
select_anchor=selection_anchor
def selection_clear(self,*args):
apply(self._callall,("selection_clear",)+args)
select_clear=selection_clear
def selection_includes(self,index):
return self.lists[0].select_includes(index)
select_includes=selection_includes
def selection_set(self,*args):
apply(self._callall,("selection_set",)+args)
select_set=selection_set
def xview(self,*args):
if not args: return self.lists[0].xview()
apply(self._callall,("xview",)+args)
def yview(self,*args):
if not args: return self.lists[0].yview()
apply(self._callall,("yview",)+args)
class ProgressBar:
def __init__(self, master=None, orientation="horizontal",
min=0, max=100, width=100, height=18,
doLabel=1, appearance="sunken",
fillColor="blue", background="gray",
labelColor="yellow", labelFont="Verdana",
labelText="", labelFormat="%d%%",
value=0, bd=2):
# preserve various values
self.master=master
self.orientation=orientation
self.min=min
self.max=max
self.width=width
self.height=height
self.doLabel=doLabel
self.fillColor=fillColor
self.labelFont= labelFont
self.labelColor=labelColor
self.background=background
self.labelText=labelText
self.labelFormat=labelFormat
self.value=value
self.frame=Frame(master, relief=appearance, bd=bd)
self.canvas=Canvas(self.frame, height=height, width=width, bd=0,
highlightthickness=0, background=background)
self.scale=self.canvas.create_rectangle(0, 0, width, height,
fill=fillColor)
self.label=self.canvas.create_text(self.canvas.winfo_reqwidth() / 2,
height / 2, text=labelText,
anchor="c", fill=labelColor,
font=self.labelFont)
self.update()
self.canvas.pack(side='top', fill='x', expand='no')
def updateProgress(self, newValue, newMax=None):
if newMax:
self.max = newMax
self.value = newValue
self.update()
def update(self):
# Trim the values to be between min and max
value=self.value
if value > self.max:
value = self.max
if value < self.min:
value = self.min
# Adjust the rectangle
if self.orientation == "horizontal":
self.canvas.coords(self.scale, 0, 0,
float(value) / self.max * self.width, self.height)
else:
self.canvas.coords(self.scale, 0,
self.height - (float(value) /
self.max*self.height),
self.width, self.height)
# Now update the colors
self.canvas.itemconfig(self.scale, fill=self.fillColor)
self.canvas.itemconfig(self.label, fill=self.labelColor)
# And update the label
if self.doLabel:
if value:
if value >= 0:
pvalue = int((float(value) / float(self.max)) *
100.0)
else:
pvalue = 0
self.canvas.itemconfig(self.label, text=self.labelFormat
% pvalue)
else:
self.canvas.itemconfig(self.label, text='')
else:
self.canvas.itemconfig(self.label, text=self.labelFormat %
self.labelText)
self.canvas.update_idletasks()
class DirectoryBrowser(_Dialog):
command = "tk_chooseDirectory"
def askdirectory(**options):
"Ask for a directory to save to."
return apply(DirectoryBrowser, (), options).show()
class GenericLogin(Toplevel):
def __init__(self,callback,buttons):
Toplevel.__init__(self)
self.callback=callback
Label(self,text="Twisted v%s"%copyright.version).grid(column=0,row=0,columnspan=2)
self.entries={}
row=1
for stuff in buttons:
label,value=stuff[:2]
if len(stuff)==3:
dict=stuff[2]
else: dict={}
Label(self,text=label+": ").grid(column=0,row=row)
e=apply(Entry,(self,),dict)
e.grid(column=1,row=row)
e.insert(0,value)
self.entries[label]=e
row=row+1
Button(self,text="Login",command=self.doLogin).grid(column=0,row=row)
Button(self,text="Cancel",command=self.close).grid(column=1,row=row)
self.protocol('WM_DELETE_WINDOW',self.close)
def close(self):
self.tk.quit()
self.destroy()
def doLogin(self):
values={}
for k in self.entries.keys():
values[string.lower(k)]=self.entries[k].get()
self.callback(values)
self.destroy()
class Login(Toplevel):
def __init__(self,
callback,
referenced = None,
initialUser = "guest",
initialPassword = "guest",
initialHostname = "localhost",
initialService = "",
initialPortno = pb.portno):
Toplevel.__init__(self)
version_label = Label(self,text="Twisted v%s" % copyright.version)
self.pbReferenceable = referenced
self.pbCallback = callback
# version_label.show()
self.username = Entry(self)
self.password = Entry(self,show='*')
self.hostname = Entry(self)
self.service = Entry(self)
self.port = Entry(self)
self.username.insert(0,initialUser)
self.password.insert(0,initialPassword)
self.service.insert(0,initialService)
self.hostname.insert(0,initialHostname)
self.port.insert(0,str(initialPortno))
userlbl=Label(self,text="Username:")
passlbl=Label(self,text="Password:")
servicelbl=Label(self,text="Service:")
hostlbl=Label(self,text="Hostname:")
portlbl=Label(self,text="Port #:")
self.logvar=StringVar()
self.logvar.set("Protocol PB-%s"%pb.Broker.version)
self.logstat = Label(self,textvariable=self.logvar)
self.okbutton = Button(self,text="Log In", command=self.login)
version_label.grid(column=0,row=0,columnspan=2)
z=0
for i in [[userlbl,self.username],
[passlbl,self.password],
[hostlbl,self.hostname],
[servicelbl,self.service],
[portlbl,self.port]]:
i[0].grid(column=0,row=z+1)
i[1].grid(column=1,row=z+1)
z = z+1
self.logstat.grid(column=0,row=6,columnspan=2)
self.okbutton.grid(column=0,row=7,columnspan=2)
self.protocol('WM_DELETE_WINDOW',self.tk.quit)
def loginReset(self):
self.logvar.set("Idle.")
def loginReport(self, txt):
self.logvar.set(txt)
self.after(30000, self.loginReset)
def login(self):
host = self.hostname.get()
port = self.port.get()
service = self.service.get()
try:
port = int(port)
except:
pass
user = self.username.get()
pswd = self.password.get()
pb.connect(host, port, user, pswd, service,
client=self.pbReferenceable).addCallback(self.pbCallback).addErrback(
self.couldNotConnect)
def couldNotConnect(self,f):
self.loginReport("could not connect:"+f.getErrorMessage())
if __name__=="__main__":
root=Tk()
o=CList(root,["Username","Online","Auto-Logon","Gateway"])
o.pack()
for i in range(0,16,4):
o.insert(END,[i,i+1,i+2,i+3])
mainloop()
|
arielrossanigo/pycamp_scheduling
|
refs/heads/master
|
example_generation.py
|
1
|
import json
import random
by_project_number_of_votes = {
"Programar, que es eso?": 15,
"Vis de redes neuronales": 3,
"Mirror Pypi": 12,
"Linkode": 5,
"Recordium": 6,
"Radio to podcast": 3,
"Choppycamp": 12,
"LabJM": 2,
"Pilas": 10,
"Cuentos a epub": 3,
"Web PyAr": 8,
"Moravec": 4,
"Raspi": 2,
"Fades": 5,
"PyCamp voting manager": 9,
"Easy Camp": 7,
"Metaprogramacion": 4,
"Encajonar apps": 6
}
input_example = {
"available_slots": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"responsable_available_slots": {
"Matias": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"David": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Ariel": ["A3", "A4", "A5", "B2", "B3", "B4", "B5"],
"Fisa": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Gilgamezh": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Facu": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Diego": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Agustin": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Manuq": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Litox": ["B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Mario": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Zoe": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"],
"Luri": ["A3", "A4", "A5", "B2", "B3", "B4", "B5", "C1", "C2", "C3", "C4"]
},
"projects": {
"Programar, que es eso?": {"responsables": ["Matias", "David"], "priority_slots": ['A3']},
"Vis de redes neuronales": {"responsables": ["Ariel"]},
"Mirror Pypi": {"responsables": ["Fisa", "Gilgamezh"]},
"Linkode": {"responsables": ["Facu", "Matias"]},
"Recordium": {"responsables": ["Facu"]},
"Radio to podcast": {"responsables": ["Diego"]},
"Choppycamp": {"responsables": ["Fisa"]},
"LabJM": {"responsables": ["Agustin"]},
"Pilas": {"responsables": ["Manuq"]},
"Cuentos a epub": {"responsables": ["Diego"]},
"Web PyAr": {"responsables": ["Litox"]},
"Moravec": {"responsables": ["Mario"]},
"Raspi": {"responsables": ["David"]},
"Fades": {"responsables": ["Facu", "Gilgamezh"]},
"PyCamp voting manager": {"responsables": ["Zoe"]},
"Easy Camp": {"responsables": ["Matias", "Luri"]},
"Metaprogramacion": {"responsables": ["David"]},
"Encajonar apps": {"responsables": ["Manuq"]},
},
}
attendes = (list("ABCDEFGHIJKLMNOPQRSTUVWXYZ") +
list(set(input_example["responsable_available_slots"].keys())))
themes = ['web', 'desktop', 'mobile', 'ia', 'hardware', 'comunity', '']
difficult_levels = [1, 2, 3]
output_example = {}
slots = input_example['available_slots']
for i, (project_name, project) in enumerate(input_example["projects"].items()):
project['votes'] = random.sample(attendes, by_project_number_of_votes[project_name])
project['difficult_level'] = random.choice(difficult_levels)
project['theme'] = random.choice(themes)
if "priority_slots" not in project:
project["priority_slots"] = []
output_example[project_name] = slots[i % len(slots)]
json.dump(input_example, open('input_example.json', 'wt'), indent=2)
json.dump(output_example, open('output_example.json', 'wt'), indent=2)
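# Resulting files (illustration): input_example.json holds the full problem
# description, while output_example.json maps each project name to a single
# slot, e.g. {"Linkode": "A4", ...}; slots are assigned round-robin here,
# not optimized.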
|
li77leprince/shadowsocks
|
refs/heads/master
|
shadowsocks/asyncdns.py
|
655
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
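# Worked example (illustrative, not from the original source): the 12-byte
# response header b'\x12\x34\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00'
# unpacks via struct.unpack('!HBBHHHH', ...) into id=0x1234, flag bytes
# 0x81/0x80 (QR=1, RD=1, RA=1, RCODE=0), QDCOUNT=1, ANCOUNT=1, NSCOUNT=0
# and ARCOUNT=0, i.e. exactly the fields parse_header() extracts below.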
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
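# Example of the wire format produced above (illustration only):
# build_address(b'www.example.com') == b'\x03www\x07example\x03com\x00',
# i.e. length-prefixed labels terminated by a zero byte (RFC 1035 section 3.1).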
def build_request(address, qtype):
request_id = os.urandom(2)
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
if hostname[-1] == b'.':
hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
def __init__(self, server_list=None):
self._loop = None
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._sock = None
if server_list is None:
self._servers = None
self._parse_resolv()
else:
self._servers = server_list
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
if line.startswith(b'nameserver'):
parts = line.split()
if len(parts) >= 2:
server = parts[1]
if common.is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append(server)
except IOError:
pass
if not self._servers:
self._servers = ['8.8.4.4', '8.8.8.8']
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if common.is_ip(ip):
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
raise Exception('already add to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN, self)
loop.add_periodic(self.handle_periodic)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unknown hostname %s' % hostname))
if hostname in self._hostname_to_cb:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _handle_data(self, data):
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
== STATUS_IPV4:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
for question in response.questions:
if question[1] == QTYPE_AAAA:
self._call_callback(hostname, None)
break
def handle_event(self, sock, fd, event):
if sock != self._sock:
return
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN, self)
else:
data, addr = sock.recvfrom(1024)
if addr[0] not in self._servers:
logging.warn('received a packet other than our dns')
return
self._handle_data(data)
def handle_periodic(self):
self._cache.sweep()
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
req = build_request(hostname, qtype)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, (server, 53))
def resolve(self, hostname, callback):
if type(hostname) != bytes:
hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif common.is_ip(hostname):
callback((hostname, hostname), None)
elif hostname in self._hosts:
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
self._send_req(hostname, QTYPE_A)
def close(self):
if self._sock:
if self._loop:
self._loop.remove_periodic(self.handle_periodic)
self._loop.remove(self._sock)
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
if counter == 9:
dns_resolver.close()
loop.stop()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
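# Minimal usage sketch (illustrative only): create a DNSResolver(), attach it
# to an eventloop.EventLoop() with add_to_loop(), then call
# resolve(b'example.com', callback); the callback receives ((hostname, ip),
# error), as exercised by test() above.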
|
sontek/rethinkdb
|
refs/heads/next
|
test/rql_test/connections/http_support/werkzeug/testsuite/compat.py
|
146
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.compat
~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure that old stuff does not break on update.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
import warnings
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import Response
from werkzeug.test import create_environ
class CompatTestCase(WerkzeugTestCase):
def test_old_imports(self):
from werkzeug.utils import Headers, MultiDict, CombinedMultiDict, \
Headers, EnvironHeaders
from werkzeug.http import Accept, MIMEAccept, CharsetAccept, \
LanguageAccept, ETags, HeaderSet, WWWAuthenticate, \
Authorization
def test_exposed_werkzeug_mod(self):
import werkzeug
for key in werkzeug.__all__:
# deprecated, skip it
if key in ('templates', 'Template'):
continue
getattr(werkzeug, key)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CompatTestCase))
return suite
|
andresailer/DIRAC
|
refs/heads/rel-v6r22
|
Interfaces/API/test/Test_JobAPI.py
|
5
|
""" Basic unit tests for the Job API
"""
__RCSID__ = "$Id$"
import StringIO
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
def test_basicJob():
job = Job()
job.setOwner('ownerName')
job.setOwnerGroup('ownerGroup')
job.setName('jobName')
job.setJobGroup('jobGroup')
job.setExecutable('someExe')
job.setType('jobType')
job.setDestination('ANY')
xml = job._toXML()
try:
with open('./DIRAC/Interfaces/API/test/testWF.xml') as fd:
expected = fd.read()
except IOError:
with open('./Interfaces/API/test/testWF.xml') as fd:
expected = fd.read()
assert xml == expected
try:
with open('./DIRAC/Interfaces/API/test/testWFSIO.jdl') as fd:
expected = fd.read()
except IOError:
with open('./Interfaces/API/test/testWFSIO.jdl') as fd:
expected = fd.read()
jdlSIO = job._toJDL(jobDescriptionObject=StringIO.StringIO(job._toXML()))
assert jdlSIO == expected
def test_SimpleParametricJob():
job = Job()
job.setExecutable('myExec')
job.setLogLevel('DEBUG')
parList = [1, 2, 3]
job.setParameterSequence('JOB_ID', parList, addToWorkflow=True)
inputDataList = [
[
'/lhcb/data/data1',
'/lhcb/data/data2'
],
[
'/lhcb/data/data3',
'/lhcb/data/data4'
],
[
'/lhcb/data/data5',
'/lhcb/data/data6'
]
]
job.setParameterSequence('InputData', inputDataList, addToWorkflow=True)
jdl = job._toJDL()
try:
with open('./DIRAC/Interfaces/API/test/testWF.jdl') as fd:
expected = fd.read()
except IOError:
with open('./Interfaces/API/test/testWF.jdl') as fd:
expected = fd.read()
assert jdl == expected
clad = ClassAd('[' + jdl + ']')
arguments = clad.getAttributeString('Arguments')
job_id = clad.getAttributeString('JOB_ID')
inputData = clad.getAttributeString('InputData')
assert job_id == '%(JOB_ID)s'
assert inputData == '%(InputData)s'
assert 'jobDescription.xml' in arguments
assert '-o LogLevel=DEBUG' in arguments
  assert '-p JOB_ID=%(JOB_ID)s' in arguments
  assert '-p InputData=%(InputData)s' in arguments
|