repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
davy39/eric | refs/heads/master | eric6_tray.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2006 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Eric6 Tray.
This is the main Python script that performs the necessary initialization
of the system-tray application. This acts as a quickstarter by providing a
context menu to start the eric6 IDE and the eric6 tools.
"""
from __future__ import unicode_literals
import Toolbox.PyQt4ImportHook # __IGNORE_WARNING__
try: # Only for Py2
import Utilities.compatibility_fixes # __IGNORE_WARNING__
except (ImportError):
pass
import sys

# Pre-parse the command line for a "--config=<dir>" option before any other
# startup work: it selects the configuration directory and must be removed
# from sys.argv so later argument handling does not see it.  Only the first
# occurrence is honoured (note the break).
for arg in sys.argv:
    if arg.startswith("--config="):
        # Import lazily: Globals is only needed when the option is present.
        import Globals
        configDir = arg.replace("--config=", "")
        Globals.setConfigDir(configDir)
        sys.argv.remove(arg)
        break

from Globals import AppInfo
from Toolbox import Startup
def createMainWidget(argv):
    """
    Create and return the application's main widget.

    @param argv list of commandline parameters (list of strings)
    @return reference to the main widget (QWidget)
    """
    # Imported lazily so the tray starter module is only loaded when the
    # widget is actually created.
    from Tools.TrayStarter import TrayStarter
    starter = TrayStarter()
    return starter
def main():
    """
    Main entry point into the application.

    Builds the application info object, launches the tray application via
    the common startup helper and exits the process with its result code.
    """
    options = [
        ("--config=configDir",
         "use the given directory as the one containing the config files"),
    ]
    appinfo = AppInfo.makeAppInfo(
        sys.argv,
        "Eric6 Tray",
        "",
        "Traystarter for eric6",
        options)
    startupResult = Startup.simpleAppStartup(
        sys.argv,
        appinfo,
        createMainWidget,
        quitOnLastWindowClosed=False,
        raiseIt=False)
    sys.exit(startupResult)


if __name__ == '__main__':
    main()
|
prakritish/ansible | refs/heads/devel | lib/ansible/plugins/lookup/shelvefile.py | 89 | # (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import shelve
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
class LookupModule(LookupBase):
    """Lookup plugin that reads values out of Python shelve files.

    Each term has the form ``file=<path> key=<key>``.  The file is located
    via the standard role/playbook file search path.
    """

    def read_shelve(self, shelve_filename, key):
        """
        Read the value of "key" from a shelve file.

        :param shelve_filename: path of the shelve file to open
        :param key: key to look up
        :return: the stored value, or None if the key is absent
        """
        d = shelve.open(to_bytes(shelve_filename))
        try:
            res = d.get(key, None)
        finally:
            # Always release the shelve handle, even if the lookup raises.
            d.close()
        return res

    def run(self, terms, variables=None, **kwargs):
        """
        Resolve lookup terms against shelve files.

        :param terms: one term or a list of ``file=... key=...`` terms
        :param variables: ansible variables used for file path resolution
        :return: list with the looked-up value converted to text
        :raises AnsibleError: on malformed terms, missing files or keys
        """
        if not isinstance(terms, list):
            terms = [terms]

        ret = []

        for term in terms:
            paramvals = {"file": None, "key": None}
            params = term.split()

            try:
                for param in params:
                    name, value = param.split('=')
                    # Only "file" and "key" are legal parameter names.
                    assert(name in paramvals)
                    paramvals[name] = value
            except (ValueError, AssertionError) as e:
                # In case "file" or "key" are not present
                raise AnsibleError(e)

            key = paramvals['key']

            # Search also in the role/files directory and in the playbook directory
            shelvefile = self.find_file_in_search_path(
                variables, 'files', paramvals['file'])

            if shelvefile:
                res = self.read_shelve(shelvefile, key)
                if res is None:
                    # BUG FIX: the message previously interpolated the
                    # builtin ``file`` instead of the requested file name.
                    raise AnsibleError(
                        "Key %s not found in shelve file %s"
                        % (key, paramvals['file']))
                # Convert the value read to string
                ret.append(to_text(res))
                # Historical behaviour: only the first resolvable term is
                # processed; preserved for backward compatibility.
                break
            else:
                # BUG FIX: same builtin-``file`` reference as above.
                raise AnsibleError(
                    "Could not locate shelve file in lookup: %s"
                    % paramvals['file'])

        return ret
|
DebrahR/lab4 | refs/heads/master | server/lib/flask/exthook.py | 783 | # -*- coding: utf-8 -*-
"""
flask.exthook
~~~~~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
This is used by `flask.ext`.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
from ._compat import reraise
class ExtensionImporter(object):
    """This importer redirects imports from this submodule to other locations.

    This makes it possible to transition from the old flaskext.name to the
    newer flask_name without people having a hard time.
    """

    def __init__(self, module_choices, wrapper_module):
        # module_choices: ordered '%s'-style templates tried when resolving
        # a redirected import, e.g. ['flask_%s', 'flaskext.%s'].
        # wrapper_module: dotted name of the facade package, e.g. 'flask.ext'.
        self.module_choices = module_choices
        self.wrapper_module = wrapper_module
        self.prefix = wrapper_module + '.'
        # Index of the first dotted-name component *after* the wrapper
        # module, i.e. the extension's own name.
        self.prefix_cutoff = wrapper_module.count('.') + 1

    def __eq__(self, other):
        # Equal when the other object is the same importer class (compared
        # by module + class name, so reloads still match) with the same
        # configuration.  Used by install() to stay idempotent.
        return self.__class__.__module__ == other.__class__.__module__ and \
            self.__class__.__name__ == other.__class__.__name__ and \
            self.wrapper_module == other.wrapper_module and \
            self.module_choices == other.module_choices

    def __ne__(self, other):
        return not self.__eq__(other)

    def install(self):
        # Register on sys.meta_path, first removing any equal instance so
        # repeated installs do not accumulate duplicate hooks.
        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]

    def find_module(self, fullname, path=None):
        # PEP 302 finder hook: claim every import below the wrapper prefix
        # (e.g. 'flask.ext.foo'); return None implicitly for anything else.
        if fullname.startswith(self.prefix):
            return self

    def load_module(self, fullname):
        # PEP 302 loader hook: resolve 'wrapper.foo' by trying each module
        # choice in order until one imports successfully.
        if fullname in sys.modules:
            return sys.modules[fullname]
        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
        for path in self.module_choices:
            realname = path % modname
            try:
                __import__(realname)
            except ImportError:
                exc_type, exc_value, tb = sys.exc_info()
                # Since we only establish the entry in sys.modules at the
                # very end, this pop looks redundant, but if recursive
                # imports happen we will call into the same import a second
                # time.  On the second invocation we still don't have an
                # entry for fullname in sys.modules, but we will end up with
                # the same fake module name and that import will succeed
                # since this one already has a temporary entry in the
                # modules dict.  Since that one "succeeded" temporarily,
                # the second invocation will have created a fullname entry
                # in sys.modules which we have to kill.
                sys.modules.pop(fullname, None)

                # If it's an important traceback we reraise it, otherwise
                # we swallow it and try the next choice.  The skipped frame
                # is the one from __import__ above which we don't care about.
                if self.is_important_traceback(realname, tb):
                    reraise(exc_type, exc_value, tb.tb_next)
                continue
            # Alias the real module under the wrapper name so both names
            # refer to the same module object.
            module = sys.modules[fullname] = sys.modules[realname]
            if '.' not in modname:
                # Expose top-level extensions as attributes of the wrapper
                # module so attribute access works in addition to import.
                setattr(sys.modules[self.wrapper_module], modname, module)
            return module
        raise ImportError('No module named %s' % fullname)

    def is_important_traceback(self, important_module, tb):
        """Walks a traceback's frames and checks if any of the frames
        originated in the given important module.  If that is the case then we
        were able to import the module itself but apparently something went
        wrong when the module was imported.  (Eg: import of an import failed).
        """
        while tb is not None:
            if self.is_important_frame(important_module, tb):
                return True
            tb = tb.tb_next
        return False

    def is_important_frame(self, important_module, tb):
        """Checks a single frame if it's important."""
        g = tb.tb_frame.f_globals
        if '__name__' not in g:
            return False
        module_name = g['__name__']

        # Python 2.7 behavior: modules are cleaned up late so the
        # name shows up properly here.  Success!
        if module_name == important_module:
            return True

        # Some Python versions will clean up modules so early that the
        # module name at that point is no longer set.  Try guessing from
        # the filename then.
        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
        test_string = os.path.sep + important_module.replace('.', os.path.sep)
        return test_string + '.py' in filename or \
            test_string + os.path.sep + '__init__.py' in filename
|
mdblv2/joatu-django | refs/heads/master | application/site-packages/django/contrib/webdesign/lorem_ipsum.py | 230 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
from __future__ import unicode_literals
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
    """
    Return a randomly generated sentence of lorem ipsum text.

    The first word is capitalized, and the sentence ends in either a period
    or question mark.  Commas are added at random.
    """
    # A sentence is 1-5 comma-separated sections; each section holds 3-12
    # distinct random words.
    section_count = random.randint(1, 5)
    sections = []
    for _ in range(section_count):
        word_count = random.randint(3, 12)
        sections.append(' '.join(random.sample(WORDS, word_count)))
    body = ', '.join(sections)
    # Capitalize the first character and append end punctuation.
    return ''.join([body[0].upper(), body[1:], random.choice('?.')])
def paragraph():
    """
    Return a randomly generated paragraph of lorem ipsum text.

    The paragraph consists of between 1 and 4 sentences, inclusive.
    """
    sentence_total = random.randint(1, 4)
    return ' '.join(sentence() for _ in range(sentence_total))
def paragraphs(count, common=True):
    """
    Return a list of `count` paragraphs as produced by paragraph().

    If `common` is True, the first paragraph is the standard 'lorem ipsum'
    paragraph.  Otherwise the first paragraph is random Latin text.  Either
    way, subsequent paragraphs are random Latin text.
    """
    # The fixed paragraph only occupies slot 0, and only when requested.
    leading = [COMMON_P] if common and count > 0 else []
    return leading + [paragraph() for _ in range(count - len(leading))]
def words(count, common=True):
    """
    Return a string of `count` lorem ipsum words separated by single spaces.

    If `common` is True, the first 19 words are the standard 'lorem ipsum'
    words.  Otherwise, all words are selected randomly.
    """
    word_list = list(COMMON_WORDS) if common else []
    if count > len(word_list):
        # Top up with random samples; each sample may take at most
        # len(WORDS) distinct words, so loop until enough are collected.
        remaining = count - len(word_list)
        while remaining > 0:
            batch = min(remaining, len(WORDS))
            word_list.extend(random.sample(WORDS, batch))
            remaining -= batch
    else:
        word_list = word_list[:count]
    return ' '.join(word_list)
|
mixman/djangodev | refs/heads/master | tests/regressiontests/select_related_onetoone/__init__.py | 12133432 | |
EnTeQuAk/django-filer | refs/heads/develop | filer/fields/__init__.py | 12133432 | |
likelyzhao/mxnet | refs/heads/dev-faster-rcnn | example/stochastic-depth/sd_mnist.py | 15 | ################################################################################
# A sanity check mainly for debugging purpose. See sd_cifar10.py for a non-trivial
# example of stochastic depth on cifar10.
################################################################################
import os
import sys
import mxnet as mx
import logging
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from utils import get_data
import sd_module
def get_conv(
    name,
    data,
    num_filter,
    kernel,
    stride,
    pad,
    with_relu,
    bn_momentum
):
    """Build a Convolution -> BatchNorm (-> ReLU) symbol chain.

    Parameters mirror mx.symbol.Convolution; `with_relu` controls whether a
    trailing ReLU activation is appended, `bn_momentum` is the batch-norm
    running-statistics momentum.  Returns the resulting mxnet symbol.
    """
    conv = mx.symbol.Convolution(
        name=name,
        data=data,
        num_filter=num_filter,
        kernel=kernel,
        stride=stride,
        pad=pad,
        no_bias=True
    )
    bn = mx.symbol.BatchNorm(
        name=name + '_bn',
        data=conv,
        fix_gamma=False,
        momentum=bn_momentum,
        # Same with https://github.com/soumith/cudnn.torch/blob/master/BatchNormalization.lua
        # cuDNN v5 don't allow a small eps of 1e-5
        eps=2e-5
    )
    if not with_relu:
        # It's better to remove ReLU here
        # https://github.com/gcr/torch-residual-networks
        return bn
    return mx.symbol.Activation(name=name + '_relu', data=bn, act_type='relu')
# Stochastic-depth sanity check: a single residual-style block with death
# rate 0.3, trained briefly on MNIST on the CPU.
death_rates = [0.3]
contexts = [mx.context.cpu()]

# Stem: one 3x3 conv + BN + ReLU over the raw image, wrapped in its own
# module at the head of a SequentialModule pipeline.
data = mx.symbol.Variable('data')
conv = get_conv(
    name='conv0',
    data=data,
    num_filter=16,
    kernel=(3, 3),
    stride=(1, 1),
    pad=(1, 1),
    with_relu=True,
    bn_momentum=0.9
)
base_mod = mx.mod.Module(conv, label_names=None, context=contexts)
mod_seq = mx.mod.SequentialModule()
mod_seq.add(base_mod)

# One stochastic-depth block per configured death rate: conv+ReLU followed
# by conv without ReLU, wrapped in a StochasticDepthModule that randomly
# skips the block during training with the given probability.
for i in range(len(death_rates)):
    conv = get_conv(
        name='conv0_%d' % i,
        data=mx.sym.Variable('data_%d' % i),
        num_filter=16,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        with_relu=True,
        bn_momentum=0.9
    )
    conv = get_conv(
        name='conv1_%d' % i,
        data=conv,
        num_filter=16,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        with_relu=False,
        bn_momentum=0.9
    )
    mod = sd_module.StochasticDepthModule(conv, data_names=['data_%d' % i],
                                          context=contexts,
                                          death_rate=death_rates[i])
    # auto_wiring connects this module's inputs to the previous module's
    # outputs by position.
    mod_seq.add(mod, auto_wiring=True)

# Classifier head: ReLU -> flatten -> 10-way fully connected -> softmax.
act = mx.sym.Activation(mx.sym.Variable('data_final'), act_type='relu')
flat = mx.sym.Flatten(act)
pred = mx.sym.FullyConnected(flat, num_hidden=10)
softmax = mx.sym.SoftmaxOutput(pred, name='softmax')
mod_seq.add(mx.mod.Module(softmax, context=contexts, data_names=['data_final']),
            auto_wiring=True, take_labels=True)

n_epoch = 2
batch_size = 100

# Download MNIST next to this script (if not present) and build the
# train/validation iterators; the fixed seed makes the train shuffle
# reproducible.
basedir = os.path.dirname(__file__)
get_data.get_mnist(os.path.join(basedir, "data"))
train = mx.io.MNISTIter(
    image=os.path.join(basedir, "data", "train-images-idx3-ubyte"),
    label=os.path.join(basedir, "data", "train-labels-idx1-ubyte"),
    input_shape=(1, 28, 28), flat=False,
    batch_size=batch_size, shuffle=True, silent=False, seed=10)
val = mx.io.MNISTIter(
    image=os.path.join(basedir, "data", "t10k-images-idx3-ubyte"),
    label=os.path.join(basedir, "data", "t10k-labels-idx1-ubyte"),
    input_shape=(1, 28, 28), flat=False,
    batch_size=batch_size, shuffle=True, silent=False)

logging.basicConfig(level=logging.DEBUG)
mod_seq.fit(train, val,
            optimizer_params={'learning_rate': 0.01, 'momentum': 0.9},
            num_epoch=n_epoch,
            batch_end_callback=mx.callback.Speedometer(batch_size, 10))
|
robweber/maraschino | refs/heads/master | lib/jinja2/_markupsafe/_constants.py | 1535 | # -*- coding: utf-8 -*-
"""
markupsafe._constants
~~~~~~~~~~~~~~~~~~~~~
Highlevel implementation of the Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
HTML_ENTITIES = {
'AElig': 198,
'Aacute': 193,
'Acirc': 194,
'Agrave': 192,
'Alpha': 913,
'Aring': 197,
'Atilde': 195,
'Auml': 196,
'Beta': 914,
'Ccedil': 199,
'Chi': 935,
'Dagger': 8225,
'Delta': 916,
'ETH': 208,
'Eacute': 201,
'Ecirc': 202,
'Egrave': 200,
'Epsilon': 917,
'Eta': 919,
'Euml': 203,
'Gamma': 915,
'Iacute': 205,
'Icirc': 206,
'Igrave': 204,
'Iota': 921,
'Iuml': 207,
'Kappa': 922,
'Lambda': 923,
'Mu': 924,
'Ntilde': 209,
'Nu': 925,
'OElig': 338,
'Oacute': 211,
'Ocirc': 212,
'Ograve': 210,
'Omega': 937,
'Omicron': 927,
'Oslash': 216,
'Otilde': 213,
'Ouml': 214,
'Phi': 934,
'Pi': 928,
'Prime': 8243,
'Psi': 936,
'Rho': 929,
'Scaron': 352,
'Sigma': 931,
'THORN': 222,
'Tau': 932,
'Theta': 920,
'Uacute': 218,
'Ucirc': 219,
'Ugrave': 217,
'Upsilon': 933,
'Uuml': 220,
'Xi': 926,
'Yacute': 221,
'Yuml': 376,
'Zeta': 918,
'aacute': 225,
'acirc': 226,
'acute': 180,
'aelig': 230,
'agrave': 224,
'alefsym': 8501,
'alpha': 945,
'amp': 38,
'and': 8743,
'ang': 8736,
'apos': 39,
'aring': 229,
'asymp': 8776,
'atilde': 227,
'auml': 228,
'bdquo': 8222,
'beta': 946,
'brvbar': 166,
'bull': 8226,
'cap': 8745,
'ccedil': 231,
'cedil': 184,
'cent': 162,
'chi': 967,
'circ': 710,
'clubs': 9827,
'cong': 8773,
'copy': 169,
'crarr': 8629,
'cup': 8746,
'curren': 164,
'dArr': 8659,
'dagger': 8224,
'darr': 8595,
'deg': 176,
'delta': 948,
'diams': 9830,
'divide': 247,
'eacute': 233,
'ecirc': 234,
'egrave': 232,
'empty': 8709,
'emsp': 8195,
'ensp': 8194,
'epsilon': 949,
'equiv': 8801,
'eta': 951,
'eth': 240,
'euml': 235,
'euro': 8364,
'exist': 8707,
'fnof': 402,
'forall': 8704,
'frac12': 189,
'frac14': 188,
'frac34': 190,
'frasl': 8260,
'gamma': 947,
'ge': 8805,
'gt': 62,
'hArr': 8660,
'harr': 8596,
'hearts': 9829,
'hellip': 8230,
'iacute': 237,
'icirc': 238,
'iexcl': 161,
'igrave': 236,
'image': 8465,
'infin': 8734,
'int': 8747,
'iota': 953,
'iquest': 191,
'isin': 8712,
'iuml': 239,
'kappa': 954,
'lArr': 8656,
'lambda': 955,
'lang': 9001,
'laquo': 171,
'larr': 8592,
'lceil': 8968,
'ldquo': 8220,
'le': 8804,
'lfloor': 8970,
'lowast': 8727,
'loz': 9674,
'lrm': 8206,
'lsaquo': 8249,
'lsquo': 8216,
'lt': 60,
'macr': 175,
'mdash': 8212,
'micro': 181,
'middot': 183,
'minus': 8722,
'mu': 956,
'nabla': 8711,
'nbsp': 160,
'ndash': 8211,
'ne': 8800,
'ni': 8715,
'not': 172,
'notin': 8713,
'nsub': 8836,
'ntilde': 241,
'nu': 957,
'oacute': 243,
'ocirc': 244,
'oelig': 339,
'ograve': 242,
'oline': 8254,
'omega': 969,
'omicron': 959,
'oplus': 8853,
'or': 8744,
'ordf': 170,
'ordm': 186,
'oslash': 248,
'otilde': 245,
'otimes': 8855,
'ouml': 246,
'para': 182,
'part': 8706,
'permil': 8240,
'perp': 8869,
'phi': 966,
'pi': 960,
'piv': 982,
'plusmn': 177,
'pound': 163,
'prime': 8242,
'prod': 8719,
'prop': 8733,
'psi': 968,
'quot': 34,
'rArr': 8658,
'radic': 8730,
'rang': 9002,
'raquo': 187,
'rarr': 8594,
'rceil': 8969,
'rdquo': 8221,
'real': 8476,
'reg': 174,
'rfloor': 8971,
'rho': 961,
'rlm': 8207,
'rsaquo': 8250,
'rsquo': 8217,
'sbquo': 8218,
'scaron': 353,
'sdot': 8901,
'sect': 167,
'shy': 173,
'sigma': 963,
'sigmaf': 962,
'sim': 8764,
'spades': 9824,
'sub': 8834,
'sube': 8838,
'sum': 8721,
'sup': 8835,
'sup1': 185,
'sup2': 178,
'sup3': 179,
'supe': 8839,
'szlig': 223,
'tau': 964,
'there4': 8756,
'theta': 952,
'thetasym': 977,
'thinsp': 8201,
'thorn': 254,
'tilde': 732,
'times': 215,
'trade': 8482,
'uArr': 8657,
'uacute': 250,
'uarr': 8593,
'ucirc': 251,
'ugrave': 249,
'uml': 168,
'upsih': 978,
'upsilon': 965,
'uuml': 252,
'weierp': 8472,
'xi': 958,
'yacute': 253,
'yen': 165,
'yuml': 255,
'zeta': 950,
'zwj': 8205,
'zwnj': 8204
}
|
Hellowlol/plexpy | refs/heads/master | lib/unidecode/x082.py | 252 | data = (
'Yao ', # 0x00
'Yu ', # 0x01
'Chong ', # 0x02
'Xi ', # 0x03
'Xi ', # 0x04
'Jiu ', # 0x05
'Yu ', # 0x06
'Yu ', # 0x07
'Xing ', # 0x08
'Ju ', # 0x09
'Jiu ', # 0x0a
'Xin ', # 0x0b
'She ', # 0x0c
'She ', # 0x0d
'Yadoru ', # 0x0e
'Jiu ', # 0x0f
'Shi ', # 0x10
'Tan ', # 0x11
'Shu ', # 0x12
'Shi ', # 0x13
'Tian ', # 0x14
'Dan ', # 0x15
'Pu ', # 0x16
'Pu ', # 0x17
'Guan ', # 0x18
'Hua ', # 0x19
'Tan ', # 0x1a
'Chuan ', # 0x1b
'Shun ', # 0x1c
'Xia ', # 0x1d
'Wu ', # 0x1e
'Zhou ', # 0x1f
'Dao ', # 0x20
'Gang ', # 0x21
'Shan ', # 0x22
'Yi ', # 0x23
'[?] ', # 0x24
'Pa ', # 0x25
'Tai ', # 0x26
'Fan ', # 0x27
'Ban ', # 0x28
'Chuan ', # 0x29
'Hang ', # 0x2a
'Fang ', # 0x2b
'Ban ', # 0x2c
'Que ', # 0x2d
'Hesaki ', # 0x2e
'Zhong ', # 0x2f
'Jian ', # 0x30
'Cang ', # 0x31
'Ling ', # 0x32
'Zhu ', # 0x33
'Ze ', # 0x34
'Duo ', # 0x35
'Bo ', # 0x36
'Xian ', # 0x37
'Ge ', # 0x38
'Chuan ', # 0x39
'Jia ', # 0x3a
'Lu ', # 0x3b
'Hong ', # 0x3c
'Pang ', # 0x3d
'Xi ', # 0x3e
'[?] ', # 0x3f
'Fu ', # 0x40
'Zao ', # 0x41
'Feng ', # 0x42
'Li ', # 0x43
'Shao ', # 0x44
'Yu ', # 0x45
'Lang ', # 0x46
'Ting ', # 0x47
'[?] ', # 0x48
'Wei ', # 0x49
'Bo ', # 0x4a
'Meng ', # 0x4b
'Nian ', # 0x4c
'Ju ', # 0x4d
'Huang ', # 0x4e
'Shou ', # 0x4f
'Zong ', # 0x50
'Bian ', # 0x51
'Mao ', # 0x52
'Die ', # 0x53
'[?] ', # 0x54
'Bang ', # 0x55
'Cha ', # 0x56
'Yi ', # 0x57
'Sao ', # 0x58
'Cang ', # 0x59
'Cao ', # 0x5a
'Lou ', # 0x5b
'Dai ', # 0x5c
'Sori ', # 0x5d
'Yao ', # 0x5e
'Tong ', # 0x5f
'Yofune ', # 0x60
'Dang ', # 0x61
'Tan ', # 0x62
'Lu ', # 0x63
'Yi ', # 0x64
'Jie ', # 0x65
'Jian ', # 0x66
'Huo ', # 0x67
'Meng ', # 0x68
'Qi ', # 0x69
'Lu ', # 0x6a
'Lu ', # 0x6b
'Chan ', # 0x6c
'Shuang ', # 0x6d
'Gen ', # 0x6e
'Liang ', # 0x6f
'Jian ', # 0x70
'Jian ', # 0x71
'Se ', # 0x72
'Yan ', # 0x73
'Fu ', # 0x74
'Ping ', # 0x75
'Yan ', # 0x76
'Yan ', # 0x77
'Cao ', # 0x78
'Cao ', # 0x79
'Yi ', # 0x7a
'Le ', # 0x7b
'Ting ', # 0x7c
'Qiu ', # 0x7d
'Ai ', # 0x7e
'Nai ', # 0x7f
'Tiao ', # 0x80
'Jiao ', # 0x81
'Jie ', # 0x82
'Peng ', # 0x83
'Wan ', # 0x84
'Yi ', # 0x85
'Chai ', # 0x86
'Mian ', # 0x87
'Mie ', # 0x88
'Gan ', # 0x89
'Qian ', # 0x8a
'Yu ', # 0x8b
'Yu ', # 0x8c
'Shuo ', # 0x8d
'Qiong ', # 0x8e
'Tu ', # 0x8f
'Xia ', # 0x90
'Qi ', # 0x91
'Mang ', # 0x92
'Zi ', # 0x93
'Hui ', # 0x94
'Sui ', # 0x95
'Zhi ', # 0x96
'Xiang ', # 0x97
'Bi ', # 0x98
'Fu ', # 0x99
'Tun ', # 0x9a
'Wei ', # 0x9b
'Wu ', # 0x9c
'Zhi ', # 0x9d
'Qi ', # 0x9e
'Shan ', # 0x9f
'Wen ', # 0xa0
'Qian ', # 0xa1
'Ren ', # 0xa2
'Fou ', # 0xa3
'Kou ', # 0xa4
'Jie ', # 0xa5
'Lu ', # 0xa6
'Xu ', # 0xa7
'Ji ', # 0xa8
'Qin ', # 0xa9
'Qi ', # 0xaa
'Yuan ', # 0xab
'Fen ', # 0xac
'Ba ', # 0xad
'Rui ', # 0xae
'Xin ', # 0xaf
'Ji ', # 0xb0
'Hua ', # 0xb1
'Hua ', # 0xb2
'Fang ', # 0xb3
'Wu ', # 0xb4
'Jue ', # 0xb5
'Gou ', # 0xb6
'Zhi ', # 0xb7
'Yun ', # 0xb8
'Qin ', # 0xb9
'Ao ', # 0xba
'Chu ', # 0xbb
'Mao ', # 0xbc
'Ya ', # 0xbd
'Fei ', # 0xbe
'Reng ', # 0xbf
'Hang ', # 0xc0
'Cong ', # 0xc1
'Yin ', # 0xc2
'You ', # 0xc3
'Bian ', # 0xc4
'Yi ', # 0xc5
'Susa ', # 0xc6
'Wei ', # 0xc7
'Li ', # 0xc8
'Pi ', # 0xc9
'E ', # 0xca
'Xian ', # 0xcb
'Chang ', # 0xcc
'Cang ', # 0xcd
'Meng ', # 0xce
'Su ', # 0xcf
'Yi ', # 0xd0
'Yuan ', # 0xd1
'Ran ', # 0xd2
'Ling ', # 0xd3
'Tai ', # 0xd4
'Tiao ', # 0xd5
'Di ', # 0xd6
'Miao ', # 0xd7
'Qiong ', # 0xd8
'Li ', # 0xd9
'Yong ', # 0xda
'Ke ', # 0xdb
'Mu ', # 0xdc
'Pei ', # 0xdd
'Bao ', # 0xde
'Gou ', # 0xdf
'Min ', # 0xe0
'Yi ', # 0xe1
'Yi ', # 0xe2
'Ju ', # 0xe3
'Pi ', # 0xe4
'Ruo ', # 0xe5
'Ku ', # 0xe6
'Zhu ', # 0xe7
'Ni ', # 0xe8
'Bo ', # 0xe9
'Bing ', # 0xea
'Shan ', # 0xeb
'Qiu ', # 0xec
'Yao ', # 0xed
'Xian ', # 0xee
'Ben ', # 0xef
'Hong ', # 0xf0
'Ying ', # 0xf1
'Zha ', # 0xf2
'Dong ', # 0xf3
'Ju ', # 0xf4
'Die ', # 0xf5
'Nie ', # 0xf6
'Gan ', # 0xf7
'Hu ', # 0xf8
'Ping ', # 0xf9
'Mei ', # 0xfa
'Fu ', # 0xfb
'Sheng ', # 0xfc
'Gu ', # 0xfd
'Bi ', # 0xfe
'Wei ', # 0xff
)
|
bigzz/autotest | refs/heads/master | server/profiler.py | 4 | import itertools
try:
import autotest.common as common
except ImportError:
import common
def _get_unpassable_types(arg):
    """ Given an argument, returns a set of types contained in arg that are
    unpassable. If arg is an atomic type (e.g. int) it either returns an
    empty set (if the type is passable) or a singleton of the type (if the
    type is not passable). """
    # Scalars: strings and integer types are always passable.
    if isinstance(arg, (basestring, int, long)):
        return set()
    # Containers: passable iff everything they contain is passable.
    if isinstance(arg, (list, tuple, set, frozenset, dict)):
        if isinstance(arg, dict):
            # keys and values must both be passable
            parts = itertools.chain(arg.iterkeys(), arg.itervalues())
        else:
            # for all other containers we just iterate
            parts = iter(arg)
        found = set()
        for item in parts:
            found.update(_get_unpassable_types(item))
        return found
    # Anything else is unpassable by definition.
    return set([type(arg)])
def _validate_args(args):
    """ Validates arguments. Lists and dictionaries are valid argument types,
    so you can pass *args and **dargs in directly, rather than having to
    iterate over them yourself. """
    bad_types = _get_unpassable_types(args)
    if not bad_types:
        return
    type_names = ", ".join(t.__name__ for t in bad_types)
    raise TypeError(
        "arguments of type '%s' cannot be passed to remote profilers"
        % type_names)
class profiler_proxy(object):
    """ This is a server-side class that acts as a proxy to a real client-side
    profiler class."""

    def __init__(self, profiler_name):
        # profiler_name: name of the client-side profiler; by convention the
        # module and the class it contains share this name.
        self.name = profiler_name
        # does the profiler support rebooting?
        profiler_module = common.setup_modules.import_module(
            profiler_name, "autotest.client.profilers.%s" % profiler_name)
        profiler_class = getattr(profiler_module, profiler_name)
        self.supports_reboot = profiler_class.supports_reboot

    def initialize(self, *args, **dargs):
        # Check the arguments can be marshalled to the client, then record
        # them for the (lazy) client-side setup.
        _validate_args(args)
        _validate_args(dargs)
        self.args, self.dargs = args, dargs

    def setup(self, *args, **dargs):
        # setup() must be invoked with exactly the arguments given to
        # initialize(); the work itself is deferred.
        assert self.args == args and self.dargs == dargs
        # the actual setup happens lazily at start()

    def start(self, test, host=None):
        # Abstract: concrete proxies must implement remote start.
        raise NotImplementedError('start not implemented')

    def stop(self, test, host=None):
        # Abstract: concrete proxies must implement remote stop.
        raise NotImplementedError('stop not implemented')

    def report(self, test, host=None, wait_on_client=True):
        # Abstract: concrete proxies must implement result collection.
        raise NotImplementedError('report not implemented')
|
gaddman/ansible | refs/heads/devel | test/units/modules/network/slxos/test_slxos_facts.py | 30 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.slxos import slxos_facts
from units.modules.utils import set_module_args
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosFactsModule(TestSlxosModule):
    """Unit tests for slxos_facts with device output replayed from fixtures."""

    module = slxos_facts

    def setUp(self):
        super(TestSlxosFactsModule, self).setUp()
        # Patch run_commands so no real device connection is attempted.
        self.mock_run_commands = patch('ansible.modules.network.slxos.slxos_facts.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestSlxosFactsModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            # args[1] is the list of CLI commands the module issued; map
            # each to a fixture file named 'slxos_facts_<command>' with
            # spaces replaced by underscores and any ' | ' pipe suffix
            # stripped.
            commands = args[1]
            output = list()
            for command in commands:
                filename = str(command).split(' | ')[0].replace(' ', '_')
                output.append(load_fixture('slxos_facts_%s' % filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_slxos_facts(self):
        # Gathering the default subset must yield the model and serial
        # number parsed from the fixture output.
        set_module_args(dict(gather_subset='default'))
        result = self.execute_module()
        self.assertEqual(
            result['ansible_facts']['ansible_net_model'], 'BR-SLX9140'
        )
        self.assertEqual(
            result['ansible_facts']['ansible_net_serialnum'], 'EXH3349M005'
        )
|
TwilioDevEd/api-snippets | refs/heads/master | ip-messaging/rest/users/update-user/update-user.6.x.py | 1 | # Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account = os.environ['TWILIO_ACCOUNT_SID']
token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account, token)

# Assign a new role to an existing Chat user, addressed by service SID and
# user SID (the XXXX placeholders must be replaced with real SIDs).
user = client.chat \
    .services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .users("USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .update(role_sid="RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")

print(user.role_sid)
|
codesnake/xbmc | refs/heads/master | lib/gtest/test/gtest_filter_unittest.py | 2826 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
# (eval is safe here: the child prints a Python boolean literal.)
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)

# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)

# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'

# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'

# The command line flag for including disabled tests.
# NOTE(review): the constant name misspells 'DISABLED'; the flag *value*
# below is correct, so renaming would touch every user of the constant.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'

# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')

# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')

# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')

# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')

# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'

# Indicates whether Google Test supports death tests (detected by probing
# the test binary's test list for the HasDeathTest case).
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
    'SeqP/ParamTest.TestX/0',
    'SeqP/ParamTest.TestX/1',
    'SeqP/ParamTest.TestY/0',
    'SeqP/ParamTest.TestY/1',
    'SeqQ/ParamTest.TestX/0',
    'SeqQ/ParamTest.TestX/1',
    'SeqQ/ParamTest.TestY/0',
    'SeqQ/ParamTest.TestY/1',
    ]

DISABLED_TESTS = [
    'BarTest.DISABLED_TestFour',
    'BarTest.DISABLED_TestFive',
    'BazTest.DISABLED_TestC',
    'DISABLED_FoobarTest.Test1',
    'DISABLED_FoobarTest.DISABLED_Test2',
    'DISABLED_FoobarbazTest.TestA',
    ]

if SUPPORTS_DEATH_TESTS:
  DEATH_TESTS = [
    'HasDeathTest.Test1',
    'HasDeathTest.Test2',
    ]
else:
  DEATH_TESTS = []

# All the non-disabled tests.
ACTIVE_TESTS = [
    'FooTest.Abc',
    'FooTest.Xyz',
    'BarTest.TestOne',
    'BarTest.TestTwo',
    'BarTest.TestThree',
    'BazTest.TestOne',
    'BazTest.TestA',
    'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS

# Lazily initialized by GTestFilterUnitTest.setUp(): None means "unknown",
# then True/False once the binary has been probed for parameterized tests.
param_tests_present = None

# Utilities.

# Private copy of the environment, mutated by SetEnvVar and passed to every
# Subprocess invocation instead of touching os.environ directly.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop with a default is a no-op when the key is absent.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def RunAndReturnOutput(args = None):
  """Runs the test program and returns its output."""
  command_line = [COMMAND] + (args or [])
  return gtest_test_utils.Subprocess(command_line, env=environ).output
def RunAndExtractTestList(args = None):
  """Runs the test program and returns its exit code and a list of tests run."""
  process = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  current_case = ''
  for line in process.output.split('\n'):
    # A test-case banner line updates the current case name ...
    case_match = TEST_CASE_REGEX.match(line)
    if case_match is not None:
      current_case = case_match.group(1)
      continue
    # ... and each RUN line contributes one fully-qualified test name.
    run_match = TEST_REGEX.match(line)
    if run_match is not None:
      tests_run.append(current_case + '.' + run_match.group(1))
  return (tests_run, process.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment."""
  saved_environ = environ.copy()
  try:
    environ.update(extra_env)
    return function(*args, **kwargs)
  finally:
    # Restore the exact original environment, dropping any keys that
    # extra_env introduced.
    environ.clear()
    environ.update(saved_environ)
def RunWithSharding(total_shards, shard_index, command):
  """Runs a test program shard and returns exit code and a list of tests run."""
  sharding_env = {
      TOTAL_SHARDS_ENV_VAR: str(total_shards),
      SHARD_INDEX_ENV_VAR: str(shard_index),
  }
  return InvokeWithModifiedEnv(sharding_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag to filter tests."""

  # Utilities.

  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal."""
    for elem in lhs:
      self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
    for elem in rhs:
      self.assert_(elem in lhs, '%s in %s' % (elem, lhs))

  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var."""
    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    self.assertEqual(len(set_var), len(full_partition))
    # Use the built-in set type instead of sets.Set: the sets module has
    # been deprecated since Python 2.6 and builtin set exists since 2.4.
    self.assertEqual(set(set_var), set(full_partition))

  def AdjustForParameterizedTests(self, tests_to_run):
    """Adjust tests_to_run in case value parameterized tests are disabled."""
    global param_tests_present
    if not param_tests_present:
      # Built-in set replaces the deprecated sets.Set here as well.
      return list(set(tests_to_run) - set(PARAM_TESTS))
    else:
      return tests_to_run

  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter."""
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # First, tests using the environment variable.

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403

    # Next, tests using the command line flag.
    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.

    Runs all shards of gtest_filter_unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.

    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
        return 0.
    """
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)
      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403

  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.

    Runs gtest_filter_unittest_ with the given filter, and enables
    disabled tests. Verifies that the right set of tests were run.

    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def setUp(self):
    """Sets up test case.

    Determines whether value-parameterized tests are enabled in the binary
    and sets the flags accordingly.
    """
    global param_tests_present
    if param_tests_present is None:
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None

  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""
    self.RunAndVerify(None, ACTIVE_TESTS)

  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""
    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)

  def testEmptyFilter(self):
    """Tests an empty filter."""
    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])

  def testBadFilter(self):
    """Tests a filter that matches nothing."""
    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])

  def testFullName(self):
    """Tests filtering by full name."""
    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])

  def testUniversalFilters(self):
    """Tests filters that match everything."""
    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)

  def testFilterByTestCase(self):
    """Tests filtering by test case name."""
    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])

    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])

  def testFilterByTest(self):
    """Tests filtering by test name."""
    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])

  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""
    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])

    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)

    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
        ])

    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
        ])

  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""
    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)

  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""
    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""
    self.RunAndVerify('*z*', [
        'FooTest.Xyz',
        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
        ])

  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""
    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BazTest.TestA',
        ])

    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""
    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BazTest.TestOne',
        'BazTest.TestA',
        ])

    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BazTest.TestOne',
        ])

    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
        ])

  def testNegativeFilters(self):
    """Tests filters with negative ('-') patterns excluding tests."""
    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        'BazTest.TestA',
        'BazTest.TestB',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ])

    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)

    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ])

    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
        ])

  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""
    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)

    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])

  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""
    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  def testShardStatusFileIsCreatedWithListTests(self):
    """Tests that the shard file is created with the "list_tests" flag."""
    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file2')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      output = InvokeWithModifiedEnv(extra_env,
                                     RunAndReturnOutput,
                                     [LIST_TESTS_FLAG])
    finally:
      # This assertion ensures that Google Test enumerated the tests as
      # opposed to running them.
      self.assert_('[==========]' not in output,
                   'Unexpected output during test enumeration.\n'
                   'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                   'correct flag value for listing Google Test tests.')

      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""
      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',
          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
          ]

      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])
if __name__ == '__main__':
  # Delegate to the shared gtest test-runner entry point.
  gtest_test_utils.Main()
|
Ernesto99/odoo | refs/heads/8.0 | addons/account_asset/account_asset_invoice.py | 141 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_invoice(osv.osv):
    # Extend the invoice model so that validating an invoice also creates
    # the assets configured on its lines (see account_invoice_line below).
    _inherit = 'account.invoice'
    def action_number(self, cr, uid, ids, *args, **kargs):
        """After the invoice is numbered (validated), create an asset for
        every invoice line that carries an asset category."""
        result = super(account_invoice, self).action_number(cr, uid, ids, *args, **kargs)
        for inv in self.browse(cr, uid, ids):
            self.pool.get('account.invoice.line').asset_create(cr, uid, inv.invoice_line)
        return result
    def line_get_convert(self, cr, uid, x, part, date, context=None):
        """Propagate the asset reference into the generated move-line values."""
        res = super(account_invoice, self).line_get_convert(cr, uid, x, part, date, context=context)
        res['asset_id'] = x.get('asset_id', False)
        return res
class account_invoice_line(osv.osv):
    _inherit = 'account.invoice.line'
    _columns = {
        # Optional asset category; when set, validating the invoice creates
        # an asset from this line (see asset_create).
        'asset_category_id': fields.many2one('account.asset.category', 'Asset Category'),
    }
    def asset_create(self, cr, uid, lines, context=None):
        """Create one account.asset.asset per line that has an asset category.

        Remaining values default from the category (via onchange_category_id);
        the asset is validated immediately when the category's open_asset
        flag is set. Always returns True.
        """
        context = context or {}
        asset_obj = self.pool.get('account.asset.asset')
        for line in lines:
            if line.asset_category_id:
                vals = {
                    'name': line.name,
                    'code': line.invoice_id.number or False,
                    'category_id': line.asset_category_id.id,
                    'purchase_value': line.price_subtotal,
                    'period_id': line.invoice_id.period_id.id,
                    'partner_id': line.invoice_id.partner_id.id,
                    'company_id': line.invoice_id.company_id.id,
                    'currency_id': line.invoice_id.currency_id.id,
                    'purchase_date' : line.invoice_id.date_invoice,
                }
                # Apply the category defaults on top of the line values.
                changed_vals = asset_obj.onchange_category_id(cr, uid, [], vals['category_id'], context=context)
                vals.update(changed_vals['value'])
                asset_id = asset_obj.create(cr, uid, vals, context=context)
                if line.asset_category_id.open_asset:
                    asset_obj.validate(cr, uid, [asset_id], context=context)
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nens/cassandralib | refs/heads/master | setup.py | 1 | from setuptools import setup
version = '0.7.dev0'

# Concatenate the standard project documents into the long description
# shown on PyPI.
long_description = '\n\n'.join([
    open('README.rst').read(),
    open('CREDITS.rst').read(),
    open('CHANGES.rst').read(),
])

# BUG FIX: the original assignment ended with a trailing comma, which made
# install_requires a 1-element *tuple containing a list* instead of the
# plain list of requirement strings that setuptools expects.
install_requires = [
    'numpy',
    'pandas',
    'pycassa',
    'pytz',
    'setuptools',
]

tests_require = [
]

setup(name='cassandralib',
      version=version,
      description="Python library to talk to Cassandra",
      long_description=long_description,
      # Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers
      classifiers=['Programming Language :: Python'],
      keywords=[],
      author='Berto Booijink',
      author_email='berto.booijink@nelen-schuurmans.nl',
      url='https://github.com/nens/cassandralib',
      license='MIT',
      packages=['cassandralib'],
      include_package_data=True,
      zip_safe=False,
      install_requires=install_requires,
      tests_require=tests_require,
      extras_require={'test': tests_require},
      entry_points={
          'console_scripts': [
          ]},
      )
|
q1ang/scikit-learn | refs/heads/master | examples/datasets/plot_iris_dataset.py | 283 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

# Axis bounds for the 2-D scatter plot, padded by half a unit.
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5

plt.figure(2, figsize=(8, 6))
plt.clf()

# Plot the training points, colored by target class.
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())

# To get a better understanding of the interaction of the dimensions,
# plot the first three PCA dimensions in 3-D.
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
           cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])

plt.show()
erkrishna9/odoo | refs/heads/master | addons/account/wizard/account_report_central_journal.py | 378 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_central_journal(osv.osv_memory):
    # Transient wizard gathering the options for the central journal report.
    _name = 'account.central.journal'
    _description = 'Account Central Journal'
    _inherit = "account.common.journal.report"
    _columns = {
        # Journals to include in the report.
        'journal_ids': fields.many2many('account.journal', 'account_central_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }
    def _print_report(self, cr, uid, ids, data, context=None):
        """Collect the common report options and launch the report action."""
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        return self.pool['report'].get_action(cr, uid, [], 'account.report_centraljournal', data=data, context=context)
|
shingonoide/odoo | refs/heads/deverp_8.0 | openerp/addons/test_impex/tests/test_export.py | 211 | # -*- coding: utf-8 -*-
import itertools
import openerp.modules.registry
import openerp
from openerp.tests import common
class CreatorCase(common.TransactionCase):
    """Base case: create one record of ``model_name`` and export its fields."""
    # Overridden by each subclass with the name of the model under test.
    model_name = False
    def __init__(self, *args, **kwargs):
        super(CreatorCase, self).__init__(*args, **kwargs)
        self.model = None
    def setUp(self):
        super(CreatorCase, self).setUp()
        self.model = self.registry(self.model_name)
    def make(self, value):
        """Create a record holding ``value`` and return its browse record."""
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': value})
        return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]
    def export(self, value, fields=('value',), context=None):
        """Create a record and export the requested fields as a list of rows.
        NOTE(review): ``context`` is accepted but not forwarded anywhere;
        the tests below that pass a context rely on it being ignored.
        """
        record = self.make(value)
        return record._BaseModel__export_rows([f.split('/') for f in fields])
class test_boolean_field(CreatorCase):
    """Export of boolean fields: both values come out as unicode strings."""
    model_name = 'export.boolean'
    def test_true(self):
        self.assertEqual(
            self.export(True),
            [[u'True']])
    def test_false(self):
        """ ``False`` value to boolean fields is unique in being exported as a
        (unicode) string, not a boolean
        """
        self.assertEqual(
            self.export(False),
            [[u'False']])
class test_integer_field(CreatorCase):
    """Export of integer fields (rendered as unicode decimal strings)."""
    model_name = 'export.integer'
    def test_empty(self):
        # Sanity check: the test model starts out with no records.
        self.assertEqual(self.model.search(self.cr, openerp.SUPERUSER_ID, []), [],
                         "Test model should have no records")
    def test_0(self):
        self.assertEqual(
            self.export(0),
            [[u'0']])
    def test_basic_value(self):
        self.assertEqual(
            self.export(42),
            [[u'42']])
    def test_negative(self):
        self.assertEqual(
            self.export(-32),
            [[u'-32']])
    def test_huge(self):
        # Largest 32-bit signed value.
        self.assertEqual(
            self.export(2**31-1),
            [[unicode(2**31-1)]])
class test_float_field(CreatorCase):
    """Export of float fields (repr-style unicode conversion)."""
    model_name = 'export.float'
    def test_0(self):
        self.assertEqual(
            self.export(0.0),
            [[u'0.0']])
    def test_epsilon(self):
        # Very small values keep full precision (scientific notation).
        self.assertEqual(
            self.export(0.000000000027),
            [[u'2.7e-11']])
    def test_negative(self):
        self.assertEqual(
            self.export(-2.42),
            [[u'-2.42']])
    def test_positive(self):
        self.assertEqual(
            self.export(47.36),
            [[u'47.36']])
    def test_big(self):
        self.assertEqual(
            self.export(87654321.4678),
            [[u'87654321.4678']])
class test_decimal_field(CreatorCase):
    """Export of digits-limited float fields (rounded to the field precision)."""
    model_name = 'export.decimal'
    def test_0(self):
        self.assertEqual(
            self.export(0.0),
            [[u'0.0']])
    def test_epsilon(self):
        """ epsilon gets sliced to 0 due to precision
        """
        self.assertEqual(
            self.export(0.000000000027),
            [[u'0.0']])
    def test_negative(self):
        self.assertEqual(
            self.export(-2.42),
            [[u'-2.42']])
    def test_positive(self):
        self.assertEqual(
            self.export(47.36),
            [[u'47.36']])
    def test_big(self):
        # Rounded to 3 decimal places by the field's digits setting.
        self.assertEqual(
            self.export(87654321.4678), [[u'87654321.468']])
class test_string_field(CreatorCase):
    """Export of a size-bounded char field."""
    model_name = 'export.string.bounded'
    def test_empty(self):
        self.assertEqual(
            self.export(""),
            [['']])
    def test_within_bounds(self):
        self.assertEqual(
            self.export("foobar"),
            [[u"foobar"]])
    def test_out_of_bounds(self):
        # The stored (hence exported) value is truncated to the field's
        # size limit of 16 characters.
        self.assertEqual(
            self.export("C for Sinking, "
                        "Java for Drinking, "
                        "Smalltalk for Thinking. "
                        "...and Power to the Penguin!"),
            [[u"C for Sinking, J"]])
class test_unbound_string_field(CreatorCase):
    """Export of an unbounded char field: values are kept whole."""
    model_name = 'export.string'
    def test_empty(self):
        self.assertEqual(
            self.export(""),
            [['']])
    def test_small(self):
        self.assertEqual(
            self.export("foobar"),
            [[u"foobar"]])
    def test_big(self):
        self.assertEqual(
            self.export("We flew down weekly to meet with IBM, but they "
                        "thought the way to measure software was the amount "
                        "of code we wrote, when really the better the "
                        "software, the fewer lines of code."),
            [[u"We flew down weekly to meet with IBM, but they thought the "
              u"way to measure software was the amount of code we wrote, "
              u"when really the better the software, the fewer lines of "
              u"code."]])
class test_text(CreatorCase):
    """Export of text fields: values are exported verbatim."""
    model_name = 'export.text'
    def test_empty(self):
        self.assertEqual(
            self.export(""),
            [['']])
    def test_small(self):
        self.assertEqual(
            self.export("foobar"),
            [[u"foobar"]])
    def test_big(self):
        self.assertEqual(
            self.export("So, `bind' is `let' and monadic programming is"
                        " equivalent to programming in the A-normal form. That"
                        " is indeed all there is to monads"),
            [[u"So, `bind' is `let' and monadic programming is equivalent to"
              u" programming in the A-normal form. That is indeed all there"
              u" is to monads"]])
class test_date(CreatorCase):
    """Export of date fields (ISO yyyy-mm-dd strings, kept verbatim)."""
    model_name = 'export.date'
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [['']])
    def test_basic(self):
        self.assertEqual(
            self.export('2011-11-07'),
            [[u'2011-11-07']])
class test_datetime(CreatorCase):
    """Export of datetime fields (kept verbatim, always UTC)."""
    model_name = 'export.datetime'
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [['']])
    def test_basic(self):
        self.assertEqual(
            self.export('2011-11-07 21:05:48'),
            [[u'2011-11-07 21:05:48']])
    def test_tz(self):
        """ Export ignores the timezone and always exports to UTC
        .. note:: on the other hand, export uses user lang for name_get
        """
        # NOTE: ignores user timezone, always exports to UTC
        self.assertEqual(
            self.export('2011-11-07 21:05:48', context={'tz': 'Pacific/Norfolk'}),
            [[u'2011-11-07 21:05:48']])
class test_selection(CreatorCase):
    """Export of selection fields: the *label* of the value is exported."""
    model_name = 'export.selection'
    # French translations for the selection labels, installed by
    # test_localized_export below.
    translations_fr = [
        ("Qux", "toto"),
        ("Bar", "titi"),
        ("Foo", "tete"),
    ]
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])
    def test_value(self):
        """ selections export the *label* for their value
        """
        self.assertEqual(
            self.export(2),
            [[u"Bar"]])
    def test_localized_export(self):
        self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
            'name': u'Français',
            'code': 'fr_FR',
            'translatable': True,
            'date_format': '%d.%m.%Y',
            'decimal_point': ',',
            'thousands_sep': ' ',
        })
        Translations = self.registry('ir.translation')
        for source, value in self.translations_fr:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': 'export.selection,value',
                'lang': 'fr_FR',
                'type': 'selection',
                'src': source,
                'value': value
            })
        # NOTE(review): the untranslated label is expected even with
        # lang=fr_FR in the context — presumably selection labels are not
        # translated on export; confirm against the export implementation.
        self.assertEqual(
            self.export(2, context={'lang': 'fr_FR'}),
            [[u'Bar']])
class test_selection_function(CreatorCase):
    """Export of selection fields whose options come from a function."""
    model_name = 'export.selection.function'
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [['']])
    def test_value(self):
        # FIXME: selection functions export the *value* itself
        self.assertEqual(
            self.export(1),
            [[1]])
        self.assertEqual(
            self.export(3),
            [[3]])
        # ...except the falsy value 0, which exports as an empty string.
        self.assertEqual(
            self.export(0),
            [['']])
class test_m2o(CreatorCase):
    """Export of many2one fields: name_get, sub-field paths, external ids."""
    model_name = 'export.many2one'
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])
    def test_basic(self):
        """ Exported value is the name_get of the related object
        """
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        name = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id]))[integer_id]
        self.assertEqual(
            self.export(integer_id),
            [[name]])
    def test_path(self):
        """ Can recursively export fields of m2o via path
        """
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        self.assertEqual(
            self.export(integer_id, fields=['value/.id', 'value/value']),
            [[unicode(integer_id), u'42']])
    def test_external_id(self):
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        # Expecting the m2o target model name in the external id,
        # not this model's name
        external_id = u'__export__.export_integer_%d' % integer_id
        self.assertEqual(
            self.export(integer_id, fields=['value/id']),
            [[external_id]])
class test_o2m(CreatorCase):
    """Export behaviour of one2many fields, including sub-field paths and
    multi-row expansion."""
    model_name = 'export.one2many'
    # o2m creation commands: five child records with distinct values.
    commands = [
        (0, False, {'value': 4, 'str': 'record1'}),
        (0, False, {'value': 42, 'str': 'record2'}),
        (0, False, {'value': 36, 'str': 'record3'}),
        (0, False, {'value': 4, 'str': 'record4'}),
        (0, False, {'value': 13, 'str': 'record5'}),
    ]
    # Expected name_get strings for the records created by ``commands``.
    names = [
        u'export.one2many.child:%d' % d['value']
        for c, _, d in commands
    ]

    def test_empty(self):
        """An unset o2m field exports as False."""
        self.assertEqual(
            self.export(False),
            [[False]])

    def test_single(self):
        """A single linked record exports as its name_get string."""
        self.assertEqual(
            self.export([(0, False, {'value': 42})]),
            # name_get result
            [[u'export.one2many.child:42']])

    def test_single_subfield(self):
        """Name and sub-field of the same o2m can be exported side by side."""
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['value', 'value/value']),
            [[u'export.one2many.child:42', u'42']])

    def test_integrate_one_in_parent(self):
        """A single o2m row merges into the parent's row."""
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['const', 'value/value']),
            [[u'4', u'42']])

    def test_multiple_records(self):
        """Multiple o2m rows expand to one output row each; parent fields
        appear only on the first row."""
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/value']),
            [
                [u'4', u'4'],
                [u'', u'42'],
                [u'', u'36'],
                [u'', u'4'],
                [u'', u'13'],
            ])

    def test_multiple_records_name(self):
        """Exporting the o2m by name joins all name_get results with commas
        into a single cell."""
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value']),
            [[
                u'4', u','.join(self.names)
            ]])

    def test_multiple_records_id(self):
        """'value/.id' exports one row per child with its database id."""
        export = self.export(self.commands, fields=['const', 'value/.id'])
        O2M_c = self.registry('export.one2many.child')
        ids = O2M_c.browse(self.cr, openerp.SUPERUSER_ID,
                           O2M_c.search(self.cr, openerp.SUPERUSER_ID, []))
        self.assertEqual(
            export,
            [
                ['4', str(ids[0].id)],
                ['', str(ids[1].id)],
                ['', str(ids[2].id)],
                ['', str(ids[3].id)],
                ['', str(ids[4].id)],
            ])

    def test_multiple_records_with_name_before(self):
        """Requesting the name before sub-fields collapses to one row."""
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value', 'value/value']),
            [[  # exports sub-fields of very first o2m
                u'4', u','.join(self.names), u'4'
            ]])

    def test_multiple_records_with_name_after(self):
        """Requesting the name after sub-fields yields empty name cells."""
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/value', 'value']),
            [  # completely ignores name_get request
                [u'4', u'4', ''],
                ['', u'42', ''],
                ['', u'36', ''],
                ['', u'4', ''],
                ['', u'13', ''],
            ])

    def test_multiple_subfields_neighbour(self):
        """Adjacent sub-fields of one o2m export row-aligned."""
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/str','value/value']),
            [
                [u'4', u'record1', u'4'],
                ['', u'record2', u'42'],
                ['', u'record3', u'36'],
                ['', u'record4', u'4'],
                ['', u'record5', u'13'],
            ])

    def test_multiple_subfields_separated(self):
        """Sub-fields still align per child row even when a parent field sits
        between them in the field list."""
        self.assertEqual(
            self.export(self.commands, fields=['value/str', 'const', 'value/value']),
            [
                [u'record1', u'4', u'4'],
                [u'record2', '', u'42'],
                [u'record3', '', u'36'],
                [u'record4', '', u'4'],
                [u'record5', '', u'13'],
            ])
class test_o2m_multiple(CreatorCase):
    """Export behaviour when a record has two one2many fields at once."""
    model_name = 'export.one2many.multiple'

    def make(self, value=None, **values):
        """Create and browse one record; ``value`` is only written when
        explicitly provided."""
        if value is not None: values['value'] = value
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, values)
        return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]

    def export(self, value=None, fields=('child1', 'child2',), context=None, **values):
        """Export a freshly created record; 'a/b' field paths are split into
        the nested form expected by __export_rows."""
        record = self.make(value, **values)
        return record._BaseModel__export_rows([f.split('/') for f in fields])

    def test_empty(self):
        """Both o2m fields unset export as False."""
        self.assertEqual(
            self.export(child1=False, child2=False),
            [[False, False]])

    def test_single_per_side(self):
        """Each populated side exports its name_get; unset sides stay False."""
        self.assertEqual(
            self.export(child1=False, child2=[(0, False, {'value': 42})]),
            [[False, u'export.one2many.child.2:42']])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})], child2=False),
            [[u'export.one2many.child.1:43', False]])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})],
                        child2=[(0, False, {'value': 42})]),
            [[u'export.one2many.child.1:43', u'export.one2many.child.2:42']])

    def test_single_integrate_subfield(self):
        """Single rows from either side merge into the parent row."""
        fields = ['const', 'child1/value', 'child2/value']
        self.assertEqual(
            self.export(child1=False, child2=[(0, False, {'value': 42})],
                        fields=fields),
            [[u'36', False, u'42']])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})], child2=False,
                        fields=fields),
            [[u'36', u'43', False]])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})],
                        child2=[(0, False, {'value': 42})],
                        fields=fields),
            [[u'36', u'43', u'42']])

    def test_multiple(self):
        """ With two "concurrent" o2ms, exports the first line combined, then
        exports the rows for the first o2m, then the rows for the second o2m.
        """
        fields = ['const', 'child1/value', 'child2/value']
        child1 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
                  for index, v in zip(itertools.count(), [4, 42, 36, 4, 13])]
        child2 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
                  for index, v in zip(itertools.count(10), [8, 12, 8, 55, 33, 13])]
        self.assertEqual(
            self.export(child1=child1, child2=False, fields=fields),
            [
                [u'36', u'4', False],
                ['', u'42', ''],
                ['', u'36', ''],
                ['', u'4', ''],
                ['', u'13', ''],
            ])
        self.assertEqual(
            self.export(child1=False, child2=child2, fields=fields),
            [
                [u'36', False, u'8'],
                ['', '', u'12'],
                ['', '', u'8'],
                ['', '', u'55'],
                ['', '', u'33'],
                ['', '', u'13'],
            ])
        self.assertEqual(
            self.export(child1=child1, child2=child2, fields=fields),
            [
                [u'36', u'4', u'8'],
                ['', u'42', ''],
                ['', u'36', ''],
                ['', u'4', ''],
                ['', u'13', ''],
                ['', '', u'12'],
                ['', '', u'8'],
                ['', '', u'55'],
                ['', '', u'33'],
                ['', '', u'13'],
            ])
class test_m2m(CreatorCase):
    """Export behaviour of many2many fields."""
    model_name = 'export.many2many'
    # m2m creation commands: five linked records.
    commands = [
        (0, False, {'value': 4, 'str': 'record000'}),
        (0, False, {'value': 42, 'str': 'record001'}),
        (0, False, {'value': 36, 'str': 'record010'}),
        (0, False, {'value': 4, 'str': 'record011'}),
        (0, False, {'value': 13, 'str': 'record100'}),
    ]
    # Expected name_get strings for the linked records.
    names = [
        u'export.many2many.other:%d' % d['value']
        for c, _, d in commands
    ]

    def test_empty(self):
        """An unset m2m field exports as False."""
        self.assertEqual(
            self.export(False),
            [[False]])

    def test_single(self):
        """A single linked record exports as its name_get string."""
        self.assertEqual(
            self.export([(0, False, {'value': 42})]),
            # name_get result
            [[u'export.many2many.other:42']])

    def test_single_subfield(self):
        """Name and sub-field of the same m2m can be exported side by side."""
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['value', 'value/value']),
            [[u'export.many2many.other:42', u'42']])

    def test_integrate_one_in_parent(self):
        """A single m2m row merges into the parent's row."""
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['const', 'value/value']),
            [[u'4', u'42']])

    def test_multiple_records(self):
        """Multiple m2m rows expand like o2m: one output row per record."""
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/value']),
            [
                [u'4', u'4'],
                [u'', u'42'],
                [u'', u'36'],
                [u'', u'4'],
                [u'', u'13'],
            ])

    def test_multiple_records_name(self):
        """Exporting by name joins all name_get results with commas."""
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value']),
            [[ # FIXME: hardcoded comma, import uses config.csv_internal_sep
               # resolution: remove configurable csv_internal_sep
                u'4', u','.join(self.names)
            ]])
# m2m export behaviour largely mirrors the o2m cases above, so only the
# basic cases are covered here.
class test_function(CreatorCase):
    """Export tests for function fields."""
    model_name = 'export.function'

    def test_value(self):
        """The exported cell holds whatever reading the function field
        yields on this record."""
        exported = self.export(42)
        self.assertEqual(exported, [[u'3']])
|
tmerrick1/spack | refs/heads/develop | lib/spack/spack/paths.py | 4 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""Defines paths that are part of Spack's directory structure.
Do not import other ``spack`` modules here. This module is used
throughout Spack and should bring in a minimal number of external
dependencies.
"""
import os
from llnl.util.filesystem import ancestor
#: This file lives in $prefix/lib/spack/spack/__file__
prefix = ancestor(__file__, 4)

#: synonym for prefix
spack_root = prefix

#: bin directory in the spack prefix
bin_path = os.path.join(prefix, "bin")

#: The spack script itself
spack_script = os.path.join(bin_path, "spack")

# spack directory hierarchy: everything below hangs off $prefix/lib/spack
lib_path = os.path.join(prefix, "lib", "spack")
external_path = os.path.join(lib_path, "external")
build_env_path = os.path.join(lib_path, "env")
module_path = os.path.join(lib_path, "spack")
command_path = os.path.join(module_path, "cmd")
platform_path = os.path.join(module_path, 'platforms')
compilers_path = os.path.join(module_path, "compilers")
build_systems_path = os.path.join(module_path, 'build_systems')
operating_system_path = os.path.join(module_path, 'operating_systems')
test_path = os.path.join(module_path, "test")
hooks_path = os.path.join(module_path, "hooks")
# Mutable state (stages, repos) lives under $prefix/var/spack.
var_path = os.path.join(prefix, "var", "spack")
stage_path = os.path.join(var_path, "stage")
repos_path = os.path.join(var_path, "repos")
share_path = os.path.join(prefix, "share", "spack")

# Paths to built-in Spack repositories.
packages_path = os.path.join(repos_path, "builtin")
mock_packages_path = os.path.join(repos_path, "builtin.mock")

#: User configuration location
user_config_path = os.path.expanduser('~/.spack')

opt_path = os.path.join(prefix, "opt")
etc_path = os.path.join(prefix, "etc")
system_etc_path = '/etc'

# GPG paths.
gpg_keys_path = os.path.join(var_path, "gpg")
mock_gpg_data_path = os.path.join(var_path, "gpg.mock", "data")
mock_gpg_keys_path = os.path.join(var_path, "gpg.mock", "keys")
gpg_path = os.path.join(opt_path, "spack", "gpg")
|
Beauhurst/django | refs/heads/master | django/contrib/sessions/backends/cache.py | 113 | from django.conf import settings
from django.contrib.sessions.backends.base import (
CreateError, SessionBase, UpdateError,
)
from django.core.cache import caches
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
    """
    A cache-based session store.

    Session data lives solely in the cache backend selected by
    ``settings.SESSION_CACHE_ALIAS``; the entry's timeout is set from the
    session's expiry age on each save.
    """
    cache_key_prefix = KEY_PREFIX

    def __init__(self, session_key=None):
        # Resolve the configured cache backend once per store instance.
        self._cache = caches[settings.SESSION_CACHE_ALIAS]
        super().__init__(session_key)

    @property
    def cache_key(self):
        # Full cache key; creates a new session key if none exists yet.
        return self.cache_key_prefix + self._get_or_create_session_key()

    def load(self):
        """Return the session dict from the cache; on a miss or unreadable
        key, reset the session key and return an empty dict."""
        try:
            session_data = self._cache.get(self.cache_key)
        except Exception:
            # Some backends (e.g. memcache) raise an exception on invalid
            # cache keys. If this happens, reset the session. See #17810.
            session_data = None
        if session_data is not None:
            return session_data
        self._session_key = None
        return {}

    def create(self):
        """Create a new session, retrying on key collisions."""
        # Because a cache can fail silently (e.g. memcache), we don't know if
        # we are failing to create a new session because of a key collision or
        # because the cache is missing. So we try for a (large) number of times
        # and then raise an exception. That's the risk you shoulder if using
        # cache backing.
        for i in range(10000):
            self._session_key = self._get_new_session_key()
            try:
                self.save(must_create=True)
            except CreateError:
                continue
            self.modified = True
            return
        raise RuntimeError(
            "Unable to create a new session key. "
            "It is likely that the cache is unavailable.")

    def save(self, must_create=False):
        """Write the session to the cache.

        Raises CreateError if must_create is set and the key already exists,
        and UpdateError when updating a key that is no longer in the cache.
        """
        if self.session_key is None:
            return self.create()
        if must_create:
            # cache.add only writes if the key is absent; a falsy result
            # below means the key already existed.
            func = self._cache.add
        elif self._cache.get(self.cache_key) is not None:
            func = self._cache.set
        else:
            raise UpdateError
        result = func(self.cache_key,
                      self._get_session(no_load=must_create),
                      self.get_expiry_age())
        if must_create and not result:
            raise CreateError

    def exists(self, session_key):
        # A session exists iff its prefixed key is present in the cache.
        return bool(session_key) and (self.cache_key_prefix + session_key) in self._cache

    def delete(self, session_key=None):
        # With no argument, delete the current session (if any).
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        self._cache.delete(self.cache_key_prefix + session_key)

    @classmethod
    def clear_expired(cls):
        # The cache backend evicts expired entries itself; nothing to do.
        pass
|
dkim-95112/infosys | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/input.py | 713 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
# A list of types that are treated as linkable.
linkable_types = [
  'executable',
  'shared_library',
  'loadable_module',
  'mac_kernel_extension',
]

# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']

# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Starts empty; extended elsewhere with generator-provided sections.
# Always query via IsPathSection() rather than reading this directly.
path_sections = set()

# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
  """Return True if |section| is one that holds pathnames.

  Trailing merge-operator characters ('=+?!') are stripped first, so e.g.
  'sources!' is treated like 'sources'.
  """
  # If section ends in one of the '=+?!' characters, it's applied to a section
  # without the trailing characters. '/' is notably absent from this list,
  # because there's no way for a regular expression to be treated as a path.
  while section and section[-1:] in '=+?!':
    section = section[:-1]
  if section in path_sections:
    return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
  # considered PathSections. Using manual string matching since that
  # is much faster than the regexp and this can be called hundreds of
  # thousands of times so micro performance matters.
  if "_" in section:
    tail = section[-6:]
    if tail[-1] == 's':
      tail = tail[:-1]
    if tail[-5:] in ('_file', '_path'):
      return True
    return tail[-4:] == '_dir'
  return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',
  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Starts empty; filled in later from base_non_configuration_keys plus
# generator-specific additions.
non_configuration_keys = []

# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]

# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False

# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly. Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file. Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers. It
  is used for recursion.

  The returned list will not contain any duplicate entries. Each build file
  in the list will be relative to the current directory.
  """
  # Fix: compare to None by identity (PEP 8), not equality.
  if included is None:
    included = []

  # Already visited: stop here so mutual/repeated includes don't recurse
  # forever or produce duplicates.
  if build_file_path in included:
    return included

  included.append(build_file_path)

  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)

  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.

  Note that this is slower than eval() is.
  """
  # Parse with the Python 2-era 'compiler' module and walk the AST by hand
  # so only dict/list/constant nodes are ever evaluated.
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  # The single expression statement is the gyp file's top-level dict.
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Recursively convert one 'compiler' AST node to plain Python data.

  Only Dict, List and Const nodes are accepted; dict keys must be unique.
  |keypath| is the list of keys/indices leading to |node|, used for error
  messages.

  Raises:
    GypError: if a dictionary key is repeated.
    TypeError: if the node is not a Dict, List or Const.
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    # Renamed from 'dict' to avoid shadowing the builtin.
    result = {}
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in result:
        raise GypError("Key '" + key + "' repeated at level " +
                       repr(len(keypath) + 1) + " with key path '" +
                       '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      result[key] = CheckNode(c[n + 1], kp)
    return result
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                    "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
                     is_target, check):
  """Read and evaluate one .gyp/.gypi file, merging its 'includes'.

  Results are cached in |data| and |aux_data| keyed by path, so each build
  file is read and evaluated at most once per process.

  Raises:
    GypError: if the file is missing or does not evaluate to a dict.
  """
  if build_file_path in data:
    return data[build_file_path]

  if os.path.exists(build_file_path):
    build_file_contents = open(build_file_path).read()
  else:
    raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))

  build_file_data = None
  try:
    if check:
      # Restricted evaluation that also rejects duplicate dict keys (slower).
      build_file_data = CheckedEval(build_file_contents)
    else:
      # Plain eval with builtins disabled.
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise

  if type(build_file_data) is not dict:
    raise GypError("%s does not evaluate to a dictionary." % build_file_path)

  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}

  # Scan for includes and merge them in.
  if ('skip_includes' not in build_file_data or
      not build_file_data['skip_includes']):
    try:
      if is_target:
        # Target build files also get the generator-supplied default includes.
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, includes, check)
      else:
        # Included (.gypi) files do not receive the default includes again.
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, None, check)
    except Exception, e:
      gyp.common.ExceptionAppend(e,
                                 'while reading includes of ' + build_file_path)
      raise

  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  """Merge 'includes' files into |subdict| in place, then recurse.

  |includes| is an optional list of extra include paths (e.g. generator
  defaults) merged before the dict's own 'includes' entries.  Each included
  file is recorded in aux_data[subdict_path]['included'].
  """
  includes_list = []
  if includes != None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']

  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)

    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)

    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)

  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if type(v) is dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
                                    None, check)
    elif type(v) is list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
                                    check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  """Walk |sublist|, merging 'includes' in any dicts found and recursing
  into nested lists."""
  for element in sublist:
    element_type = type(element)
    if element_type is dict:
      LoadBuildFileIncludesIntoDict(element, sublist_path, data, aux_data,
                                    None, check)
    elif element_type is list:
      LoadBuildFileIncludesIntoList(element, sublist_path, data, aux_data,
                                    check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expand each target's 'toolsets' list into per-toolset target copies.

  After this runs, every target carries a single 'toolset' key.  When
  multiple toolsets are requested (and multiple_toolsets is enabled) the
  target is deep-copied once per extra toolset.  Mutates |data| in place.
  """
  if 'targets' in data:
    target_list = data['targets']
    new_target_list = []
    for target in target_list:
      # If this target already has an explicit 'toolset', and no 'toolsets'
      # list, don't modify it further.
      if 'toolset' in target and 'toolsets' not in target:
        new_target_list.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if len(toolsets) > 0:
        # Optimization: only do copies if more than one toolset is specified.
        for build in toolsets[1:]:
          new_target = gyp.simple_copy.deepcopy(target)
          new_target['toolset'] = build
          new_target_list.append(new_target)
        target['toolset'] = toolsets[0]
        new_target_list.append(target)
    data['targets'] = new_target_list
  if 'conditions' in data:
    for condition in data['conditions']:
      if type(condition) is list:
        # condition[0] is the expression; the remaining entries are the
        # dicts to apply, which may themselves contain targets/toolsets.
        for condition_dict in condition[1:]:
          if type(condition_dict) is dict:
            ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Load a target build file and run early processing on it.

  Expands toolsets, applies "pre"/"early" variable and condition phases,
  merges target_defaults into each target, and collects dependencies.

  Returns False if the file was already loaded (non-parallel path only),
  (build_file_path, dependencies) when load_dependencies is False, and
  None after recursively loading all dependencies otherwise.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')

  # The 'target_build_files' key is only set when loading target build files in
  # the non-parallel code path, where LoadTargetBuildFile is called
  # recursively. In the parallel code path, we don't need to check whether the
  # |build_file_path| has already been loaded, because the 'scheduled' set in
  # ParallelState guarantees that we never load the same |build_file_path|
  # twice.
  if 'target_build_files' in data:
    if build_file_path in data['target_build_files']:
      # Already loaded.
      return False
    data['target_build_files'].add(build_file_path)

  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)

  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)

  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth

  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')

  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)

  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)

  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)

  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)

  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)

    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults. Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
          build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1

    # No longer needed.
    del build_file_data['target_defaults']

  # Look for dependencies. This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.

  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])

  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
          e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

     This wrapper is used when LoadTargetBuildFile is executed in
     a worker process.
  """

  try:
    # Let the parent process handle Ctrl-C; workers ignore SIGINT.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value

    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, per_process_data,
                                 per_process_aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result

    (build_file_path, dependencies) = result

    # We can safely pop the build_file_data from per_process_data because it
    # will never be referenced by this process again, so we don't need to keep
    # it in the cache.
    build_file_data = per_process_data.pop(build_file_path)

    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            build_file_data,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    # Any other failure is reported to stderr; returning None signals the
    # error to the parent's callback.
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Error encountered while processing input files in parallel."""
  pass
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.
    """
    # All mutations below happen under the condition lock shared with the
    # main loop in LoadTargetBuildFilesParallel.
    self.condition.acquire()
    if not result:
      # Worker reported failure; wake the main loop so it notices self.error.
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, build_file_data0, dependencies0) = result
    self.data[build_file_path0] = build_file_data0
    self.data['target_build_files'].add(build_file_path0)
    # Queue any dependencies we haven't scheduled yet.
    for new_dependency in dependencies0:
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  """Load |build_files| and all their dependencies using a process pool.

  Results are accumulated into |data| via ParallelState's callback.  Exits
  the process (sys.exit(1)) if any worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data

  try:
    parallel_state.condition.acquire()
    # Loop until every scheduled file has been farmed out and answered.
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing to hand out right now; wait for a worker to finish.
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      # Globals the worker must replicate to behave like this process.
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}

      if not parallel_state.pool:
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e

  # NOTE(review): this release is skipped when KeyboardInterrupt re-raises
  # above -- confirm that leaving the condition held on interrupt is
  # acceptable (the process is tearing down at that point).
  parallel_state.condition.release()

  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None

  if parallel_state.error:
    sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}


def FindEnclosingBracketGroup(input_str):
  """Return (start, end) of the first balanced bracket group in |input_str|.

  The end index is exclusive.  Returns (-1, -1) when the brackets are
  mismatched, unclosed, or absent.
  """
  open_stack = []
  group_start = -1
  for position, ch in enumerate(input_str):
    if ch in LBRACKETS:
      open_stack.append(ch)
      if group_start == -1:
        group_start = position
    elif ch in BRACKETS:
      # A closer with no opener, or with the wrong opener, is malformed.
      if not open_stack or open_stack.pop() != BRACKETS[ch]:
        return (-1, -1)
      if not open_stack:
        return (group_start, position + 1)
  return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
# Named groups: 'replace' is the full span to substitute, 'type' is the
# operator prefix (e.g. "<", "<!", "<!@", "<|"), 'command_string' is an
# optional interpreter name, 'is_array' is non-empty when the contents are
# a bracketed list, and 'content' is the variable name or command text.
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.  Keyed by (command string, directory) — see ExpandVariables.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Apply platform-specific rewrites to a shell command.

  On Windows, a leading POSIX "cat " is rewritten to "type ".  |cmd| may be
  either a command string or an argv-style list (only the first element is
  rewritten).  On other platforms the command is returned untouched.
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) is list:
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Expansion phases: early '<' expansions, late '>' expansions, and
# late-late '^' expansions.  See ExpandVariables.
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
  """Expand variable and command references in |input| and return the result.

  |phase| (PHASE_EARLY, PHASE_LATE, or PHASE_LATELATE) selects which
  expansion symbol ('<', '>', or '^') and regex are recognized.  |variables|
  maps variable names to values, and |build_file| is the path of the .gyp
  file being processed, used for relative paths and error messages.  The
  return value is a string, an int (for canonical integers), or a list
  (for @-style list-context expansions).
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context. Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string. Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None

      # Support <|(listfile.txt ...) which generates a file
      # containing items from a gyp list, generated at gyp time.
      # This works around actions/rules which have more inputs than will
      # fit on the command line.
      if file_list:
        if type(contents) is list:
          contents_list = contents
        else:
          contents_list = contents.split(' ')
        replacement = contents_list[0]
        if os.path.isabs(replacement):
          raise GypError('| cannot handle absolute paths, got "%s"' % replacement)

        if not generator_filelist_paths:
          path = os.path.join(build_file_dir, replacement)
        else:
          if os.path.isabs(build_file_dir):
            toplevel = generator_filelist_paths['toplevel']
            rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
          else:
            rel_build_file_dir = build_file_dir
          qualified_out_dir = generator_filelist_paths['qualified_out_dir']
          path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
          gyp.common.EnsureDirExists(path)

        replacement = gyp.common.RelativePath(path, build_file_dir)
        f = gyp.common.WriteOnDiff(path)
        for i in contents_list[1:]:
          f.write('%s\n' % i)
        f.close()

      elif run_command:
        use_shell = True
        if match['is_array']:
          # A bracketed command is an argv-style list; eval it and run it
          # without the shell.
          contents = eval(contents)
          use_shell = False

        # Check for a cached value to avoid executing commands, or generating
        # file lists more than once. The cache key contains the command to be
        # run as well as the directory to run it from, to account for commands
        # that depend on their current directory.
        # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
        # someone could author a set of GYP files where each time the command
        # is invoked it produces different output by design. When the need
        # arises, the syntax should be extended to support no caching off a
        # command's output so it is run every time.
        cache_key = (str(contents), build_file_dir)
        cached_value = cached_command_results.get(cache_key, None)
        if cached_value is None:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Executing command '%s' in directory '%s'",
                          contents, build_file_dir)

          replacement = ''

          if command_string == 'pymod_do_main':
            # <!pymod_do_main(modulename param eters) loads |modulename| as a
            # python module and then calls that module's DoMain() function,
            # passing ["param", "eters"] as a single list argument. For modules
            # that don't load quickly, this can be faster than
            # <!(python modulename param eters). Do this in |build_file_dir|.
            oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
            if build_file_dir:  # build_file_dir may be None (see above).
              os.chdir(build_file_dir)
            try:
              parsed_contents = shlex.split(contents)
              try:
                py_module = __import__(parsed_contents[0])
              except ImportError as e:
                raise GypError("Error importing pymod_do_main"
                               "module (%s): %s" % (parsed_contents[0], e))
              replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
            finally:
              os.chdir(oldwd)
            assert replacement != None
          elif command_string:
            raise GypError("Unknown command string '%s' in '%s'." %
                           (command_string, contents))
          else:
            # Fix up command with platform specific workarounds.
            contents = FixupPlatformCommand(contents)
            try:
              p = subprocess.Popen(contents, shell=use_shell,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   stdin=subprocess.PIPE,
                                   cwd=build_file_dir)
            except Exception, e:
              raise GypError("%s while executing command '%s' in %s" %
                             (e, contents, build_file))

            p_stdout, p_stderr = p.communicate('')

            if p.wait() != 0 or p_stderr:
              sys.stderr.write(p_stderr)
              # Simulate check_call behavior, since check_call only exists
              # in python 2.5 and later.
              raise GypError("Call to '%s' returned exit status %d while in %s." %
                             (contents, p.returncode, build_file))
            replacement = p_stdout.rstrip()

          cached_command_results[cache_key] = replacement
        else:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Had cache value for command '%s' in directory '%s'",
                          contents,build_file_dir)
          replacement = cached_value

    else:
      # Ordinary variable reference: look it up in |variables|.
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # and empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

      if type(replacement) is list:
        for item in replacement:
          if not contents[-1] == '/' and type(item) not in (str, int):
            raise GypError('Variable ' + contents +
                           ' must expand to a string or list of strings; ' +
                           'list contains a ' +
                           item.__class__.__name__)
        # Run through the list and handle variable expansions in it. Since
        # the list is guaranteed not to contain dicts, this won't do anything
        # with conditions sections.
        ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                            build_file)
      elif type(replacement) not in (str, int):
        raise GypError('Variable ' + contents +
                       ' must expand to a string or list of strings; ' +
                       'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context. It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement. See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct. This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into
  # integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
# Maps condition-expression strings to compiled code objects (see
# EvalSingleCondition).
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used.

  |condition| is a flat list of alternating (cond_expr, true_dict) pairs,
  optionally ending with a trailing false_dict.  The first pair whose
  expression evaluates decides the result, but every pair is still
  validated.
  """
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError.  That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  pos = 0
  chosen = None
  while pos < len(condition):
    cond_expr = condition[pos]
    true_dict = condition[pos + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
          conditions_key, cond_expr, type(true_dict)))
    false_dict = None
    pos += 2
    if pos < len(condition) and type(condition[pos]) is dict:
      # A trailing dict is the false branch; it must be the last item.
      false_dict = condition[pos]
      pos += 1
      if pos != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
            conditions_key, cond_expr, len(condition) - pos))
    # Only the first pair determines the result; later pairs are still
    # walked so their structure gets validated.
    if chosen is None:
      chosen = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)
  return chosen
def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise.

  cond_expr is expanded (per |phase|) and then evaluated as a Python
  expression with |variables| as its namespace; compiled expressions are
  memoized in cached_conditions_asts.
  """
  # Do expansions on the condition itself.  Since the conditon can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
          'Variable expansion in this context permits str and int ' + \
          'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # __builtins__ is disabled so condition expressions can only reference
    # the supplied variables.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError, e:
    # Re-raise with the build file and offending expression in the message.
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError, e:
    # An unknown variable name was referenced in the condition.
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Process a 'conditions' or 'target_conditions' section in the_dict.

  Which section is handled depends on |phase|:
    early    -> 'conditions'
    late     -> 'target_conditions'
    latelate -> nothing

  Each item in a conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true.  Optionally, a third item,
  false_dict, may be present; it is merged into the_dict if cond_expr
  evaluates to false.

  Any dict merged into the_dict will be recursively processed for nested
  conditionals and other expansions, also according to phase, immediately
  prior to being merged.
  """
  if phase == PHASE_LATELATE:
    # No conditions sections are evaluated during the latelate phase.
    return
  assert phase in (PHASE_EARLY, PHASE_LATE)
  conditions_key = 'conditions' if phase == PHASE_EARLY else 'target_conditions'

  if conditions_key not in the_dict:
    return

  # Unhook the conditions list, it's no longer needed.
  conditions_list = the_dict.pop(conditions_key)

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)
    if merge_dict is None:
      continue
    # Expand variables and nested conditionals in the merge_dict before
    # merging it.
    ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                        variables, build_file)
    MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Load automatic variables from |the_dict| into |variables| in place.

  Any key in |the_dict| with a plain string, int, or list value becomes an
  automatic variable.  The variable name is the key name with a "_"
  character prepended.  Dict-valued keys are skipped.
  """
  # items() instead of the Python 2-only iteritems(): identical iteration
  # behavior on Python 2 (the dict is small), and forward-compatible.
  for key, value in the_dict.items():
    if type(value) in (str, int, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Load entries of the_dict['variables'] into |variables| in place.

  Any keys in the_dict's "variables" dict, if it has one, becomes a
  variable.  The variable name is the key name in the "variables" dict.
  Variables that end with the % character are set only if they are unset in
  the variables dict.  the_dict_key is the name of the key that accesses
  the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  # items() instead of the Python 2-only iteritems(); same iteration
  # behavior, forward-compatible.
  for key, value in the_dict.get('variables', {}).items():
    if type(value) not in (str, int, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # BUG FIX: this previously used "the_dict_key is 'variables'", which
      # compares object identity and only worked via CPython string
      # interning (and warns on modern Pythons).  Equality is what's meant.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.

  Args:
    the_dict: the dict to process; modified in place.
    phase: PHASE_EARLY, PHASE_LATE, or PHASE_LATELATE; selects the
        expansion syntax and which conditions section is evaluated.
    variables_in: mapping of variable names to values; copied, never
        modified.
    build_file: path of the build file the_dict came from, for relative
        paths and error messages.
    the_dict_key: the key that accesses the_dict in its parent dict, or
        None if the_dict has no dict parent.
  """
  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within
  # a "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expand variables and process conditionals in every item of |the_list|.

  Dict items recurse into ProcessVariablesAndConditionsInDict, list items
  recurse into this function, and string items are expanded in place (a
  string that expands to a list is spliced into |the_list|).  Int items are
  left alone; any other type raises TypeError.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        # Splice the expanded list in place of the single string item.
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # BUG FIX: the original concatenated the int |index| directly onto a
        # str, so this path raised TypeError instead of the intended
        # ValueError with a useful message.
        raise ValueError(
            'Variable expansion in this context permits strings and ' +
            'lists only, found ' + expanded.__class__.__name__ + ' at ' +
            str(index))
    elif type(item) is not int:
      # BUG FIX: same str+int concatenation problem as above; use str(index)
      # so the TypeError carries the intended message.
      raise TypeError('Unknown type ' + item.__class__.__name__ +
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  by_qualified_name = {}
  for build_file in data['target_build_files']:
    for target_dict in data[build_file].get('targets', []):
      qualified = gyp.common.QualifiedTarget(build_file,
                                             target_dict['target_name'],
                                             target_dict['toolset'])
      # Two targets resolving to the same qualified name is a hard error.
      if qualified in by_qualified_name:
        raise GypError('Duplicate target definitions for ' + qualified)
      by_qualified_name[qualified] = target_dict

  return by_qualified_name
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  # Build every section name variant: plain, bang ('!'), and slash ('/').
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      # Indexed loop: each entry is rewritten in place.
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies" also
        # appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # Honor the opt-out flag on candidate targets.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert each expansion just past the wildcard's position, keeping
          # |index| pointed at the most recently inserted entry.
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = set()
  unique = []
  for element in l:
    if element not in seen:
      seen.add(element)
      unique.append(element)
  return unique
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's
  dependency lists.

  Each dependency-style section of every target is collapsed with Unify(),
  which keeps first occurrences in their original order.
  """
  for _, target_dict in targets.iteritems():
    for section in dependency_sections:
      current = target_dict.get(section, [])
      if current:
        target_dict[section] = Unify(current)
def Filter(l, item):
  """Removes item from l, returning a new list."""
  interned = {}
  kept = []
  for element in l:
    if element != item:
      # setdefault maps equal elements onto the first-seen object, matching
      # the historical interning behavior of this helper.
      kept.append(interned.setdefault(element, element))
  return kept
def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the
  prune_self_dependency variable set."""
  for target_name, target_dict in targets.iteritems():
    for section in dependency_sections:
      deps = target_dict.get(section, [])
      if not deps:
        continue
      for dep in deps:
        if dep != target_name:
          continue
        # Only prune when the target explicitly opted in via its
        # variables dict.
        if targets[dep].get('variables', {}).get('prune_self_dependency', 0):
          target_dict[section] = Filter(deps, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the
  'none' targets."""
  for _, target_dict in targets.iteritems():
    for section in dependency_sections:
      deps = target_dict.get(section, [])
      if not deps:
        continue
      for dep in deps:
        # Only 'none'-type targets shed their link dependencies.
        if target_dict.get('type', None) != 'none':
          continue
        if targets[dep].get('variables', {}).get('link_dependency', 0):
          target_dict[section] = \
              Filter(target_dict[section], dep)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
  class CircularException(GypError):
    """Raised when a cycle is detected in the dependency graph."""
    pass
  def __init__(self, ref):
    # The object (e.g. a target dict or build file name) this node wraps.
    self.ref = ref
    # Nodes this node depends on, and nodes that depend on this node;
    # both are populated as the graph is built.
    self.dependencies = []
    self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
# The dependent one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions and loadable modules are already fully
# and finally linked. Nothing else can be a link dependency of them, there
# can only be dependencies in the sense that a dependent target might run
# an executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module',
'mac_kernel_extension'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
# this target linkable. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds the dependency graph for |targets| and flattens it.

  Returns a two-element list: a dict mapping each target name to its
  DependencyGraphNode, and a list of target names ordered so that every
  target appears after all of its dependencies.  Raises
  DependencyGraphNode.CircularException if the graph contains a cycle.
  """
  # Create a DependencyGraphNode for each target.  Put it into a dict for easy
  # access.  (The previous version also computed an unused build-file local
  # per target; that dead code is gone.)
  dependency_nodes = {}
  for target in targets.iterkeys():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links.  Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = targets.keys()[0]
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))

  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Raises DependencyGraphNode.CircularException if the .gyp files that
  declare |targets| depend on one another in a cycle.

  A .gyp file may freely reference targets within itself; only cross-file
  cycles are errors.
  """
  # Create a DependencyGraphNode for each gyp file containing a target.  Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets.iterkeys():
    build_file = gyp.common.BuildFile(target)
    if not build_file in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)

  # Set up the dependency links.
  for target, spec in targets.iteritems():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      # 'as' form works on Python 2.6+ and keeps this file parseable by
      # Python 3 tools; the old 'except GypError, e' form is a py3 syntax
      # error.
      except GypError as e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise

      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)

  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.itervalues():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      file_node = dependency_nodes.values()[0]
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges each target's dependency-provided settings into the target itself.

  key selects which settings dict to propagate and must be one of
  'all_dependent_settings', 'direct_dependent_settings', or 'link_settings';
  it also determines which set of dependencies supplies the settings.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    # The propagation scope differs per settings flavor.
    node = dependency_nodes[target]
    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        # This dependency exports nothing under |key|; skip it.
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Normalizes every target's 'dependencies' list with respect to linking.

  Static library targets lose their dependencies on other non-hard static
  libraries; linkable targets gain explicit dependencies on everything they
  link against.  Operates on |targets| in place.
  """
  # Recompute target "dependencies" properties.  For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set.  For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output. Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies. If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.

      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to
      # dependencies.  e.g. If A and B depend on C and C depends on D,
      # sort them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
# Matches items MakePathRelative must return unmodified: an optional quote
# followed by one of the special leading characters documented there
# (-, /, $, <, >, ^).
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Rewrites the relative path |item|, which is relative to fro_file's
  directory, so that it is relative to to_file's directory instead.

  Items beginning with any of the special characters matched by exception_re
  (optionally behind a quote) are returned without modification:
    /  already absolute (shortcut optimization; such paths would be returned
       as absolute anyway)
    $  build environment variables
    -  build environment flags (such as -lapr-1 in a "libraries" section)
    <, >, ^  our own variable and command expansions (see ExpandVariables)
  """
  # Same build file, or an exempt item: nothing to rewrite.
  if to_file == fro_file or exception_re.match(item):
    return item

  # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
  # temporary measure. This should really be addressed by keeping all paths
  # in POSIX until actual project generation.
  relative_dir = gyp.common.RelativePath(os.path.dirname(fro_file),
                                         os.path.dirname(to_file))
  rewritten = os.path.normpath(
      os.path.join(relative_dir, item)).replace('\\', '/')
  # normpath drops a trailing slash; restore it if the input carried one.
  if item[-1] == '/':
    rewritten += '/'
  return rewritten
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges the list |fro| into the list |to|, modifying |to| in place.

  to_file and fro_file are the build file paths that |to| and |fro| came
  from; when is_paths is True, string items are rewritten relative to
  to_file with MakePathRelative.  append selects whether merged items are
  appended or prepended.  "Singleton" items (strings and ints, except
  strings beginning with '-') appear at most once in the merged list.
  Nested dicts and lists are deep-copied rather than referenced.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__

  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  prepend_index = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)

      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges the dict |fro| into the dict |to|, modifying |to| in place.

  Nested dicts and lists from |fro| are copied rather than referenced so
  later mutation of either tree cannot alias the other.  to_file and
  fro_file are the build file paths used to relativize values in path
  sections (see MakePathRelative and IsPathSection).  List-valued keys honor
  the merge policy suffixes '=', '+', and '?' documented in the body below.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics.  Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True

      if bad_merge:
        raise TypeError(
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k)
    if type(v) in (str, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...  applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example.  Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')')
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively merges |configuration| (and everything it inherits from in
  target_dict['configurations']) into new_configuration_dict.

  visited lists configurations already merged along this inheritance path;
  it guards against infinite recursion on cyclic inherit_from chains.
  """
  # Already merged on this path; nothing more to do.
  if configuration in visited:
    return

  configuration_dict = target_dict['configurations'][configuration]

  # Merge every ancestor first so this configuration's own scalar settings
  # win (MergeDicts overwrites strings and ints on later merges).
  for parent in configuration_dict.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])

  MergeDicts(new_configuration_dict, configuration_dict,
             build_file, build_file)

  # 'abstract' is inheritance metadata only; keep it out of concrete configs.
  if 'abstract' in new_configuration_dict:
    del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
  """Expands and normalizes target_dict['configurations'] in place.

  Each concrete (non-abstract) configuration inherits the target-scope
  settings plus everything in its inherit_from chain.  Abstract
  configurations are then dropped, settings that were moved into
  configurations are deleted from the target scope, and per-configuration
  keys listed in invalid_configuration_keys raise GypError.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]

  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    merged_configurations[configuration] = new_configuration_dict

  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
        merged_configurations[configuration])

  # Now drop all the abstract ones.
  # NOTE: Python 2 .keys() returns a list, so deleting while iterating is safe.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue

    if type(value) is not list:
      raise ValueError(name + ' key ' + key + ' must be list, not ' + \
                       value.__class__.__name__)

    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue

    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key + \
                       ' must be list, not ' + \
                       value.__class__.__name__ + ' when applying ' + \
                       {'!': 'exclusion', '/': 'regex'}[operation])

    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0

      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        # Each regex_item is a two-element list: [action, pattern].
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name + \
                           ' key ' + regex_key)

        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recursively walks the_list, applying exclusion/regex list filters to any
  dicts (and lists that may contain dicts) nested inside it."""
  for entry in the_list:
    entry_type = type(entry)
    if entry_type is dict:
      ProcessListFiltersInDict(name, entry)
    elif entry_type is list:
      ProcessListFiltersInList(name, entry)
def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises an exception on error.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'mac_kernel_extension', 'none')
  target_type = target_dict.get('type', None)
  if target_type not in VALID_TARGET_TYPES:
    message = ("Target %s has an invalid target type '%s'. "
               "Must be one of %s." %
               (target, target_type, '/'.join(VALID_TARGET_TYPES)))
    raise GypError(message)
  # standalone_static_library only makes sense on static_library targets.
  standalone = target_dict.get('standalone_static_library', 0)
  if standalone and target_type != 'static_library':
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check):
  """Ensures a static_library target has no two compiled sources sharing a
  basename, which libtool on Mac cannot handle.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.
    build_file: path of the build file the target came from (unused here,
        kept for signature parity with the other validators).
    duplicate_basename_check: bool; when False the check is skipped entirely.

  Raises GypError when duplicates are found.
  """
  if not duplicate_basename_check:
    return
  # Only static libraries are affected by the libtool limitation.
  if target_dict.get('type', None) != 'static_library':
    return
  sources = target_dict.get('sources', [])
  basenames = {}
  for source in sources:
    name, ext = os.path.splitext(source)
    is_compiled_file = ext in [
        '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
    if not is_compiled_file:
      continue
    basename = os.path.basename(name) # Don't include extension.
    basenames.setdefault(basename, []).append(source)
  error = ''
  # items() (not the Python-2-only iteritems()) keeps this portable to
  # Python 3; behaviour is identical.
  for basename, files in basenames.items():
    if len(files) > 1:
      error += ' %s: %s\n' % (basename, ' '.join(files))
  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'libtool on Mac cannot handle that. Use '
          '--no-duplicate-basename-check to disable this validation.')
    raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  # Track rule names and extensions seen so far so duplicates are rejected.
  rule_names = {}
  rule_extensions = {}
  for rule in target_dict.get('rules', []):
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rule_names[rule_name] = rule
    rule_extension = rule['extension']
    if rule_extension.startswith('.'):
      rule_extension = rule_extension[1:]
    if rule_extension in rule_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rule_extensions[rule_extension]['rule_name'],
                      rule_name))
    rule_extensions[rule_extension] = rule
    # 'rule_sources' is an output of this function; refuse stale input.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))
    # Collect every source whose extension matches this rule, scanning
    # 'sources' plus any generator-specified extra source lists.
    rule_sources = []
    for source_key in ['sources'] + list(extra_sources_for_rules):
      for source in target_dict.get(source_key, []):
        source_extension = os.path.splitext(source)[1]
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          rule_sources.append(source)
    if rule_sources:
      rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Checks that any 'run_as' section in target_dict is well formed."""
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    # Nothing to validate.
    return
  where = (target_name, build_file)
  if type(run_as) is not dict:
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." % where)
  action = run_as.get('action')
  if not action:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." % where)
  if type(action) is not list:
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." % where)
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    raise GypError("The 'working_directory' for 'run_as' in target %s "
                   "in file %s should be a string." % where)
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    raise GypError("The 'environment' for 'run_as' in target %s "
                   "in file %s should be a dictionary." % where)
def ValidateActionsInTarget(target, target_dict, build_file):
  '''Validates the inputs to the actions in a target.'''
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    action_name = action.get('action_name')
    if not action_name:
      raise GypError("Anonymous action in target %s. "
                     "An action must have an 'action_name' field." %
                     target_name)
    # 'inputs' may be an empty list, but it must be present.
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    action_command = action.get('action')
    if action_command and not action_command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both values and keys are converted; nested dicts and lists are handled.
  """
  # Iterate over a snapshot of the items: the loop deletes int keys and
  # reinserts them as strings, and mutating a dict while iterating a live
  # view raises RuntimeError on Python 3 (Python 2's items() happened to
  # return a list, which is why this previously worked).
  for k, v in list(the_dict.items()):
    if type(v) is int:
      v = str(v)
      the_dict[k] = v
    elif type(v) is dict:
      TurnIntIntoStrInDict(v)
    elif type(v) is list:
      TurnIntIntoStrInList(v)

    if type(k) is int:
      del the_dict[k]
      the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings."""
  # enumerate replaces the Python-2-only xrange(len(...)) indexing pattern;
  # it behaves identically on Python 2 and 3 and is the idiomatic form.
  for index, item in enumerate(the_list):
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|."""
  # Resolve each requested root into fully-qualified target names.
  qualified_root_targets = []
  for root in root_targets:
    root = root.strip()
    matches = gyp.common.FindQualifiedTargets(root, flat_list)
    if not matches:
      raise GypError("Could not find target %s" % root)
    qualified_root_targets.extend(matches)

  # Keep every root plus everything each root transitively depends on.
  wanted_targets = {}
  for root in qualified_root_targets:
    wanted_targets[root] = targets[root]
    for dependency in dependency_nodes[root].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]

  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if 'targets' not in data[build_file]:
      continue
    kept = []
    for target in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in wanted_targets:
        kept.append(target)
    data[build_file]['targets'] = kept

  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.

  Raises GypError on the first collision found.
  """
  # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
  used = {}
  for target in targets:
    # Separate out 'path/to/file.gyp, 'target_name' from
    # 'path/to/file.gyp:target_name'.
    path, name = target.rsplit(':', 1)
    # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
    # Named gyp_file (not 'gyp') so the module-level 'gyp' package import
    # is not shadowed inside this function.
    subdir, gyp_file = os.path.split(path)
    # Use '.' for the current directory '', so that the error messages make
    # more sense.
    if not subdir:
      subdir = '.'
    # Prepare a key like 'path/to:target_name'.
    key = subdir + ':' + name
    if key in used:
      # Complain if this target is already used.
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, used[key]))
    used[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Seeds this module's global configuration from a generator's settings.

  Combines the module defaults (base_path_sections,
  base_non_configuration_keys) with the generator-specific additions found
  in generator_input_info.  Must be called before Load().
  """
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])
  global non_configuration_keys
  # Copy the base list so repeated calls don't accumulate extensions.
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']
  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, duplicate_basename_check, parallel, root_targets):
  """Loads, validates and post-processes the given .gyp build files.

  Returns [flat_list, targets, data]: flat_list is a dependency-ordered list
  of fully-qualified target names, targets maps those names to target dicts,
  and data maps build file paths to their processed contents.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
  # Load build files. This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere. This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:  # Python 2 comma syntax; module predates Python 3.
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise
  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)
  # Fully qualify all dependency links.
  QualifyDependencies(targets)
  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)
  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)
  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)
  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]
  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)
  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)
  [dependency_nodes, flat_list] = BuildDependencyList(targets)
  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)
  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)
  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]
  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])
  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)
  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)
  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)
  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)
  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed. Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)
  # Generators might not expect ints. Turn them into strs.
  TurnIntIntoStrInDict(data)
  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in. In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
|
TomasM/django-denorm | refs/heads/master | denorm/db/sqlite3/triggers.py | 7 | from django.db import transaction
from denorm.db import base
import logging
logger = logging.getLogger('denorm-sqlite')
class RandomBigInt(base.RandomBigInt):
    """SQLite flavour of the random big-integer SQL expression."""
    def sql(self):
        # SQLite's built-in RANDOM() function.
        return 'RANDOM()'
class TriggerNestedSelect(base.TriggerNestedSelect):
    def sql(self):
        """Return (sql, params) for a nested SELECT DISTINCT.

        Conditions are combined with AND: the previous ", ".join produced
        'WHERE a = 1, b = 2' when more than one column/value pair was
        supplied, which is not valid SQL.  items() also replaces the
        Python-2-only iteritems().
        """
        columns = self.columns
        table = self.table
        where = " AND ".join(["%s = %s" % (k, v) for k, v in self.kwargs.items()])
        return 'SELECT DISTINCT %(columns)s FROM %(table)s WHERE %(where)s' % locals(), tuple()
class TriggerActionInsert(base.TriggerActionInsert):
    def sql(self):
        """Build an INSERT OR REPLACE statement and its parameter tuple."""
        table = self.model._meta.db_table
        columns = "(" + ", ".join(self.columns) + ")"
        if isinstance(self.values, TriggerNestedSelect):
            # Insert straight from the nested sub-select.
            nested_sql, params = self.values.sql()
            values = nested_sql
        else:
            values = "VALUES(" + ", ".join(self.values) + ")"
            params = []
        return 'INSERT OR REPLACE INTO %(table)s %(columns)s %(values)s' % locals(), tuple(params)
class TriggerActionUpdate(base.TriggerActionUpdate):
    def sql(self):
        """Build an UPDATE statement plus its WHERE-clause parameters."""
        table = self.model._meta.db_table
        assignments = zip(self.columns, self.values)
        updates = ", ".join(["%s = %s" % pair for pair in assignments])
        if isinstance(self.where, tuple):
            # A (clause, params) pair was supplied.
            where, where_params = self.where
        else:
            where, where_params = self.where, []
        return 'UPDATE %(table)s SET %(updates)s WHERE %(where)s' % locals(), where_params
class Trigger(base.Trigger):
    def name(self):
        # Suffix the base name with the content type id so triggers for
        # different content types on the same table get distinct names.
        name = base.Trigger.name(self)
        if self.content_type_field:
            name += "_%s" % self.content_type
        return name
    def sql(self):
        """Builds the CREATE TRIGGER statement and its parameter tuple."""
        qn = self.connection.ops.quote_name
        name = self.name()
        params = []
        action_list = []
        actions_added = set()
        for a in self.actions:
            sql, action_params = a.sql()
            if sql:
                if not sql.endswith(';'):
                    sql += ';'
                action_params = tuple(action_params)
                # Skip actions whose (sql, params) pair was already added.
                if (sql, action_params) not in actions_added:
                    actions_added.add((sql, action_params))
                    action_list.extend(sql.split('\n'))
                    params.extend(action_params)
        actions = "\n ".join(action_list)
        table = self.db_table
        time = self.time.upper()
        event = self.event.upper()
        content_type = self.content_type
        ct_field = self.content_type_field
        when = []
        if event == "UPDATE":
            # Fire only when at least one watched column changed; IS NOT is
            # SQLite's null-safe inequality comparison.
            when.append("(" + "OR".join(["(OLD.%s IS NOT NEW.%s)" % (qn(f), qn(f)) for f, t in self.fields]) + ")")
        if ct_field:
            ct_field = qn(ct_field)
            # Restrict generic-relation triggers to rows of this content type.
            if event == "DELETE":
                when.append("(OLD.%s == %s)" % (ct_field, content_type))
            elif event == "INSERT":
                when.append("(NEW.%s == %s)" % (ct_field, content_type))
            elif event == "UPDATE":
                when.append("((OLD.%(ctf)s == %(ct)s) OR (NEW.%(ctf)s == %(ct)s))" % {'ctf': ct_field, 'ct': content_type})
        # Each operand is fully parenthesised, so joining without spaces
        # around OR/AND still yields valid SQL.
        when = "AND".join(when)
        if when:
            when = "WHEN(%s)" % (when,)
        return """
            CREATE TRIGGER %(name)s
            %(time)s %(event)s ON %(table)s
            FOR EACH ROW %(when)s BEGIN
                %(actions)s
            END;
        """ % locals(), tuple(params)
class TriggerSet(base.TriggerSet):
    def drop(self):
        """Drops every denorm-created trigger in the SQLite database."""
        qn = self.connection.ops.quote_name
        cursor = self.cursor()
        # Only triggers created by denorm are matched (shared name prefix).
        # table_name is fetched but unused; kept for tuple unpacking.
        cursor.execute("SELECT name, tbl_name FROM sqlite_master WHERE type = 'trigger' AND name LIKE 'denorm_%%';")
        for trigger_name, table_name in cursor.fetchall():
            cursor.execute("DROP TRIGGER %s;" % (qn(trigger_name),))
        transaction.commit_unless_managed(using=self.using)
    def install(self):
        """Creates all triggers in this set."""
        cursor = self.cursor()
        # items() instead of the Python-2-only iteritems(); behaviour is
        # identical and it keeps the module importable on Python 3.
        for name, trigger in self.triggers.items():
            sql, args = trigger.sql()
            cursor.execute(sql, args)
        transaction.commit_unless_managed(using=self.using)
|
Guts/isogeo-api-py-minsdk | refs/heads/master | isogeo_pysdk/exceptions.py | 1 | # -*- coding: UTF-8 -*-
#! python3
"""
Isogeo Python SDK - Custom exceptions
See: https://docs.python.org/fr/3/tutorial/errors.html#user-defined-exceptions
"""
# #############################################################################
# ########## Libraries #############
# ##################################
# Standard library
import logging
# #############################################################################
# ########## Globals ###############
# ##################################
logger = logging.getLogger(__name__)
# #############################################################################
# ########## Classes ###############
# ##################################
class IsogeoSdkError(Exception):
    """Base class for exceptions in Isogeo Python SDK package."""
class AlreadyExistError(IsogeoSdkError):
    """An object with similar properties already exists in Isogeo database."""
|
MicroPyramid/Django-CRM | refs/heads/master | opportunity/api_urls.py | 1 | from django.urls import path
from opportunity import api_views
# URL namespace used when reversing these routes
# (e.g. reverse("api_opportunities:...")).
app_name = "api_opportunities"
# REST endpoints for opportunities: collection view at the root, then
# detail, comment and attachment views keyed by primary key.
urlpatterns = [
    path("", api_views.OpportunityListView.as_view()),
    path("<int:pk>/", api_views.OpportunityDetailView.as_view()),
    path("comment/<int:pk>/", api_views.OpportunityCommentView.as_view()),
    path("attachment/<int:pk>/", api_views.OpportunityAttachmentView.as_view()),
]
|
Ali-aqrabawi/ezclinic | refs/heads/master | appengine_config.py | 1 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START vendor]
from google.appengine.ext import vendor
import os
# Make third-party packages bundled in the local 'lib' directory importable.
vendor.add('lib')
# [END vendor]
# NOTE(review): despite the name, this is True only for the *development*
# server (SERVER_SOFTWARE starts with 'Development'), not for production
# App Engine -- confirm the intent before renaming.
on_appengine = os.environ.get('SERVER_SOFTWARE','').startswith('Development')
if on_appengine and os.name == 'nt':
    # HACK: presumably works around Windows-specific behaviour in the dev
    # server sandbox by making os.name falsy -- TODO confirm why this is
    # needed; mutating os.name is unusual and affects the whole process.
    os.name = None
magicmonty/qmk_firmware | refs/heads/master | util/atmega32a_program.py | 29 | #!/usr/bin/env python
# Copyright 2017 Luiz Ribeiro <luizribeiro@gmail.com>, Sebastian Kaim <sebb@sebb767.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import os
import sys
import time
import usb
def checkForKeyboardInNormalMode():
    """Returns the ps2avrGB device in normal mode (that is, in keyboard mode) or None if it is not found."""
    return usb.core.find(idVendor=0x20A0, idProduct=0x422D)
def checkForKeyboardInBootloaderMode():
    """Returns True if a ps2avrGB device in bootloader (flashable) mode is found and False otherwise."""
    device = usb.core.find(idVendor=0x16c0, idProduct=0x05df)
    return device is not None
def flashKeyboard(firmware_file):
    """Calls bootloadHID to flash the given file to the device."""
    print('Flashing firmware to device ...')
    # bootloadHID returns 0 on success; anything else is treated as failure.
    exit_code = os.system('bootloadHID -r "%s"' % firmware_file)
    if exit_code == 0:
        print('\nDone!')
    else:
        print('\nbootloadHID returned an error.')
def printDeviceInfo(dev):
    """Prints all infos for a given USB device"""
    info_lines = [
        'Device Information:',
        ' idVendor: %d (0x%04x)' % (dev.idVendor, dev.idVendor),
        ' idProduct: %d (0x%04x)' % (dev.idProduct, dev.idProduct),
        'Manufacturer: %s' % (dev.iManufacturer),
        'Serial: %s' % (dev.iSerialNumber),
    ]
    for info_line in info_lines:
        print(info_line)
    # Trailing blank line separates this dump from subsequent output.
    print('Product: %s' % (dev.iProduct), end='\n\n')
def sendDeviceToBootloaderMode(dev):
    """Tries to send a given ps2avrGB keyboard to bootloader mode to allow flashing."""
    try:
        dev.set_configuration()
        # HID class-specific OUT request directed at the device itself.
        request_type = usb.util.build_request_type(
            usb.util.CTRL_OUT,
            usb.util.CTRL_TYPE_CLASS,
            usb.util.CTRL_RECIPIENT_DEVICE)
        USBRQ_HID_SET_REPORT = 0x09
        HID_REPORT_OPTION = 0x0301
        # NOTE(review): 8-byte SET_REPORT payload; the 0xFF in byte 3 is
        # presumably the firmware's "enter bootloader" magic -- confirm
        # against the ps2avrGB firmware source.
        dev.ctrl_transfer(request_type, USBRQ_HID_SET_REPORT, HID_REPORT_OPTION, 0, [0, 0, 0xFF] + [0] * 5)
    except usb.core.USBError:
        # for some reason I keep getting USBError, but it works!
        pass
# Command-line entry: expects the firmware hex file as the sole argument.
if len(sys.argv) < 2:
    print('Usage: %s <firmware.hex>' % sys.argv[0])
    sys.exit(1)
kb = checkForKeyboardInNormalMode()
if kb is not None:
    # NOTE(review): "keyboad" typo in the user-facing message below.
    print('Found a keyboad in normal mode. Attempting to send it to bootloader mode ...', end='')
    sendDeviceToBootloaderMode(kb)
    print(' done.')
    print("Hint: If your keyboard can't be set to bootloader mode automatically, plug it in while pressing the bootloader key to do so manually.")
    print("      You can find more infos about this here: https://github.com/qmk/qmk_firmware/tree/master/keyboards/ps2avrGB#setting-the-board-to-bootloader-mode")
# Poll for the bootloader device, 12 attempts with 5-second pauses.
attempts = 12 # 60 seconds
found = False
for attempt in range(1, attempts + 1):
    print("Searching for keyboard in bootloader mode (%i/%i) ... " % (attempt, attempts), end='')
    if checkForKeyboardInBootloaderMode():
        print('Found', end='\n\n')
        flashKeyboard(sys.argv[1])
        found = True
        break
    else:
        print('Nothing.', end='')
        if attempt != attempts: # no need to wait on the last attempt
            print(' Sleeping 5 seconds.', end='')
            time.sleep(5)
    # print a newline
    print()
if not found:
    print("Couldn't find a flashable keyboard. Aborting.")
    sys.exit(2)
flavour/cedarbluff | refs/heads/master | modules/ClimateDataPortal/import_tabbed_readings.py | 53 | #!/usr/bin/python
# this will be used for aggregating data.
def get_or_create(dict, key, creator):
    """Return dict[key]; if the key is absent, store and return creator()."""
    # EAFP: attempt the lookup first so creator() is only called on a miss.
    try:
        return dict[key]
    except KeyError:
        created = creator()
        dict[key] = created
        return created
#from import_NetCDF_readings import InsertChunksWithoutCheckingForExistingReadings
import sys
class Readings(object):
    "Stores a set of readings for a single place"
    def __init__(
        self,
        sample_table,
        place_id,
        missing_data_marker,
        converter,
        year_month_to_month_number,
        maximum = None,
        minimum = None
    ):
        self.sample_table = sample_table
        self.missing_data_marker = missing_data_marker
        self.maximum = maximum
        # NOTE(review): the minimum argument is ignored and hard-coded to 0 --
        # confirm whether this override is intentional.
        self.minimum = 0 #minimum
        self.converter = converter
        self.place_id = place_id
        self.year_month_to_month_number = year_month_to_month_number
        self.aggregated_values = {}
    def __repr__(self):
        return "%s for place %i" % (
            self.sample_table._tablename,
            self.place_id
        )
    def add_reading(self, year, month, day, reading, out_of_range):
        # Emits a CSV line (place_id, day_number, value) for in-range readings;
        # missing-marker readings are skipped entirely.
        if reading != self.missing_data_marker:
            reading = self.converter(reading)
            if (
                (self.minimum is not None and reading < self.minimum) or
                (self.maximum is not None and reading > self.maximum)
            ):
                pass
                #out_of_range(year, month, day, reading)
            else:
                print "%i,%i,%f" % (
                    self.place_id,
                    ClimateDataPortal.year_month_day_to_day_number(year, month, day),
                    reading
                )
                return
                # NOTE(review): the code below appears unreachable because of
                # the unconditional return above, so aggregated_values is never
                # filled.  Also self.year_month_day_to_day_number is never set
                # in __init__ (it stores year_month_to_month_number), so this
                # would raise AttributeError if ever reached -- confirm.
                readings = get_or_create(
                    self.aggregated_values,
                    self.year_month_day_to_day_number(year, month, day),
                    list
                )
                readings.append(reading)
    def done(self):
        "Writes the average reading to the database for that place and month"
        # NOTE(review): month_number is not defined in this scope; if
        # aggregated_values were ever non-empty this would raise NameError
        # (day_number was probably intended) -- confirm before relying on it.
        for day_number, values in self.aggregated_values.iteritems():
            self.sample_table.insert(
                time_period = month_number,
                place_id = self.place_id,
                value = sum(values) / len(values)
            )
ClimateDataPortal = local_import("ClimateDataPortal")
def import_tabbed_readings(
    folder,
    start_station,
    end_station,
    suffix,
    prefix,
    fields,
    clear_existing_data,
    separator,
    missing_data_marker
):
    """
    Expects a folder containing files with name rtXXXX.txt
    each file contains lines of the form e.g.:
    1978\t1\t1\t0\t-99.9\t-99.9
    representing year, month, day, rainfall(mm), minimum and maximum temperature
    """
    import os
    assert os.path.isdir(folder), "%s is not a folder!" % folder
    from decimal import Decimal
    # NOTE(review): datetime and field_order appear unused in this function.
    import datetime
    field_order = []
    # Builds a factory that, given per-station parameters, creates a Readings
    # aggregator bound to sample_table.  NOTE(review): the lambda's converter
    # parameter is ignored -- Decimal is always passed through; confirm.
    def readings_lambda(sample_table):
        return (lambda missing_data_marker, converter, place_id:
            Readings(
                sample_table,
                place_id,
                missing_data_marker = missing_data_marker,
                converter = Decimal,
                year_month_to_month_number = ClimateDataPortal.year_month_to_month_number,
                maximum = None,
                minimum = None
            )
        )
    date_format = {}
    field_positions = []
    # Map each --fields entry to either a date-component position or a
    # sample-table aggregator at that column position.
    for field, position in zip(fields, range(len(fields))):
        # NOTE(review): looks like leftover debug output to stderr.
        sys.stderr.write( field)
        if field != "UNUSED":
            if field in ("year", "month", "day"):
                date_format[field+"_pos"] = position
            else:
                try:
                    sample_table = ClimateDataPortal.SampleTable.matching(field, "O")
                except KeyError:
                    raise Exception(
                        "'%s' not recognised, available options are: %s\n"
                        "You can add new tables using add_table.py" % (
                            field,
                            ", ".join(map("\"%s\"".__mod__, available_tables.keys()))
                        )
                    )
                else:
                    if clear_existing_data:
                        sys.stderr.write( "Clearing "+sample_table._tablename+"\n")
                        db(sample_table.id > 0).delete()
                    field_positions.append(
                        (readings_lambda(sample_table), position)
                    )
    # All three date components must have been supplied.
    for field in ("year", "month", "day"):
        assert field+"_pos" in date_format, "%s is not specified in --fields" % field
    # Build the station query from the optional --start/--end station range.
    query_terms = []
    if start_station is not None:
        query_terms.append(climate_station_id.station_id >= start_station)
    if end_station is not None:
        query_terms.append(climate_station_id.station_id <= end_station)
    if not query_terms:
        query = climate_station_id
    else:
        import operator
        query = reduce(operator.and_, query_terms)
    stations = list(db(query).select())
    if stations:
        for station in stations:
            station_id = station.station_id
            sys.stderr.write(str(station_id)+"\n")
            data_file_path = os.path.join(
                folder,
                (prefix+"%04i"+suffix) % station_id
            )
            if not os.path.exists(data_file_path):
                sys.stderr.write( "%s not found\n" % data_file_path)
            else:
                # Instantiate one Readings per sample table for this station.
                variable_positions = []
                for field, position in field_positions:
                    variable_positions.append(
                        (
                            field(
                                missing_data_marker = missing_data_marker,
                                converter = Decimal,
                                place_id = station.id
                            ),
                            position
                        )
                    )
                import_data_in_file(
                    data_file_path,
                    tuple(variable_positions),
                    separator,
                    **date_format
                )
                db.commit()
    else:
        sys.stderr.write( "No stations! Import using import_stations.py\n")
def out_of_range(year, month, day, reading):
    """Report a reading outside the allowed range to stderr."""
    message = "%s-%s-%s: %s out of range\n" % (year, month, day, reading)
    sys.stderr.write(message)
def import_data_row(year, month, day, data):
    """Feed one row's (variable, raw-value) pairs into their aggregators."""
    for variable, field_string in data:
        variable.add_reading(year, month, day, field_string,
                             out_of_range=out_of_range)
def import_data_in_file(
    data_file_path,
    variable_positions,
    separator,
    year_pos,
    month_pos,
    day_pos,
):
    """Parses one station data file line by line, feeding each parsed row to
    import_data_row and finally flushing every variable via done()."""
    # print variables
    try:
        line_number = 1
        last_year = last_month = last_day = None
        for line in open(data_file_path, "r").readlines():
            if line:
                field_strings = line.split(separator)
                # NOTE(review): str.split always returns at least one element,
                # so this condition is always true.
                if field_strings.__len__() > 0:
                    try:
                        field = field_strings.__getitem__
                        year = int(field(year_pos))
                        month = int(field(month_pos))
                        day = int(field(day_pos))
                        if day == last_day:
                            if month == last_month:
                                if year == last_year:
                                    # NOTE(review): str(year,month,day) is a
                                    # TypeError -- str((year, month, day)) was
                                    # probably intended.  Also rows that repeat
                                    # the day but differ in month/year appear
                                    # to be silently skipped -- confirm.
                                    sys.stderr.write("Duplicate record for %s" % str(year,month,day))
                        else:
                            last_year = year
                            last_month = month
                            last_day = day
                            import_data_row(
                                year,
                                month,
                                day,
                                tuple((variable, field(position)) for variable, position in variable_positions)
                            )
                    # Python 2 comma syntax; per-line failures are logged and
                    # the import continues with the next line.
                    except Exception, exception:
                        sys.stderr.write( "line %i: %s\n" % (line_number, exception))
                line_number += 1
        for variable, position in variable_positions:
            variable.done()
    except:
        # Bare except is tolerable here only because the exception is
        # re-raised after logging the offending line.
        sys.stderr.write( line+"\n")
        raise
def main(argv):
    """Parses the command line described below and runs the tabbed import."""
    import argparse
    import os
    parser = argparse.ArgumentParser(
        description = "Imports observed climate data from tab-delimited files in a folder.",
        prog= argv[0],
        usage="""
<web2py preamble to run script> \\
  %(prog)s \\
  --folder path_to/folder [options]

Use flag -h | --help for extra help on options.

The file names must follow a convention of prefix + station_id + suffix.
e.g.:
  path_to
  `--folder
     |--rt0100.txt
     |--rt0101.txt
     |--...
     `--rt9999.txt

* Other files in this folder will not be read.
* Files not corresponding to imported stations will not be read.
* You must add tables for the data being import before it can be imported.
  Use add_table.py to do this.

Examples: *(IN ROOT OF APP FOLDER)*

Import all files in a folder, clearing existing data :
python ./run.py \\
  %(prog)s \\
  --folder path_to/folder --clear_existing_data \\
  --fields year month day "Rainfall mm" "Max Temp C" "Min Temp C"

Import a range of stations:
python ./run.py \\
  %(prog)s \\
  --folder path_to/folder --from 0 --to 500 \\
  --fields year month day "Rainfall mm" "Max Temp C" "Min Temp C"

Only import Rainfall:
python ./run.py \\
  %(prog)s \\
  --folder path_to/folder \\
  --fields year month day "Rainfall mm" UNUSED UNUSED

""")
    parser.add_argument(
        "--folder",
        required = True,
        help="Folder in which to search for files."
    )
    parser.add_argument(
        "--clear_existing_data",
        help="Truncate database tables first."
    )
    parser.add_argument(
        "--start_station",
        type=int,
        default = None,
        help="Station number to start from."
    )
    parser.add_argument(
        "--end_station",
        type=int,
        default = None,
        help="""Station number to end on
        (inclusive, i.e. import data from this station's file too)."""
    )
    parser.add_argument(
        "--prefix",
        default = "rt",
        help="File name prefix e.g. '%(default)s' (default)"
    )
    parser.add_argument(
        "--suffix",
        default = ".txt",
        help="File name suffix e.g. '%(default)s' (default)."
    )
    parser.add_argument(
        "--separator",
        default = "\t",
        help="Field separator e.g. '\t' (default)."
    )
#    parser.add_argument(
#        "--units",
#        required = True,
#        choices = ClimateDataPortal.units_in_out.keys(),
#        help="Field units"
#    )
    parser.add_argument(
        "--missing_data_marker",
        default = "-99.9",
        help = """Missing data marker.
        Interpret this as missing data and do not import anything for that date.
        """
    )
    parser.add_argument(
        "--fields",
        required = True,
        nargs = "+",
        help="""List of fields in file, e.g.:
        year month day "Rainfall mm" "Max Temp Celsius" "Min Temp Celsius"
        year, month and day are used to parse the date.
        The other fields name tables to import data into, mapping by position.
        All fields must be accounted for. Any unused fields should be marked as UNUSED.
        """
    )
    args = parser.parse_args(argv[1:])
    kwargs = {}
    # Forward every public parsed option to import_tabbed_readings as a
    # keyword argument (argparse stores them as attributes on args).
    for key, value in args.__dict__.iteritems():
        if not key.startswith("_"):
            kwargs[key] = value
    import_tabbed_readings(**kwargs)
if __name__ == "__main__":
    import sys
    # Exit with main()'s return code (argparse may also exit directly).
    sys.exit(main(sys.argv))
|
saideepchandg/oracle-r12-accounting | refs/heads/master | lib/django/core/management/__init__.py | 10 | from __future__ import unicode_literals
import collections
from importlib import import_module
import os
import pkgutil
import sys
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (BaseCommand, CommandError,
CommandParser, handle_default_options)
from django.core.management.color import color_style
from django.utils import autoreload, lru_cache, six
from django.utils._os import npath, upath
def find_commands(management_dir):
    """
    Return the names of all management commands defined inside the
    ``commands`` subdirectory of *management_dir*.

    Packages and names starting with an underscore are ignored; an
    empty list is returned when no commands are defined.
    """
    commands_path = os.path.join(management_dir, 'commands')
    # Workaround for a Python 3.2 bug with pkgutil.iter_modules
    sys.path_importer_cache.pop(commands_path, None)
    found = []
    for _, modname, ispkg in pkgutil.iter_modules([npath(commands_path)]):
        if not ispkg and not modname.startswith('_'):
            found.append(modname)
    return found
def load_command_class(app_name, name):
    """
    Import the management-command module ``<app_name>.management.commands.<name>``
    and return an instance of its ``Command`` class.

    Any errors raised while importing or instantiating (ImportError,
    AttributeError) are allowed to propagate to the caller.
    """
    dotted_path = '%s.management.commands.%s' % (app_name, name)
    return import_module(dotted_path).Command()
@lru_cache.lru_cache(maxsize=None)
def get_commands():
    """
    Return a dict mapping command names to the application that provides
    them, in the format ``{command_name: app_name}``.

    Core commands (from django.core) are always present. When settings
    are configured, each installed app's ``management.commands`` package
    is scanned as well; apps listed later in INSTALLED_APPS win when two
    apps define a command with the same name (hence the reversed()
    iteration below). A value may also be a pre-instantiated command
    object instead of an app name (used e.g. by the startapp command).

    The result is cached on the first call and reused afterwards.
    """
    mapping = {}
    for name in find_commands(upath(__path__[0])):
        mapping[name] = 'django.core'
    if not settings.configured:
        return mapping
    # Later apps in INSTALLED_APPS take precedence, so iterate in
    # reverse and let subsequent assignments overwrite earlier ones.
    for app_config in reversed(list(apps.get_app_configs())):
        management_dir = os.path.join(app_config.path, 'management')
        for name in find_commands(management_dir):
            mapping[name] = app_config.name
    return mapping
def call_command(name, *args, **options):
    """
    Calls the given command, with the given options and args/kwargs.
    This is the primary API you should use for calling specific commands.
    Some examples:
        call_command('syncdb')
        call_command('shell', plain=True)
        call_command('sqlmigrate', 'myapp')
    """
    # Load the command object.
    try:
        app_name = get_commands()[name]
    except KeyError:
        raise CommandError("Unknown command: %r" % name)
    if isinstance(app_name, BaseCommand):
        # If the command is already loaded, use it directly.
        command = app_name
    else:
        command = load_command_class(app_name, name)
    # Simulate argument parsing to get the option defaults (see #10080 for details).
    parser = command.create_parser('', name)
    if command.use_argparse:
        # Use the `dest` option name from the parser option
        # (first option string alphabetically, dashes stripped/converted),
        # so callers may pass either the dest or the option name as kwarg.
        opt_mapping = {sorted(s_opt.option_strings)[0].lstrip('-').replace('-', '_'): s_opt.dest
                       for s_opt in parser._actions if s_opt.option_strings}
        arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
        defaults = parser.parse_args(args=args)
        defaults = dict(defaults._get_kwargs(), **arg_options)
        # Move positional args out of options to mimic legacy optparse
        args = defaults.pop('args', ())
    else:
        # Legacy optparse method
        defaults, _ = parser.parse_args(args=[])
        defaults = dict(defaults.__dict__, **options)
    # Programmatic calls skip the system checks by default; pass
    # skip_checks=False explicitly to run them.
    if 'skip_checks' not in options:
        defaults['skip_checks'] = True
    return command.execute(*args, **defaults)
class ManagementUtility(object):
    """
    Encapsulates the logic of the django-admin and manage.py utilities.
    A ManagementUtility has a number of commands, which can be manipulated
    by editing the self.commands dictionary.
    """
    def __init__(self, argv=None):
        # Default to a *copy* of sys.argv so callers can mutate self.argv
        # without affecting the interpreter-level argument list.
        self.argv = argv or sys.argv[:]
        self.prog_name = os.path.basename(self.argv[0])
        self.settings_exception = None
    def main_help_text(self, commands_only=False):
        """
        Returns the script's main help text, as a string.
        """
        if commands_only:
            usage = sorted(get_commands().keys())
        else:
            usage = [
                "",
                "Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
                "",
                "Available subcommands:",
            ]
            # Group command names by the app that provides them.
            commands_dict = collections.defaultdict(lambda: [])
            for name, app in six.iteritems(get_commands()):
                if app == 'django.core':
                    app = 'django'
                else:
                    app = app.rpartition('.')[-1]
                commands_dict[app].append(name)
            style = color_style()
            for app in sorted(commands_dict.keys()):
                usage.append("")
                usage.append(style.NOTICE("[%s]" % app))
                for name in sorted(commands_dict[app]):
                    usage.append(" %s" % name)
            # Output an extra note if settings are not properly configured
            if self.settings_exception is not None:
                usage.append(style.NOTICE(
                    "Note that only Django core commands are listed "
                    "as settings are not properly configured (error: %s)."
                    % self.settings_exception))
        return '\n'.join(usage)
    def fetch_command(self, subcommand):
        """
        Tries to fetch the given subcommand, printing a message with the
        appropriate command called from the command line (usually
        "django-admin" or "manage.py") if it can't be found.
        """
        # Get commands outside of try block to prevent swallowing exceptions
        commands = get_commands()
        try:
            app_name = commands[subcommand]
        except KeyError:
            # This might trigger ImproperlyConfigured (masked in get_commands)
            settings.INSTALLED_APPS
            sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" %
                (subcommand, self.prog_name))
            sys.exit(1)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, subcommand)
        return klass
    def autocomplete(self):
        """
        Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMREPLY` variable and
        treated as completion suggestions. `COMREPLY` expects a space
        separated string as the result.
        The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the cli input. Please refer to the BASH
        man-page for more information about this variables.
        Subcommand options are saved as pairs. A pair consists of
        the long option string (e.g. '--exclude') and a boolean
        value indicating if the option requires arguments. When printing to
        stdout, an equal sign is appended to options which require arguments.
        Note: If debugging this function, it is recommended to write the debug
        output in a separate file. Otherwise the debug output will be treated
        and formatted as potential completion suggestions.
        """
        # Don't complete if user hasn't sourced bash_completion file.
        if 'DJANGO_AUTO_COMPLETE' not in os.environ:
            return
        cwords = os.environ['COMP_WORDS'].split()[1:]
        cword = int(os.environ['COMP_CWORD'])
        try:
            curr = cwords[cword - 1]
        except IndexError:
            curr = ''
        subcommands = list(get_commands()) + ['help']
        options = [('--help', False)]
        # subcommand
        if cword == 1:
            print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
        # subcommand options
        # special case: the 'help' subcommand has no options
        elif cwords[0] in subcommands and cwords[0] != 'help':
            subcommand_cls = self.fetch_command(cwords[0])
            # special case: 'runfcgi' stores additional options as
            # 'key=value' pairs
            if cwords[0] == 'runfcgi':
                from django.core.servers.fastcgi import FASTCGI_OPTIONS
                options.extend((k, 1) for k in FASTCGI_OPTIONS)
            # special case: add the names of installed apps to options
            elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
                    'sqlcustom', 'sqlindexes', 'sqlmigrate', 'sqlsequencereset', 'test'):
                try:
                    app_configs = apps.get_app_configs()
                    # Get the last part of the dotted path as the app name.
                    options.extend((app_config.label, 0) for app_config in app_configs)
                except ImportError:
                    # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                    # user will find out once they execute the command.
                    pass
            parser = subcommand_cls.create_parser('', cwords[0])
            if subcommand_cls.use_argparse:
                options.extend((sorted(s_opt.option_strings)[0], s_opt.nargs != 0) for s_opt in
                               parser._actions if s_opt.option_strings)
            else:
                options.extend((s_opt.get_opt_string(), s_opt.nargs != 0) for s_opt in
                               parser.option_list)
            # filter out previously specified options from available options
            prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
            options = [opt for opt in options if opt[0] not in prev_opts]
            # filter options by current input
            options = sorted((k, v) for k, v in options if k.startswith(curr))
            for option in options:
                opt_label = option[0]
                # append '=' to options which require args
                if option[1]:
                    opt_label += '='
                print(opt_label)
        # Exit with a non-zero status so BASH treats the output purely as
        # completion suggestions rather than a successful command run.
        sys.exit(1)
    def execute(self):
        """
        Given the command-line arguments, this figures out which subcommand is
        being run, creates a parser appropriate to that command, and runs it.
        """
        try:
            subcommand = self.argv[1]
        except IndexError:
            subcommand = 'help'  # Display help if no arguments were given.
        # Preprocess options to extract --settings and --pythonpath.
        # These options could affect the commands that are available, so they
        # must be processed early.
        parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
        parser.add_argument('--settings')
        parser.add_argument('--pythonpath')
        parser.add_argument('args', nargs='*')  # catch-all
        try:
            options, args = parser.parse_known_args(self.argv[2:])
            handle_default_options(options)
        except CommandError:
            pass  # Ignore any option errors at this point.
        # Commands that can run without a (valid) settings module.
        no_settings_commands = [
            'help', 'version', '--help', '--version', '-h',
            'compilemessages', 'makemessages',
            'startapp', 'startproject',
        ]
        try:
            settings.INSTALLED_APPS
        except ImproperlyConfigured as exc:
            self.settings_exception = exc
            # A handful of built-in management commands work without settings.
            # Load the default settings -- where INSTALLED_APPS is empty.
            if subcommand in no_settings_commands:
                settings.configure()
        if settings.configured:
            # Start the auto-reloading dev server even if the code is broken.
            # The hardcoded condition is a code smell but we can't rely on a
            # flag on the command class because we haven't located it yet.
            if subcommand == 'runserver' and '--noreload' not in self.argv:
                try:
                    autoreload.check_errors(django.setup)()
                except Exception:
                    # The exception will be raised later in the child process
                    # started by the autoreloader.
                    pass
            # In all other cases, django.setup() is required to succeed.
            else:
                django.setup()
        self.autocomplete()
        if subcommand == 'help':
            if '--commands' in args:
                sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
            elif len(options.args) < 1:
                sys.stdout.write(self.main_help_text() + '\n')
            else:
                self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
        # Special-cases: We want 'django-admin --version' and
        # 'django-admin --help' to work, for backwards compatibility.
        elif subcommand == 'version' or self.argv[1:] == ['--version']:
            sys.stdout.write(django.get_version() + '\n')
        elif self.argv[1:] in (['--help'], ['-h']):
            sys.stdout.write(self.main_help_text() + '\n')
        else:
            self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
    """
    Entry-point helper: build a ManagementUtility for *argv* (defaults to
    ``sys.argv`` when omitted) and run it.
    """
    ManagementUtility(argv).execute()
|
algorithm-ninja/cmsocial | refs/heads/master | cmsocial/db/base.py | 2 | #-*- coding: utf8 -*-
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import func
from sqlalchemy import Column, DateTime, Integer
from cms.db import Base as CMSBase
class Base(CMSBase):
    """Abstract declarative base that adds created/updated audit columns."""
    __abstract__ = True
    # Audit timestamps: _created set on INSERT, _updated refreshed on UPDATE.
    _created = Column(DateTime, default=func.now())
    _updated = Column(DateTime, default=func.now(), onupdate=func.now())
    #TODO: maybe add the id field here and remove it elsewhere?
    def fieldnames(self, *args):
        """
        Return the list of "public" column names of this model's table,
        i.e. all columns whose name does not start with an underscore
        (which excludes the internal audit fields above).

        Fix: the original used map()/filter(), which return one-shot
        iterators on Python 3 instead of the list callers expect; list
        comprehensions always return a list.
        """
        all_fields = [str(c).split('.')[-1] for c in self.__table__.columns]
        return [name for name in all_fields if not name.startswith('_')]
|
jnhdny/parts-unlimited-to-bigcommerce | refs/heads/master | python_scripts/import_products_to_bigcommerce.py | 1 | #!usr/bin/python
import mysql.connector
import json
import sys
from mysql.connector import Error
import getpass
import argparse
# Bigcommerce login credentials
# NOTE(review): username and API key are hard-coded in source control;
# they should be moved to environment variables or a config file.
BIG_USER = 'henry'
BIG_KEY = '10f0f4f371f7953c4d7d7809b62463281f15c829'
BIG_API = 'https://store-45eg5.mybigcommerce.com/api/v2/'
BIG_HEADERS = {'Content-Type': 'application/json'}
BIG_STORE_URL = BIG_API + '%s'
BIG_STORE_PRODUCT_URL = BIG_API + 'products.json'
# NOTE(review): IMAGE_LOCATION is referenced by add_image() below but is
# commented out here -- confirm it is defined where this script runs.
#IMAGE_LOCATION = 'http://www.wpsstatic.com/WPSIMAGES/'
# Placeholder image attached to newly-created brands (see create_brand).
BRAND_IMAGE = 'http://162.243.58.11/comingsoon.jpg'
def get_category_id(name):
    """
    Look up a BigCommerce category by exact name.

    Returns the category id, or None when no such category exists or the
    response body is not usable JSON.

    NOTE(review): this module uses ``requests`` throughout but never
    imports it -- confirm the import exists in the deployed copy.
    """
    get_request = requests.get(BIG_API + 'categories.json',
                               params={'name': name},
                               headers=BIG_HEADERS,
                               auth=(BIG_USER, BIG_KEY))
    try:
        cat_list = get_request.json()
        if cat_list:
            return cat_list[0]['id']
        return None
    # Narrowed from a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit; only decode/shape failures of the
    # JSON body are expected here.
    except (ValueError, KeyError, IndexError, TypeError):
        return None
def create_category(name):
    """
    Create a BigCommerce category called *name* and return its id.

    If creation does not return HTTP 201 (e.g. the category already
    exists), fall back to looking up the id of the existing category.
    """
    payload = json.dumps({'name': name})
    response = requests.post(BIG_API + 'categories.json',
                             data=payload,
                             headers=BIG_HEADERS,
                             auth=(BIG_USER, BIG_KEY))
    if response.status_code != 201:
        return get_category_id(name)
    return response.json()['id']
# Command-line handling (runs at import time): with -t/--testing a fixed
# sample part number is used, otherwise the user is prompted interactively.
# NOTE(review): this prompt also fires when the __main__ block at the
# bottom is driven by a part-number file -- confirm both input paths are
# intended. (Python 2 only: print statement and raw_input below.)
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--testing", help="In testing mode, part number 03010101 will be used instead of prompting you to input a part number.", action="store_true")
args = parser.parse_args()
if args.testing:
    print "Testing mode enabled, using part number 03010101"
    pn = "03010101"
else:
    pn = raw_input('Type a single part number with no dashes here to query database:')
def get_part_info(pn):
    """
    Query the local PARTDATA MySQL database for a single part number,
    print what was found, and return the data.

    Returns a one-element list containing a dict with the collected
    fields when the part exists, or an empty list otherwise (the
    __main__ block below iterates over this return value; the original
    always returned None, so nothing was ever imported).

    Fixes over the original:
      * ``conn``/``cursor`` are initialised before the try block, so the
        ``finally`` clause no longer raises NameError (masking the real
        error) when the connection itself fails;
      * the summary is only printed when a row was actually found,
        instead of raising NameError on never-assigned variables;
      * ``row[0:24]`` -- the original slice 0:23 dropped bullet24
        (the SELECT lists 24 bullet columns at indices 0-23).
    """
    mysqlpasswd = getpass.getpass('Mysql server root password:')
    conn = None
    cursor = None
    info = None
    try:
        conn = mysql.connector.connect(host='localhost',
                                       database='PARTDATA',
                                       buffered=True,
                                       user='root',
                                       password=mysqlpasswd)
        cursor = conn.cursor()
        if conn.is_connected():
            print('Connected to MySQL database\n...\n...')
            cursor.execute("SELECT bullet1, bullet2, bullet3, bullet4, bullet5, bullet6, "
                           "bullet7, bullet8, bullet9, bullet10, bullet11, bullet12, bullet13, "
                           "bullet14, bullet15, bullet16, bullet17, bullet18, bullet19, bullet20, "
                           "bullet21, bullet22, bullet23, bullet24, retailPrice, partImage, productName, "
                           "partDescr FROM CatalogContentExport WHERE partNumber=%s", (pn,))
            row = cursor.fetchone()
            if row is not None:
                print("Pulling Data About Part Number " + pn + "\n ... \n ...")
                # Columns: indices 0-23 are the 24 bullet fields, then
                # retailPrice, partImage, productName, partDescr.
                tupleOfDescription = row[0:24]
                info = {
                    # 'iname' is the key the create_item()/__main__ code reads.
                    'iname': row[26],
                    'sub_name': row[27],
                    'description': '. \n'.join(x for x in tupleOfDescription if x is not None),
                    'price': row[24],
                    'image': row[25],
                }
            else:
                print("No Part Data Found for part number " + pn + " ! Double check the part number or consult with your system administrator (Henry, in this case)")
    except Error as e:
        print(e)
    finally:
        # Guarded: either may be unset when connect()/cursor() failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
    if info is None:
        return []
    print("The following data was found in our MySQL database for part number " + pn + " and was assigned to various variable names: \n")
    print("Image URL: " + info['image'] + "\n")
    print("Description:\n\n -------------- \n\n" + info['description'] + "\n\n")
    print("Current Retail price: " + info['price'] + "\n")
    print("Part name: " + info['iname'] + "\n")
    print("Part sub-name: " + info['sub_name'])
    return [info]
def add_image(image_name, product_id):
    """
    Attach an image (referenced by file name under IMAGE_LOCATION) to an
    existing BigCommerce product.

    NOTE(review): relies on the module-level IMAGE_LOCATION constant,
    which is commented out above -- confirm it is defined at runtime.
    """
    url = BIG_API + 'products/%s/images.json' % (product_id,)
    payload = json.dumps({'image_file': IMAGE_LOCATION + '%s' % (image_name,)})
    requests.post(url, data=payload, headers=BIG_HEADERS, auth=(BIG_USER, BIG_KEY))
def create_brand(brand_name):
if brand_name in brand_dictionary.keys():
return brand_dictionary[brand_name]
brand_data = {'name': brand_name, 'image_file': BRAND_IMAGE}
rp = requests.post(BIG_API + 'brands.json', data=json.dumps(brand_data), headers = BIG_HEADERS, auth=(BIG_USER, BIG_KEY))
if rp.status_code == 201:
print 'Created brand %s' % (brand_name,)
b_id = rp.json()['id']
brand_dictionary[brand_name] = b_id
return b_id
else:
return get_brand_id(brand_name)
def get_brand_id(name):
    """
    Look up a BigCommerce brand by name.

    Returns its id (memoising it in ``brand_dictionary``) or None when
    the brand is not found or the response body is not usable JSON.
    """
    get_request = requests.get(BIG_API + 'brands.json', params={'name': name}, headers = BIG_HEADERS, auth=(BIG_USER, BIG_KEY))
    try:
        brand_list = get_request.json()
        if brand_list:
            b_id = brand_list[0]['id']
            brand_dictionary[name] = b_id
            return b_id
        return None
    # Narrowed from a bare ``except:`` so control-flow exceptions such
    # as KeyboardInterrupt are no longer swallowed.
    except (ValueError, KeyError, IndexError, TypeError):
        return None
def create_item(pd):
    # Create (or overwrite) a BigCommerce product from the dict *pd*,
    # remapping its keys via the module-level conversion_dict, attaching
    # an image and assigning the product to the global CATID category.
    #
    # NOTE(review): several names used here are never defined in this
    # file -- conversion_dict, CATID, image_filename, OVERWRITE, DEBUG --
    # confirm they are provided by the deployed version of this script.
    if pd.get('vname', None):
        pd['brand_id'] = create_brand(pd['vname'])
    # Swap out compatible keys and remove incompatible keys
    print "Creating item: %s" % (pd['iname'],)
    # NOTE(review): iterating pd.keys() while popping is only safe on
    # Python 2, where keys() returns a list copy.
    for k in pd.keys():
        if k in conversion_dict.keys() and k != conversion_dict[k]:
            pd[conversion_dict[k]] = pd[k]
            pd.pop(k)
        elif k not in conversion_dict.values():
            pd.pop(k)
    # Add some product properties
    pd['availability'] = 'available'
    pd['is_visible'] = True
    pd['type'] = 'physical'
    pd['categories'] = [CATID,]
    if 'weight' not in pd.keys():
        pd['weight'] = 0
    # Create BigCommerce product
    rp = requests.post(BIG_STORE_URL % ('products.json',), data=json.dumps(pd), headers = BIG_HEADERS, auth=(BIG_USER, BIG_KEY))
    # Check for success
    if rp.status_code == 201:
        print "Item %s created" % (pd['name'],)
        if image_filename:
            product_id = rp.json()['id']
            add_image(image_filename, product_id)
    elif rp.status_code == 409 and not OVERWRITE:
        print "Cannot continue. Item %s already exists" % (pd['name'],)
    elif rp.status_code == 409 and OVERWRITE:
        # 409 + OVERWRITE: delete the existing product and re-create it.
        print "Item %s already exists. Overwriting..." % (pd['name'],)
        existing_product = requests.get(BIG_STORE_PRODUCT_URL, params={'name': pd['name']}, headers = BIG_HEADERS, auth=(BIG_USER, BIG_KEY))
        existing_product = existing_product.json()
        product_id = existing_product[0]['id']
        requests.delete(BIG_API + 'products/%s.json' % (product_id,),
            headers = BIG_HEADERS, auth = (BIG_USER, BIG_KEY))
        print 'Deleted item %s' % (pd['name'],)
        rp = requests.post(BIG_STORE_PRODUCT_URL, data=json.dumps(pd), headers = BIG_HEADERS, auth=(BIG_USER, BIG_KEY))
        # Get product ID and add image if one exists
        if image_filename:
            product_id = rp.json()['id']
            add_image(image_filename, product_id)
        print "Item %s created" % (pd['name'],)
    elif DEBUG:
        print rp.text
        print 'Could not create item %s' % (pd['name'],)
if __name__ == '__main__':
if len(sys.argv) > 2:
CATNAME = sys.argv[1]
CATID = create_category(CATNAME)
# Open and read input file
f = open(sys.argv[2])
f = f.readlines()
for part_number in f:
part_number = part_number.rstrip('\n')
part_info = get_part_info(part_number)
if part_info:
for part in part_info:
if DEBUG:
print "Name: " + part['iname']
create_item(part)
else:
print 'Correct syntax is import.py categoryname FILE.TXT\nFILE.TXT is a plain text file with a part number on each line'
|
belimawr/django-filer | refs/heads/develop | filer/test_utils/custom_image/models.py | 4 | # -*- coding: utf-8 -*-
from django.db import models
from filer.models.abstract import BaseImage
class Image(BaseImage):
    # Custom filer image model used for testing: extends the abstract
    # BaseImage with a single additional free-text field.
    extra_description = models.TextField()
    class Meta:
        # No model options needed; Django derives app_label from the app
        # this module is installed under.
        pass
|
honza801/django-swiftbrowser | refs/heads/master | swiftbrowser/templatetags/dateconv.py | 8 | from datetime import datetime
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.timezone import utc
register = template.Library()
@register.filter
@stringfilter
def dateconv(value):
    """
    Template filter: parse a Swift ISO-8601 timestamp string into a
    timezone-aware (UTC) datetime.

    Generalised to accept timestamps both with and without fractional
    seconds (the ``.%f`` part may be omitted by the server). On any
    parse failure the original fallback value 0.0 is returned, so
    existing templates keep their behaviour.
    """
    for fmt in ("%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"):
        try:
            parsed = datetime.strptime(value, fmt)
        except ValueError:
            continue
        return parsed.replace(tzinfo=utc)
    return 0.0
|
raj454raj/eden | refs/heads/master | modules/s3db/stats.py | 5 | # -*- coding: utf-8 -*-
""" Sahana Eden Stats Model
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import division
__all__ = ("S3StatsModel",
"S3StatsDemographicModel",
"S3StatsImpactModel",
"S3StatsPeopleModel",
"stats_demographic_data_controller",
"stats_quantile",
"stats_year",
"stats_year_options",
#"stats_SourceRepresent",
)
import datetime
try:
# try stdlib (Python 2.6)
import json
except ImportError:
try:
# try external module
import simplejson as json
except:
# fallback to pure-Python module
import gluon.contrib.simplejson as json
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3PopupLink
# =============================================================================
class S3StatsModel(S3Model):
    """
        Statistics Data

        Defines the stats super-entities (stats_parameter, stats_data,
        stats_source) that concrete statistics models across modules
        (disease, project, vulnerability, ...) plug into.
    """
    names = ("stats_parameter",
             "stats_data",
             "stats_source",
             "stats_source_superlink",
             "stats_source_id",
             #"stats_source_details",
             )
    def model(self):
        """ Define tables/fields and return names for the global s3.* scope """
        T = current.T
        db = current.db
        super_entity = self.super_entity
        super_link = self.super_link
        # ---------------------------------------------------------------------
        # Super entity: stats_parameter
        # (the *type* of statistic; instance tables listed below)
        #
        sp_types = Storage(disease_statistic = T("Disease Statistic"),
                           org_resource_type = T("Organization Resource Type"),
                           project_beneficiary_type = T("Project Beneficiary Type"),
                           project_campaign_keyword = T("Project Campaign Keyword"),
                           #project_indicator = T("Project Indicator"),
                           stats_demographic = T("Demographic"),
                           stats_impact_type = T("Impact Type"),
                           # @ToDo; Deprecate
                           stats_people_type = T("Types of People"),
                           supply_distribution_item = T("Distribution Item"),
                           vulnerability_indicator = T("Vulnerability Indicator"),
                           vulnerability_aggregated_indicator = T("Vulnerability Aggregated Indicator"),
                           #survey_question_type = T("Survey Question Type"),
                           #climate_parameter = T("Climate Parameter"),
                           )
        tablename = "stats_parameter"
        super_entity(tablename, "parameter_id",
                     sp_types,
                     Field("name",
                           label = T("Name"),
                           ),
                     Field("description",
                           label = T("Description"),
                           ),
                     )
        # @todo: make lazy_table
        table = db[tablename]
        table.instance_type.readable = True
        # ---------------------------------------------------------------------
        # Super entity: stats_data
        # (an individual statistic *value*; instance tables listed below)
        #
        sd_types = Storage(disease_stats_data = T("Disease Data"),
                           org_resource = T("Organization Resource"),
                           project_beneficiary = T("Project Beneficiary"),
                           project_campaign_response_summary = T("Project Campaign Response Summary"),
                           #project_indicator_data = T("Project Indicator Data"),
                           stats_demographic_data = T("Demographic Data"),
                           stats_impact = T("Impact"),
                           # @ToDo: Deprecate
                           stats_people = T("People"),
                           supply_distribution = T("Distribution"),
                           vulnerability_data = T("Vulnerability Data"),
                           #survey_answer = T("Survey Answer"),
                           #climate_data = T("Climate Data"),
                           )
        tablename = "stats_data"
        super_entity(tablename, "data_id",
                     sd_types,
                     # This is a component, so needs to be a super_link
                     # - can't override field name, ondelete or requires
                     super_link("parameter_id", "stats_parameter"),
                     self.gis_location_id(
                         requires = IS_LOCATION(),
                         widget = S3LocationAutocompleteWidget(),
                     ),
                     Field("value", "double",
                           label = T("Value"),
                           #represent = lambda v: \
                           # IS_FLOAT_AMOUNT.represent(v, precision=2),
                           ),
                     # @ToDo: This will need to be a datetime for some usecases
                     s3_date(label = T("Start Date"),
                             ),
                     s3_date("end_date",
                             label = T("End Date"),
                             ),
                     )
        # ---------------------------------------------------------------------
        # Stats Source Super-Entity
        # (where a statistic value came from)
        #
        source_types = Storage(doc_document = T("Document"),
                               #org_organisation = T("Organization"),
                               #pr_person = T("Person"),
                               #flood_gauge = T("Flood Gauge"),
                               #survey_series = T("Survey")
                               )
        tablename = "stats_source"
        super_entity(tablename, "source_id", source_types,
                     Field("name",
                           label = T("Name"),
                           ),
                     )
        # For use by Instances or Components
        source_superlink = super_link("source_id", "stats_source")
        # For use by other FKs
        represent = stats_SourceRepresent(show_link = True)
        source_id = S3ReusableField("source_id", "reference %s" % tablename,
                                    label = T("Source"),
                                    represent = represent,
                                    requires = IS_EMPTY_OR(
                                                IS_ONE_OF(db, "stats_source.source_id",
                                                          represent,
                                                          sort=True)),
                                    )
        #self.add_components(tablename,
        #                    stats_source_details="source_id",
        #                    )
        # ---------------------------------------------------------------------
        # Stats Source Details
        #
        #tablename = "stats_source_details"
        #define_table(tablename,
        #             # Component
        #             source_superlink,
        #             #Field("reliability",
        #             #      label=T("Reliability")),
        #             #Field("review",
        #             #      label=T("Review")),
        #             )
        # Pass names back to global scope (s3.*)
        return dict(stats_source_superlink = source_superlink,
                    stats_source_id = source_id,
                    )
    # -------------------------------------------------------------------------
    def defaults(self):
        """ Safe defaults if module is disabled """
        return dict(
            # Needed for doc
            stats_source_superlink = S3ReusableField("source_id", "integer",
                                                     readable=False,
                                                     writable=False,
                                                     )(),
            )
# =============================================================================
class S3StatsDemographicModel(S3Model):
"""
Baseline Demographics
@ToDo: Don't aggregate data for locations which don't exist in time window
"""
names = ("stats_demographic",
"stats_demographic_data",
"stats_demographic_aggregate",
"stats_demographic_id",
"stats_demographic_rebuild_all_aggregates",
"stats_demographic_update_aggregates",
"stats_demographic_update_location_aggregate",
)
def model(self):
T = current.T
db = current.db
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
location_id = self.gis_location_id
stats_parameter_represent = S3Represent(lookup="stats_parameter",
translate=True)
# ---------------------------------------------------------------------
# Demographic
#
tablename = "stats_demographic"
define_table(tablename,
# Instance
super_link("parameter_id", "stats_parameter"),
Field("name",
requires = IS_NOT_EMPTY(),
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
),
s3_comments("description",
label = T("Description"),
),
# Link to the Demographic which is the Total, so that we can calculate percentages
Field("total_id", self.stats_parameter,
label = T("Total"),
represent = stats_parameter_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "stats_parameter.parameter_id",
stats_parameter_represent,
instance_types = ("stats_demographic",),
sort=True)),
),
*s3_meta_fields()
)
# CRUD Strings
ADD_DEMOGRAPHIC = T("Add Demographic")
crud_strings[tablename] = Storage(
label_create = ADD_DEMOGRAPHIC,
title_display = T("Demographic Details"),
title_list = T("Demographics"),
title_update = T("Edit Demographic"),
#title_upload = T("Import Demographics"),
label_list_button = T("List Demographics"),
msg_record_created = T("Demographic added"),
msg_record_modified = T("Demographic updated"),
msg_record_deleted = T("Demographic deleted"),
msg_list_empty = T("No demographics currently defined"))
configure(tablename,
deduplicate = self.stats_demographic_duplicate,
requires_approval = True,
super_entity = "stats_parameter",
)
demographic_id = super_link("parameter_id", "stats_parameter",
instance_types = ("stats_demographic",),
label = T("Demographic"),
represent = stats_parameter_represent,
readable = True,
writable = True,
empty = False,
comment = S3PopupLink(c = "stats",
f = "demographic",
vars = {"child": "parameter_id"},
title = ADD_DEMOGRAPHIC,
),
)
# ---------------------------------------------------------------------
# Demographic Data
#
tablename = "stats_demographic_data"
define_table(tablename,
# Instance
super_link("data_id", "stats_data"),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
demographic_id,
location_id(
requires = IS_LOCATION(),
widget = S3LocationAutocompleteWidget(),
),
Field("value", "double",
label = T("Value"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
requires = IS_NOT_EMPTY(),
),
s3_date(empty = False),
Field("end_date", "date",
# Just used for the year() VF
readable = False,
writable = False
),
Field("year", "list:integer",
compute = lambda row: \
stats_year(row, "stats_demographic_data"),
label = T("Year"),
),
# Link to Source
self.stats_source_id(),
s3_comments(),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Demographic Data"),
title_display = T("Demographic Data Details"),
title_list = T("Demographic Data"),
title_update = T("Edit Demographic Data"),
title_upload = T("Import Demographic Data"),
label_list_button = T("List Demographic Data"),
msg_record_created = T("Demographic Data added"),
msg_record_modified = T("Demographic Data updated"),
msg_record_deleted = T("Demographic Data deleted"),
msg_list_empty = T("No demographic data currently available"))
levels = current.gis.get_relevant_hierarchy_levels()
location_fields = ["location_id$%s" % level for level in levels]
list_fields = ["parameter_id"]
list_fields.extend(location_fields)
list_fields.extend((("value",
"date",
"source_id",
)))
filter_widgets = [S3OptionsFilter("parameter_id",
label = T("Type"),
multiple = False,
# Not translateable
#represent = "%(name)s",
),
S3OptionsFilter("year",
#multiple = False,
operator = "anyof",
options = lambda: \
stats_year_options("stats_demographic_data"),
),
S3OptionsFilter("location_id$level",
label = T("Level"),
multiple = False,
# Not translateable
#represent = "%(name)s",
),
S3LocationFilter("location_id",
levels = levels,
),
]
report_options = Storage(rows = location_fields + ["year"],
cols = ["parameter_id"],
fact = [(T("Average"), "avg(value)"),
(T("Total"), "sum(value)"),
],
defaults = Storage(rows = location_fields[0], # => L0 for multi-country, L1 for single country
cols = "parameter_id",
fact = "sum(value)",
totals = True,
chart = "breakdown:rows",
table = "collapse",
)
)
configure(tablename,
deduplicate = self.stats_demographic_data_duplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
# @ToDo: Wrapper function to call this for the record linked
# to the relevant place depending on whether approval is
# required or not. Disable when auth.override is True.
#onaccept = self.stats_demographic_update_aggregates,
#onapprove = self.stats_demographic_update_aggregates,
report_options = report_options,
# @ToDo: deployment_setting
requires_approval = True,
super_entity = "stats_data",
# If using dis-aggregated data
#timeplot_options = {"defaults": {"event_start": "date",
# "event_end": "end_date",
# "fact": "cumulate(value)",
# },
# },
)
#----------------------------------------------------------------------
# Demographic Aggregated data
#
# The data can be aggregated against:
# location, all the aggregated values across a number of locations
# thus for an L2 it will aggregate all the L3 values
# time, all the demographic_data values for the same time period.
# currently this is just the latest value in the time period
# copy, this is a copy of the previous time aggregation because no
# data is currently available for this time period
aggregate_types = {1 : T("Time"),
2 : T("Location"),
3 : T("Copy"),
}
tablename = "stats_demographic_aggregate"
define_table(tablename,
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
empty = False,
instance_types = ("stats_demographic",),
label = T("Demographic"),
represent = S3Represent(lookup="stats_parameter"),
readable = True,
writable = True,
),
location_id(
requires = IS_LOCATION(),
widget = S3LocationAutocompleteWidget(),
),
Field("agg_type", "integer",
default = 1,
label = T("Aggregation Type"),
represent = lambda opt: \
aggregate_types.get(opt,
current.messages.UNKNOWN_OPT),
requires = IS_IN_SET(aggregate_types),
),
s3_date("date",
label = T("Start Date"),
),
s3_date("end_date",
label = T("End Date"),
),
# Sum is used by Vulnerability as a fallback if we have no data at this level
Field("sum", "double",
label = T("Sum"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
),
# Percentage is used to compare an absolute value against a total
Field("percentage", "double",
label = T("Percentage"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
),
#Field("min", "double",
# label = T("Minimum"),
# ),
#Field("max", "double",
# label = T("Maximum"),
# ),
#Field("mean", "double",
# label = T("Mean"),
# ),
#Field("median", "double",
# label = T("Median"),
# ),
#Field("mad", "double",
# label = T("Median Absolute Deviation"),
# default = 0.0,
# ),
#Field("mean_ad", "double",
# label = T("Mean Absolute Deviation"),
# ),
#Field("std", "double",
# label = T("Standard Deviation"),
# ),
#Field("variance", "double",
# label = T("Variance"),
# ),
*s3_meta_fields()
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(
stats_demographic_id = demographic_id,
stats_demographic_rebuild_all_aggregates = self.stats_demographic_rebuild_all_aggregates,
stats_demographic_update_aggregates = self.stats_demographic_update_aggregates,
stats_demographic_update_location_aggregate = self.stats_demographic_update_location_aggregate,
)
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_duplicate(item):
""" Import item de-duplication """
name = item.data.get("name")
table = item.table
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_data_duplicate(item):
""" Import item de-duplication """
data = item.data
parameter_id = data.get("parameter_id")
location_id = data.get("location_id")
date = data.get("date")
table = item.table
query = (table.date == date) & \
(table.location_id == location_id) & \
(table.parameter_id == parameter_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
    @staticmethod
    def stats_demographic_rebuild_all_aggregates():
        """
            This will delete all the stats_demographic_aggregate records and
            then rebuild them by triggering off a request for each
            stats_demographic_data record.

            This function is normally only run during prepop or postpop so we
            don't need to worry about the aggregate data being unavailable for
            any length of time
        """

        # Check to see whether an existing task is running and if it is then kill it
        db = current.db
        ttable = db.scheduler_task
        rtable = db.scheduler_run
        wtable = db.scheduler_worker
        # Join scheduler_task to its RUNNING scheduler_run rows
        query = (ttable.task_name == "stats_demographic_update_aggregates") & \
                (rtable.task_id == ttable.id) & \
                (rtable.status == "RUNNING")
        rows = db(query).select(rtable.id,
                                rtable.task_id,
                                rtable.worker_name)
        now = current.request.utcnow
        for row in rows:
            # Ask the worker to terminate, then mark run & task as stopped
            db(wtable.worker_name == row.worker_name).update(status="KILL")
            db(rtable.id == row.id).update(stop_time=now,
                                           status="STOPPED")
            db(ttable.id == row.task_id).update(stop_time=now,
                                                status="STOPPED")

        # Delete the existing aggregates
        # - truncate empties the whole table in one statement
        current.s3db.stats_demographic_aggregate.truncate()

        # Read all the approved stats_demographic_data records
        # - joined to stats_demographic to pick up each parameter's total_id
        dtable = db.stats_demographic
        ddtable = db.stats_demographic_data
        query = (ddtable.deleted != True) & \
                (ddtable.parameter_id == dtable.parameter_id) & \
                (ddtable.approved_by != None)
        # @ToDo: deployment_setting for whether records need to be approved
        #query &= (ddtable.approved_by != None)
        records = db(query).select(ddtable.data_id,
                                   ddtable.parameter_id,
                                   ddtable.date,
                                   ddtable.location_id,
                                   ddtable.value,
                                   dtable.total_id,
                                   )

        # Fire off a rebuild task
        # - records serialised to JSON since task args go through the DB
        # NOTE(review): assumes a task named "stats_demographic_update_aggregates"
        # is registered with the scheduler (tasks.py) - confirm
        current.s3task.async("stats_demographic_update_aggregates",
                             vars = dict(records=records.json()),
                             timeout = 21600 # 6 hours
                             )
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_aggregated_period(data_date=None):
"""
This will return the start and end dates of the aggregated time
period.
Currently the time period is annually so it will return the start
and end of the current year.
"""
date = datetime.date
if data_date is None:
data_date = date.today()
year = data_date.year
soap = date(year, 1, 1)
eoap = date(year, 12, 31)
return (soap, eoap)
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_update_aggregates(records=None):
"""
This will calculate the stats_demographic_aggregates for the
specified records. Either all (when rebuild_all is invoked) or for
the individual parameter(s) at the specified location(s) when run
onapprove - which currently happens inside the vulnerability
approve_report() controller.
@ToDo: onapprove/onaccept wrapper function for other workflows.
This will get the raw data from stats_demographic_data and generate
a stats_demographic_aggregate record for the given time period.
The reason for doing this is so that all aggregated data can be
obtained from a single table. So when displaying data for a
particular location it will not be necessary to try the aggregate
table, and if it's not there then try the data table. Rather just
look at the aggregate table.
Once this has run then a complete set of aggregate records should
exists for this parameter_id and location for every time period from
the first data item until the current time period.
@ToDo: Add test cases to modules/unit_tests/s3db/stats.py
"""
if not records:
return
from dateutil.rrule import rrule, YEARLY
db = current.db
s3db = current.s3db
dtable = s3db.stats_demographic_data
atable = db.stats_demographic_aggregate
gtable = db.gis_location
# Data Structures used for the OPTIMISATION
param_total_dict = {} # the total_id for each parameter
param_location_dict = {} # a list of locations for each parameter
location_dict = {} # a list of locations
loc_level_list = {} # a list of levels for each location
aggregated_period = S3StatsDemographicModel.stats_demographic_aggregated_period
(last_period, year_end) = aggregated_period(None)
# Test to see which date format we have based on how we were called
if isinstance(records, basestring):
from_json = True
from dateutil.parser import parse
records = json.loads(records)
elif isinstance(records[0]["stats_demographic_data"]["date"],
(datetime.date, datetime.datetime)):
from_json = False
else:
from_json = True
from dateutil.parser import parse
for record in records:
total_id = record["stats_demographic"]["total_id"]
record = record["stats_demographic_data"]
data_id = record["data_id"]
location_id = record["location_id"]
parameter_id = record["parameter_id"]
# Skip if either the location or the parameter is not valid
if not location_id or not parameter_id:
current.log.warning("Skipping bad stats_demographic_data record with data_id %s " % data_id)
continue
if total_id and parameter_id not in param_total_dict:
param_total_dict[parameter_id] = total_id
if from_json:
date = parse(record["date"]) # produces a datetime
date = date.date()
else:
date = record["date"]
(start_date, end_date) = aggregated_period(date)
# Get all the approved stats_demographic_data records for this location and parameter
query = (dtable.location_id == location_id) & \
(dtable.deleted != True) & \
(dtable.approved_by != None)
# @ToDo: deployment_setting for whether records need to be approved
# query &= (dtable.approved_by != None)
fields = [dtable.data_id,
dtable.date,
dtable.value,
]
if total_id:
# Also get the records for the Total to use to calculate the percentage
query &= (dtable.parameter_id.belongs([parameter_id, total_id]))
fields.append(dtable.parameter_id)
else:
percentage = None
query &= (dtable.parameter_id == parameter_id)
data_rows = db(query).select(*fields)
if total_id:
# Separate out the rows relating to the Totals
total_rows = data_rows.exclude(lambda row: row.parameter_id == total_id)
# Get each record and store them in a dict keyed on the start date
# of the aggregated period. If a record already exists for the
# reporting period then the most recent value will be stored.
earliest_period = current.request.utcnow.date()
end_date = year_end
totals = {}
for row in total_rows:
row_date = row.date
(start_date, end_date) = aggregated_period(row_date)
if start_date in totals:
if row_date <= totals[start_date]["date"]:
# The indicator in the row is of the same time period as
# another which is already stored in totals but it is earlier
# so ignore this particular record
continue
elif start_date < earliest_period:
earliest_period = start_date
# Store the record from the db in the totals storage
totals[start_date] = Storage(date = row_date,
id = row.data_id,
value = row.value)
# Get each record and store them in a dict keyed on the start date
# of the aggregated period. If a record already exists for the
# reporting period then the most recent value will be stored.
earliest_period = start_date
end_date = year_end
data = {}
data[start_date] = Storage(date = date,
id = data_id,
value = record["value"])
for row in data_rows:
if row.data_id == data_id:
# This is the record we started with, so skip
continue
row_date = row.date
(start_date, end_date) = aggregated_period(row_date)
if start_date in data:
if row_date <= data[start_date]["date"]:
# The indicator in the row is of the same time period as
# another which is already stored in data but it is earlier
# so ignore this particular record
continue
elif start_date < earliest_period:
earliest_period = start_date
# Store the record from the db in the data storage
data[start_date] = Storage(date = row_date,
id = row.data_id,
value = row.value)
# Get all the aggregate records for this parameter and location
query = (atable.location_id == location_id) & \
(atable.parameter_id == parameter_id)
aggr_rows = db(query).select(atable.id,
atable.agg_type,
atable.date,
atable.end_date,
atable.sum,
)
aggr = {}
for row in aggr_rows:
(start_date, end_date) = aggregated_period(row.date)
aggr[start_date] = Storage(id = row.id,
type = row.agg_type,
end_date = row.end_date,
sum = row.sum,
)
# Step through each period and check that aggr is correct
last_data_period = earliest_period
last_type_agg = False # Whether the type of previous non-copy record was aggr
last_data_value = None # The value of the previous aggr record
last_total = None # The value of the previous aggr record for the totals param
# Keep track of which periods the aggr record has been changed in
# the database
changed_periods = []
for dt in rrule(YEARLY, dtstart=earliest_period, until=last_period):
# Calculate the end of the dt period.
# - it will be None if this is the last period
dt = dt.date()
if dt != last_period:
(start_date, end_date) = aggregated_period(dt)
else:
start_date = dt
end_date = None
if dt in aggr:
# Check that the stored aggr data is correct
agg_type = aggr[dt]["type"]
if agg_type == 2:
# This is built using other location aggregates
# so it can be ignored because only time or copy aggregates
# are being calculated in this function
last_type_agg = True
last_data_value = aggr[dt]["sum"]
continue
# Query to use to update aggr records
query = (atable.id == aggr[dt]["id"])
if agg_type == 3:
# This is a copy aggregate
if dt in data:
# There is data in the data dictionary for this period
# so aggregate record needs to be changed
value = data[dt]["value"]
last_data_value = value
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 1, # time
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
elif last_type_agg:
# No data in the data dictionary and the last type was aggr
continue
# Check that the data currently stored is correct
elif aggr[dt]["sum"] != last_data_value:
value = last_data_value
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 3, # copy
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
elif agg_type == 1:
# The value in the aggr should match the value in data
if dt in data:
value = data[dt]["value"]
last_data_value = value
if total_id and dt in totals:
last_total = totals[dt]["value"]
if aggr[dt]["sum"] != value:
if total_id and last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 1, # time
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
else:
# The data is not there so it must have been deleted
# Copy the value from the previous record
value = last_data_value
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 3, # copy
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
# No aggregate record for this time period exists
# So one needs to be inserted
else:
if dt in data:
value = data[dt]["value"]
agg_type = 1 # time
last_data_value = value
else:
value = last_data_value
agg_type = 3 # copy
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
atable.insert(parameter_id = parameter_id,
location_id = location_id,
agg_type = agg_type,
#reported_count = 1, # one record
#ward_count = 1, # one ward
date = start_date,
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
# End of loop through each time period
if changed_periods == []:
continue
# The following structures are used in the OPTIMISATION step later
location = db(gtable.id == location_id).select(gtable.level,
limitby=(0, 1)
).first()
loc_level_list[location_id] = location.level
if parameter_id not in param_location_dict:
param_location_dict[parameter_id] = {location_id : changed_periods}
elif location_id not in param_location_dict[parameter_id]:
param_location_dict[parameter_id][location_id] = changed_periods
else:
# Store the older of the changed periods (the end will always be None)
# Only need to check the start date of the first period
if changed_periods[0][0] < param_location_dict[parameter_id][location_id][0][0]:
param_location_dict[parameter_id][location_id] = changed_periods
if location_id not in location_dict:
location_dict[location_id] = changed_periods
else:
# Store the older of the changed periods (the end will always be None)
# Only need to check the start date of the first period
if changed_periods[0][0] < location_dict[location_id][0][0]:
location_dict[location_id] = changed_periods
# End of loop through each stats_demographic_data record
# OPTIMISATION
# The following code will get all the locations for which a parameter
# has been changed. This will remove duplicates which will occur when
# items are being imported for many communes in the same district.
# Take an import of 12 communes in the same district, without this the
# district will be updated 12 times, the province will be updated 12
# times and the country will be updated 12 times that is 33 unnecessary
# updates (for each time period) (i.e. 15 updates rather than 48)
# Get all the parents
# @ToDo: Optimise by rewriting as custom routine rather than using this wrapper
# - we only need immediate children not descendants, so can use parent not path
# - look at disease_stats_update_aggregates()
parents = {}
get_parents = current.gis.get_parents
for loc_id in location_dict.keys():
_parents = get_parents(loc_id)
if parents:
parents[loc_id] = _parents
# Expand the list of locations for each parameter
parents_data = {}
for (param_id, loc_dict) in param_location_dict.items():
for (loc_id, periods) in loc_dict.items():
if loc_id in parents: # There won't be a parent if this is a L0
for p_loc_row in parents[loc_id]:
p_loc_id = p_loc_row.id
if param_id in parents_data:
if p_loc_id in parents_data[param_id]:
# Store the older of the changed periods (the end will always be None)
# Only need to check the start date of the first period
if periods[0][0] < parents_data[param_id][p_loc_id][0][0][0]:
parents_data[param_id][p_loc_id][0] = periods
else:
parents_data[param_id][p_loc_id] = [periods,
loc_level_list[loc_id]
]
else:
parents_data[param_id] = {p_loc_id : [periods,
loc_level_list[loc_id]
]
}
# Now that the time aggregate types have been set up correctly,
# fire off requests for the location aggregates to be calculated
async = current.s3task.async
for (param_id, loc_dict) in parents_data.items():
total_id = param_total_dict[param_id]
for (loc_id, (changed_periods, loc_level)) in loc_dict.items():
for (start_date, end_date) in changed_periods:
s, e = str(start_date), str(end_date)
async("stats_demographic_update_aggregate_location",
args = [loc_level, loc_id, param_id, total_id, s, e],
timeout = 1800 # 30m
)
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_update_location_aggregate(location_level,
location_id,
parameter_id,
total_id,
start_date,
end_date
):
"""
Calculates the stats_demographic_aggregate for a specific parameter at a
specific location.
@param location_id: the location record ID
@param parameter_id: the parameter record ID
@param total_id: the parameter record ID for the percentage calculation
@param start_date: the start date of the time period (as string)
@param end_date: the end date of the time period (as string)
"""
db = current.db
dtable = current.s3db.stats_demographic_data
atable = db.stats_demographic_aggregate
# Get all the child locations (immediate children only, not all descendants)
child_locations = current.gis.get_children(location_id, location_level)
child_ids = [row.id for row in child_locations]
# Get the most recent stats_demographic_data record for all child locations
query = (dtable.parameter_id == parameter_id) & \
(dtable.deleted != True) & \
(dtable.location_id.belongs(child_ids)) & \
(dtable.approved_by != None)
# @ToDo: deployment_setting for whether records need to be approved
# query &= (dtable.approved_by != None)
if end_date == "None": # converted to string as async parameter
end_date = None
else:
query &= (dtable.date <= end_date)
rows = db(query).select(dtable.value,
dtable.date,
dtable.location_id,
orderby=(dtable.location_id, ~dtable.date),
# groupby avoids duplicate records for the same
# location, but is slightly slower than just
# skipping the duplicates in the loop below
#groupby=(dtable.location_id)
)
# Get the most recent aggregate for this location for the total parameter
#if total_id == "None": # converted to string as async parameter
# total_id = None
# Collect the values, skip duplicate records for the
# same location => use the most recent one, which is
# the first row for each location as per the orderby
# in the query above
last_location = None
values = []
append = values.append
for row in rows:
new_location_id = row.location_id
if new_location_id != last_location:
last_location = new_location_id
append(row.value)
# Aggregate the values
values_len = len(values)
if not values_len:
return
values_sum = sum(values)
#values_min = min(values)
#values_max = max(values)
#values_avg = float(values_sum) / values_len
percentage = 100 * values_sum / values_total
values_percentage = round(percentage, 3)
#from numpy import median
#values_med = median(values)
#values_mad = median([abs(v - values_med) for v in values])
# Add or update the aggregated values in the database
# Do we already have a record?
query = (atable.location_id == location_id) & \
(atable.parameter_id == parameter_id) & \
(atable.date == start_date) & \
(atable.end_date == end_date)
exists = db(query).select(atable.id, limitby=(0, 1)).first()
attr = dict(agg_type = 2, # Location
#reported_count = values_len,
#ward_count = len(child_ids),
#min = values_min,
#max = values_max,
#mean = values_avg,
#median = values_med,
#mad = values_mad,
sum = values_sum,
percentage = values_percentage,
)
if exists:
# Update
db(query).update(**attr)
else:
# Insert new
atable.insert(parameter_id = parameter_id,
location_id = location_id,
date = start_date,
end_date = end_date,
**attr
)
# =============================================================================
def stats_demographic_data_controller():
    """
        Function to be called from controller functions
        - display all demographic data for a location as a tab.
        - options.s3json lookups for AddResourceLink
    """

    request = current.request
    if "options.s3json" in request.args:
        # options.s3json lookups for AddResourceLink
        return current.rest_controller("stats", "demographic_data")

    # Only viewing is valid
    get_vars = request.get_vars
    if "viewing" not in get_vars:
        error = current.xml.json_message(False, 400, message="viewing not in vars")
        raise HTTP(400, error)
    viewing = get_vars.viewing
    if "." not in viewing:
        error = current.xml.json_message(False, 400, message="viewing needs a period")
        raise HTTP(400, error)
    tablename, record_id = viewing.split(".", 1)

    # Look up the location of the viewed record
    s3db = current.s3db
    table = s3db[tablename]
    location_id = current.db(table.id == record_id).select(table.location_id,
                                                           limitby=(0, 1)
                                                           ).first().location_id

    # Restrict the data to that location & fix it for new records
    s3 = current.response.s3
    dtable = s3db.stats_demographic_data
    field = dtable.location_id
    s3.filter = (field == location_id)
    field.default = location_id
    field.readable = field.writable = False

    # Post-process
    def postp(r, output):
        # Use the viewed record's CRUD title for the HTML page
        if r.representation == "html":
            output["title"] = s3.crud_strings[tablename].title_display
        return output
    s3.postp = postp

    rheader = s3db.project_rheader if tablename == "project_location" else None

    return current.rest_controller("stats", "demographic_data",
                                   rheader = rheader,
                                   )
# =============================================================================
class S3StatsImpactModel(S3Model):
    """
        Used to record Impacts of Events &/or Incidents
        - might link to Assessments module in future
    """

    names = ("stats_impact",
             "stats_impact_type",
             "stats_impact_id",
             )

    def model(self):
        """
            Define the stats_impact_type & stats_impact tables

            @return: dict of names passed back to the global s3.* scope
        """

        T = current.T

        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link

        # ---------------------------------------------------------------------
        # Impact Types
        #
        tablename = "stats_impact_type"
        define_table(tablename,
                     # Instance
                     super_link("doc_id", "doc_entity"),
                     super_link("parameter_id", "stats_parameter"),
                     Field("name",
                           label = T("Name"),
                           ),
                     s3_comments(),
                     *s3_meta_fields())

        ADD_IMPACT_TYPE = T("Add Impact Type")
        crud_strings[tablename] = Storage(
            label_create=ADD_IMPACT_TYPE,
            title_display=T("Impact Type Details"),
            title_list=T("Impact Types"),
            title_update=T("Edit Impact Type"),
            #title_upload=T("Import Impact Types"),
            label_list_button=T("Impact Types"),
            label_delete_button=T("Delete Impact Type"),
            msg_record_created=T("Impact Type added"),
            msg_record_modified=T("Impact Type updated"),
            msg_record_deleted=T("Impact Type deleted"),
            msg_list_empty=T("No Impact Types defined"))

        # Resource Configuration
        configure(tablename,
                  deduplicate = self.stats_impact_type_duplicate,
                  super_entity = ("doc_entity", "stats_parameter"),
                  )

        represent = S3Represent(lookup=tablename)

        # ---------------------------------------------------------------------
        # Impact
        #
        tablename = "stats_impact"
        define_table(tablename,
                     # Instance
                     super_link("data_id", "stats_data"),
                     # Instance (link to Photos/Reports)
                     super_link("doc_id", "doc_entity"),
                     # This is a component, so needs to be a super_link
                     # - can't override field name, ondelete or requires
                     super_link("parameter_id", "stats_parameter",
                                label = T("Impact Type"),
                                instance_types = ("stats_impact_type",),
                                represent = S3Represent(lookup="stats_parameter"),
                                readable = True,
                                writable = True,
                                empty = False,
                                comment = S3PopupLink(c = "stats",
                                                      f = "impact_type",
                                                      vars = {"child": "parameter_id"},
                                                      title = ADD_IMPACT_TYPE,
                                                      ),
                                ),
                     Field("value", "double",
                           label = T("Value"),
                           represent = lambda v: \
                            IS_FLOAT_AMOUNT.represent(v, precision=2),
                           requires = IS_NOT_EMPTY(),
                           ),
                     #self.gis_location_id(),
                     s3_comments(),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            label_create=T("Add Impact"),
            title_display=T("Impact Details"),
            title_list=T("Impacts"),
            title_update=T("Edit Impact"),
            title_upload=T("Import Impacts"),
            label_list_button=T("Impacts"),
            label_delete_button=T("Delete Impact"),
            msg_record_created=T("Impact added"),
            msg_record_modified=T("Impact updated"),
            msg_record_deleted=T("Impact deleted"),
            msg_list_empty=T("No Impacts defined"))

        filter_widgets = [S3OptionsFilter("parameter_id",
                                          label = T("Type"),
                                          # Doesn't support Translation
                                          #represent = "%(name)s",
                                          ),
                          ]

        # Reusable Field
        # Fixed: previously passed an unbound name "db" to IS_ONE_OF_EMPTY
        # (NameError - "db" is never defined in this method; sibling models
        # use current.db)
        impact_id = S3ReusableField("impact_id", "reference %s" % tablename,
                                    label = T("Impact"),
                                    ondelete = "CASCADE",
                                    represent = S3Represent(lookup=tablename),
                                    requires = IS_EMPTY_OR(
                                        IS_ONE_OF_EMPTY(current.db, "stats_impact.id")),
                                    )

        configure(tablename,
                  filter_widgets = filter_widgets,
                  super_entity = ("doc_entity", "stats_data"),
                  )

        # Pass names back to global scope (s3.*)
        return dict(stats_impact_id = impact_id,
                    )

    # ---------------------------------------------------------------------
    @staticmethod
    def stats_impact_type_duplicate(item):
        """
            Deduplication of Impact Type
            - case-insensitive match on the name
        """

        name = item.data.get("name", None)
        if not name:
            return

        table = item.table
        query = (table.name.lower() == name.lower())
        duplicate = current.db(query).select(table.id,
                                             limitby=(0, 1)).first()
        if duplicate:
            item.id = duplicate.id
            item.method = item.METHOD.UPDATE
# =============================================================================
class S3StatsPeopleModel(S3Model):
    """
        Used to record people in the CRMT (Community Resilience Mapping Tool) template

        @ToDo: Deprecate
    """

    names = ("stats_people",
             "stats_people_type",
             "stats_people_group",
             )

    def model(self):
        """
            Define the stats_people_type, stats_people and
            stats_people_group (link) tables
        """

        T = current.T

        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link

        # ---------------------------------------------------------------------
        # Type of Peoples
        #
        tablename = "stats_people_type"
        define_table(tablename,
                     # Instance
                     super_link("doc_id", "doc_entity"),
                     super_link("parameter_id", "stats_parameter"),
                     Field("name",
                           label = T("Name"),
                           ),
                     s3_comments(),
                     *s3_meta_fields())

        ADD_PEOPLE_TYPE = T("Add Type of People")
        crud_strings[tablename] = Storage(
            label_create=ADD_PEOPLE_TYPE,
            title_display=T("Type of People Details"),
            title_list=T("Type of Peoples"),
            title_update=T("Edit Type of People"),
            #title_upload=T("Import Type of Peoples"),
            label_list_button=T("Type of Peoples"),
            label_delete_button=T("Delete Type of People"),
            msg_record_created=T("Type of People added"),
            msg_record_modified=T("Type of People updated"),
            msg_record_deleted=T("Type of People deleted"),
            msg_list_empty=T("No Type of Peoples defined"))

        # Resource Configuration
        configure(tablename,
                  deduplicate = self.stats_people_type_duplicate,
                  super_entity = ("doc_entity", "stats_parameter"),
                  )

        represent = S3Represent(lookup=tablename)

        # ---------------------------------------------------------------------
        # People
        #
        tablename = "stats_people"
        define_table(tablename,
                     # Instance
                     super_link("data_id", "stats_data"),
                     # Instance (link to Photos)
                     super_link("doc_id", "doc_entity"),
                     Field("name", #notnull=True,
                           label = T("Name"),
                           ),
                     # This is a component, so needs to be a super_link
                     # - can't override field name, ondelete or requires
                     super_link("parameter_id", "stats_parameter",
                                label = T("Type of People"),
                                instance_types = ("stats_people_type",),
                                represent = S3Represent(lookup="stats_parameter"),
                                readable = True,
                                writable = True,
                                empty = False,
                                comment = S3PopupLink(c = "stats",
                                                      f = "people_type",
                                                      vars = {"child": "parameter_id"},
                                                      title = ADD_PEOPLE_TYPE,
                                                      ),
                                ),
                     Field("value", "integer",
                           label = T("Number of People"),
                           represent = IS_INT_AMOUNT.represent,
                           requires = IS_INT_IN_RANGE(0, 999999),
                           ),
                     self.gis_location_id(label = T("Address"),
                                          ),
                     self.pr_person_id(label = T("Contact Person"),
                                       requires = IS_ADD_PERSON_WIDGET2(allow_empty=True),
                                       widget = S3AddPersonWidget2(controller="pr"),
                                       ),
                     s3_comments(),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            label_create=T("Add People"),
            title_display=T("People Details"),
            title_list=T("People"),
            title_update=T("Edit People"),
            title_upload=T("Import People"),
            label_list_button=T("People"),
            label_delete_button=T("Delete People"),
            msg_record_created=T("People added"),
            msg_record_modified=T("People updated"),
            msg_record_deleted=T("People deleted"),
            msg_list_empty=T("No People defined"))

        filter_widgets = [S3OptionsFilter("people_group.group_id",
                                          label = T("Coalition"),
                                          represent = "%(name)s",
                                          ),
                          S3OptionsFilter("parameter_id",
                                          label = T("Type"),
                                          # Doesn't support Translation
                                          #represent = "%(name)s",
                                          ),
                          ]

        configure(tablename,
                  filter_widgets = filter_widgets,
                  super_entity = ("doc_entity", "stats_data"),
                  )

        # Components
        self.add_components(tablename,
                            # Coalitions
                            org_group = {"link": "stats_people_group",
                                         "joinby": "people_id",
                                         "key": "group_id",
                                         "actuate": "hide",
                                         },
                            # Format for InlineComponent/filter_widget
                            stats_people_group = "people_id",
                            )

        # Rebound for the link table's people_id field below
        represent = S3Represent(lookup=tablename)

        # ---------------------------------------------------------------------
        # People <> Coalitions link table
        #
        tablename = "stats_people_group"
        define_table(tablename,
                     Field("people_id", "reference stats_people",
                           requires = IS_ONE_OF(current.db, "stats_people.id",
                                                represent,
                                                sort=True,
                                                ),
                           represent = represent,
                           ),
                     self.org_group_id(empty=False),
                     *s3_meta_fields())

        # Pass names back to global scope (s3.*)
        # - nothing needed outside this model
        return {}

    # ---------------------------------------------------------------------
    @staticmethod
    def stats_people_type_duplicate(item):
        """
            Deduplication of Type of Peoples
            - case-insensitive match on the name
        """

        name = item.data.get("name", None)
        if not name:
            return

        table = item.table
        query = (table.name.lower() == name.lower())
        duplicate = current.db(query).select(table.id,
                                             limitby=(0, 1)).first()
        if duplicate:
            item.id = duplicate.id
            item.method = item.METHOD.UPDATE
# =============================================================================
def stats_quantile(data, q):
    """
        Return the specified quantile(s) q of the supplied list.

        The function can be called with either a single value for q or a
        list of values. In the latter case, the returned value is a tuple.
    """
    ordered = sorted(data)
    last = len(ordered) - 1

    def _single(quantile):
        # Fractional position of this quantile within the sorted data
        position = last * quantile
        if abs(position - int(position) - 0.5) < 0.1:
            # Position falls (nearly) midway between two values: average them
            lower = int(position)
            return (ordered[lower] + ordered[lower + 1]) * 0.5
        # Otherwise round to the nearest value
        return ordered[int(position + 0.5)]

    if hasattr(q, "__iter__"):
        return tuple(_single(value) for value in q)
    return _single(q)
# =============================================================================
def stats_year(row, tablename):
    """
        Function to calculate computed field for stats_data
        - returns the year(s) covered by this entry as a list of ints

        @param row: a dict of the Row
        @param tablename: the instance table the Row belongs to
        @raise AttributeError: if neither date nor end_date is available
                               (partial update: keep the stored value)
    """
    # Sentinel distinguishing "field absent from this (partial) row"
    # from "field present but empty" (None/False)
    NOT_PRESENT = lambda: None
    try:
        start_date = row["date"]
    except AttributeError:
        start_date = NOT_PRESENT
    try:
        end_date = row["end_date"]
    except AttributeError:
        end_date = NOT_PRESENT
    if start_date is NOT_PRESENT or end_date is NOT_PRESENT:
        if tablename == "project_beneficiary":
            # Fallback to the Project's
            try:
                project_id = row["project_id"]
            except KeyError:
                # NOTE(review): the date lookups above catch AttributeError
                # while this one catches KeyError - presumably both can be
                # raised by the Row item access; confirm against web2py Rows
                pass
            else:
                table = current.s3db.project_project
                project = current.db(table.id == project_id).select(table.start_date,
                                                                    table.end_date,
                                                                    limitby=(0, 1)
                                                                    ).first()
                if project:
                    # Fill in only the ends that were missing from the row
                    if start_date is NOT_PRESENT:
                        start_date = project.start_date
                    if end_date is NOT_PRESENT:
                        end_date = project.end_date
    if start_date is NOT_PRESENT and end_date is NOT_PRESENT:
        # Partial record update without dates => let it fail so
        # we do not override the existing value
        raise AttributeError("no data available")
    if not start_date and not end_date:
        # Both fields present but empty: no year can be computed
        return []
    elif end_date is NOT_PRESENT or not end_date:
        return [start_date.year]
    elif start_date is NOT_PRESENT or not start_date :
        return [end_date.year]
    else:
        # Inclusive range of years from start to end
        return list(xrange(start_date.year, end_date.year + 1))
# =============================================================================
def stats_year_options(tablename):
    """
        Returns a dict of the options for the year computed field
        used by the filter widget

        orderby needed for postgres

        @param tablename: the stats_data instance table to scan
        @return: {year: year} covering the range of the records'
                 date/end_date fields
    """
    db = current.db
    s3db = current.s3db
    table = s3db[tablename]
    # @ToDo: use auth.s3_accessible_query
    query = (table.deleted == False)
    min_field = table.date.min()
    start_date_min = db(query).select(min_field,
                                      orderby=min_field,
                                      limitby=(0, 1)).first()[min_field]
    max_field = table.end_date.max()
    end_date_max = db(query).select(max_field,
                                    orderby=max_field,
                                    limitby=(0, 1)).first()[max_field]
    if tablename == "project_beneficiary":
        # Use the Project's Years as well, as the dates may not be filled in the project_beneficiary table
        ptable = s3db.project_project
        pquery = (ptable.deleted == False)
        pmin = ptable.start_date.min()
        pmax = ptable.end_date.max()
        p_start_date_min = db(pquery).select(pmin,
                                             orderby=pmin,
                                             limitby=(0, 1)).first()[pmin]
        p_end_date_max = db(pquery).select(pmax,
                                           orderby=pmax,
                                           limitby=(0, 1)).first()[pmax]
        if p_start_date_min and start_date_min:
            start_year = min(p_start_date_min,
                             start_date_min).year
        else:
            start_year = (p_start_date_min and p_start_date_min.year) or \
                         (start_date_min and start_date_min.year)
        if p_end_date_max and end_date_max:
            end_year = max(p_end_date_max,
                           end_date_max).year
        else:
            end_year = (p_end_date_max and p_end_date_max.year) or \
                       (end_date_max and end_date_max.year)
    else:
        start_year = start_date_min and start_date_min.year
        end_year = end_date_max and end_date_max.year
    if not start_year or not end_year:
        # Only one (or neither) end of the range is known:
        # return whichever single year is available.
        # Fixed from `{start_year:start_year} or {end_year:end_year}`:
        # a non-empty dict is always truthy, so that expression always
        # returned the first dict - yielding {None: None} when only
        # end_year was known.
        if start_year:
            return {start_year: start_year}
        return {end_year: end_year}
    years = {}
    for year in xrange(start_year, end_year + 1):
        years[year] = year
    return years
# =============================================================================
class stats_SourceRepresent(S3Represent):
    """ Representation of Stats Sources """

    def __init__(self,
                 translate = False,
                 show_link = False,
                 multiple = False,
                 ):
        # Links require the instance-type info, hence a custom row lookup
        if show_link:
            # Need a custom lookup
            self.lookup_rows = self.custom_lookup_rows
        # Need a custom representation
        fields = ["name"]
        super(stats_SourceRepresent,
              self).__init__(lookup="stats_source",
                             fields=fields,
                             show_link=show_link,
                             translate=translate,
                             multiple=multiple)
    # -------------------------------------------------------------------------
    def bulk(self, values, rows=None, list_type=False, show_link=True, include_blank=True):
        """
            Represent multiple values as dict {value: representation}
            @param values: list of values
            @param rows: the referenced rows (if values are foreign keys)
            @param show_link: render each representation as link
            @param include_blank: Also include a blank value
            @return: a dict {value: representation}
        """
        # Links can only be rendered when self.show_link was set too
        show_link = show_link and self.show_link
        if show_link and not rows:
            # Retrieve the rows
            rows = self.custom_lookup_rows(None, values)
        self._setup()
        # Get the values
        if rows and self.table:
            values = [row["stats_source.source_id"] for row in rows]
        else:
            values = [values] if type(values) is not list else values
        # Lookup the representations
        if values:
            labels = self._lookup(values, rows=rows)
            if show_link:
                link = self.link
                rows = self.rows
                labels = dict((k, link(k, v, rows.get(k)))
                              for k, v in labels.items())
            # Fall back to the default label for values without a row
            for v in values:
                if v not in labels:
                    labels[v] = self.default
        else:
            labels = {}
        if include_blank:
            labels[None] = self.none
        return labels
    # -------------------------------------------------------------------------
    def custom_lookup_rows(self, key, values, fields=[]):
        """
            Custom lookup method for site rows, does a
            left join with any instance_types found. Parameters
            key and fields are not used, but are kept for API
            compatibility reasons.
            @param values: the site IDs

            NOTE(review): fields=[] is a mutable default argument; it is
            rebound (not mutated) below, so harmless here, but fragile.
        """
        db = current.db
        s3db = current.s3db
        stable = s3db.stats_source
        qty = len(values)
        if qty == 1:
            query = (stable.id == values[0])
            limitby = (0, 1)
        else:
            query = (stable.id.belongs(values))
            limitby = (0, qty)
        if self.show_link:
            # We need the instance_type IDs
            # Do a first query to see which instance_types we have
            rows = db(query).select(stable.instance_type,
                                    limitby=limitby)
            instance_types = []
            for row in rows:
                if row.instance_type not in instance_types:
                    instance_types.append(row.instance_type)
            # Now do a second query which left-joins with all the instance tables we have
            fields = [stable.source_id,
                      stable.name,
                      ]
            left = []
            for instance_type in instance_types:
                table = s3db[instance_type]
                fields.append(table.id)
                left.append(table.on(table.source_id == stable.source_id))
                if instance_type == "doc_document":
                    # We need the URL
                    fields.append(table.url)
            rows = db(query).select(*fields,
                                    left=left,
                                    limitby=limitby)
        else:
            # Normal lookup
            rows = db(query).select(stable.source_id,
                                    stable.name,
                                    limitby=limitby)
        # Track query count for S3Represent's bookkeeping
        self.queries += 1
        return rows
    # -------------------------------------------------------------------------
    def link(self, k, v, row=None):
        """
            Represent a (key, value) as hypertext link.
            @param k: the key (site_id)
            @param v: the representation of the key
            @param row: the row with this key
        """
        if row:
            try:
                url = row["doc_document.url"]
            except AttributeError:
                # No doc_document joined in: plain text representation
                return v
            else:
                if url:
                    return A(v, _href=url, _target="blank")
        # We have no way to determine the linkto
        return v
    # -------------------------------------------------------------------------
    def represent_row(self, row):
        """
            Represent a single Row
            @param row: the org_site Row
        """
        name = row["stats_source.name"]
        if not name:
            return self.default
        return s3_unicode(name)
# END =========================================================================
|
danielberndt/Feature_Langpop | refs/heads/master | tests/helpers.py | 3 | """
helpers.py: Helper functions for testing
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
def strip_wrappers(fn):
    """ For decorated fn, return the function with stripped decorator(s).

    Walks the function's closure cells looking for a callable (simple
    decorators close over the wrapped function) and recurses until a
    closure-free callable is found.

    :param fn:  the (possibly decorated) function
    :returns:   the innermost wrapped callable, or ``fn`` itself if it
                has no closure containing a callable
    """
    # ``__closure__`` exists on both Python 2.6+ and Python 3
    # (``fn.func_closure`` was Python-2-only)
    closure = getattr(fn, '__closure__', None)
    if not closure:
        return fn
    for cell in closure:
        contents = cell.cell_contents
        if hasattr(contents, '__call__'):
            return strip_wrappers(contents)
    return fn
|
SFPD/rlreloaded | refs/heads/master | domain_data/mujoco_worlds/make_xml.py | 1 | import re
def do_substitution(in_lines):
    """ Expand $(expr) templates in in_lines.

    The input is split at the first line starting with "---": the lines
    before it are executed as Python definitions, and each following line
    is emitted with every $(expr) occurrence replaced by str() of expr
    evaluated against those definitions.

    :param in_lines: iterable of template lines
    :returns: list of output lines with substitutions applied
    :raises RuntimeError: if no "---" separator line is found
    """
    lines_iter = iter(in_lines)

    # Collect the definition section (everything before "---")
    defn_lines = []
    while True:
        try:
            line = next(lines_iter)  # py2.6+/py3 compatible (was .next())
        except StopIteration:
            raise RuntimeError("didn't find line starting with ---")
        if line.startswith('---'):
            break
        else:
            defn_lines.append(line)

    # Evaluate the definitions into a namespace for the substitutions
    d = {}
    exec("\n".join(defn_lines), d)

    # Raw string so the regex escapes are passed through literally
    pat = re.compile(r"\$\((.+?)\)")
    out_lines = []
    for line in lines_iter:
        matches = pat.finditer(line)
        for m in matches:
            line = line.replace(m.group(0), str(eval(m.group(1), d)))
        out_lines.append(line)

    return out_lines
from glob import glob
import os.path as osp

# Expand every *.xml.in template next to this script into the
# corresponding *.xml file (same name with the ".in" suffix dropped).
template_dir = osp.dirname(__file__)
for template_path in glob(osp.join(template_dir, "*.xml.in")):
    with open(template_path, "r") as infile:
        template_lines = infile.readlines()
    expanded = do_substitution(template_lines)
    output_path = template_path[:-3]  # strip trailing ".in"
    with open(output_path, "w") as outfile:
        outfile.writelines(expanded)
|
bbsan2k/nzbToMedia | refs/heads/nightly | libs/requests/packages/urllib3/contrib/pyopenssl.py | 488 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
try:
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
    'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
    # Swap in the PyOpenSSL socket wrapper and advertise SNI support
    connection.ssl_wrap_socket = ssl_wrap_socket
    util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
    'Undo monkey-patching by :func:`inject_into_urllib3`.'
    # Restore the original hook and SNI flag captured at import time
    connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
    util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
    '''ASN.1 implementation for subjectAltNames support'''

    # Overrides the ndg-httpsclient base class constraint.
    # There is no limit to how many SAN certificates a certificate may have,
    # however this needs to have some limit so we'll set an arbitrarily high
    # limit.
    sizeSpec = univ.SequenceOf.sizeSpec + \
        constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
    # Return the list of dNSName strings from the certificate's
    # subjectAltName extension (empty list if SAN parsing unsupported).
    # Search through extensions
    dns_name = []
    if not SUBJ_ALT_NAME_SUPPORT:
        return dns_name
    general_names = SubjectAltName()
    for i in range(peer_cert.get_extension_count()):
        ext = peer_cert.get_extension(i)
        ext_name = ext.get_short_name()
        if ext_name != 'subjectAltName':
            continue
        # PyOpenSSL returns extension data in ASN.1 encoded form
        ext_dat = ext.get_data()
        decoded_dat = der_decoder.decode(ext_dat,
                                         asn1Spec=general_names)
        # der_decoder.decode returns (value, remaining-bytes); only the
        # SubjectAltName instances in it are of interest here
        for name in decoded_dat:
            if not isinstance(name, SubjectAltName):
                continue
            for entry in range(len(name)):
                component = name.getComponentByPosition(entry)
                if component.getName() != 'dNSName':
                    continue
                dns_name.append(str(component.getComponent()))
    return dns_name
class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.
    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''
    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        self.connection = connection
        self.socket = socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Reference count of outstanding makefile() file objects; the
        # socket is only really closed once this drops below 1
        self._makefile_refs = 0
    def fileno(self):
        return self.socket.fileno()
    def makefile(self, mode, bufsize=-1):
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)
    def recv(self, *args, **kwargs):
        # Read from the TLS connection, translating PyOpenSSL's
        # exceptions into the ssl-module-compatible behaviour
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            # Peer closed without a proper TLS shutdown
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return b''
            else:
                raise
        except OpenSSL.SSL.ZeroReturnError as e:
            # Clean TLS shutdown from the peer => EOF
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            # Non-blocking socket: wait for readability, then retry
            rd, wd, ed = select.select(
                [self.socket], [], [], self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)
        else:
            return data
    def settimeout(self, timeout):
        # NOTE: the parameter shadows the imported socket.timeout here
        return self.socket.settimeout(timeout)
    def _send_until_done(self, data):
        # Retry send until OpenSSL accepts (part of) the data;
        # returns the number of bytes actually sent
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                _, wlist, _ = select.select([], [self.socket], [],
                                            self.socket.gettimeout())
                if not wlist:
                    raise timeout()
                continue
    def sendall(self, data):
        # Loop until the whole buffer has been handed to OpenSSL
        while len(data):
            sent = self._send_until_done(data)
            data = data[sent:]
    def close(self):
        if self._makefile_refs < 1:
            return self.connection.shutdown()
        else:
            self._makefile_refs -= 1
    def getpeercert(self, binary_form=False):
        # Mimic ssl.SSLSocket.getpeercert(): DER bytes in binary form,
        # otherwise a dict with 'subject' and 'subjectAltName'
        x509 = self.connection.get_peer_certificate()
        if not x509:
            return x509
        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)
        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': [
                ('DNS', value)
                for value in get_subj_alt_name(x509)
            ]
        }
    def _reuse(self):
        self._makefile_refs += 1
    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None):
    # Drop-in replacement for ssl.wrap_socket() backed by PyOpenSSL,
    # adding SNI support; returns a WrappedSocket around the connection.
    ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
    if certfile:
        keyfile = keyfile or certfile # Match behaviour of the normal python ssl library
        ctx.use_certificate_file(certfile)
    if keyfile:
        ctx.use_privatekey_file(keyfile)
    if cert_reqs != ssl.CERT_NONE:
        ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
    if ca_certs:
        try:
            ctx.load_verify_locations(ca_certs, None)
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
    else:
        # No CA bundle given: fall back to the system default paths
        ctx.set_default_verify_paths()
    # Disable TLS compression to migitate CRIME attack (issue #309)
    OP_NO_COMPRESSION = 0x20000
    ctx.set_options(OP_NO_COMPRESSION)
    # Set list of supported ciphersuites.
    ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
    cnx = OpenSSL.SSL.Connection(ctx, sock)
    # SNI: announce the target hostname during the handshake
    cnx.set_tlsext_host_name(server_hostname)
    cnx.set_connect_state()
    while True:
        try:
            cnx.do_handshake()
        except OpenSSL.SSL.WantReadError:
            # Non-blocking socket: wait until readable, then retry
            rd, _, _ = select.select([sock], [], [], sock.gettimeout())
            if not rd:
                raise timeout('select timed out')
            continue
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad handshake', e)
        break
    return WrappedSocket(cnx, sock)
|
dezynetechnologies/odoo | refs/heads/8.0 | addons/auth_oauth/controllers/main.py | 205 | import functools
import logging
import simplejson
import urlparse
import werkzeug.utils
from werkzeug.exceptions import BadRequest
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import db_monodb, ensure_db, set_cookie_and_redirect, login_and_redirect
from openerp.addons.auth_signup.controllers.main import AuthSignupHome as Home
from openerp.modules.registry import RegistryManager
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# helpers
#----------------------------------------------------------
def fragment_to_query_string(func):
    """ Decorator for OAuth callback routes: when the request arrives
        without parameters (the OAuth data lives in the URL fragment,
        which the server never sees), answer with a small JS page that
        moves the fragment into the query string and reloads, so that
        the wrapped handler receives the parameters on the next request.
    """
    @functools.wraps(func)
    def wrapper(self, *a, **kw):
        # 'debug' is transport noise, never forwarded to the handler
        kw.pop('debug', False)
        if kw:
            # Parameters present: dispatch to the real handler
            return func(self, *a, **kw)
        return """<html><head><script>
            var l = window.location;
            var q = l.hash.substring(1);
            var r = l.pathname + l.search;
            if(q.length !== 0) {
                var s = l.search ? (l.search === '?' ? '' : '&') : '?';
                r = l.pathname + l.search + s + q;
            }
            if (r == l.pathname) {
                r = '/';
            }
            window.location = r;
        </script></head><body></body></html>"""
    return wrapper
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class OAuthLogin(Home):
    # Extends the auth_signup login/signup/reset-password pages with the
    # configured OAuth providers (one login button per provider).

    def list_providers(self):
        # Return the enabled OAuth providers, each augmented with the
        # 'auth_link' URL to start the authorization flow
        try:
            provider_obj = request.registry.get('auth.oauth.provider')
            providers = provider_obj.search_read(request.cr, SUPERUSER_ID, [('enabled', '=', True), ('auth_endpoint', '!=', False), ('validation_endpoint', '!=', False)])
            # TODO in forwardport: remove conditions on 'auth_endpoint' and 'validation_endpoint' when these fields will be 'required' in model
        except Exception:
            providers = []
        for provider in providers:
            return_url = request.httprequest.url_root + 'auth_oauth/signin'
            state = self.get_state(provider)
            params = dict(
                debug=request.debug,
                response_type='token',
                client_id=provider['client_id'],
                redirect_uri=return_url,
                scope=provider['scope'],
                state=simplejson.dumps(state),
            )
            provider['auth_link'] = provider['auth_endpoint'] + '?' + werkzeug.url_encode(params)
        return providers
    def get_state(self, provider):
        # Build the OAuth 'state' payload round-tripped via the provider:
        # db name, provider id, url-quoted post-login redirect and the
        # optional signup token
        redirect = request.params.get('redirect') or 'web'
        if not redirect.startswith(('//', 'http://', 'https://')):
            redirect = '%s%s' % (request.httprequest.url_root, redirect[1:] if redirect[0] == '/' else redirect)
        state = dict(
            d=request.session.db,
            p=provider['id'],
            r=werkzeug.url_quote_plus(redirect),
        )
        token = request.params.get('token')
        if token:
            state['t'] = token
        return state
    @http.route()
    def web_login(self, *args, **kw):
        # Standard login page, plus provider buttons and translation of
        # the oauth_error codes set by OAuthController.signin()
        ensure_db()
        if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
            # Redirect if already logged in and redirect param is present
            return http.redirect_with_hash(request.params.get('redirect'))
        providers = self.list_providers()
        response = super(OAuthLogin, self).web_login(*args, **kw)
        if response.is_qweb:
            error = request.params.get('oauth_error')
            if error == '1':
                error = _("Sign up is not allowed on this database.")
            elif error == '2':
                error = _("Access Denied")
            elif error == '3':
                error = _("You do not have access to this database or your invitation has expired. Please ask for an invitation and be sure to follow the link in your invitation email.")
            else:
                error = None
            response.qcontext['providers'] = providers
            if error:
                response.qcontext['error'] = error
        return response
    @http.route()
    def web_auth_signup(self, *args, **kw):
        providers = self.list_providers()
        if len(providers) == 1:
            # Single provider: skip the chooser and redirect immediately
            werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
        response = super(OAuthLogin, self).web_auth_signup(*args, **kw)
        response.qcontext.update(providers=providers)
        return response
    @http.route()
    def web_auth_reset_password(self, *args, **kw):
        providers = self.list_providers()
        if len(providers) == 1:
            # Single provider: skip the chooser and redirect immediately
            werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
        response = super(OAuthLogin, self).web_auth_reset_password(*args, **kw)
        response.qcontext.update(providers=providers)
        return response
class OAuthController(http.Controller):
    # Endpoints hit by the OAuth provider after user authorization.

    @http.route('/auth_oauth/signin', type='http', auth='none')
    @fragment_to_query_string
    def signin(self, **kw):
        # Validate the provider's response and log the user in;
        # on failure redirect to the login page with an oauth_error code
        state = simplejson.loads(kw['state'])
        dbname = state['d']
        provider = state['p']
        context = state.get('c', {})
        registry = RegistryManager.get(dbname)
        with registry.cursor() as cr:
            try:
                u = registry.get('res.users')
                credentials = u.auth_oauth(cr, SUPERUSER_ID, provider, kw, context=context)
                cr.commit()
                # Post-login target: explicit redirect, action or menu
                action = state.get('a')
                menu = state.get('m')
                redirect = werkzeug.url_unquote_plus(state['r']) if state.get('r') else False
                url = '/web'
                if redirect:
                    url = redirect
                elif action:
                    url = '/web#action=%s' % action
                elif menu:
                    url = '/web#menu_id=%s' % menu
                return login_and_redirect(*credentials, redirect_url=url)
            except AttributeError:
                # auth_signup is not installed
                _logger.error("auth_signup not installed on database %s: oauth sign up cancelled." % (dbname,))
                url = "/web/login?oauth_error=1"
            except openerp.exceptions.AccessDenied:
                # oauth credentials not valid, user could be on a temporary session
                _logger.info('OAuth2: access denied, redirect to main page in case a valid session exists, without setting cookies')
                url = "/web/login?oauth_error=3"
                redirect = werkzeug.utils.redirect(url, 303)
                redirect.autocorrect_location_header = False
                return redirect
            except Exception, e:
                # signup error
                _logger.exception("OAuth2: %s" % str(e))
                url = "/web/login?oauth_error=2"
        return set_cookie_and_redirect(url)
    @http.route('/auth_oauth/oea', type='http', auth='none')
    def oea(self, **kw):
        """login user via Odoo Account provider"""
        dbname = kw.pop('db', None)
        if not dbname:
            dbname = db_monodb()
        if not dbname:
            return BadRequest()
        registry = RegistryManager.get(dbname)
        with registry.cursor() as cr:
            IMD = registry['ir.model.data']
            try:
                model, provider_id = IMD.get_object_reference(cr, SUPERUSER_ID, 'auth_oauth', 'provider_openerp')
            except ValueError:
                return set_cookie_and_redirect('/web?db=%s' % dbname)
            assert model == 'auth.oauth.provider'
        # Delegate to the generic signin handler with a synthetic state;
        # no_user_creation: accounts must already exist for this provider
        state = {
            'd': dbname,
            'p': provider_id,
            'c': {'no_user_creation': True},
        }
        kw['state'] = simplejson.dumps(state)
        return self.signin(**kw)
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
grib0ed0v/hse-cv-course-2017 | refs/heads/master | 6. Template Matching/drawMatches.py | 1 | import numpy as np
import cv2
def drawMatches(img1, kp1, img2, kp2, matches):
    """
    Visualize keypoint matches between two grayscale images.

    Replacement for cv2.drawMatches, which is not available in OpenCV
    2.4.9 but is supported in OpenCV 3.0.0: builds a side-by-side
    montage of img1 and img2, draws a small blue circle on every matched
    keypoint and a blue line connecting each matching pair, then
    displays the result in a window.

    img1,img2 - Grayscale images
    kp1,kp2 - Detected list of keypoints through any of the OpenCV keypoint
              detection algorithms
    matches - A list of matches of corresponding keypoints through any
              OpenCV keypoint matching algorithm
    """

    h1, w1 = img1.shape[0], img1.shape[1]
    h2, w2 = img2.shape[0], img2.shape[1]

    # Montage canvas: tall enough for either image, wide enough for both
    canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype='uint8')
    # Left side: img1 replicated into 3 channels
    canvas[:h1, :w1, :] = np.dstack([img1, img1, img1])
    # Right side: img2 replicated into 3 channels
    canvas[:h2, w1:w1 + w2, :] = np.dstack([img2, img2, img2])

    blue = (255, 0, 0)
    for match in matches:
        # Matched keypoint coordinates in each image (x = column, y = row)
        x1, y1 = kp1[match.queryIdx].pt
        x2, y2 = kp2[match.trainIdx].pt

        p1 = (int(x1), int(y1))
        # Shift the right image's x by img1's width in the montage
        p2 = (int(x2) + w1, int(y2))

        # Mark both keypoints (radius 4, thickness 1) and join them
        cv2.circle(canvas, p1, 4, blue, 1)
        cv2.circle(canvas, p2, 4, blue, 1)
        cv2.line(canvas, p1, p2, blue, 1)

    # Show the image
    cv2.imshow('Matched Features', canvas)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
noroutine/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_keystone_service.py | 25 | #!/usr/bin/python
# Copyright 2016 Sam Yaple
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_service
short_description: Manage OpenStack Identity services
extends_documentation_fragment: openstack
author: "Sam Yaple (@SamYaple)"
version_added: "2.2"
description:
- Create, update, or delete OpenStack Identity service. If a service
with the supplied name already exists, it will be updated with the
new description and enabled attributes.
options:
name:
description:
- Name of the service
required: true
description:
description:
- Description of the service
required: false
default: None
enabled:
description:
- Is the service enabled
required: false
default: True
service_type:
description:
- The type of service
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a service for glance
- os_keystone_service:
cloud: mycloud
state: present
name: glance
service_type: image
description: OpenStack Image Service
# Delete a service
- os_keystone_service:
cloud: mycloud
state: absent
name: glance
service_type: image
'''
RETURN = '''
service:
description: Dictionary describing the service.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Service ID.
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
name:
description: Service name.
type: string
sample: "glance"
service_type:
description: Service type.
type: string
sample: "image"
description:
description: Service description.
type: string
sample: "OpenStack Image Service"
enabled:
description: Service status.
type: boolean
sample: True
id:
description: The service ID.
returned: On success when I(state) is 'present'
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
'''
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs
def _needs_update(module, service):
if service.enabled != module.params['enabled']:
return True
if service.description is not None and \
service.description != module.params['description']:
return True
return False
def _system_state_change(module, service):
state = module.params['state']
if state == 'absent' and service:
return True
if state == 'present':
if service is None:
return True
return _needs_update(module, service)
return False
def main():
    """ Ansible module entry point: ensure the Keystone service identified
        by name + service_type is present (created/updated) or absent.
    """
    argument_spec = openstack_full_argument_spec(
        description=dict(default=None),
        enabled=dict(default=True, type='bool'),
        name=dict(required=True),
        service_type=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    if StrictVersion(shade.__version__) < StrictVersion('1.6.0'):
        # (fixed) the concatenated message previously read
        # "version ofthe shade library" - missing space restored
        module.fail_json(msg="To utilize this module, the installed version of "
                             "the shade library MUST be >=1.6.0")

    description = module.params['description']
    enabled = module.params['enabled']
    name = module.params['name']
    state = module.params['state']
    service_type = module.params['service_type']

    try:
        cloud = shade.operator_cloud(**module.params)

        # name + type must identify at most one service
        services = cloud.search_services(name_or_id=name,
                                         filters=dict(type=service_type))

        if len(services) > 1:
            module.fail_json(msg='Service name %s and type %s are not unique' %
                             (name, service_type))
        elif len(services) == 1:
            service = services[0]
        else:
            service = None

        if module.check_mode:
            # Report whether a change would happen, without applying it
            module.exit_json(changed=_system_state_change(module, service))

        if state == 'present':
            if service is None:
                # (fixed) honour the requested 'enabled' flag on creation;
                # it was previously hard-coded to True
                service = cloud.create_service(name=name, description=description,
                                               type=service_type, enabled=enabled)
                changed = True
            else:
                if _needs_update(module, service):
                    service = cloud.update_service(
                        service.id, name=name, type=service_type, enabled=enabled,
                        description=description)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, service=service, id=service.id)

        elif state == 'absent':
            if service is None:
                changed = False
            else:
                cloud.delete_service(service.id)
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
pepetreshere/odoo | refs/heads/patch-2 | addons/crm/tests/common.py | 1 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from odoo.addons.mail.tests.common import MailCase, mail_new_test_user
from odoo.addons.sales_team.tests.common import TestSalesCommon
from odoo.fields import Datetime
from odoo import tools
INCOMING_EMAIL = """Return-Path: {return_path}
X-Original-To: {to}
Delivered-To: {to}
Received: by mail.my.com (Postfix, from userid xxx)
id 822ECBFB67; Mon, 24 Oct 2011 07:36:51 +0200 (CEST)
X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail.my.com
X-Spam-Level:
X-Spam-Status: No, score=-1.0 required=5.0 tests=ALL_TRUSTED autolearn=ham
version=3.3.1
Received: from [192.168.1.146]
(Authenticated sender: {email_from})
by mail.customer.com (Postfix) with ESMTPSA id 07A30BFAB4
for <{to}>; Mon, 24 Oct 2011 07:36:50 +0200 (CEST)
Message-ID: {msg_id}
Date: Mon, 24 Oct 2011 11:06:29 +0530
From: {email_from}
User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.14) Gecko/20110223 Lightning/1.0b2 Thunderbird/3.1.8
MIME-Version: 1.0
To: {to}
Subject: {subject}
Content-Type: text/plain; charset=ISO-8859-1; format=flowed
Content-Transfer-Encoding: 8bit
This is an example email. All sensitive content has been stripped out.
ALL GLORY TO THE HYPNOTOAD !
Cheers,
Somebody."""
class TestCrmCommon(TestSalesCommon, MailCase):
    """Shared CRM fixture: a lead-enabled sales team with its stages, sample
    leads (one open, one won, one lost) and customer partners, plus helpers
    to create batches of leads and duplicate leads for merge/convert tests."""

    @classmethod
    def setUpClass(cls):
        super(TestCrmCommon, cls).setUpClass()
        cls._init_mail_gateway()

        # sales_team_1: give it a mail alias and enable lead management
        cls.sales_team_1.write({
            'alias_name': 'sales.test',
            'use_leads': True,
            'use_opportunities': True,
        })
        # all test salesmen get the "use leads" feature group
        (cls.user_sales_manager | cls.user_sales_leads | cls.user_sales_salesman).write({
            'groups_id': [(4, cls.env.ref('crm.group_use_lead').id)]
        })

        cls.env['crm.stage'].search([]).write({'sequence': 9999})  # ensure search will find test data first
        # stages bound to sales_team_1
        cls.stage_team1_1 = cls.env['crm.stage'].create({
            'name': 'New',
            'sequence': 1,
            'team_id': cls.sales_team_1.id,
        })
        cls.stage_team1_2 = cls.env['crm.stage'].create({
            'name': 'Proposition',
            'sequence': 5,
            'team_id': cls.sales_team_1.id,
        })
        cls.stage_team1_won = cls.env['crm.stage'].create({
            'name': 'Won',
            'sequence': 70,
            'team_id': cls.sales_team_1.id,
            'is_won': True,
        })
        # team-agnostic (generic) stages
        cls.stage_gen_1 = cls.env['crm.stage'].create({
            'name': 'Generic stage',
            'sequence': 3,
            'team_id': False,
        })
        cls.stage_gen_won = cls.env['crm.stage'].create({
            'name': 'Generic Won',
            'sequence': 30,
            'team_id': False,
            'is_won': True,
        })

        # main test lead: no partner yet, only contact name/email
        cls.lead_1 = cls.env['crm.lead'].create({
            'name': 'Nibbler Spacecraft Request',
            'type': 'lead',
            'user_id': cls.user_sales_leads.id,
            'team_id': cls.sales_team_1.id,
            'partner_id': False,
            'contact_name': 'Amy Wong',
            'email_from': 'amy.wong@test.example.com',
            'country_id': cls.env.ref('base.us').id,
        })
        # update lead_1: stage_id is not computed anymore by default for leads
        cls.lead_1.write({
            'stage_id': cls.stage_team1_1.id,
        })

        # create an history for new team
        cls.lead_team_1_won = cls.env['crm.lead'].create({
            'name': 'Already Won',
            'type': 'lead',
            'user_id': cls.user_sales_leads.id,
            'team_id': cls.sales_team_1.id,
        })
        cls.lead_team_1_won.action_set_won()
        cls.lead_team_1_lost = cls.env['crm.lead'].create({
            'name': 'Already Won',
            'type': 'lead',
            'user_id': cls.user_sales_leads.id,
            'team_id': cls.sales_team_1.id,
        })
        cls.lead_team_1_lost.action_set_lost()
        (cls.lead_team_1_won | cls.lead_team_1_lost).flush()

        # customer data: one company plus an employee contact and a standalone contact
        cls.contact_company_1 = cls.env['res.partner'].create({
            'name': 'Planet Express',
            'email': 'planet.express@test.example.com',
            'is_company': True,
            'street': '57th Street',
            'city': 'New New York',
            'country_id': cls.env.ref('base.us').id,
            'zip': '12345',
        })
        cls.contact_1 = cls.env['res.partner'].create({
            'name': 'Philip J Fry',
            'email': 'philip.j.fry@test.example.com',
            'mobile': '+1 202 555 0122',
            'title': cls.env.ref('base.res_partner_title_mister').id,
            'function': 'Delivery Boy',
            'phone': False,
            'parent_id': cls.contact_company_1.id,
            'is_company': False,
            'street': 'Actually the sewers',
            'city': 'New York',
            'country_id': cls.env.ref('base.us').id,
            'zip': '54321',
        })
        cls.contact_2 = cls.env['res.partner'].create({
            'name': 'Turanga Leela',
            'email': 'turanga.leela@test.example.com',
            'parent_id': False,
            'is_company': False,
            'street': 'Cookieville Minimum-Security Orphanarium',
            'city': 'New New York',
            'country_id': cls.env.ref('base.us').id,
            'mobile': '+1 202 555 0999',
            'zip': '97648',
        })

    def _create_leads_batch(self, lead_type='lead', count=10, partner_ids=None, user_ids=None):
        """ Helper tool method creating a batch of leads, useful when dealing
        with batch processes.

        :param string lead_type: 'lead' or 'opportunity'; any falsy value
          makes created records alternate between lead and opportunity;
        :param int count: number of leads to create;
        :param list partner_ids: optional customer ids, cycled over the
          leads; when omitted a generated email_from is set instead;
        :param list user_ids: optional salesmen ids, cycled over the leads;
        """
        types = ['lead', 'opportunity']
        leads_data = [{
            'name': 'TestLead_%02d' % (x),
            'type': lead_type if lead_type else types[x % 2],
            'priority': '%s' % (x % 3),
        } for x in range(count)]

        # customer information
        if partner_ids:
            for idx, lead_data in enumerate(leads_data):
                lead_data['partner_id'] = partner_ids[idx % len(partner_ids)]
        else:
            for idx, lead_data in enumerate(leads_data):
                lead_data['email_from'] = tools.formataddr((
                    'TestCustomer_%02d' % (idx),
                    'customer_email_%02d@example.com' % (idx)
                ))

        # salesteam information
        if user_ids:
            for idx, lead_data in enumerate(leads_data):
                lead_data['user_id'] = user_ids[idx % len(user_ids)]

        return self.env['crm.lead'].create(leads_data)

    def _create_duplicates(self, lead, create_opp=True):
        """ Helper tool method creating, based on a given lead

          * a customer (res.partner) based on lead email (to test partner finding)
            -> FIXME: using same normalized email does not work currently, only exact email works
          * a lead with same email_from
          * a lead with same email_normalized (other email_from)
          * a lead with customer but another email
          * a lost opportunity with same email_from
        """
        self.customer = self.env['res.partner'].create({
            'name': 'Lead1 Email Customer',
            'email': lead.email_from,
        })
        self.lead_email_from = self.env['crm.lead'].create({
            'name': 'Duplicate: same email_from',
            'type': 'lead',
            'team_id': lead.team_id.id,
            'email_from': lead.email_from,
        })
        # self.lead_email_normalized = self.env['crm.lead'].create({
        #     'name': 'Duplicate: email_normalize comparison',
        #     'type': 'lead',
        #     'team_id': lead.team_id.id,
        #     'stage_id': lead.stage_id.id,
        #     'email_from': 'CUSTOMER WITH NAME <%s>' % lead.email_normalized.upper(),
        # })
        self.lead_partner = self.env['crm.lead'].create({
            'name': 'Duplicate: customer ID',
            'type': 'lead',
            'team_id': lead.team_id.id,
            'partner_id': self.customer.id,
        })
        if create_opp:
            self.opp_lost = self.env['crm.lead'].create({
                'name': 'Duplicate: lost opportunity',
                'type': 'opportunity',
                'team_id': lead.team_id.id,
                'stage_id': lead.stage_id.id,
                'email_from': lead.email_from,
            })
            self.opp_lost.action_set_lost()
        else:
            # empty recordset keeps the return type uniform
            self.opp_lost = self.env['crm.lead']

        # self.assertEqual(self.lead_email_from.email_normalized, self.lead_email_normalized.email_normalized)
        # self.assertTrue(lead.email_from != self.lead_email_normalized.email_from)
        # self.assertFalse(self.opp_lost.active)

        # new_lead = self.lead_email_from | self.lead_email_normalized | self.lead_partner | self.opp_lost
        new_leads = self.lead_email_from | self.lead_partner | self.opp_lost
        new_leads.flush()  # compute notably probability
        return new_leads
class TestLeadConvertCommon(TestCrmCommon):
    """Fixture for lead-to-opportunity conversion tests: adds a second sales
    team, a stage bound to it, and patches crm_lead's Datetime so tests can
    control time-dependent fields."""

    @classmethod
    def setUpClass(cls):
        super(TestLeadConvertCommon, cls).setUpClass()
        # Sales Team organization
        # Role: M (team member) R (team manager)
        # SALESMAN---------------sales_team_1-----sales_team_convert
        # admin------------------M----------------/
        # user_sales_manager-----R----------------R
        # user_sales_leads-------M----------------/
        # user_sales_salesman----/----------------M

        # Stages Team organization
        # Name-------------------ST-------------------Sequ
        # stage_team1_1----------sales_team_1---------1
        # stage_team1_2----------sales_team_1---------5
        # stage_team1_won--------sales_team_1---------70
        # stage_gen_1------------/--------------------3
        # stage_gen_won----------/--------------------30
        # stage_team_convert_1---sales_team_convert---1

        cls.sales_team_convert = cls.env['crm.team'].create({
            'name': 'Convert Sales Team',
            'sequence': 10,
            'alias_name': False,
            'use_leads': True,
            'use_opportunities': True,
            'company_id': False,
            'user_id': cls.user_sales_manager.id,
            'member_ids': [(4, cls.user_sales_salesman.id)],
        })
        cls.stage_team_convert_1 = cls.env['crm.stage'].create({
            'name': 'New',
            'sequence': 1,
            'team_id': cls.sales_team_convert.id,
        })

        # give lead_1 a deterministic opening date for assignment tests
        cls.lead_1.write({'date_open': Datetime.from_string('2020-01-15 11:30:00')})

        # Patch the Datetime class as seen by crm_lead; the mock wraps the
        # real Datetime so subclasses may stub individual methods.
        cls.crm_lead_dt_patcher = patch('odoo.addons.crm.models.crm_lead.fields.Datetime', wraps=Datetime)
        cls.crm_lead_dt_mock = cls.crm_lead_dt_patcher.start()

    @classmethod
    def tearDownClass(cls):
        # stop the Datetime patcher before tearing down the registry
        cls.crm_lead_dt_patcher.stop()
        super(TestLeadConvertCommon, cls).tearDownClass()
class TestLeadConvertMassCommon(TestLeadConvertCommon):
    """Fixture for mass-conversion tests: an extra salesman on the convert
    team and a set of leads covering the partner/contact/email/lost cases."""

    @classmethod
    def setUpClass(cls):
        super(TestLeadConvertMassCommon, cls).setUpClass()
        # Sales Team organization
        # Role: M (team member) R (team manager)
        # SALESMAN-------------------sales_team_1-----sales_team_convert
        # admin----------------------M----------------/
        # user_sales_manager---------R----------------R
        # user_sales_leads-----------M----------------/
        # user_sales_leads_convert---/----------------M <-- NEW
        # user_sales_salesman--------/----------------M

        cls.user_sales_leads_convert = mail_new_test_user(
            cls.env, login='user_sales_leads_convert',
            name='Lucien Sales Leads Convert', email='crm_leads_2@test.example.com',
            company_id=cls.env.ref("base.main_company").id,
            notification_type='inbox',
            groups='sales_team.group_sale_salesman_all_leads,base.group_partner_manager,crm.group_use_lead',
        )
        cls.sales_team_convert.write({
            'member_ids': [(4, cls.user_sales_leads_convert.id)]
        })

        # lead linked to an individual customer, deliberately without stage
        cls.lead_w_partner = cls.env['crm.lead'].create({
            'name': 'New1',
            'type': 'lead',
            'probability': 10,
            'user_id': cls.user_sales_manager.id,
            'stage_id': False,
            'partner_id': cls.contact_1.id,
        })
        cls.lead_w_partner.write({'stage_id': False})
        # lead linked to a company customer, with extra contact info
        cls.lead_w_partner_company = cls.env['crm.lead'].create({
            'name': 'New1',
            'type': 'lead',
            'probability': 15,
            'user_id': cls.user_sales_manager.id,
            'stage_id': cls.stage_team1_1.id,
            'partner_id': cls.contact_company_1.id,
            'contact_name': 'Hermes Conrad',
            'email_from': 'hermes.conrad@test.example.com',
        })
        # lead with only a contact name (no partner, no email)
        cls.lead_w_contact = cls.env['crm.lead'].create({
            'name': 'LeadContact',
            'type': 'lead',
            'probability': 15,
            'contact_name': 'TestContact',
            'user_id': cls.user_sales_salesman.id,
            'stage_id': cls.stage_gen_1.id,
        })
        # lead identified only by an email address
        cls.lead_w_email = cls.env['crm.lead'].create({
            'name': 'LeadEmailAsContact',
            'type': 'lead',
            'probability': 15,
            'email_from': 'contact.email@test.example.com',
            'user_id': cls.user_sales_salesman.id,
            'stage_id': cls.stage_gen_1.id,
        })
        # lost (inactive) lead with an email
        cls.lead_w_email_lost = cls.env['crm.lead'].create({
            'name': 'Lost',
            'type': 'lead',
            'probability': 15,
            'email_from': 'strange.from@test.example.com',
            'user_id': cls.user_sales_leads.id,
            'stage_id': cls.stage_team1_2.id,
            'active': False,
        })
        (cls.lead_w_partner | cls.lead_w_partner_company | cls.lead_w_contact | cls.lead_w_email | cls.lead_w_email_lost).flush()
|
mrtnrdl/.macdots | refs/heads/master | scripts/bin/platform-tools/systrace/catapult/systrace/systrace/tracing_agents/agents_unittest.py | 8 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from systrace import util
from devil.android import device_utils
from devil.android.sdk import intent
from devil.android.sdk import keyevent
class BaseAgentTest(unittest.TestCase):
    """Base class for tracing-agent tests that need a running browser.

    setUp picks the first healthy attached device and ensures the
    stable-channel browser is running; tearDown force-stops it so one test
    cannot leak state (e.g. an open devtools socket) into the next.
    """

    def setUp(self):
        devices = device_utils.DeviceUtils.HealthyDevices()
        self.browser = 'stable'
        self.package_info = util.get_supported_browsers()[self.browser]
        self.device = devices[0]

        curr_browser = self.GetChromeProcessID()
        # PEP 8: compare against None with `is`, not `==` (was `== None`).
        if curr_browser is None:
            self.StartBrowser()

    def tearDown(self):
        # Stop the browser after each test to ensure that it doesn't interfere
        # with subsequent tests, e.g. by holding the devtools socket open.
        self.device.ForceStop(self.package_info.package)

    def StartBrowser(self):
        """Wake and unlock the device, then launch the browser on about:blank."""
        # Turn on the device screen.
        self.device.SetScreen(True)
        # Unlock device.
        self.device.SendKeyEvent(keyevent.KEYCODE_MENU)
        # Start browser.
        self.device.StartActivity(
            intent.Intent(activity=self.package_info.activity,
                          package=self.package_info.package,
                          data='about:blank',
                          extras={'create_new_tab': True}),
            blocking=True, force_stop=True)

    def GetChromeProcessID(self):
        """Return the first PID of the browser package, or None if not running."""
        chrome_processes = self.device.GetPids(self.package_info.package)
        if (self.package_info.package in chrome_processes and
                len(chrome_processes[self.package_info.package]) > 0):
            return chrome_processes[self.package_info.package][0]
        return None
|
resmo/ansible | refs/heads/devel | test/units/module_utils/urls/test_fetch_url.py | 30 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import socket
from ansible.module_utils.six import StringIO
from ansible.module_utils.six.moves.http_cookiejar import Cookie
from ansible.module_utils.six.moves.http_client import HTTPMessage
from ansible.module_utils.urls import fetch_url, urllib_error, ConnectionError, NoSSLError, httplib
import pytest
from mock import MagicMock
class AnsibleModuleExit(Exception):
    """Raised in place of a real module exit; captures the call's arguments."""

    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs
class ExitJson(AnsibleModuleExit):
    """Signals that exit_json() was called on the fake module."""
    pass
class FailJson(AnsibleModuleExit):
    """Signals that fail_json() was called on the fake module."""
    pass
@pytest.fixture
def open_url_mock(mocker):
    """Replace urls.open_url with a mock so no real HTTP traffic happens."""
    return mocker.patch('ansible.module_utils.urls.open_url')
@pytest.fixture
def fake_ansible_module():
    """Provide a fresh FakeAnsibleModule instance for each test."""
    return FakeAnsibleModule()
class FakeAnsibleModule:
    """Minimal AnsibleModule stand-in whose exit paths raise exceptions
    instead of terminating the process, so tests can assert on them."""

    def __init__(self):
        self.params = {}
        self.tmpdir = None

    def exit_json(self, *args, **kwargs):
        # Raised instead of exiting so tests can catch a successful exit.
        raise ExitJson(*args, **kwargs)

    def fail_json(self, *args, **kwargs):
        # Raised instead of exiting so tests can assert on failure kwargs.
        raise FailJson(*args, **kwargs)
def test_fetch_url_no_urlparse(mocker, fake_ansible_module):
    """When urlparse is unavailable, fetch_url must call fail_json."""
    mocker.patch('ansible.module_utils.urls.HAS_URLPARSE', new=False)

    with pytest.raises(FailJson):
        fetch_url(fake_ansible_module, 'http://ansible.com/')
def test_fetch_url(open_url_mock, fake_ansible_module):
    """With no module params, fetch_url forwards library defaults to open_url."""
    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')

    # cookies is a CookieJar created inside fetch_url; grab the actual object
    # from the call so the equality assertion below can use it.
    dummy, kwargs = open_url_mock.call_args

    open_url_mock.assert_called_once_with('http://ansible.com/', client_cert=None, client_key=None, cookies=kwargs['cookies'], data=None,
                                          follow_redirects='urllib2', force=False, force_basic_auth='', headers=None,
                                          http_agent='ansible-httpget', last_mod_time=None, method=None, timeout=10, url_password='', url_username='',
                                          use_proxy=True, validate_certs=True, use_gssapi=False, unix_socket=None, ca_path=None)
def test_fetch_url_params(open_url_mock, fake_ansible_module):
    """Module params (credentials, certs, redirects...) are passed to open_url."""
    fake_ansible_module.params = {
        'validate_certs': False,
        'url_username': 'user',
        'url_password': 'passwd',
        'http_agent': 'ansible-test',
        'force_basic_auth': True,
        'follow_redirects': 'all',
        'client_cert': 'client.pem',
        'client_key': 'client.key',
    }

    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')

    # cookies is created inside fetch_url; reuse the actual object in the check
    dummy, kwargs = open_url_mock.call_args

    open_url_mock.assert_called_once_with('http://ansible.com/', client_cert='client.pem', client_key='client.key', cookies=kwargs['cookies'], data=None,
                                          follow_redirects='all', force=False, force_basic_auth=True, headers=None,
                                          http_agent='ansible-test', last_mod_time=None, method=None, timeout=10, url_password='passwd', url_username='user',
                                          use_proxy=True, validate_certs=False, use_gssapi=False, unix_socket=None, ca_path=None)
def test_fetch_url_cookies(mocker, fake_ansible_module):
    """Response cookies must be exposed as dict, cookie string and raw header."""
    def make_cookies(*args, **kwargs):
        # Fake open_url: fill the passed-in cookie jar and return a response
        # mock whose headers carry matching Set-Cookie entries.
        cookies = kwargs['cookies']
        r = MagicMock()
        try:
            r.headers = HTTPMessage()
            add_header = r.headers.add_header
        except TypeError:
            # PY2
            r.headers = HTTPMessage(StringIO())
            add_header = r.headers.addheader
        r.info.return_value = r.headers
        for name, value in (('Foo', 'bar'), ('Baz', 'qux')):
            cookie = Cookie(
                version=0,
                name=name,
                value=value,
                port=None,
                port_specified=False,
                domain="ansible.com",
                domain_specified=True,
                domain_initial_dot=False,
                path="/",
                path_specified=True,
                secure=False,
                expires=None,
                discard=False,
                comment=None,
                comment_url=None,
                rest=None
            )
            cookies.set_cookie(cookie)
            add_header('Set-Cookie', '%s=%s' % (name, value))

        return r

    mocker = mocker.patch('ansible.module_utils.urls.open_url', new=make_cookies)

    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')

    assert info['cookies'] == {'Baz': 'qux', 'Foo': 'bar'}
    # Python sorts cookies in order of most specific (ie. longest) path first
    # items with the same path are reversed from response order
    assert info['cookies_string'] == 'Baz=qux; Foo=bar'
    # The key here has a `-` as opposed to what we see in the `uri` module that converts to `_`
    # Note: this is response order, which differs from cookies_string
    assert info['set-cookie'] == 'Foo=bar, Baz=qux'
def test_fetch_url_nossl(open_url_mock, fake_ansible_module, mocker):
    """NoSSLError fails the module; the python-ssl hint is RedHat-only."""
    # Non-RedHat distribution: no package hint in the failure message.
    mocker.patch('ansible.module_utils.urls.get_distribution', return_value='notredhat')
    open_url_mock.side_effect = NoSSLError
    with pytest.raises(FailJson) as excinfo:
        fetch_url(fake_ansible_module, 'http://ansible.com/')
    assert 'python-ssl' not in excinfo.value.kwargs['msg']

    # RedHat distribution: the message suggests installing python-ssl.
    mocker.patch('ansible.module_utils.urls.get_distribution', return_value='redhat')
    open_url_mock.side_effect = NoSSLError
    with pytest.raises(FailJson) as excinfo:
        fetch_url(fake_ansible_module, 'http://ansible.com/')
    failure = excinfo.value.kwargs
    assert 'python-ssl' in failure['msg']
    assert failure['url'] == 'http://ansible.com/'
    assert failure['status'] == -1
def test_fetch_url_connectionerror(open_url_mock, fake_ansible_module):
    """ConnectionError and ValueError from open_url both fail with their message."""
    for error_cls in (ConnectionError, ValueError):
        open_url_mock.side_effect = error_cls('TESTS')
        with pytest.raises(FailJson) as excinfo:
            fetch_url(fake_ansible_module, 'http://ansible.com/')
        failure = excinfo.value.kwargs
        assert failure['msg'] == 'TESTS'
        assert failure['url'] == 'http://ansible.com/'
        assert failure['status'] == -1
def test_fetch_url_httperror(open_url_mock, fake_ansible_module):
    """HTTPError is not raised: its status/body end up in the info dict."""
    open_url_mock.side_effect = urllib_error.HTTPError(
        'http://ansible.com/',
        500,
        'Internal Server Error',
        {'Content-Type': 'application/json'},
        StringIO('TESTS')
    )

    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')

    assert info == {'msg': 'HTTP Error 500: Internal Server Error', 'body': 'TESTS',
                    'status': 500, 'url': 'http://ansible.com/', 'content-type': 'application/json'}
def test_fetch_url_urlerror(open_url_mock, fake_ansible_module):
    """URLError is reported as a request failure with status -1."""
    open_url_mock.side_effect = urllib_error.URLError('TESTS')
    dummy, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
    expected = {'msg': 'Request failed: <urlopen error TESTS>', 'status': -1, 'url': 'http://ansible.com/'}
    assert info == expected
def test_fetch_url_socketerror(open_url_mock, fake_ansible_module):
    """socket.error is reported as a connection failure with status -1."""
    open_url_mock.side_effect = socket.error('TESTS')
    dummy, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
    expected = {'msg': 'Connection failure: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
    assert info == expected
def test_fetch_url_exception(open_url_mock, fake_ansible_module):
    """Any unexpected exception is reported with a traceback in info['exception']."""
    open_url_mock.side_effect = Exception('TESTS')

    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')

    # the traceback text varies; pop it and check it mentions the exception
    exception = info.pop('exception')
    assert info == {'msg': 'An unknown error occurred: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
    assert "Exception: TESTS" in exception
def test_fetch_url_badstatusline(open_url_mock, fake_ansible_module):
    """A BadStatusLine is reported as a premature connection close."""
    open_url_mock.side_effect = httplib.BadStatusLine('TESTS')
    dummy, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
    expected = {'msg': 'Connection failure: connection was closed before a valid response was received: TESTS',
                'status': -1, 'url': 'http://ansible.com/'}
    assert info == expected
|
holmes-app/holmes-api | refs/heads/master | tests/unit/validators/test_body.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import lxml.html
from mock import Mock
from preggy import expect
from holmes.config import Config
from holmes.reviewer import Reviewer
from holmes.validators.body import BodyValidator
from tests.fixtures import PageFactory
from tests.unit.base import ValidatorTestCase
class TestBodyValidator(ValidatorTestCase):
    """Unit tests for holmes' BodyValidator."""

    def test_validate(self):
        """A page whose HTML has no <body> raises the page.body.not_found violation."""
        config = Config()

        page = PageFactory.create()

        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=config,
            validators=[]
        )

        # minimal document deliberately missing a <body> element
        content = '<html></html>'

        result = {
            'url': page.url,
            'status': 200,
            'content': content,
            'html': lxml.html.fromstring(content)
        }
        reviewer.responses[page.url] = result
        reviewer.get_response = Mock(return_value=result)

        validator = BodyValidator(reviewer)
        validator.add_violation = Mock()
        validator.review.data = {
            'page.body': []
        }
        validator.validate()

        validator.add_violation.assert_called_once_with(
            key='page.body.not_found',
            value=page.url,
            points=50
        )

    def test_can_get_violation_definitions(self):
        """The validator exposes exactly the page.body.not_found definition."""
        reviewer = Mock()
        validator = BodyValidator(reviewer)

        definitions = validator.get_violation_definitions()

        expect(definitions).to_length(1)
        expect('page.body.not_found' in definitions).to_be_true()
|
7Robot/cerveau | refs/heads/master | ia/missions/gros/__init__.py | 12133432 | |
jacksonicson/paper.IS2015 | refs/heads/master | control/Control/src/filelock/__init__.py | 12133432 | |
lordmuffin/aws-cfn-plex | refs/heads/master | functions/credstash/pip/_vendor/html5lib/treewalkers/__init__.py | 354 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
from .. import constants
from .._utils import default_etree
__all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshi", "etree_lxml"]
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Return a TreeWalker class for the requested kind of tree.

    treeType is matched case-insensitively; supported values are "dom",
    "etree", "lxml" and "genshi".  For "etree", *implementation* selects the
    elementtree-compatible module to walk (defaulting to the best available
    one).  Unknown tree types yield None.
    """
    kind = treeType.lower()

    # The etree walker is specialised per implementation module, so caching
    # is delegated to the etree submodule -- never cache it here.
    if kind == "etree":
        from . import etree
        if implementation is None:
            implementation = default_etree
        return etree.getETreeModule(implementation, **kwargs).TreeWalker

    if kind not in treeWalkerCache:
        if kind == "dom":
            from . import dom
            treeWalkerCache[kind] = dom.TreeWalker
        elif kind == "genshi":
            from . import genshi
            treeWalkerCache[kind] = genshi.TreeWalker
        elif kind == "lxml":
            from . import etree_lxml
            treeWalkerCache[kind] = etree_lxml.TreeWalker
    return treeWalkerCache.get(kind)
def concatenateCharacterTokens(tokens):
    """Yield *tokens* with runs of character tokens merged into one.

    Consecutive "Characters"/"SpaceCharacters" tokens are buffered and
    emitted as a single "Characters" token just before the next non-text
    token (or at the end of the stream).
    """
    buffered = []
    for token in tokens:
        if token["type"] in ("Characters", "SpaceCharacters"):
            buffered.append(token["data"])
            continue
        if buffered:
            yield {"type": "Characters", "data": "".join(buffered)}
            buffered = []
        yield token
    if buffered:
        yield {"type": "Characters", "data": "".join(buffered)}
def pprint(walker):
    """Pretty printer for tree walkers.

    Consumes the token stream produced by *walker* and returns an indented,
    newline-joined text rendering (tags, attributes, doctype, comments and
    quoted character data), in the style used by the html5lib test suite.
    """
    output = []
    indent = 0
    for token in concatenateCharacterTokens(walker):
        type = token["type"]
        if type in ("StartTag", "EmptyTag"):
            # tag name
            if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
                if token["namespace"] in constants.prefixes:
                    ns = constants.prefixes[token["namespace"]]
                else:
                    ns = token["namespace"]
                name = "%s %s" % (ns, token["name"])
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            # attributes (sorted for consistent ordering)
            attrs = token["data"]
            for (namespace, localname), value in sorted(attrs.items()):
                if namespace:
                    if namespace in constants.prefixes:
                        ns = constants.prefixes[namespace]
                    else:
                        ns = namespace
                    name = "%s %s" % (ns, localname)
                else:
                    name = localname
                output.append("%s%s=\"%s\"" % (" " * indent, name, value))
            # self-closing
            if type == "EmptyTag":
                indent -= 2
        elif type == "EndTag":
            indent -= 2
        elif type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
        elif type == "Doctype":
            if token["name"]:
                if token["publicId"]:
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["publicId"],
                                   token["systemId"] if token["systemId"] else ""))
                elif token["systemId"]:
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))
        elif type == "Characters":
            output.append("%s\"%s\"" % (" " * indent, token["data"]))
        elif type == "SpaceCharacters":
            # concatenateCharacterTokens merges these into Characters tokens
            assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
        else:
            raise ValueError("Unknown token type, %s" % type)
    return "\n".join(output)
|
blrm/openshift-tools | refs/heads/stg | ansible/roles/lib_gcloud/library/gcloud_config.py | 7 | #!/usr/bin/env python
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
GcloudCLI class that wraps the oc commands in a subprocess
'''
import atexit
import json
import os
import random
# Not all genearated modules use this.
# pylint: disable=unused-import
import re
import shutil
import string
import subprocess
import tempfile
import yaml
# Not all genearated modules use this.
# pylint: disable=unused-import
import copy
# pylint: disable=import-error
from apiclient.discovery import build
# pylint: disable=import-error
from oauth2client.client import GoogleCredentials
from ansible.module_utils.basic import AnsibleModule
class GcloudCLIError(Exception):
    '''Exception raised by the gcloud CLI wrapper.'''
    pass
# pylint: disable=too-few-public-methods
class GcloudCLI(object):
''' Class to wrap the command line tools '''
    def __init__(self, credentials=None, project=None, verbose=False):
        ''' Constructor for GcloudCLI

        credentials: optional JSON-serializable credentials dict; when
            omitted, application-default credentials are used.
        project: gcloud project name this wrapper operates on.
        verbose: enable verbose output.
        '''
        self.scope = None
        self._project = project

        if not credentials:
            self.credentials = GoogleCredentials.get_application_default()
        else:
            # Write the credentials dict to a temp file so GoogleCredentials
            # can load it from a stream.
            # NOTE(review): json.dumps returns text while NamedTemporaryFile
            # defaults to binary mode -- this assumes Python 2 str semantics;
            # confirm before running under Python 3.
            tmp = tempfile.NamedTemporaryFile()
            tmp.write(json.dumps(credentials))
            tmp.seek(0)
            self.credentials = GoogleCredentials.from_stream(tmp.name)
            tmp.close()

        # compute API client (beta surface) built from the credentials
        self.scope = build('compute', 'beta', credentials=self.credentials)
        self.verbose = verbose
    @property
    def project(self):
        '''property for project

        Returns the gcloud project name passed at construction time.
        '''
        return self._project
def _create_image(self, image_name, image_info):
'''create an image name'''
cmd = ['compute', 'images', 'create', image_name]
for key, val in image_info.items():
if val:
cmd.extend(['--%s' % key, val])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _delete_image(self, image_name):
'''delete image by name '''
cmd = ['compute', 'images', 'delete', image_name]
if image_name:
cmd.extend(['describe', image_name])
else:
cmd.append('list')
cmd.append('-q')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_images(self, image_name=None):
'''list images.
if name is supplied perform a describe and return
'''
cmd = ['compute', 'images']
if image_name:
cmd.extend(['describe', image_name])
else:
cmd.append('list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_deployments(self, simple=True):
'''list deployments by name '''
cmd = ['deployment-manager', 'deployments', 'list']
if simple:
cmd.append('--simple-list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _delete_deployment(self, dname):
'''list deployments by name '''
cmd = ['deployment-manager', 'deployments', 'delete', dname, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _create_deployment(self, dname, config=None, opts=None):
''' create a deployment'''
cmd = ['deployment-manager', 'deployments', 'create', dname]
if config:
if isinstance(config, dict):
config = Utils.create_file(dname, config)
if isinstance(config, str) and os.path.exists(config):
cmd.extend(['--config=%s' % config])
if opts:
for key, val in opts.items():
cmd.append('--%s=%s' % (key, val))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _update_deployment(self, dname, config=None, opts=None):
''' create a deployment'''
cmd = ['deployment-manager', 'deployments', 'update', dname]
if config:
if isinstance(config, dict):
config = Utils.create_file(dname, config)
if isinstance(config, str) and os.path.exists(config):
cmd.extend(['--config=%s' % config])
if opts:
for key, val in opts.items():
cmd.append('--%s=%s' % (key, val))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_manifests(self, deployment, mname=None):
''' list manifests
if a name is specified then perform a describe
'''
cmd = ['deployment-manager', 'manifests', '--deployment', deployment]
if mname:
cmd.extend(['describe', mname])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_address(self, aname):
''' list addresses
if a name is specified then perform a describe
'''
cmd = ['compute', 'addresses', 'delete', aname, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_addresses(self, aname=None, region=None):
''' list addresses
if a name is specified then perform a describe
'''
cmd = ['compute', 'addresses']
if aname:
cmd.extend(['describe', aname, '--region', region])
else:
cmd.append('list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _create_address(self, address_name, address_info, address=None, isglobal=False):
''' create a deployment'''
cmd = ['compute', 'addresses', 'create', address_name]
if address:
cmd.append(address)
if isglobal:
cmd.append('--global')
for key, val in address_info.items():
if val:
cmd.extend(['--%s' % key, val])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_metadata(self, resource_type, name=None, zone=None):
''' list metadata'''
cmd = ['compute', resource_type, 'describe']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _delete_metadata(self, resource_type, keys, remove_all=False, name=None, zone=None):
'''create metadata'''
cmd = ['compute', resource_type, 'remove-metadata']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
if remove_all:
cmd.append('--all')
else:
cmd.append('--keys')
cmd.append(','.join(keys))
cmd.append('-q')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _create_metadata(self, resource_type, metadata=None, metadata_from_file=None, name=None, zone=None):
'''create metadata'''
cmd = ['compute', resource_type, 'add-metadata']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
data = None
if metadata_from_file:
cmd.append('--metadata-from-file')
data = metadata_from_file
else:
cmd.append('--metadata')
data = metadata
cmd.append(','.join(['%s=%s' % (key, val) for key, val in data.items()]))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_service_accounts(self, sa_name=None):
'''return service accounts '''
cmd = ['iam', 'service-accounts']
if sa_name:
cmd.extend(['describe', sa_name])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_service_account(self, sa_name):
'''delete service account '''
cmd = ['iam', 'service-accounts', 'delete', sa_name, '-q']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _create_service_account(self, sa_name, display_name=None):
'''create service account '''
cmd = ['iam', 'service-accounts', 'create', sa_name]
if display_name:
cmd.extend(['--display-name', display_name])
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _update_service_account(self, sa_name, display_name=None):
'''update service account '''
cmd = ['iam', 'service-accounts', 'update', sa_name]
if display_name:
cmd.extend(['--display-name', display_name])
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_service_account_key(self, sa_name, key_id):
'''delete service account key'''
cmd = ['iam', 'service-accounts', 'keys', 'delete', key_id, '--iam-account', sa_name, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_service_account_keys(self, sa_name):
    '''List the keys belonging to the given service account.'''
    cmd = ['iam', 'service-accounts', 'keys', 'list',
           '--iam-account', sa_name, '--format', 'json']
    return self.gcloud_cmd(cmd, output=True, output_type='json')
def _create_service_account_key(self, sa_name, outputfile, key_format='p12'):
    '''Create a service-account key, writing it to `outputfile`.

    The key file is scheduled for removal when the process exits.
    '''
    # make sure the sensitive key file does not outlive the module run
    atexit.register(Utils.cleanup, [outputfile])
    cmd = ['iam', 'service-accounts', 'keys', 'create', outputfile]
    cmd += ['--iam-account', sa_name]
    cmd += ['--key-file-type', key_format]
    return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_project_policy(self, project):
    '''Fetch the IAM policy of a project.

    (The original docstring was a copy-paste of the key-creation one.)
    '''
    cmd = ['projects', 'get-iam-policy', project, '--format', 'json']
    return self.gcloud_cmd(cmd, output=True, output_type='json')
def _add_project_policy(self, project, member, role):
    '''Grant `role` to `member` in the project IAM policy.'''
    cmd = ['projects', 'add-iam-policy-binding', project,
           '--member', member, '--role', role, '--format', 'json']
    return self.gcloud_cmd(cmd, output=True, output_type='json')
def _remove_project_policy(self, project, member, role):
    '''Revoke `role` from `member` in the project IAM policy.'''
    cmd = ['projects', 'remove-iam-policy-binding', project,
           '--member', member, '--role', role, '--format', 'json']
    return self.gcloud_cmd(cmd, output=True, output_type='json')
def _set_project_policy(self, project, policy_path):
    '''Replace the project IAM policy with the policy file at `policy_path`.'''
    cmd = ['projects', 'set-iam-policy', project, policy_path,
           '--format', 'json']
    return self.gcloud_cmd(cmd, output=True, output_type='json')
def _list_zones(self):
    '''List the compute zones visible to the configured project.'''
    return self.gcloud_cmd(['compute', 'zones', 'list', '--format', 'json'],
                           output=True, output_type='json')
def _config_set(self, config_param, config_value, config_section):
    '''Set a gcloud config option: `gcloud config set <section>/<param> <value>`.'''
    qualified = '%s/%s' % (config_section, config_param)
    cmd = ['config', 'set', qualified, config_value, '--format', 'json']
    return self.gcloud_cmd(cmd, output=True, output_type='json')
def _list_config(self):
    '''Return the parsed output of `gcloud config list`.'''
    return self.gcloud_cmd(['config', 'list', '--format', 'json'],
                           output=True, output_type='json')
def list_disks(self, zone=None, disk_name=None):
    '''Return disk objects in this project.

    When both `disk_name` and `zone` are given, describe that single
    disk; otherwise list all disks.
    '''
    cmd = ['beta', 'compute', 'disks']
    if disk_name and zone:
        cmd += ['describe', disk_name, '--zone', zone]
    else:
        cmd += ['list']
    cmd += ['--format', 'json']
    return self.gcloud_cmd(cmd, output=True, output_type='json')
# disabling too-many-arguments as these are all required for the disk labels
# pylint: disable=too-many-arguments
def _set_disk_labels(self, project, zone, dname, labels, finger_print):
    '''Set labels on a compute disk through the compute beta API.

    project: GCP project id
    zone: zone the disk lives in
    dname: disk name
    labels: dict of labels to apply; None is treated as {}
    finger_print: current labelFingerprint of the disk (required by the
        API for optimistic concurrency control)

    (The original docstring was a copy-paste error, and it compared
    `labels == None`; PEP 8 mandates identity comparison with None.)
    '''
    if labels is None:
        labels = {}

    # discovery client for the beta compute API, built from the
    # credentials held on this instance
    self.scope = build('compute', 'beta', credentials=self.credentials)

    body = {'labels': labels, 'labelFingerprint': finger_print}
    result = self.scope.disks().setLabels(project=project,
                                          zone=zone,
                                          resource=dname,
                                          body=body,
                                          ).execute()
    return result
def gcloud_cmd(self, cmd, output=False, output_type='json'):
    '''Base command for gcloud

    Prepends `/usr/bin/gcloud` (plus `--project` when one is set on the
    instance) to `cmd`, runs it, and returns a dict with keys
    `returncode`, `results`, `cmd` (and `err`/`stderr`/`stdout` when
    something went wrong).

    cmd: list of gcloud sub-command arguments
    output: when True, capture stdout into rval['results']
    output_type: 'json' parses stdout as JSON, 'raw' keeps the string
    '''
    cmds = ['/usr/bin/gcloud']

    if self.project:
        cmds.extend(['--project', self.project])

    cmds.extend(cmd)

    rval = {}
    results = ''
    err = None

    if self.verbose:
        print ' '.join(cmds)

    # env={} deliberately strips the caller's environment; gcloud then
    # relies solely on its own on-disk configuration
    proc = subprocess.Popen(cmds,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env={})

    stdout, stderr = proc.communicate()
    rval = {"returncode": proc.returncode,
            "results": results,
            "cmd": ' '.join(cmds),
           }

    if proc.returncode == 0:
        if output:
            if output_type == 'json':
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as err:
                    # NOTE(review): `err.message` is Python 2 only; this
                    # module targets py2 (see the print statements above)
                    if "No JSON object could be decoded" in err.message:
                        err = err.message
            elif output_type == 'raw':
                rval['results'] = stdout

        if self.verbose:
            print stdout
            print stderr

        if err:
            rval.update({"err": err,
                         "stderr": stderr,
                         "stdout": stdout,
                         "cmd": cmds
                        })

    else:
        # non-zero exit: expose stderr/stdout and empty out the results
        rval.update({"stderr": stderr,
                     "stdout": stdout,
                     "results": {},
                    })

    return rval
################################################################################
# utilities and helpers for generation
################################################################################
class Utils(object):
    ''' utilities for openshiftcli modules '''

    COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'

    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        '''Write `data` to /tmp/<rname> and schedule the file for removal
        at interpreter exit.

        ftype selects the serialization: 'yaml', 'json', or anything else
        for raw text.  Returns the path written.
        '''
        path = os.path.join('/tmp', rname)
        if ftype == 'yaml':
            contents = yaml.safe_dump(data, default_flow_style=False)
        elif ftype == 'json':
            contents = json.dumps(data)
        else:
            contents = data
        with open(path, 'w') as fds:
            fds.write(contents)
        # clean the temp file up once the module is done
        atexit.register(Utils.cleanup, [path])
        return path

    @staticmethod
    def global_compute_url(project, collection, rname):
        '''Build the global compute URL for a resource.'''
        return '%sprojects/%s/global/%s/%s' % (
            Utils.COMPUTE_URL_BASE, project, collection, rname)

    @staticmethod
    def zonal_compute_url(project, zone, collection, rname):
        '''Build the zonal compute URL for a resource.'''
        return '%sprojects/%s/zones/%s/%s/%s' % (
            Utils.COMPUTE_URL_BASE, project, zone, collection, rname)

    @staticmethod
    def generate_random_name(size):
        '''Return a random string of lowercase letters and digits of length `size`.'''
        alphabet = string.ascii_lowercase + string.digits
        return ''.join(random.choice(alphabet) for _ in range(size))

    @staticmethod
    def cleanup(files):
        '''Remove each path in `files` (file or directory) if it still exists.'''
        for sfile in files:
            if not os.path.exists(sfile):
                continue
            if os.path.isdir(sfile):
                shutil.rmtree(sfile)
            elif os.path.isfile(sfile):
                os.remove(sfile)
# pylint: disable=too-many-instance-attributes
class GcloudConfig(GcloudCLI):
    ''' Class to wrap the gcloud config command'''

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 project=None,
                 region=None):
        ''' Constructor for gcloud resource

        Manages exactly one gcloud config option: either core/project or
        compute/region.  NOTE(review): if neither argument is given,
        `_working_param` stays empty and `update()` will raise KeyError —
        presumably callers always pass one; confirm at the call sites.
        '''
        super(GcloudConfig, self).__init__()
        # the single option this instance manages:
        # {'name': ..., 'value': ..., 'section': ...}
        self._working_param = {}
        if project:
            self._working_param['name'] = 'project'
            self._working_param['value'] = project
            self._working_param['section'] = 'core'
        elif region:
            self._working_param['name'] = 'region'
            self._working_param['value'] = region
            self._working_param['section'] = 'compute'
        # snapshot of the current gcloud config, flattened into one dict
        self._current_config = self.get_compacted_config()

    def get_compacted_config(self):
        '''return compated config options

        Flattens the per-section output of `gcloud config list` into a
        single {option: value} dict.
        '''
        results = self._list_config()
        compacted_results = {}
        for config in results['results']:
            compacted_results.update(results['results'][config])
        return compacted_results

    def list_config(self):
        '''return config (raw result dict of `gcloud config list`)'''
        results = self._list_config()
        return results

    def check_value(self, param, param_value):
        '''check to see if param needs to be updated

        Returns True when the cached config already holds `param_value`.
        Raises KeyError when `param` is absent from the cached config.
        '''
        return self._current_config[param] == param_value

    def update(self):
        ''' do updates, if needed

        Runs `gcloud config set` only when the stored value differs.
        The returned dict contains a 'returncode' key only when a set
        was actually performed; 'results' always holds fresh
        `gcloud config list` output.
        '''
        if not self.check_value(self._working_param['name'], self._working_param['value']):
            config_set_results = self._config_set(self._working_param['name'], self._working_param['value'],
                                                  self._working_param['section'])
            list_config_results = self.list_config()
            config_set_results['results'] = list_config_results['results']
            return config_set_results
        else:
            list_config_results = self.list_config()
            return {'results': list_config_results['results']}
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
    ''' ansible module for gcloud config

    Supported states:
      list    - return the current `gcloud config list` output
      present - ensure the requested project/region value is set

    BUGFIX: the original called `gcloud.update()` inside the `if`
    condition and then again afterwards, performing the config change
    twice — and performed it even in check mode.  It now runs exactly
    once, after the check-mode exit.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            # credentials
            state=dict(default='present', type='str',
                       choices=['present', 'list']),
            project=dict(default=None, type='str'),
            region=dict(default=None, type='str'),
        ),
        supports_check_mode=True,
        # project and region are alternatives; GcloudConfig manages one option
        mutually_exclusive=[['project', 'region']],
    )

    gcloud = GcloudConfig(module.params['project'],
                          module.params['region'],
                          )

    state = module.params['state']

    api_rval = gcloud.list_config()

    #####
    # Get
    #####
    if state == 'list':
        if api_rval['returncode'] != 0:
            module.fail_json(msg=api_rval, state="list")

        module.exit_json(changed=False, results=api_rval['results'], state="list")

    ########
    # Update
    ########
    if state == 'present':
        # never mutate the configuration in check mode
        if module.check_mode:
            module.exit_json(changed=False, msg='Would have performed an update.')

        # update() runs `gcloud config set` only when the value differs;
        # its result carries a 'returncode' key only when a set was run
        api_rval = gcloud.update()

        if 'returncode' in api_rval:
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            module.exit_json(changed=True, results=api_rval, state="present|update")

        module.exit_json(changed=False, results=api_rval, state="present")

    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
|
ProfessorX/CIS508 | refs/heads/master | Codes/xen-4.3.1/tools/xm-test/tests/block-list/03_block-list_anotherbd_pos.py | 42 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Li Ge <lge@us.ibm.com>
# Positive Test: create domain with block attach, attach another, verify both in list
from XmTestLib import *

# block-list is not implemented for HVM guests, so skip this test there
if ENABLE_HVM_SUPPORT:
    SKIP("Block-list not supported for HVM domains")

# start the domain with one block device (xvda1) already attached
config = {"disk":"phy:/dev/ram0,xvda1,w"}
domain = XmTestDomain(extraConfig=config)

try:
    console = domain.start()
except DomainError, e:
    if verbose:
        print e.extra
    FAIL("Unable to create domain")

# the first device must already be visible in block-list
status, output = traceCommand("xm block-list %s" % domain.getId())
if status != 0:
    FAIL("Fail to list block device")

#Add another virtual block device to the domain
status, output = traceCommand("xm block-attach %s phy:/dev/ram1 xvda2 w" % domain.getId())
if status != 0:
    FAIL("Fail to attach block device")

#Verify block-list on Domain0
status, output = traceCommand("xm block-list %s" % domain.getId())
# 51713/51714 — presumably the xenstore device numbers for xvda1/xvda2
# (202:1 / 202:2); TODO confirm against the xvd major/minor scheme
eyecatcher1 = "51713"
eyecatcher2 = "51714"
where1 = output.find(eyecatcher1)
where2 = output.find(eyecatcher2)
if status != 0:
    FAIL("xm block-list returned bad status, expected 0, status is %i" % status)
elif (where1 < 0) and (where2 < 0):
    FAIL("Fail to list all block devices after attaching another block device")

#Verify attached block device on DomainU
try:
    run = console.runCmd("cat /proc/partitions | grep xvda1;cat /proc/partitions | grep xvda2")
except ConsoleError, e:
    saveLog(console.getHistory())
    FAIL(str(e))

domain.stop()

# grep exits non-zero when either partition is missing inside the guest
if run["return"] != 0:
    FAIL("Failed to verify that block dev is attached on DomainU")
|
mmnelemane/neutron | refs/heads/master | neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py | 7 | # Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_utils import uuidutils
from neutron import context
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
qos_driver)
from neutron.tests import base
class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
    """Checks that QosSRIOVAgentDriver translates QoS policy operations
    into the expected eswitch_mgr.set_device_max_rate() calls."""

    ASSIGNED_MAC = '00:00:00:00:00:66'
    PCI_SLOT = '0000:06:00.1'

    def setUp(self):
        super(QosSRIOVAgentDriverTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.qos_driver = qos_driver.QosSRIOVAgentDriver()
        self.qos_driver.initialize()
        # swap in a mock eswitch manager so no SR-IOV hardware is needed;
        # max_rate_mock records the set_device_max_rate() calls under test
        self.qos_driver.eswitch_mgr = mock.Mock()
        self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
        self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
        self.rule = self._create_bw_limit_rule_obj()
        self.qos_policy = self._create_qos_policy_obj([self.rule])
        self.port = self._create_fake_port()

    def _create_bw_limit_rule_obj(self):
        # helper: bandwidth-limit rule with fixed test values
        rule_obj = rule.QosBandwidthLimitRule()
        rule_obj.id = uuidutils.generate_uuid()
        rule_obj.max_kbps = 2
        rule_obj.max_burst_kbps = 200
        rule_obj.obj_reset_changes()
        return rule_obj

    def _create_qos_policy_obj(self, rules):
        # helper: QosPolicy wrapping the given rules
        policy_dict = {'id': uuidutils.generate_uuid(),
                       'tenant_id': uuidutils.generate_uuid(),
                       'name': 'test',
                       'description': 'test',
                       'shared': False,
                       'rules': rules}
        policy_obj = policy.QosPolicy(self.context, **policy_dict)
        policy_obj.obj_reset_changes()
        return policy_obj

    def _create_fake_port(self):
        # helper: minimal port dict with the fields the driver reads
        return {'port_id': uuidutils.generate_uuid(),
                'profile': {'pci_slot': self.PCI_SLOT},
                'device': self.ASSIGNED_MAC}

    def test_create_rule(self):
        # create() pushes the rule's max_kbps for the port's VF
        self.qos_driver.create(self.port, self.qos_policy)
        self.max_rate_mock.assert_called_once_with(
            self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)

    def test_update_rule(self):
        # update() re-applies the limit exactly like create()
        self.qos_driver.update(self.port, self.qos_policy)
        self.max_rate_mock.assert_called_once_with(
            self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)

    def test_delete_rules(self):
        # delete() sets the limit to 0 (presumably "no limit" — confirm
        # against the eswitch manager's semantics)
        self.qos_driver.delete(self.port, self.qos_policy)
        self.max_rate_mock.assert_called_once_with(
            self.ASSIGNED_MAC, self.PCI_SLOT, 0)

    def test__set_vf_max_rate_captures_sriov_failure(self):
        # the driver must swallow SriovNicError rather than propagate it
        self.max_rate_mock.side_effect = exceptions.SriovNicError()
        self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)

    def test__set_vf_max_rate_unknown_device(self):
        # no rate call may be made for a device the eswitch doesn't know
        with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
                               return_value=False):
            self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
        self.assertFalse(self.max_rate_mock.called)
|
Igalia/snabbswitch | refs/heads/disable-blacklisting | src/program/lwaftr/tests/subcommands/bench_test.py | 9 | """
Test the "snabb lwaftr bench" subcommand. Does not need NIC cards.
"""
import unittest
from test_env import (BENCHMARK_FILENAME, BENCHMARK_PATH, DATA_DIR,
BENCHDATA_DIR, SNABB_CMD, BaseTestCase)
class TestBench(BaseTestCase):
    """Run `snabb lwaftr bench` for one second and verify that the
    benchmark file is produced, then remove it."""

    cmd_args = [
        str(SNABB_CMD), 'lwaftr', 'bench',
        '--duration', '1',
        '--bench-file', BENCHMARK_FILENAME,
        str(DATA_DIR / 'icmp_on_fail.conf'),
        str(BENCHDATA_DIR / 'ipv4-0550.pcap'),
        str(BENCHDATA_DIR / 'ipv6-0550.pcap'),
    ]

    def test_bench(self):
        self.run_cmd(self.cmd_args)
        produced = BENCHMARK_PATH.is_file()
        self.assertTrue(produced,
                        'Cannot find {}'.format(BENCHMARK_PATH))
        BENCHMARK_PATH.unlink()
# allow running this test module directly: `python bench_test.py`
if __name__ == '__main__':
    unittest.main()
|
BizzyBane/qBittorrent | refs/heads/master | src/searchengine/nova3/engines/__init__.py | 12133432 | |
carsongee/edx-platform | refs/heads/master | cms/djangoapps/models/__init__.py | 12133432 | |
yigitguler/django | refs/heads/master | tests/staticfiles_tests/__init__.py | 12133432 | |
foobar999/Suchmaschine | refs/heads/master | src/processors/__init__.py | 12133432 | |
gagneurlab/concise | refs/heads/master | concise/layers.py | 1 | import numpy as np
from keras import backend as K
from keras.engine.topology import Layer
from keras.layers.pooling import _GlobalPooling1D
from keras.layers import Conv1D, Input, LocallyConnected1D
from keras.layers.core import Dropout
from concise.utils.plot import seqlogo, seqlogo_fig
import matplotlib.pyplot as plt
from keras.engine import InputSpec
from concise.utils.pwm import DEFAULT_BASE_BACKGROUND, pssm_array2pwm_array, _pwm2pwm_info
from keras import activations
from keras import constraints
from concise import initializers
from concise import regularizers
from concise.regularizers import GAMRegularizer, SplineSmoother
from concise.utils.splines import BSpline
from concise.utils.helper import get_from_module
from concise.utils.plot import heatmap
from concise.preprocessing.sequence import (DNA, RNA, AMINO_ACIDS,
CODONS, STOP_CODONS)
from concise.preprocessing.structure import RNAplfold_PROFILES
# --------------------------------------------
# Input()
def InputDNA(seq_length, name=None, **kwargs):
    """Input placeholder for the array produced by `encodeDNA` / `encodeRNA`
    (one-hot, 4 channels).

    Wrapper for: `keras.layers.Input((seq_length, 4), name=name, **kwargs)`
    """
    return Input(shape=(seq_length, 4), name=name, **kwargs)
# RNA one-hot encoding has the same shape as DNA (4 channels)
InputRNA = InputDNA
def InputCodon(seq_length, ignore_stop_codons=True, name=None, **kwargs):
    """Input placeholder for array returned by `encodeCodon`

    Note: The seq_length is divided by 3

    Wrapper for: `keras.layers.Input((seq_length // 3, 61 or 64), name=name, **kwargs)`
    """
    # with stop codons included the vocabulary is larger
    if ignore_stop_codons:
        vocab = CODONS
    else:
        vocab = CODONS + STOP_CODONS

    assert seq_length % 3 == 0
    # BUGFIX: use floor division — plain `/` yields a float under
    # Python 3, which is not a valid shape entry.  (The docstring's
    # "61 or 61" typo is also corrected above.)
    return Input((seq_length // 3, len(vocab)), name=name, **kwargs)
def InputAA(seq_length, name=None, **kwargs):
    """Input placeholder for the array produced by `encodeAA`
    (one channel per amino-acid symbol).

    Wrapper for: `keras.layers.Input((seq_length, 22), name=name, **kwargs)`
    """
    n_channels = len(AMINO_ACIDS)
    return Input(shape=(seq_length, n_channels), name=name, **kwargs)
def InputRNAStructure(seq_length, name=None, **kwargs):
    """Input placeholder for the array produced by `encodeRNAStructure`
    (one channel per RNAplfold profile).

    Wrapper for: `keras.layers.Input((seq_length, 5), name=name, **kwargs)`
    """
    n_profiles = len(RNAplfold_PROFILES)
    return Input(shape=(seq_length, n_profiles), name=name, **kwargs)
# deprecated
def InputSplines(seq_length, n_bases=10, name=None, **kwargs):
    """Input placeholder for the array produced by `encodeSplines`.

    Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input(shape=(seq_length, n_bases), name=name, **kwargs)
def InputSplines1D(seq_length, n_bases=10, name=None, **kwargs):
    """Input placeholder for the array produced by `encodeSplines`.

    Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input(shape=(seq_length, n_bases), name=name, **kwargs)
# TODO - deprecate
def InputDNAQuantity(seq_length, n_features=1, name=None, **kwargs):
    """Convenience wrapper around `keras.layers.Input`:
    `Input((seq_length, n_features), name=name, **kwargs)`
    """
    return Input(shape=(seq_length, n_features), name=name, **kwargs)
# TODO - deprecate
def InputDNAQuantitySplines(seq_length, n_bases=10, name="DNASmoothPosition", **kwargs):
    """Convenience wrapper around `keras.layers.Input`:
    `Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input(shape=(seq_length, n_bases), name=name, **kwargs)
# --------------------------------------------
class GlobalSumPooling1D(_GlobalPooling1D):
    """Global sum pooling operation for temporal data.

    (Docstring corrected: this sums over the steps axis, it does not
    average.)

    # Note
        - Input shape: 3D tensor with shape: `(batch_size, steps, features)`.
        - Output shape: 2D tensor with shape: `(batch_size, channels)`
    """

    def call(self, inputs):
        # collapse the temporal (steps) axis by summation
        return K.sum(inputs, axis=1)
class ConvSequence(Conv1D):
    """Convenience wrapper over `keras.layers.Conv1D` with 3 changes:

    - additional plotting method: `plot_weights(index=None, plot_type="motif_raw", figsize=None, ncol=1)`
        - **index**: can be a particular index or a list of indicies
        - **plot_type**: Can be one of `"heatmap"`, `"motif_raw"`, `"motif_pwm"` or `"motif_pwm_info"`.
        - **figsize**: tuple, Figure size
        - **ncol**: Number of axis columns
    - additional argument `seq_length` instead of `input_shape`
    - restriction in build method: `input_shape[-1]` needs to match the vocabulary size

    Classes `Conv*` all inherit from `ConvSequence` and define the corresponding vocabulary:

    - ConvDNA
    - ConvRNA
    - ConvRNAStructure
    - ConvAA
    - ConvCodon
    """

    # channel vocabulary; subclasses override this
    VOCAB = DNA

    def __init__(self,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 seq_length=None,
                 **kwargs):

        # override input shape: seq_length pins input_shape to
        # (seq_length, len(VOCAB))
        if seq_length:
            kwargs["input_shape"] = (seq_length, len(self.VOCAB))
            kwargs.pop("batch_input_shape", None)

        super(ConvSequence, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs)
        self.seq_length = seq_length

    def build(self, input_shape):
        # BUGFIX: the original compared with `is not`, which tests int
        # *identity* and is only reliable for CPython's cached small
        # integers; value equality is what is meant here.
        if input_shape[-1] != len(self.VOCAB):
            raise ValueError("{cls} requires input_shape[-1] == {n}. Given: {s}".
                             format(cls=self.__class__.__name__, n=len(self.VOCAB), s=input_shape[-1]))
        return super(ConvSequence, self).build(input_shape)

    def get_config(self):
        # serialize the extra constructor argument
        config = super(ConvSequence, self).get_config()
        config["seq_length"] = self.seq_length
        return config

    def _plot_weights_heatmap(self, index=None, figsize=None, **kwargs):
        """Plot weights as a heatmap

        index = can be a particular index or a list of indicies
        **kwargs - additional arguments to concise.utils.plot.heatmap
        """
        W = self.get_weights()[0]
        if index is None:
            index = np.arange(W.shape[2])

        fig = heatmap(np.swapaxes(W[:, :, index], 0, 1), plot_name="filter: ",
                      vocab=self.VOCAB, figsize=figsize, **kwargs)
        return fig

    def _plot_weights_motif(self, index, plot_type="motif_raw",
                            background_probs=DEFAULT_BASE_BACKGROUND,
                            ncol=1,
                            figsize=None):
        """Plot weights as sequence motifs.

        index: single int or list of ints (None = all filters)
        NOTE(review): `index` is normalized below but never applied to
        the weight array — seqlogo_fig is given all filters; confirm
        whether sub-setting was intended.
        """
        w_all = self.get_weights()
        if len(w_all) == 0:
            raise Exception("Layer needs to be initialized first")
        W = w_all[0]
        if index is None:
            index = np.arange(W.shape[2])

        if isinstance(index, int):
            index = [index]

        if plot_type == "motif_pwm" and plot_type in self.AVAILABLE_PLOTS:
            arr = pssm_array2pwm_array(W, background_probs)
        elif plot_type == "motif_raw" and plot_type in self.AVAILABLE_PLOTS:
            arr = W
        elif plot_type == "motif_pwm_info" and plot_type in self.AVAILABLE_PLOTS:
            quasi_pwm = pssm_array2pwm_array(W, background_probs)
            arr = _pwm2pwm_info(quasi_pwm)
        else:
            raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))

        # (the original also created an unused empty figure with
        # plt.figure() here; removed — seqlogo_fig builds its own figure)
        fig = seqlogo_fig(arr, vocab=self.VOCAB_name, figsize=figsize, ncol=ncol, plot_name="filter: ")
        return fig

    def plot_weights(self, index=None, plot_type="motif_raw", figsize=None, ncol=1, **kwargs):
        """Plot filters as heatmap or motifs

        index = can be a particular index or a list of indicies
        **kwargs - additional arguments to concise.utils.plot.heatmap
        """
        if "heatmap" in self.AVAILABLE_PLOTS and plot_type == "heatmap":
            return self._plot_weights_heatmap(index=index, figsize=figsize, ncol=ncol, **kwargs)
        elif plot_type[:5] == "motif":
            return self._plot_weights_motif(index=index, plot_type=plot_type, figsize=figsize, ncol=ncol, **kwargs)
        else:
            raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))
class ConvDNA(ConvSequence):
    # one channel per DNA base
    VOCAB = DNA
    VOCAB_name = "DNA"
    AVAILABLE_PLOTS = ["heatmap", "motif_raw", "motif_pwm", "motif_pwm_info"]
class ConvRNA(ConvDNA):
    # same channel count as DNA, RNA alphabet labels
    VOCAB = RNA
    VOCAB_name = "RNA"
    AVAILABLE_PLOTS = ["heatmap", "motif_raw", "motif_pwm", "motif_pwm_info"]
class ConvAA(ConvDNA):
    # amino-acid vocabulary; PWM-style plots don't apply here
    VOCAB = AMINO_ACIDS
    VOCAB_name = "AA"
    AVAILABLE_PLOTS = ["heatmap", "motif_raw"]
class ConvRNAStructure(ConvDNA):
    # RNAplfold structure profiles as channels
    VOCAB = RNAplfold_PROFILES
    VOCAB_name = "RNAStruct"
    AVAILABLE_PLOTS = ["heatmap", "motif_raw"]
class ConvCodon(ConvSequence):
    """Conv1D over codon-encoded sequences, with or without stop codons."""
    VOCAB = CODONS
    AVAILABLE_PLOTS = ["heatmap"]

    def build(self, input_shape):
        n_codons = len(CODONS)
        n_all = len(CODONS + STOP_CODONS)
        if input_shape[-1] not in [n_codons, n_all]:
            raise ValueError("{cls} requires input_shape[-1] == {n} or {m}".
                             format(cls=self.__class__.__name__,
                                    n=n_codons,
                                    m=n_all))
        # widen the vocabulary when stop codons are present in the input
        if input_shape[-1] == n_all:
            self.VOCAB = CODONS + STOP_CODONS
        # deliberately skips ConvSequence.build: its vocabulary check is
        # replaced by the two-size check above
        return super(ConvSequence, self).build(input_shape)
# --------------------------------------------
############################################
# Smoothing layers
# TODO - re-write SplineWeight1D with SplineT layer
# TODO - SplineWeight1D - use new API and update
# - think how to call share_splines...?
# - use a regularizer rather than just
class SplineWeight1D(Layer):
    """Up- or down-weight positions in the activation array of 1D convolutions:

    `x^{out}_{ijk} = x^{in}_{ijk}* (1 + f_S^k(j)) \;,`

    where f_S is the spline transformation.

    # Arguments
        n_bases: int; Number of spline bases used for the positional effect.
        l2_smooth: (float) L2 regularization strength for the second
    order differences in positional bias' smooth splines. (GAM smoothing regularization)
        l2: (float) L2 regularization strength for the spline base coefficients.
        use_bias: boolean; should we add a bias to the transition
        bias_initializer: bias initializer - from `keras.initializers`
    """

    def __name__(self):
        return "SplineWeight1D"

    def __init__(self,
                 # spline type
                 n_bases=10,
                 spline_degree=3,
                 share_splines=False,
                 # regularization
                 l2_smooth=0,
                 l2=0,
                 use_bias=False,
                 bias_initializer='zeros',
                 **kwargs):
        self.n_bases = n_bases
        self.spline_degree = spline_degree
        # share_splines=True uses a single positional curve for all filters
        self.share_splines = share_splines
        self.l2 = l2
        self.l2_smooth = l2_smooth
        self.use_bias = use_bias
        self.bias_initializer = initializers.get(bias_initializer)

        super(SplineWeight1D, self).__init__(**kwargs)

    def build(self, input_shape):
        # input_shape = (None, steps, filters)
        start = 0
        end = input_shape[1]
        filters = input_shape[2]

        if self.share_splines:
            n_spline_tracks = 1
        else:
            n_spline_tracks = filters

        # setup the bspline object over the position range [0, steps-1]
        self.bs = BSpline(start, end - 1,
                          n_bases=self.n_bases,
                          spline_order=self.spline_degree
                          )

        # pre-compute the fixed spline design matrix for every position
        self.positions = np.arange(end)
        self.X_spline = self.bs.predict(self.positions, add_intercept=False)  # shape = (end, self.n_bases)

        # convert to the right precision and K.constant
        self.X_spline_K = K.constant(K.cast_to_floatx(self.X_spline))

        # add weights - all set to 0, so the layer starts as identity
        # (positional effect 1 + f_S = 1)
        self.kernel = self.add_weight(shape=(self.n_bases, n_spline_tracks),
                                      initializer='zeros',
                                      name='kernel',
                                      regularizer=GAMRegularizer(self.n_bases, self.spline_degree,
                                                                 self.l2_smooth, self.l2),
                                      trainable=True)

        if self.use_bias:
            self.bias = self.add_weight((n_spline_tracks, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=None)

        super(SplineWeight1D, self).build(input_shape)  # Be sure to call this somewhere!

    def call(self, x):
        # positional curve per track: X_spline @ kernel -> (steps, tracks)
        spline_track = K.dot(self.X_spline_K, self.kernel)

        if self.use_bias:
            spline_track = K.bias_add(spline_track, self.bias)

        # if self.spline_exp:
        #     spline_track = K.exp(spline_track)
        # else:
        spline_track = spline_track + 1

        # multiply together the two coefficients (broadcast over batch)
        output = spline_track * x

        return output

    def compute_output_shape(self, input_shape):
        # element-wise re-weighting: shape is unchanged
        return input_shape

    def get_config(self):
        config = {
            'n_bases': self.n_bases,
            'spline_degree': self.spline_degree,
            'share_splines': self.share_splines,
            # 'spline_exp': self.spline_exp,
            'l2_smooth': self.l2_smooth,
            'l2': self.l2,
            'use_bias': self.use_bias,
            'bias_initializer': initializers.serialize(self.bias_initializer),
        }
        base_config = super(SplineWeight1D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def positional_effect(self):
        """Return the learned positional curve(s) as numpy arrays."""
        w = self.get_weights()[0]
        pos_effect = np.dot(self.X_spline, w)
        return {"positional_effect": pos_effect, "positions": self.positions}

    def plot(self, *args, **kwargs):
        """Plot the learned positional effect with matplotlib."""
        pe = self.positional_effect()
        plt.plot(pe["positions"], pe["positional_effect"], *args, **kwargs)
        plt.xlabel("Position")
        plt.ylabel("Positional effect")
# SplineT -> use just locally-connected layers
# SplineT1D -> wrap ConvSplines -> However,
# It's better if we use just matrix multiplications.
# - We can have more control over the inputs
# - I think it's better to explicitly state the dimensions: 1D or so.
class SplineT(Layer):
    """Spline transformation layer.

    As input, it needs an array of scalars pre-processed by `concise.preprocessing.EncodeSplines`

    Specifically, the input/output dimensions are:

    - Input: N x ... x channels x n_bases
    - Output: N x ... x channels

    # Arguments
        shared_weights: bool, if True spline transformation weights
    are shared across different features
        kernel_regularizer: use `concise.regularizers.SplineSmoother`
        other arguments: See `keras.layers.Dense`
    """

    def __init__(self,
                 # regularization
                 shared_weights=False,
                 kernel_regularizer=None,
                 use_bias=False,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs
                 ):
        super(SplineT, self).__init__(**kwargs)

        self.shared_weights = shared_weights
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        # require at least (batch, features, n_bases)
        self.input_spec = InputSpec(min_ndim=3)

    def build(self, input_shape):
        assert len(input_shape) >= 3
        n_bases = input_shape[-1]
        n_features = input_shape[-2]
        # keep the full input shape around for the reshapes in call()
        self.inp_shape = input_shape
        self.n_features = n_features
        self.n_bases = n_bases

        if self.shared_weights:
            # one spline coefficient vector shared by every feature
            use_n_features = 1
        else:
            use_n_features = self.n_features

        self.kernel = self.add_weight(shape=(n_bases, use_n_features),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      trainable=True)

        if self.use_bias:
            self.bias = self.add_weight((n_features, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=None)
        self.built = True
        super(SplineT, self).build(input_shape)  # Be sure to call this somewhere!

    def compute_output_shape(self, input_shape):
        # the trailing n_bases axis is contracted away
        return input_shape[:-1]

    def call(self, inputs):
        N = len(self.inp_shape)

        if self.shared_weights:
            # single kernel column: a plain dot + squeeze suffices
            return K.squeeze(K.dot(inputs, self.kernel), -1)

        # per-feature kernels: move the features axis (-2) to the front,
        # flatten the remaining leading axes, contract n_bases per
        # feature via batch_dot, then restore the original axis order
        output = K.permute_dimensions(inputs, (N - 2, ) + tuple(range(N - 2)) + (N - 1,))
        output_reshaped = K.reshape(output, (self.n_features, -1, self.n_bases))

        bd_output = K.batch_dot(output_reshaped, K.transpose(self.kernel))
        output = K.reshape(bd_output, (self.n_features, -1) + self.inp_shape[1:(N - 2)])
        # move axis 0 (features) to back
        output = K.permute_dimensions(output, tuple(range(1, N - 1)) + (0,))

        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format="channels_last")
        return output

    def get_config(self):
        config = {
            'shared_weights': self.shared_weights,
            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
            'use_bias': self.use_bias,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'bias_initializer': initializers.serialize(self.bias_initializer)
        }
        base_config = super(SplineT, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# TODO - add X_spline as non-trainable weights
# Deprecated
class GAMSmooth(Layer):
def __name__(self):
return "GAMSmooth"
def __init__(self,
# spline type
n_bases=10,
spline_order=3,
share_splines=False,
spline_exp=False,
# regularization
l2_smooth=1e-5,
l2=1e-5,
use_bias=False,
bias_initializer='zeros',
**kwargs):
"""
# Arguments
n_splines int: Number of splines used for the positional bias.
spline_exp (bool): If True, the positional bias score is observed by: `np.exp(spline_score)`,
where `spline_score` is the linear combination of B-spline basis functions.
If False, `np.exp(spline_score + 1)` is used.
l2 (float): L2 regularization strength for the second order differences in positional bias' smooth splines.
(GAM smoothing regularization)
l2_smooth (float): L2 regularization strength for the spline base coefficients.
use_bias: boolean; should we add a bias to the transition
bias_initializer; bias initializer - from keras.initailizers
"""
self.n_bases = n_bases
self.spline_order = spline_order
self.share_splines = share_splines
self.spline_exp = spline_exp
self.l2 = l2
self.l2_smooth = l2_smooth
self.use_bias = use_bias
self.bias_initializer = initializers.get(bias_initializer)
super(GAMSmooth, self).__init__(**kwargs)
def build(self, input_shape):
# input_shape = (None, steps, filters)
start = 0
end = input_shape[1]
filters = input_shape[2]
if self.share_splines:
n_spline_tracks = 1
else:
n_spline_tracks = filters
# setup the bspline object
self.bs = BSpline(start, end - 1,
n_bases=self.n_bases,
spline_order=self.spline_order
)
# create X_spline,
self.positions = np.arange(end)
self.X_spline = self.bs.predict(self.positions, add_intercept=False) # shape = (end, self.n_bases)
# convert to the right precision and K.constant
self.X_spline_K = K.constant(K.cast_to_floatx(self.X_spline))
# add weights - all set to 0
self.kernel = self.add_weight(shape=(self.n_bases, n_spline_tracks),
initializer='zeros',
name='kernel',
regularizer=GAMRegularizer(self.n_bases, self.spline_order,
self.l2_smooth, self.l2),
trainable=True)
if self.use_bias:
self.bias = self.add_weight((n_spline_tracks, ),
initializer=self.bias_initializer,
name='bias',
regularizer=None)
super(GAMSmooth, self).build(input_shape) # Be sure to call this somewhere!
def call(self, x):
spline_track = K.dot(self.X_spline_K, self.kernel)
if self.use_bias:
spline_track = K.bias_add(spline_track, self.bias)
if self.spline_exp:
spline_track = K.exp(spline_track)
else:
spline_track = spline_track + 1
# multiply together the two coefficients
output = spline_track * x
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'n_bases': self.n_bases,
'spline_order': self.spline_order,
'share_splines': self.share_splines,
'spline_exp': self.spline_exp,
'l2_smooth': self.l2_smooth,
'l2': self.l2,
'use_bias': self.use_bias,
'bias_initializer': initializers.serialize(self.bias_initializer),
}
base_config = super(GAMSmooth, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def positional_effect(self):
w = self.get_weights()[0]
pos_effect = np.dot(self.X_spline, w)
return {"positional_effect": pos_effect, "positions": self.positions}
    def plot(self, *args, **kwargs):
        """Plot the learned positional effect with matplotlib.

        Extra positional/keyword arguments are forwarded to ``plt.plot``.
        """
        pe = self.positional_effect()
        plt.plot(pe["positions"], pe["positional_effect"], *args, **kwargs)
        plt.xlabel("Position")
        plt.ylabel("Positional effect")
# ResSplineWeight
# SplineWeight ?
# WeightedSum1D
# TODO - add the plotting functionality
# TODO - rename the layer
# additional arguments?
# - share_splines=False,
# - spline_exp=False
#
# TODO - use similar arguments to GAMSmooth (not as a thin wrapper around Conv1d)
# TODO - fix & unit-test this layer
# ConvSplineTr1D
# DenseSplineTr
# SplineTr
class ConvSplines(Conv1D):
    """Convenience wrapper over `keras.layers.Conv1D` with 2 changes:

    - restriction in kernel_regularizer - needs to be of class GAMRegularizer
    - hard-coded values:
       - kernel_size=1,
       - strides=1,
       - padding='valid',
       - dilation_rate=1,

    NOTE(review): the old docstring also mentioned a ``seq_length`` argument,
    but no such parameter exists (see the commented-out code below) -- the
    bullet was removed as stale.
    """

    def __init__(self, filters,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 activity_regularizer=None,
                 **kwargs):
        # BUG FIX: the regularizer used to default to a *shared*
        # GAMRegularizer() instance (mutable default argument).  build()
        # mutates the regularizer (sets n_bases), so separate layers would
        # have stomped on each other's default.  Create a fresh instance per
        # layer instead; callers that passed a regularizer see no change.
        if kernel_regularizer is None:
            kernel_regularizer = GAMRegularizer()
        super(ConvSplines, self).__init__(
            filters=filters,
            kernel_size=1,
            strides=1,
            padding='valid',
            dilation_rate=1,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs)

        # NOTE(review): assumes `regularizers` resolves to concise.regularizers
        # (so the GAMRegularizer attribute exists) -- confirm the import.
        if not isinstance(self.kernel_regularizer, regularizers.GAMRegularizer):
            # BUG FIX: the message parts were separated by a comma, which
            # passed *two* arguments to ValueError instead of one string.
            raise ValueError("Regularizer has to be of type concise.regularizers.GAMRegularizer. " +
                             "Current type: " + str(type(self.kernel_regularizer)) +
                             "\nObject: " + str(self.kernel_regularizer))
        # self.seq_length = seq_length

    def build(self, input_shape):
        """Point the regularizer at the number of input bases, then defer to Conv1D."""
        # update the regularizer
        self.kernel_regularizer.n_bases = input_shape[2]
        return super(ConvSplines, self).build(input_shape)

    def get_config(self):
        """Drop the hard-coded Conv1D arguments from the serialized config."""
        config = super(ConvSplines, self).get_config()
        config.pop('kernel_size')
        config.pop('strides')
        config.pop('padding')
        config.pop('dilation_rate')
        # config["seq_length"] = self.seq_length
        return config
class BiDropout(Dropout):
    """Applies Dropout to the input, no matter if in learning phase or not.
    """

    def __init__(self, bi_dropout=True, **kwargs):
        # __init__(self, rate, noise_shape=None, seed=None, **kwargs)
        # bi_dropout=True means units are dropped at inference time too,
        # not only during training.
        super(BiDropout, self).__init__(**kwargs)
        self.bi_dropout = bi_dropout
    #

    def call(self, inputs, training=None):
        # Dropout is only meaningful for rates strictly between 0 and 1;
        # otherwise the input is returned unchanged.
        if 0. < self.rate < 1.:
            noise_shape = self._get_noise_shape(inputs)
            #

            def dropped_inputs():
                return K.dropout(inputs, self.rate, noise_shape, seed=self.seed)
            if self.bi_dropout:
                # K.in_train_phase returns the first argument if in training phase otherwise the second
                # return K.in_train_phase(dropped_inputs, inputs, training=training)
                # Taken from keras.backend.tensorflow_backend
                # Always apply dropout, regardless of the training flag.
                if callable(dropped_inputs):
                    return dropped_inputs()
                else:
                    return dropped_inputs
            else:
                # Standard Dropout behavior: drop only in the training phase.
                return K.in_train_phase(dropped_inputs, inputs,
                                        training=training)
        return inputs
    #

    @classmethod
    def create_from_dropout(cls, dropout_obj):
        # Copy-construct a BiDropout from an existing Dropout layer's config.
        if not isinstance(dropout_obj, Dropout):
            raise Exception("Only Dropout objects can be converted this way!")
        kwargs = dropout_obj.get_config()
        # alternatively can we use "get_config" in combination with (Layer.__init__)allowed_kwargs?
        return cls(**kwargs)
# backcompatibility
# Old name kept as an alias so existing user code continues to work.
ConvDNAQuantitySplines = ConvSplines

# Layer names resolvable by `get()` below (includes legacy aliases).
AVAILABLE = ["InputDNA", "ConvDNA",
             "InputRNA", "ConvRNA",
             "InputCodon", "ConvCodon",
             "InputAA", "ConvAA",
             "InputRNAStructure", "ConvRNAStructure",
             "InputSplines", "ConvSplines",
             "GlobalSumPooling1D",
             "SplineWeight1D",
             "SplineT",
             # legacy
             "InputDNAQuantitySplines", "InputDNAQuantity",
             "GAMSmooth", "ConvDNAQuantitySplines", "BiDropout"]
def get(name):
    """Look up an object defined in this module by its string name."""
    return get_from_module(name, globals())
|
codervince/flashingredlight | refs/heads/master | env/lib/python2.7/site-packages/flask/testsuite/ext.py | 563 | # -*- coding: utf-8 -*-
"""
flask.testsuite.ext
~~~~~~~~~~~~~~~~~~~
Tests the extension import thing.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
try:
from imp import reload as reload_module
except ImportError:
reload_module = reload
from flask.testsuite import FlaskTestCase
from flask._compat import PY2
class ExtImportHookTestCase(FlaskTestCase):
    """Exercise the ``flask.ext`` import hook against fake new-style
    (``flask_*``) and old-style (``flaskext.*``) extension packages.
    """

    def setup(self):
        # we clear this out for various reasons. The most important one is
        # that a real flaskext could be in there which would disable our
        # fake package. Secondly we want to make sure that the flaskext
        # import hook does not break on reloading.
        for entry, value in list(sys.modules.items()):
            if (entry.startswith('flask.ext.') or
                entry.startswith('flask_') or
                entry.startswith('flaskext.') or
                entry == 'flaskext') and value is not None:
                sys.modules.pop(entry, None)
        from flask import ext
        reload_module(ext)

        # reloading must not add more hooks
        import_hooks = 0
        for item in sys.meta_path:
            cls = type(item)
            if cls.__module__ == 'flask.exthook' and \
               cls.__name__ == 'ExtensionImporter':
                import_hooks += 1
        self.assert_equal(import_hooks, 1)

    def teardown(self):
        # The hook must never register dotted submodule names as attributes
        # of the flask.ext module itself.
        from flask import ext
        for key in ext.__dict__:
            self.assert_not_in('.', key)

    def test_flaskext_new_simple_import_normal(self):
        from flask.ext.newext_simple import ext_id
        self.assert_equal(ext_id, 'newext_simple')

    def test_flaskext_new_simple_import_module(self):
        from flask.ext import newext_simple
        self.assert_equal(newext_simple.ext_id, 'newext_simple')
        self.assert_equal(newext_simple.__name__, 'flask_newext_simple')

    def test_flaskext_new_package_import_normal(self):
        from flask.ext.newext_package import ext_id
        self.assert_equal(ext_id, 'newext_package')

    def test_flaskext_new_package_import_module(self):
        from flask.ext import newext_package
        self.assert_equal(newext_package.ext_id, 'newext_package')
        self.assert_equal(newext_package.__name__, 'flask_newext_package')

    def test_flaskext_new_package_import_submodule_function(self):
        from flask.ext.newext_package.submodule import test_function
        self.assert_equal(test_function(), 42)

    def test_flaskext_new_package_import_submodule(self):
        from flask.ext.newext_package import submodule
        self.assert_equal(submodule.__name__, 'flask_newext_package.submodule')
        self.assert_equal(submodule.test_function(), 42)

    def test_flaskext_old_simple_import_normal(self):
        from flask.ext.oldext_simple import ext_id
        self.assert_equal(ext_id, 'oldext_simple')

    def test_flaskext_old_simple_import_module(self):
        from flask.ext import oldext_simple
        self.assert_equal(oldext_simple.ext_id, 'oldext_simple')
        self.assert_equal(oldext_simple.__name__, 'flaskext.oldext_simple')

    def test_flaskext_old_package_import_normal(self):
        from flask.ext.oldext_package import ext_id
        self.assert_equal(ext_id, 'oldext_package')

    def test_flaskext_old_package_import_module(self):
        from flask.ext import oldext_package
        self.assert_equal(oldext_package.ext_id, 'oldext_package')
        self.assert_equal(oldext_package.__name__, 'flaskext.oldext_package')

    def test_flaskext_old_package_import_submodule(self):
        from flask.ext.oldext_package import submodule
        self.assert_equal(submodule.__name__, 'flaskext.oldext_package.submodule')
        self.assert_equal(submodule.test_function(), 42)

    def test_flaskext_old_package_import_submodule_function(self):
        from flask.ext.oldext_package.submodule import test_function
        self.assert_equal(test_function(), 42)

    def test_flaskext_broken_package_no_module_caching(self):
        # A failed extension import must not leave a cached module behind;
        # importing twice must raise twice.
        for x in range(2):
            with self.assert_raises(ImportError):
                import flask.ext.broken

    def test_no_error_swallowing(self):
        # The hook must re-raise the *original* ImportError (message and
        # traceback), not replace it with its own.
        try:
            import flask.ext.broken
        except ImportError:
            exc_type, exc_value, tb = sys.exc_info()
            self.assert_true(exc_type is ImportError)
            if PY2:
                message = 'No module named missing_module'
            else:
                message = 'No module named \'missing_module\''
            self.assert_equal(str(exc_value), message)
            self.assert_true(tb.tb_frame.f_globals is globals())

            # reraise() adds a second frame so we need to skip that one too.
            # On PY3 we even have another one :(
            next = tb.tb_next.tb_next
            if not PY2:
                next = next.tb_next
            self.assert_in('flask_broken/__init__.py', next.tb_frame.f_code.co_filename)
def suite():
    """Build the unittest suite for this module."""
    result = unittest.TestSuite()
    result.addTest(unittest.makeSuite(ExtImportHookTestCase))
    return result
|
MLAB-project/data-uploader | refs/heads/master | dataUpload.py | 2 | #!/usr/bin/python
import os
import sys
import optparse
from src import DataUpload
import time
def main():
    """Parse the config-file argument, then run the uploader in a loop.

    Python 2 script: exits with usage text unless exactly one argument
    (the radio-observer config file) is given.
    """
    arg = sys.argv
    if len(arg) != 2:
        print "Usage: dataUpload [radio-observer configFile]"
        sys.exit(1)
    else:
        du = DataUpload.dataUpload(arg)
    while True:
        last_start = time.time()
        try:
            du.start()
        except Exception, e:
            # "CHYBA" is Czech for "ERROR" (runtime output, left untouched).
            print "CHYBA: "
            print e
        # NOTE(review): wait_time is later divided by 1000 (milliseconds),
        # so `3600 * 100` looks like a typo for `3600 * 1000` (one hour in
        # ms) -- confirm the intended period.
        wait_time = (last_start + 3600 * 100 - time.time())
        # NOTE(review): this compares a *duration* (wait_time) against an
        # *absolute timestamp* (time.time()); `if wait_time > 0:` appears to
        # be the intended guard -- confirm before changing behavior.
        if wait_time < time.time():
            print "wait", wait_time, "ms \n\n\n"
            time.sleep(wait_time/1000)

if __name__ == '__main__':
    main()
|
analogue/bravado-core | refs/heads/master | bravado_core/marshal.py | 7 | from six import iteritems
from bravado_core import formatter, schema
from bravado_core.exception import SwaggerMappingError
from bravado_core.model import is_model, MODEL_MARKER
from bravado_core.schema import is_dict_like
from bravado_core.schema import is_list_like
from bravado_core.schema import SWAGGER_PRIMITIVES
from bravado_core.schema import get_spec_for_prop
def marshal_schema_object(swagger_spec, schema_object_spec, value):
    """Marshal the value using the given schema object specification.

    Marshaling includes:
    - transform the value according to 'format' if available
    - return the value in a form suitable for 'on-the-wire' transmission

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :type schema_object_spec: dict
    :type value: int, long, string, unicode, boolean, list, dict, Model type
    :return: marshaled value
    :rtype: int, long, string, unicode, boolean, list, dict
    :raises: SwaggerMappingError
    """
    deref = swagger_spec.deref
    schema_object_spec = deref(schema_object_spec)
    obj_type = schema_object_spec['type']

    if obj_type in SWAGGER_PRIMITIVES:
        return marshal_primitive(swagger_spec, schema_object_spec, value)

    if obj_type == 'array':
        return marshal_array(swagger_spec, schema_object_spec, value)

    if is_model(swagger_spec, schema_object_spec):
        # Allow models to be passed in as dicts for flexibility.
        if is_dict_like(value):
            return marshal_object(swagger_spec, schema_object_spec, value)

        # It is important that the 'model' check comes before 'object' check
        # below. Model specs are of type 'object' but also have a MODEL_MARKER
        # key for identification.
        return marshal_model(swagger_spec, schema_object_spec, value)

    if obj_type == 'object':
        return marshal_object(swagger_spec, schema_object_spec, value)

    if obj_type == 'file':
        # File payloads are passed through untouched.
        return value

    raise SwaggerMappingError('Unknown type {0} for value {1}'.format(
        obj_type, value))
def marshal_primitive(swagger_spec, primitive_spec, value):
    """Marshal a python primitive type into a jsonschema primitive.

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :type primitive_spec: dict
    :type value: int, long, float, boolean, string, unicode, or an object
        based on 'format'
    :rtype: int, long, float, boolean, string, unicode, etc
    :raises: SwaggerMappingError
    """
    # Substitute the spec's default for a missing value, remembering that we
    # did so: defaults are returned as-is, bypassing the 'format' serializer.
    default_used = value is None and schema.has_default(swagger_spec,
                                                        primitive_spec)
    if default_used:
        value = schema.get_default(swagger_spec, primitive_spec)

    if value is None and schema.is_required(swagger_spec, primitive_spec):
        raise SwaggerMappingError(
            'Spec {0} is a required value'.format(primitive_spec))

    if default_used:
        return value
    return formatter.to_wire(swagger_spec, primitive_spec, value)
def marshal_array(swagger_spec, array_spec, array_value):
    """Marshal a jsonschema type of 'array' into a json-like list.

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :type array_spec: dict
    :type array_value: list
    :rtype: list
    :raises: SwaggerMappingError
    """
    if not is_list_like(array_value):
        raise SwaggerMappingError('Expected list like type for {0}: {1}'
                                  .format(type(array_value), array_value))

    element_spec = swagger_spec.deref(array_spec).get('items')
    marshaled = []
    for element in array_value:
        marshaled.append(
            marshal_schema_object(swagger_spec, element_spec, element))
    return marshaled
def marshal_object(swagger_spec, object_spec, object_value):
    """Marshal a python dict to json dict.

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :type object_spec: dict
    :type object_value: dict
    :rtype: dict
    :raises: SwaggerMappingError
    """
    if not is_dict_like(object_value):
        raise SwaggerMappingError('Expected dict like type for {0}:{1}'.format(
            type(object_value), object_value))

    deref = swagger_spec.deref
    marshaled = {}
    for key, val in iteritems(object_value):
        # Values cannot be None - skip them entirely!
        if val is None:
            continue

        prop_spec = get_spec_for_prop(
            swagger_spec, deref(object_spec), object_value, key)
        if not prop_spec:
            # Don't marshal when a spec is not available - just pass through
            marshaled[key] = val
        else:
            marshaled[key] = marshal_schema_object(swagger_spec, prop_spec, val)
    return marshaled
def marshal_model(swagger_spec, model_spec, model_value):
    """Marshal a Model instance into a json-like dict.

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :type model_spec: dict
    :type model_value: Model instance
    :rtype: dict
    :raises: SwaggerMappingError
    """
    deref = swagger_spec.deref
    model_name = deref(model_spec).get(MODEL_MARKER)
    model_type = swagger_spec.definitions.get(model_name, None)

    if model_type is None:
        raise SwaggerMappingError('Unknown model {0}'.format(model_name))

    if not isinstance(model_value, model_type):
        raise SwaggerMappingError(
            'Expected model of type {0} for {1}:{2}'
            .format(model_name, type(model_value), model_value))

    # just convert the model to a dict and feed into `marshal_object` because
    # models are essentially 'type':'object' when marshaled
    # NOTE(review): this assumes dir(model_value) yields only the model's
    # property names (i.e. the Model class customizes __dir__); if it also
    # returned methods/dunders they would leak into the dict -- confirm.
    attr_names = dir(model_value)
    object_value = dict(
        (attr_name, getattr(model_value, attr_name))
        for attr_name in attr_names)
    return marshal_object(swagger_spec, model_spec, object_value)
|
Xeralux/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/garbage_collection_test.py | 81 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests which set DEBUG_SAVEALL and assert no garbage was created.
This flag seems to be sticky, so these tests have been isolated for now.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class NoReferenceCycleTests(test_util.TensorFlowTestCase):
  """Checks (via assert_no_garbage_created) that these eager-mode ops
  create no uncollectable reference cycles."""

  @test_util.assert_no_garbage_created
  def testEagerResourceVariables(self):
    # Creating a resource variable in eager mode must not leave garbage.
    with context.eager_mode():
      resource_variable_ops.ResourceVariable(1.0, name="a")

  @test_util.assert_no_garbage_created
  def testTensorArrays(self):
    # Exercise TensorArray write/read round-trips in eager mode.
    with context.eager_mode():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=3,
          infer_shape=False)

      w0 = ta.write(0, [[4.0, 5.0]])
      w1 = w0.write(1, [[1.0]])
      w2 = w1.write(2, -3.0)

      r0 = w2.read(0)
      r1 = w2.read(1)
      r2 = w2.read(2)

      d0, d1, d2 = self.evaluate([r0, r1, r2])
      self.assertAllEqual([[4.0, 5.0]], d0)
      self.assertAllEqual([[1.0]], d1)
      self.assertAllEqual(-3.0, d2)
if __name__ == "__main__":
test.main()
|
dbremner/bite-project | refs/heads/master | deps/gdata-python-client/src/gdata/tlslite/api.py | 359 | """Import this module for easy access to TLS Lite objects.
The TLS Lite API consists of classes, functions, and variables spread
throughout this package. Instead of importing them individually with::
from tlslite.TLSConnection import TLSConnection
from tlslite.HandshakeSettings import HandshakeSettings
from tlslite.errors import *
.
.
It's easier to do::
from tlslite.api import *
This imports all the important objects (TLSConnection, Checker,
HandshakeSettings, etc.) into the global namespace. In particular, it
imports::
from constants import AlertLevel, AlertDescription, Fault
from errors import *
from Checker import Checker
from HandshakeSettings import HandshakeSettings
from Session import Session
from SessionCache import SessionCache
from SharedKeyDB import SharedKeyDB
from TLSConnection import TLSConnection
from VerifierDB import VerifierDB
from X509 import X509
from X509CertChain import X509CertChain
from integration.HTTPTLSConnection import HTTPTLSConnection
from integration.POP3_TLS import POP3_TLS
from integration.IMAP4_TLS import IMAP4_TLS
from integration.SMTP_TLS import SMTP_TLS
from integration.XMLRPCTransport import XMLRPCTransport
from integration.TLSSocketServerMixIn import TLSSocketServerMixIn
from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn
from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper
from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded,
gmpyLoaded, pycryptoLoaded, prngName
from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey,
parseAsPublicKey, parsePrivateKey
"""
from constants import AlertLevel, AlertDescription, Fault
from errors import *
from Checker import Checker
from HandshakeSettings import HandshakeSettings
from Session import Session
from SessionCache import SessionCache
from SharedKeyDB import SharedKeyDB
from TLSConnection import TLSConnection
from VerifierDB import VerifierDB
from X509 import X509
from X509CertChain import X509CertChain
from integration.HTTPTLSConnection import HTTPTLSConnection
from integration.TLSSocketServerMixIn import TLSSocketServerMixIn
from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn
from integration.POP3_TLS import POP3_TLS
from integration.IMAP4_TLS import IMAP4_TLS
from integration.SMTP_TLS import SMTP_TLS
from integration.XMLRPCTransport import XMLRPCTransport
try:
import twisted
del(twisted)
from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper
except ImportError:
pass
from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, gmpyLoaded, \
pycryptoLoaded, prngName
from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, \
parseAsPublicKey, parsePrivateKey
|
yotchang4s/cafebabepy | refs/heads/develop | src/main/python/idlelib/idle_test/test_configdialog.py | 3 | """Test idlelib.configdialog.
Half the class creates dialog, half works with user customizations.
Coverage: 46% just by creating dialog, 56% with current tests.
"""
from idlelib.configdialog import ConfigDialog, idleConf # test import
from test.support import requires
requires('gui')
from tkinter import Tk
import unittest
import idlelib.config as config
# Tests should not depend on fortuitous user configurations.
# They must not affect actual user .cfg files.
# Use solution from test_config: empty parsers with no filename.
usercfg = idleConf.userCfg
testcfg = {
'main': config.IdleUserConfParser(''),
'highlight': config.IdleUserConfParser(''),
'keys': config.IdleUserConfParser(''),
'extensions': config.IdleUserConfParser(''),
}
# ConfigDialog.changedItems is a 3-level hierarchical dictionary of
# pending changes that mirrors the multilevel user config dict.
# For testing, record args in a list for comparison with expected.
changes = []
class TestDialog(ConfigDialog):
    """ConfigDialog that records changed items instead of applying them."""
    def AddChangedItem(self, *args):
        # Append the raw args tuple for later comparison against `expected`.
        changes.append(args)
def setUpModule():
    """Create the hidden Tk root and the dialog under test once per module."""
    global root, configure
    # Swap in empty config parsers so tests never touch real user .cfg files.
    idleConf.userCfg = testcfg
    root = Tk()
    root.withdraw()
    configure = TestDialog(root, 'Test', _utest=True)
def tearDownModule():
    """Tear down the dialog and Tk root and restore the real user config."""
    global root, configure
    # BUG FIX: this assigned `testcfg` again, which is a no-op (setUpModule
    # already installed it) and leaves the stub config in place for any later
    # test modules.  `usercfg` was saved at module top precisely so it could
    # be restored here.
    idleConf.userCfg = usercfg
    configure.remove_var_callbacks()
    del configure
    root.update_idletasks()
    root.destroy()
    del root
class FontTabTest(unittest.TestCase):
    """Tests for the Fonts/Tabs tab of the configuration dialog."""

    def setUp(self):
        changes.clear()

    def test_font(self):
        # Set values guaranteed not to be defaults.
        dfont = idleConf.GetFont(root, 'main', 'EditorWindow')
        dsize = str(dfont[1])
        dbold = dfont[2] == 'bold'
        # Each var change re-emits the full (font, size, bold) triple.
        configure.fontName.set('Test Font')
        expected = [
            ('main', 'EditorWindow', 'font', 'Test Font'),
            ('main', 'EditorWindow', 'font-size', dsize),
            ('main', 'EditorWindow', 'font-bold', dbold)]
        self.assertEqual(changes, expected)
        changes.clear()
        configure.fontSize.set(20)
        expected = [
            ('main', 'EditorWindow', 'font', 'Test Font'),
            ('main', 'EditorWindow', 'font-size', '20'),
            ('main', 'EditorWindow', 'font-bold', dbold)]
        self.assertEqual(changes, expected)
        changes.clear()
        configure.fontBold.set(not dbold)
        expected = [
            ('main', 'EditorWindow', 'font', 'Test Font'),
            ('main', 'EditorWindow', 'font-size', '20'),
            ('main', 'EditorWindow', 'font-bold', not dbold)]
        self.assertEqual(changes, expected)

    #def test_sample(self): pass  # TODO

    def test_tabspace(self):
        configure.spaceNum.set(6)
        self.assertEqual(changes, [('main', 'Indent', 'num-spaces', 6)])
class HighlightTest(unittest.TestCase):
    """Placeholder tests for the Highlights tab (not yet implemented)."""

    def setUp(self):
        changes.clear()

    #def test_colorchoose(self): pass  # TODO
class KeysTest(unittest.TestCase):
    """Placeholder tests for the Keys tab (not yet implemented)."""

    def setUp(self):
        changes.clear()
class GeneralTest(unittest.TestCase):
    """Tests for the General tab of the configuration dialog."""

    def setUp(self):
        changes.clear()

    def test_startup(self):
        configure.radioStartupEdit.invoke()
        self.assertEqual(changes,
                         [('main', 'General', 'editor-on-startup', 1)])

    def test_autosave(self):
        configure.radioSaveAuto.invoke()
        self.assertEqual(changes, [('main', 'General', 'autosave', 1)])

    def test_editor_size(self):
        # Inserting '1' at index 0 is expected to yield '140'/'180';
        # presumably the entries start at the defaults '40'/'80' -- verify.
        configure.entryWinHeight.insert(0, '1')
        self.assertEqual(changes, [('main', 'EditorWindow', 'height', '140')])
        changes.clear()
        configure.entryWinWidth.insert(0, '1')
        self.assertEqual(changes, [('main', 'EditorWindow', 'width', '180')])

    #def test_help_sources(self): pass  # TODO
if __name__ == '__main__':
unittest.main(verbosity=2)
|
robbrockbank/calicoctl | refs/heads/master | calico_containers/tests/st/__init__.py | 12133432 | |
gtsiokos/diogenis | refs/heads/master | common/tests.py | 6666 | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Sanity-check test case (Django template boilerplate)."""

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        total = 1 + 1
        self.assertEqual(total, 2)
|
eerimoq/asn1tools | refs/heads/master | tests/files/time_types_pp.py | 1 | EXPECTED = {'Foo': {'extensibility-implied': False,
'imports': {},
'object-classes': {},
'object-sets': {},
'types': {'B': {'type': 'DATE'},
'C': {'type': 'TIME-OF-DAY'},
'D': {'type': 'DATE-TIME'}},
'values': {}}}
|
KohlsTechnology/ansible | refs/heads/devel | test/units/cli/test_cli.py | 57 | # (c) 2017, Adrian Likins <alikins@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from units.mock.loader import DictDataLoader
from ansible.release import __version__
from ansible.parsing import vault
from ansible import cli
class TestCliVersion(unittest.TestCase):
    """Tests for CLI.version() and CLI.version_info()."""

    def test_version(self):
        ver = cli.CLI.version('ansible-cli-test')
        self.assertIn('ansible-cli-test', ver)
        self.assertIn('python version', ver)

    def test_version_info(self):
        version_info = cli.CLI.version_info()
        self.assertEqual(version_info['string'], __version__)

    def test_version_info_gitinfo(self):
        version_info = cli.CLI.version_info(gitinfo=True)
        self.assertIn('python version', version_info['string'])
class TestCliBuildVaultIds(unittest.TestCase):
    """Tests for CLI.build_vault_ids() covering the interaction of
    explicit vault ids, password files, --ask-vault-pass and auto-prompt."""

    def setUp(self):
        # Pretend stdin is a tty so auto-prompt logic can kick in.
        self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True)
        self.mock_isatty = self.tty_patcher.start()

    def tearDown(self):
        self.tty_patcher.stop()

    def test(self):
        res = cli.CLI.build_vault_ids(['foo@bar'])
        self.assertEqual(res, ['foo@bar'])

    def test_create_new_password_no_vault_id(self):
        res = cli.CLI.build_vault_ids([], create_new_password=True)
        self.assertEqual(res, ['default@prompt_ask_vault_pass'])

    def test_create_new_password_no_vault_id_no_auto_prompt(self):
        res = cli.CLI.build_vault_ids([], auto_prompt=False, create_new_password=True)
        self.assertEqual(res, [])

    def test_no_vault_id_no_auto_prompt(self):
        # simulate 'ansible-playbook site.yml' with out --ask-vault-pass, should not prompt
        res = cli.CLI.build_vault_ids([], auto_prompt=False)
        self.assertEqual(res, [])

    def test_no_vault_ids_auto_prompt(self):
        # create_new_password=False
        # simulate 'ansible-vault edit encrypted.yml'
        res = cli.CLI.build_vault_ids([], auto_prompt=True)
        self.assertEqual(res, ['default@prompt_ask_vault_pass'])

    def test_no_vault_ids_auto_prompt_ask_vault_pass(self):
        # create_new_password=False
        # simulate 'ansible-vault edit --ask-vault-pass encrypted.yml'
        res = cli.CLI.build_vault_ids([], auto_prompt=True, ask_vault_pass=True)
        self.assertEqual(res, ['default@prompt_ask_vault_pass'])

    def test_create_new_password_auto_prompt(self):
        # simulate 'ansible-vault encrypt somefile.yml'
        res = cli.CLI.build_vault_ids([], auto_prompt=True, create_new_password=True)
        self.assertEqual(res, ['default@prompt_ask_vault_pass'])

    def test_create_new_password_no_vault_id_ask_vault_pass(self):
        res = cli.CLI.build_vault_ids([], ask_vault_pass=True,
                                      create_new_password=True)
        self.assertEqual(res, ['default@prompt_ask_vault_pass'])

    def test_create_new_password_with_vault_ids(self):
        res = cli.CLI.build_vault_ids(['foo@bar'], create_new_password=True)
        self.assertEqual(res, ['foo@bar'])

    def test_create_new_password_no_vault_ids_password_files(self):
        res = cli.CLI.build_vault_ids([], vault_password_files=['some-password-file'],
                                      create_new_password=True)
        self.assertEqual(res, ['default@some-password-file'])

    def test_everything(self):
        # All input sources at once; order is not guaranteed, hence set().
        res = cli.CLI.build_vault_ids(['blip@prompt', 'baz@prompt_ask_vault_pass',
                                       'some-password-file', 'qux@another-password-file'],
                                      vault_password_files=['yet-another-password-file',
                                                            'one-more-password-file'],
                                      ask_vault_pass=True,
                                      create_new_password=True,
                                      auto_prompt=False)

        self.assertEqual(set(res), set(['blip@prompt', 'baz@prompt_ask_vault_pass',
                                        'default@prompt_ask_vault_pass',
                                        'some-password-file', 'qux@another-password-file',
                                        'default@yet-another-password-file',
                                        'default@one-more-password-file']))
class TestCliSetupVaultSecrets(unittest.TestCase):
def setUp(self):
self.fake_loader = DictDataLoader({})
self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True)
self.mock_isatty = self.tty_patcher.start()
self.display_v_patcher = patch('ansible.cli.display.verbosity', return_value=6)
self.mock_display_v = self.display_v_patcher.start()
cli.display.verbosity = 5
def tearDown(self):
self.tty_patcher.stop()
self.display_v_patcher.stop()
cli.display.verbosity = 0
def test(self):
res = cli.CLI.setup_vault_secrets(None, None, auto_prompt=False)
self.assertIsInstance(res, list)
@patch('ansible.cli.get_file_vault_secret')
def test_password_file(self, mock_file_secret):
filename = '/dev/null/secret'
mock_file_secret.return_value = MagicMock(bytes=b'file1_password',
vault_id='file1',
filename=filename)
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['secret1@%s' % filename, 'secret2'],
vault_password_files=[filename])
self.assertIsInstance(res, list)
matches = vault.match_secrets(res, ['secret1'])
self.assertIn('secret1', [x[0] for x in matches])
match = matches[0][1]
self.assertEqual(match.bytes, b'file1_password')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt(self, mock_prompt_secret):
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='prompt1')
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['prompt1@prompt'],
ask_vault_pass=True,
auto_prompt=False)
self.assertIsInstance(res, list)
matches = vault.match_secrets(res, ['prompt1'])
self.assertIn('prompt1', [x[0] for x in matches])
match = matches[0][1]
self.assertEqual(match.bytes, b'prompt1_password')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt_no_tty(self, mock_prompt_secret):
self.mock_isatty.return_value = False
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='prompt1',
name='bytes_should_be_prompt1_password',
spec=vault.PromptVaultSecret)
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['prompt1@prompt'],
ask_vault_pass=True,
auto_prompt=False)
self.assertIsInstance(res, list)
self.assertEqual(len(res), 2)
matches = vault.match_secrets(res, ['prompt1'])
self.assertIn('prompt1', [x[0] for x in matches])
self.assertEquals(len(matches), 1)
@patch('ansible.cli.get_file_vault_secret')
@patch('ansible.cli.PromptVaultSecret')
def test_prompt_no_tty_and_password_file(self, mock_prompt_secret, mock_file_secret):
self.mock_isatty.return_value = False
mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
vault_id='prompt1')
filename = '/dev/null/secret'
mock_file_secret.return_value = MagicMock(bytes=b'file1_password',
vault_id='file1',
filename=filename)
res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
vault_ids=['prompt1@prompt', 'file1@/dev/null/secret'],
ask_vault_pass=True)
self.assertIsInstance(res, list)
matches = vault.match_secrets(res, ['file1'])
self.assertIn('file1', [x[0] for x in matches])
self.assertNotIn('prompt1', [x[0] for x in matches])
match = matches[0][1]
self.assertEqual(match.bytes, b'file1_password')
def _assert_ids(self, vault_id_names, res, password=b'prompt1_password'):
self.assertIsInstance(res, list)
len_ids = len(vault_id_names)
matches = vault.match_secrets(res, vault_id_names)
self.assertEqual(len(res), len_ids, 'len(res):%s does not match len_ids:%s' % (len(res), len_ids))
self.assertEqual(len(matches), len_ids)
for index, prompt in enumerate(vault_id_names):
self.assertIn(prompt, [x[0] for x in matches])
# simple mock, same password/prompt for each mock_prompt_secret
self.assertEqual(matches[index][1].bytes, password)
    @patch('ansible.cli.PromptVaultSecret')
    def test_multiple_prompts(self, mock_prompt_secret):
        """Two prompt-style vault-ids produce one secret per id."""
        mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
                                                    vault_id='prompt1')
        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=['prompt1@prompt',
                                                     'prompt2@prompt'],
                                          ask_vault_pass=False)
        vault_id_names = ['prompt1', 'prompt2']
        self._assert_ids(vault_id_names, res)
    @patch('ansible.cli.PromptVaultSecret')
    def test_multiple_prompts_and_ask_vault_pass(self, mock_prompt_secret):
        """Multiple prompt vault-ids combined with --ask-vault-pass; the
        latter is expected to add a 'default' secret as well."""
        self.mock_isatty.return_value = False
        mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
                                                    vault_id='prompt1')
        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=['prompt1@prompt',
                                                     'prompt2@prompt',
                                                     'prompt3@prompt_ask_vault_pass'],
                                          ask_vault_pass=True)

        # We provide some vault-ids and secrets, so auto_prompt shouldn't get
        # triggered; the fourth ('default') secret comes from ask_vault_pass.
        vault_id_names = ['prompt1', 'prompt2', 'prompt3', 'default']
        self._assert_ids(vault_id_names, res)
    @patch('ansible.cli.C')
    @patch('ansible.cli.get_file_vault_secret')
    @patch('ansible.cli.PromptVaultSecret')
    def test_default_file_vault(self, mock_prompt_secret,
                                mock_file_secret,
                                mock_config):
        """DEFAULT_VAULT_PASSWORD_FILE supplies the 'default' secret; with
        ask_vault_pass the prompt secret is appended after the file secret."""
        mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
                                                    vault_id='default')
        mock_file_secret.return_value = MagicMock(bytes=b'file1_password',
                                                  vault_id='default')
        mock_config.DEFAULT_VAULT_PASSWORD_FILE = '/dev/null/faux/vault_password_file'
        mock_config.DEFAULT_VAULT_IDENTITY = 'default'

        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=[],
                                          create_new_password=False,
                                          ask_vault_pass=False)

        self.assertIsInstance(res, list)
        matches = vault.match_secrets(res, ['default'])
        # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE is higher precedence than prompts
        # if the same vault-id ('default') regardless of cli order since it didn't matter in 2.3
        self.assertEqual(matches[0][1].bytes, b'file1_password')
        self.assertEqual(len(matches), 1)

        # With ask_vault_pass the prompt secret is appended after the
        # file-based one under the same 'default' id.
        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=[],
                                          create_new_password=False,
                                          ask_vault_pass=True,
                                          auto_prompt=True)
        self.assertIsInstance(res, list)
        matches = vault.match_secrets(res, ['default'])
        self.assertEqual(matches[0][1].bytes, b'file1_password')
        self.assertEqual(matches[1][1].bytes, b'prompt1_password')
        self.assertEqual(len(matches), 2)
    @patch('ansible.cli.get_file_vault_secret')
    @patch('ansible.cli.PromptVaultSecret')
    def test_default_file_vault_identity_list(self, mock_prompt_secret,
                                              mock_file_secret):
        """A default identity list mixing a prompt id and a file id yields
        both secrets, each under its own vault-id."""
        default_vault_ids = ['some_prompt@prompt',
                             'some_file@/dev/null/secret']

        mock_prompt_secret.return_value = MagicMock(bytes=b'some_prompt_password',
                                                    vault_id='some_prompt')
        filename = '/dev/null/secret'
        mock_file_secret.return_value = MagicMock(bytes=b'some_file_password',
                                                  vault_id='some_file',
                                                  filename=filename)

        vault_ids = default_vault_ids
        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=vault_ids,
                                          create_new_password=False,
                                          ask_vault_pass=True)

        self.assertIsInstance(res, list)
        matches = vault.match_secrets(res, ['some_file'])
        # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE is higher precedence than prompts
        # if the same vault-id ('default') regardless of cli order since it didn't matter in 2.3
        self.assertEqual(matches[0][1].bytes, b'some_file_password')
        matches = vault.match_secrets(res, ['some_prompt'])
        self.assertEqual(matches[0][1].bytes, b'some_prompt_password')
    @patch('ansible.cli.PromptVaultSecret')
    def test_prompt_just_ask_vault_pass(self, mock_prompt_secret):
        """--ask-vault-pass alone produces a single 'default' prompt secret."""
        mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
                                                    vault_id='default')
        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=[],
                                          create_new_password=False,
                                          ask_vault_pass=True)
        self.assertIsInstance(res, list)
        match = vault.match_secrets(res, ['default'])[0][1]
        self.assertEqual(match.bytes, b'prompt1_password')
    @patch('ansible.cli.PromptVaultSecret')
    def test_prompt_new_password_ask_vault_pass(self, mock_prompt_secret):
        """create_new_password with --ask-vault-pass still resolves to a
        single 'default' prompt secret."""
        mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
                                                    vault_id='default')
        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=[],
                                          create_new_password=True,
                                          ask_vault_pass=True)
        self.assertIsInstance(res, list)
        match = vault.match_secrets(res, ['default'])[0][1]
        self.assertEqual(match.bytes, b'prompt1_password')
    @patch('ansible.cli.PromptVaultSecret')
    def test_prompt_new_password_vault_id_prompt(self, mock_prompt_secret):
        """create_new_password with an explicit '@prompt' vault-id stores the
        secret under that id."""
        mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
                                                    vault_id='some_vault_id')
        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=['some_vault_id@prompt'],
                                          create_new_password=True,
                                          ask_vault_pass=False)
        self.assertIsInstance(res, list)
        match = vault.match_secrets(res, ['some_vault_id'])[0][1]
        self.assertEqual(match.bytes, b'prompt1_password')
    @patch('ansible.cli.PromptVaultSecret')
    def test_prompt_new_password_vault_id_prompt_ask_vault_pass(self, mock_prompt_secret):
        """'@prompt_ask_vault_pass' works even when --ask-vault-pass itself
        is off; the secret is matched under the given vault-id."""
        mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
                                                    vault_id='default')
        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=['some_vault_id@prompt_ask_vault_pass'],
                                          create_new_password=True,
                                          ask_vault_pass=False)
        self.assertIsInstance(res, list)
        match = vault.match_secrets(res, ['some_vault_id'])[0][1]
        self.assertEqual(match.bytes, b'prompt1_password')
    @patch('ansible.cli.PromptVaultSecret')
    def test_prompt_new_password_vault_id_prompt_ask_vault_pass_ask_vault_pass(self, mock_prompt_secret):
        """Same as above, but with --ask-vault-pass also enabled."""
        mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
                                                    vault_id='default')
        res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
                                          vault_ids=['some_vault_id@prompt_ask_vault_pass'],
                                          create_new_password=True,
                                          ask_vault_pass=True)
        self.assertIsInstance(res, list)
        match = vault.match_secrets(res, ['some_vault_id'])[0][1]
        self.assertEqual(match.bytes, b'prompt1_password')
|
allen501pc/phpmyadmin | refs/heads/master | doc/conf.py | 1 | # -*- coding: utf-8 -*-
#
# phpMyAdmin documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 26 14:04:48 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'phpMyAdmin'
copyright = u'2012, The phpMyAdmin devel team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0.0-dev'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html', 'doctrees']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'phpMyAdmindoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'phpMyAdmin.tex', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phpmyadmin', u'phpMyAdmin Documentation',
[u'The phpMyAdmin devel team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'phpMyAdmin', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'phpMyAdmin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'phpMyAdmin'
epub_author = u'The phpMyAdmin devel team'
epub_publisher = u'The phpMyAdmin devel team'
epub_copyright = u'2012, The phpMyAdmin devel team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
mgedmin/ansible | refs/heads/devel | lib/ansible/modules/packaging/language/__init__.py | 12133432 | |
sunny94/temp | refs/heads/iss8501_parsing | sympy/polys/tests/__init__.py | 12133432 | |
cactusbin/nyt | refs/heads/master | matplotlib/doc/users/plotting/examples/annotate_simple_coord02.py | 6 |
import matplotlib.pyplot as plt

# Example: use one annotation (and a mixed coordinate tuple) as the
# coordinate system for a second annotation.
plt.figure(figsize=(3,2))
ax=plt.axes([0.1, 0.1, 0.8, 0.7])
an1 = ax.annotate("Test 1", xy=(0.5, 0.5), xycoords="data",
                  va="center", ha="center",
                  bbox=dict(boxstyle="round", fc="w"))
# xycoords=an1: xy is interpreted relative to the first annotation
# (see Matplotlib annotation docs); textcoords mixes an1's frame for x
# with axes-fraction units for y.
an2 = ax.annotate("Test 2", xy=(0.5, 1.), xycoords=an1,
                  xytext=(0.5,1.1), textcoords=(an1, "axes fraction"),
                  va="bottom", ha="center",
                  bbox=dict(boxstyle="round", fc="w"),
                  arrowprops=dict(arrowstyle="->"))
plt.show()
|
overviewer/Minecraft-Overviewer | refs/heads/master | overviewer_core/optimizeimages.py | 4 | # This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
class Optimizer:
    """Abstract base for wrappers around external image-optimization tools.

    Subclasses list candidate executable names in ``binarynames``;
    ``check_availability`` resolves the first one found on PATH into
    ``binaryname``.
    """
    binaryname = ""
    binarynames = []

    def __init__(self):
        # Abstract: instantiate a concrete subclass instead.
        raise NotImplementedError("I can't let you do that, Dave.")

    def optimize(self, img):
        """Optimize the image file at *img* in place."""
        raise NotImplementedError("I can't let you do that, Dave.")

    def fire_and_forget(self, args):
        # Run the external tool synchronously; raises CalledProcessError
        # on a non-zero exit status.
        subprocess.check_call(args)

    def check_availability(self):
        """Locate one of ``binarynames`` on PATH, or raise Exception."""
        # PATH may be unset in a stripped-down environment; default to ""
        # instead of crashing on None.split().
        path = os.environ.get("PATH", "").split(os.pathsep)

        def exists_in_path(prog):
            # any() short-circuits instead of building the full match list
            return any(os.path.exists(os.path.join(x, prog)) for x in path)

        # Also accept Windows-style names with an .exe suffix.
        binaries = self.binarynames + [x + ".exe" for x in self.binarynames]
        for b in binaries:
            if exists_in_path(b):
                self.binaryname = b
                break
        else:
            raise Exception("Optimization programs '%s' were not found!" %
                            binaries)

    def is_crusher(self):
        """Should return True if the optimization is lossless, i.e. none of the
        actual image data will be changed."""
        raise NotImplementedError("I'm so abstract I can't even say whether "
                                  "I'm a crusher.")
class NonAtomicOptimizer(Optimizer):
    """Optimizer whose external tool writes to ``<img>.tmp``; the result is
    swapped into place afterwards (not atomically, hence the name)."""

    def cleanup(self, img):
        # Replace the original with the freshly optimized temp file.
        os.remove(img)
        os.rename(img + ".tmp", img)

    def fire_and_forget(self, args, img):
        # Run the tool, then move its .tmp output over the original.
        subprocess.check_call(args)
        self.cleanup(img)
class PNGOptimizer:
    """Marker base class for optimizers that handle PNG images."""

    def __init__(self):
        # Abstract marker: instantiate a concrete subclass instead.
        raise NotImplementedError("I can't let you do that, Dave.")
class JPEGOptimizer:
    """Marker base class for optimizers that handle JPEG images."""

    def __init__(self):
        # Abstract marker: instantiate a concrete subclass instead.
        raise NotImplementedError("I can't let you do that, Dave.")
class pngnq(NonAtomicOptimizer, PNGOptimizer):
    """Quantizing (lossy) PNG optimizer backed by pngnq/pngnq-s9."""
    binarynames = ["pngnq-s9", "pngnq"]

    def __init__(self, sampling=3, dither="n"):
        """Validate and store the sampling factor (1-10) and dither mode."""
        if not 1 <= sampling <= 10:
            raise Exception("Invalid sampling value '%d' for pngnq!" %
                            sampling)
        if dither not in ("n", "f"):
            raise Exception("Invalid dither method '%s' for pngnq!" % dither)
        self.sampling = sampling
        self.dither = dither

    def optimize(self, img):
        """Quantize *img* in place via the external binary."""
        extension = ".tmp" if img.endswith(".tmp") else ".png.tmp"
        args = [self.binaryname, "-s", str(self.sampling), "-f", "-e",
                extension, img]
        # Workaround for poopbuntu 12.04 which ships an old broken pngnq:
        # only pass -Q when dithering is actually requested.
        if self.dither != "n":
            args[1:1] = ["-Q", self.dither]
        NonAtomicOptimizer.fire_and_forget(self, args, img)

    def is_crusher(self):
        # Quantization discards color information, so this is lossy.
        return False
class pngcrush(NonAtomicOptimizer, PNGOptimizer):
    """Lossless PNG optimizer using the pngcrush tool."""
    binarynames = ["pngcrush"]

    # really can't be bothered to add some interface for all
    # the pngcrush options, it sucks anyway
    def __init__(self, brute=False):
        self.brute = brute

    def optimize(self, img):
        """Crush *img* into <img>.tmp and swap it into place."""
        cmd = [self.binaryname]
        if self.brute:  # Was the user an idiot?
            cmd.append("-brute")
        cmd += [img, img + ".tmp"]
        NonAtomicOptimizer.fire_and_forget(self, cmd, img)

    def is_crusher(self):
        # pngcrush only restructures the file; pixel data is untouched.
        return True
class optipng(Optimizer, PNGOptimizer):
    """Lossless PNG optimizer using optipng."""
    binarynames = ["optipng"]

    def __init__(self, olevel=2):
        # Optimization level, forwarded as optipng's -o flag.
        self.olevel = olevel

    def optimize(self, img):
        """Run optipng on *img* in place, quietly."""
        level_flag = "-o" + str(self.olevel)
        Optimizer.fire_and_forget(self, [self.binaryname, level_flag,
                                         "-quiet", img])

    def is_crusher(self):
        # optipng's default operation is lossless.
        return True
class advpng(Optimizer, PNGOptimizer):
    """Lossless PNG recompressor using advpng from AdvanceCOMP."""
    binarynames = ["advpng"]
    crusher = True

    def __init__(self, olevel=3):
        # Compression level, forwarded as advpng's -z flag.
        self.olevel = olevel

    def optimize(self, img):
        """Recompress *img* in place, quietly."""
        level_flag = "-z" + str(self.olevel)
        Optimizer.fire_and_forget(self, [self.binaryname, level_flag,
                                         "-q", img])

    def is_crusher(self):
        # advpng only recompresses the stream; pixel data is untouched.
        return True
class jpegoptim(Optimizer, JPEGOptimizer):
    """JPEG optimizer using jpegoptim; optionally lossy when a quality
    cap or target file size is requested."""
    binarynames = ["jpegoptim"]
    crusher = True
    quality = None
    target_size = None

    def __init__(self, quality=None, target_size=None):
        """Store optional quality cap (0-100) and target size."""
        if quality is not None:
            if not 0 <= quality <= 100:
                raise Exception("Invalid target quality %d for jpegoptim" %
                                quality)
            self.quality = quality
        if target_size is not None:
            self.target_size = target_size

    def optimize(self, img):
        """Optimize *img* in place, preserving file timestamps (-p)."""
        cmd = [self.binaryname, "-q", "-p"]
        if self.quality is not None:
            cmd.append("-m" + str(self.quality))
        if self.target_size is not None:
            cmd.append("-S" + str(self.target_size))
        cmd.append(img)
        Optimizer.fire_and_forget(self, cmd)

    def is_crusher(self):
        # Technically, optimisation is lossless if input image quality
        # is below target quality, but this is irrelevant in this case
        return self.quality is None and self.target_size is None
class oxipng(Optimizer, PNGOptimizer):
    """Lossless PNG optimizer using the multithreaded oxipng tool."""
    binarynames = ["oxipng"]

    def __init__(self, olevel=2, threads=1):
        """Validate and store the optimization level and thread count."""
        if olevel > 6:
            raise Exception("olevel should be between 0 and 6 inclusive")
        if threads < 1:
            raise Exception("threads needs to be at least 1")
        self.olevel = olevel
        self.threads = threads

    def optimize(self, img):
        """Run oxipng on *img* in place, quietly."""
        cmd = [self.binaryname,
               "-o" + str(self.olevel),
               "-q",
               "-t" + str(self.threads),
               img]
        Optimizer.fire_and_forget(self, cmd)

    def is_crusher(self):
        # oxipng's default operation is lossless.
        return True
def optimize_image(imgpath, imgformat, optimizers):
    """Run every optimizer from *optimizers* whose type matches *imgformat*
    ('png' or 'jpg') on the image at *imgpath*, in list order."""
    for opt in optimizers:
        if imgformat == 'png':
            if isinstance(opt, PNGOptimizer):
                opt.optimize(imgpath)
        elif imgformat == 'jpg':
            if isinstance(opt, JPEGOptimizer):
                opt.optimize(imgpath)
|
mcr/ietfdb | refs/heads/master | django/contrib/flatpages/__init__.py | 12133432 | |
stacywsmith/ansible | refs/heads/devel | lib/ansible/modules/messaging/__init__.py | 12133432 | |
felliott/osf.io | refs/heads/develop | website/reviews/__init__.py | 12133432 | |
kenshay/ImageScript | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/django/conf/locale/ru/__init__.py | 12133432 | |
Sarah-Alsinan/muypicky | refs/heads/master | lib/python3.6/site-packages/django/conf/locale/en/__init__.py | 12133432 | |
vismartltd/edx-platform | refs/heads/master | lms/djangoapps/mobile_api/social_facebook/courses/__init__.py | 128 | """
Courses API
"""
|
avrem/ardupilot | refs/heads/pmu40 | Tools/LogAnalyzer/tests/TestVCC.py | 73 | from LogAnalyzer import Test,TestResult
import DataflashLog
import collections
class TestVCC(Test):
    '''test for VCC within recommendations, or abrupt end to log in flight'''

    def __init__(self):
        Test.__init__(self)
        self.name = "VCC"

    def run(self, logdata, verbose):
        """Warn on a noisy VCC rail, fail when VCC dips below 4.6V."""
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        # NOTE(review): logs carrying only POWR data bail out here even
        # though the POWR fallback below could handle them -- confirm
        # whether that is intentional before widening this check.
        if "CURR" not in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No CURR log data"
            return

        # just a naive min/max test for now
        try:
            vccMin = logdata.channels["CURR"]["Vcc"].min()
            vccMax = logdata.channels["CURR"]["Vcc"].max()
        except KeyError:
            # Fall back to POWR; its Vcc appears to be in volts, hence the
            # conversion to millivolts to match the CURR path.
            vccMin = logdata.channels["POWR"]["Vcc"].min()
            vccMax = logdata.channels["POWR"]["Vcc"].max()
            vccMin *= 1000
            vccMax *= 1000

        vccDiff = vccMax - vccMin
        vccMinThreshold = 4.6 * 1000
        vccMaxDiff = 0.3 * 1000
        if vccDiff > vccMaxDiff:
            self.result.status = TestResult.StatusType.WARN
            self.result.statusMessage = "VCC min/max diff %sv, should be <%sv" % (vccDiff/1000.0, vccMaxDiff/1000.0)
        elif vccMin < vccMinThreshold:
            self.result.status = TestResult.StatusType.FAIL
            self.result.statusMessage = "VCC below minimum of %sv (%sv)" % (repr(vccMinThreshold/1000.0), repr(vccMin/1000.0))
|
onfido/dependencies-resolver | refs/heads/master | tests/dependency_handler/test_validation.py | 1 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import pytest
from dependencies_resolver.dependency_handler.validation import validate_data
from jsonschema import ValidationError
# Shared fixture: a minimal dependencies-config document. Note the
# repository value deliberately lacks the required 's3://' prefix.
mocked_dependencies = {
    "repository": "onfido-blobs",
    "dependencies": [
        {
            "name": "model.pb",
            "version": "latest",
            "location": "/tmp/model"
        }
    ]
}
def test_bad_repository_url():
    """A test to check if the repository is a valid S3 bucket name.

    A valid S3 bucket name should contain 's3://' as the prefix, to make it
    explicit that the remote repository is S3 bucket.
    :return: Nothing, an exception is being raised.
    """
    # The fixture's repository lacks the 's3://' prefix, so validation fails.
    with pytest.raises(ValidationError):
        validate_data(mocked_dependencies)
def test_location_without_trailing_slash():
    """A test to check if the location property of the dependency is ending
    with a trailing slash, as it should ALWAYS be a directory.
    :return: Nothing, an exception is being raised.
    """
    import copy

    # Work on a deep copy with a well-formed repository URL instead of
    # mutating the shared module-level fixture in place -- the original
    # in-place edit made this test's outcome depend on execution order.
    config = copy.deepcopy(mocked_dependencies)
    config['repository'] = 's3://' + config['repository'].replace('s3://', '', 1)
    with pytest.raises(ValidationError):
        validate_data(config)
|
MSOpenTech/python-social-auth | refs/heads/master | social/tests/backends/test_nationbuilder.py | 77 | import json
from social.tests.backends.oauth import OAuth2Test
class NationBuilderOAuth2Test(OAuth2Test):
backend_path = 'social.backends.nationbuilder.NationBuilderOAuth2'
user_data_url = 'https://foobar.nationbuilder.com/api/v1/people/me'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer',
'created_at': 1422937981,
'expires_in': 2592000
})
user_data_body = json.dumps({
'person': {
'twitter_followers_count': None,
'last_name': 'Bar',
'rule_violations_count': 0,
'linkedin_id': None,
'recruiter_id': None,
'membership_expires_at': None,
'donations_raised_count': 0,
'last_contacted_at': None,
'prefix': None,
'profile_content_html': None,
'email4': None,
'email2': None,
'availability': None,
'occupation': None,
'user_submitted_address': None,
'could_vote_status': None,
'state_upper_district': None,
'salesforce_id': None,
'van_id': None,
'phone_time': None,
'profile_content': None,
'auto_import_id': None,
'parent_id': None,
'email4_is_bad': False,
'twitter_updated_at': None,
'email3_is_bad': False,
'bio': None,
'party_member': None,
'unsubscribed_at': None,
'fax_number': None,
'last_contacted_by': None,
'active_customer_expires_at': None,
'federal_donotcall': False,
'warnings_count': 0,
'first_supporter_at': '2015-02-02T19:30:28-08:00',
'previous_party': None,
'donations_raised_amount_this_cycle_in_cents': 0,
'call_status_name': None,
'marital_status': None,
'facebook_updated_at': None,
'donations_count': 0,
'note_updated_at': None,
'closed_invoices_count': None,
'profile_headline': None,
'fire_district': None,
'mobile_normalized': None,
'import_id': None,
'last_call_id': None,
'donations_raised_amount_in_cents': 0,
'facebook_address': None,
'is_profile_private': False,
'last_rule_violation_at': None,
'sex': None,
'full_name': 'Foo Bar',
'last_donated_at': None,
'donations_pledged_amount_in_cents': 0,
'primary_email_id': 1,
'media_market_name': None,
'capital_amount_in_cents': 500,
'datatrust_id': None,
'precinct_code': None,
'email3': None,
'religion': None,
'first_prospect_at': None,
'judicial_district': None,
'donations_count_this_cycle': 0,
'work_address': None,
'is_twitter_follower': False,
'email1': 'foobar@gmail.com',
'email': 'foobar@gmail.com',
'contact_status_name': None,
'mobile_opt_in': True,
'twitter_description': None,
'parent': None,
'tags': [],
'first_volunteer_at': None,
'inferred_support_level': None,
'banned_at': None,
'first_invoice_at': None,
'donations_raised_count_this_cycle': 0,
'is_donor': False,
'twitter_location': None,
'email1_is_bad': False,
'legal_name': None,
'language': None,
'registered_at': None,
'call_status_id': None,
'last_invoice_at': None,
'school_sub_district': None,
'village_district': None,
'twitter_name': None,
'membership_started_at': None,
'subnations': [],
'meetup_address': None,
'author_id': None,
'registered_address': None,
'external_id': None,
'twitter_login': None,
'inferred_party': None,
'spent_capital_amount_in_cents': 0,
'suffix': None,
'mailing_address': None,
'is_leaderboardable': True,
'twitter_website': None,
'nbec_guid': None,
'city_district': None,
'church': None,
'is_profile_searchable': True,
'employer': None,
'is_fundraiser': False,
'email_opt_in': True,
'recruits_count': 0,
'email2_is_bad': False,
'county_district': None,
'recruiter': None,
'twitter_friends_count': None,
'facebook_username': None,
'active_customer_started_at': None,
'pf_strat_id': None,
'locale': None,
'twitter_address': None,
'is_supporter': True,
'do_not_call': False,
'profile_image_url_ssl': 'https://d3n8a8pro7vhmx.cloudfront.net'
'/assets/icons/buddy.png',
'invoices_amount_in_cents': None,
'username': None,
'donations_amount_in_cents': 0,
'is_volunteer': False,
'civicrm_id': None,
'supranational_district': None,
'precinct_name': None,
'invoice_payments_amount_in_cents': None,
'work_phone_number': None,
'phone': '213.394.4623',
'received_capital_amount_in_cents': 500,
'primary_address': None,
'is_possible_duplicate': False,
'invoice_payments_referred_amount_in_cents': None,
'donations_amount_this_cycle_in_cents': 0,
'priority_level': None,
'first_fundraised_at': None,
'phone_normalized': '2133944623',
'rnc_regid': None,
'twitter_id': None,
'birthdate': None,
'mobile': None,
'federal_district': None,
'donations_to_raise_amount_in_cents': 0,
'support_probability_score': None,
'invoices_count': None,
'nbec_precinct_code': None,
'website': None,
'closed_invoices_amount_in_cents': None,
'home_address': None,
'school_district': None,
'support_level': None,
'demo': None,
'children_count': 0,
'updated_at': '2015-02-02T19:30:28-08:00',
'membership_level_name': None,
'billing_address': None,
'is_ignore_donation_limits': False,
'signup_type': 0,
'precinct_id': None,
'rnc_id': None,
'id': 2,
'ethnicity': None,
'is_survey_question_private': False,
'middle_name': None,
'author': None,
'last_fundraised_at': None,
'state_file_id': None,
'note': None,
'submitted_address': None,
'support_level_changed_at': None,
'party': None,
'contact_status_id': None,
'outstanding_invoices_amount_in_cents': None,
'page_slug': None,
'outstanding_invoices_count': None,
'first_recruited_at': None,
'county_file_id': None,
'first_name': 'Foo',
'facebook_profile_url': None,
'city_sub_district': None,
'has_facebook': False,
'is_deceased': False,
'labour_region': None,
'state_lower_district': None,
'dw_id': None,
'created_at': '2015-02-02T19:30:28-08:00',
'is_prospect': False,
'priority_level_changed_at': None,
'is_mobile_bad': False,
'overdue_invoices_count': None,
'ngp_id': None,
'do_not_contact': False,
'first_donated_at': None,
'turnout_probability_score': None
},
'precinct': None
})
def test_login(self):
self.strategy.set_settings({
'SOCIAL_AUTH_NATIONBUILDER_SLUG': 'foobar'
})
self.do_login()
def test_partial_pipeline(self):
self.strategy.set_settings({
'SOCIAL_AUTH_NATIONBUILDER_SLUG': 'foobar'
})
self.do_partial_pipeline()
|
NeCTAR-RC/swift | refs/heads/nectar/icehouse | swift/common/middleware/catch_errors.py | 3 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift import gettext_ as _
from swift.common.swob import Request, HTTPServerError
from swift.common.utils import get_logger, generate_trans_id
from swift.common.wsgi import WSGIContext
class CatchErrorsContext(WSGIContext):
    """WSGI context that stamps every request with a transaction id and
    converts any unhandled pipeline exception into a 500 response."""

    def __init__(self, app, logger, trans_id_suffix=''):
        super(CatchErrorsContext, self).__init__(app)
        self.logger = logger
        self.trans_id_suffix = trans_id_suffix

    def handle_request(self, env, start_response):
        # Generate a fresh id, expose it to downstream middleware via the
        # environ, and bind it to the logger for correlated log lines.
        trans_id = generate_trans_id(self.trans_id_suffix)
        env['swift.trans_id'] = trans_id
        self.logger.txn_id = trans_id
        try:
            # catch any errors in the pipeline
            resp = self._app_call(env)
        # Deliberately bare so nothing can escape this boundary middleware
        # (the noqa marker silences the linter).
        except: # noqa
            self.logger.exception(_('Error: An error occurred'))
            resp = HTTPServerError(request=Request(env),
                                   body='An error occurred',
                                   content_type='text/plain')
            resp.headers['X-Trans-Id'] = trans_id
            return resp(env, start_response)

        # make sure the response has the trans_id
        if self._response_headers is None:
            self._response_headers = []
        self._response_headers.append(('X-Trans-Id', trans_id))
        start_response(self._response_status, self._response_headers,
                       self._response_exc_info)
        return resp
class CatchErrorMiddleware(object):
    """
    Middleware that provides high-level error handling and ensures that a
    transaction id will be set for every request.
    """

    def __init__(self, app, conf):
        # app: next WSGI application; conf: paste.deploy-style dict.
        self.app = app
        self.logger = get_logger(conf, log_route='catch-errors')
        # Optional suffix appended to every generated transaction id.
        self.trans_id_suffix = conf.get('trans_id_suffix', '')

    def __call__(self, env, start_response):
        """
        If used, this should be the first middleware in pipeline.
        """
        # A fresh context per request keeps per-request state isolated.
        context = CatchErrorsContext(self.app,
                                     self.logger,
                                     self.trans_id_suffix)
        return context.handle_request(env, start_response)
def filter_factory(global_conf, **local_conf):
    """paste.deploy filter factory; local options override global ones."""
    merged_conf = dict(global_conf, **local_conf)

    def except_filter(app):
        return CatchErrorMiddleware(app, merged_conf)

    return except_filter
|
ermo/privateer_wcu | refs/heads/master | modules/missions/total_jump.py | 1 | import Briefing
import Director
import VS
import debug
import faction_ships
import launch
import unit
import universe
import vsrandom
class total_jump:
    """Background mission director that periodically either jumps the player
    to an adjacent star system or autopilots them toward a significant unit.
    """

    def __init__(self):
        # Lower the game difficulty while this mission is active.
        VS.SetDifficulty(.1)
        # lasttime starts far in the past so the first Execute() fires a wave.
        self.lasttime=-1000
        self.waittime=5.0

    def launch_new_wave(self):
        """Randomly (1 in 4) jump the player; otherwise autopilot to a
        randomly chosen significant unit."""
        un = VS.getPlayer()
        if (vsrandom.randrange(0,4)==0):
            if (un):
                currentsystem = VS.getSystemFile()
                numadj=VS.GetNumAdjacentSystems(currentsystem)
                if (numadj):
                    cursys=VS.GetAdjacentSystem(currentsystem,vsrandom.randrange(0,numadj))
                else:
                    # Fallback destination when the current system has no links.
                    cursys = 'enigma_sector/heavens_gate'
                debug.info("TJ: jumping to "+cursys)
                un.JumpTo(cursys)
            else:
                debug.warn("TJ: jumping to [ERROR: you are null]")
            return
        else:
            siglist=universe.significantUnits()
            if len(siglist)==0:
                debug.info("TJ: siglist empty")
                return
            sig=siglist[vsrandom.randrange(0,len(siglist))]
            if (not sig):
                debug.info("TJ: sig null")
                return
            debug.info("TJ: autopiloting to "+sig.getName())
            # NOTE(review): unlike the jump branch, 'un' is not checked for
            # null here; a missing player would raise on AutoPilotTo.
            un.AutoPilotTo(sig,True)
            un.SetTarget(sig)
##        side = vsrandom.randrange(0,2)
##        faction="confed"
##        ai = vsrandom.randrange(0,2)
##        if (ai==0):
##            ai = "printhello.py"
##        else:
##            ai = "default"
##        if (side==0):
##            faction=faction_ships.get_enemy_of("confed")
##        else:
##            faction=faction_ships.get_friend_of("confed")
##        launched = launch.launch_wave_around_unit ("Shadow",faction,faction_ships.getRandomFighter(faction),ai,vsrandom.randrange(1,10),100.0,2000.0,VS.getPlayer(),'')
##        if (vsrandom.randrange(0,10)==0):
##            launch.launch_wave_around_unit ("ShadowCap",faction,faction_ships.getRandomCapitol(faction),ai,1,2000.0,4000.0,VS.getPlayer(),'')

    def Execute (self):
        """Per-frame hook: trigger a new wave once waittime has elapsed."""
#        un=VS.getUnit(0);
#        i=0
#        while (un):
#            debug.info(un.getName())
#            i+=1
#            un= VS.getUnit(i)
        time = VS.GetGameTime()
        if (time-self.lasttime>self.waittime):
            self.launch_new_wave()
            # NOTE(review): randrange() is called with float bounds; this
            # relies on Python 2 random accepting non-integer arguments.
            self.waittime=vsrandom.randrange(10.0,30.0)
            self.lasttime=time

    def initbriefing(self):
        # NOTE(review): message says "ending briefing" — likely a copy/paste
        # of endbriefing's text.
        debug.info("ending briefing")

    def loopbriefing(self):
        # Terminate immediately: this mission has no briefing content.
        debug.info("loop briefing")
        Briefing.terminate();

    def endbriefing(self):
        debug.info("ending briefing")
|
ffrankies/tf-terry | refs/heads/master | tests/__init__.py | 12133432 | |
ChameleonCloud/blazar | refs/heads/chameleoncloud/train | blazar/plugins/__init__.py | 12133432 | |
joalder/billstation | refs/heads/master | station/__init__.py | 12133432 | |
hellofreedom/ansible | refs/heads/devel | lib/ansible/plugins/shell/powershell.py | 35 | # (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import os
import re
import random
import shlex
import time
from ansible.utils.unicode import to_bytes, to_unicode
# Base command line prepended to every remote PowerShell invocation.
_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']

# Primarily for testing, allow explicitly specifying PowerShell version via
# an environment variable.
_powershell_version = os.environ.get('POWERSHELL_VERSION', None)
if _powershell_version:
    _common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:]
class ShellModule(object):
    """Windows PowerShell implementation of Ansible's shell-plugin interface.

    Builds quoted Windows paths and base64 ``-EncodedCommand`` PowerShell
    invocations for remote file operations and module execution.
    """

    def env_prefix(self, **kwargs):
        # No environment-variable prefix is emitted for PowerShell commands.
        return ''

    def join_path(self, *args):
        """Join path fragments with backslashes and quote the result
        (unless it starts with '~', which must stay unquoted for
        expand_user to recognize it)."""
        parts = []
        for arg in args:
            arg = self._unquote(arg).replace('/', '\\')
            parts.extend([a for a in arg.split('\\') if a])
        path = '\\'.join(parts)
        if path.startswith('~'):
            return path
        return '"%s"' % path

    def path_has_trailing_slash(self, path):
        # Allow Windows paths to be specified using either slash.
        path = self._unquote(path)
        return path.endswith('/') or path.endswith('\\')

    def chmod(self, mode, path):
        # POSIX permissions do not apply on Windows; emit no command.
        return ''

    def remove(self, path, recurse=False):
        """Return an encoded Remove-Item command for *path*."""
        path = self._escape(self._unquote(path))
        if recurse:
            return self._encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
        else:
            return self._encode_script('''Remove-Item "%s" -Force;''' % path)

    def mkdtemp(self, basefile, system=False, mode=None):
        """Return an encoded command creating %TEMP%\\<basefile> and
        printing its full path; 'system' and 'mode' are currently ignored."""
        basefile = self._escape(self._unquote(basefile))
        # FIXME: Support system temp path!
        return self._encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)

    def expand_user(self, user_home_path):
        # PowerShell only supports "~" (not "~username").  Resolve-Path ~ does
        # not seem to work remotely, though by default we are always starting
        # in the user's home directory.
        user_home_path = self._unquote(user_home_path)
        if user_home_path == '~':
            script = 'Write-Host (Get-Location).Path'
        elif user_home_path.startswith('~\\'):
            script = 'Write-Host ((Get-Location).Path + "%s")' % self._escape(user_home_path[1:])
        else:
            script = 'Write-Host "%s"' % self._escape(user_home_path)
        return self._encode_script(script)

    def checksum(self, path, *args, **kwargs):
        """Return an encoded script printing the SHA-1 of a file, or the
        sentinel "3" for a directory and "1" for a missing path (the same
        sentinels Ansible's POSIX checksum uses)."""
        path = self._escape(self._unquote(path))
        script = '''
            If (Test-Path -PathType Leaf "%(path)s")
            {
                $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
                $fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
                [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
                $fp.Dispose();
            }
            ElseIf (Test-Path -PathType Container "%(path)s")
            {
                Write-Host "3";
            }
            Else
            {
                Write-Host "1";
            }
        ''' % dict(path=path)
        return self._encode_script(script)

    def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
        """Wrap a module invocation in Try/Catch so any PowerShell error is
        reported back as a JSON dict with failed=true, optionally removing
        the temp dir in a Finally block."""
        cmd_parts = shlex.split(to_bytes(cmd), posix=False)
        # NOTE(review): relies on Python 2 map() returning a list; under
        # Python 3 the index assignments below would fail on the iterator.
        cmd_parts = map(to_unicode, cmd_parts)
        if shebang and shebang.lower() == '#!powershell':
            if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
                cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
            cmd_parts.insert(0, '&')
        elif shebang and shebang.startswith('#!'):
            cmd_parts.insert(0, shebang[2:])
        script = '''
            Try
            {
                %s
            }
            Catch
            {
                $_obj = @{ failed = $true }
                If ($_.Exception.GetType)
                {
                    $_obj.Add('msg', $_.Exception.Message)
                }
                Else
                {
                    $_obj.Add('msg', $_.ToString())
                }
                If ($_.InvocationInfo.PositionMessage)
                {
                    $_obj.Add('exception', $_.InvocationInfo.PositionMessage)
                }
                ElseIf ($_.ScriptStackTrace)
                {
                    $_obj.Add('exception', $_.ScriptStackTrace)
                }
                Try
                {
                    $_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
                }
                Catch
                {
                }
                Echo $_obj | ConvertTo-Json -Compress -Depth 99
                Exit 1
            }
        ''' % (' '.join(cmd_parts))
        if rm_tmp:
            rm_tmp = self._escape(self._unquote(rm_tmp))
            rm_cmd = 'Remove-Item "%s" -Force -Recurse -ErrorAction SilentlyContinue' % rm_tmp
            script = '%s\nFinally { %s }' % (script, rm_cmd)
        return self._encode_script(script)

    def _unquote(self, value):
        '''Remove any matching quotes that wrap the given value.'''
        value = to_unicode(value or '')
        m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
        if m:
            return m.group(1)
        m = re.match(r'^\s*?"(.*?)"\s*?$', value)
        if m:
            return m.group(1)
        return value

    def _escape(self, value, include_vars=False):
        '''Return value escaped for use in PowerShell command.'''
        # http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
        # http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
        subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
                ('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
                ('\'', '`\''), ('`', '``'), ('\x00', '`0')]
        if include_vars:
            subs.append(('$', '`$'))
        # Build one alternation pattern and dispatch on the matched group
        # index so escaping is a single re.sub pass.
        pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
        substs = [s for p, s in subs]
        replace = lambda m: substs[m.lastindex - 1]
        return re.sub(pattern, replace, value)

    def _encode_script(self, script, as_list=False, strict_mode=True):
        '''Convert a PowerShell script to a single base64-encoded command.'''
        script = to_unicode(script)
        if strict_mode:
            script = u'Set-StrictMode -Version Latest\r\n%s' % script
        script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
        # -EncodedCommand expects base64 of the UTF-16LE encoded script.
        encoded_script = base64.b64encode(script.encode('utf-16-le'))
        cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
        if as_list:
            return cmd_parts
        return ' '.join(cmd_parts)
|
barliant/rpb-rsc | refs/heads/master | karawang.py | 1 | import pandas as pd
# Exploratory time-series analysis (stationarity + trend removal) of the
# 'Karawang' monthly series in karawang.csv.  Python 2 script.
import numpy as np
import matplotlib.pylab as plt
#%matplotlib inline
from matplotlib.pylab import rcParams

from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries):
    """Plot 12-month rolling mean/std and print an augmented Dickey-Fuller
    test for *timeseries* (pandas Series)."""
    #Determing rolling statistics
    # NOTE(review): pd.rolling_mean/pd.rolling_std were removed in modern
    # pandas; the commented lines below show the replacement API.
    rolmean = pd.rolling_mean(timeseries, window=12)
    #rolmean = Series.rolling(window=12, center=False).mean()
    rolstd = pd.rolling_std(timeseries, window=12)
    #rolmean = Series.rolling(window=12, center=False).std()

    #Plot rolling statistics:
    orig = plt.plot(timeseries, color='blue',label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label = 'Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    #plt.show(block=False)
    plt.show()

    #Perform Dickey-Fuller test:
    print 'Results of Dickey-Fuller Test:'
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
    for key,value in dftest[4].items():
        dfoutput['Critical Value (%s)'%key] = value
    print dfoutput

rcParams['figure.figsize'] = 15, 6
data = pd.read_csv('karawang.csv')
print data.head()
print '\n Data Types:'
print data.dtypes

# Re-read with 'Bulan' (month) parsed as the datetime index.
dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m')
data = pd.read_csv('karawang.csv', parse_dates=['Bulan'], index_col='Bulan', date_parser = dateparse)
print data.head()
print data.dtypes
print data.index

ts = data['Karawang']
print ts.head(10)

test_stationarity(ts)

plt.plot(ts)
plt.show()

from datetime import datetime

from statsmodels.tsa.stattools import adfuller
# NOTE(review): this second definition of test_stationarity shadows the
# one above but is defined after the only call site, so it is dead code;
# it differs only in using plt.show(block=False).
def test_stationarity(timeseries):
    #Determing rolling statistics
    rolmean = pd.rolling_mean(timeseries, window=12)
    rolstd = pd.rolling_std(timeseries, window=12)

    #Plot rolling statistics:
    orig = plt.plot(timeseries, color='blue',label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label = 'Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)

    #Perform Dickey-Fuller test:
    print 'Results of Dickey-Fuller Test:'
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
    for key,value in dftest[4].items():
        dfoutput['Critical Value (%s)'%key] = value
    print dfoutput

# estimating and eliminating trends
ts_log = np.log(ts)
plt.plot(ts_log)
plt.show()

# moving average
moving_avg = pd.rolling_mean(ts_log, 12)
plt.plot(ts_log)
plt.plot(moving_avg, color='red')
plt.show()
|
raycarnes/odoomrp-utils | refs/heads/8.0 | stock_planning/wizard/__init__.py | 15 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from . import wiz_stock_planning
|
rschnapka/server-tools | refs/heads/8.0 | users_ldap_push/models/res_users.py | 10 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import _, models, fields, api, exceptions
import logging
_logger = logging.getLogger(__name__)

try:
    import ldap
    import ldap.modlist
except ImportError:
    # python-ldap is an optional dependency: log and continue so the
    # module can still be imported on systems without it.
    # (Fixed: the old message referred to `ldap.filter.filter_format`,
    # which is not what is imported here.)
    _logger.debug('Cannot `import ldap` / `import ldap.modlist`.')
class ResUsers(models.Model):
    """res.users extension that mirrors user records into an LDAP directory.

    On create/write the changed values are mapped to LDAP attributes via the
    company's LDAP configuration and pushed with python-ldap; passwords of
    LDAP-backed users are changed directly in the directory.
    Python 2 only (uses `except Exc, e:` syntax and py2 map()).
    """
    _inherit = 'res.users'

    # DN of the mirrored LDAP entry; empty means "not yet created".
    ldap_entry_dn = fields.Char('LDAP DN', readonly=True)
    is_ldap_user = fields.Boolean(
        'LDAP user', compute='_compute_is_ldap_user', default=True)

    @api.model
    @api.returns('self', lambda record: record.id)
    def create(self, values):
        """Create the user, then mirror the written values to LDAP."""
        result = super(ResUsers, self).create(values)
        result.push_to_ldap(values)
        return result

    @api.multi
    def write(self, values):
        """Write the users, then mirror the written values to LDAP."""
        result = super(ResUsers, self).write(values)
        self.push_to_ldap(values)
        return result

    @api.multi
    def _push_to_ldap_possible(self, values):
        # True when the user's company has an LDAP config with entry
        # creation enabled.
        return bool(self._get_ldap_configuration())

    @api.multi
    def _get_ldap_configuration(self):
        """Return the first create-enabled LDAP configuration (or empty
        recordset) of the user's company."""
        self.ensure_one()
        return self.sudo().company_id.ldaps.filtered('create_ldap_entry')[:1]

    @api.multi
    def _get_ldap_values(self, values):
        """Map the written odoo field values to an LDAP modlist dict
        {attribute: [str value]}; adds objectClass when non-empty."""
        self.ensure_one()
        conf = self._get_ldap_configuration()
        result = {}
        for mapping in conf.create_ldap_entry_field_mappings:
            field_name = mapping.field_id.name
            # Only push fields present in this write with a truthy value.
            if field_name not in values or not values[field_name]:
                continue
            result[mapping.attribute] = [str(values[field_name])]
        if result:
            result['objectClass'] = conf.create_ldap_entry_objectclass\
                .encode('utf-8').split(',')
        return result

    @api.multi
    def _get_ldap_dn(self, values):
        """Build the DN for this user from the mapping flagged use_for_dn."""
        self.ensure_one()
        conf = self._get_ldap_configuration()
        dn = conf.create_ldap_entry_field_mappings.filtered('use_for_dn')
        assert dn, 'No DN attribute mapping given!'
        assert self[dn.field_id.name], 'DN attribute empty!'
        return '%s=%s,%s' % (
            dn.attribute,
            ldap.dn.escape_dn_chars(self[dn.field_id.name].encode('utf-8')),
            conf.create_ldap_entry_base or conf.ldap_base)

    @api.multi
    def push_to_ldap(self, values):
        """Create or update the LDAP entry of each LDAP-backed user in self.

        Raises an Odoo Warning on any LDAPError; always unbinds the
        connection.
        """
        for this in self:
            if not values.get('is_ldap_user') and not this.is_ldap_user:
                continue
            if not this._push_to_ldap_possible(values):
                continue
            ldap_values = this._get_ldap_values(values)
            if not ldap_values:
                continue
            ldap_configuration = this._get_ldap_configuration()
            ldap_connection = ldap_configuration.connect(
                ldap_configuration.read()[0])
            ldap_connection.simple_bind_s(
                (ldap_configuration.ldap_binddn or '').encode('utf-8'),
                (ldap_configuration.ldap_password or '').encode('utf-8'))
            try:
                if not this.ldap_entry_dn:
                    this._push_to_ldap_create(
                        ldap_connection, ldap_configuration, values,
                        ldap_values)
                # Not an elif: _push_to_ldap_create stores the DN, after
                # which the remaining attributes are applied via modify.
                if this.ldap_entry_dn:
                    this._push_to_ldap_write(
                        ldap_connection, ldap_configuration, values,
                        ldap_values)
            except ldap.LDAPError as e:
                _logger.exception(e)
                raise exceptions.Warning(_('Error'), e.message)
            finally:
                ldap_connection.unbind_s()

    @api.multi
    def _push_to_ldap_create(self, ldap_connection, ldap_configuration, values,
                             ldap_values):
        """Add a new LDAP entry for this user and remember its DN."""
        self.ensure_one()
        dn = self._get_ldap_dn(values)
        ldap_connection.add_s(
            dn,
            ldap.modlist.addModlist(ldap_values))
        self.write({'ldap_entry_dn': dn})

    @api.multi
    def _push_to_ldap_write(self, ldap_connection, ldap_configuration, values,
                            ldap_values):
        """Modify the existing LDAP entry with the changed attributes
        (the DN attribute itself is never modified here)."""
        self.ensure_one()
        dn = self.ldap_entry_dn.encode('utf-8')
        dn_mapping = ldap_configuration.create_ldap_entry_field_mappings\
            .filtered('use_for_dn')
        if dn_mapping.attribute in ldap_values:
            ldap_values.pop(dn_mapping.attribute)
        # Read the current attribute values so modifyModlist can diff them.
        # NOTE(review): map() returning a list is Python 2 behaviour.
        ldap_entry = ldap_connection.search_s(
            dn, ldap.SCOPE_BASE, '(objectClass=*)',
            map(lambda x: x.encode('utf-8'), ldap_values.keys()))
        assert ldap_entry, '%s not found!' % self.ldap_entry_dn
        ldap_entry = ldap_entry[0][1]
        ldap_connection.modify_s(
            dn,
            ldap.modlist.modifyModlist(ldap_entry, ldap_values))

    @api.one
    @api.depends('ldap_entry_dn')
    def _compute_is_ldap_user(self):
        # A user counts as LDAP-backed once an entry DN is recorded.
        self.is_ldap_user = bool(self.ldap_entry_dn)

    @api.one
    def _change_ldap_password(self, new_passwd, auth_dn=None,
                              auth_passwd=None):
        """Change this user's password directly in LDAP.

        Binds as auth_dn/auth_passwd when given (the user changing their
        own password), otherwise as the configured bind DN.
        """
        ldap_configuration = self.env.user.sudo()._get_ldap_configuration()
        ldap_connection = ldap_configuration.connect(
            ldap_configuration.read()[0])
        dn = auth_dn or ldap_configuration.ldap_binddn
        old_passwd = auth_passwd or ldap_configuration.ldap_password
        ldap_connection.simple_bind_s(
            dn.encode('utf-8'), old_passwd.encode('utf-8'))
        # Enforce the same ACLs a normal password write would hit.
        self.env['ir.model.access'].check('res.users', 'write')
        self.env.user.check_access_rule('write')
        try:
            ldap_connection.passwd_s(
                self.ldap_entry_dn, None, new_passwd.encode('utf-8'))
        # NOTE(review): Python-2-only except syntax below.
        except ldap.LDAPError, e:
            raise exceptions.Warning(_('Error'), e.message)
        finally:
            ldap_connection.unbind_s()
        return True

    @api.model
    def change_password(self, old_passwd, new_passwd):
        """Route password changes of LDAP-backed users to the directory."""
        if self.env.user.is_ldap_user:
            return self.env.user._change_ldap_password(
                new_passwd, auth_dn=self.env.user.ldap_entry_dn,
                auth_passwd=old_passwd)
        return super(ResUsers, self).change_password(old_passwd, new_passwd)
|
hynnet/hiwifi-openwrt-HC5661-HC5761 | refs/heads/master | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/lib2to3/fixes/fix_print.py | 326 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
'print' into 'print()'
'print ...' into 'print(...)'
'print ... ,' into 'print(..., end=" ")'
'print >>x, ...' into 'print(..., file=x)'
No changes are applied if print_function is imported from __future__
"""
# Local imports
from .. import patcomp
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Comma, String, is_tuple
parend_expr = patcomp.compile_pattern(
"""atom< '(' [atom|STRING|NAME] ')' >"""
)
class FixPrint(fixer_base.BaseFix):
    """lib2to3 fixer converting Python 2 print statements to print() calls,
    mapping '>>file' to file=... and a trailing comma to end=" "."""

    BM_compatible = True

    PATTERN = """
              simple_stmt< any* bare='print' any* > | print_stmt
              """

    def transform(self, node, results):
        assert results

        bare_print = results.get("bare")

        if bare_print:
            # Special-case print all by itself
            bare_print.replace(Call(Name(u"print"), [],
                               prefix=bare_print.prefix))
            return
        assert node.children[0] == Name(u"print")
        args = node.children[1:]
        if len(args) == 1 and parend_expr.match(args[0]):
            # We don't want to keep sticking parens around an
            # already-parenthesised expression.
            return

        # NOTE(review): 'sep' is never assigned below, so the sep kwarg
        # branch is currently dead; it is kept for symmetry.
        sep = end = file = None
        if args and args[-1] == Comma():
            # Trailing comma means "no newline": print x, -> end=" ".
            args = args[:-1]
            end = " "
        if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"):
            # print >>f, ... -> file=f
            assert len(args) >= 2
            file = args[1].clone()
            args = args[3:]  # Strip a possible comma after the file expression
        # Now synthesize a print(args, sep=..., end=..., file=...) node.
        l_args = [arg.clone() for arg in args]
        if l_args:
            l_args[0].prefix = u""
        if sep is not None or end is not None or file is not None:
            if sep is not None:
                self.add_kwarg(l_args, u"sep", String(repr(sep)))
            if end is not None:
                self.add_kwarg(l_args, u"end", String(repr(end)))
            if file is not None:
                self.add_kwarg(l_args, u"file", file)
        n_stmt = Call(Name(u"print"), l_args)
        n_stmt.prefix = node.prefix
        return n_stmt

    def add_kwarg(self, l_nodes, s_kwd, n_expr):
        """Append the keyword argument s_kwd=n_expr to the arg-node list."""
        # XXX All this prefix-setting may lose comments (though rarely)
        n_expr.prefix = u""
        n_argument = pytree.Node(self.syms.argument,
                                 (Name(s_kwd),
                                  pytree.Leaf(token.EQUAL, u"="),
                                  n_expr))
        if l_nodes:
            l_nodes.append(Comma())
            n_argument.prefix = u" "
        l_nodes.append(n_argument)
|
w0bb13z/Life-Objects | refs/heads/master | Mock_Ups/Animal.py | 1 | #!/usr/bin/python36
import os, sys, getopt
try:
import numpy
except ImportWarning:
"It is recommended to have 'numpy' installed for 'python3.6'."
pass
class Animal:
    """A record representing a real-world 'Animal'.

    Note: this does not include "citizen" implementations such as
    jobs, housing, social security number, etc.

    Attributes (set in __init__):
      - name            Animal's name; None if feral.
      - species         Generalized species, such as "cat".
      - kingdom         Generalized kingdom, such as "feline".
      - age             Uptime since birth, to general knowledge.
      - gender          Gender of birth.
      - birth_defects   List of known medical birth defects.
      - health          General medical health as a percentage string.
      - height          [feet, inches] pair, e.g. [2, 6].
      - weight          Weight in American pounds (lbs.).
      - IsFeral / IsOwned / IsTrained / IsInHeat / IsPregnant /
        IsColorblind / IsVaccinated   Boolean status flags.

    get()/set() address attributes via lowercase lookup keys
    (e.g. 'isvaccinated'); dictAttributes remains available as a
    backward-compatible key->value view kept in sync by set().
    """

    # Maps the lowercase lookup keys accepted by get()/set() to the
    # instance attribute names they mirror.
    _KEY_TO_ATTR = {
        'name': 'name', 'species': 'species', 'kingdom': 'kingdom',
        'age': 'age', 'gender': 'gender', 'birth_defects': 'birth_defects',
        'health': 'health', 'height': 'height', 'weight': 'weight',
        'isferal': 'IsFeral', 'isowned': 'IsOwned', 'istrained': 'IsTrained',
        'isinheat': 'IsInHeat', 'ispregnant': 'IsPregnant',
        'iscolorblind': 'IsColorblind', 'isvaccinated': 'IsVaccinated',
    }

    # Usage text printed when set() receives a malformed argument.
    _SET_USAGE = """\

            Error. Set is used like this:
              > Animal.set([attributeName,valueToSet])
            real EG:
              > myanimal.set(['isvaccinated', True])\n
            """

    def __init__(self, name, species, kingdom, age, gender, birth_defects,
                 health, IsTrained, IsOwned, IsFeral, height, weight,
                 IsPregnant, IsInHeat, IsColorblind, IsVaccinated):
        self.name = name
        self.species = species
        self.kingdom = kingdom
        self.age = age
        self.gender = gender
        self.birth_defects = birth_defects
        self.health = health
        self.height = height
        self.weight = weight
        self.IsFeral = IsFeral
        self.IsOwned = IsOwned
        self.IsTrained = IsTrained
        self.IsInHeat = IsInHeat
        self.IsPregnant = IsPregnant
        self.IsColorblind = IsColorblind
        self.IsVaccinated = IsVaccinated
        # Backward-compatible key->value view; set() keeps it in sync with
        # the real attributes (the old code let them drift apart).
        self.dictAttributes = {key: getattr(self, attr)
                               for key, attr in self._KEY_TO_ATTR.items()}

    def PrettyPrint(self):
        """Print a human-readable summary of all current attributes."""
        print("""\n\
   Name:            {}
   Species:         {}
   Kingdom:         {}
   Age:             {} Years Old
   Gender:          {}
   Birth Defects:   {}
   Health:          {}
   Height:          {}
   Weight:          {}
   Is Feral:        {}
   Is Owned:        {}
   Is Trained:      {}
   Is In Heat:      {}
   Is Pregnant:     {}
   Is Colorblind:   {}
   Is Vaccinated:   {}
   """.format(self.name, self.species, self.kingdom, self.age,
              self.gender, self.birth_defects, self.health,
              self.height[0:], self.weight, self.IsFeral, self.IsOwned,
              self.IsTrained, self.IsInHeat, self.IsPregnant,
              self.IsColorblind, self.IsVaccinated))

    def get(self, *args):
        """Return current values for the given keys, in request order.

        Unrecognized keys are silently skipped (as before).  Recognized
        keys always reflect the live attribute value rather than the
        snapshot taken at construction time.
        """
        values = []
        for key in args:
            key = str(key)
            attr = self._KEY_TO_ATTR.get(key)
            if attr is not None:
                values.append(getattr(self, attr))
            elif key in self.dictAttributes:
                # Keys added dynamically through set() are still honoured.
                values.append(self.dictAttributes[key])
        return values

    def set(self, *args):
        """Update attributes; each argument is a [key, value] pair.

        Fixes over the original implementation:
        - the always-false ``value is list`` identity check (and the broken
          element-wise copy loop behind it) is gone; list values such as
          height=[4, 2] are simply stored whole;
        - the real instance attribute is updated along with dictAttributes,
          so PrettyPrint() and get() agree after a set().
        """
        for pair in args:
            if len(pair) < 2:
                # Malformed pair: show usage and stop, as before.
                print(self._SET_USAGE)
                break
            key, value = pair[0], pair[1]
            self.dictAttributes[key] = value
            attr = self._KEY_TO_ATTR.get(key)
            if attr is not None:
                setattr(self, attr, value)
            print("Animal's {} was changed to '{}' successfully"
                  .format(str(key), str(value)))
#### END OF CLASS ####
#### TESTS GO HERE ####
# Smoke test: exercises construction, PrettyPrint(), get() and set().
# NOTE(review): this runs at import time; consider guarding it with
# `if __name__ == '__main__':`.
myanimal = Animal("Annabelle","Cat","Feline", 5, 'Female',
                  ['Blind in Left Eye'], '65%', True, True,
                  False, [2,6], 34, False, True, False, True)
myanimal.PrettyPrint()
print(myanimal.get('name','height','height','isvaccinated'))
myanimal.set(['name','Vivian'],['height',[4,2]])
print(myanimal.get('height'))
myanimal.PrettyPrint()
print(myanimal.get('name','height','height','isvaccinated'))
myanimal.PrettyPrint()
#### END TESTS ####
|
noahwilliamsson/micropython | refs/heads/wpa-enterprise | tests/basics/object1.py | 110 | # test builtin object()
# creation: instantiating the most basic builtin type must succeed
object()

# printing: repr looks like "<object object at 0x...>", so only the
# stable "<object" prefix is shown
representation = repr(object())
print(representation[:7])
|
ajslater/picopt | refs/heads/develop | tests/__init__.py | 1 | """Tests init."""
import inspect
from pathlib import Path
# Shared fixture locations used across the test suite.
TEST_FILES_DIR = Path("tests/test_files")
IMAGES_DIR = TEST_FILES_DIR / "images"
INVALID_DIR = TEST_FILES_DIR / "invalid"
COMIC_DIR = TEST_FILES_DIR / "comic_archives"
TMP_ROOT = "/tmp"


def get_test_dir():
    """Return a temporary directory path named after the calling module."""
    module_name = "unknown"
    current_frame = inspect.currentframe()
    if current_frame and current_frame.f_back:
        # The caller's frame tells us which module asked for the dir.
        module_name = current_frame.f_back.f_globals["__name__"]
    return TMP_ROOT / Path("picopt-" + module_name)
|
s40171225/2016fallcp_hw | refs/heads/gh-pages | course/week12.py | 19 | # week12.py
# Announce the module name when executed.
print("week12.py")
|
PennartLoettring/Poettrix | refs/heads/master | rootfs/usr/lib/python3.4/xml/__init__.py | 925 | """Core XML support for Python.
This package contains four sub-packages:
dom -- The W3C Document Object Model. This supports DOM Level 1 +
Namespaces.
parsers -- Python wrappers for XML parsers (currently only supports Expat).
sax -- The Simple API for XML, developed by XML-Dev, led by David
Megginson and ported to Python by Lars Marius Garshol. This
supports the SAX 2 API.
etree -- The ElementTree XML library. This is a subset of the full
ElementTree XML release.
"""
# Names exported by "from xml import *": the four documented sub-packages.
__all__ = ["dom", "parsers", "sax", "etree"]
|
Nepherhotep/django | refs/heads/master | tests/template_tests/urls.py | 153 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import include, url
from . import views
ns_patterns = [
    # Test urls for testing reverse lookups
    url(r'^$', views.index),
    url(r'^client/([0-9,]+)/$', views.client),
    url(r'^client/(?P<id>[0-9]+)/(?P<action>[^/]+)/$', views.client_action),
    url(r'^client/(?P<client_id>[0-9]+)/(?P<action>[^/]+)/$', views.client_action),
    url(r'^named-client/([0-9]+)/$', views.client2, name="named.client"),
]

urlpatterns = ns_patterns + [
    # Unicode strings are permitted everywhere.
    url(r'^Юникод/(\w+)/$', views.client2, name="метка_оператора"),
    url(r'^Юникод/(?P<tag>\S+)/$', views.client2, name="метка_оператора_2"),

    # Test urls for namespaces and current_app.
    # ns1 sets an explicit instance namespace; ns2 falls back to the
    # application namespace 'app'.
    url(r'ns1/', include((ns_patterns, 'app'), 'ns1')),
    url(r'ns2/', include((ns_patterns, 'app'))),
]
|
akosyakov/intellij-community | refs/heads/master | python/testData/inspections/PyMethodOverridingInspection/NotOverridingMethod.py | 83 | class B:
pass
class C(B):
def foo(self):
pass
|
raphaelm/python-sepadd | refs/heads/master | tests/__init__.py | 12133432 | |
gslab-econ/gslab_python | refs/heads/master | gslab_misc/gencat/tests/test_zipFile.py | 1 | import unittest
import os
import shutil
import zipfile
import sys
# Ensure the script is run from its own directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('../../')
from gencat import gencat
class MockCat(gencat):
    """Concrete gencat subclass whose dictionary builders are no-ops;
    the tests assign zip_dict/concat_dict directly."""

    def makeZipDict(self):
        pass

    def makeConcatDict(self):
        pass
class test_zipFiles(unittest.TestCase):
    """Tests for gencat.zipFiles: concatenation of input files into text
    files and packaging of those into zip archives."""

    def setUp(self):
        """Create fresh test directories and two identical input files."""
        paths = ['./test_data', './test_temp', './test_out']
        for path in paths:
            try:
                os.makedirs(path)
            # NOTE(review): bare except — intended to catch "already
            # exists", but it swallows any makedirs failure.
            except:
                shutil.rmtree(path, ignore_errors = True)
                os.makedirs(path)

        for FILE in ['./test_data/file1.txt', './test_data/file2.txt']:
            with open(FILE, 'wb') as f:
                f.write('''THIS IS A TEST FILE.\n''')

    def test_oneFile(self):
        '''
        Test that concatenation functions for a single file.
        '''
        testcat = MockCat('./test_data', './test_temp', './test_out')
        testcat.zip_dict = {}
        testcat.zip_dict['zip1'] = ('concat1', )
        testcat.concat_dict = {}
        testcat.concat_dict['concat1'] = ('./test_data/file1.txt', )
        testcat.zipFiles()

        self.assertTrue(os.path.isfile('./test_out/zip1.zip'))
        self.assertTrue(zipfile.is_zipfile('./test_out/zip1.zip'))
        with zipfile.ZipFile('./test_out/zip1.zip', 'r') as zf:
            zf.extractall('./test_out/')
        # NOTE(review): 'rU' mode is deprecated (removed in Python 3.11).
        with open('./test_out/zip1/concat1.txt', 'rU') as f:
            text = f.read()
        self.assertEqual(text, '\nNEWFILE\nFILENAME: file1.txt\n\nTHIS IS A TEST FILE.\n')

    def test_twoFile(self):
        '''
        Test that two text files are concatenated into one without loss of content.
        '''
        testcat = MockCat('./test_data', './test_temp', './test_out')
        testcat.zip_dict = {}
        testcat.zip_dict['zip1'] = ('concat1', )
        testcat.concat_dict = {}
        testcat.concat_dict['concat1'] = ('./test_data/file1.txt', ) + ('./test_data/file2.txt', )
        testcat.zipFiles()

        self.assertTrue(os.path.isfile('./test_out/zip1.zip'))
        self.assertTrue(zipfile.is_zipfile('./test_out/zip1.zip'))
        with zipfile.ZipFile('./test_out/zip1.zip', 'r') as zf:
            zf.extractall('./test_out/')
        with open('./test_out/zip1/concat1.txt', 'rU') as f:
            text = f.read()
        test_text = '\nNEWFILE\nFILENAME: file1.txt\n\nTHIS IS A TEST FILE.' + \
                    '\n\nNEWFILE\nFILENAME: file2.txt\n\nTHIS IS A TEST FILE.\n'
        self.assertEqual(text, test_text)

    def test_twoZips(self):
        '''
        Test that two files can be concatenated to different text files and stored in separate zip files.
        '''
        testcat = MockCat('./test_data', './test_temp', './test_out')
        testcat.zip_dict = {}
        testcat.zip_dict['zip1'] = ('concat1', )
        testcat.zip_dict['zip2'] = ('concat2', )
        testcat.concat_dict = {}
        testcat.concat_dict['concat1'] = ('./test_data/file1.txt', )
        testcat.concat_dict['concat2'] = ('./test_data/file2.txt', )
        testcat.zipFiles()

        self.assertTrue(os.path.isfile('./test_out/zip1.zip'))
        self.assertTrue(os.path.isfile('./test_out/zip2.zip'))
        self.assertTrue(zipfile.is_zipfile('./test_out/zip1.zip'))
        self.assertTrue(zipfile.is_zipfile('./test_out/zip2.zip'))
        with zipfile.ZipFile('./test_out/zip1.zip', 'r') as zf:
            zf.extractall('./test_out/')
        with zipfile.ZipFile('./test_out/zip2.zip', 'r') as zf:
            zf.extractall('./test_out/')
        with open('./test_out/zip1/concat1.txt', 'rU') as f:
            text1 = f.read()
        with open('./test_out/zip2/concat2.txt', 'rU') as f:
            text2 = f.read()
        self.assertEqual(text1, '\nNEWFILE\nFILENAME: file1.txt\n\nTHIS IS A TEST FILE.\n')
        self.assertEqual(text2, '\nNEWFILE\nFILENAME: file2.txt\n\nTHIS IS A TEST FILE.\n')

    def test_twoConcatsOneZip(self):
        '''
        Test that two files can be concatenated to different text files and stored in the same zip file.
        '''
        testcat = MockCat('./test_data', './test_temp', './test_out')
        testcat.zip_dict = {}
        testcat.zip_dict['zip1'] = ('concat1', ) + ('concat2', )
        testcat.concat_dict = {}
        testcat.concat_dict['concat1'] = ('./test_data/file1.txt', )
        testcat.concat_dict['concat2'] = ('./test_data/file2.txt', )
        testcat.zipFiles()

        self.assertTrue(os.path.isfile('./test_out/zip1.zip'))
        self.assertTrue(zipfile.is_zipfile('./test_out/zip1.zip'))
        with zipfile.ZipFile('./test_out/zip1.zip', 'r') as zf:
            zf.extractall('./test_out/')
        with open('./test_out/zip1/concat1.txt', 'rU') as f:
            text1 = f.read()
        with open('./test_out/zip1/concat2.txt', 'rU') as f:
            text2 = f.read()
        self.assertEqual(text1, '\nNEWFILE\nFILENAME: file1.txt\n\nTHIS IS A TEST FILE.\n')
        self.assertEqual(text2, '\nNEWFILE\nFILENAME: file2.txt\n\nTHIS IS A TEST FILE.\n')

    def tearDown(self):
        """Remove all directories created by setUp/the tests."""
        paths = ['./test_data', './test_temp', './test_out']
        for path in paths:
            shutil.rmtree(path, ignore_errors = True)
# Running this module directly executes every TestCase defined above via
# unittest's command-line entry point.
if __name__ == '__main__':
    unittest.main()
|
adoosii/edx-platform | refs/heads/master | lms/djangoapps/commerce/api/urls.py | 128 | """ API URLs. """
from django.conf.urls import patterns, url, include
# Versioned commerce API routes; each version's URLconf lives in its own
# namespaced sub-package.
# NOTE(review): ``patterns()`` is the pre-Django-1.10 API — if the project
# upgrades Django, this must become a plain list of ``url()`` entries.
_v0_urls = url(r'^v0/', include('commerce.api.v0.urls', namespace='v0'))
_v1_urls = url(r'^v1/', include('commerce.api.v1.urls', namespace='v1'))

urlpatterns = patterns('', _v0_urls, _v1_urls)
|
divio/django-cms | refs/heads/develop | cms/tests/test_settings.py | 1 | from classytags.utils import flatten_context
from cms import constants
from cms.test_utils.testcases import CMSTestCase
from cms.utils.conf import get_cms_setting
from django.core.exceptions import ImproperlyConfigured
from django.template.loader import render_to_string
from django.test.utils import override_settings
class SettingsTests(CMSTestCase):
    """Checks that CMS settings are validated and reported correctly."""

    @override_settings(
        CMS_TEMPLATES=[('subdir/template.html', 'Subdir')],
        DEBUG=True,
        TEMPLATE_DEBUG=True,
    )
    def test_cms_templates_with_pathsep(self):
        # A template whose name contains a path separator must still render.
        from sekizai.context import SekizaiContext
        rendered = render_to_string(
            'subdir/template.html', flatten_context(SekizaiContext())
        )
        self.assertEqual(rendered.strip(), 'test')

    @override_settings(SITE_ID='broken')
    def test_non_numeric_site_id(self):
        # A SITE_ID that is not an integer must be rejected.
        with self.assertRaises(ImproperlyConfigured):
            get_cms_setting('LANGUAGES')

    @override_settings(LANGUAGE_CODE='en-us')
    def test_invalid_language_code(self):
        # A LANGUAGE_CODE outside the configured CMS languages must be rejected.
        with self.assertRaises(ImproperlyConfigured):
            get_cms_setting('LANGUAGES')

    @override_settings(CMS_TEMPLATE_INHERITANCE=True)
    def test_create_page_with_inheritance_override(self):
        # With inheritance enabled, the magic inheritance template must be
        # present; if it is missing, fetching TEMPLATES must raise.
        configured = get_cms_setting('TEMPLATES')
        has_magic = any(
            entry[0] == constants.TEMPLATE_INHERITANCE_MAGIC
            for entry in configured
        )
        if not has_magic:
            with self.assertRaises(ImproperlyConfigured):
                get_cms_setting('TEMPLATES')

    @override_settings(CMS_TEMPLATE_INHERITANCE=False)
    def test_create_page_without_inheritance_override(self):
        # With inheritance disabled, the magic inheritance template must not
        # appear; if it does, fetching TEMPLATES must raise.
        for entry in get_cms_setting('TEMPLATES'):
            if entry[0] == constants.TEMPLATE_INHERITANCE_MAGIC:
                with self.assertRaises(ImproperlyConfigured):
                    get_cms_setting('TEMPLATES')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.