| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, ⌀ = null) |
|---|---|---|---|---|
MetSystem/PTVS
|
refs/heads/master
|
Python/Tests/TestData/DebuggerProject/EvalRawTest.py
|
18
|
import sys
try:
unicode
except:
unicode = str
class DerivedString(unicode):
def __new__(cls, *args, **kwargs):
return unicode.__new__(cls, *args, **kwargs)
n = 123
if sys.version[0] == '3':
s = 'fob'.encode('ascii')
u = 'fob'
else:
s = 'fob'
u = unicode('fob')
ds = DerivedString(u)
try:
ba = bytearray(u, 'ascii')
except:
pass
print('breakpoint')
|
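The fixture above leans on a common Python 2/3 text-type shim: on Python 3 the name unicode no longer exists, so it is aliased to str. A minimal standalone sketch of the same pattern (the name text_type is illustrative, not taken from the fixture):

import sys

try:
    text_type = unicode          # Python 2: separate unicode type
except NameError:
    text_type = str              # Python 3: str is already text

payload = text_type("fob")
raw = payload.encode("ascii")    # bytes on both major versions
print(sys.version_info[0], type(payload).__name__, raw)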
detrout/debian-statsmodels
|
refs/heads/debian
|
statsmodels/examples/tsa/lagpolynomial.py
|
34
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 22 08:13:38 2010
Author: josef-pktd
License: BSD (3-clause)
"""
from __future__ import print_function
import numpy as np
from numpy import polynomial as npp
class LagPolynomial(npp.Polynomial):
#def __init__(self, maxlag):
def pad(self, maxlag):
return LagPolynomial(np.r_[self.coef, np.zeros(maxlag-len(self.coef))])
def padflip(self, maxlag):
return LagPolynomial(np.r_[self.coef, np.zeros(maxlag-len(self.coef))][::-1])
def flip(self):
'''reverse polynomial coefficients
'''
return LagPolynomial(self.coef[::-1])
def div(self, other, maxlag=None):
'''padded division, pads numerator with zeros to maxlag
'''
if maxlag is None:
maxlag = max(len(self.coef), len(other.coef)) + 1
return (self.padflip(maxlag) / other.flip()).flip()
def filter(self, arr):
return (self * arr).coef[:-len(self.coef)] #trim to end
ar = LagPolynomial([1, -0.8])
arpad = ar.pad(10)
ma = LagPolynomial([1, 0.1])
mapad = ma.pad(10)
unit = LagPolynomial([1])
|
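The div docstring above describes padded lag-polynomial division, i.e. expanding theta(L)/phi(L) out to a maximum lag. Below is a plain-numpy sketch of that idea using long division by recursion, shown instead of the class because operator-based polynomial division differs across numpy versions; the helper name lag_div and maxlag=10 are illustrative:

import numpy as np

def lag_div(theta, phi, maxlag=10):
    """Coefficients of theta(L)/phi(L) truncated after maxlag terms."""
    theta = np.r_[theta, np.zeros(maxlag - len(theta))]
    psi = np.zeros(maxlag)
    for j in range(maxlag):
        tail = sum(phi[k] * psi[j - k] for k in range(1, min(j, len(phi) - 1) + 1))
        psi[j] = (theta[j] - tail) / phi[0]
    return psi

# same pair as the module-level example: phi(L) = 1 - 0.8L, theta(L) = 1 + 0.1L
print(lag_div([1, 0.1], [1, -0.8]))   # 1.0, 0.9, 0.72, 0.576, ...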
aronparsons/spacewalk
|
refs/heads/master
|
client/rhel/rhnlib/rhn/SSL.py
|
2
|
#
# Higher-level SSL objects used by rpclib
#
# Copyright (c) 2002--2015 Red Hat, Inc.
#
# Author: Mihai Ibanescu <misa@redhat.com>
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
# You must obey the GNU General Public License in all respects
# for all of the code used other than OpenSSL. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you
# do not wish to do so, delete this exception statement from your
# version. If you delete this exception statement from all source
# files in the program, then also delete it here.
"""
rhn.SSL builds an abstraction on top of the objects provided by pyOpenSSL
"""
from OpenSSL import SSL
# SSL.crypto is provided to other modules
from OpenSSL import crypto
import os
import socket
import select
import sys
DEFAULT_TIMEOUT = 120
class SSLSocket:
"""
Class that wraps a pyOpenSSL Connection object, adding more methods
"""
def __init__(self, socket, trusted_certs=None):
# SSL.Context object
self._ctx = None
# SSL.Connection object
self._connection = None
self._sock = socket
self._trusted_certs = []
# convert None to empty list
trusted_certs = trusted_certs or []
for f in trusted_certs:
self.add_trusted_cert(f)
# SSL method to use
self._ssl_method = SSL.SSLv23_METHOD
# Flags to pass to the SSL layer
self._ssl_verify_flags = SSL.VERIFY_PEER
# Buffer size for reads
self._buffer_size = 8192
# Position, for tell()
self._pos = 0
# Buffer
self._buffer = ""
# Flag to show if makefile() was called
self._makefile_called = 0
self._closed = None
def add_trusted_cert(self, file):
"""
Adds a trusted certificate to the certificate store of the SSL context
object.
"""
if not os.access(file, os.R_OK):
raise ValueError("Unable to read certificate file %s" % file)
self._trusted_certs.append(file.encode("utf-8"))
def init_ssl(self):
"""
Initializes the SSL connection.
"""
self._check_closed()
# Get a context
self._ctx = SSL.Context(self._ssl_method)
if self._trusted_certs:
# We have been supplied with trusted CA certs
for f in self._trusted_certs:
self._ctx.load_verify_locations(f)
else:
# Reset the verify flags
self._ssl_verify_flags = 0
self._ctx.set_verify(self._ssl_verify_flags, ssl_verify_callback)
if hasattr(SSL, "OP_DONT_INSERT_EMPTY_FRAGMENTS"):
# Certain SSL implementations break when empty fragments are
# initially sent (even if sending them is compliant to
# SSL 3.0 and TLS 1.0 specs). Play it safe and disable this
# feature (openssl 0.9.6e and later)
self._ctx.set_options(SSL.OP_DONT_INSERT_EMPTY_FRAGMENTS)
# Init the connection
self._connection = SSL.Connection(self._ctx, self._sock)
# Place the connection in client mode
self._connection.set_connect_state()
def makefile(self, mode, bufsize=None):
"""
Returns self, since we are a file-like object already
"""
if bufsize:
self._buffer_size = bufsize
# Increment the counter with the number of times we've called makefile
# - we don't want close to actually close things until all the objects
# that originally called makefile() are gone
self._makefile_called = self._makefile_called + 1
return self
def close(self):
"""
Closes the SSL connection
"""
# XXX Normally sock.makefile does a dup() on the socket file
# descriptor; httplib relies on this, but there is no dup for an ssl
# connection; so we have to count how many times makefile() was called
if self._closed:
# Nothing to do
return
if not self._makefile_called:
self._really_close()
return
self._makefile_called = self._makefile_called - 1
def _really_close(self):
self._connection.shutdown()
self._connection.close()
self._closed = 1
def _check_closed(self):
if self._closed:
raise ValueError("I/O operation on closed file")
def __getattr__(self, name):
if hasattr(self._connection, name):
return getattr(self._connection, name)
raise AttributeError(name)
# File methods
def isatty(self):
"""
Returns false always.
"""
return 0
def tell(self):
return self._pos
def seek(self, pos, mode=0):
raise NotImplementedError("seek")
def read(self, amt=None):
"""
Reads up to amt bytes from the SSL connection.
"""
self._check_closed()
# Initially, the buffer size is the default buffer size.
# Unfortunately, pending() does not return meaningful data until
# recv() is called, so we only adjust the buffer size after the
# first read
buffer_size = self._buffer_size
buffer_length = len(self._buffer)
# Read only the specified amount of data
while buffer_length < amt or amt is None:
# if amt is None (read till the end), fills in self._buffer
if amt is not None:
buffer_size = min(amt - buffer_length, buffer_size)
try:
data = self._connection.recv(buffer_size)
self._buffer = self._buffer + data
buffer_length = len(self._buffer)
# More bytes to read?
pending = self._connection.pending()
if pending == 0:
# we're done here
break
except SSL.ZeroReturnError:
# Nothing more to be read
break
except SSL.SysCallError:
e = sys.exc_info()[1]
print("SSL exception", e.args)
break
except SSL.WantWriteError:
self._poll(select.POLLOUT, 'read')
except SSL.WantReadError:
self._poll(select.POLLIN, 'read')
if amt:
ret = self._buffer[:amt]
self._buffer = self._buffer[amt:]
else:
ret = self._buffer
self._buffer = ""
self._pos = self._pos + len(ret)
return ret
def _poll(self, filter_type, caller_name):
poller = select.poll()
poller.register(self._sock, filter_type)
res = poller.poll(self._sock.gettimeout() * 1000)
if res == []:
raise TimeoutException("Connection timed out on %s" % caller_name)
def write(self, data):
"""
Writes to the SSL connection.
"""
self._check_closed()
# XXX Should use sendall
# sent = self._connection.sendall(data)
origlen = len(data)
while True:
try:
sent = self._connection.send(data)
if sent == len(data):
break
data = data[sent:]
except SSL.WantWriteError:
self._poll(select.POLLOUT, 'write')
except SSL.WantReadError:
self._poll(select.POLLIN, 'write')
return origlen
def recv(self, amt):
return self.read(amt)
send = write
sendall = write
def readline(self, length=None):
"""
Reads a single line (up to `length' characters long) from the SSL
connection.
"""
self._check_closed()
while True:
# charcount contains the number of chars to be outputted (or None
# if none to be outputted at this time)
charcount = None
i = self._buffer.find('\n')
if i >= 0:
# Go one char past newline
charcount = i + 1
elif length and len(self._buffer) >= length:
charcount = length
if charcount is not None:
ret = self._buffer[:charcount]
self._buffer = self._buffer[charcount:]
self._pos = self._pos + len(ret)
return ret
# Determine the number of chars to be read next
bufsize = self._buffer_size
if length:
# we know length > len(self._buffer)
bufsize = min(self._buffer_size, length - len(self._buffer))
try:
data = self._connection.recv(bufsize)
self._buffer = self._buffer + data
except SSL.ZeroReturnError:
# Nothing more to be read
break
except SSL.WantWriteError:
self._poll(select.POLLOUT, 'readline')
except SSL.WantReadError:
self._poll(select.POLLIN, 'readline')
# We got here if we're done reading, so return everything
ret = self._buffer
self._buffer = ""
self._pos = self._pos + len(ret)
return ret
def ssl_verify_callback(conn, cert, errnum, depth, ok):
"""
Verify callback, which will be called for each certificate in the
certificate chain.
"""
# Nothing by default
return ok
class TimeoutException(SSL.Error, socket.timeout):
def __init__(self, *args):
self.args = args
def __str__(self):
return "Timeout Exception"
|
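A minimal usage sketch of the wrapper above, assuming the module is importable as rhn.SSL; the host, port and CA path are placeholders rather than values from the source:

import socket
from rhn.SSL import SSLSocket, DEFAULT_TIMEOUT

raw = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
raw.settimeout(DEFAULT_TIMEOUT)      # _poll() turns gettimeout() into its poll timeout
raw.connect(("example.com", 443))    # placeholder host and port

conn = SSLSocket(raw, trusted_certs=["/path/to/ca-cert.pem"])   # placeholder CA file
conn.init_ssl()                      # builds the SSL.Context and client-mode Connection
conn.write("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
print(conn.readline())               # first line of the HTTP response
conn.close()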
home-assistant/home-assistant
|
refs/heads/dev
|
tests/components/wilight/__init__.py
|
2
|
"""Tests for the WiLight component."""
from pywilight.const import DOMAIN
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_MANUFACTURER,
ATTR_UPNP_MODEL_NAME,
ATTR_UPNP_MODEL_NUMBER,
ATTR_UPNP_SERIAL,
)
from homeassistant.components.wilight.config_flow import (
CONF_MODEL_NAME,
CONF_SERIAL_NUMBER,
)
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
HOST = "127.0.0.1"
WILIGHT_ID = "000000000099"
SSDP_LOCATION = "http://127.0.0.1/"
UPNP_MANUFACTURER = "All Automacao Ltda"
UPNP_MODEL_NAME_P_B = "WiLight 0102001800010009-10010010"
UPNP_MODEL_NAME_DIMMER = "WiLight 0100001700020009-10010010"
UPNP_MODEL_NAME_COLOR = "WiLight 0107001800020009-11010"
UPNP_MODEL_NAME_LIGHT_FAN = "WiLight 0104001800010009-10"
UPNP_MODEL_NAME_COVER = "WiLight 0103001800010009-10"
UPNP_MODEL_NUMBER = "123456789012345678901234567890123456"
UPNP_SERIAL = "000000000099"
UPNP_MAC_ADDRESS = "5C:CF:7F:8B:CA:56"
UPNP_MANUFACTURER_NOT_WILIGHT = "Test"
CONF_COMPONENTS = "components"
MOCK_SSDP_DISCOVERY_INFO_P_B = {
ATTR_SSDP_LOCATION: SSDP_LOCATION,
ATTR_UPNP_MANUFACTURER: UPNP_MANUFACTURER,
ATTR_UPNP_MODEL_NAME: UPNP_MODEL_NAME_P_B,
ATTR_UPNP_MODEL_NUMBER: UPNP_MODEL_NUMBER,
ATTR_UPNP_SERIAL: UPNP_SERIAL,
}
MOCK_SSDP_DISCOVERY_INFO_WRONG_MANUFACTORER = {
ATTR_SSDP_LOCATION: SSDP_LOCATION,
ATTR_UPNP_MANUFACTURER: UPNP_MANUFACTURER_NOT_WILIGHT,
ATTR_UPNP_MODEL_NAME: UPNP_MODEL_NAME_P_B,
ATTR_UPNP_MODEL_NUMBER: UPNP_MODEL_NUMBER,
ATTR_UPNP_SERIAL: UPNP_SERIAL,
}
MOCK_SSDP_DISCOVERY_INFO_MISSING_MANUFACTORER = {
ATTR_SSDP_LOCATION: SSDP_LOCATION,
ATTR_UPNP_MODEL_NAME: UPNP_MODEL_NAME_P_B,
ATTR_UPNP_MODEL_NUMBER: UPNP_MODEL_NUMBER,
ATTR_UPNP_SERIAL: UPNP_SERIAL,
}
async def setup_integration(
hass: HomeAssistant,
) -> MockConfigEntry:
"""Mock ConfigEntry in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=WILIGHT_ID,
data={
CONF_HOST: HOST,
CONF_SERIAL_NUMBER: UPNP_SERIAL,
CONF_MODEL_NAME: UPNP_MODEL_NAME_P_B,
},
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
|
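A sketch of how the helper above is typically consumed in a component test; the test name is hypothetical and the hass fixture comes from Home Assistant's pytest harness, while the assertions only use constants defined in this file:

async def test_setup_creates_entry(hass):
    """Set up the mocked WiLight config entry and inspect its data."""
    entry = await setup_integration(hass)
    assert entry.unique_id == WILIGHT_ID
    assert entry.data[CONF_HOST] == HOST
    assert entry.data[CONF_MODEL_NAME] == UPNP_MODEL_NAME_P_B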
SantosDevelopers/sborganicos
|
refs/heads/master
|
venv/lib/python3.5/site-packages/pkg_resources/_vendor/packaging/specifiers.py
|
1107
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not; if we do not support prereleases then we can short circuit
# the logic if this version is a prerelease.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease and not
(prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
# its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not
x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec) and
self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
# is a pre-release version, we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
# is a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release then this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
# Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
# Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
|
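A short usage sketch of the classes above, imported here from the top-level packaging distribution rather than the vendored path; it exercises contains, __contains__ and filter, including the pre-release handling described in the comments:

from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1.4.2,<2.0")
print("1.6" in spec)                                    # True
print("1.3" in spec)                                    # False
print(list(spec.filter(["1.3", "1.6", "1.5.dev1"])))    # ['1.6']  pre-release dropped
print(list(spec.filter(["1.3", "1.6", "1.5.dev1"], prereleases=True)))
# ['1.6', '1.5.dev1']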
thodoris/djangoPharma
|
refs/heads/master
|
djangoPharma/env/Lib/site-packages/django/contrib/sites/shortcuts.py
|
615
|
from __future__ import unicode_literals
from django.apps import apps
def get_current_site(request):
"""
Checks if contrib.sites is installed and returns either the current
``Site`` object or a ``RequestSite`` object based on the request.
"""
# Imports are inside the function because its point is to avoid importing
# the Site models when django.contrib.sites isn't installed.
if apps.is_installed('django.contrib.sites'):
from .models import Site
return Site.objects.get_current(request)
else:
from .requests import RequestSite
return RequestSite(request)
|
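A minimal sketch of a view using the shortcut above; the view itself is hypothetical, and it works whether or not django.contrib.sites is installed because both Site and RequestSite expose name and domain:

from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponse

def site_banner(request):
    site = get_current_site(request)
    return HttpResponse("Serving %s (%s)" % (site.name, site.domain))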
petewarden/tensorflow
|
refs/heads/master
|
tensorflow/python/distribute/distribute_lib_test.py
|
5
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test DistributionStrategy, ReplicaContext, and supporting APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
class _TestReplicaContext(distribute_lib.ReplicaContext):
def merge_call(self, fn, *args, **kwargs):
return kwargs["test_arg"]
def _get_test_variable(name, synchronization, aggregation):
return {
"name": name,
"synchronization": synchronization,
"aggregation": aggregation
}
def _test_input_fn(input_context):
del input_context
return dataset_ops.DatasetV2.from_tensors(1.).repeat()
class _TestStrategy(distribute_lib.Strategy):
def __init__(self):
super(_TestStrategy, self).__init__(_TestExtended(self))
class _TestExtended(distribute_lib.StrategyExtendedV1):
def __init__(self, distribute):
super(_TestExtended, self).__init__(distribute)
worker_device_pairs = [("", ["/device:CPU:0"])]
self._input_workers = input_lib.InputWorkers(worker_device_pairs)
def _call_for_each_replica(self, fn, args, kwargs):
with _TestReplicaContext(
self._container_strategy(), replica_id_in_sync_group=0):
return fn(*args, **kwargs)
def _create_variable(self, next_creator, **kwargs):
return _get_test_variable(kwargs["name"], kwargs["synchronization"],
kwargs["aggregation"])
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[distribute_lib.InputContext()],
self._container_strategy())
def _distribute_datasets_from_function(self, dataset_fn, options):
return dataset_fn(distribute_lib.InputContext())
def _local_results(self, value):
return (value,)
def _reduce_to(self, reduce_op, value, destinations, options):
del reduce_op, destinations, options
return value
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
# TODO(tomhennigan) This is missing many things (e.g. ctx.run_op).
ctx = input_lib.MultiStepContext()
for _ in range(iterations):
fn(ctx, iterator.get_next())
return ctx
def _update(self, var, fn, args, kwargs, group):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._unwrap, result)
def _get_local_replica_id(self, replica_id_in_sync_group):
return replica_id_in_sync_group
def _assert_in_default_state(t):
t.assertIs(ds_context._get_default_replica_context(),
ds_context.get_replica_context())
t.assertIs(None, ds_context.get_cross_replica_context())
t.assertFalse(ds_context.in_cross_replica_context())
t.assertIs(ds_context._get_default_strategy(), ds_context.get_strategy())
t.assertFalse(ds_context.has_strategy())
def _run_in_and_out_of_scope(unbound_test_method):
def wrapper(test_case):
dist = _TestStrategy()
# Running in the default (replica) scope should be supported.
_assert_in_default_state(test_case)
unbound_test_method(test_case, dist)
# As well as running in the strategy scope.
with dist.scope():
unbound_test_method(test_case, dist)
_assert_in_default_state(test_case)
# When run under a different strategy the test method should fail.
another_strategy = _TestStrategy()
msg = "Mixing different .*Strategy objects"
with test_case.assertRaisesRegex(RuntimeError, msg):
with another_strategy.scope():
unbound_test_method(test_case, dist)
return wrapper
class TestStrategyTest(test.TestCase):
def testCallForEachReplica(self):
_assert_in_default_state(self)
dist = _TestStrategy()
def run_fn():
replica_context = ds_context.get_replica_context()
self.assertIsNotNone(replica_context)
self.assertIs(None, ds_context.get_cross_replica_context())
self.assertFalse(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
self.assertEqual("foo", replica_context.merge_call(None, test_arg="foo"))
expected_value = _get_test_variable(
"bar", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="bar"))
dist.extended.call_for_each_replica(run_fn)
with dist.scope():
dist.extended.call_for_each_replica(run_fn)
_assert_in_default_state(self)
def testScope(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="baz"))
_assert_in_default_state(self)
def testScopeDeviceNestingError(self):
_assert_in_default_state(self)
dist = _TestStrategy()
# Open a device scope with dist.scope().
dist.extended._default_device = "/device:GPU:0"
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with ops.device("/device:CPU:0"):
with self.assertRaisesRegex(RuntimeError, "Device scope nesting error"):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testScopeVarCreatorNestingError(self):
def creator(next_creator, **kwargs):
return next_creator(**kwargs)
_assert_in_default_state(self)
dist = _TestStrategy()
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with variable_scope.variable_creator_scope(creator):
with self.assertRaisesRegex(RuntimeError,
"Variable creator scope nesting error"):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testScopeVarScopeNestingError(self):
# We create a new graph here to simplify clean-up, since the error
# we are triggering happens in the middle of scope.__exit__() and
# leaves us in a weird state.
with ops.Graph().as_default():
_assert_in_default_state(self)
dist = _TestStrategy()
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with variable_scope.variable_scope("AA"):
with self.assertRaisesRegex(RuntimeError,
"Variable scope nesting error"):
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testSettingSynchronizationAndAggregation(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.ON_WRITE,
variable_scope.VariableAggregation.MEAN)
self.assertDictEqual(
expected_value,
variable_scope.variable(
1.0,
name="baz",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN))
_assert_in_default_state(self)
def testSetStrategy(self):
_assert_in_default_state(self)
dist = _TestStrategy()
dist2 = _TestStrategy()
ds_context.experimental_set_strategy(dist)
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="baz"))
ds_context.experimental_set_strategy(dist2)
self.assertIs(dist2, ds_context.get_strategy())
ds_context.experimental_set_strategy(None)
_assert_in_default_state(self)
def testSetStrategyInScope(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
with self.assertRaisesRegex(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(_TestStrategy())
with self.assertRaisesRegex(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(dist)
with self.assertRaisesRegex(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(None)
_assert_in_default_state(self)
def testSameScopeNesting(self):
_assert_in_default_state(self)
dist = _TestStrategy()
scope_a = dist.scope()
with scope_a:
self.assertIs(dist, ds_context.get_strategy())
scope_b = dist.scope()
with scope_b:
self.assertIs(dist, ds_context.get_strategy())
with scope_a:
self.assertIs(dist, ds_context.get_strategy())
self.assertIs(dist, ds_context.get_strategy())
self.assertIs(dist, ds_context.get_strategy())
dist2 = _TestStrategy()
scope2 = dist2.scope()
with self.assertRaisesRegex(
RuntimeError, "Mixing different tf.distribute.Strategy objects"):
with scope2:
pass
_assert_in_default_state(self)
with scope_b:
self.assertIs(dist, ds_context.get_strategy())
_assert_in_default_state(self)
@_run_in_and_out_of_scope
def testMakeInputFnIterator(self, dist):
self.assertIsNotNone(dist.make_input_fn_iterator(_test_input_fn))
@_run_in_and_out_of_scope
def testReduce(self, dist):
x = constant_op.constant(1.)
x_r = dist.reduce(reduce_util.ReduceOp.MEAN, x, axis=None)
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
def testReductions_acceptStringOps(self):
dist = _TestStrategy()
for op in ("mean", "MEAN", "sum", "SUM"):
x = constant_op.constant(1.)
y = constant_op.constant(1.)
x_r = dist.reduce(op, x, axis=None)
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
x_r = dist.extended.reduce_to(op, x, "/CPU:0")
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
x_r, y_r = dist.extended.batch_reduce_to(op,
((x, "/CPU:0"), (y, "/CPU:0")))
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
self.assertEqual(self.evaluate(y), self.evaluate(y_r))
@_run_in_and_out_of_scope
def testExperimentalRunStepsOnIterator(self, dist):
all_inputs = []
dataset = dataset_ops.Dataset.from_tensors(1.).repeat()
dist.extended.experimental_run_steps_on_iterator(
lambda _, inputs: all_inputs.append(self.evaluate(inputs)),
dataset_ops.make_one_shot_iterator(dataset))
self.assertEqual(all_inputs, [1.])
@_run_in_and_out_of_scope
def testReduceTo(self, dist):
x = constant_op.constant(1.)
x_r = dist.extended.reduce_to(reduce_util.ReduceOp.MEAN, x, "/CPU:0")
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
@_run_in_and_out_of_scope
def testBatchReduceTo(self, dist):
x = constant_op.constant(1.)
y = constant_op.constant(1.)
x_r, y_r = dist.extended.batch_reduce_to(reduce_util.ReduceOp.MEAN,
((x, "/CPU:0"), (y, "/CPU:0")))
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
self.assertEqual(self.evaluate(y), self.evaluate(y_r))
@_run_in_and_out_of_scope
def testUpdate(self, dist):
with dist.scope():
v = variables.Variable(1.)
t = constant_op.constant(2.)
def assign_fn(vv, tt):
self.assertIs(vv, v)
self.assertIs(tt, t)
dist.extended.update(v, assign_fn, (t,))
@_run_in_and_out_of_scope
def testUpdateAutoGraph(self, dist):
with dist.scope():
v = variables.Variable(1.)
t = constant_op.constant(2.)
def assign_fn(unused_vv, unused_tt):
self.assertTrue(converter_testing.is_inside_generated_code())
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
dist.extended.update(v, assign_fn, (t,))
test_fn()
@_run_in_and_out_of_scope
def testUpdateNonSlot(self, dist):
t = constant_op.constant(2.)
update_calls = []
dist.extended.update_non_slot(t, lambda: update_calls.append(1))
self.assertEqual(len(update_calls), 1)
@_run_in_and_out_of_scope
def testUpdateNonSlotAutoGraph(self, dist):
t = constant_op.constant(2.)
def update_fn():
self.assertTrue(converter_testing.is_inside_generated_code())
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
dist.extended.update_non_slot(t, update_fn)
test_fn()
def testClusterResolverDefaultNotImplemented(self):
dist = _TestStrategy()
self.assertIsNone(dist.cluster_resolver)
base_cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
cluster_resolver = SimpleClusterResolver(base_cluster_spec)
dist.extended._cluster_resolver = cluster_resolver
self.assertIs(dist.cluster_resolver, cluster_resolver)
# _TestStrategy2 is like _TestStrategy, except it doesn't change variable
# creation.
class _TestStrategy2(distribute_lib.Strategy):
def __init__(self):
super(_TestStrategy2, self).__init__(_TestExtended2(self))
class _TestExtended2(_TestExtended):
def _create_variable(self, next_creator, **kwargs):
return next_creator(**kwargs)
class DefaultDistributionStrategyTest(test.TestCase, parameterized.TestCase):
def testMergeCall(self):
_assert_in_default_state(self)
def merge_fn(dist, s):
self.assertIs(ds_context._get_default_strategy(), dist)
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertIs(dist, ds_context.get_strategy())
self.assertFalse(ds_context.has_strategy())
return "foo_" + s
replica_ctx = ds_context.get_replica_context()
self.assertIs(ds_context._get_default_replica_context(), replica_ctx)
self.assertEqual("foo_bar", replica_ctx.merge_call(merge_fn, args=("bar",)))
_assert_in_default_state(self)
def testMergeCallAutoGraph(self):
_assert_in_default_state(self)
def merge_fn(_, s):
self.assertTrue(converter_testing.is_inside_generated_code())
return s
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
replica_ctx = ds_context.get_replica_context()
replica_ctx.merge_call(merge_fn, args=("bar",))
test_fn()
def testScopeMostlyNoOp(self):
_assert_in_default_state(self)
test_strategy = _TestStrategy2()
with test_strategy.scope():
variable_scope.variable(1.0, name="before")
default_strategy = ds_context._get_default_strategy()
scope = default_strategy.scope()
with scope:
_assert_in_default_state(self)
with test_strategy.scope():
with self.assertRaisesRegex(
RuntimeError, "Mixing different tf.distribute.Strategy objects"):
variable_scope.variable(1.0, name="error")
with scope:
_assert_in_default_state(self)
with test_strategy.scope():
with self.assertRaisesRegex(
RuntimeError, "Mixing different tf.distribute.Strategy objects"):
variable_scope.variable(1.0, name="also_error")
_assert_in_default_state(self)
_assert_in_default_state(self)
with test_strategy.scope():
variable_scope.variable(1.0, name="after")
def testExperimentalRunV2(self):
default_strategy = ds_context._get_default_strategy()
dataset = dataset_ops.Dataset.range(10).batch(2)
iterator = default_strategy.extended._make_dataset_iterator(dataset)
next_val = iterator.get_next()
def train_step(input_data):
return input_data
for _ in range(2):
default_strategy.run(train_step, args=(next_val,))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDistributedDatasets(self):
default_strategy = ds_context._get_default_strategy()
if context.executing_eagerly():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset = default_strategy.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
next_val = next(iter(dist_dataset))
else:
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10).batch(2)
dist_dataset = default_strategy.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
iterator = dist_dataset.make_initializable_iterator()
self.evaluate(iterator.initializer)
next_val = iterator.get_next()
self.assertAllEqual([0, 1], self.evaluate(next_val))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDistributedDatasetsFromFunction(self):
default_strategy = ds_context._get_default_strategy()
if context.executing_eagerly():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset_from_func = \
default_strategy.distribute_datasets_from_function(
dataset_fn)
next_val = next(iter(dist_dataset_from_func))
self.assertAllEqual([0, 1], self.evaluate(next_val))
else:
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset_from_func = \
default_strategy.distribute_datasets_from_function(
dataset_fn)
dataset_ops.make_initializable_iterator(dist_dataset_from_func)
@combinations.generate(combinations.combine(tf_api_version=1))
def testV1(self):
self.assertIsInstance(ds_context.get_strategy(), distribute_lib.StrategyV1)
@combinations.generate(combinations.combine(tf_api_version=2))
def testV2(self):
self.assertIsInstance(ds_context.get_strategy(), distribute_lib.Strategy)
class InputContextTest(test.TestCase):
def testProperties(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=2, input_pipeline_id=1, num_replicas_in_sync=6)
self.assertEqual(6, input_context.num_replicas_in_sync)
self.assertEqual(1, input_context.input_pipeline_id)
self.assertEqual(2, input_context.num_input_pipelines)
def testPerReplicaBatchSize(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=2, input_pipeline_id=1, num_replicas_in_sync=6)
self.assertEqual(2, input_context.get_per_replica_batch_size(12))
with self.assertRaises(ValueError):
input_context.get_per_replica_batch_size(13)
def testStr(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=1, input_pipeline_id=0, num_replicas_in_sync=42)
self.assertEqual(
"tf.distribute.InputContext(input pipeline id 0, total: 1)",
str(input_context))
input_context = distribute_lib.InputContext(
num_input_pipelines=3, input_pipeline_id=1, num_replicas_in_sync=42)
self.assertEqual(
"tf.distribute.InputContext(input pipeline id 1, total: 3)",
str(input_context))
if __name__ == "__main__":
test.main()
|
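The tests above drive internal modules directly; below is a public-API sketch of the same default-strategy behaviour, assuming TensorFlow 2.x: outside any strategy scope, tf.distribute.get_strategy() returns the default single-replica strategy, and run() simply invokes the function on that one replica:

import tensorflow as tf

strategy = tf.distribute.get_strategy()        # default (single-replica) strategy
dataset = tf.data.Dataset.range(10).batch(2)

@tf.function
def train_step(batch):
    return tf.reduce_sum(batch)

for batch in dataset.take(2):
    print(strategy.run(train_step, args=(batch,)))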
zhanghenry/stocks
|
refs/heads/master
|
tests/template_tests/filter_tests/test_pluralize.py
|
430
|
from decimal import Decimal
from django.template.defaultfilters import pluralize
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_integers(self):
self.assertEqual(pluralize(1), '')
self.assertEqual(pluralize(0), 's')
self.assertEqual(pluralize(2), 's')
def test_floats(self):
self.assertEqual(pluralize(0.5), 's')
self.assertEqual(pluralize(1.5), 's')
def test_decimals(self):
self.assertEqual(pluralize(Decimal(1)), '')
self.assertEqual(pluralize(Decimal(0)), 's')
self.assertEqual(pluralize(Decimal(2)), 's')
def test_lists(self):
self.assertEqual(pluralize([1]), '')
self.assertEqual(pluralize([]), 's')
self.assertEqual(pluralize([1, 2, 3]), 's')
def test_suffixes(self):
self.assertEqual(pluralize(1, 'es'), '')
self.assertEqual(pluralize(0, 'es'), 'es')
self.assertEqual(pluralize(2, 'es'), 'es')
self.assertEqual(pluralize(1, 'y,ies'), 'y')
self.assertEqual(pluralize(0, 'y,ies'), 'ies')
self.assertEqual(pluralize(2, 'y,ies'), 'ies')
self.assertEqual(pluralize(0, 'y,ies,error'), '')
|
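A small usage sketch matching the suffix semantics asserted above; in a template the same filter is written as {{ count }} cherr{{ count|pluralize:"y,ies" }}:

from django.template.defaultfilters import pluralize

for count in (0, 1, 2):
    print("%d cherr%s" % (count, pluralize(count, "y,ies")))
# 0 cherries / 1 cherry / 2 cherries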
cerebrumaize/leetcode
|
refs/heads/master
|
78.Subsets/1.py
|
1
|
#!/usr/bin/env python
'''code description'''
# pylint: disable = I0011, E0401, C0103, C0321
class Solution(object):
'''Solution description'''
def func(self, nums):
'''Solution function description'''
from itertools import combinations
if len(nums) == 1: return[[], nums]
res = [[]]
for i in range(1, len(nums)):
for t in combinations(nums, i):
res.append(list(t))
res.append(nums)
return res
def main():
'''main function'''
_solution = Solution()
inp = [[0]]
for i in inp:
print(_solution.func(i))
if __name__ == "__main__":
main()
|
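An illustrative run of the combinations-based solution above, assuming the Solution class is in scope; the input is arbitrary and the output is the power set in the order the loops produce it:

subsets = Solution().func([1, 2, 3])
print(subsets)
# [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]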
developerQuinnZ/this_will_work
|
refs/heads/master
|
labeler/migrations/0004_auto_20170803_2308.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-04 06:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('labeler', '0003_auto_20170803_2247'),
]
operations = [
migrations.RemoveField(
model_name='image',
name='uploaded_date',
),
migrations.AddField(
model_name='image',
name='taken_date',
field=models.DateTimeField(default=None, null=True, verbose_name='Date photo was taken.'),
),
migrations.AddField(
model_name='image',
name='updated_date',
field=models.DateTimeField(auto_now=True, verbose_name='Date photo was changed.'),
),
migrations.AlterField(
model_name='image',
name='created_date',
field=models.DateTimeField(auto_now_add=True, verbose_name='Date photo was uploaded.'),
),
]
|
playfulgod/kernel_lge_dory
|
refs/heads/cm-11.0
|
tools/perf/tests/attr.py
|
3174
|
#! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
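#
# A minimal, hypothetical test description illustrating the layout above
# (the command, args and field values here are invented for this sketch):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   sample_period = 4000
#   sample_type   = 263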
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
# The event record section header contains 'event' word,
        # optionally followed by ':', allowing the 'parent
        # event' to be loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
minhphung171093/OpenERP_V8
|
refs/heads/master
|
openerp/report/render/makohtml2html/__init__.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from makohtml2html import parseNode
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
KarolBedkowski/tbviewer
|
refs/heads/master
|
tbviewer/main.py
|
1
|
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © Karol Będkowski, 2015-2020
#
# This file is part of tbviewer
# Distributed under terms of the GPLv3 license.
"""Main module."""
import optparse
import logging
import tempfile
import time
import os
from . import version
_LOG = logging.getLogger(__name__)
def _parse_opt():
"""Parse cli options."""
optp = optparse.OptionParser(version=version.NAME + version.VERSION)
group = optparse.OptionGroup(optp, "Debug options")
group.add_option("--debug", "-d", action="store_true", default=False,
help="enable debug messages")
group.add_option("--shell", action="store_true", default=False,
help="start shell")
optp.add_option_group(group)
return optp.parse_args()
def run_viewer():
"""Run viewer application."""
# parse options
options, args = _parse_opt()
    # logging setup
from .logging_setup import logging_setup
logdir = tempfile.mkdtemp("_log_" + str(int(time.time())), "tbviewer_")
logging_setup(os.path.join(logdir, "tbviewer.log"), options.debug)
if options.shell:
# starting interactive shell
from IPython.terminal import ipapp
app = ipapp.TerminalIPythonApp.instance()
app.initialize(argv=[])
app.start()
return
from . import wnd_viewer
fname = args[0] if args and args[0] else None
window = wnd_viewer.WndViewer(fname)
window.mainloop()
def run_calibrate():
"""Run application."""
# parse options
options, args = _parse_opt()
    # logging setup
from .logging_setup import logging_setup
logdir = tempfile.mkdtemp("_log_" + str(int(time.time())), "tbviewer_")
logging_setup(os.path.join(logdir, "tbviewer.log"), options.debug)
if options.shell:
# starting interactive shell
from IPython.terminal import ipapp
app = ipapp.TerminalIPythonApp.instance()
app.initialize(argv=[])
app.start()
return
from . import wnd_calibrate
fname = args[0] if args else None
mapfname = args[1] if args and len(args) > 1 else None
window = wnd_calibrate.WndCalibrate(fname, mapfname)
window.mainloop()
|
flux3dp/fluxghost
|
refs/heads/master
|
fluxghost/__init__.py
|
3
|
__version__ = "1.9.0"
|
rbaindourov/v8-inspector
|
refs/heads/master
|
Source/chrome/tools/gyp/test/many-actions/gyptest-many-actions-unsorted.py
|
54
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure lots of actions in the same target don't cause exceeding command
line length.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('many-actions-unsorted.gyp')
test.build('many-actions-unsorted.gyp', test.ALL)
for i in range(15):
test.built_file_must_exist('generated_%d.h' % i)
# Make sure the optimized cygwin setup doesn't cause problems for incremental
# builds.
test.touch('file1')
test.build('many-actions-unsorted.gyp', test.ALL)
test.touch('file0')
test.build('many-actions-unsorted.gyp', test.ALL)
test.touch('file2')
test.touch('file3')
test.touch('file4')
test.build('many-actions-unsorted.gyp', test.ALL)
test.pass_test()
|
simon-andrews/roboclone
|
refs/heads/master
|
roboclone/main.py
|
1
|
#!/usr/bin/python
import argparse
import os
from .eclipse import write_metadata_files
from .git import clone
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("remote_url", help="url of the remote git repository (https or ssh)")
parser.add_argument("destination", help="directory to clone repository to")
parser.add_argument("--gitargs", metavar="args", type=str, help="extra arguments for git")
parser.add_argument("--force", nargs="?", help="write metadata files even if they already exist in the repo",
default=False, type=bool)
return parser.parse_args()
def main():
args = parse_args()
repo = clone(args.remote_url, args.destination)
write_metadata_files(args.destination, os.path.split(args.destination)[1], args.force)
if __name__ == "__main__":
main()
|
bateman/mood-c2a
|
refs/heads/master
|
moodc2a/converter.py
|
1
|
# author: bateman
# date: nov. 13, 2015
# version: 0.1
import csv
import argparse
import string
class Csv2Aiken:
"""
CSV (input) file must be formatted as follows:
Question;Answer;Index;Correct
What is the correct answer to this question?;Is it this one;A;
;Maybe this answer;B;
;Possibly this one;C;OK
...
    Aiken (output) file will look like this:
What is the correct answer to this question?
A. Is it this one
B. Maybe this answer
C. Possibly this one
ANSWER: C
...
"""
_ANSWER = 'ANSWER:'
_INDEX_SEP = '.'
_INDEX_DICT = {'1': 'A', '2': 'B', '3': 'C', '4': 'D'}
def __init__(self):
pass
def convert(self, infile, outfile):
_out = open(outfile, mode='wb')
with open(infile, mode='rU') as _in:
csvreader = csv.DictReader(_in, dialect='excel', delimiter=';')
i = 0
for row in csvreader:
i += 1
_question = '{0}\n'.format(row['Question'])
if _question != '\n':
_out.write(_question)
_out.write('{0}{1} {2}\n'.format(row['Index'], self._INDEX_SEP, row['Answer']))
if string.lower(row['Correct']) == 'ok':
_solution = self._INDEX_DICT[str(i)]
if i == 3:
_out.write('{0} {1}\n\n'.format(self._ANSWER, _solution))
i = 0
_in.close()
_out.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Acquire input CSV and output AIKEN files.')
parser.add_argument('-i', type=str, nargs=1, action='store', dest='_in', help='CSV input file to convert')
parser.add_argument('-o', type=str, nargs=1, action='store', dest='_out', help='AIKEN converted output file')
args = parser.parse_args()
c2a = Csv2Aiken()
c2a.convert(infile=args._in[0], outfile=args._out[0])
|
sparkslabs/kamaelia_
|
refs/heads/master
|
Sketches/JL/AIM_/protneg.py
|
3
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#protocol negotiation
import pickle
import socket
from struct import pack, unpack
from aimUtil import *
fle = open("bos_auth.dat")
BOS_server, port, auth_cookie = pickle.load(fle)
fle.close()
seq = getseqnum()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((BOS_server, port))
### ================== send CLI_COOKIE ========================== ###
protocol_version = 1
protocol_version = chrs(protocol_version, 4)
flap_body = protocol_version
flap_body = appendTLV(0x06, auth_cookie, flap_body)
flap = '*' + channel1 + seq.next() + chrs(len(flap_body), 2) + flap_body
sock.send(flap)
### ============= get supported services from server ============ ###
reply = sock.recv(1000) # this should be the acknowledgement
printHex(reply)
reply = sock.recv(1000) # this should be the "server ready" message
printHex(reply)
fle = open("snac0103.dat", "wb")
pickle.dump(reply, fle)
fle.close()
snac_body = reply[flap_header_len + snac_header_len:]
unit_len = 2
codes_len = len(snac_body)/unit_len
fmt = '>%ih' % codes_len
supported_services_codes = unpack(fmt, snac_body)
snac_body = ""
for code in supported_services_codes:
if code in desired_service_versions:
snac_body += Double(code) + Double(desired_service_versions[code])
snac = makeSnac((0x01, 0x17), snac_body)
flap = makeFlap(0x02, seq.next(), snac)
sock.send(flap)
### ============== extract accepted services ================== ###
reply = sock.recv(1000)
printHex(reply)
fle = open("snac0118.dat", "wb")
pickle.dump(reply, fle)
fle.close()
snac_body = reply[flap_header_len + snac_header_len:]
fmt = '!%ih' % (len(snac_body)/2)
snac_body = unpack(fmt, snac_body)
services = snac_body[::2]
versions = snac_body[1::2]
accepted_services = dict(zip(services, versions))
### ============= request rate limits ========================== ###
RATE_LIMITS_REQUEST = 2
snac = makeSnac((0x01, 0x06), "", reqid = RATE_LIMITS_REQUEST)
flap = makeFlap(0x02, seq.next(), snac)
sock.send(flap)
### ================ extract rate limits ======================== ###
reply = sock.recv(1000)
fle = open("snac0107.dat", "wb")
pickle.dump(reply, fle)
fle.close()
snac_body = reply[flap_header_len + snac_header_len:]
num_rate_classes, = unpack('!h', snac_body[0:2])
snac_body = snac_body[2:]
rate_class_len = 2 + 4*8 + 1
rate_classes = snac_body[:num_rate_classes * rate_class_len]
rate_groups = snac_body[num_rate_classes * rate_class_len:]
rgcopy = rate_groups
rate_groups_hash = {}
LEN_PAIR = 4
while len(rgcopy) > 0:
group_id, npairs = unpack('!hh', rgcopy[0:4])
rawdata = rgcopy[4:4+npairs*LEN_PAIR]
group_data = unpackFour(rawdata)
group_data = zip(group_data[::2], group_data[1::2])
rate_groups_hash[group_id] = group_data
    rgcopy = rgcopy[4 + npairs*LEN_PAIR:]  # advance past the 4-byte header and this group's pairs
### =============== send rate limits acknowledged =============== ###
fmt = '!%ih' % len(rate_groups_hash.keys())
snac_body = pack(fmt, *rate_groups_hash.keys())
snac = makeSnac((0x01, 0x08), snac_body)
flap = makeFlap(2, seq.next(), snac)
sock.send(flap)
### set privacy flags
#SNAC (01, 14)
## request personal info
#SNAC (01, 0e)
## service request
#SNAC (01, 04)
### ============== request rights ============================= ###
# we're going to skip this section right now, because, with the barely
# functioning client this will be, we're not going to use any services
sock.close()
|
ChanduERP/odoo
|
refs/heads/8.0
|
doc/_extensions/html_domain.py
|
254
|
# -*- coding: utf-8 -*-
"""
Defines a "raw HTML" domain with a ``div[classes]`` and a number of roles
rendered more or less directly to HTML.
.. warning::
the purpose of this domain is *not* to document HTML or components
"""
from docutils import nodes, utils
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.body import LineBlock
import sphinx.roles
from sphinx.domains import Domain
def setup(app):
app.add_domain(HtmlDomain)
app.add_node(div, html=(
lambda self, node: self.body.append(self.starttag(node, 'div')),
lambda self, node: self.body.append('</div>\n')))
app.add_node(address, html=(
lambda self, node: self.body.append(self.starttag(node, 'address')),
lambda self, node: self.body.append('</address>\n')
))
app.add_node(cite, html=(visit_cite, depart_cite))
for name, node in [('mark', mark), ('ins', insert), ('del', delete),
('s', strikethrough), ('u', underline), ('small', small),
('kbd', kbd), ('var', var), ('samp', samp)]:
addnode(app, node, name)
class div(nodes.General, nodes.Element): pass
class Div(Directive):
optional_arguments = 1
final_argument_whitespace = 1
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
try:
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node = div(text)
node['classes'].extend(classes)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class address(nodes.General, nodes.Element): pass
class Address(LineBlock):
def run(self):
[node] = super(Address, self).run()
ad = address(node.rawsource, *node.children)
return [ad]
class mark(nodes.Inline, nodes.TextElement): pass
class insert(nodes.Inline, nodes.TextElement): pass
class delete(nodes.Inline, nodes.TextElement): pass
class strikethrough(nodes.Inline, nodes.TextElement): pass
class underline(nodes.Inline, nodes.TextElement): pass
class small(nodes.Inline, nodes.TextElement): pass
class kbd(nodes.Inline, nodes.FixedTextElement): pass
class var(nodes.Inline, nodes.FixedTextElement): pass
class samp(nodes.Inline, nodes.FixedTextElement): pass
def makerole(node):
return lambda name, rawtext, text, lineno, inliner, options=None, content=None:\
([node(rawtext.strip(), text.strip())], [])
def addnode(app, node, nodename):
app.add_node(node, html=(
lambda self, n: self.body.append(self.starttag(n, nodename)),
lambda self, n: self.body.append('</%s>' % nodename)
))
def initialism(*args, **kwargs):
nodes, _ = sphinx.roles.abbr_role(*args, **kwargs)
[abbr] = nodes
abbr.attributes.setdefault('classes', []).append('initialism')
return [abbr], []
def cite_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
text = utils.unescape(text)
m = sphinx.roles._abbr_re.search(text)
if m is None:
return [cite(text, text, **(options or {}))], []
content = text[:m.start()].strip()
source = m.group(1)
return [cite(content, content, source=source)], []
class cite(nodes.Inline, nodes.TextElement): pass
def visit_cite(self, node):
attrs = {}
if node.hasattr('source'):
attrs['title'] = node['source']
self.body.append(self.starttag(node, 'cite', '', **attrs))
def depart_cite(self, node):
    self.body.append('</cite>')
class HtmlDomain(Domain):
name = 'h'
label = 'HTML'
directives = {
'div': Div,
'address': Address,
}
roles = {
'mark': makerole(mark),
'ins': makerole(insert),
'del': makerole(delete),
's': makerole(strikethrough),
'u': makerole(underline),
'small': makerole(small),
'initialism': initialism,
'cite': cite_role,
'kbd': makerole(kbd),
'var': makerole(var),
'samp': makerole(samp),
}
|
kbrebanov/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/csvfile.py
|
22
|
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: csvfile
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "1.5"
short_description: read data from a TSV or CSV file
description:
- The csvfile lookup reads the contents of a file in CSV (comma-separated value) format.
The lookup looks for the row where the first column matches keyname, and returns the value in the second column, unless a different column is specified.
options:
col:
description: column to return (0 index).
default: "1"
default:
description: what to return if the value is not found in the file.
default: ''
delimiter:
description: field separator in the file, for a tab you can specify "TAB" or "t".
default: TAB
file:
description: name of the CSV/TSV file to open.
default: ansible.csv
encoding:
description: Encoding (character set) of the used CSV file.
default: utf-8
version_added: "2.1"
notes:
    - The default is for TSV files (tab delimited) not CSV (comma delimited) ... yes the name is misleading.
"""
EXAMPLES = """
- name: Match 'Li' on the first column, return the second column (0 based index)
debug: msg="The atomic number of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=,') }}"
- name: msg="Match 'Li' on the first column, but return the 3rd column (columns start counting after the match)"
debug: msg="The atomic mass of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=, col=2') }}"
"""
RETURN = """
_raw:
description:
- value(s) stored in file column
"""
import codecs
import csv
from collections import MutableSequence
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_native, to_text
class CSVRecoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding='utf-8'):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class CSVReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
f = CSVRecoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [to_text(s) for s in row]
def __iter__(self):
return self
class LookupModule(LookupBase):
def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1):
try:
f = open(filename, 'r')
creader = CSVReader(f, delimiter=to_bytes(delimiter), encoding=encoding)
for row in creader:
if row[0] == key:
return row[int(col)]
except Exception as e:
raise AnsibleError("csvfile: %s" % to_native(e))
return dflt
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
params = term.split()
key = params[0]
paramvals = {
'col': "1", # column to return
'default': None,
'delimiter': "TAB",
'file': 'ansible.csv',
'encoding': 'utf-8',
}
# parameters specified?
try:
for param in params[1:]:
name, value = param.split('=')
if name not in paramvals:
raise AnsibleAssertionError('%s not in paramvals' % name)
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if paramvals['delimiter'] == 'TAB':
paramvals['delimiter'] = "\t"
lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'])
if var is not None:
if isinstance(var, MutableSequence):
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
|
saturngod/pyWebTest-gitbook
|
refs/heads/master
|
book/js/Lib/itertools.py
|
630
|
import operator
class accumulate:
def __init__(self, iterable, func = operator.add):
self.it = iter(iterable)
self._total = None
self.func = func
def __iter__(self):
return self
def __next__(self):
        if self._total is None:  # the first value from the iterable is yielded unchanged
self._total = next(self.it)
return self._total
else:
element = next(self.it)
try:
self._total = self.func(self._total, element)
except:
raise TypeError("unsupported operand type")
return self._total
## Adapted from:
## https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-34
class chain:
def __init__(self, *iterables):
self._iterables_iter = iter(map(iter, iterables))
# little trick for the first chain.__next__() call
self._cur_iterable_iter = iter([])
def __iter__(self):
return self
def __next__(self):
while True:
try:
return next(self._cur_iterable_iter)
except StopIteration:
self._cur_iterable_iter = next(self._iterables_iter)
@classmethod
def from_iterable(cls, iterable):
for it in iterable:
for element in it:
yield element
class combinations:
def __init__(self, iterable, r):
self.pool = tuple(iterable)
self.n = len(self.pool)
self.r = r
self.indices = list(range(self.r))
self.zero = False
def __iter__(self):
return self
def __next__(self):
if self.r > self.n:
raise StopIteration
if not self.zero:
self.zero = True
return tuple(self.pool[i] for i in self.indices)
else:
try:
for i in reversed(range(self.r)):
if self.indices[i] != i + self.n - self.r:
break
self.indices[i] += 1
for j in range(i+1, self.r):
self.indices[j] = self.indices[j-1] + 1
return tuple(self.pool[i] for i in self.indices)
except:
raise StopIteration
class combinations_with_replacement:
def __init__(self, iterable, r):
self.pool = tuple(iterable)
self.n = len(self.pool)
self.r = r
self.indices = [0] * self.r
self.zero = False
def __iter__(self):
return self
def __next__(self):
if not self.n and self.r:
raise StopIteration
if not self.zero:
self.zero = True
return tuple(self.pool[i] for i in self.indices)
else:
try:
for i in reversed(range(self.r)):
if self.indices[i] != self.n - 1:
break
self.indices[i:] = [self.indices[i] + 1] * (self.r - i)
return tuple(self.pool[i] for i in self.indices)
except:
raise StopIteration
## Literally copied from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-63
class compress:
def __init__(self, data, selectors):
self.data = iter(data)
self.selectors = iter(selectors)
def __iter__(self):
return self
def __next__(self):
while True:
next_item = next(self.data)
next_selector = next(self.selectors)
if bool(next_selector):
return next_item
## Adapted from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-79
## I mimicked the > python3.1 behavior
class count:
"""
Input is an int or a float. The original Python 3 implementation
includes also complex numbers... but it still is not implemented
in Brython as complex type is NotImplemented
"""
def __init__(self, start = 0, step = 1):
if not isinstance(start, (int, float)):
raise TypeError('a number is required')
self.times = start - step
self.step = step
def __iter__(self):
return self
def __next__(self):
self.times += self.step
return self.times
def __repr__(self):
return 'count(%d)' % (self.times + self.step)
## Literally copied from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-112
class cycle:
def __init__(self, iterable):
self._cur_iter = iter(iterable)
self._saved = []
self._must_save = True
def __iter__(self):
return self
def __next__(self):
try:
next_elt = next(self._cur_iter)
if self._must_save:
self._saved.append(next_elt)
except StopIteration:
self._cur_iter = iter(self._saved)
next_elt = next(self._cur_iter)
self._must_save = False
return next_elt
## Literally copied from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-149
class dropwhile:
def __init__(self, predicate, iterable):
self._predicate = predicate
self._iter = iter(iterable)
self._dropped = False
def __iter__(self):
return self
def __next__(self):
value = next(self._iter)
if self._dropped:
return value
while self._predicate(value):
value = next(self._iter)
self._dropped = True
return value
## Adapted from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-261
class filterfalse:
def __init__(self, predicate, iterable):
# Make sure iterable *IS* iterable
self._iter = iter(iterable)
if predicate is None:
self._predicate = bool
else:
self._predicate = predicate
def __iter__(self):
return self
def __next__(self):
next_elt = next(self._iter)
while True:
if not self._predicate(next_elt):
return next_elt
next_elt = next(self._iter)
class groupby:
# [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
# [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D
def __init__(self, iterable, key=None):
if key is None:
key = lambda x: x
self.keyfunc = key
self.it = iter(iterable)
self.tgtkey = self.currkey = self.currvalue = object()
def __iter__(self):
return self
def __next__(self):
while self.currkey == self.tgtkey:
self.currvalue = next(self.it) # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
self.tgtkey = self.currkey
return (self.currkey, self._grouper(self.tgtkey))
def _grouper(self, tgtkey):
while self.currkey == tgtkey:
yield self.currvalue
self.currvalue = next(self.it) # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
## adapted from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-323
class islice:
def __init__(self, iterable, *args):
s = slice(*args)
self.start, self.stop, self.step = s.start or 0, s.stop, s.step
if not isinstance(self.start, int):
raise ValueError("Start argument must be an integer")
if self.stop != None and not isinstance(self.stop, int):
raise ValueError("Stop argument must be an integer or None")
if self.step is None:
self.step = 1
if self.start<0 or (self.stop != None and self.stop<0
) or self.step<=0:
raise ValueError("indices for islice() must be positive")
self.it = iter(iterable)
self.donext = None
self.cnt = 0
def __iter__(self):
return self
def __next__(self):
nextindex = self.start
if self.stop != None and nextindex >= self.stop:
raise StopIteration
while self.cnt <= nextindex:
nextitem = next(self.it)
self.cnt += 1
self.start += self.step
return nextitem
class permutations:
def __init__(self, iterable, r = None):
self.pool = tuple(iterable)
self.n = len(self.pool)
self.r = self.n if r is None else r
self.indices = list(range(self.n))
self.cycles = list(range(self.n, self.n - self.r, -1))
self.zero = False
self.stop = False
def __iter__(self):
return self
def __next__(self):
indices = self.indices
if self.r > self.n:
raise StopIteration
if not self.zero:
self.zero = True
return tuple(self.pool[i] for i in indices[:self.r])
i = self.r - 1
while i >= 0:
j = self.cycles[i] - 1
if j > 0:
self.cycles[i] = j
indices[i], indices[-j] = indices[-j], indices[i]
return tuple(self.pool[i] for i in indices[:self.r])
self.cycles[i] = len(indices) - i
n1 = len(indices) - 1
assert n1 >= 0
num = indices[i]
for k in range(i, n1):
indices[k] = indices[k+1]
indices[n1] = num
i -= 1
raise StopIteration
# copied from Python documentation on itertools.product
def product(*args, repeat=1):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = [tuple(pool) for pool in args] * repeat
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
## adapted from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-392
## (Brython)
## renamed to _product : the implementation fails for product('abc', [])
## with CPython 3.x
class _product:
def __init__(self, *args, **kw):
if len(kw) > 1:
raise TypeError("product() takes at most 1 argument (%d given)" %
len(kw))
self.repeat = kw.get('repeat', 1)
if not isinstance(self.repeat, int):
raise TypeError("integer argument expected, got %s" %
type(self.repeat))
self.gears = [x for x in args] * self.repeat
self.num_gears = len(self.gears)
        # initialization of indices to loop over
self.indicies = [(0, len(self.gears[x]))
for x in range(0, self.num_gears)]
self.cont = True
self.zero = False
def roll_gears(self):
        # Starting from the end of the gear indices, work towards the front,
        # incrementing the gear until the limit is reached. When the limit
        # is reached, carry the operation to the next gear
should_carry = True
for n in range(0, self.num_gears):
nth_gear = self.num_gears - n - 1
if should_carry:
count, lim = self.indicies[nth_gear]
count += 1
if count == lim and nth_gear == 0:
self.cont = False
if count == lim:
should_carry = True
count = 0
else:
should_carry = False
self.indicies[nth_gear] = (count, lim)
else:
break
def __iter__(self):
return self
def __next__(self):
if self.zero:
raise StopIteration
if self.repeat > 0:
if not self.cont:
raise StopIteration
l = []
for x in range(0, self.num_gears):
index, limit = self.indicies[x]
                l.append(self.gears[x][index])
self.roll_gears()
return tuple(l)
elif self.repeat == 0:
self.zero = True
return ()
else:
raise ValueError("repeat argument cannot be negative")
## Literally copied from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-441
class repeat:
def __init__(self, obj, times=None):
self._obj = obj
if times is not None:
range(times) # Raise a TypeError
if times < 0:
times = 0
self._times = times
def __iter__(self):
return self
def __next__(self):
        # __next__() *needs* to decrement self._times when consumed
if self._times is not None:
if self._times <= 0:
raise StopIteration()
self._times -= 1
return self._obj
def __repr__(self):
if self._times is not None:
return 'repeat(%r, %r)' % (self._obj, self._times)
else:
return 'repeat(%r)' % (self._obj,)
def __len__(self):
if self._times == -1 or self._times is None:
raise TypeError("len() of uniszed object")
return self._times
## Adapted from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-489
class starmap(object):
def __init__(self, function, iterable):
self._func = function
self._iter = iter(iterable)
def __iter__(self):
return self
def __next__(self):
t = next(self._iter)
return self._func(*t)
## Literally copied from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-520
class takewhile(object):
def __init__(self, predicate, iterable):
self._predicate = predicate
self._iter = iter(iterable)
def __iter__(self):
return self
def __next__(self):
value = next(self._iter)
if not self._predicate(value):
raise StopIteration()
return value
## Almost literal from
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-547
class TeeData(object):
def __init__(self, iterator):
self.data = []
self._iter = iterator
def __getitem__(self, i):
# iterates until 'i' if not done yet
while i>= len(self.data):
self.data.append(next(self._iter))
return self.data[i]
class TeeObject(object):
def __init__(self, iterable=None, tee_data=None):
if tee_data:
self.tee_data = tee_data
self.pos = 0
# <=> Copy constructor
elif isinstance(iterable, TeeObject):
self.tee_data = iterable.tee_data
self.pos = iterable.pos
else:
self.tee_data = TeeData(iter(iterable))
self.pos = 0
def __next__(self):
data = self.tee_data[self.pos]
self.pos += 1
return data
def __iter__(self):
return self
def tee(iterable, n=2):
if isinstance(iterable, TeeObject):
return tuple([iterable] +
[TeeObject(tee_data=iterable.tee_data) for i in range(n - 1)])
tee_data = TeeData(iter(iterable))
return tuple([TeeObject(tee_data=tee_data) for i in range(n)])
class zip_longest:
def __init__(self, *args, fillvalue = None):
self.args = args
self.fillvalue = fillvalue
self.max_length = max([len(arg) for arg in self.args])
self.units = len(args)
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter == self.max_length:
raise StopIteration
else:
temp = []
for i in range(self.units):
try:
temp.append(self.args[i][self.counter])
except:
temp.append(self.fillvalue)
self.counter = self.counter + 1
return tuple(temp)
|
nop33/indico
|
refs/heads/master
|
bin/utils/apiProxy.py
|
2
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import hashlib
import hmac
import optparse
import requests
import sys
import time
import urllib
from contextlib import closing
from flask import Flask, request, Response, abort
from werkzeug.datastructures import MultiDict
app = Flask(__name__)
def build_indico_request(path, params, api_key=None, secret_key=None, only_public=False):
items = params.items() if hasattr(params, 'items') else list(params)
if api_key:
items.append(('apikey', api_key))
if only_public:
items.append(('onlypublic', 'yes'))
if secret_key:
items.append(('timestamp', str(int(time.time()))))
items = sorted(items, key=lambda x: x[0].lower())
url = '%s?%s' % (path, urllib.urlencode(items))
signature = hmac.new(secret_key, url, hashlib.sha1).hexdigest()
items.append(('signature', signature))
return items
def indico_request(path):
request_values = MultiDict(request.values)
method = request_values.pop('_method', request.method).upper()
params = request_values.items(multi=True)
data = build_indico_request(path, params, app.config['INDICO_API_KEY'], app.config['INDICO_SECRET_KEY'])
request_args = {'params': data} if method == 'GET' else {'data': data}
try:
response = requests.request(method, app.config['INDICO_URL'] + path, verify=False, **request_args)
except requests.HTTPError as e:
response = e.response
except requests.ConnectionError as e:
return 'text/plain', str(e)
content_type = response.headers.get('Content-Type', '').split(';')[0]
with closing(response):
return content_type, response.text
@app.route('/')
def index():
return 'Please add an Indico HTTP API request to the path.'
@app.route('/<path:path>', methods=('GET', 'POST'))
def indicoproxy(path):
if app.config['ALLOWED_IPS'] and request.remote_addr not in app.config['ALLOWED_IPS']:
abort(403)
content_type, resp = indico_request('/' + path)
if not content_type:
print 'WARNING: Did not receive a content type, falling back to text/plain'
content_type = 'text/plain'
return Response(resp, mimetype=content_type)
def main():
parser = optparse.OptionParser()
parser.add_option('-H', '--host', dest='host', default='127.0.0.1',
help='Host to listen on')
parser.add_option('-p', '--port', type='int', dest='port', default=10081,
help='Port to listen on')
parser.add_option('-d', '--debug', action='store_true', dest='debug', help='Debug mode')
parser.add_option('--force-evalex', action='store_true', dest='evalex',
help='Enable evalex (remote code execution) even when listening on a host' +
' that is not localhost - use with care!')
parser.add_option('-I', '--allow-ip', dest='allowed_ips', action='append',
help='Only allow the given IP to access the script. Can be used multiple times.')
parser.add_option('-i', '--indico', dest='indico_url', default='https://indico.cern.ch',
help='The base URL of your indico installation')
parser.add_option('-a', '--apikey', dest='api_key', help='The API key to use')
parser.add_option('-s', '--secretkey', dest='secret_key', help='The secret key to use')
options, args = parser.parse_args()
use_evalex = options.debug
if options.host not in ('::1', '127.0.0.1'):
if not options.allowed_ips:
print 'Listening on a non-loopback interface is not permitted without IP restriction!'
sys.exit(1)
if use_evalex:
if options.evalex:
print 'Binding to non-loopback host with evalex enabled.'
print 'This means anyone with access to this app is able to execute arbitrary' \
' python code!'
else:
print 'Binding to non-loopback host; disabling evalex (aka remote code execution).'
use_evalex = False
app.config['ALLOWED_IPS'] = options.allowed_ips
app.config['INDICO_URL'] = options.indico_url.rstrip('/')
app.config['INDICO_API_KEY'] = options.api_key
app.config['INDICO_SECRET_KEY'] = options.secret_key
print ' * Using indico at {}'.format(app.config['INDICO_URL'])
print ' * To use this script, simply append a valid Indico HTTP API request to the URL shown' \
' below. It MUST NOT contain an API key, secret key or timestamp!'
app.debug = options.debug
app.run(host=options.host, port=options.port, use_evalex=use_evalex)
if __name__ == '__main__':
main()
|
EmmanuelJohnson/ssquiz
|
refs/heads/master
|
flask/lib/python2.7/encodings/shift_jis.py
|
816
|
#
# shift_jis.py: Python Unicode Codec for SHIFT_JIS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jis')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='shift_jis',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
noironetworks/group-based-policy
|
refs/heads/master
|
gbpservice/contrib/nfp/configurator/lib/generic_config_constants.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
SERVICE_TYPE = 'generic_config'
EVENT_CONFIGURE_INTERFACES = 'CONFIGURE_INTERFACES'
EVENT_CLEAR_INTERFACES = 'CLEAR_INTERFACES'
EVENT_CONFIGURE_ROUTES = 'CONFIGURE_ROUTES'
EVENT_CLEAR_ROUTES = 'CLEAR_ROUTES'
EVENT_CONFIGURE_HEALTHMONITOR = 'CONFIGURE_HEALTHMONITOR'
EVENT_CLEAR_HEALTHMONITOR = 'CLEAR_HEALTHMONITOR'
# REVISIT: Need to make this configurable
MAX_FAIL_COUNT = 5
INITIAL = 'initial'
FOREVER = 'forever'
DEVICE_TO_BECOME_DOWN = 'DEVICE_TO_BECOME_DOWN'
DEVICE_TO_BECOME_UP = 'DEVICE_TO_BECOME_UP'
PERIODIC_HM = 'periodic_healthmonitor'
DEVICE_NOT_REACHABLE = 'PERIODIC_HM_DEVICE_NOT_REACHABLE'
DEVICE_REACHABLE = 'PERIODIC_HM_DEVICE_REACHABLE'
# POLLING EVENTS SPACING AND MAXRETRIES
EVENT_CONFIGURE_HEALTHMONITOR_SPACING = 10 # unit in sec.
EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY = 100
|
Frenzie/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/walla.py
|
123
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
xpath_text,
int_or_none,
)
class WallaIE(InfoExtractor):
_VALID_URL = r'http://vod\.walla\.co\.il/[^/]+/(?P<id>\d+)/(?P<display_id>.+)'
_TEST = {
'url': 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one',
'info_dict': {
'id': '2642630',
'display_id': 'one-direction-all-for-one',
'ext': 'flv',
'title': 'וואן דיירקשן: ההיסטריה',
'description': 'md5:de9e2512a92442574cdb0913c49bc4d8',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 3600,
},
'params': {
# rtmp download
'skip_download': True,
}
}
_SUBTITLE_LANGS = {
'עברית': 'heb',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
video = self._download_xml(
'http://video2.walla.co.il/?w=null/null/%s/@@/video/flv_pl' % video_id,
display_id)
item = video.find('./items/item')
title = xpath_text(item, './title', 'title')
description = xpath_text(item, './synopsis', 'description')
thumbnail = xpath_text(item, './preview_pic', 'thumbnail')
duration = int_or_none(xpath_text(item, './duration', 'duration'))
subtitles = {}
for subtitle in item.findall('./subtitles/subtitle'):
lang = xpath_text(subtitle, './title')
subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = [{
'ext': 'srt',
'url': xpath_text(subtitle, './src'),
}]
formats = []
for quality in item.findall('./qualities/quality'):
format_id = xpath_text(quality, './title')
fmt = {
'url': 'rtmp://wafla.walla.co.il/vod',
'play_path': xpath_text(quality, './src'),
'player_url': 'http://isc.walla.co.il/w9/swf/video_swf/vod/WallaMediaPlayerAvod.swf',
'page_url': url,
'ext': 'flv',
'format_id': xpath_text(quality, './title'),
}
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
|
onestarshang/flask_super_config
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/response.py
|
928
|
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
|
hevel/goflow
|
refs/heads/master
|
docs/source/modules/gen_modules.py
|
10
|
import sys, os
from Cheetah.Template import Template
template ='''
.. _${mod.name}:
:mod:`${mod.name}` -- ${mod.synopsis}
================================================================================
.. module:: ${mod.name}
:synopsis: ${mod.synopsis}
'''
template_ ='''
.. _${mod.name}:
:mod:`${mod.name}` -- ${mod.synopsis}
================================================================================
.. automodule:: ${mod.name}
:members:
:undoc-members:
:inherited-members:
'''
lst = [
("goflow.rst","primary module containing other goflow submodules."),
("goflow.graphics.rst","early goflow graphics module"),
("goflow.graphics.models.rst","datamodels for graphics processing"),
("goflow.graphics.views.rst","goflow graphics views"),
("goflow.graphics.urls_admin.rst","goflow graphics custom admin interface"),
("goflow.graphics2.rst","latest goflow graphics module"),
("goflow.graphics2.models.rst","datamodels for graphics2 processing"),
("goflow.graphics2.views.rst","view functions for graphics2 module"),
("goflow.instances.rst","goflow runtime"),
("goflow.instances.api.rst","goflow runtime api"),
("goflow.instances.forms.rst","goflow runtime forms"),
("goflow.instances.models.rst","goflow runtime models"),
("goflow.instances.views.rst","goflow runtime views"),
("goflow.instances.urls.rst","goflow runtime urls"),
("goflow.instances.urls_admin.rst","goflow runtime custom admin interface"),
("goflow.workflow.rst","goflow core workflow functionality"),
("goflow.workflow.api.rst","key functions for workflow management"),
("goflow.workflow.applications.rst","key application function for workflow mgmt"),
("goflow.workflow.decorators.rst","goflow decorator library"),
("goflow.workflow.forms.rst","goflow form utility functions"),
("goflow.workflow.logger.rst","logging capability"),
("goflow.workflow.models.rst","workflow models"),
("goflow.workflow.notification.rst","workflow notification library"),
("goflow.workflow.pushapps.rst","example goflow pushapps"),
("goflow.workflow.views.rst","views for goflow worklow module"),
]
def main():
results=[]
for fname, synopsis in lst:
mod = dict(name=fname[:-4], file=fname, synopsis=synopsis)
out = file(fname, 'w')
out.write(str(Template(template, searchList=[dict(mod=mod)])))
out.close()
if __name__ == '__main__':
main()
|
yoer/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/distinct_on_fields/tests.py
|
117
|
from __future__ import absolute_import
from django.db.models import Max
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import str_prefix
from .models import Tag, Celebrity, Fan, Staff, StaffTag
class DistinctOnTests(TestCase):
def setUp(self):
t1 = Tag.objects.create(name='t1')
t2 = Tag.objects.create(name='t2', parent=t1)
t3 = Tag.objects.create(name='t3', parent=t1)
t4 = Tag.objects.create(name='t4', parent=t3)
t5 = Tag.objects.create(name='t5', parent=t3)
p1_o1 = Staff.objects.create(id=1, name="p1", organisation="o1")
p2_o1 = Staff.objects.create(id=2, name="p2", organisation="o1")
p3_o1 = Staff.objects.create(id=3, name="p3", organisation="o1")
p1_o2 = Staff.objects.create(id=4, name="p1", organisation="o2")
p1_o1.coworkers.add(p2_o1, p3_o1)
StaffTag.objects.create(staff=p1_o1, tag=t1)
StaffTag.objects.create(staff=p1_o1, tag=t1)
celeb1 = Celebrity.objects.create(name="c1")
celeb2 = Celebrity.objects.create(name="c2")
self.fan1 = Fan.objects.create(fan_of=celeb1)
self.fan2 = Fan.objects.create(fan_of=celeb1)
self.fan3 = Fan.objects.create(fan_of=celeb2)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_basic_distinct_on(self):
"""QuerySet.distinct('field', ...) works"""
# (qset, expected) tuples
qsets = (
(
Staff.objects.distinct().order_by('name'),
['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Staff.objects.distinct('name').order_by('name'),
['<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Staff.objects.distinct('organisation').order_by('organisation', 'name'),
['<Staff: p1>', '<Staff: p1>'],
),
(
Staff.objects.distinct('name', 'organisation').order_by('name', 'organisation'),
['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Celebrity.objects.filter(fan__in=[self.fan1, self.fan2, self.fan3]).\
distinct('name').order_by('name'),
['<Celebrity: c1>', '<Celebrity: c2>'],
),
# Does combining querysets work?
(
(Celebrity.objects.filter(fan__in=[self.fan1, self.fan2]).\
distinct('name').order_by('name')
|Celebrity.objects.filter(fan__in=[self.fan3]).\
distinct('name').order_by('name')),
['<Celebrity: c1>', '<Celebrity: c2>'],
),
(
StaffTag.objects.distinct('staff','tag'),
['<StaffTag: t1 -> p1>'],
),
(
Tag.objects.order_by('parent__pk', 'pk').distinct('parent'),
['<Tag: t2>', '<Tag: t4>', '<Tag: t1>'],
),
(
StaffTag.objects.select_related('staff').distinct('staff__name').order_by('staff__name'),
['<StaffTag: t1 -> p1>'],
),
# Fetch the alphabetically first coworker for each worker
(
(Staff.objects.distinct('id').order_by('id', 'coworkers__name').
values_list('id', 'coworkers__name')),
[str_prefix("(1, %(_)s'p2')"), str_prefix("(2, %(_)s'p1')"),
str_prefix("(3, %(_)s'p1')"), "(4, None)"]
),
)
for qset, expected in qsets:
self.assertQuerysetEqual(qset, expected)
self.assertEqual(qset.count(), len(expected))
# Combining queries with different distinct_fields is not allowed.
base_qs = Celebrity.objects.all()
self.assertRaisesMessage(
AssertionError,
"Cannot combine queries with different distinct fields.",
lambda: (base_qs.distinct('id') & base_qs.distinct('name'))
)
# Test join unreffing
c1 = Celebrity.objects.distinct('greatest_fan__id', 'greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(c1.query))
c2 = c1.distinct('pk')
self.assertNotIn('OUTER JOIN', str(c2.query))
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_not_implemented_checks(self):
# distinct + annotate not allowed
with self.assertRaises(NotImplementedError):
Celebrity.objects.annotate(Max('id')).distinct('id')[0]
with self.assertRaises(NotImplementedError):
Celebrity.objects.distinct('id').annotate(Max('id'))[0]
# However this check is done only when the query executes, so you
# can use distinct() to remove the fields before execution.
Celebrity.objects.distinct('id').annotate(Max('id')).distinct()[0]
# distinct + aggregate not allowed
with self.assertRaises(NotImplementedError):
Celebrity.objects.distinct('id').aggregate(Max('id'))
|
minixalpha/SourceLearning
|
refs/heads/master
|
wsgiref-0.1.2/wsgi_demo/server.py
|
1
|
#!/usr/bin/env python
#coding: utf-8
# Demo of WSGI Server
def run(application):
# environ must be a Python dict object
environ = {}
cur_response_headers = []
# set environ
def write(data):
pass
def _response_headers_legal(response_headers):
return True
def start_response(status, response_headers, exc_info=None):
if _response_headers_legal(response_headers):
pass
cur_response_headers = response_headers
return write
    result = None
    try:
        result = application(environ, start_response)
        if hasattr(result, '__len__'):
            # result must be accumulated
            pass
        for data in result:
            write(data)
    finally:
        # per WSGI, close() is called only after the result has been consumed
        if hasattr(result, 'close'):
            result.close()
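# A minimal, hypothetical application to exercise the demo server above;
# the handler name and response text are invented for illustration.
def demo_app(environ, start_response):
    status = '200 OK'
    response_headers = [('Content-Type', 'text/plain')]
    start_response(status, response_headers)
    # WSGI requires an iterable of byte strings as the response body
    return ['Hello from the WSGI demo\n']
if __name__ == '__main__':
    run(demo_app)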
|
flyapen/UgFlu
|
refs/heads/master
|
flumotion/test/test_component_feeder.py
|
4
|
# -*- Mode: Python; test-case-name: flumotion.test.test_feedcomponent010 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import time
from flumotion.common import testsuite
from twisted.internet import defer, reactor
from flumotion.component import feeder
class TestFeeder(testsuite.TestCase):
def setUp(self):
self.feeder = feeder.Feeder('video:default')
def test_clientConnected(self):
clientId = '/default/muxer-video'
client = self.feeder.clientConnected(clientId, 3, lambda _: None)
clients = self.feeder.getClients()
self.failUnless(client in clients)
self.assertEquals(client.uiState.get('client-id'), clientId)
def testReconnect(self):
clientId = '/default/muxer-video'
        # This is needed because disconnecting clients only updates the stats in
# a callFromThread (which is roughly the same as a callLater).
d = defer.Deferred()
def checkClientDisconnected():
self.clientAssertStats(c, 0, 0, 10, 1, 1)
# connect again
self.feeder.clientConnected(clientId, 3, lambda _: None)
self.clientAssertStats(c, 0, 0, 10, 1, 2)
# read 20 bytes, drop 2 buffers
c.setStats((20, None, None, None, time.time(), 2))
self.clientAssertStats(c, 20, 2, 30, 3, 2)
d.callback(None)
# connect
c = self.feeder.clientConnected(clientId, 3, lambda _: None)
# verify some stuff
self.clientAssertStats(c, 0, None, 0, None, 1)
# read 10 bytes, drop 1 buffer
c.setStats((10, None, None, None, time.time(), 1))
self.clientAssertStats(c, 10, 1, 10, 1, 1)
# remove client
self.feeder.clientDisconnected(3)
reactor.callLater(0, checkClientDisconnected)
return d
def clientAssertEquals(self, client, key, value):
self.assertEquals(client.uiState.get(key), value)
def clientAssertStats(self, client, brc, bdc, brt, bdt, reconnects):
self.clientAssertEquals(client, 'bytes-read-current', brc)
self.clientAssertEquals(client, 'buffers-dropped-current', bdc)
self.clientAssertEquals(client, 'bytes-read-total', brt)
self.clientAssertEquals(client, 'buffers-dropped-total', bdt)
self.clientAssertEquals(client, 'reconnects', reconnects)
|
bgnori/gnubg
|
refs/heads/master
|
scripts/db_import.py
|
1
|
#
# db_import.py -- batch import of multiple sgf files into relational database
#
# by Jon Kinsey <Jon_Kinsey@hotmail.com>, 2004
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 3 or later of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id: db_import.py,v 1.2 2007/07/02 12:50:13 ace Exp $
"""
db_import.py -- batch import of multiple sgf files into relational database
by Jon Kinsey <Jon_Kinsey@hotmail.com>, 2004
\n"""
import os
def GetFiles(dir):
"Look for gnubg import files in dir"
try:
files = os.listdir(dir)
except:
print " ** Directory not found **"
return 0
fileList = []
foundAnyFile = False
foundBGFile = False
# Check each file in dir
for file in files:
# Check it's a file (not a directory)
if os.path.isfile(dir + file):
foundAnyFile = True
# Check has supported extension
dot = file.rfind('.')
if dot != -1:
ext = file[dot + 1 : ].lower()
if ext == "sgf":
foundBGFile = True
fileList.append(file)
if foundBGFile:
return fileList
else:
if not foundAnyFile:
print " ** No files in directory **"
else:
print " ** No sgf files found in directory **"
return 0
def ImportFile(prompt, file, dir):
"Run commands to import stats into gnubg"
print prompt + " Importing " + file
gnubg.command('load match "' + dir + file + '"')
gnubg.command('relational add match')
def GetYN(prompt):
confirm = ''
while len(confirm) == 0 or (confirm[0] != 'y' and confirm[0] != 'n'):
confirm = raw_input(prompt + " (y/n): ").lower()
return confirm
def GetDir(prompt):
dir = raw_input(prompt)
if dir:
# Make sure dir ends in a slash
if (dir[-1] != '\\' and dir[-1] != '/'):
dir = dir + '/'
return dir
def BatchImport():
"Import stats for all sgf files in a directory"
inFiles = []
while not inFiles:
# Get directory with original files in
dir = GetDir("Directory containing files to import (enter to exit): ")
if not dir:
return
# Look for some files
inFiles = GetFiles(dir)
# Display files that will be analyzed
for file in inFiles:
print " " + file
print "\n", len(inFiles), "files found\n"
# Check user wants to continue
if GetYN("Continue") == 'n':
return
# Get stats for each file
num = 0
for file in inFiles:
num = num + 1
prompt = "(%d/%d)" % (num, len(inFiles))
ImportFile(prompt, file, dir)
print "\n** Finished **"
return
# Run batchimport on load
try:
print __doc__
BatchImport()
except Exception, (e):
print "Error:", e
|
lightos/LaZagne
|
refs/heads/master
|
Windows/src/LaZagne/config/dico.py
|
22
|
def get_dico():
return [
"password",
"123456",
"12345678",
"1234",
"qwerty",
"12345",
"dragon",
"pussy",
"baseball",
"football",
"letmein",
"monkey",
"696969",
"abc123",
"mustang",
"michael",
"shadow",
"master",
"jennifer",
"111111",
"2000",
"jordan",
"superman",
"harley",
"1234567",
"fuckme",
"hunter",
"fuckyou",
"trustno1",
"ranger",
"buster",
"thomas",
"tigger",
"robert",
"soccer",
"fuck",
"batman",
"test",
"pass",
"killer",
"hockey",
"george",
"charlie",
"andrew",
"michelle",
"love",
"sunshine",
"jessica",
"asshole",
"6969",
"pepper",
"daniel",
"access",
"123456789",
"654321",
"joshua",
"maggie",
"starwars",
"silver",
"william",
"dallas",
"yankees",
"123123",
"ashley",
"666666",
"hello",
"amanda",
"orange",
"biteme",
"freedom",
"computer",
"sexy",
"thunder",
"nicole",
"ginger",
"heather",
"hammer",
"summer",
"corvette",
"taylor",
"fucker",
"austin",
"1111",
"merlin",
"matthew",
"121212",
"golfer",
"cheese",
"princess",
"martin",
"chelsea",
"patrick",
"richard",
"diamond",
"yellow",
"bigdog",
"secret",
"asdfgh",
"sparky",
"cowboy",
"camaro",
"anthony",
"matrix",
"falcon",
"iloveyou",
"bailey",
"guitar",
"jackson",
"purple",
"scooter",
"phoenix",
"aaaaaa",
"morgan",
"tigers",
"porsche",
"mickey",
"maverick",
"cookie",
"nascar",
"peanut",
"justin",
"131313",
"money",
"horny",
"samantha",
"panties",
"steelers",
"joseph",
"snoopy",
"boomer",
"whatever",
"iceman",
"smokey",
"gateway",
"dakota",
"cowboys",
"eagles",
"chicken",
"dick",
"black",
"zxcvbn",
"please",
"andrea",
"ferrari",
"knight",
"hardcore",
"melissa",
"compaq",
"coffee",
"booboo",
"bitch",
"johnny",
"bulldog",
"xxxxxx",
"welcome",
"james",
"player",
"ncc1701",
"wizard",
"scooby",
"charles",
"junior",
"internet",
"bigdick",
"mike",
"brandy",
"tennis",
"blowjob",
"banana",
"monster",
"spider",
"lakers",
"miller",
"rabbit",
"enter",
"mercedes",
"brandon",
"steven",
"fender",
"john",
"yamaha",
"diablo",
"chris",
"boston",
"tiger",
"marine",
"chicago",
"rangers",
"gandalf",
"winter",
"bigtits",
"barney",
"edward",
"raiders",
"porn",
"badboy",
"blowme",
"spanky",
"bigdaddy",
"johnson",
"chester",
"london",
"midnight",
"blue",
"fishing",
"000000",
"hannah",
"slayer",
"11111111",
"rachel",
"sexsex",
"redsox",
"thx1138",
"asdf",
"marlboro",
"panther",
"zxcvbnm",
"arsenal",
"oliver",
"qazwsx",
"mother",
"victoria",
"7777777",
"jasper",
"angel",
"david",
"winner",
"crystal",
"golden",
"butthead",
"viking",
"jack",
"iwantu",
"shannon",
"murphy",
"angels",
"prince",
"cameron",
"girls",
"madison",
"wilson",
"carlos",
"hooters",
"willie",
"startrek",
"captain",
"maddog",
"jasmine",
"butter",
"booger",
"angela",
"golf",
"lauren",
"rocket",
"tiffany",
"theman",
"dennis",
"liverpoo",
"flower",
"forever",
"green",
"jackie",
"muffin",
"turtle",
"sophie",
"danielle",
"redskins",
"toyota",
"jason",
"sierra",
"winston",
"debbie",
"giants",
"packers",
"newyork",
"jeremy",
"casper",
"bubba",
"112233",
"sandra",
"lovers",
"mountain",
"united",
"cooper",
"driver",
"tucker",
"helpme",
"fucking",
"pookie",
"lucky",
"maxwell",
"8675309",
"bear",
"suckit",
"gators",
"5150",
"222222",
"shithead",
"fuckoff",
"jaguar",
"monica",
"fred",
"happy",
"hotdog",
"tits",
"gemini",
"lover",
"xxxxxxxx",
"777777",
"canada",
"nathan",
"victor",
"florida",
"88888888",
"nicholas",
"rosebud",
"metallic",
"doctor",
"trouble",
"success",
"stupid",
"tomcat",
"warrior",
"peaches",
"apples",
"fish",
"qwertyui",
"magic",
"buddy",
"dolphins",
"rainbow",
"gunner",
"987654",
"freddy",
"alexis",
"braves",
"cock",
"2112",
"1212",
"cocacola",
"xavier",
"dolphin",
"testing",
"bond007",
"member",
"calvin",
"voodoo",
"7777",
"samson",
"alex",
"apollo",
"fire",
"tester",
"walter",
"beavis",
"voyager",
"peter",
"porno",
"bonnie",
"rush2112",
"beer",
"apple",
"scorpio",
"jonathan",
"skippy",
"sydney",
"scott",
"red123",
"power",
"gordon",
"travis",
"beaver",
"star",
"jackass",
"flyers",
"boobs",
"232323",
"zzzzzz",
"steve",
"rebecca",
"scorpion",
"doggie",
"legend",
"ou812",
"yankee",
"blazer",
"bill",
"runner",
"birdie",
"bitches",
"555555",
"parker",
"topgun",
"asdfasdf",
"heaven",
"viper",
"animal",
"2222",
"bigboy",
"4444",
"arthur",
"baby",
"private",
"godzilla",
"donald",
"williams",
"lifehack",
"phantom",
"dave",
"rock",
"august",
"sammy",
"cool",
"brian",
"platinum",
"jake",
"bronco",
"paul",
"mark",
"frank",
"heka6w2",
"copper",
"billy",
"cumshot",
"garfield",
"willow",
"cunt",
"little",
"carter",
"slut",
"albert",
"69696969",
"kitten",
"super",
"jordan23",
"eagle1",
"shelby",
"america",
"11111",
"jessie",
"house",
"free",
"123321",
"chevy",
"bullshit",
"white",
"broncos",
"horney",
"surfer",
"nissan",
"999999",
"saturn",
"airborne",
"elephant",
"marvin",
"shit",
"action",
"adidas",
"qwert",
"kevin",
"1313",
"explorer",
"walker",
"police",
"christin",
"december",
"benjamin",
"wolf",
"sweet",
"therock",
"king",
"online",
"dickhead",
"brooklyn",
"teresa",
"cricket",
"sharon",
"dexter",
"racing",
"penis",
"gregory",
"0000",
"teens",
"redwings",
"dreams",
"michigan",
"hentai",
"magnum",
"87654321",
"nothing",
"donkey",
"trinity",
"digital",
"333333",
"stella",
"cartman",
"guinness",
"123abc",
"speedy",
"buffalo",
"kitty"]
|
zencodex/csip
|
refs/heads/master
|
CSipSimple/jni/pjsip/sources/tests/pjsua/scripts-sendto/361_non_sip_uri.py
|
59
|
# $Id: 361_non_sip_uri.py 2392 2008-12-22 18:54:58Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
# No SIP URI in Contact header
#
complete_msg = \
"""INVITE sip:localhost SIP/2.0
Via: SIP/2.0/UDP 192.168.0.14:5060;rport;branch=z9hG4bKPj9db9
Max-Forwards: 70
From: <sip:192.168.0.14>;tag=08cd5bfc2d8a4fddb1f5e59c6961d298
To: <sip:localhost>
Call-ID: 3373d9eb32aa458db7e69c7ea51e0bd7
CSeq: 0 INVITE
Contact: mailto:dontspam@pjsip.org
Contact: <mailto:dontspam@pjsip.org>
Contact: http://www.pjsip.org/the%20path.cgi?pname=pvalue
User-Agent: PJSUA v0.9.0-trunk/win32
Content-Length: 0
"""
sendto_cfg = sip.SendtoCfg( "No SIP URI in Contact",
"--null-audio --auto-answer 200",
"", 500, complete_msg=complete_msg)
|
hcsturix74/django
|
refs/heads/master
|
tests/messages_tests/test_cookie.py
|
299
|
import json
from django.contrib.messages import constants
from django.contrib.messages.storage.base import Message
from django.contrib.messages.storage.cookie import (
CookieStorage, MessageDecoder, MessageEncoder,
)
from django.test import SimpleTestCase, override_settings
from django.utils.safestring import SafeData, mark_safe
from .base import BaseTests
def set_cookie_data(storage, messages, invalid=False, encode_empty=False):
"""
Sets ``request.COOKIES`` with the encoded data and removes the storage
backend's loaded data cache.
"""
encoded_data = storage._encode(messages, encode_empty=encode_empty)
if invalid:
# Truncate the first character so that the hash is invalid.
encoded_data = encoded_data[1:]
storage.request.COOKIES = {CookieStorage.cookie_name: encoded_data}
if hasattr(storage, '_loaded_data'):
del storage._loaded_data
def stored_cookie_messages_count(storage, response):
"""
Returns an integer containing the number of messages stored.
"""
# Get a list of cookies, excluding ones with a max-age of 0 (because
# they have been marked for deletion).
cookie = response.cookies.get(storage.cookie_name)
if not cookie or cookie['max-age'] == 0:
return 0
data = storage._decode(cookie.value)
if not data:
return 0
if data[-1] == CookieStorage.not_finished:
data.pop()
return len(data)
@override_settings(SESSION_COOKIE_DOMAIN='.example.com', SESSION_COOKIE_SECURE=True, SESSION_COOKIE_HTTPONLY=True)
class CookieTest(BaseTests, SimpleTestCase):
storage_class = CookieStorage
def stored_messages_count(self, storage, response):
return stored_cookie_messages_count(storage, response)
def test_get(self):
storage = self.storage_class(self.get_request())
# Set initial data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_cookie_settings(self):
"""
Ensure that CookieStorage honors SESSION_COOKIE_DOMAIN, SESSION_COOKIE_SECURE and SESSION_COOKIE_HTTPONLY
Refs #15618 and #20972.
"""
# Test before the messages have been consumed
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'test')
storage.update(response)
self.assertIn('test', response.cookies['messages'].value)
self.assertEqual(response.cookies['messages']['domain'], '.example.com')
self.assertEqual(response.cookies['messages']['expires'], '')
self.assertEqual(response.cookies['messages']['secure'], True)
self.assertEqual(response.cookies['messages']['httponly'], True)
# Test deletion of the cookie (storing with an empty value) after the messages have been consumed
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'test')
for m in storage:
pass # Iterate through the storage to simulate consumption of messages.
storage.update(response)
self.assertEqual(response.cookies['messages'].value, '')
self.assertEqual(response.cookies['messages']['domain'], '.example.com')
self.assertEqual(response.cookies['messages']['expires'], 'Thu, 01-Jan-1970 00:00:00 GMT')
def test_get_bad_cookie(self):
request = self.get_request()
storage = self.storage_class(request)
# Set initial (invalid) data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages, invalid=True)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), [])
def test_max_cookie_length(self):
"""
Tests that, if the data exceeds what is allowed in a cookie, older
messages are removed before saving (and returned by the ``update``
method).
"""
storage = self.get_storage()
response = self.get_response()
# When storing as a cookie, the cookie has constant overhead of approx
# 54 chars, and each message has a constant overhead of about 37 chars
# and a variable overhead of zero in the best case. We aim for a message
# size which will fit 4 messages into the cookie, but not 5.
# See also FallbackTest.test_session_fallback
msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37)
for i in range(5):
storage.add(constants.INFO, str(i) * msg_size)
unstored_messages = storage.update(response)
cookie_storing = self.stored_messages_count(storage, response)
self.assertEqual(cookie_storing, 4)
self.assertEqual(len(unstored_messages), 1)
self.assertEqual(unstored_messages[0].message, '0' * msg_size)
def test_json_encoder_decoder(self):
"""
Tests that a complex nested data structure containing Message
instances is properly encoded/decoded by the custom JSON
encoder/decoder classes.
"""
messages = [
{
'message': Message(constants.INFO, 'Test message'),
'message_list': [Message(constants.INFO, 'message %s')
for x in range(5)] + [{'another-message':
Message(constants.ERROR, 'error')}],
},
Message(constants.INFO, 'message %s'),
]
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
decoded_messages = json.loads(value, cls=MessageDecoder)
self.assertEqual(messages, decoded_messages)
def test_safedata(self):
"""
Tests that a message containing SafeData keeps its safe status when
retrieved from the message storage.
"""
def encode_decode(data):
message = Message(constants.DEBUG, data)
encoded = storage._encode(message)
decoded = storage._decode(encoded)
return decoded.message
storage = self.get_storage()
self.assertIsInstance(
encode_decode(mark_safe("<b>Hello Django!</b>")), SafeData)
self.assertNotIsInstance(
encode_decode("<b>Hello Django!</b>"), SafeData)
def test_pre_1_5_message_format(self):
"""
For ticket #22426. Tests whether messages that were set in the cookie
before the addition of is_safedata are decoded correctly.
"""
# Encode the messages using the current encoder.
messages = [Message(constants.INFO, 'message %s') for x in range(5)]
encoder = MessageEncoder(separators=(',', ':'))
encoded_messages = encoder.encode(messages)
# Remove the is_safedata flag from the messages in order to imitate
# the behavior of before 1.5 (monkey patching).
encoded_messages = json.loads(encoded_messages)
for obj in encoded_messages:
obj.pop(1)
encoded_messages = json.dumps(encoded_messages, separators=(',', ':'))
# Decode the messages in the old format (without is_safedata)
decoded_messages = json.loads(encoded_messages, cls=MessageDecoder)
self.assertEqual(messages, decoded_messages)
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/CommentsPrecedingSourceStatement.before.py
|
30
|
x = 42
# print('commented')
# comment 1
# comment 2
print(<selection>x ** 2</selection>)
|
k-okada/naoqi_driver
|
refs/heads/master
|
doc/source/ToggleDirective.py
|
10
|
# Directive that shows a toggle link between sections.
# Those sections cannot contain titles
# The first one is initially shown. Here is the syntax:
# .. toggle_table::
# :arg1: text in button 1
# :arg2: text in button 2
#
# .. toggle:: text in button 1
#
# some RST text
#
#.. toggle:: text in button 2
#
# some more RST text
import docutils
from docutils import nodes
from docutils.parsers.rst import directives, roles, states
def setup(app):
app.add_directive('toggle', ToggleDirective)
app.add_directive('toggle_table', ToggleTableDirective)
class ToggleDirective(docutils.parsers.rst.Directive):
"""
Base class that will be used by the two toggle directives
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
has_content = True
node_class = nodes.container
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
# Create the node, to be populated by `nested_parse`.
node = self.node_class(rawsource=text)
label = self.arguments[0]
label_strip = label.replace(' ', '')
node += nodes.raw(self.arguments[0], '<div class="toggleable_div label_%s">' % label_strip, format="html")
# Parse the directive contents.
self.state.nested_parse(self.content, self.content_offset, node)
node += nodes.raw(self.arguments[0], '</div>', format="html")
return [node]
class ToggleTableDirective(docutils.parsers.rst.Directive):
"""
Class used to create a set of buttons to toggle different sections
"""
required_arguments = 0
optional_arguments = 10
final_argument_whitespace = True
option_spec = {}
for i in range(0, 100):
option_spec['arg' + str(i)] = str
has_content = True
node_class = nodes.container
def run(self):
js_toggle = """
function toggle(label) {
$('.toggleable_button').css({border: '2px outset', 'border-radius': '4px'});
$('.toggleable_button.label_' + label).css({border: '2px inset', 'border-radius': '4px'});
$('.toggleable_div').css('display', 'none');
$('.toggleable_div.label_' + label).css('display', 'block');
};
"""
js_ready = """
<script>
%s
$(document).ready(function() {
var classList =$('.toggleable_button').attr('class').split(/\s+/);
$.each( classList, function(index, item){
if (item.substring(0, 5) === 'label') {
toggle(item.substring(6));
};
});
});
</script>
""" % js_toggle
# Create the node, to be populated by `nested_parse`.
node = self.node_class()
for key in self.options.keys():
if key not in self.option_spec:
raise RuntimeError(key + ' not in the constructor of ToggleTableDirective, use arg0 to arg99')
label = self.options[key]
label_strip = label.replace(' ', '')
str1 = '<button class="toggleable_button label_%s" onclick="' % label_strip
str2 = js_toggle + "toggle('%s')" % label_strip
str3 = '">%s</button>' % label
node += nodes.raw(key, str1 + str2 + str3 + js_ready, format="html")
return [node]
|
arthurdarcet/tornado
|
refs/heads/master
|
tornado/platform/twisted.py
|
23
|
# Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Bridges between the Twisted reactor and Tornado IOLoop.
This module lets you run applications and libraries written for
Twisted in a Tornado application. It can be used in two modes,
depending on which library's underlying event loop you want to use.
This module has been tested with Twisted versions 11.0.0 and newer.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import functools
import numbers
import socket
import sys
import twisted.internet.abstract
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase
from twisted.internet.interfaces import \
IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor
from twisted.python import failure, log
from twisted.internet import error
import twisted.names.cache
import twisted.names.client
import twisted.names.hosts
import twisted.names.resolve
from zope.interface import implementer
from tornado.concurrent import Future
from tornado.escape import utf8
from tornado import gen
import tornado.ioloop
from tornado.log import app_log
from tornado.netutil import Resolver
from tornado.stack_context import NullContext, wrap
from tornado.ioloop import IOLoop
from tornado.util import timedelta_to_seconds
@implementer(IDelayedCall)
class TornadoDelayedCall(object):
"""DelayedCall object for Tornado."""
def __init__(self, reactor, seconds, f, *args, **kw):
self._reactor = reactor
self._func = functools.partial(f, *args, **kw)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
self._active = True
def _called(self):
self._active = False
self._reactor._removeDelayedCall(self)
try:
self._func()
except:
app_log.error("_called caught exception", exc_info=True)
def getTime(self):
return self._time
def cancel(self):
self._active = False
self._reactor._io_loop.remove_timeout(self._timeout)
self._reactor._removeDelayedCall(self)
def delay(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time += seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def reset(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def active(self):
return self._active
@implementer(IReactorTime, IReactorFDSet)
class TornadoReactor(PosixReactorBase):
"""Twisted reactor built on the Tornado IOLoop.
`TornadoReactor` implements the Twisted reactor interface on top of
the Tornado IOLoop. To use it, simply call `install` at the beginning
of the application::
import tornado.platform.twisted
tornado.platform.twisted.install()
from twisted.internet import reactor
When the app is ready to start, call ``IOLoop.current().start()``
instead of ``reactor.run()``.
It is also possible to create a non-global reactor by calling
``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
the `.IOLoop` and reactor are to be short-lived (such as those used in
unit tests), additional cleanup may be required. Specifically, it is
recommended to call::
reactor.fireSystemEvent('shutdown')
reactor.disconnectAll()
before closing the `.IOLoop`.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, io_loop=None):
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
self._io_loop = io_loop
self._readers = {} # map of reader objects to fd
self._writers = {} # map of writer objects to fd
self._fds = {} # a map of fd to a (reader, writer) tuple
self._delayedCalls = {}
PosixReactorBase.__init__(self)
self.addSystemEventTrigger('during', 'shutdown', self.crash)
# IOLoop.start() bypasses some of the reactor initialization.
# Fire off the necessary events if they weren't already triggered
# by reactor.run().
def start_if_necessary():
if not self._started:
self.fireSystemEvent('startup')
self._io_loop.add_callback(start_if_necessary)
# IReactorTime
def seconds(self):
return self._io_loop.time()
def callLater(self, seconds, f, *args, **kw):
dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
self._delayedCalls[dc] = True
return dc
def getDelayedCalls(self):
return [x for x in self._delayedCalls if x._active]
def _removeDelayedCall(self, dc):
if dc in self._delayedCalls:
del self._delayedCalls[dc]
# IReactorThreads
def callFromThread(self, f, *args, **kw):
assert callable(f), "%s is not callable" % f
with NullContext():
# This NullContext is mainly for an edge case when running
# TwistedIOLoop on top of a TornadoReactor.
# TwistedIOLoop.add_callback uses reactor.callFromThread and
# should not pick up additional StackContexts along the way.
self._io_loop.add_callback(f, *args, **kw)
# We don't need the waker code from the super class, Tornado uses
# its own waker.
def installWaker(self):
pass
def wakeUp(self):
pass
# IReactorFDSet
def _invoke_callback(self, fd, events):
if fd not in self._fds:
return
(reader, writer) = self._fds[fd]
if reader:
err = None
if reader.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.READ:
err = log.callWithLogger(reader, reader.doRead)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeReader(reader)
reader.readConnectionLost(failure.Failure(err))
if writer:
err = None
if writer.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.WRITE:
err = log.callWithLogger(writer, writer.doWrite)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeWriter(writer)
writer.writeConnectionLost(failure.Failure(err))
def addReader(self, reader):
if reader in self._readers:
# Don't add the reader if it's already there
return
fd = reader.fileno()
self._readers[reader] = fd
if fd in self._fds:
(_, writer) = self._fds[fd]
self._fds[fd] = (reader, writer)
if writer:
# We already registered this fd for write events,
# update it for read events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (reader, None)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.READ)
def addWriter(self, writer):
if writer in self._writers:
return
fd = writer.fileno()
self._writers[writer] = fd
if fd in self._fds:
(reader, _) = self._fds[fd]
self._fds[fd] = (reader, writer)
if reader:
# We already registered this fd for read events,
# update it for write events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (None, writer)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.WRITE)
def removeReader(self, reader):
if reader in self._readers:
fd = self._readers.pop(reader)
(_, writer) = self._fds[fd]
if writer:
# We have a writer so we need to update the IOLoop for
# write events only.
self._fds[fd] = (None, writer)
self._io_loop.update_handler(fd, IOLoop.WRITE)
else:
# Since we have no writer registered, we remove the
# entry from _fds and unregister the handler from the
# IOLoop
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeWriter(self, writer):
if writer in self._writers:
fd = self._writers.pop(writer)
(reader, _) = self._fds[fd]
if reader:
# We have a reader so we need to update the IOLoop for
# read events only.
self._fds[fd] = (reader, None)
self._io_loop.update_handler(fd, IOLoop.READ)
else:
# Since we have no reader registered, we remove the
# entry from the _fds and unregister the handler from
# the IOLoop.
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeAll(self):
return self._removeAll(self._readers, self._writers)
def getReaders(self):
return self._readers.keys()
def getWriters(self):
return self._writers.keys()
# The following functions are mainly used in twisted-style test cases;
# it is expected that most users of the TornadoReactor will call
# IOLoop.start() instead of Reactor.run().
def stop(self):
PosixReactorBase.stop(self)
fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown")
self._io_loop.add_callback(fire_shutdown)
def crash(self):
PosixReactorBase.crash(self)
self._io_loop.stop()
def doIteration(self, delay):
raise NotImplementedError("doIteration")
def mainLoop(self):
# Since this class is intended to be used in applications
# where the top-level event loop is ``io_loop.start()`` rather
# than ``reactor.run()``, it is implemented a little
# differently than other Twisted reactors. We override
# ``mainLoop`` instead of ``doIteration`` and must implement
# timed call functionality on top of `.IOLoop.add_timeout`
# rather than using the implementation in
# ``PosixReactorBase``.
self._io_loop.start()
class _TestReactor(TornadoReactor):
"""Subclass of TornadoReactor for use in unittests.
This can't go in the test.py file because of import-order dependencies
with the Twisted reactor test builder.
"""
def __init__(self):
# always use a new ioloop
super(_TestReactor, self).__init__(IOLoop())
def listenTCP(self, port, factory, backlog=50, interface=''):
# default to localhost to avoid firewall prompts on the mac
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenTCP(
port, factory, backlog=backlog, interface=interface)
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenUDP(
port, protocol, interface=interface, maxPacketSize=maxPacketSize)
def install(io_loop=None):
"""Install this package as the default Twisted reactor.
``install()`` must be called very early in the startup process,
before most other twisted-related imports. Conversely, because it
initializes the `.IOLoop`, it cannot be called before
`.fork_processes` or multi-process `~.TCPServer.start`. These
conflicting requirements make it difficult to use `.TornadoReactor`
in multi-process mode, and an external process manager such as
``supervisord`` is recommended instead.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
reactor = TornadoReactor(io_loop)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
@implementer(IReadDescriptor, IWriteDescriptor)
class _FD(object):
def __init__(self, fd, fileobj, handler):
self.fd = fd
self.fileobj = fileobj
self.handler = handler
self.reading = False
self.writing = False
self.lost = False
def fileno(self):
return self.fd
def doRead(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
def doWrite(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
def connectionLost(self, reason):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
self.lost = True
def logPrefix(self):
return ''
class TwistedIOLoop(tornado.ioloop.IOLoop):
"""IOLoop implementation that runs on Twisted.
`TwistedIOLoop` implements the Tornado IOLoop interface on top of
the Twisted reactor. Recommended usage::
from tornado.platform.twisted import TwistedIOLoop
from twisted.internet import reactor
TwistedIOLoop().install()
# Set up your tornado application as usual using `IOLoop.instance`
reactor.run()
Uses the global Twisted reactor by default. To create multiple
``TwistedIOLoops`` in the same process, you must pass a unique reactor
when constructing each one.
Not compatible with `tornado.process.Subprocess.set_exit_callback`
because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
with each other.
"""
def initialize(self, reactor=None, **kwargs):
super(TwistedIOLoop, self).initialize(**kwargs)
if reactor is None:
import twisted.internet.reactor
reactor = twisted.internet.reactor
self.reactor = reactor
self.fds = {}
def close(self, all_fds=False):
fds = self.fds
self.reactor.removeAll()
for c in self.reactor.getDelayedCalls():
c.cancel()
if all_fds:
for fd in fds.values():
self.close_fd(fd.fileobj)
def add_handler(self, fd, handler, events):
if fd in self.fds:
raise ValueError('fd %s added twice' % fd)
fd, fileobj = self.split_fd(fd)
self.fds[fd] = _FD(fd, fileobj, wrap(handler))
if events & tornado.ioloop.IOLoop.READ:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & tornado.ioloop.IOLoop.READ:
if not self.fds[fd].reading:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
else:
if self.fds[fd].reading:
self.fds[fd].reading = False
self.reactor.removeReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
if not self.fds[fd].writing:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
else:
if self.fds[fd].writing:
self.fds[fd].writing = False
self.reactor.removeWriter(self.fds[fd])
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.fds:
return
self.fds[fd].lost = True
if self.fds[fd].reading:
self.reactor.removeReader(self.fds[fd])
if self.fds[fd].writing:
self.reactor.removeWriter(self.fds[fd])
del self.fds[fd]
def start(self):
old_current = IOLoop.current(instance=False)
try:
self._setup_logging()
self.make_current()
self.reactor.run()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self):
self.reactor.crash()
def add_timeout(self, deadline, callback, *args, **kwargs):
# This method could be simplified (since tornado 4.0) by
# overriding call_at instead of add_timeout, but we leave it
# for now as a test of backwards-compatibility.
if isinstance(deadline, numbers.Real):
delay = max(deadline - self.time(), 0)
elif isinstance(deadline, datetime.timedelta):
delay = timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r")
return self.reactor.callLater(
delay, self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
if timeout.active():
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
self.reactor.callFromThread(
self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def add_callback_from_signal(self, callback, *args, **kwargs):
self.add_callback(callback, *args, **kwargs)
class TwistedResolver(Resolver):
"""Twisted-based asynchronous resolver.
This is a non-blocking and non-threaded resolver. It is
recommended only when threads cannot be used, since it has
limitations compared to the standard ``getaddrinfo``-based
`~tornado.netutil.Resolver` and
`~tornado.netutil.ThreadedResolver`. Specifically, it returns at
most one result, and arguments other than ``host`` and ``family``
are ignored. It may fail to resolve when ``family`` is not
``socket.AF_UNSPEC``.
Requires Twisted 12.1 or newer.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def initialize(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
# partial copy of twisted.names.client.createResolver, which doesn't
# allow for a reactor to be passed in.
self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)
host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
reactor=self.reactor)
self.resolver = twisted.names.resolve.ResolverChain(
[host_resolver, cache_resolver, real_resolver])
@gen.coroutine
def resolve(self, host, port, family=0):
# getHostByName doesn't accept IP addresses, so if the input
# looks like an IP address just return it immediately.
if twisted.internet.abstract.isIPAddress(host):
resolved = host
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(host):
resolved = host
resolved_family = socket.AF_INET6
else:
deferred = self.resolver.getHostByName(utf8(host))
resolved = yield gen.Task(deferred.addBoth)
if isinstance(resolved, failure.Failure):
resolved.raiseException()
elif twisted.internet.abstract.isIPAddress(resolved):
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(resolved):
resolved_family = socket.AF_INET6
else:
resolved_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != resolved_family:
raise Exception('Requested socket family %d but got %d' %
(family, resolved_family))
result = [
(resolved_family, (resolved, port)),
]
raise gen.Return(result)
if hasattr(gen.convert_yielded, 'register'):
@gen.convert_yielded.register(Deferred)
def _(d):
f = Future()
def errback(failure):
try:
failure.raiseException()
# Should never happen, but just in case
raise Exception("errback called without error")
except:
f.set_exc_info(sys.exc_info())
d.addCallbacks(f.set_result, errback)
return f
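# A minimal sketch (not part of the original module): one way an application
# could opt in to TwistedResolver for Tornado's DNS lookups. The helper below
# is illustrative only and is never called from this module.
def _example_use_twisted_resolver():
    from tornado.netutil import Resolver
    Resolver.configure('tornado.platform.twisted.TwistedResolver')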
|
costa23c/rdmw_cloudkit
|
refs/heads/master
|
xbgw_dashboard/__init__.py
|
35
|
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015 Digi International Inc., All Rights Reserved.
#
|
citrix-openstack-build/heat
|
refs/heads/master
|
doc/source/conf.py
|
5
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Heat documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 13 11:23:35 2012.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)
# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
def write_autodoc_index():
def find_autodoc_modules(module_name, sourcedir):
"""Return a list of modules in the SOURCE directory."""
modlist = []
os.chdir(os.path.join(sourcedir, module_name))
print("SEARCHING %s" % sourcedir)
for root, dirs, files in os.walk("."):
for filename in files:
if filename.endswith(".py"):
# remove the pieces of the root
elements = root.split(os.path.sep)
# replace the leading "." with the module name
elements[0] = module_name
# and get the base module name
base, extension = os.path.splitext(filename)
if not (base == "__init__"):
elements.append(base)
result = ".".join(elements)
#print(result)
modlist.append(result)
return modlist
RSTDIR = os.path.abspath(os.path.join(BASE_DIR, "sourcecode"))
SRCS = {'heat': ROOT}
EXCLUDED_MODULES = ('heat.tests',
'heat.testing',
'heat.cmd',
'heat.common',
'heat.cloudinit',
'heat.cfn_client',
'heat.doc',
'heat.db',
'heat.engine.resources',
'heat.locale',
'heat.openstack')
CURRENT_SOURCES = {}
if not(os.path.exists(RSTDIR)):
os.mkdir(RSTDIR)
CURRENT_SOURCES[RSTDIR] = ['autoindex.rst', '.gitignore']
INDEXOUT = open(os.path.join(RSTDIR, "autoindex.rst"), "w")
INDEXOUT.write("=================\n")
INDEXOUT.write("Source Code Index\n")
INDEXOUT.write("=================\n")
for modulename, path in SRCS.items():
sys.stdout.write("Generating source documentation for %s\n" %
modulename)
INDEXOUT.write("\n%s\n" % modulename.capitalize())
INDEXOUT.write("%s\n" % ("=" * len(modulename),))
INDEXOUT.write(".. toctree::\n")
INDEXOUT.write(" :maxdepth: 1\n")
INDEXOUT.write("\n")
MOD_DIR = os.path.join(RSTDIR, modulename)
CURRENT_SOURCES[MOD_DIR] = []
if not(os.path.exists(MOD_DIR)):
os.mkdir(MOD_DIR)
for module in find_autodoc_modules(modulename, path):
if any([module.startswith(exclude)
for exclude
in EXCLUDED_MODULES]):
print("Excluded module %s." % module)
continue
mod_path = os.path.join(path, *module.split("."))
generated_file = os.path.join(MOD_DIR, "%s.rst" % module)
INDEXOUT.write(" %s/%s\n" % (modulename, module))
# Find the __init__.py module if this is a directory
if os.path.isdir(mod_path):
source_file = ".".join((os.path.join(mod_path, "__init__"),
"py",))
else:
source_file = ".".join((os.path.join(mod_path), "py"))
CURRENT_SOURCES[MOD_DIR].append("%s.rst" % module)
# Only generate a new file if the source has changed or we don't
# have a doc file to begin with.
if not os.access(generated_file, os.F_OK) or \
os.stat(generated_file).st_mtime < \
os.stat(source_file).st_mtime:
print("Module %s updated, generating new documentation."
% module)
FILEOUT = open(generated_file, "w")
header = "The :mod:`%s` Module" % module
FILEOUT.write("%s\n" % ("=" * len(header),))
FILEOUT.write("%s\n" % header)
FILEOUT.write("%s\n" % ("=" * len(header),))
FILEOUT.write(".. automodule:: %s\n" % module)
FILEOUT.write(" :members:\n")
FILEOUT.write(" :undoc-members:\n")
FILEOUT.write(" :show-inheritance:\n")
FILEOUT.write(" :noindex:\n")
FILEOUT.close()
INDEXOUT.close()
# Delete auto-generated .rst files for sources which no longer exist
for directory, subdirs, files in list(os.walk(RSTDIR)):
for old_file in files:
if old_file not in CURRENT_SOURCES.get(directory, []):
print("Removing outdated file for %s" % old_file)
os.remove(os.path.join(directory, old_file))
write_autodoc_index()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'oslo.sphinx',
'heat.doc.resources']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Heat'
copyright = u'2012,2013 Heat Developers'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/#*', '**~', '**/#*#']
# The reST default role (used for this markup: `text`)
# to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
primary_domain = 'py'
nitpicky = False
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ['.']
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"nosidebar": "false"
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Heatdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'Heat.tex', u'Heat Documentation',
u'Heat Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('man/heat-api', 'heat-api',
u'REST API service to the heat project.',
[u'Heat Developers'], 1),
('man/heat-api-cfn', 'heat-api-cfn',
u'CloudFormation compatible API service to the heat project.',
[u'Heat Developers'], 1),
('man/heat-api-cloudwatch', 'heat-api-cloudwatch',
u'CloudWatch alike API service to the heat project',
[u'Heat Developers'], 1),
('man/heat-db-setup', 'heat-db-setup',
u'Command line utility to setup the Heat database',
[u'Heat Developers'], 1),
('man/heat-engine', 'heat-engine',
u'Service which performs the actions from the API calls made by the user',
[u'Heat Developers'], 1),
('man/heat-keystone-setup', 'heat-keystone-setup',
u'Script which sets up keystone for usage by Heat',
[u'Heat Developers'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Heat', u'Heat Documentation',
u'Heat Developers', 'Heat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
zahodi/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/language/bundler.py
|
25
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION='''
---
module: bundler
short_description: Manage Ruby Gem dependencies with Bundler
description:
- Manage installation and Gem version dependencies for Ruby using the Bundler gem
version_added: "2.0.0"
options:
executable:
description:
- The path to the bundler executable
required: false
default: null
state:
description:
- The desired state of the Gem bundle. C(latest) updates gems to the most recent acceptable version
required: false
choices: [present, latest]
default: present
chdir:
description:
- The directory to execute the bundler commands from. This directory
needs to contain a valid Gemfile or .bundle/ directory
required: false
default: temporary working directory
exclude_groups:
description:
- A list of Gemfile groups to exclude during operations. This only
applies when state is C(present). Bundler considers this
a 'remembered' property for the Gemfile and will automatically exclude
groups in future operations even if C(exclude_groups) is not set
required: false
default: null
clean:
description:
- Only applies if state is C(present). If set removes any gems on the
target host that are not in the gemfile
required: false
choices: [yes, no]
default: "no"
gemfile:
description:
- Only applies if state is C(present). The path to the gemfile to use to install gems.
required: false
default: Gemfile in current directory
local:
description:
- If set only installs gems from the cache on the target host
required: false
choices: [yes, no]
default: "no"
deployment_mode:
description:
- Only applies if state is C(present). If set it will only install gems
that are in the default or production groups. Requires a Gemfile.lock
file to have been created prior
required: false
choices: [yes, no]
default: "no"
user_install:
description:
- Only applies if state is C(present). Installs gems in the local user's cache or for all users
required: false
choices: [yes, no]
default: "yes"
gem_path:
description:
- Only applies if state is C(present). Specifies the directory to
install the gems into. If C(chdir) is set then this path is relative to
C(chdir)
required: false
default: RubyGems gem paths
binstub_directory:
description:
- Only applies if state is C(present). Specifies the directory to
install any gem bins files to. When executed the bin files will run
within the context of the Gemfile and fail if any required gem
dependencies are not installed. If C(chdir) is set then this path is
relative to C(chdir)
required: false
default: null
extra_args:
description:
- A space separated string of additional commands that can be applied to
the Bundler command. Refer to the Bundler documentation for more
information
required: false
default: null
author: "Tim Hoiberg (@thoiberg)"
'''
EXAMPLES='''
# Installs gems from a Gemfile in the current directory
- bundler:
state: present
executable: ~/.rvm/gems/2.1.5/bin/bundle
# Excludes the production group from installing
- bundler:
state: present
exclude_groups: production
# Only install gems from the default and production groups
- bundler:
state: present
deployment_mode: yes
# Installs gems using a Gemfile in another directory
- bundler:
state: present
gemfile: ../rails_project/Gemfile
# Updates Gemfile in another directory
- bundler:
state: latest
chdir: ~/rails_project
'''
def get_bundler_executable(module):
if module.params.get('executable'):
return module.params.get('executable').split(' ')
else:
return [ module.get_bin_path('bundle', True) ]
def main():
module = AnsibleModule(
argument_spec=dict(
executable=dict(default=None, required=False),
state=dict(default='present', required=False, choices=['present', 'latest']),
chdir=dict(default=None, required=False, type='path'),
exclude_groups=dict(default=None, required=False, type='list'),
clean=dict(default=False, required=False, type='bool'),
gemfile=dict(default=None, required=False, type='path'),
local=dict(default=False, required=False, type='bool'),
deployment_mode=dict(default=False, required=False, type='bool'),
user_install=dict(default=True, required=False, type='bool'),
gem_path=dict(default=None, required=False, type='path'),
binstub_directory=dict(default=None, required=False, type='path'),
extra_args=dict(default=None, required=False),
),
supports_check_mode=True
)
executable = module.params.get('executable')
state = module.params.get('state')
chdir = module.params.get('chdir')
exclude_groups = module.params.get('exclude_groups')
clean = module.params.get('clean')
gemfile = module.params.get('gemfile')
local = module.params.get('local')
deployment_mode = module.params.get('deployment_mode')
user_install = module.params.get('user_install')
gem_path = module.params.get('gem_path')
binstub_directory = module.params.get('binstub_directory')
extra_args = module.params.get('extra_args')
cmd = get_bundler_executable(module)
if module.check_mode:
cmd.append('check')
rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
if state == 'present':
cmd.append('install')
if exclude_groups:
cmd.extend(['--without', ':'.join(exclude_groups)])
if clean:
cmd.append('--clean')
if gemfile:
cmd.extend(['--gemfile', gemfile])
if local:
cmd.append('--local')
if deployment_mode:
cmd.append('--deployment')
if not user_install:
cmd.append('--system')
if gem_path:
cmd.extend(['--path', gem_path])
if binstub_directory:
cmd.extend(['--binstubs', binstub_directory])
else:
cmd.append('update')
if local:
cmd.append('--local')
if extra_args:
cmd.extend(extra_args.split(' '))
rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
msebire/intellij-community
|
refs/heads/master
|
python/testData/completion/py3668/foo/bar.py
|
12133432
| |
wisonwang/django-lfs
|
refs/heads/master
|
lfs/order/migrations/__init__.py
|
12133432
| |
wolfv/AutobahnPython
|
refs/heads/master
|
autobahn/autobahn/twisted/resource.py
|
11
|
###############################################################################
##
## Copyright (C) 2012-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("WebSocketResource",
"HTTPChannelHixie76Aware",
"WSGIRootResource",)
from zope.interface import implementer
from twisted.protocols.policies import ProtocolWrapper
try:
from twisted.web.error import NoResource
except:
## starting from Twisted 12.2, NoResource has moved
from twisted.web.resource import NoResource
from twisted.web.resource import IResource, Resource
## The following imports reactor at module level
## See: https://twistedmatrix.com/trac/ticket/6849
from twisted.web.http import HTTPChannel
## .. and this also, since it imports t.w.http
##
from twisted.web.server import NOT_DONE_YET
class HTTPChannelHixie76Aware(HTTPChannel):
"""
   Hixie-76 is deadly broken. It includes 8 bytes of body, but then does not
   set a Content-Length header. This hacked HTTPChannel injects the missing
HTTP header upon detecting Hixie-76. We need this since otherwise
Twisted Web will silently ignore the body.
To use this, set `protocol = HTTPChannelHixie76Aware` on your
`twisted.web.server.Site <http://twistedmatrix.com/documents/current/api/twisted.web.server.Site.html>`_ instance.
See:
* `Autobahn Twisted Web site example <https://github.com/tavendo/AutobahnPython/tree/master/examples/twisted/websocket/echo_site>`_
"""
def headerReceived(self, line):
header = line.split(':')[0].lower()
if header == "sec-websocket-key1" and not self._transferDecoder:
HTTPChannel.headerReceived(self, "Content-Length: 8")
HTTPChannel.headerReceived(self, line)
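## A minimal usage sketch, for illustration only (the URL and port are
## assumptions, not part of this module):
##
##   from twisted.internet import reactor
##   from twisted.web.server import Site
##   from autobahn.twisted.websocket import WebSocketServerFactory
##
##   factory = WebSocketServerFactory("ws://localhost:8080")
##   site = Site(WebSocketResource(factory))
##   site.protocol = HTTPChannelHixie76Aware
##   reactor.listenTCP(8080, site)
##   reactor.run()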
class WSGIRootResource(Resource):
"""
   Root resource when you want a WSGI resource to be the default serving
   resource for a Twisted Web site, but have subpaths served by
   different resources.
This is a hack needed since
   `twisted.web.wsgi.WSGIResource <http://twistedmatrix.com/documents/current/api/twisted.web.wsgi.WSGIResource.html>`_
does not provide a `putChild()` method.
See also:
* `Autobahn Twisted Web WSGI example <https://github.com/tavendo/AutobahnPython/tree/master/examples/twisted/websocket/echo_wsgi>`_
* `Original hack <http://blog.vrplumber.com/index.php?/archives/2426-Making-your-Twisted-resources-a-url-sub-tree-of-your-WSGI-resource....html>`_
"""
def __init__(self, wsgiResource, children):
"""
Creates a Twisted Web root resource.
:param wsgiResource:
:type wsgiResource: Instance of `twisted.web.wsgi.WSGIResource <http://twistedmatrix.com/documents/current/api/twisted.web.wsgi.WSGIResource.html>`_.
:param children: A dictionary with string keys constituting URL subpaths, and Twisted Web resources as values.
:type children: dict
"""
Resource.__init__(self)
self._wsgiResource = wsgiResource
self.children = children
def getChild(self, path, request):
request.prepath.pop()
request.postpath.insert(0, path)
return self._wsgiResource
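## A minimal usage sketch, for illustration only (`app` is an assumed WSGI
## application and `factory` an assumed WebSocketServerFactory):
##
##   from twisted.internet import reactor
##   from twisted.web.server import Site
##   from twisted.web.wsgi import WSGIResource
##
##   wsgiResource = WSGIResource(reactor, reactor.getThreadPool(), app)
##   wsResource = WebSocketResource(factory)
##   rootResource = WSGIRootResource(wsgiResource, {"ws": wsResource})
##   site = Site(rootResource)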
@implementer(IResource)
class WebSocketResource(object):
"""
A Twisted Web resource for WebSocket. This resource needs to be instantiated
with a factory derived from WebSocketServerFactory.
"""
isLeaf = True
def __init__(self, factory):
"""
Ctor.
:param factory: An instance of :class:`autobahn.twisted.websocket.WebSocketServerFactory`.
:type factory: obj
"""
self._factory = factory
def getChildWithDefault(self, name, request):
"""
This resource cannot have children, hence this will always fail.
"""
return NoResource("No such child resource.")
def putChild(self, path, child):
"""
This resource cannot have children, hence this is always ignored.
"""
pass
def render(self, request):
"""
      Render the resource. This will take over the transport underlying
the request, create a WebSocketServerProtocol and let that do
any subsequent communication.
"""
## Create Autobahn WebSocket protocol.
##
protocol = self._factory.buildProtocol(request.transport.getPeer())
if not protocol:
## If protocol creation fails, we signal "internal server error"
request.setResponseCode(500)
return ""
## Take over the transport from Twisted Web
##
transport, request.transport = request.transport, None
## Connect the transport to our protocol. Once #3204 is fixed, there
## may be a cleaner way of doing this.
## http://twistedmatrix.com/trac/ticket/3204
##
if isinstance(transport, ProtocolWrapper):
## i.e. TLS is a wrapping protocol
transport.wrappedProtocol = protocol
else:
transport.protocol = protocol
protocol.makeConnection(transport)
## We recreate the request and forward the raw data. This is somewhat
## silly (since Twisted Web already did the HTTP request parsing
## which we will do a 2nd time), but it's totally non-invasive to our
## code. Maybe improve this.
##
data = "%s %s HTTP/1.1\x0d\x0a" % (request.method, request.uri)
for h in request.requestHeaders.getAllRawHeaders():
data += "%s: %s\x0d\x0a" % (h[0], ",".join(h[1]))
data += "\x0d\x0a"
data += request.content.read() # we need this for Hixie-76
protocol.dataReceived(data)
return NOT_DONE_YET
|
bzbarsky/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/py/testing/process/test_cmdexec.py
|
163
|
import py
from py.process import cmdexec
def exvalue():
return py.std.sys.exc_info()[1]
class Test_exec_cmd:
def test_simple(self):
out = cmdexec('echo hallo')
assert out.strip() == 'hallo'
assert py.builtin._istext(out)
def test_simple_newline(self):
import sys
out = cmdexec(r"""%s -c "print ('hello')" """ % sys.executable)
assert out == 'hello\n'
assert py.builtin._istext(out)
def test_simple_error(self):
py.test.raises (cmdexec.Error, cmdexec, 'exit 1')
def test_simple_error_exact_status(self):
try:
cmdexec('exit 1')
except cmdexec.Error:
e = exvalue()
assert e.status == 1
assert py.builtin._istext(e.out)
assert py.builtin._istext(e.err)
def test_err(self):
try:
cmdexec('echoqweqwe123 hallo')
raise AssertionError("command succeeded but shouldn't")
except cmdexec.Error:
e = exvalue()
assert hasattr(e, 'err')
assert hasattr(e, 'out')
assert e.err or e.out
|
landscape-test/all-messages
|
refs/heads/master
|
messages/pylint/W0301.py
|
1
|
"""
W0301
Unnecessary semicolon
"""
|
googleapis/python-billingbudgets
|
refs/heads/master
|
scripts/fixup_budgets_v1beta1_keywords.py
|
4
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
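# For example (illustrative only):
#
#   evens, odds = partition(lambda n: n % 2 == 0, [1, 2, 3, 4])
#   # evens == [2, 4], odds == [1, 3]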
class budgetsCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'create_budget': ('parent', 'budget', ),
'delete_budget': ('name', ),
'get_budget': ('name', ),
'list_budgets': ('parent', 'page_size', 'page_token', ),
'update_budget': ('budget', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: not a.keyword.value in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
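# Illustrative sketch of the rewrite performed above (the `client` variable is
# hypothetical):
#
#   client.create_budget(parent, budget, retry=retry)
#
# becomes
#
#   client.create_budget(
#       request={'parent': parent, 'budget': budget},
#       retry=retry,
#   )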
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=budgetsCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the budgets client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
|
dhermes/endpoints-proto-datastore
|
refs/heads/master
|
endpoints_proto_datastore/ndb/properties.py
|
8
|
# Copyright 2012 Google Inc. All Rights Reserved.
"""Custom properties for hybrid NDB/ProtoRPC models.
Custom properties are defined to allow custom interactions with complex
types and custom serialization of these values into ProtoRPC fields.
Defined here:
EndpointsAliasProperty:
A local only property used for including custom properties in messages
without having to persist these properties in the datastore and for creating
custom setters based on values parsed from requests.
EndpointsUserProperty:
For getting the user the same way an endpoints method does.
EndpointsDateTimeProperty,EndpointsDateProperty,EndpointsTimeProperty:
For custom serialization of date and/or time stamps.
EndpointsVariantIntegerProperty,EndpointsVariantFloatProperty:
For allowing ProtoRPC type variants for fields which allow it, e.g. a 32-bit
integer instead of the default 64-bit.
EndpointsComputedProperty:
      A subclass of ndb.ComputedProperty; this property class is needed since one
      cannot readily determine the desired type of the output.
"""
import datetime
import warnings
warnings.simplefilter('default') # To allow DeprecationWarning
from . import utils as ndb_utils
from .. import utils
try:
import endpoints
except ImportError:
from google.appengine.ext import endpoints
from protorpc import messages
from google.appengine.ext import ndb
__all__ = [
'EndpointsAliasProperty', 'EndpointsUserProperty',
'EndpointsDateTimeProperty', 'EndpointsDateProperty',
'EndpointsTimeProperty', 'EndpointsVariantIntegerProperty',
'EndpointsVariantFloatProperty', 'EndpointsComputedProperty',
]
DEFAULT_PROPERTY_TYPE = messages.StringField
DATETIME_STRING_FORMAT = utils.DATETIME_STRING_FORMAT
DATE_STRING_FORMAT = utils.DATE_STRING_FORMAT
TIME_STRING_FORMAT = utils.TIME_STRING_FORMAT
def ComputedPropertyToProto(prop, index):
"""Converts a computed property to the corresponding message field.
Args:
prop: The NDB property to be converted.
index: The index of the property within the message.
Returns:
A ProtoRPC field. If the property_type of prop is a field, then a field of
that type will be returned. If the property_type of prop is an enum
class, then an enum field using that enum class is returned. If the
property_type of prop is a message class, then a message field using
that message class is returned.
Raises:
TypeError: if the property_type manages to pass CheckValidPropertyType
without an exception but does not match any of the parent types
messages.Field, messages.Enum or messages.Message. NOTE: This should
not occur, given the behavior of CheckValidPropertyType.
"""
kwargs = ndb_utils.GetKeywordArgs(prop)
property_type = prop.property_type
utils.CheckValidPropertyType(property_type)
if utils.IsSubclass(property_type, messages.Field):
return property_type(index, **kwargs)
elif utils.IsSubclass(property_type, messages.Enum):
return messages.EnumField(property_type, index, **kwargs)
elif utils.IsSubclass(property_type, messages.Message):
# No default for {MessageField}s
kwargs.pop('default', None)
return messages.MessageField(property_type, index, **kwargs)
else:
# Should never occur due to utils.CheckValidPropertyType.
raise TypeError('Unexpected property type: %s.' % (property_type,))
class EndpointsAliasProperty(property):
"""A custom property that also considers the type of the response.
Allows Python properties to be used in an EndpointsModel by also
specifying a property type. These properties can be derived from the rest
of the model and included in a ProtoRPC message definition, but will not need
to be persisted in the datastore.
This class can be used directly to define properties or as a decorator.
Attributes:
message_field: a value used to register the property in the property class
to proto dictionary for any model class with this property. The method
ComputedPropertyToProto is used here.
"""
message_field = ComputedPropertyToProto
@utils.positional(2)
def __init__(self, func=None, setter=None, fdel=None, doc=None,
repeated=False, required=False, default=None, name=None,
variant=None, property_type=DEFAULT_PROPERTY_TYPE):
"""Constructor for property.
Attributes:
__saved_property_args: A dictionary that can be stored on the instance if
used as a decorator rather than directly as a property.
__initialized: A boolean corresponding to whether or not the instance has
completed initialization or needs to continue when called as a
decorator.
_required: A boolean attribute for ProtoRPC conversion, denoting whether
this property is required in a message class.
_repeated: A boolean attribute for ProtoRPC conversion, denoting whether
this property is repeated in a message class.
_name: The true name of the property.
_code_name: The attribute name of the property on the model that
instantiated it.
_variant: An optional variant that can be used for ProtoRPC conversion,
since some ProtoRPC fields allow variants. Will not always be set on
alias properties.
property_type: A ProtoRPC field, message class or enum class that
describes the output of the alias property.
Args:
func: The method that outputs the value of the property. If None,
we use this as a signal the instance is being used as a decorator.
setter: The (optional) method that will allow the property to be set.
Passed to the property constructor as fset. Defaults to None.
fdel: The (optional) method that will be called when the property is
deleted. Passed to the property constructor as fdel. Defaults to None.
doc: The (optional) docstring for the property. Defaults to None.
repeated: Optional boolean, defaults to False. Indicates whether or not
the ProtoRPC field is repeated.
required: Optional boolean, defaults to False. Indicates whether or not
the ProtoRPC field should be required.
default: Optional default value for the property. Only set on the property
instance if not None. Will be validated when a corresponding message
field is created.
name: A custom name that can be used to describe the property.
      variant: A variant that can be used to augment the ProtoRPC field. Will
be validated when a corresponding message field is created.
property_type: A ProtoRPC field, message class or enum class that
describes the output of the alias property.
"""
self._required = required
self._repeated = repeated
self._name = name
self._code_name = None
if default is not None:
self._default = default
if variant is not None:
self._variant = variant
utils.CheckValidPropertyType(property_type)
self.property_type = property_type
property_args = {'fset': setter, 'fdel': fdel, 'doc': doc}
if func is None:
self.__initialized = False
self.__saved_property_args = property_args
else:
self.__initialized = True
super(EndpointsAliasProperty, self).__init__(func, **property_args)
def __call__(self, func):
"""Callable method to be used when instance is used as a decorator.
If called as a decorator, passes the saved keyword arguments and the func
to the constructor to complete initialization.
Args:
func: The method that outputs the value of the property.
Returns:
The property instance.
Raises:
TypeError: if the instance has already been initialized, either directly
as a property or as a decorator elsewhere.
"""
if self.__initialized:
raise TypeError('EndpointsAliasProperty is not callable.')
super(EndpointsAliasProperty, self).__init__(func,
**self.__saved_property_args)
del self.__saved_property_args
# Return the property created
return self
def _FixUp(self, code_name):
"""Internal helper called to tell the property its name.
Intended to allow a similar name interface as provided by NDB properties.
Used during class creation in EndpointsMetaModel.
Args:
code_name: The attribute name of the property as set on a class.
"""
self._code_name = code_name
if self._name is None:
self._name = self._code_name
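# A minimal decorator-style sketch, for illustration only (EndpointsModel and
# the model fields shown are assumptions, not defined in this module):
#
#   class MyModel(EndpointsModel):
#     name = ndb.StringProperty()
#
#     @EndpointsAliasProperty(property_type=messages.StringField)
#     def lowered_name(self):
#       return (self.name or '').lower()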
class EndpointsUserProperty(ndb.UserProperty):
"""A custom user property for interacting with user ID tokens.
Uses the tools provided in the endpoints module to detect the current user.
In addition, has an optional parameter raise_unauthorized which will return
a 401 to the endpoints API request if a user can't be detected.
"""
def __init__(self, *args, **kwargs):
"""Constructor for User property.
NOTE: Have to pop custom arguments from the keyword argument dictionary
to avoid corrupting argument order when sent to the superclass.
Attributes:
_raise_unauthorized: An optional boolean, defaulting to False. If True,
the property will return a 401 to the API request if a user can't
          be detected.
"""
self._raise_unauthorized = kwargs.pop('raise_unauthorized', False)
super(EndpointsUserProperty, self).__init__(*args, **kwargs)
def _set_value(self, entity, value):
"""Internal helper to set value on model entity.
If the value to be set is null, will try to retrieve the current user and
will return a 401 if a user can't be found and raise_unauthorized is True.
Args:
entity: An instance of some NDB model.
value: The value of this property to be set on the instance.
"""
if value is None:
value = endpoints.get_current_user()
if self._raise_unauthorized and value is None:
raise endpoints.UnauthorizedException('Invalid token.')
super(EndpointsUserProperty, self)._set_value(entity, value)
def _fix_up(self, cls, code_name):
"""Internal helper called to register the property with the model class.
Overrides the _set_attributes method on the model class to interject this
attribute in to the keywords passed to it. Since the method _set_attributes
    is called by the model class constructor to set values, this -- in concert
    with the custom-defined _set_value -- will make sure this property always
gets set when an instance is created, even if not passed in.
Args:
cls: The model class that owns the property.
code_name: The name of the attribute on the model class corresponding
to the property.
"""
original_set_attributes = cls._set_attributes
def CustomSetAttributes(setattr_self, kwds):
"""Custom _set_attributes which makes sure this property is always set."""
if self._code_name not in kwds:
kwds[self._code_name] = None
original_set_attributes(setattr_self, kwds)
cls._set_attributes = CustomSetAttributes
super(EndpointsUserProperty, self)._fix_up(cls, code_name)
class EndpointsDateTimeProperty(ndb.DateTimeProperty):
"""A custom datetime property.
Allows custom serialization of a datetime.datetime stamp when used to create
a message field.
"""
def __init__(self, *args, **kwargs):
"""Constructor for datetime property.
NOTE: Have to pop custom arguments from the keyword argument dictionary
to avoid corrupting argument order when sent to the superclass.
Attributes:
_string_format: An optional string, defaulting to DATETIME_STRING_FORMAT.
This is used to serialize using strftime and deserialize using strptime
when the datetime stamp is turned into a message.
"""
self._string_format = kwargs.pop('string_format', DATETIME_STRING_FORMAT)
super(EndpointsDateTimeProperty, self).__init__(*args, **kwargs)
def ToValue(self, value):
"""A custom method to override the typical ProtoRPC message serialization.
Uses the string_format set on the property to serialize the datetime stamp.
Args:
value: A datetime stamp, the value of the property.
Returns:
The serialized string value of the datetime stamp.
"""
return value.strftime(self._string_format)
def FromValue(self, value):
"""A custom method to override the typical ProtoRPC message deserialization.
Uses the string_format set on the property to deserialize the datetime
stamp.
Args:
value: A serialized datetime stamp as a string.
Returns:
The deserialized datetime.datetime stamp.
"""
return datetime.datetime.strptime(value, self._string_format)
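# Round-trip sketch, for illustration only (the exact serialized text depends
# on DATETIME_STRING_FORMAT):
#
#   prop = EndpointsDateTimeProperty()
#   text = prop.ToValue(datetime.datetime(2014, 1, 2, 3, 4, 5))
#   assert prop.FromValue(text) == datetime.datetime(2014, 1, 2, 3, 4, 5)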
class EndpointsDateProperty(ndb.DateProperty):
"""A custom date property.
Allows custom serialization of a datetime.date stamp when used to create a
message field.
"""
def __init__(self, *args, **kwargs):
"""Constructor for date property.
NOTE: Have to pop custom arguments from the keyword argument dictionary
to avoid corrupting argument order when sent to the superclass.
Attributes:
_string_format: An optional string, defaulting to DATE_STRING_FORMAT. This
is used to serialize using strftime and deserialize using strptime when
the date stamp is turned into a message.
"""
self._string_format = kwargs.pop('string_format', DATE_STRING_FORMAT)
super(EndpointsDateProperty, self).__init__(*args, **kwargs)
def ToValue(self, value):
"""A custom method to override the typical ProtoRPC message serialization.
Uses the string_format set on the property to serialize the date stamp.
Args:
value: A date stamp, the value of the property.
Returns:
The serialized string value of the date stamp.
"""
return value.strftime(self._string_format)
def FromValue(self, value):
"""A custom method to override the typical ProtoRPC message deserialization.
Uses the string_format set on the property to deserialize the date stamp.
Args:
value: A serialized date stamp as a string.
Returns:
The deserialized datetime.date stamp.
"""
return datetime.datetime.strptime(value, self._string_format).date()
class EndpointsTimeProperty(ndb.TimeProperty):
"""A custom time property.
Allows custom serialization of a datetime.time stamp when used to create a
message field.
"""
def __init__(self, *args, **kwargs):
"""Constructor for time property.
NOTE: Have to pop custom arguments from the keyword argument dictionary
to avoid corrupting argument order when sent to the superclass.
Attributes:
      _string_format: An optional string, defaulting to TIME_STRING_FORMAT. This
is used to serialize using strftime and deserialize using strptime when
the time stamp is turned into a message.
"""
self._string_format = kwargs.pop('string_format', TIME_STRING_FORMAT)
super(EndpointsTimeProperty, self).__init__(*args, **kwargs)
def ToValue(self, value):
"""A custom method to override the typical ProtoRPC message serialization.
    Uses the string_format set on the property to serialize the time stamp.
    Args:
      value: A time stamp, the value of the property.
Returns:
The serialized string value of the time stamp.
"""
return value.strftime(self._string_format)
def FromValue(self, value):
"""A custom method to override the typical ProtoRPC message deserialization.
Uses the string_format set on the property to deserialize the time stamp.
Args:
value: A serialized time stamp as a string.
Returns:
The deserialized datetime.time stamp.
"""
return datetime.datetime.strptime(value, self._string_format).time()
class EndpointsVariantIntegerProperty(ndb.IntegerProperty):
"""A custom integer property.
  Allows custom serialization of integers by allowing variant types when used
to create a message field.
"""
def __init__(self, *args, **kwargs):
"""Constructor for integer property.
NOTE: Have to pop custom arguments from the keyword argument dictionary
to avoid corrupting argument order when sent to the superclass.
Attributes:
variant: A variant of integer types, defaulting to the default variant for
a ProtoRPC IntegerField.
"""
# The value of variant will be verified when the message field is created
self._variant = kwargs.pop('variant', messages.IntegerField.DEFAULT_VARIANT)
super(EndpointsVariantIntegerProperty, self).__init__(*args, **kwargs)
class EndpointsVariantFloatProperty(ndb.FloatProperty):
"""A custom float property.
Allows custom serialization of a float by allowing variant types when used
to create a message field.
"""
def __init__(self, *args, **kwargs):
"""Constructor for float property.
NOTE: Have to pop custom arguments from the keyword argument dictionary
to avoid corrupting argument order when sent to the superclass.
Attributes:
variant: A variant of float types, defaulting to the default variant for
a ProtoRPC FloatField.
"""
    # The value of variant will be verified when the message field is created
self._variant = kwargs.pop('variant', messages.FloatField.DEFAULT_VARIANT)
super(EndpointsVariantFloatProperty, self).__init__(*args, **kwargs)
class EndpointsComputedProperty(ndb.ComputedProperty):
"""A custom computed property that also considers the type of the response.
Allows NDB computed properties to be used in an EndpointsModel by also
specifying a property type.
This class can be used directly to define properties or as a decorator.
Attributes:
message_field: a value used to register the property in the property class
to proto dictionary for any model class with this property. The method
ComputedPropertyToProto is used here.
"""
message_field = ComputedPropertyToProto
@utils.positional(2)
def __init__(self, func=None, **kwargs):
"""Constructor for computed property.
NOTE: Have to pop custom arguments from the keyword argument dictionary
to avoid corrupting argument order when sent to the superclass.
Attributes:
      _variant: A variant that can be used to augment the ProtoRPC field.
property_type: A ProtoRPC field, message class or enum class that
describes the output of the alias property.
__saved_kwargs: A dictionary that can be stored on the instance if used
as a decorator rather than directly as a property.
__initialized: A boolean corresponding to whether or not the instance has
completed initialization or needs to continue when called as a
decorator.
Args:
func: The method that outputs the value of the computed property. If None,
we use this as a signal the instance is being used as a decorator.
"""
variant = kwargs.pop('variant', None)
# The value of variant will be verified when the message field is created
if variant is not None:
self._variant = variant
property_type = kwargs.pop('property_type', DEFAULT_PROPERTY_TYPE)
utils.CheckValidPropertyType(property_type)
self.property_type = property_type
if func is None:
self.__initialized = False
self.__saved_kwargs = kwargs
else:
self.__initialized = True
super(EndpointsComputedProperty, self).__init__(func, **kwargs)
def __call__(self, func):
"""Callable method to be used when instance is used as a decorator.
If called as a decorator, passes the saved keyword arguments and the func
to the constructor to complete initialization.
Args:
func: The method that outputs the value of the computed property.
Returns:
The property instance.
Raises:
TypeError: if the instance has already been initialized, either directly
as a property or as a decorator elsewhere.
"""
if self.__initialized:
raise TypeError('EndpointsComputedProperty is not callable.')
super(EndpointsComputedProperty, self).__init__(func, **self.__saved_kwargs)
del self.__saved_kwargs
# Return the property created
return self
def _set_value(self, unused_entity, unused_value):
"""Internal helper to set a value in an entity for a ComputedProperty.
Typically, on a computed property, an ndb.model.ComputedPropertyError
exception is raised when we try to set the property.
In endpoints, since we will be deserializing messages to entities, we want
to be able to call entity.some_computed_property_name = some_value without
halting code, hence this will simply do nothing.
"""
warnings.warn('Cannot assign to a ComputedProperty.', DeprecationWarning)
|
luisgg/iteexe
|
refs/heads/master
|
twisted/test/process_reader.py
|
166
|
"""Script used by test_process.TestTwoProcesses"""
# run until stdin is closed, then quit
import sys
while 1:
d = sys.stdin.read()
if len(d) == 0:
sys.exit(0)
|
tashaxe/Red-DiscordBot
|
refs/heads/develop
|
lib/PIL/ImageQt.py
|
14
|
#
# The Python Imaging Library.
# $Id$
#
# a simple Qt image interface.
#
# history:
# 2006-06-03 fl: created
# 2006-06-04 fl: inherit from QImage instead of wrapping it
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com)
#
# Copyright (c) 2006 by Secret Labs AB
# Copyright (c) 2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
from PIL._util import isPath
from io import BytesIO
qt_is_installed = True
qt_version = None
try:
from PyQt5.QtGui import QImage, qRgba, QPixmap
from PyQt5.QtCore import QBuffer, QIODevice
qt_version = '5'
except (ImportError, RuntimeError):
try:
from PyQt4.QtGui import QImage, qRgba, QPixmap
from PyQt4.QtCore import QBuffer, QIODevice
qt_version = '4'
except (ImportError, RuntimeError):
try:
from PySide.QtGui import QImage, qRgba, QPixmap
from PySide.QtCore import QBuffer, QIODevice
qt_version = 'side'
except ImportError:
qt_is_installed = False
def rgb(r, g, b, a=255):
"""(Internal) Turns an RGB color into a Qt compatible color integer."""
    # use qRgba to pack the colors, and then mask the result to an
    # unsigned 32-bit integer with the same bit pattern.
return (qRgba(r, g, b, a) & 0xffffffff)
def fromqimage(im):
"""
:param im: A PIL Image object, or a file name
(given either as Python string or a PyQt string object)
"""
buffer = QBuffer()
buffer.open(QIODevice.ReadWrite)
    # preserve alpha channel with png
# otherwise ppm is more friendly with Image.open
if im.hasAlphaChannel():
im.save(buffer, 'png')
else:
im.save(buffer, 'ppm')
b = BytesIO()
try:
b.write(buffer.data())
except TypeError:
# workaround for Python 2
b.write(str(buffer.data()))
buffer.close()
b.seek(0)
return Image.open(b)
def fromqpixmap(im):
return fromqimage(im)
# buffer = QBuffer()
# buffer.open(QIODevice.ReadWrite)
# # im.save(buffer)
# # What if png doesn't support some image features like animation?
# im.save(buffer, 'ppm')
# bytes_io = BytesIO()
# bytes_io.write(buffer.data())
# buffer.close()
# bytes_io.seek(0)
# return Image.open(bytes_io)
def align8to32(bytes, width, mode):
"""
converts each scanline of data from 8 bit to 32 bit aligned
"""
bits_per_pixel = {
'1': 1,
'L': 8,
'P': 8,
}[mode]
# calculate bytes per line and the extra padding if needed
bits_per_line = bits_per_pixel * width
full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)
bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)
extra_padding = -bytes_per_line % 4
# already 32 bit aligned by luck
if not extra_padding:
return bytes
new_data = []
for i in range(len(bytes) // bytes_per_line):
new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line] + b'\x00' * extra_padding)
return b''.join(new_data)
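# Worked example (illustrative): a mode '1' image that is 10 pixels wide packs
# to 10 bits (2 bytes) per scanline, so 2 padding bytes are appended to each
# scanline to reach the next 32-bit (4-byte) boundary.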
def _toqclass_helper(im):
data = None
colortable = None
# handle filename, if given instead of image name
if hasattr(im, "toUtf8"):
# FIXME - is this really the best way to do this?
if str is bytes:
im = unicode(im.toUtf8(), "utf-8")
else:
im = str(im.toUtf8(), "utf-8")
if isPath(im):
im = Image.open(im)
if im.mode == "1":
format = QImage.Format_Mono
elif im.mode == "L":
format = QImage.Format_Indexed8
colortable = []
for i in range(256):
colortable.append(rgb(i, i, i))
elif im.mode == "P":
format = QImage.Format_Indexed8
colortable = []
palette = im.getpalette()
for i in range(0, len(palette), 3):
colortable.append(rgb(*palette[i:i+3]))
elif im.mode == "RGB":
data = im.tobytes("raw", "BGRX")
format = QImage.Format_RGB32
elif im.mode == "RGBA":
try:
data = im.tobytes("raw", "BGRA")
except SystemError:
# workaround for earlier versions
r, g, b, a = im.split()
im = Image.merge("RGBA", (b, g, r, a))
format = QImage.Format_ARGB32
else:
raise ValueError("unsupported image mode %r" % im.mode)
__data = data or align8to32(im.tobytes(), im.size[0], im.mode)
return {
'data': __data, 'im': im, 'format': format, 'colortable': colortable
}
if qt_is_installed:
class ImageQt(QImage):
def __init__(self, im):
"""
            A PIL image wrapper for Qt. This is a subclass of PyQt's QImage
class.
:param im: A PIL Image object, or a file name (given either as Python
string or a PyQt string object).
"""
im_data = _toqclass_helper(im)
# must keep a reference, or Qt will crash!
# All QImage constructors that take data operate on an existing
# buffer, so this buffer has to hang on for the life of the image.
# Fixes https://github.com/python-pillow/Pillow/issues/1370
self.__data = im_data['data']
QImage.__init__(self,
self.__data, im_data['im'].size[0],
im_data['im'].size[1], im_data['format'])
if im_data['colortable']:
self.setColorTable(im_data['colortable'])
def toqimage(im):
return ImageQt(im)
def toqpixmap(im):
# # This doesn't work. For now using a dumb approach.
# im_data = _toqclass_helper(im)
# result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])
# result.loadFromData(im_data['data'])
    # Work around a strange rendering bug by converting RGB images to RGBA first.
if im.mode == 'RGB':
im = im.convert('RGBA')
qimage = toqimage(im)
return QPixmap.fromImage(qimage)
|
tanmaythakur/django
|
refs/heads/master
|
django/core/cache/backends/locmem.py
|
586
|
"Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
@contextmanager
def dummy():
"""A context manager that does nothing special."""
yield
class LocMemCache(BaseCache):
def __init__(self, name, params):
BaseCache.__init__(self, params)
self._cache = _caches.setdefault(name, {})
self._expire_info = _expire_info.setdefault(name, {})
self._lock = _locks.setdefault(name, RWLock())
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
if self._has_expired(key):
self._set(key, pickled, timeout)
return True
return False
def get(self, key, default=None, version=None, acquire_lock=True):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = None
with (self._lock.reader() if acquire_lock else dummy()):
if not self._has_expired(key):
pickled = self._cache[key]
if pickled is not None:
try:
return pickle.loads(pickled)
except pickle.PickleError:
return default
with (self._lock.writer() if acquire_lock else dummy()):
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
if len(self._cache) >= self._max_entries:
self._cull()
self._cache[key] = value
self._expire_info[key] = self.get_backend_timeout(timeout)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
self._set(key, pickled, timeout)
def incr(self, key, delta=1, version=None):
with self._lock.writer():
value = self.get(key, version=version, acquire_lock=False)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
key = self.make_key(key, version=version)
pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
self._cache[key] = pickled
return new_value
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.reader():
if not self._has_expired(key):
return True
with self._lock.writer():
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return False
def _has_expired(self, key):
exp = self._expire_info.get(key, -1)
if exp is None or exp > time.time():
return False
return True
def _cull(self):
if self._cull_frequency == 0:
self.clear()
else:
doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
for k in doomed:
self._delete(k)
def _delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
del self._expire_info[key]
except KeyError:
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
self._delete(key)
def clear(self):
self._cache.clear()
self._expire_info.clear()
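# A minimal configuration sketch (in a project's settings.py), shown for
# illustration only; the LOCATION string is an arbitrary name that keys the
# module-level _caches/_expire_info/_locks dictionaries above:
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#           'LOCATION': 'unique-snowflake',
#       }
#   }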
|
blitz-io/blitz-python
|
refs/heads/master
|
src/blitz/sprint.py
|
1
|
__author__="ghermeto"
__date__ ="$27/07/2011 23:23:38$"
import base64
from blitz.api import Curl, ValidationError
from blitz.validation import validate_list, validate
class Request:
"""Represents the request object generated by the sprint. Contains all
of the headers and POST/PUT data, if any."""
def __init__(self, request):
""" line: The entire response line (HTTP/1.1 200 Okay, for example)
method: The method used in the request
url: The URL, including path, query arguments and hash fragments
content: All of the response headers (as a Hash of name/value pairs)
headers: The response content, if any """
self.line = request['line'] if 'line' in request else None
self.method = request['method'] if 'method' in request else None
self.url = request['url'] if 'url' in request else None
if 'content' in request:
content = bytearray(request['content'], "ascii")
self.content = base64.b64decode(content).decode('UTF-8')
else:
self.content = None
self.headers = request['headers'] if 'headers' in request else None
class Response:
""" Represents the response object generated by the sprint. Contains all
of the headers and the response payload, if any."""
def __init__(self, response):
""" line: The entire response line (HTTP/1.1 200 Okay, for example)
status: The response status
message: The message in the response line
            content: The response content, if any
            headers: All of the response headers (as a Hash of name/value pairs) """
self.line = response['line'] if 'line' in response else None
self.status = response['status'] if 'status' in response else None
self.message = response['message'] if 'message' in response else None
if 'content' in response:
content = bytearray(response['content'], "ascii")
self.content = base64.b64decode(content).decode('UTF-8')
else:
self.content = None
self.headers = response['headers'] if 'headers' in response else None
class Step:
""" Represents a step in the transaction (even if there's only one). Each
step contains the request and response objects as well as the stats
associated with them. """
def __init__(self, step):
""" connect: The time it took for the TCP connection
duration: The time it took for this step (connect, send and receive)
request: Object with the URL, headers and content
response: Object containing the status code, headers and content """
request = step['request']
response = step['response']
self.connect = step['connect'] if 'connect' in step else None
self.duration = step['duration'] if 'duration' in step else None
self.request = Request(request) if 'request' in step else None
self.response = Response(response) if 'response' in step else None
class Result:
""" Contains the result from a successful sprint. """
def __init__(self, result):
""" region: The region from which this sprint was executed
duration: The overall response time for the successful hit
steps: Stats about the individual steps """
self.region = result['region'] if 'region' in result else None
self.duration = result['duration'] if 'duration' in result else None
if 'steps' in result and validate_list(result['steps']):
def step(s):
return Step(s)
self.steps = list(map(step, result['steps']))
else:
self.steps = None
class Sprint(Curl):
""" Use this to run a sprint against your app. The return values include the
response time, the region from which the sprint was run along with the
full request and response headers and the response body. """
def _validate(self, options):
""" Raises a ValidationError if validation fails. """
failed = validate(options)
if len(failed) > 0:
raise ValidationError('Validation error.', failed)
def _format_result(self, result):
""" Return the sprint result object to be passed to the callback. """
return Result(result)
|
kirca/odoo
|
refs/heads/master
|
addons/stock/report/stock_graph.py
|
112
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from pychart import *
import pychart.legend
import time
from openerp.report.misc import choice_colors
from openerp import tools
#
# Draw a graph for stocks
#
class stock_graph(object):
def __init__(self, io):
self._datas = {}
self._canvas = canvas.init(fname=io, format='pdf')
self._canvas.set_author("OpenERP")
self._canvas.set_title("Stock Level Forecast")
self._names = {}
self.val_min = ''
self.val_max = ''
def add(self, product_id, product_name, datas):
if hasattr(product_name, 'replace'):
product_name=product_name.replace('/', '//')
if product_id not in self._datas:
self._datas[product_id] = {}
self._names[product_id] = tools.ustr(product_name)
for (dt,stock) in datas:
if not dt in self._datas[product_id]:
self._datas[product_id][dt]=0
self._datas[product_id][dt]+=stock
if self.val_min:
self.val_min = min(self.val_min,dt)
else:
self.val_min = dt
self.val_max = max(self.val_max,dt)
def draw(self):
colors = choice_colors(len(self._datas.keys()))
user_color = {}
for user in self._datas.keys():
user_color[user] = colors.pop()
val_min = int(time.mktime(time.strptime(self.val_min,'%Y-%m-%d')))
val_max = int(time.mktime(time.strptime(self.val_max,'%Y-%m-%d')))
plots = []
for product_id in self._datas:
f = fill_style.Plain()
            f.bgcolor = user_color[product_id]
datas = self._datas[product_id].items()
datas = map(lambda x: (int(time.mktime(time.strptime(x[0],'%Y-%m-%d'))),x[1]), datas)
datas.sort()
datas2 = []
val = 0
for d in datas:
val+=d[1]
if len(datas2):
                    d2 = d[0]-60*60*24
if datas2[-1][0]<d2-1000:
datas2.append((d2,datas2[-1][1]))
datas2.append((d[0],val))
if len(datas2) and datas2[-1][0]<val_max-100:
datas2.append((val_max, datas2[-1][1]))
if len(datas2)==1:
datas2.append( (datas2[0][0]+100, datas2[0][1]) )
st = line_style.T()
st.color = user_color[product_id]
st.width = 1
st.cap_style=1
st.join_style=1
plot = line_plot.T(label=self._names[product_id], data=datas2, line_style=st)
plots.append(plot)
interval = max((val_max-val_min)/15, 86400)
x_axis = axis.X(format=lambda x:'/a60{}'+time.strftime('%Y-%m-%d',time.gmtime(x)), tic_interval=interval, label=None)
# For add the report header on the top of the report.
tb = text_box.T(loc=(300, 500), text="/hL/15/bStock Level Forecast", line_style=None)
tb.draw()
ar = area.T(size = (620,435), x_range=(val_min,val_max+1), y_axis = axis.Y(format="%d", label="Virtual Stock (Unit)"), x_axis=x_axis)
for plot in plots:
ar.add_plot(plot)
ar.draw(self._canvas)
def close(self):
self._canvas.close()
if __name__ == '__main__':
gt = stock_graph('test.pdf')
gt.add(1, 'Pomme', [('2005-07-29', 6), ('2005-07-30', -2), ('2005-07-31', 4)])
gt.add(2, 'Cailloux', [('2005-07-29', 9), ('2005-07-30', -4), ('2005-07-31', 2)])
gt.draw()
gt.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
DailyActie/Surrogate-Model
|
refs/heads/master
|
01-codes/scikit-learn-master/sklearn/svm/tests/test_sparse.py
|
1
|
import numpy as np
from nose.tools import assert_raises, assert_true, assert_false
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from scipy import sparse
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.exceptions import ConvergenceWarning
from sklearn.svm.tests import test_svm
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits since iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
|
aidanhs/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/wptserve/wptserve/logger.py
|
489
|
class NoOpLogger(object):
def critical(self, msg):
pass
def error(self, msg):
pass
def info(self, msg):
pass
def warning(self, msg):
pass
def debug(self, msg):
pass
logger = NoOpLogger()
_set_logger = False
def set_logger(new_logger):
global _set_logger
if _set_logger:
raise Exception("Logger must be set at most once")
global logger
logger = new_logger
_set_logger = True
def get_logger():
return logger
|
TeamEOS/external_chromium_org
|
refs/heads/lp5.0
|
third_party/protobuf/python/google/protobuf/reflection.py
|
223
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import api_implementation
from google.protobuf import descriptor as descriptor_mod
from google.protobuf import message
_FieldDescriptor = descriptor_mod.FieldDescriptor
if api_implementation.Type() == 'cpp':
if api_implementation.Version() == 2:
from google.protobuf.internal.cpp import cpp_message
_NewMessage = cpp_message.NewMessage
_InitMessage = cpp_message.InitMessage
else:
from google.protobuf.internal import cpp_message
_NewMessage = cpp_message.NewMessage
_InitMessage = cpp_message.InitMessage
else:
from google.protobuf.internal import python_message
_NewMessage = python_message.NewMessage
_InitMessage = python_message.InitMessage
class GeneratedProtocolMessageType(type):
"""Metaclass for protocol message classes created at runtime from Descriptors.
We add implementations for all methods described in the Message class. We
also create properties to allow getting/setting all fields in the protocol
message. Finally, we create slots to prevent users from accidentally
"setting" nonexistent fields in the protocol message, which then wouldn't get
serialized / deserialized properly.
The protocol compiler currently uses this metaclass to create protocol
message classes at runtime. Clients can also manually create their own
classes at runtime, as in this example:
mydescriptor = Descriptor(.....)
class MyProtoClass(Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = mydescriptor
myproto_instance = MyProtoClass()
myproto_instance.foo_field = 23
...
"""
# Must be consistent with the protocol-compiler code in
# proto2/compiler/internal/generator.*.
_DESCRIPTOR_KEY = 'DESCRIPTOR'
def __new__(cls, name, bases, dictionary):
"""Custom allocation for runtime-generated class types.
We override __new__ because this is apparently the only place
where we can meaningfully set __slots__ on the class we're creating(?).
(The interplay between metaclasses and slots is not very well-documented).
Args:
name: Name of the class (ignored, but required by the
metaclass protocol).
bases: Base classes of the class we're constructing.
(Should be message.Message). We ignore this field, but
it's required by the metaclass protocol
dictionary: The class dictionary of the class we're
constructing. dictionary[_DESCRIPTOR_KEY] must contain
a Descriptor object describing this protocol message
type.
Returns:
Newly-allocated class.
"""
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
bases = _NewMessage(bases, descriptor, dictionary)
superclass = super(GeneratedProtocolMessageType, cls)
new_class = superclass.__new__(cls, name, bases, dictionary)
setattr(descriptor, '_concrete_class', new_class)
return new_class
def __init__(cls, name, bases, dictionary):
"""Here we perform the majority of our work on the class.
We add enum getters, an __init__ method, implementations
of all Message methods, and properties for all fields
in the protocol type.
Args:
name: Name of the class (ignored, but required by the
metaclass protocol).
bases: Base classes of the class we're constructing.
(Should be message.Message). We ignore this field, but
it's required by the metaclass protocol
dictionary: The class dictionary of the class we're
constructing. dictionary[_DESCRIPTOR_KEY] must contain
a Descriptor object describing this protocol message
type.
"""
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
_InitMessage(descriptor, cls)
superclass = super(GeneratedProtocolMessageType, cls)
superclass.__init__(name, bases, dictionary)
def ParseMessage(descriptor, byte_str):
"""Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object.
"""
class _ResultClass(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor
new_msg = _ResultClass()
new_msg.ParseFromString(byte_str)
return new_msg
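# A minimal usage sketch for ParseMessage, assuming a Descriptor object and a
# serialized protocol buffer byte string are already in hand; "my_descriptor"
# and "raw_bytes" below are hypothetical placeholder names, not part of this
# module.
#
#   my_msg = ParseMessage(my_descriptor, raw_bytes)
#   # The returned object is a full Message instance, so fields declared in
#   # the descriptor are available as ordinary attributes on my_msg.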
|
happyleavesaoc/home-assistant
|
refs/heads/dev
|
tests/components/tts/test_init.py
|
16
|
"""The tests for the TTS component."""
import ctypes
import os
import shutil
from unittest.mock import patch, PropertyMock
import pytest
import requests
import homeassistant.components.http as http
import homeassistant.components.tts as tts
from homeassistant.components.tts.demo import DemoProvider
from homeassistant.components.media_player import (
SERVICE_PLAY_MEDIA, MEDIA_TYPE_MUSIC, ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE, DOMAIN as DOMAIN_MP)
from homeassistant.setup import setup_component
from tests.common import (
get_test_home_assistant, get_test_instance_port, assert_setup_component,
mock_service)
@pytest.fixture(autouse=True)
def mutagen_mock():
"""Mock writing tags."""
with patch('homeassistant.components.tts.SpeechManager.write_tags',
side_effect=lambda *args: args[1]):
yield
class TestTTS(object):
"""Test the Google speech component."""
def setup_method(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.demo_provider = DemoProvider('en')
self.default_tts_cache = self.hass.config.path(tts.DEFAULT_CACHE_DIR)
setup_component(
self.hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: get_test_instance_port()}})
def teardown_method(self):
"""Stop everything that was started."""
if os.path.isdir(self.default_tts_cache):
shutil.rmtree(self.default_tts_cache)
self.hass.stop()
def test_setup_component_demo(self):
"""Setup the demo platform with defaults."""
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
assert self.hass.services.has_service(tts.DOMAIN, 'demo_say')
assert self.hass.services.has_service(tts.DOMAIN, 'clear_cache')
@patch('os.mkdir', side_effect=OSError(2, "No access"))
def test_setup_component_demo_no_access_cache_folder(self, mock_mkdir):
"""Setup the demo platform with defaults."""
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
assert not setup_component(self.hass, tts.DOMAIN, config)
assert not self.hass.services.has_service(tts.DOMAIN, 'demo_say')
assert not self.hass.services.has_service(tts.DOMAIN, 'clear_cache')
def test_setup_component_and_test_service(self):
"""Setup the demo platform and call service."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
})
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(
"/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd"
"_en_-_demo.mp3") \
!= -1
assert os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3"))
def test_setup_component_and_test_service_with_config_language(self):
"""Setup the demo platform and call service."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
'language': 'de'
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
})
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(
"/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd"
"_de_-_demo.mp3") \
!= -1
assert os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_de_-_demo.mp3"))
def test_setup_component_and_test_service_with_wrong_conf_language(self):
"""Setup the demo platform and call service with wrong config."""
config = {
tts.DOMAIN: {
'platform': 'demo',
'language': 'ru'
}
}
with assert_setup_component(0, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
def test_setup_component_and_test_service_with_service_language(self):
"""Setup the demo platform and call service."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
tts.ATTR_LANGUAGE: "de",
})
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(
"/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd"
"_de_-_demo.mp3") \
!= -1
assert os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_de_-_demo.mp3"))
def test_setup_component_test_service_with_wrong_service_language(self):
"""Setup the demo platform and call service."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
tts.ATTR_LANGUAGE: "lang",
})
self.hass.block_till_done()
assert len(calls) == 0
assert not os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_lang_-_demo.mp3"))
def test_setup_component_and_test_service_with_service_options(self):
"""Setup the demo platform and call service with options."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
tts.ATTR_LANGUAGE: "de",
tts.ATTR_OPTIONS: {
'voice': 'alex'
}
})
self.hass.block_till_done()
opt_hash = ctypes.c_size_t(hash(frozenset({'voice': 'alex'}))).value
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(
"/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd"
"_de_{0}_demo.mp3".format(opt_hash)) \
!= -1
assert os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_de_{0}_demo.mp3".format(
opt_hash)))
@patch('homeassistant.components.tts.demo.DemoProvider.default_options',
new_callable=PropertyMock(return_value={'voice': 'alex'}))
def test_setup_component_and_test_with_service_options_def(self, def_mock):
"""Setup the demo platform and call service with default options."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
tts.ATTR_LANGUAGE: "de",
})
self.hass.block_till_done()
opt_hash = ctypes.c_size_t(hash(frozenset({'voice': 'alex'}))).value
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(
"/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd"
"_de_{0}_demo.mp3".format(opt_hash)) \
!= -1
assert os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_de_{0}_demo.mp3".format(
opt_hash)))
def test_setup_component_and_test_service_with_service_options_wrong(self):
"""Setup the demo platform and call service with wrong options."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
tts.ATTR_LANGUAGE: "de",
tts.ATTR_OPTIONS: {
'speed': 1
}
})
self.hass.block_till_done()
opt_hash = ctypes.c_size_t(hash(frozenset({'speed': 1}))).value
assert len(calls) == 0
assert not os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_de_{0}_demo.mp3".format(
opt_hash)))
def test_setup_component_and_test_service_clear_cache(self):
"""Setup the demo platform and call service clear cache."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
})
self.hass.block_till_done()
assert len(calls) == 1
assert os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3"))
self.hass.services.call(tts.DOMAIN, tts.SERVICE_CLEAR_CACHE, {})
self.hass.block_till_done()
assert not os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3"))
def test_setup_component_and_test_service_with_receive_voice(self):
"""Setup the demo platform and call service and receive voice."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.start()
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
})
self.hass.block_till_done()
assert len(calls) == 1
req = requests.get(calls[0].data[ATTR_MEDIA_CONTENT_ID])
_, demo_data = self.demo_provider.get_tts_audio("bla", 'en')
demo_data = tts.SpeechManager.write_tags(
"265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3",
demo_data, self.demo_provider,
"I person is on front of your door.", 'en', None)
assert req.status_code == 200
assert req.content == demo_data
def test_setup_component_and_test_service_with_receive_voice_german(self):
"""Setup the demo platform and call service and receive voice."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
'language': 'de',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.start()
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
})
self.hass.block_till_done()
assert len(calls) == 1
req = requests.get(calls[0].data[ATTR_MEDIA_CONTENT_ID])
_, demo_data = self.demo_provider.get_tts_audio("bla", "de")
demo_data = tts.SpeechManager.write_tags(
"265944c108cbb00b2a621be5930513e03a0bb2cd_de_-_demo.mp3",
demo_data, self.demo_provider,
"I person is on front of your door.", 'de', None)
assert req.status_code == 200
assert req.content == demo_data
def test_setup_component_and_web_view_wrong_file(self):
"""Setup the demo platform and receive wrong file from web."""
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.start()
url = ("{}/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd"
"_en_-_demo.mp3").format(self.hass.config.api.base_url)
req = requests.get(url)
assert req.status_code == 404
def test_setup_component_and_web_view_wrong_filename(self):
"""Setup the demo platform and receive wrong filename from web."""
config = {
tts.DOMAIN: {
'platform': 'demo',
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.start()
url = ("{}/api/tts_proxy/265944dsk32c1b2a621be5930510bb2cd"
"_en_-_demo.mp3").format(self.hass.config.api.base_url)
req = requests.get(url)
assert req.status_code == 404
def test_setup_component_test_without_cache(self):
"""Setup demo platform without cache."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
'cache': False,
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
})
self.hass.block_till_done()
assert len(calls) == 1
assert not os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3"))
def test_setup_component_test_with_cache_call_service_without_cache(self):
"""Setup demo platform with cache and call service without cache."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo',
'cache': True,
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
tts.ATTR_CACHE: False,
})
self.hass.block_till_done()
assert len(calls) == 1
assert not os.path.isfile(os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3"))
def test_setup_component_test_with_cache_dir(self):
"""Setup demo platform with cache and call service without cache."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
_, demo_data = self.demo_provider.get_tts_audio("bla", 'en')
cache_file = os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3")
os.mkdir(self.default_tts_cache)
with open(cache_file, "wb") as voice_file:
voice_file.write(demo_data)
config = {
tts.DOMAIN: {
'platform': 'demo',
'cache': True,
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
with patch('homeassistant.components.tts.demo.DemoProvider.'
'get_tts_audio', return_value=(None, None)):
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
})
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(
"/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd"
"_en_-_demo.mp3") \
!= -1
@patch('homeassistant.components.tts.demo.DemoProvider.get_tts_audio',
return_value=(None, None))
def test_setup_component_test_with_error_on_get_tts(self, tts_mock):
"""Setup demo platform with wrong get_tts_audio."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {
tts.DOMAIN: {
'platform': 'demo'
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(tts.DOMAIN, 'demo_say', {
tts.ATTR_MESSAGE: "I person is on front of your door.",
})
self.hass.block_till_done()
assert len(calls) == 0
def test_setup_component_load_cache_retrieve_without_mem_cache(self):
"""Setup component and load cache and get without mem cache."""
_, demo_data = self.demo_provider.get_tts_audio("bla", 'en')
cache_file = os.path.join(
self.default_tts_cache,
"265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3")
os.mkdir(self.default_tts_cache)
with open(cache_file, "wb") as voice_file:
voice_file.write(demo_data)
config = {
tts.DOMAIN: {
'platform': 'demo',
'cache': True,
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.start()
url = ("{}/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd"
"_en_-_demo.mp3").format(self.hass.config.api.base_url)
req = requests.get(url)
assert req.status_code == 200
assert req.content == demo_data
|
IptvBrasilGroup/Cleitonleonelcreton.repository
|
refs/heads/master
|
plugin.video.iptvbrondemand.mobile/jsunpack.py
|
18
|
"""
urlresolver XBMC Addon
Copyright (C) 2013 Bstrdsmkr
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Adapted for use in xbmc from:
https://github.com/einars/js-beautify/blob/master/python/jsbeautifier/unpackers/packer.py
usage:
if detect(some_string):
unpacked = unpack(some_string)
Unpacker for Dean Edward's p.a.c.k.e.r
"""
import re
import string
def detect(source):
"""Detects whether `source` is P.A.C.K.E.R. coded."""
source = source.replace(' ','')
    if re.search(r'eval\(function\(p,a,c,k,e,(?:r|d)', source): return True
else: return False
def unpack(source):
"""Unpacks P.A.C.K.E.R. packed js code."""
payload, symtab, radix, count = _filterargs(source)
if count != len(symtab):
raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
try:
unbase = Unbaser(radix)
except TypeError:
raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')
def lookup(match):
"""Look up symbols in the synthetic symtab."""
word = match.group(0)
return symtab[unbase(word)] or word
source = re.sub(r'\b\w+\b', lookup, payload)
return _replacestrings(source)
def _filterargs(source):
"""Juice from a source file the four args needed by decoder."""
argsregex = (r"}\('(.*)', *(\d+), *(\d+), *'(.*?)'\.split\('\|'\)")
args = re.search(argsregex, source, re.DOTALL).groups()
try:
return args[0], args[3].split('|'), int(args[1]), int(args[2])
except ValueError:
raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source
class Unbaser(object):
"""Functor for a given base. Will efficiently convert
strings to natural numbers."""
ALPHABET = {
52 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP',
54 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR',
62 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
95 : (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'[\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
}
def __init__(self, base):
self.base = base
# If base can be handled by int() builtin, let it do it for us
if 2 <= base <= 36:
self.unbase = lambda string: int(string, base)
else:
# Build conversion dictionary cache
try:
self.dictionary = dict((cipher, index) for
index, cipher in enumerate(self.ALPHABET[base]))
except KeyError:
raise TypeError('Unsupported base encoding.')
self.unbase = self._dictunbaser
def __call__(self, string):
return self.unbase(string)
def _dictunbaser(self, string):
"""Decodes a value to an integer."""
ret = 0
for index, cipher in enumerate(string[::-1]):
ret += (self.base ** index) * self.dictionary[cipher]
return ret
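# A short usage sketch for Unbaser, assuming the class is used on its own:
# bases that the int() builtin can handle are delegated to it, while larger
# bases go through the ALPHABET lookup table. The base-62 value below was
# worked out by hand from the alphabet ('1' -> 1, 'c' -> 12, so 1*62 + 12 = 74).
#
#   Unbaser(16)('ff')   # -> 255, delegated to int('ff', 16)
#   Unbaser(62)('1c')   # -> 74, via the dictionary-based decoder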
class UnpackingError(Exception):
"""Badly packed source or general error. Argument is a
meaningful description."""
pass
if __name__ == "__main__":
test='''eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('4(\'30\').2z({2y:\'5://a.8.7/i/z/y/w.2x\',2w:{b:\'2v\',19:\'<p><u><2 d="20" c="#17">2u 19.</2></u><16/><u><2 d="18" c="#15">2t 2s 2r 2q.</2></u></p>\',2p:\'<p><u><2 d="20" c="#17">2o 2n b.</2></u><16/><u><2 d="18" c="#15">2m 2l 2k 2j.</2></u></p>\',},2i:\'2h\',2g:[{14:"11",b:"5://a.8.7/2f/13.12"},{14:"2e",b:"5://a.8.7/2d/13.12"},],2c:"11",2b:[{10:\'2a\',29:\'5://v.8.7/t-m/m.28\'},{10:\'27\'}],26:{\'25-3\':{\'24\':{\'23\':22,\'21\':\'5://a.8.7/i/z/y/\',\'1z\':\'w\',\'1y\':\'1x\'}}},s:\'5://v.8.7/t-m/s/1w.1v\',1u:"1t",1s:"1r",1q:\'1p\',1o:"1n",1m:"1l",1k:\'5\',1j:\'o\',});l e;l k=0;l 6=0;4().1i(9(x){f(6>0)k+=x.r-6;6=x.r;f(q!=0&&k>=q){6=-1;4().1h();4().1g(o);$(\'#1f\').j();$(\'h.g\').j()}});4().1e(9(x){6=-1});4().1d(9(x){n(x)});4().1c(9(){$(\'h.g\').j()});9 n(x){$(\'h.g\').1b();f(e)1a;e=1;}',36,109,'||font||jwplayer|http|p0102895|me|vidto|function|edge3|file|color|size|vvplay|if|video_ad|div||show|tt102895|var|player|doPlay|false||21600|position|skin|test||static|1y7okrqkv4ji||00020|01|type|360p|mp4|video|label|FFFFFF|br|FF0000||deleted|return|hide|onComplete|onPlay|onSeek|play_limit_box|setFullscreen|stop|onTime|dock|provider|391|height|650|width|over|controlbar|5110|duration|uniform|stretching|zip|stormtrooper|213|frequency|prefix||path|true|enabled|preview|timeslidertooltipplugin|plugins|html5|swf|src|flash|modes|hd_default|3bjhohfxpiqwws4phvqtsnolxocychumk274dsnkblz6sfgq6uz6zt77gxia|240p|3bjhohfxpiqwws4phvqtsnolxocychumk274dsnkba36sfgq6uzy3tv2oidq|hd|original|ratio|broken|is|link|Your|such|No|nofile|more|any|availabe|Not|File|OK|previw|jpg|image|setup|flvplayer'.split('|')))'''
|
FLYKingdom/MyCode
|
refs/heads/master
|
PycharmProjects/PythonTest/venv/lib/python2.7/site-packages/pip/_vendor/progress/counter.py
|
510
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
message = ''
hide_cursor = True
def update(self):
self.write(str(self.index))
class Countdown(WriteMixin, Progress):
hide_cursor = True
def update(self):
self.write(str(self.remaining))
class Stack(WriteMixin, Progress):
phases = (u' ', u'▁', u'▂', u'▃', u'▄', u'▅', u'▆', u'▇', u'█')
hide_cursor = True
def update(self):
nphases = len(self.phases)
i = min(nphases - 1, int(self.progress * nphases))
self.write(self.phases[i])
class Pie(Stack):
phases = (u'○', u'◔', u'◑', u'◕', u'●')
|
google/llvm-propeller
|
refs/heads/bb-clusters
|
lldb/test/API/functionalities/thread_plan/TestThreadPlanCommands.py
|
8
|
"""
Test that thread plan listing and deleting work.
"""
import lldb
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class TestThreadPlanCommands(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@skipIfWindows
def test_thread_plan_actions(self):
self.build()
self.main_source_file = lldb.SBFileSpec("main.c")
self.thread_plan_test()
def check_list_output(self, command, active_plans = [], completed_plans = [], discarded_plans = []):
# Check the "thread plan list" output against a list of active & completed and discarded plans.
# If all three check arrays are empty, that means the command is expected to fail.
interp = self.dbg.GetCommandInterpreter()
result = lldb.SBCommandReturnObject()
num_active = len(active_plans)
num_completed = len(completed_plans)
num_discarded = len(discarded_plans)
interp.HandleCommand(command, result)
print("Command: %s"%(command))
print(result.GetOutput())
if num_active == 0 and num_completed == 0 and num_discarded == 0:
self.assertFalse(result.Succeeded(), "command: '%s' succeeded when it should have failed: '%s'"%
(command, result.GetError()))
return
self.assertTrue(result.Succeeded(), "command: '%s' failed: '%s'"%(command, result.GetError()))
result_arr = result.GetOutput().splitlines()
num_results = len(result_arr)
# Now iterate through the results array and pick out the results.
result_idx = 0
self.assertIn("thread #", result_arr[result_idx], "Found thread header") ; result_idx += 1
self.assertIn("Active plan stack", result_arr[result_idx], "Found active header") ; result_idx += 1
self.assertIn("Element 0: Base thread plan", result_arr[result_idx], "Found base plan") ; result_idx += 1
for text in active_plans:
self.assertIn(text, result_arr[result_idx], "Didn't find active plan: %s"%(text)) ; result_idx += 1
if len(completed_plans) > 0:
# First consume any remaining active plans:
while not "Completed plan stack:" in result_arr[result_idx]:
result_idx += 1
if result_idx == num_results:
self.fail("There should have been completed plans, but I never saw the completed stack header")
# We are at the Completed header, skip it:
result_idx += 1
for text in completed_plans:
self.assertIn(text, result_arr[result_idx], "Didn't find completed plan: %s"%(text)) ; result_idx += 1
if len(discarded_plans) > 0:
# First consume any remaining completed plans:
while not "Discarded plan stack:" in result_arr[result_idx]:
result_idx += 1
if result_idx == num_results:
self.fail("There should have been discarded plans, but I never saw the discarded stack header")
# We are at the Discarded header, skip it:
result_idx += 1
for text in discarded_plans:
self.assertIn(text, result_arr[result_idx], "Didn't find discarded plan: %s"%(text)) ; result_idx += 1
def thread_plan_test(self):
(target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
"Set a breakpoint here", self.main_source_file)
# We need to have an internal plan so we can test listing one.
# The most consistent way to do that is to use a scripted thread plan
# that uses a sub-plan. Source that in now.
source_path = os.path.join(self.getSourceDir(), "wrap_step_over.py")
self.runCmd("command script import '%s'"%(source_path))
# Now set a breakpoint that we will hit by running our scripted step.
call_me_bkpt = target.BreakpointCreateBySourceRegex("Set another here", self.main_source_file)
self.assertTrue(call_me_bkpt.GetNumLocations() > 0, "Set the breakpoint successfully")
thread.StepUsingScriptedThreadPlan("wrap_step_over.WrapStepOver")
threads = lldbutil.get_threads_stopped_at_breakpoint(process, call_me_bkpt)
self.assertEqual(len(threads), 1, "Hit my breakpoint while stepping over")
current_id = threads[0].GetIndexID()
current_tid = threads[0].GetThreadID()
# Run thread plan list without the -i flag:
command = "thread plan list %d"%(current_id)
self.check_list_output (command, ["wrap_step_over.WrapStepOver"], [])
# Run thread plan list with the -i flag:
command = "thread plan list -i %d"%(current_id)
self.check_list_output(command, ["WrapStepOver", "Stepping over line main.c"])
# Run thread plan list providing TID, output should be the same:
command = "thread plan list -t %d"%(current_tid)
self.check_list_output(command, ["wrap_step_over.WrapStepOver"])
# Provide both index & tid, and make sure we only print once:
command = "thread plan list -t %d %d"%(current_tid, current_id)
self.check_list_output(command, ["wrap_step_over.WrapStepOver"])
# Try a fake TID, and make sure that fails:
fake_tid = 0
for i in range(100, 10000, 100):
fake_tid = current_tid + i
thread = process.GetThreadByID(fake_tid)
if not thread:
break
command = "thread plan list -t %d"%(fake_tid)
self.check_list_output(command)
# Now continue, and make sure we printed the completed plan:
process.Continue()
threads = lldbutil.get_stopped_threads(process, lldb.eStopReasonPlanComplete)
self.assertEqual(len(threads), 1, "One thread completed a step")
# Run thread plan list - there aren't any private plans at this point:
command = "thread plan list %d"%(current_id)
self.check_list_output(command, [], ["wrap_step_over.WrapStepOver"])
# Set another breakpoint that we can run to, to try deleting thread plans.
second_step_bkpt = target.BreakpointCreateBySourceRegex("Run here to step over again",
self.main_source_file)
self.assertTrue(second_step_bkpt.GetNumLocations() > 0, "Set the breakpoint successfully")
final_bkpt = target.BreakpointCreateBySourceRegex("Make sure we get here on last continue",
self.main_source_file)
self.assertTrue(final_bkpt.GetNumLocations() > 0, "Set the breakpoint successfully")
threads = lldbutil.continue_to_breakpoint(process, second_step_bkpt)
self.assertEqual(len(threads), 1, "Hit the second step breakpoint")
threads[0].StepOver()
threads = lldbutil.get_threads_stopped_at_breakpoint(process, call_me_bkpt)
result = lldb.SBCommandReturnObject()
interp = self.dbg.GetCommandInterpreter()
interp.HandleCommand("thread plan discard 1", result)
self.assertTrue(result.Succeeded(), "Deleted the step over plan: %s"%(result.GetOutput()))
# Make sure the plan gets listed in the discarded plans:
command = "thread plan list %d"%(current_id)
self.check_list_output(command, [], [], ["Stepping over line main.c:"])
process.Continue()
threads = lldbutil.get_threads_stopped_at_breakpoint(process, final_bkpt)
self.assertEqual(len(threads), 1, "Ran to final breakpoint")
threads = lldbutil.get_stopped_threads(process, lldb.eStopReasonPlanComplete)
self.assertEqual(len(threads), 0, "Did NOT complete the step over plan")
|
sparcs-kaist/olim
|
refs/heads/master
|
olim/olim/apps/account/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
fuhongliang/odoo
|
refs/heads/8.0
|
addons/multi_company/__openerp__.py
|
259
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multi-Company',
'version': '1.0',
'category': 'Tools',
'description': """
This module is for managing a multicompany environment.
=======================================================
This module is the base module for other multi-company modules.
""",
'author': 'OpenERP SA,SYLEAM',
'website': 'https://www.odoo.com',
'depends': [
'base',
'sale_stock',
'project',
],
'data': ['res_company_view.xml'],
'demo': ['multi_company_demo.xml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
wehriam/awspider
|
refs/heads/master
|
static_analysis/pep8.py
|
48
|
#!/usr/bin/python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Check Python source code formatting, according to PEP 8:
http://www.python.org/dev/peps/pep-0008/
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
http://svn.browsershots.org/trunk/devtools/pep8/
http://trac.browsershots.org/browser/trunk/devtools/pep8/
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
You can add checks to this program by writing plugins. Each plugin is
a simple function that is called for each line of source code, either
physical or logical.
Physical line:
- Raw line of text from the input file.
Logical line:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with 'xxx' of same length.
- Comments removed.
The check function requests physical or logical lines by the name of
the first argument:
def maximum_line_length(physical_line)
def extraneous_whitespace(logical_line)
def blank_lines(logical_line, blank_lines, indent_level, line_number)
The last example above demonstrates how check plugins can request
additional information with extra arguments. All attributes of the
Checker object are available. Some examples:
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
blank_lines: blank lines before this one
indent_char: first indentation character in this file (' ' or '\t')
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
The docstring of each check function shall be the relevant part of
text from PEP 8. It is printed if the user enables --show-pep8.
"""
import os
import sys
import re
import time
import inspect
import tokenize
from optparse import OptionParser
from keyword import iskeyword
from fnmatch import fnmatch
__version__ = '0.2.0'
__revision__ = '$Rev$'
default_exclude = '.svn,CVS,*.pyc,*.pyo'
indent_match = re.compile(r'([ \t]*)').match
raise_comma_match = re.compile(r'raise\s+\w+\s*(,)').match
operators = """
+ - * / % ^ & | = < > >> <<
+= -= *= /= %= ^= &= |= == <= >= >>= <<=
!= <> :
in is or not and
""".split()
options = None
args = None
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
"""
Never mix tabs and spaces.
The most popular way of indenting Python is with spaces only. The
second-most popular way is with tabs only. Code indented with a mixture
of tabs and spaces should be converted to using spaces exclusively. When
invoking the Python command line interpreter with the -t option, it issues
warnings about code that illegally mixes tabs and spaces. When using -tt
these warnings become errors. These options are highly recommended!
"""
indent = indent_match(physical_line).group(1)
for offset, char in enumerate(indent):
if char != indent_char:
return offset, "E101 indentation contains mixed spaces and tabs"
def tabs_obsolete(physical_line):
"""
For new projects, spaces-only are strongly recommended over tabs. Most
editors have features that make this easy to do.
"""
indent = indent_match(physical_line).group(1)
if indent.count('\t'):
return indent.index('\t'), "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
"""
JCR: Trailing whitespace is superfluous.
"""
physical_line = physical_line.rstrip('\n') # chr(10), newline
physical_line = physical_line.rstrip('\r') # chr(13), carriage return
physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L
stripped = physical_line.rstrip()
if physical_line != stripped:
return len(stripped), "W291 trailing whitespace"
def trailing_blank_lines(physical_line, lines, line_number):
"""
JCR: Trailing blank lines are superfluous.
"""
if physical_line.strip() == '' and line_number == len(lines):
return 0, "W391 blank line at end of file"
def missing_newline(physical_line):
"""
JCR: The last line should have a newline.
"""
if physical_line.rstrip() == physical_line:
return len(physical_line), "W292 no newline at end of file"
def maximum_line_length(physical_line):
"""
Limit all lines to a maximum of 79 characters.
There are still many devices around that are limited to 80 character
lines; plus, limiting windows to 80 characters makes it possible to have
several windows side-by-side. The default wrapping on such devices looks
ugly. Therefore, please limit all lines to a maximum of 79 characters.
For flowing long blocks of text (docstrings or comments), limiting the
length to 72 characters is recommended.
"""
length = len(physical_line.rstrip())
if length > 79:
return 79, "E501 line too long (%d characters)" % length
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
previous_logical):
"""
Separate top-level function and class definitions with two blank lines.
Method definitions inside a class are separated by a single blank line.
Extra blank lines may be used (sparingly) to separate groups of related
functions. Blank lines may be omitted between a bunch of related
one-liners (e.g. a set of dummy implementations).
Use blank lines in functions, sparingly, to indicate logical sections.
"""
if line_number == 1:
return # Don't expect blank lines before the first line
if previous_logical.startswith('@'):
return # Don't expect blank lines after function decorator
if (logical_line.startswith('def ') or
logical_line.startswith('class ') or
logical_line.startswith('@')):
if indent_level > 0 and blank_lines != 1:
return 0, "E301 expected 1 blank line, found %d" % blank_lines
if indent_level == 0 and blank_lines != 2:
return 0, "E302 expected 2 blank lines, found %d" % blank_lines
if blank_lines > 2:
return 0, "E303 too many blank lines (%d)" % blank_lines
def extraneous_whitespace(logical_line):
"""
Avoid extraneous whitespace in the following situations:
- Immediately inside parentheses, brackets or braces.
- Immediately before a comma, semicolon, or colon.
"""
line = logical_line
for char in '([{':
found = line.find(char + ' ')
if found > -1:
return found + 1, "E201 whitespace after '%s'" % char
for char in '}])':
found = line.find(' ' + char)
if found > -1 and line[found - 1] != ',':
return found, "E202 whitespace before '%s'" % char
for char in ',;:':
found = line.find(' ' + char)
if found > -1:
return found, "E203 whitespace before '%s'" % char
def missing_whitespace(logical_line):
"""
JCR: Each comma, semicolon or colon should be followed by whitespace.
"""
line = logical_line
for index in range(len(line) - 1):
char = line[index]
if char in ',;:' and line[index + 1] != ' ':
before = line[:index]
if char == ':' and before.count('[') > before.count(']'):
continue # Slice syntax, no space required
return index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
indent_level, previous_indent_level):
"""
Use 4 spaces per indentation level.
For really old code that you don't want to mess up, you can continue to
use 8-space tabs.
"""
if indent_char == ' ' and indent_level % 4:
return 0, "E111 indentation is not a multiple of four"
indent_expect = previous_logical.endswith(':')
if indent_expect and indent_level <= previous_indent_level:
return 0, "E112 expected an indented block"
if indent_level > previous_indent_level and not indent_expect:
return 0, "E113 unexpected indentation"
def whitespace_before_parameters(logical_line, tokens):
"""
Avoid extraneous whitespace in the following situations:
- Immediately before the open parenthesis that starts the argument
list of a function call.
- Immediately before the open parenthesis that starts an indexing or
slicing.
"""
prev_type = tokens[0][0]
prev_text = tokens[0][1]
prev_end = tokens[0][3]
for index in range(1, len(tokens)):
token_type, text, start, end, line = tokens[index]
if (token_type == tokenize.OP and
text in '([' and
start != prev_end and
prev_type == tokenize.NAME and
(index < 2 or tokens[index - 2][1] != 'class') and
(not iskeyword(prev_text))):
return prev_end, "E211 whitespace before '%s'" % text
prev_type = token_type
prev_text = text
prev_end = end
def whitespace_around_operator(logical_line):
"""
Avoid extraneous whitespace in the following situations:
- More than one space around an assignment (or other) operator to
align it with another.
"""
line = logical_line
for operator in operators:
found = line.find(' ' + operator)
if found > -1:
return found, "E221 multiple spaces before operator"
found = line.find(operator + ' ')
if found > -1:
return found, "E222 multiple spaces after operator"
found = line.find('\t' + operator)
if found > -1:
return found, "E223 tab before operator"
found = line.find(operator + '\t')
if found > -1:
return found, "E224 tab after operator"
def whitespace_around_comma(logical_line):
"""
Avoid extraneous whitespace in the following situations:
- More than one space around an assignment (or other) operator to
align it with another.
JCR: This should also be applied around comma etc.
"""
line = logical_line
for separator in ',;:':
found = line.find(separator + ' ')
if found > -1:
return found + 1, "E241 multiple spaces after '%s'" % separator
found = line.find(separator + '\t')
if found > -1:
return found + 1, "E242 tab after '%s'" % separator
def imports_on_separate_lines(logical_line):
"""
Imports should usually be on separate lines.
"""
line = logical_line
if line.startswith('import '):
found = line.find(',')
if found > -1:
return found, "E401 multiple imports on one line"
def compound_statements(logical_line):
"""
Compound statements (multiple statements on the same line) are
generally discouraged.
"""
line = logical_line
found = line.find(':')
if -1 < found < len(line) - 1:
before = line[:found]
if (before.count('{') <= before.count('}') and # {'a': 1} (dict)
before.count('[') <= before.count(']') and # [1:2] (slice)
not re.search(r'\blambda\b', before)): # lambda x: x
return found, "E701 multiple statements on one line (colon)"
found = line.find(';')
if -1 < found:
return found, "E702 multiple statements on one line (semicolon)"
def python_3000_has_key(logical_line):
"""
The {}.has_key() method will be removed in the future version of
Python. Use the 'in' operation instead, like:
d = {"a": 1, "b": 2}
if "b" in d:
print d["b"]
"""
pos = logical_line.find('.has_key(')
if pos > -1:
return pos, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
"""
When raising an exception, use "raise ValueError('message')"
instead of the older form "raise ValueError, 'message'".
The paren-using form is preferred because when the exception arguments
are long or include string formatting, you don't need to use line
continuation characters thanks to the containing parentheses. The older
form will be removed in Python 3000.
"""
match = raise_comma_match(logical_line)
if match:
return match.start(1), "W602 deprecated form of raising exception"
##############################################################################
# Helper functions
##############################################################################
def expand_indent(line):
"""
Return the amount of indentation.
Tabs are expanded to the next multiple of 8.
>>> expand_indent(' ')
4
>>> expand_indent('\\t')
8
>>> expand_indent(' \\t')
8
>>> expand_indent(' \\t')
8
>>> expand_indent(' \\t')
16
"""
result = 0
for char in line:
if char == '\t':
result = result / 8 * 8 + 8
elif char == ' ':
result += 1
else:
break
return result
##############################################################################
# Framework to run all checks
##############################################################################
def message(text):
"""Print a message."""
# print >> sys.stderr, options.prog + ': ' + text
# print >> sys.stderr, text
print text
def find_checks(argument_name):
"""
Find all globally visible functions where the first argument name
starts with argument_name.
"""
checks = []
function_type = type(find_checks)
for name, function in globals().iteritems():
if type(function) is function_type:
args = inspect.getargspec(function)[0]
if len(args) >= 1 and args[0].startswith(argument_name):
checks.append((name, function, args))
checks.sort()
return checks
def mute_string(text):
"""
Replace contents with 'xxx' to prevent syntax matching.
>>> mute_string('"abc"')
'"xxx"'
>>> mute_string("'''abc'''")
"'''xxx'''"
>>> mute_string("r'abc'")
"r'xxx'"
"""
start = 1
end = len(text) - 1
# String modifiers (e.g. u or r)
if text.endswith('"'):
start += text.index('"')
elif text.endswith("'"):
start += text.index("'")
# Triple quotes
if text.endswith('"""') or text.endswith("'''"):
start += 2
end -= 2
return text[:start] + 'x' * (end - start) + text[end:]
class Checker:
"""
Load a Python source file, tokenize it, check coding style.
"""
def __init__(self, filename):
self.filename = filename
self.lines = file(filename).readlines()
self.physical_checks = find_checks('physical_line')
self.logical_checks = find_checks('logical_line')
options.counters['physical lines'] = \
options.counters.get('physical lines', 0) + len(self.lines)
def readline(self):
"""
Get the next line from the input buffer.
"""
self.line_number += 1
if self.line_number > len(self.lines):
return ''
return self.lines[self.line_number - 1]
def readline_check_physical(self):
"""
Check and return the next physical line. This method can be
used to feed tokenize.generate_tokens.
"""
line = self.readline()
if line:
self.check_physical(line)
return line
def run_check(self, check, argument_names):
"""
Run a check plugin.
"""
arguments = []
for name in argument_names:
arguments.append(getattr(self, name))
return check(*arguments)
def check_physical(self, line):
"""
Run all physical checks on a raw input line.
"""
self.physical_line = line
if self.indent_char is None and len(line) and line[0] in ' \t':
self.indent_char = line[0]
for name, check, argument_names in self.physical_checks:
result = self.run_check(check, argument_names)
if result is not None:
offset, text = result
self.report_error(self.line_number, offset, text, check)
def build_tokens_line(self):
"""
Build a logical line from tokens.
"""
self.mapping = []
logical = []
length = 0
previous = None
for token in self.tokens:
token_type, text = token[0:2]
if token_type in (tokenize.COMMENT, tokenize.NL,
tokenize.INDENT, tokenize.DEDENT,
tokenize.NEWLINE):
continue
if token_type == tokenize.STRING:
text = mute_string(text)
if previous:
end_line, end = previous[3]
start_line, start = token[2]
if end_line != start_line: # different row
if self.lines[end_line - 1][end - 1] not in '{[(':
logical.append(' ')
length += 1
elif end != start: # different column
fill = self.lines[end_line - 1][end:start]
logical.append(fill)
length += len(fill)
self.mapping.append((length, token))
logical.append(text)
length += len(text)
previous = token
self.logical_line = ''.join(logical)
assert self.logical_line.lstrip() == self.logical_line
assert self.logical_line.rstrip() == self.logical_line
def check_logical(self):
"""
Build a line from tokens and run all logical checks on it.
"""
options.counters['logical lines'] = \
options.counters.get('logical lines', 0) + 1
self.build_tokens_line()
first_line = self.lines[self.mapping[0][1][2][0] - 1]
indent = first_line[:self.mapping[0][1][2][1]]
self.previous_indent_level = self.indent_level
self.indent_level = expand_indent(indent)
if options.verbose >= 2:
print self.logical_line[:80].rstrip()
for name, check, argument_names in self.logical_checks:
if options.verbose >= 3:
print ' ', name
result = self.run_check(check, argument_names)
if result is not None:
offset, text = result
if type(offset) is tuple:
original_number, original_offset = offset
else:
for token_offset, token in self.mapping:
if offset >= token_offset:
original_number = token[2][0]
original_offset = (token[2][1]
+ offset - token_offset)
self.report_error(original_number, original_offset,
text, check)
self.previous_logical = self.logical_line
def check_all(self):
"""
Run all checks on the input file.
"""
self.file_errors = 0
self.line_number = 0
self.indent_char = None
self.indent_level = 0
self.previous_logical = ''
self.blank_lines = 0
self.tokens = []
parens = 0
for token in tokenize.generate_tokens(self.readline_check_physical):
# print tokenize.tok_name[token[0]], repr(token)
self.tokens.append(token)
token_type, text = token[0:2]
if token_type == tokenize.OP and text in '([{':
parens += 1
if token_type == tokenize.OP and text in '}])':
parens -= 1
if token_type == tokenize.NEWLINE and not parens:
self.check_logical()
self.blank_lines = 0
self.tokens = []
if token_type == tokenize.NL and not parens:
self.blank_lines += 1
self.tokens = []
if token_type == tokenize.COMMENT:
source_line = token[4]
token_start = token[2][1]
if source_line[:token_start].strip() == '':
self.blank_lines = 0
return self.file_errors
def report_error(self, line_number, offset, text, check):
"""
Report an error, according to options.
"""
if options.quiet == 1 and not self.file_errors:
message(self.filename)
self.file_errors += 1
code = text[:4]
options.counters[code] = options.counters.get(code, 0) + 1
options.messages[code] = text[5:]
if options.quiet:
return
if options.testsuite:
base = os.path.basename(self.filename)[:4]
if base == code:
return
if base[0] == 'E' and code[0] == 'W':
return
if ignore_code(code):
return
if options.counters[code] == 1 or options.repeat:
message("%s:%s:%d: %s" %
(self.filename, line_number, offset + 1, text))
if options.show_source:
line = self.lines[line_number - 1]
message(line.rstrip())
message(' ' * offset + '^')
if options.show_pep8:
message(check.__doc__.lstrip('\n').rstrip())
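# A minimal usage sketch (not part of the original pep8 module). The helper
# name and its argument are hypothetical; it assumes 'path' is an existing
# Python source file and that the global options have not been set up yet.
def _example_check_one_file(path):
    """
    Initialise the global options exactly as the command line would, then
    run every physical and logical check on a single file and return the
    number of errors found.
    """
    process_options([path])
    return Checker(path).check_all()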
def input_file(filename):
"""
Run all checks on a Python source file.
"""
if excluded(filename) or not filename_match(filename):
return {}
if options.verbose:
message('checking ' + filename)
options.counters['files'] = options.counters.get('files', 0) + 1
errors = Checker(filename).check_all()
if options.testsuite and not errors:
message("%s: %s" % (filename, "no errors found"))
def input_dir(dirname):
"""
Check all Python source files in this directory and all subdirectories.
"""
dirname = dirname.rstrip('/')
if excluded(dirname):
return
for root, dirs, files in os.walk(dirname):
if options.verbose:
message('directory ' + root)
options.counters['directories'] = \
options.counters.get('directories', 0) + 1
dirs.sort()
        # Iterate over a copy so that removing an excluded entry does not
        # skip the directory that follows it in the list.
        for subdir in dirs[:]:
            if excluded(subdir):
                dirs.remove(subdir)
files.sort()
for filename in files:
input_file(os.path.join(root, filename))
def excluded(filename):
"""
Check if options.exclude contains a pattern that matches filename.
"""
basename = os.path.basename(filename)
for pattern in options.exclude:
if fnmatch(basename, pattern):
# print basename, 'excluded because it matches', pattern
return True
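# Illustrative sketch, not part of the original module: with the default
# exclude patterns (which in this module cover version-control directories
# such as '.svn' and 'CVS'),
#     excluded('trunk/.svn')       returns True  (basename matches '.svn')
#     excluded('trunk/module.py')  returns None  (treated as false)
# Only the basename is compared against each fnmatch pattern.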
def filename_match(filename):
"""
Check if options.filename contains a pattern that matches filename.
If options.filename is unspecified, this always returns True.
"""
if not options.filename:
return True
for pattern in options.filename:
if fnmatch(filename, pattern):
return True
def ignore_code(code):
"""
Check if options.ignore contains a prefix of the error code.
"""
for ignore in options.ignore:
if code.startswith(ignore):
return True
def get_error_statistics():
"""Get error statistics."""
return get_statistics("E")
def get_warning_statistics():
"""Get warning statistics."""
return get_statistics("W")
def get_statistics(prefix=''):
"""
Get statistics for message codes that start with the prefix.
prefix='' matches all errors and warnings
prefix='E' matches all errors
prefix='W' matches all warnings
prefix='E4' matches all errors that have to do with imports
"""
stats = []
keys = options.messages.keys()
keys.sort()
for key in keys:
if key.startswith(prefix):
stats.append('%-7s %s %s' %
(options.counters[key], key, options.messages[key]))
return stats
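# Illustrative sketch, not part of the original module: after a run that hit
# three E201 errors and one W291 warning, get_statistics('E') would return
# something like
#     ["3       E201 whitespace after '('"]
# while get_statistics() would also include the W291 entry. Each line is
# '<count> <code> <message>' built from options.counters and options.messages.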
def print_statistics(prefix=''):
"""Print overall statistics (number of errors and warnings)."""
for line in get_statistics(prefix):
print line
def print_benchmark(elapsed):
"""
Print benchmark numbers.
"""
print '%-7.2f %s' % (elapsed, 'seconds elapsed')
keys = ['directories', 'files',
'logical lines', 'physical lines']
for key in keys:
if key in options.counters:
print '%-7d %s per second (%d total)' % (
options.counters[key] / elapsed, key,
options.counters[key])
def process_options(arglist=None):
"""
Process options passed either via arglist or via command line args.
"""
global options, args
usage = "%prog [options] input ..."
parser = OptionParser(usage)
parser.add_option('-v', '--verbose', default=0, action='count',
help="print status messages, or debug with -vv")
parser.add_option('-q', '--quiet', default=0, action='count',
help="report only file names, or nothing with -qq")
parser.add_option('--exclude', metavar='patterns', default=default_exclude,
help="skip matches (default %s)" % default_exclude)
parser.add_option('--filename', metavar='patterns',
help="only check matching files (e.g. *.py)")
parser.add_option('--ignore', metavar='errors', default='',
help="skip errors and warnings (e.g. E4,W)")
parser.add_option('--repeat', action='store_true',
help="show all occurrences of the same error")
parser.add_option('--show-source', action='store_true',
help="show source code for each error")
parser.add_option('--show-pep8', action='store_true',
help="show text of PEP 8 for each error")
parser.add_option('--statistics', action='store_true',
help="count errors and warnings")
parser.add_option('--benchmark', action='store_true',
help="measure processing speed")
parser.add_option('--testsuite', metavar='dir',
help="run regression tests from dir")
parser.add_option('--doctest', action='store_true',
help="run doctest on myself")
options, args = parser.parse_args(arglist)
if options.testsuite:
args.append(options.testsuite)
if len(args) == 0:
parser.error('input not specified')
options.prog = os.path.basename(sys.argv[0])
options.exclude = options.exclude.split(',')
for index in range(len(options.exclude)):
options.exclude[index] = options.exclude[index].rstrip('/')
if options.filename:
options.filename = options.filename.split(',')
if options.ignore:
options.ignore = options.ignore.split(',')
else:
options.ignore = []
options.counters = {}
options.messages = {}
return options, args
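# Illustrative sketch, not part of the original module: process_options() can
# be driven programmatically instead of from sys.argv, e.g.
#     options, args = process_options(['--statistics', '--repeat', 'mypkg'])
# after which input_dir(args[0]) (or input_file() for a single file) runs the
# checks and print_statistics() summarises the per-code error counts.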
def _main():
"""
Parse options and run checks on Python source.
"""
options, args = process_options()
if options.doctest:
import doctest
return doctest.testmod()
start_time = time.time()
for path in args:
if os.path.isdir(path):
input_dir(path)
else:
input_file(path)
elapsed = time.time() - start_time
if options.statistics:
print_statistics()
if options.benchmark:
print_benchmark(elapsed)
if __name__ == '__main__':
_main()
|
amith01994/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/db/models/sql/query.py
|
72
|
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from django.utils.copycompat import deepcopy
from django.utils.tree import Node
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import select_related_descend, InvalidQuery
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR)
from django.core.exceptions import FieldError
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
"""
A single raw SQL query
"""
def __init__(self, sql, using, params=None):
self.validate_sql(sql)
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.aggregate_select = {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.table_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def validate_sql(self, sql):
if not sql.lower().strip().startswith('select'):
raise InvalidQuery('Raw queries are limited to SELECT queries. Use '
'connection.cursor directly for other types of queries.')
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<RawQuery: %r>" % (self.sql % self.params)
def _execute_query(self):
self.cursor = connections[self.using].cursor()
self.cursor.execute(self.sql, self.params)
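# A minimal usage sketch (not part of the original module). The helper name
# and the sample SQL are hypothetical; it assumes Django settings and the
# target database connection are already configured, and that the trivial
# SELECT below is valid for that backend.
def _example_raw_query(using=DEFAULT_DB_ALIAS):
    raw = RawQuery("SELECT 1 AS answer", using)
    columns = raw.get_columns()   # executes the query and reads cursor.description
    rows = list(raw)              # iterating executes the query again
    return columns, rows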
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
self.alias_map = {} # Maps alias to join information
self.table_map = {} # Maps table names to list of aliases.
self.join_map = {}
self.rev_join_map = {} # Reverse of join_map.
self.quote_cache = {}
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.ordering_aliases = []
self.select_fields = []
self.related_select_fields = []
self.dupe_avoidance = {}
self.used_aliases = set()
self.filter_is_sticky = False
self.included_inherited_models = {}
# SQL-related attributes
self.select = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
self.group_by = None
self.having = where()
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.select_related = False
self.related_select_cols = []
# SQL aggregate-related attributes
self.aggregates = SortedDict() # Maps alias -> SQL aggregate function
self.aggregate_select_mask = None
self._aggregate_select_cache = None
# Arbitrary maximum limit for select_related. Prevents infinite
# recursion. Can be changed by the depth parameter to select_related().
self.max_depth = 5
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = SortedDict() # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in.
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
return sql % params
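    # Illustrative sketch, not part of the original module: __str__ is what
    # makes the usual debugging idiom work, e.g.
    #     str(SomeModel.objects.filter(pk=1).query)
    # (SomeModel being any model) renders roughly
    #     SELECT ... FROM "app_somemodel" WHERE "app_somemodel"."id" = 1
    # which is convenient for inspection but, as noted above, not guaranteed
    # to be correctly quoted SQL.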
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def __getstate__(self):
"""
Pickling support.
"""
obj_dict = self.__dict__.copy()
obj_dict['related_select_fields'] = []
obj_dict['related_select_cols'] = []
# Fields can't be pickled, so if a field list has been
# specified, we pickle the list of field names instead.
# None is also a possible value; that can pass as-is
obj_dict['select_fields'] = [
f is not None and f.name or None
for f in obj_dict['select_fields']
]
return obj_dict
def __setstate__(self, obj_dict):
"""
Unpickling support.
"""
# Rebuild list of field instances
opts = obj_dict['model']._meta
obj_dict['select_fields'] = [
name is not None and opts.get_field(name) or None
for name in obj_dict['select_fields']
]
self.__dict__.update(obj_dict)
def prepare(self):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.table_map = self.table_map.copy()
obj.join_map = self.join_map.copy()
obj.rev_join_map = self.rev_join_map.copy()
obj.quote_cache = {}
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.included_inherited_models = self.included_inherited_models.copy()
obj.ordering_aliases = []
obj.select_fields = self.select_fields[:]
obj.related_select_fields = self.related_select_fields[:]
obj.dupe_avoidance = self.dupe_avoidance.copy()
obj.select = self.select[:]
obj.tables = self.tables[:]
obj.where = deepcopy(self.where, memo=memo)
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
else:
obj.group_by = self.group_by[:]
obj.having = deepcopy(self.having, memo=memo)
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.select_related = self.select_related
obj.related_select_cols = []
obj.aggregates = deepcopy(self.aggregates, memo=memo)
if self.aggregate_select_mask is None:
obj.aggregate_select_mask = None
else:
obj.aggregate_select_mask = self.aggregate_select_mask.copy()
# _aggregate_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both aggregates and
# _aggregate_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._aggregate_select_cache = None
obj.max_depth = self.max_depth
obj.extra = self.extra.copy()
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = deepcopy(self.deferred_loading, memo=memo)
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def resolve_aggregate(self, value, aggregate, connection):
"""Resolve the value of aggregates returned by the database to
consistent (and reasonable) types.
This is required because of the predisposition of certain backends
to return Decimal and long types when they are not needed.
"""
if value is None:
if aggregate.is_ordinal:
return 0
# Return None as-is
return value
elif aggregate.is_ordinal:
# Any ordinal aggregate (e.g., count) returns an int
return int(value)
elif aggregate.is_computed:
# Any computed aggregate (e.g., avg) returns a float
return float(value)
else:
# Return value depends on the type of the field being processed.
return self.convert_values(value, aggregate.field, connection)
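    # Illustrative sketch, not part of the original module, of how
    # resolve_aggregate() normalises backend return values:
    #     Count(...)   is_ordinal   -> int(value), with None coerced to 0
    #     Avg(...)     is_computed  -> float(value), with None left as None
    #     Max('price') (neither)    -> convert_values() using the field's type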
def get_aggregation(self, using):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.aggregate_select:
return {}
# If there is a group by clause, aggregating does not add useful
# information but retrieves only the first row. Aggregate
# over the subquery instead.
if self.group_by is not None:
from django.db.models.sql.subqueries import AggregateQuery
query = AggregateQuery(self.model)
obj = self.clone()
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
for alias, aggregate in self.aggregate_select.items():
if aggregate.is_summary:
query.aggregate_select[alias] = aggregate
del obj.aggregate_select[alias]
try:
query.add_subquery(obj, using)
except EmptyResultSet:
return dict(
(alias, None)
for alias in query.aggregate_select
)
else:
query = self
self.select = []
self.default_cols = False
self.extra = {}
self.remove_inherited_models()
query.clear_ordering(True)
query.clear_limits()
query.select_related = False
query.related_select_cols = []
query.related_select_fields = []
result = query.get_compiler(using).execute_sql(SINGLE)
if result is None:
result = [None for q in query.aggregate_select.items()]
return dict([
(alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
for (alias, aggregate), val
in zip(query.aggregate_select.items(), result)
])
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
if len(self.select) > 1 or self.aggregate_select:
# If a select clause exists, then the query has already started to
# specify the columns that are to be returned.
# In this case, we need to use a subquery to evaluate the count.
from django.db.models.sql.subqueries import AggregateQuery
subquery = obj
subquery.clear_ordering(True)
subquery.clear_limits()
obj = AggregateQuery(obj.model)
try:
obj.add_subquery(subquery, using=using)
except EmptyResultSet:
# add_subquery evaluates the query, if it's an EmptyResultSet
                # then there can be no results, and therefore the count is
                # obviously 0
return 0
obj.add_count_column()
number = obj.get_aggregation(using=using)[None]
# Apply offset and limit constraints manually, since using LIMIT/OFFSET
# in SQL (in variants that provide them) doesn't change the COUNT
# output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
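    # Illustrative sketch, not part of the original module: if the query has
    # been sliced to rows [5:15] (low_mark=5, high_mark=15) and the unsliced
    # count comes back as 12, get_count() returns
    #     min(max(0, 12 - 5), 15 - 5) == 7
    # i.e. the number of rows the slice would actually yield.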
def has_results(self, using):
q = self.clone()
q.add_extra({'a': 1}, None, None, None, None, None)
q.select = []
q.select_fields = []
q.default_cols = False
q.select_related = False
q.set_extra_mask(('a',))
q.set_aggregate_mask(())
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return bool(compiler.execute_sql(SINGLE))
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
        current query). 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
self.remove_inherited_models()
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
used = set()
conjunction = (connector == AND)
first = True
for alias in rhs.tables:
if not rhs.alias_refcount[alias]:
# An unused alias.
continue
promote = (rhs.alias_map[alias][JOIN_TYPE] == self.LOUTER)
new_alias = self.join(rhs.rev_join_map[alias],
(conjunction and not first), used, promote, not conjunction)
used.add(new_alias)
change_map[alias] = new_alias
first = False
# So that we don't exclude valid results in an "or" query combination,
# the first join that is exclusive to the lhs (self) must be converted
# to an outer join.
if not conjunction:
for alias in self.tables[1:]:
if self.alias_refcount[alias] == 1:
self.promote_alias(alias, True)
break
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
if rhs.where:
w = deepcopy(rhs.where)
w.relabel_aliases(change_map)
if not self.where:
# Since 'self' matches everything, add an explicit "include
# everything" where-constraint so that connections between the
# where clauses won't exclude valid results.
self.where.add(EverythingNode(), AND)
elif self.where:
# rhs has an empty where clause.
w = self.where_class()
w.add(EverythingNode(), AND)
else:
w = self.where_class()
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col in rhs.select:
if isinstance(col, (list, tuple)):
self.select.append((change_map.get(col[0], col[0]), col[1]))
else:
item = deepcopy(col)
item.relabel_aliases(change_map)
self.select.append(item)
self.select_fields = rhs.select_fields[:]
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialised on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
columns = set()
orig_opts = self.model._meta
seen = {}
must_include = {self.model: set([orig_opts.pk])}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field_by_name(name)[0]
cur_model = opts.get_field_by_name(name)[0].rel.to
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# to the things we select.
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field, model, _, _ = opts.get_field_by_name(parts[-1])
if model is None:
model = cur_model
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.iteritems():
for field, m in model._meta.get_fields_with_model():
if field in values:
continue
add_to_dict(workset, m or model, field)
for model, values in must_include.iteritems():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.iteritems():
callback(target, model, values)
else:
for model, values in must_include.iteritems():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in seen.iteritems():
callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
"""
Callback used by deferred_to_columns(). The "target" parameter should
be a set instance.
"""
table = model._meta.db_table
if table not in target:
target[table] = set()
for field in fields:
target[table].add(field.column)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
current = self.table_map.get(table_name)
if not create and current:
alias = current[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if current:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
current.append(alias)
else:
            # The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
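    # Illustrative sketch, not part of the original module: the first time a
    # table appears its own name is used as the alias, so
    #     table_alias('app_book')               -> ('app_book', True)
    # while a later call that is forced to create a fresh alias gets a
    # generated one such as
    #     table_alias('app_book', create=True)  -> ('T2', True)
    # (the exact number depends on how many aliases the query already has).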
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= 1
def promote_alias(self, alias, unconditional=False):
"""
Promotes the join type of an alias to an outer join if it's possible
for the join to contain NULL values on the left. If 'unconditional' is
False, the join is only promoted if it is nullable, otherwise it is
always promoted.
Returns True if the join was promoted.
"""
if ((unconditional or self.alias_map[alias][NULLABLE]) and
self.alias_map[alias][JOIN_TYPE] != self.LOUTER):
data = list(self.alias_map[alias])
data[JOIN_TYPE] = self.LOUTER
self.alias_map[alias] = tuple(data)
return True
return False
def promote_alias_chain(self, chain, must_promote=False):
"""
Walks along a chain of aliases, promoting the first nullable join and
any joins following that. If 'must_promote' is True, all the aliases in
the chain are promoted.
"""
for alias in chain:
if self.promote_alias(alias, must_promote):
must_promote = True
def promote_unused_aliases(self, initial_refcounts, used_aliases):
"""
Given a "before" copy of the alias_refcounts dictionary (as
'initial_refcounts') and a collection of aliases that may have been
changed or created, works out which aliases have been created since
then and which ones haven't been used and promotes all of those
aliases, plus any children of theirs in the alias tree, to outer joins.
"""
# FIXME: There's some (a lot of!) overlap with the similar OR promotion
# in add_filter(). It's not quite identical, but is very similar. So
# pulling out the common bits is something for later.
considered = {}
for alias in self.tables:
if alias not in used_aliases:
continue
if (alias not in initial_refcounts or
self.alias_refcount[alias] == initial_refcounts[alias]):
parent = self.alias_map[alias][LHS_ALIAS]
must_promote = considered.get(parent, False)
promoted = self.promote_alias(alias, must_promote)
considered[alias] = must_promote or promoted
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
# 1. Update references in "select" (normal columns plus aliases),
# "group by", "where" and "having".
self.where.relabel_aliases(change_map)
self.having.relabel_aliases(change_map)
for columns in [self.select, self.group_by or []]:
for pos, col in enumerate(columns):
if isinstance(col, (list, tuple)):
old_alias = col[0]
columns[pos] = (change_map.get(old_alias, old_alias), col[1])
else:
col.relabel_aliases(change_map)
for mapping in [self.aggregates]:
for key, col in mapping.items():
if isinstance(col, (list, tuple)):
old_alias = col[0]
mapping[key] = (change_map.get(old_alias, old_alias), col[1])
else:
col.relabel_aliases(change_map)
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.iteritems():
alias_data = list(self.alias_map[old_alias])
alias_data[RHS_ALIAS] = new_alias
t = self.rev_join_map[old_alias]
data = list(self.join_map[t])
data[data.index(old_alias)] = new_alias
self.join_map[t] = tuple(data)
self.rev_join_map[new_alias] = t
del self.rev_join_map[old_alias]
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
self.alias_map[new_alias] = tuple(alias_data)
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data[TABLE_NAME]]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
for pos, alias in enumerate(self.tables):
if alias == old_alias:
self.tables[pos] = new_alias
break
for key, alias in self.included_inherited_models.items():
if alias in change_map:
self.included_inherited_models[key] = change_map[alias]
# 3. Update any joins that refer to the old alias.
for alias, data in self.alias_map.iteritems():
lhs = data[LHS_ALIAS]
if lhs in change_map:
data = list(data)
data[LHS_ALIAS] = change_map[lhs]
self.alias_map[alias] = tuple(data)
def bump_prefix(self, exceptions=()):
"""
Changes the alias prefix to the next letter in the alphabet and
relabels all the aliases. Even tables that previously had no alias will
get an alias after this call (it's mostly used for nested queries and
the outer query will already be using the non-aliased table name).
Subclasses who create their own prefix should override this method to
produce a similar result (a new prefix and relabelled aliases).
The 'exceptions' parameter is a container that holds alias names which
should not be changed.
"""
current = ord(self.alias_prefix)
assert current < ord('Z')
prefix = chr(current + 1)
self.alias_prefix = prefix
change_map = {}
for pos, alias in enumerate(self.tables):
if alias in exceptions:
continue
new_alias = '%s%d' % (prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
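    # Illustrative sketch, not part of the original module: with the default
    # prefix 'T', bump_prefix() switches the prefix to 'U' and relabels, e.g.
    #     self.tables ['app_book', 'T2']  ->  ['U0', 'U1']
    # so that a nested subquery's aliases can never collide with those of the
    # enclosing query.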
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join((None, self.model._meta.db_table, None, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count.
"""
return len([1 for count in self.alias_refcount.itervalues() if count])
def join(self, connection, always_create=False, exclusions=(),
promote=False, outer_if_first=False, nullable=False, reuse=None):
"""
Returns an alias for the join in 'connection', either reusing an
existing alias for that join or creating a new one. 'connection' is a
tuple (lhs, table, lhs_col, col) where 'lhs' is either an existing
        table alias or a table name. The join corresponds to the SQL equivalent
of::
lhs.lhs_col = table.col
If 'always_create' is True and 'reuse' is None, a new alias is always
created, regardless of whether one already exists or not. If
'always_create' is True and 'reuse' is a set, an alias in 'reuse' that
matches the connection will be returned, if possible. If
'always_create' is False, the first existing alias that matches the
'connection' is returned, if any. Otherwise a new join is created.
If 'exclusions' is specified, it is something satisfying the container
protocol ("foo in exclusions" must work) and specifies a list of
aliases that should not be returned, even if they satisfy the join.
If 'promote' is True, the join type for the alias will be LOUTER (if
the alias previously existed, the join type will be promoted from INNER
to LOUTER, if necessary).
If 'outer_if_first' is True and a new join is created, it will have the
LOUTER join type. This is used when joining certain types of querysets
and Q-objects together.
If 'nullable' is True, the join can potentially involve NULL values and
is a candidate for promotion (to "left outer") when combining querysets.
"""
lhs, table, lhs_col, col = connection
if lhs in self.alias_map:
lhs_table = self.alias_map[lhs][TABLE_NAME]
else:
lhs_table = lhs
if reuse and always_create and table in self.table_map:
            # Convert the 'reuse' case to be "exclude everything but the
# reusable set, minus exclusions, for this table".
exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
always_create = False
t_ident = (lhs_table, table, lhs_col, col)
if not always_create:
for alias in self.join_map.get(t_ident, ()):
if alias not in exclusions:
if lhs_table and not self.alias_refcount[self.alias_map[alias][LHS_ALIAS]]:
# The LHS of this join tuple is no longer part of the
# query, so skip this possibility.
continue
if self.alias_map[alias][LHS_ALIAS] != lhs:
continue
self.ref_alias(alias)
if promote:
self.promote_alias(alias)
return alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(table, True)
if not lhs:
# Not all tables need to be joined to anything. No join type
# means the later columns are ignored.
join_type = None
elif promote or outer_if_first:
join_type = self.LOUTER
else:
join_type = self.INNER
join = (table, alias, join_type, lhs, lhs_col, col, nullable)
self.alias_map[alias] = join
if t_ident in self.join_map:
self.join_map[t_ident] += (alias,)
else:
self.join_map[t_ident] = (alias,)
self.rev_join_map[alias] = t_ident
return alias
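    # Illustrative sketch, not part of the original module, of the
    # 'connection' tuples handed to join():
    #     join((None, 'app_author', None, None))
    #         base table; join_type stays None and the column names are ignored
    #     join(('app_author', 'app_book', 'id', 'author_id'), nullable=True)
    #         joins app_book ON app_author.id = app_book.author_id, INNER by
    #         default and, being nullable, a candidate for later promotion to
    #         LEFT OUTER.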
def setup_inherited_models(self):
"""
If the model that is the basis for this QuerySet inherits other models,
we need to ensure that those other models have their tables included in
the query.
We do this as a separate step so that subclasses know which
tables are going to be active in the query, without needing to compute
all the select columns (this method is called from pre_sql_setup(),
whereas column determination is a later part, and side-effect, of
as_sql()).
"""
opts = self.model._meta
root_alias = self.tables[0]
seen = {None: root_alias}
# Skip all proxy to the root proxied model
proxied_model = get_proxied_model(opts)
for field, model in opts.get_fields_with_model():
if model not in seen:
if model is proxied_model:
seen[model] = root_alias
else:
link_field = opts.get_ancestor_link(model)
seen[model] = self.join((root_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
self.included_inherited_models = seen
def remove_inherited_models(self):
"""
Undoes the effects of setup_inherited_models(). Should be called
whenever select columns (self.select) are set explicitly.
"""
for key, alias in self.included_inherited_models.items():
if key:
self.unref_alias(alias)
self.included_inherited_models = {}
def add_aggregate(self, aggregate, model, alias, is_summary):
"""
Adds a single aggregate expression to the Query
"""
opts = model._meta
field_list = aggregate.lookup.split(LOOKUP_SEP)
if len(field_list) == 1 and aggregate.lookup in self.aggregates:
# Aggregate is over an annotation
field_name = field_list[0]
col = field_name
source = self.aggregates[field_name]
if not is_summary:
raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
aggregate.name, field_name, field_name))
elif ((len(field_list) > 1) or
(field_list[0] not in [i.name for i in opts.fields]) or
self.group_by is None or
not is_summary):
# If:
# - the field descriptor has more than one part (foo__bar), or
# - the field descriptor is referencing an m2m/m2o field, or
# - this is a reference to a model field (possibly inherited), or
# - this is an annotation over a model field
# then we need to explore the joins that are required.
field, source, opts, join_list, last, _ = self.setup_joins(
field_list, opts, self.get_initial_alias(), False)
# Process the join chain to see if it can be trimmed
col, _, join_list = self.trim_joins(source, join_list, last, False)
# If the aggregate references a model or field that requires a join,
# those joins must be LEFT OUTER - empty join rows must be returned
# in order for zeros to be returned for those aggregates.
for column_alias in join_list:
self.promote_alias(column_alias, unconditional=True)
col = (join_list[-1], col)
else:
# The simplest cases. No joins required -
# just reference the provided column alias.
field_name = field_list[0]
source = opts.get_field(field_name)
col = field_name
# Add the aggregate to the query
aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
can_reuse=None, process_extras=True):
"""
Add a single filter to the query. The 'filter_expr' is a pair:
(filter_string, value). E.g. ('name__contains', 'fred')
If 'negate' is True, this is an exclude() filter. It's important to
note that this method does not negate anything in the where-clause
object when inserting the filter constraints. This is because negated
filters often require multiple calls to add_filter() and the negation
should only happen once. So the caller is responsible for this (the
        caller will normally be add_q(), for example).
If 'trim' is True, we automatically trim the final join group (used
internally when constructing nested queries).
If 'can_reuse' is a set, we are processing a component of a
multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
will be a set of table aliases that can be reused in this filter, even
if we would otherwise force the creation of new aliases for a join
(needed for nested Q-filters). The set is updated by this method.
If 'process_extras' is set, any extra filters returned from the table
joining process will be processed. This parameter is set to False
during the processing of extra filters to avoid infinite recursion.
"""
arg, value = filter_expr
parts = arg.split(LOOKUP_SEP)
if not parts:
raise FieldError("Cannot parse keyword query %r" % arg)
# Work out the lookup type and remove it from 'parts', if necessary.
if len(parts) == 1 or parts[-1] not in self.query_terms:
lookup_type = 'exact'
else:
lookup_type = parts.pop()
# By default, this is a WHERE clause. If an aggregate is referenced
# in the value, the filter will be promoted to a HAVING
having_clause = False
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookup_type != 'exact':
raise ValueError("Cannot use None as a query value")
lookup_type = 'isnull'
value = True
elif callable(value):
value = value()
elif hasattr(value, 'evaluate'):
# If value is a query expression, evaluate it
value = SQLEvaluator(value, self)
having_clause = value.contains_aggregate
for alias, aggregate in self.aggregates.items():
if alias == parts[0]:
entry = self.where_class()
entry.add((aggregate, lookup_type, value), AND)
if negate:
entry.negate()
self.having.add(entry, AND)
return
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = trim or not negate
try:
field, target, opts, join_list, last, extra_filters = self.setup_joins(
parts, opts, alias, True, allow_many, can_reuse=can_reuse,
negate=negate, process_extras=process_extras)
except MultiJoin, e:
self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse)
return
if (lookup_type == 'isnull' and value is True and not negate and
len(join_list) > 1):
# If the comparison is against NULL, we may need to use some left
# outer joins when creating the join chain. This is only done when
# needed, as it's less efficient at the database level.
self.promote_alias_chain(join_list)
# Process the join list to see if we can remove any inner joins from
# the far end (fewer tables in a query is better).
col, alias, join_list = self.trim_joins(target, join_list, last, trim)
if connector == OR:
# Some joins may need to be promoted when adding a new filter to a
# disjunction. We walk the list of new joins and where it diverges
# from any previous joins (ref count is 1 in the table list), we
# make the new additions (and any existing ones not used in the new
# join list) an outer join.
join_it = iter(join_list)
table_it = iter(self.tables)
join_it.next(), table_it.next()
table_promote = False
join_promote = False
for join in join_it:
table = table_it.next()
if join == table and self.alias_refcount[join] > 1:
continue
join_promote = self.promote_alias(join)
if table != join:
table_promote = self.promote_alias(table)
break
self.promote_alias_chain(join_it, join_promote)
self.promote_alias_chain(table_it, table_promote)
if having_clause:
if (alias, col) not in self.group_by:
self.group_by.append((alias, col))
self.having.add((Constraint(alias, col, field), lookup_type, value),
connector)
else:
self.where.add((Constraint(alias, col, field), lookup_type, value),
connector)
if negate:
self.promote_alias_chain(join_list)
if lookup_type != 'isnull':
if len(join_list) > 1:
for alias in join_list:
if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
j_col = self.alias_map[alias][RHS_JOIN_COL]
entry = self.where_class()
entry.add(
(Constraint(alias, j_col, None), 'isnull', True),
AND
)
entry.negate()
self.where.add(entry, AND)
break
if not (lookup_type == 'in'
and not hasattr(value, 'as_sql')
and not hasattr(value, '_as_sql')
and not value) and field.null:
# Leaky abstraction artifact: We have to specifically
# exclude the "foo__in=[]" case from this handling, because
# it's short-circuited in the Where class.
# We also need to handle the case where a subquery is provided
self.where.add((Constraint(alias, col, None), 'isnull', False), AND)
if can_reuse is not None:
can_reuse.update(join_list)
if process_extras:
for filter in extra_filters:
self.add_filter(filter, negate=negate, can_reuse=can_reuse,
process_extras=False)
def add_q(self, q_object, used_aliases=None):
"""
Adds a Q-object to the current filter.
Can also be used to add anything that has an 'add_to_query()' method.
"""
if used_aliases is None:
used_aliases = self.used_aliases
if hasattr(q_object, 'add_to_query'):
# Complex custom objects are responsible for adding themselves.
q_object.add_to_query(self, used_aliases)
else:
if self.where and q_object.connector != AND and len(q_object) > 1:
self.where.start_subtree(AND)
subtree = True
else:
subtree = False
connector = AND
for child in q_object.children:
if connector == OR:
refcounts_before = self.alias_refcount.copy()
self.where.start_subtree(connector)
if isinstance(child, Node):
self.add_q(child, used_aliases)
else:
self.add_filter(child, connector, q_object.negated,
can_reuse=used_aliases)
self.where.end_subtree()
if connector == OR:
# Aliases that were newly added or not used at all need to
# be promoted to outer joins if they are nullable relations.
# (they shouldn't turn the whole conditional into the empty
# set just because they don't match anything).
self.promote_unused_aliases(refcounts_before, used_aliases)
connector = q_object.connector
if q_object.negated:
self.where.negate()
if subtree:
self.where.end_subtree()
if self.filter_is_sticky:
self.used_aliases = used_aliases
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None, negate=False,
process_extras=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are joining to), 'alias' is the alias for the
table we are joining to. If dupe_multis is True, any many-to-many or
many-to-one joins will always create a new alias (necessary for
disjunctive filters). If can_reuse is not None, it's a list of aliases
that can be reused in these joins (nothing else can be reused in this
case). Finally, 'negate' is used in the same sense as for add_filter()
-- it indicates an exclude() filter, or something similar. It is only
passed in here so that it can be passed to a field's extra_filter() for
customised behaviour.
Returns the final field involved in the join, the target database
column (used for any 'where' constraint), the final 'opts' value and the
list of tables joined.
"""
joins = [alias]
last = [0]
dupe_set = set()
exclusions = set()
extra_filters = []
for pos, name in enumerate(names):
try:
exclusions.add(int_alias)
except NameError:
pass
exclusions.add(alias)
last.append(len(joins))
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
for f in opts.fields:
if allow_explicit_fk and name == f.attname:
# XXX: A hack to allow foo_id to work in values() for
# backwards compatibility purposes. If we dropped that
# feature, this could be removed.
field, model, direct, m2m = opts.get_field_by_name(f.name)
break
else:
names = opts.get_all_field_names() + self.aggregate_select.keys()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
if not allow_many and (m2m or not direct):
for alias in joins:
self.unref_alias(alias)
raise MultiJoin(pos + 1)
if model:
# The field lives on a base class of the current model.
# Skip the chain of proxy to the concrete proxied model
proxied_model = get_proxied_model(opts)
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
lhs_col = opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
exclusions.update(self.dupe_avoidance.get(
(id(opts), lhs_col), ()))
dupe_set.add((opts, lhs_col))
opts = int_model._meta
alias = self.join((alias, opts.db_table, lhs_col,
opts.pk.column), exclusions=exclusions)
joins.append(alias)
exclusions.add(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col,
alias)
cached_data = opts._join_cache.get(name)
orig_opts = opts
dupe_col = direct and field.column or field.field.column
dedupe = dupe_col in opts.duplicate_targets
if dupe_set or dedupe:
if dedupe:
dupe_set.add((opts, dupe_col))
exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
()))
if process_extras and hasattr(field, 'extra_filters'):
extra_filters.extend(field.extra_filters(names, pos, negate))
if direct:
if m2m:
# Many-to-many field defined on the current model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_column_name()
opts = field.rel.to._meta
table2 = opts.db_table
from_col2 = field.m2m_reverse_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
if int_alias == table2 and from_col2 == to_col2:
joins.append(int_alias)
alias = int_alias
else:
alias = self.join(
(int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
elif field.rel:
# One-to-one or many-to-one field
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
opts = field.rel.to._meta
target = field.rel.get_related_field()
table = opts.db_table
from_col = field.column
to_col = target.column
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
exclusions=exclusions, nullable=field.null)
joins.append(alias)
else:
# Non-relation fields.
target = field
break
else:
orig_field = field
field = field.field
if m2m:
# Many-to-many field defined on the target model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_reverse_name()
opts = orig_field.opts
table2 = opts.db_table
from_col2 = field.m2m_column_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
else:
# One-to-many field (ForeignKey defined on the target model)
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
local_field = opts.get_field_by_name(
field.rel.field_name)[0]
opts = orig_field.opts
table = opts.db_table
from_col = local_field.column
to_col = field.column
target = opts.pk
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
try:
self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
except NameError:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if pos != len(names) - 1:
if pos == len(names) - 2:
raise FieldError("Join on field %r not permitted. Did you misspell %r for the lookup type?" % (name, names[pos + 1]))
else:
raise FieldError("Join on field %r not permitted." % name)
return field, target, opts, joins, last, extra_filters
def trim_joins(self, target, join_list, last, trim):
"""
Sometimes joins at the end of a multi-table sequence can be trimmed. If
the final join is against the same column as we are comparing against,
and is an inner join, we can go back one step in a join chain and
compare against the LHS of the join instead (and then repeat the
        optimization). The result, potentially, involves fewer table joins.
The 'target' parameter is the final field being joined to, 'join_list'
is the full list of join aliases.
The 'last' list contains offsets into 'join_list', corresponding to
each component of the filter. Many-to-many relations, for example, add
two tables to the join list and we want to deal with both tables the
same way, so 'last' has an entry for the first of the two tables and
then the table immediately after the second table, in that case.
The 'trim' parameter forces the final piece of the join list to be
trimmed before anything. See the documentation of add_filter() for
details about this.
Returns the final active column and table alias and the new active
join_list.
"""
final = len(join_list)
penultimate = last.pop()
if penultimate == final:
penultimate = last.pop()
if trim and len(join_list) > 1:
extra = join_list[penultimate:]
join_list = join_list[:penultimate]
final = penultimate
penultimate = last.pop()
col = self.alias_map[extra[0]][LHS_JOIN_COL]
for alias in extra:
self.unref_alias(alias)
else:
col = target.column
alias = join_list[-1]
while final > 1:
join = self.alias_map[alias]
if col != join[RHS_JOIN_COL] or join[JOIN_TYPE] != self.INNER:
break
self.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
join_list = join_list[:-1]
final -= 1
if final == penultimate:
penultimate = last.pop()
return col, alias, join_list
def update_dupe_avoidance(self, opts, col, alias):
"""
For a column that is one of multiple pointing to the same table, update
the internal data structures to note that this alias shouldn't be used
for those other columns.
"""
ident = id(opts)
for name in opts.duplicate_targets[col]:
try:
self.dupe_avoidance[ident, name].add(alias)
except KeyError:
self.dupe_avoidance[ident, name] = set([alias])
def split_exclude(self, filter_expr, prefix, can_reuse):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
"""
query = Query(self.model)
query.add_filter(filter_expr, can_reuse=can_reuse)
query.bump_prefix()
query.clear_ordering(True)
query.set_start(prefix)
self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
can_reuse=can_reuse)
# If there's more than one join in the inner query (before any initial
# bits were trimmed -- which means the last active table is more than
# two places into the alias list), we need to also handle the
# possibility that the earlier joins don't match anything by adding a
# comparison to NULL (e.g. in
# Tag.objects.exclude(parent__parent__name='t1'), a tag with no parent
# would otherwise be overlooked).
active_positions = [pos for (pos, count) in
enumerate(query.alias_refcount.itervalues()) if count]
if active_positions[-1] > 1:
self.add_filter(('%s__isnull' % prefix, False), negate=True,
trim=True, can_reuse=can_reuse)
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
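    # Illustrative sketch, not part of the original module: limits compose
    # relative to the current slice, so starting from low_mark=0,
    # high_mark=None:
    #     set_limits(low=2, high=10)  ->  low_mark=2, high_mark=10
    #     set_limits(low=3)           ->  low_mark=5, high_mark=10
    # which is how qs[2:10][3:] ends up selecting rows 5..9.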
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
self.select_fields = []
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
for name in field_names:
field, target, u2, joins, u3, u4 = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
True)
final_alias = joins[-1]
col = target.column
if len(joins) > 1:
join = self.alias_map[final_alias]
if col == join[RHS_JOIN_COL]:
self.unref_alias(final_alias)
final_alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
joins = joins[:-1]
self.promote_alias_chain(joins[1:])
self.select.append((final_alias, col))
self.select_fields.append(field)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
names = opts.get_all_field_names() + self.extra.keys() + self.aggregate_select.keys()
names.sort()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
self.remove_inherited_models()
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or ordinals,
corresponding to column positions in the 'select' list.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not ORDER_PATTERN.match(item):
errors.append(item)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty=False):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for sel in self.select:
self.group_by.append(sel)
def add_count_column(self):
"""
Converts the query to do count(...) or count(distinct(pk)) in order to
get its size.
"""
if not self.distinct:
if not self.select:
count = self.aggregates_module.Count('*', is_summary=True)
else:
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select': %r" % self.select
count = self.aggregates_module.Count(self.select[0])
else:
opts = self.model._meta
if not self.select:
count = self.aggregates_module.Count((self.join((None, opts.db_table, None, None)), opts.pk.column),
is_summary=True, distinct=True)
else:
# Because of SQL portability issues, multi-column, distinct
# counts need a sub-query -- see get_count() for details.
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select'."
count = self.aggregates_module.Count(self.select[0], distinct=True)
# Distinct handling is done in Count(), so don't do it at this
# level.
self.distinct = False
# Set only aggregate to be the count column.
# Clear out the select cache to reflect the new unmasked aggregates.
self.aggregates = {None: count}
self.set_aggregate_mask(None)
self.group_by = None
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
field_dict = {}
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
self.related_select_cols = []
self.related_select_fields = []
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = SortedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_unicode(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
entry_params.append(param_iter.next())
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is a SortedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
        # splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = set(field_names).difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = set(field_names), False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
        model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
        Callback used by get_loaded_field_names().
"""
target[model] = set([f.name for f in fields])
def set_aggregate_mask(self, names):
"Set the mask of aggregates that will actually be returned by the SELECT"
if names is None:
self.aggregate_select_mask = None
else:
self.aggregate_select_mask = set(names)
self._aggregate_select_cache = None
def set_extra_mask(self, names):
"""
        Set the mask of extra select items that will be returned by SELECT.
        We don't actually remove them from the Query since they might be used
        later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def _aggregate_select(self):
"""The SortedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = SortedDict([
(k,v) for k,v in self.aggregates.items()
if k in self.aggregate_select_mask
])
return self._aggregate_select_cache
else:
return self.aggregates
aggregate_select = property(_aggregate_select)
def _extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
elif self.extra_select_mask is not None:
self._extra_select_cache = SortedDict([
(k,v) for k,v in self.extra.items()
if k in self.extra_select_mask
])
return self._extra_select_cache
else:
return self.extra
extra_select = property(_extra_select)
def set_start(self, start):
"""
Sets the table from which to start joining. The start position is
        specified by the related attribute from the base model. It will
        automatically set the select column to be the column linked from the
        previous table.
        This method is primarily for internal use and the error checking isn't
        as friendly as add_filter(). Mostly useful for querying directly
        against the join table of a many-to-many relation in a subquery.
"""
opts = self.model._meta
alias = self.get_initial_alias()
field, col, opts, joins, last, extra = self.setup_joins(
start.split(LOOKUP_SEP), opts, alias, False)
select_col = self.alias_map[joins[1]][LHS_JOIN_COL]
select_alias = alias
# The call to setup_joins added an extra reference to everything in
# joins. Reverse that.
for alias in joins:
self.unref_alias(alias)
# We might be able to trim some joins from the front of this query,
# providing that we only traverse "always equal" connections (i.e. rhs
# is *always* the same value as lhs).
for alias in joins[1:]:
join_info = self.alias_map[alias]
if (join_info[LHS_JOIN_COL] != select_col
or join_info[JOIN_TYPE] != self.INNER):
break
self.unref_alias(select_alias)
select_alias = join_info[RHS_ALIAS]
select_col = join_info[RHS_JOIN_COL]
self.select = [(select_alias, select_col)]
self.remove_inherited_models()
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
def setup_join_cache(sender, **kwargs):
"""
The information needed to join between model fields is something that is
invariant over the life of the model, so we cache it in the model's Options
class, rather than recomputing it all the time.
This method initialises the (empty) cache when the model is created.
"""
sender._meta._join_cache = {}
signals.class_prepared.connect(setup_join_cache)
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = set([value])
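# Illustrative usage of add_to_dict (hypothetical names, not part of the
# original module): after
#   add_to_dict(seen, 'author', 'name') and add_to_dict(seen, 'author', 'email')
# seen['author'] == set(['name', 'email']), whether or not 'author' existed before.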
def get_proxied_model(opts):
int_opts = opts
proxied_model = None
while int_opts.proxy:
proxied_model = int_opts.proxy_for_model
int_opts = proxied_model._meta
return proxied_model
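# Illustrative (hypothetical models): for a chain ProxyOfProxy -> Proxy -> Concrete,
# get_proxied_model(ProxyOfProxy._meta) follows proxy_for_model links until a
# non-proxy Options is reached and returns that model (Concrete here); for a
# non-proxy model it returns None.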
|
vmturbo/nova
|
refs/heads/master
|
nova/api/openstack/compute/schemas/rescue.py
|
16
|
# Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
rescue = {
'type': 'object',
'properties': {
'rescue': {
'type': ['object', 'null'],
'properties': {
'adminPass': parameter_types.admin_password,
'rescue_image_ref': parameter_types.image_id,
},
'additionalProperties': False,
},
},
'required': ['rescue'],
'additionalProperties': False,
}
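# Request bodies this schema accepts (illustrative only, not from the original
# file): {"rescue": {"adminPass": "secret", "rescue_image_ref": "<image uuid>"}}
# or simply {"rescue": null}, since the 'rescue' value may be an object or null
# and both of its properties are optional.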
|
alxgu/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortimanager/fmgr_fwobj_ippool6.py
|
38
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_fwobj_ippool6
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Allows the editing of IP Pool Objects within FortiManager.
description:
- Allows users to add/edit/delete IPv6 Pool Objects.
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
mode:
description:
- Sets one of three modes for managing the object.
- Allows use of soft-adds instead of overwriting existing values
choices: ['add', 'set', 'delete', 'update']
required: false
default: add
startip:
description:
- First IPv6 address (inclusive) in the range for the address pool.
required: false
name:
description:
- IPv6 IP pool name.
required: false
endip:
description:
- Final IPv6 address (inclusive) in the range for the address pool.
required: false
comments:
description:
- Comment.
required: false
dynamic_mapping:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
dynamic_mapping_comments:
description:
- Dynamic Mapping clone of original suffixed parameter.
required: false
dynamic_mapping_endip:
description:
- Dynamic Mapping clone of original suffixed parameter.
required: false
dynamic_mapping_startip:
description:
- Dynamic Mapping clone of original suffixed parameter.
required: false
'''
EXAMPLES = '''
- name: ADD FMGR_FIREWALL_IPPOOL6
  fmgr_fwobj_ippool6:
mode: "add"
adom: "ansible"
startip:
name: "IPv6 IPPool"
endip:
comments: "Created by Ansible"
- name: DELETE FMGR_FIREWALL_IPPOOL6
  fmgr_fwobj_ippool6:
mode: "delete"
adom: "ansible"
name: "IPv6 IPPool"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
from ansible.module_utils.network.fortimanager.common import prepare_dict
from ansible.module_utils.network.fortimanager.common import scrub_dict
def fmgr_fwobj_ippool6_modify(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
mode = paramgram["mode"]
adom = paramgram["adom"]
    # INIT BASIC OBJECTS
response = DEFAULT_RESULT_OBJ
url = ""
datagram = {}
# EVAL THE MODE PARAMETER FOR SET OR ADD
if mode in ['set', 'add', 'update']:
url = '/pm/config/adom/{adom}/obj/firewall/ippool6'.format(adom=adom)
datagram = scrub_dict(prepare_dict(paramgram))
# EVAL THE MODE PARAMETER FOR DELETE
elif mode == "delete":
# SET THE CORRECT URL FOR DELETE
url = '/pm/config/adom/{adom}/obj/firewall/ippool6/{name}'.format(adom=adom, name=paramgram["name"])
datagram = {}
response = fmgr.process_request(url, datagram, paramgram["mode"])
return response
def main():
argument_spec = dict(
adom=dict(type="str", default="root"),
mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
startip=dict(required=False, type="str"),
name=dict(required=False, type="str"),
endip=dict(required=False, type="str"),
comments=dict(required=False, type="str"),
dynamic_mapping=dict(required=False, type="list"),
dynamic_mapping_comments=dict(required=False, type="str"),
dynamic_mapping_endip=dict(required=False, type="str"),
dynamic_mapping_startip=dict(required=False, type="str"),
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
# MODULE PARAMGRAM
paramgram = {
"mode": module.params["mode"],
"adom": module.params["adom"],
"startip": module.params["startip"],
"name": module.params["name"],
"endip": module.params["endip"],
"comments": module.params["comments"],
"dynamic_mapping": {
"comments": module.params["dynamic_mapping_comments"],
"endip": module.params["dynamic_mapping_endip"],
"startip": module.params["dynamic_mapping_startip"],
}
}
module.paramgram = paramgram
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = FortiManagerHandler(connection, module)
fmgr.tools = FMGRCommon()
else:
module.fail_json(**FAIL_SOCKET_MSG)
list_overrides = ['dynamic_mapping']
paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
paramgram=paramgram, module=module)
results = DEFAULT_RESULT_OBJ
try:
results = fmgr_fwobj_ippool6_modify(fmgr, paramgram)
fmgr.govern_response(module=module, results=results,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
except Exception as err:
raise FMGBaseException(err)
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
|
jzoldak/edx-platform
|
refs/heads/master
|
docs/en_us/enrollment_api/source/conf.py
|
13
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# pylint: disable=redefined-builtin
# pylint: disable=protected-access
# pylint: disable=unused-argument
import os
from path import Path as path
import sys
import mock
MOCK_MODULES = [
'ipware',
'ip',
'ipware.ip',
'get_ip',
'pygeoip',
'ipaddr',
'django_countries',
'fields',
'django_countries.fields',
'opaque_keys',
'opaque_keys.edx',
'opaque_keys.edx.keys',
'CourseKey',
'UsageKey',
'BlockTypeKey',
'opaque_keys.edx.locations',
'SlashSeparatedCourseKey',
'Location',
'opaque_keys.edx.locator',
'Locator',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
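# Mocking these modules (a common Sphinx convention for docs builds) lets autodoc
# import the enrollment API code on Read the Docs without installing the heavier
# LMS dependencies such as pygeoip or django_countries.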
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append('../../../../')
from docs.shared.conf import *
# Add any paths that contain templates here, relative to this directory.
#templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path.append('source/_static')
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root = path('../../../../').abspath()
sys.path.insert(0, root)
sys.path.append(root / "common/djangoapps")
sys.path.append('.')
#sys.path.insert(
# 0,
# os.path.abspath(
# os.path.normpath(
# os.path.dirname(__file__) + '/../../../..'
# )
# )
#)
# django configuration - careful here
if on_rtd:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinxcontrib.napoleon']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', 'links.rst']
project = 'edX Enrollment API Version 1'
|
illfelder/compute-image-packages
|
refs/heads/master
|
packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_executor.py
|
6
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Execute user provided metadata scripts."""
import os
import stat
import subprocess
class ScriptExecutor(object):
"""A class for executing user provided metadata scripts."""
def __init__(self, logger, script_type, default_shell=None):
"""Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
script_type: string, the type of the script we are running.
default_shell: string, the default shell to execute the script.
"""
self.logger = logger
self.script_type = script_type
self.default_shell = default_shell or '/bin/bash'
def _MakeExecutable(self, metadata_script):
"""Add executable permissions to a file.
Args:
metadata_script: string, the path to the executable file.
"""
mode = os.stat(metadata_script).st_mode
os.chmod(metadata_script, mode | stat.S_IEXEC)
def _RunScript(self, metadata_key, metadata_script):
"""Run a script and log the streamed script output.
Args:
      metadata_key: string, the key specifying the metadata script.
metadata_script: string, the file location of an executable script.
"""
process = subprocess.Popen(
metadata_script, shell=True,
executable=self.default_shell,
stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
while True:
for line in iter(process.stdout.readline, b''):
message = line.decode('utf-8', 'replace').rstrip('\n')
if message:
self.logger.info('%s: %s', metadata_key, message)
if process.poll() is not None:
break
self.logger.info('%s: Return code %s.', metadata_key, process.returncode)
def RunScripts(self, script_dict):
"""Run the metadata scripts; execute a URL script first if one is provided.
Args:
script_dict: a dictionary mapping metadata keys to script files.
"""
metadata_types = ['%s-script-url', '%s-script']
metadata_keys = [key % self.script_type for key in metadata_types]
metadata_keys = [key for key in metadata_keys if script_dict.get(key)]
if not metadata_keys:
self.logger.info('No %s scripts found in metadata.', self.script_type)
for metadata_key in metadata_keys:
metadata_script = script_dict.get(metadata_key)
self._MakeExecutable(metadata_script)
self._RunScript(metadata_key, metadata_script)
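# Minimal usage sketch (illustrative; assumes a logger object exposing info()):
#   executor = ScriptExecutor(logger, 'startup')
#   executor.RunScripts({'startup-script': '/var/run/startup-script'})
# When both a '<type>-script-url' and a '<type>-script' key are present, the URL
# variant is run first because of the ordering of metadata_types above.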
|
ssvsergeyev/ZenPacks.zenoss.AWS
|
refs/heads/develop
|
src/boto/boto/dynamodb/exceptions.py
|
185
|
"""
Exceptions that are specific to the dynamodb module.
"""
from boto.exception import BotoServerError, BotoClientError
from boto.exception import DynamoDBResponseError
class DynamoDBExpiredTokenError(BotoServerError):
"""
Raised when a DynamoDB security token expires. This is generally boto's
(or the user's) notice to renew their DynamoDB security tokens.
"""
pass
class DynamoDBKeyNotFoundError(BotoClientError):
"""
Raised when attempting to retrieve or interact with an item whose key
can't be found.
"""
pass
class DynamoDBItemError(BotoClientError):
"""
Raised when invalid parameters are passed when creating a
new Item in DynamoDB.
"""
pass
class DynamoDBNumberError(BotoClientError):
"""
Raised in the event of incompatible numeric type casting.
"""
pass
class DynamoDBConditionalCheckFailedError(DynamoDBResponseError):
"""
Raised when a ConditionalCheckFailedException response is received.
This happens when a conditional check, expressed via the expected_value
    parameter, fails.
"""
pass
class DynamoDBValidationError(DynamoDBResponseError):
"""
Raised when a ValidationException response is received. This happens
when one or more required parameter values are missing, or if the item
    has exceeded the 64KB size limit.
"""
pass
class DynamoDBThroughputExceededError(DynamoDBResponseError):
"""
Raised when the provisioned throughput has been exceeded.
Normally, when provisioned throughput is exceeded the operation
is retried. If the retries are exhausted then this exception
will be raised.
"""
pass
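# Typical handling sketch (illustrative, not part of the original module): callers
# usually let boto's built-in retries run first and only then back off themselves:
#   try:
#       ...issue a boto.dynamodb read or write...
#   except DynamoDBThroughputExceededError:
#       ...sleep and retry, or surface the error to the caller...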
|
emarinizquierdo/xentinels
|
refs/heads/master
|
openid/__init__.py
|
139
|
"""
This package is an implementation of the OpenID specification in
Python. It contains code for both server and consumer
implementations. For information on implementing an OpenID consumer,
see the C{L{openid.consumer.consumer}} module. For information on
implementing an OpenID server, see the C{L{openid.server.server}}
module.
@contact: U{http://openid.net/developers/dev-mailing-lists/
    <http://openid.net/developers/dev-mailing-lists/>}
@copyright: (C) 2005-2008 JanRain, Inc.
@license: Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
U{http://www.apache.org/licenses/LICENSE-2.0}
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
"""
__version__ = '[library version:2.2.1]'[17:-1]
__all__ = [
'association',
'consumer',
'cryptutil',
'dh',
'extension',
'extensions',
'fetchers',
'kvform',
'message',
'oidutil',
'server',
'sreg',
'store',
'urinorm',
'yadis',
]
# Parse the version info
try:
version_info = map(int, __version__.split('.'))
except ValueError:
version_info = (None, None, None)
else:
if len(version_info) != 3:
version_info = (None, None, None)
else:
version_info = tuple(version_info)
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/Android/ADB/platform-tools/systrace/catapult/common/py_utils/py_utils/refactor/module.py
|
11
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from py_utils.refactor import annotated_symbol
class Module(object):
def __init__(self, file_path):
self._file_path = file_path
with open(self._file_path, 'r') as f:
self._snippet = annotated_symbol.Annotate(f)
@property
def file_path(self):
return self._file_path
@property
def modified(self):
return self._snippet.modified
def FindAll(self, snippet_type):
return self._snippet.FindAll(snippet_type)
def FindChildren(self, snippet_type):
return self._snippet.FindChildren(snippet_type)
def Write(self):
"""Write modifications to the file."""
if not self.modified:
return
# Stringify before opening the file for writing.
# If we fail, we won't truncate the file.
string = str(self._snippet)
with open(self._file_path, 'w') as f:
f.write(string)
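# Usage sketch (illustrative; the snippet type name is hypothetical):
#   module = Module('some_file.py')
#   for node in module.FindAll(SomeSnippetType):  # hypothetical snippet type
#       ...edit the annotated snippet in place...
#   module.Write()  # rewrites the file only if the snippet was modified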
|
DirtyUnicorns/android_external_chromium-org
|
refs/heads/kitkat
|
build/android/gyp/apk_install.py
|
28
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Installs an APK.
"""
import optparse
import os
import re
import subprocess
import sys
from util import build_device
from util import build_utils
from util import md5_check
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(BUILD_ANDROID_DIR)
from pylib import constants
from pylib.utils import apk_helper
def GetNewMetadata(device, apk_package):
"""Gets the metadata on the device for the apk_package apk."""
output = device.RunShellCommand('ls -l /data/app/')
# Matches lines like:
# -rw-r--r-- system system 7376582 2013-04-19 16:34 org.chromium.chrome.testshell.apk
# -rw-r--r-- system system 7376582 2013-04-19 16:34 org.chromium.chrome.testshell-1.apk
apk_matcher = lambda s: re.match('.*%s(-[0-9]*)?.apk$' % apk_package, s)
matches = filter(apk_matcher, output)
return matches[0] if matches else None
def HasInstallMetadataChanged(device, apk_package, metadata_path):
"""Checks if the metadata on the device for apk_package has changed."""
if not os.path.exists(metadata_path):
return True
with open(metadata_path, 'r') as expected_file:
return expected_file.read() != device.GetInstallMetadata(apk_package)
def RecordInstallMetadata(device, apk_package, metadata_path):
"""Records the metadata from the device for apk_package."""
metadata = GetNewMetadata(device, apk_package)
if not metadata:
raise Exception('APK install failed unexpectedly.')
with open(metadata_path, 'w') as outfile:
outfile.write(metadata)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--apk-path',
help='Path to .apk to install.')
parser.add_option('--install-record',
help='Path to install record (touched only when APK is installed).')
parser.add_option('--build-device-configuration',
help='Path to build device configuration.')
parser.add_option('--stamp',
help='Path to touch on success.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME')
options, _ = parser.parse_args()
device = build_device.GetBuildDeviceFromPath(
options.build_device_configuration)
if not device:
return
constants.SetBuildType(options.configuration_name)
serial_number = device.GetSerialNumber()
apk_package = apk_helper.GetPackageName(options.apk_path)
metadata_path = '%s.%s.device.time.stamp' % (options.apk_path, serial_number)
# If the APK on the device does not match the one that was last installed by
# the build, then the APK has to be installed (regardless of the md5 record).
force_install = HasInstallMetadataChanged(device, apk_package, metadata_path)
def Install():
device.Install(options.apk_path, reinstall=True)
RecordInstallMetadata(device, apk_package, metadata_path)
build_utils.Touch(options.install_record)
record_path = '%s.%s.md5.stamp' % (options.apk_path, serial_number)
md5_check.CallAndRecordIfStale(
Install,
record_path=record_path,
input_paths=[options.apk_path],
force=force_install)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
PitMaker/Orion
|
refs/heads/master
|
drone_lib.py
|
1
|
# library of functions related to the drone
|
BraichuSoft/pyafipws
|
refs/heads/master
|
cot.py
|
11
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# Based on MultipartPostHandler.py (C) 02/2006 Will Holcomb <wholcomb@gmail.com>
# Initial examples thanks to "Matias Gieco matigro@gmail.com"
"Module for obtaining the automatic electronic shipping document (COT, remito electrónico automático)"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.02h"
import os, sys, traceback
from pysimplesoap.simplexml import SimpleXMLElement
from utils import WebClient
HOMO = False
CACERT = "conf/arba.crt" # establecimiento de canal seguro (en producción)
##URL = "https://cot.ec.gba.gob.ar/TransporteBienes/SeguridadCliente/presentarRemitos.do"
# Nuevo servidor para el "Remito Electrónico Automático"
URL = "http://cot.test.arba.gov.ar/TransporteBienes/SeguridadCliente/presentarRemitos.do" # testing
#URL = "https://cot.arba.gov.ar/TransporteBienes/SeguridadCliente/presentarRemitos.do" # prod.
class COT:
"Interfaz para el servicio de Remito Electronico ARBA"
_public_methods_ = ['Conectar', 'PresentarRemito', 'LeerErrorValidacion',
'LeerValidacionRemito',
'AnalizarXml', 'ObtenerTagXml']
_public_attrs_ = ['Usuario', 'Password', 'XmlResponse',
'Version', 'Excepcion', 'Traceback', 'InstallDir',
'CuitEmpresa', 'NumeroComprobante', 'CodigoIntegridad', 'NombreArchivo',
'TipoError', 'CodigoError', 'MensajeError',
'NumeroUnico', 'Procesado',
]
_reg_progid_ = "COT"
_reg_clsid_ = "{7518B2CF-23E9-4821-BC55-D15966E15620}"
Version = "%s %s" % (__version__, HOMO and 'Homologación' or '')
def __init__(self):
self.Usuario = self.Password = None
self.TipoError = self.CodigoError = self.MensajeError = ""
self.LastID = self.LastCMP = self.CAE = self.CAEA = self.Vencimiento = ''
self.InstallDir = INSTALL_DIR
self.client = None
self.xml = None
self.limpiar()
def limpiar(self):
self.remitos = []
self.errores = []
self.XmlResponse = ""
self.Excepcion = self.Traceback = ""
self.TipoError = self.CodigoError = self.MensajeError = ""
self.CuitEmpresa = self.NumeroComprobante = ""
self.NombreArchivo = self.CodigoIntegridad = ""
self.NumeroUnico = self.Procesado = ""
def Conectar(self, url=None, proxy="", wrapper=None, cacert=None, trace=False):
if HOMO or not url:
url = URL
self.client = WebClient(location=url, trace=trace, cacert=cacert)
def PresentarRemito(self, filename, testing=""):
self.limpiar()
try:
if not os.path.exists(filename):
self.Excepcion = "Archivo no encontrado: %s" % filename
return False
archivo = open(filename,"rb")
if not testing:
response = self.client(user=self.Usuario, password=self.Password,
file=archivo)
else:
response = open(testing).read()
self.XmlResponse = response
self.xml = SimpleXMLElement(response)
if 'tipoError' in self.xml:
self.TipoError = str(self.xml.tipoError)
self.CodigoError = str(self.xml.codigoError)
self.MensajeError = str(self.xml.mensajeError).decode('latin1').encode("ascii", "replace")
if 'cuitEmpresa' in self.xml:
self.CuitEmpresa = str(self.xml.cuitEmpresa)
self.NumeroComprobante = str(self.xml.numeroComprobante)
self.NombreArchivo = str(self.xml.nombreArchivo)
self.CodigoIntegridad = str(self.xml.codigoIntegridad)
if 'validacionesRemitos' in self.xml:
for remito in self.xml.validacionesRemitos.remito:
d = {
'NumeroUnico': str(remito.numeroUnico),
'Procesado': str(remito.procesado),
'Errores': [],
}
if 'errores' in remito:
for error in remito.errores.error:
d['Errores'].append((
str(error.codigo),
str(error.descripcion).decode('latin1').encode("ascii", "replace")))
self.remitos.append(d)
            # set the values from the first remito (without removing it)
self.LeerValidacionRemito(pop=False)
return True
except Exception, e:
ex = traceback.format_exception( sys.exc_type, sys.exc_value, sys.exc_traceback)
self.Traceback = ''.join(ex)
try:
self.Excepcion = traceback.format_exception_only( sys.exc_type, sys.exc_value)[0]
except:
self.Excepcion = u"<no disponible>"
return False
def LeerValidacionRemito(self, pop=True):
"Leeo el próximo remito"
# por compatibilidad hacia atras, la primera vez no remueve de la lista
# (llamado de PresentarRemito con pop=False)
if self.remitos:
remito = self.remitos[0]
if pop:
del self.remitos[0]
self.NumeroUnico = remito['NumeroUnico']
self.Procesado = remito['Procesado']
self.errores = remito['Errores']
return True
else:
self.NumeroUnico = ""
self.Procesado = ""
self.errores = []
return False
def LeerErrorValidacion(self):
if self.errores:
error = self.errores.pop()
self.TipoError = ""
self.CodigoError = error[0]
self.MensajeError = error[1]
return True
else:
self.TipoError = ""
self.CodigoError = ""
self.MensajeError = ""
return False
def AnalizarXml(self, xml=""):
"Analiza un mensaje XML (por defecto la respuesta)"
try:
if not xml:
xml = self.XmlResponse
self.xml = SimpleXMLElement(xml)
return True
except Exception, e:
self.Excepcion = u"%s" % (e)
return False
def ObtenerTagXml(self, *tags):
"Busca en el Xml analizado y devuelve el tag solicitado"
# convierto el xml a un objeto
try:
if self.xml:
xml = self.xml
                # for each tag, look it up by its name or position
for tag in tags:
xml = xml(tag) # atajo a getitem y getattr
                # convert the xml object found back into a string
return str(xml)
except Exception, e:
self.Excepcion = u"%s" % (e)
# look up the installation directory (global so it does not change if another dll is used)
if not hasattr(sys, "frozen"):
basepath = __file__
elif sys.frozen=='dll':
import win32api
basepath = win32api.GetModuleFileName(sys.frozendllhandle)
else:
basepath = sys.executable
INSTALL_DIR = os.path.dirname(os.path.abspath(basepath))
if __name__=="__main__":
if "--register" in sys.argv or "--unregister" in sys.argv:
import win32com.server.register
win32com.server.register.UseCommandLine(COT)
sys.exit(0)
elif len(sys.argv)<4:
print "Se debe especificar el nombre de archivo, usuario y clave como argumentos!"
sys.exit(1)
cot = COT()
filename = sys.argv[1] # TB_20111111112_000000_20080124_000001.txt
cot.Usuario = sys.argv[2] # 20267565393
cot.Password = sys.argv[3] # 23456
if '--testing' in sys.argv:
test_response = "cot_response_multiple_errores.xml"
#test_response = "cot_response_2_errores.xml"
#test_response = "cot_response_3_sinerrores.xml"
else:
test_response = ""
if not HOMO:
for i, arg in enumerate(sys.argv):
if arg.startswith("--prod"):
URL = URL.replace("http://cot.test.arba.gov.ar",
"https://cot.arba.gov.ar")
print "Usando URL:", URL
break
if arg.startswith("https"):
URL = arg
print "Usando URL:", URL
break
cot.Conectar(URL, trace='--trace' in sys.argv, cacert=CACERT)
cot.PresentarRemito(filename, testing=test_response)
if cot.Excepcion:
print "Excepcion:", cot.Excepcion
print "Traceback:", cot.Traceback
    # general data:
print "CUIT Empresa:", cot.CuitEmpresa
print "Numero Comprobante:", cot.NumeroComprobante
print "Nombre Archivo:", cot.NombreArchivo
print "Codigo Integridad:", cot.CodigoIntegridad
print "Error General:", cot.TipoError, "|", cot.CodigoError, "|", cot.MensajeError
    # loop over the returned remitos and print the data for each one:
while cot.LeerValidacionRemito():
print "Numero Unico:", cot.NumeroUnico
print "Procesado:", cot.Procesado
while cot.LeerErrorValidacion():
print "Error Validacion:", "|", cot.CodigoError, "|", cot.MensajeError
    # Usage examples for ObtenerTagXml
if False:
print "cuit", cot.ObtenerTagXml('cuitEmpresa')
print "p0", cot.ObtenerTagXml('validacionesRemitos', 'remito', 0, 'procesado')
print "p1", cot.ObtenerTagXml('validacionesRemitos', 'remito', 1, 'procesado')
|
adlnet-archive/edx-platform
|
refs/heads/master
|
common/djangoapps/edxmako/templatetag_helpers.py
|
250
|
from django.template import loader
from django.template.base import Template, Context
from django.template.loader import get_template, select_template
from django.utils.itercompat import is_iterable  # needed by render_inclusion below
def django_template_include(file_name, mako_context):
"""
This can be used within a mako template to include a django template
in the way that a django-style {% include %} does. Pass it context
which can be the mako context ('context') or a dictionary.
"""
dictionary = dict(mako_context)
return loader.render_to_string(file_name, dictionary=dictionary)
def render_inclusion(func, file_name, takes_context, django_context, *args, **kwargs):
"""
This allows a mako template to call a template tag function (written
for django templates) that is an "inclusion tag". These functions are
decorated with @register.inclusion_tag.
-func: This is the function that is registered as an inclusion tag.
You must import it directly using a python import statement.
-file_name: This is the filename of the template, passed into the
@register.inclusion_tag statement.
-takes_context: This is a parameter of the @register.inclusion_tag.
-django_context: This is an instance of the django context. If this
is a mako template rendered through the regular django rendering calls,
a copy of the django context is available as 'django_context'.
-*args and **kwargs are the arguments to func.
"""
if takes_context:
args = [django_context] + list(args)
_dict = func(*args, **kwargs)
if isinstance(file_name, Template):
t = file_name
elif not isinstance(file_name, basestring) and is_iterable(file_name):
t = select_template(file_name)
else:
t = get_template(file_name)
nodelist = t.nodelist
new_context = Context(_dict)
csrf_token = django_context.get('csrf_token', None)
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return nodelist.render(new_context)
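# Illustrative call from a Mako template (names are hypothetical): the tag function
# must be imported into the template and passed in directly, e.g.
#   ${render_inclusion(my_tag_func, 'tags/my_tag.html', True, django_context, some_arg)}
# where my_tag_func was registered with @register.inclusion_tag(..., takes_context=True).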
|
pahaz/prospector
|
refs/heads/master
|
prospector/tools/frosted/__init__.py
|
3
|
from __future__ import absolute_import
from frosted.api import check_path
from prospector.message import Location, Message
from prospector.tools.base import ToolBase
__all__ = (
'FrostedTool',
)
class ProspectorReporter(object):
def __init__(self, ignore=None):
self._messages = []
self.ignore = ignore or ()
# pylint: disable=too-many-arguments
def record_message(
self,
filename=None,
line=None,
character=None,
code=None,
message=None):
if code in self.ignore:
return
location = Location(
path=filename,
module=None,
function=None,
line=line,
character=character,
)
message = Message(
source='frosted',
code=code,
location=location,
message=message,
)
self._messages.append(message)
def unexpected_error(self, filename, msg):
self.record_message(
filename=filename,
code='U999',
message=msg,
)
def flake(self, message):
filename, _, msg = message.message.split(':', 2)
self.record_message(
filename=filename,
line=message.lineno,
character=(message.col + 1),
code=message.type.error_code,
message=msg,
)
def get_messages(self):
return self._messages
class FrostedTool(ToolBase):
def __init__(self, *args, **kwargs):
super(FrostedTool, self).__init__(*args, **kwargs)
self.ignore_codes = ()
def configure(self, prospector_config, _):
self.ignore_codes = prospector_config.get_disabled_messages('frosted')
return None
def run(self, found_files):
reporter = ProspectorReporter(ignore=self.ignore_codes)
for filepath in found_files.iter_module_paths():
# Frosted cannot handle non-utf-8 encoded files at the moment -
# see https://github.com/timothycrosley/frosted/issues/53
# Therefore (since pyflakes overlaps heavily and does not have the same
# problem) we will simply suppress that error. If you do get it working
# correctly, you only end up with a "CannotDecodeFile" error anyway which
# is not useful to the user of prospector, nor is it actually a problem
# of the file but rather of frosted.
try:
check_path(filepath, reporter)
except UnicodeDecodeError:
# pylint:disable=pointless-except
pass
return reporter.get_messages()
|
BassantMorsi/finderApp
|
refs/heads/master
|
lib/python2.7/site-packages/django/conf/locale/et/__init__.py
|
12133432
| |
amitaekbote/dcos
|
refs/heads/master
|
packages/adminrouter/extra/src/test-harness/modules/mocker/endpoints/__init__.py
|
12133432
| |
Vagab0nd/SiCKRAGE
|
refs/heads/master
|
sickchill/providers/subtitle/wizdom.py
|
1
|
import bisect
import io
import logging
import os
import zipfile
from babelfish import Language
from guessit import guessit
from requests import Session
from subliminal.cache import region, SHOW_EXPIRATION_TIME
from subliminal.exceptions import ProviderError
from subliminal.matches import guess_matches
from subliminal.providers import Provider
from subliminal.subtitle import fix_line_ending, Subtitle
from subliminal.utils import sanitize
from subliminal.video import Episode, Movie
from sickchill import settings
logger = logging.getLogger(__name__)
class WizdomSubtitle(Subtitle):
"""Wizdom Subtitle."""
provider_name = 'wizdom'
def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, imdb_id, subtitle_id,
releases):
super().__init__(language, hearing_impaired, page_link)
self.series = series
self.season = season
self.episode = episode
self.title = title
self.imdb_id = imdb_id
self.subtitle_id = subtitle_id
self.downloaded = 0
self.releases = releases
@property
def id(self):
return str(self.subtitle_id)
def get_matches(self, video):
matches = set()
# episode
if isinstance(video, Episode):
# series
if video.series and sanitize(self.series) == sanitize(video.series):
matches.add('series')
# season
if video.season and self.season == video.season:
matches.add('season')
# episode
if video.episode and self.episode == video.episode:
matches.add('episode')
# imdb_id
if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
matches.add('series_imdb_id')
# guess
for release in self.releases:
matches |= guess_matches(video, guessit(release, {'type': 'episode'}))
# movie
elif isinstance(video, Movie):
# guess
for release in self.releases:
matches |= guess_matches(video, guessit(release, {'type': 'movie'}))
# title
if video.title and sanitize(self.title) == sanitize(video.title):
matches.add('title')
return matches
class WizdomProvider(Provider):
"""Wizdom Provider."""
languages = {Language.fromalpha2(l) for l in ['he']}
server_url = 'wizdom.xyz'
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
def terminate(self):
self.session.close()
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
def _search_imdb_id(self, title, year, is_movie):
"""Search the IMDB ID for the given `title` and `year`.
:param str title: title to search for.
:param int year: year to search for (or 0 if not relevant).
:param bool is_movie: If True, IMDB ID will be searched for in TMDB instead of Wizdom.
:return: the IMDB ID for the given title and year (or None if not found).
:rtype: str
"""
# make the search
logger.info('Searching IMDB ID for %r%r', title, '' if not year else ' ({})'.format(year))
category = 'movie' if is_movie else 'tv'
title = title.replace('\'', '')
# get TMDB ID first
r = self.session.get('http://api.tmdb.org/3/search/{}?api_key={}&query={}{}&language=en'.format(
category, settings.TMDB_API_KEY, title, '' if not year else '&year={}'.format(year)))
r.raise_for_status()
tmdb_results = r.json().get('results')
if tmdb_results:
tmdb_id = tmdb_results[0].get('id')
if tmdb_id:
# get actual IMDB ID from TMDB
r = self.session.get('http://api.tmdb.org/3/{}/{}{}?api_key={}&language=en'.format(
category, tmdb_id, '' if is_movie else '/external_ids', settings.TMDB_API_KEY))
r.raise_for_status()
return str(r.json().get('imdb_id', '')) or None
return None
def query(self, title, season=None, episode=None, year=None, filename=None, imdb_id=None):
# search for the IMDB ID if needed.
is_movie = not (season and episode)
imdb_id = imdb_id or self._search_imdb_id(title, year, is_movie)
if not imdb_id:
return {}
# search
logger.debug('Using IMDB ID %r', imdb_id)
url = 'http://json.{}/{}.json'.format(self.server_url, imdb_id)
page_link = 'http://{}/#/{}/{}'.format(self.server_url, 'movies' if is_movie else 'series', imdb_id)
# get the list of subtitles
logger.debug('Getting the list of subtitles')
r = self.session.get(url)
r.raise_for_status()
try:
results = r.json()
except ValueError:
return {}
# filter irrelevant results
if not is_movie:
results = results.get('subs', {}).get(str(season), {}).get(str(episode), [])
else:
results = results.get('subs', [])
# loop over results
subtitles = {}
for result in results:
language = Language.fromalpha2('he')
hearing_impaired = False
subtitle_id = result['id']
release = result['version']
# add the release and increment downloaded count if we already have the subtitle
if subtitle_id in subtitles:
logger.debug('Found additional release %r for subtitle %d', release, subtitle_id)
bisect.insort_left(subtitles[subtitle_id].releases, release) # deterministic order
subtitles[subtitle_id].downloaded += 1
continue
# otherwise create it
subtitle = WizdomSubtitle(language, hearing_impaired, page_link, title, season, episode, title, imdb_id,
subtitle_id, [release])
logger.debug('Found subtitle %r', subtitle)
subtitles[subtitle_id] = subtitle
return list(subtitles.values())
def list_subtitles(self, video: Episode, languages):
season = episode = None
title = video.title
year = video.year
filename = video.name
imdb_id = video.imdb_id
if isinstance(video, Episode):
title = video.series
season = video.season
episode = video.episode
imdb_id = video.series_imdb_id
return [s for s in self.query(title, season, episode, year, filename, imdb_id) if s.language in languages]
def download_subtitle(self, subtitle: WizdomSubtitle):
# download
url = 'http://zip.{}/{}.zip'.format(self.server_url, subtitle.subtitle_id)
r = self.session.get(url, headers={'Referer': subtitle.page_link}, timeout=10)
r.raise_for_status()
# open the zip
with zipfile.ZipFile(io.BytesIO(r.content)) as zf:
# remove some filenames from the namelist
namelist = [n for n in zf.namelist() if os.path.splitext(n)[1] in ['.srt', '.sub']]
if len(namelist) > 1:
raise ProviderError('More than one file to unzip')
subtitle.content = fix_line_ending(zf.read(namelist[0]))
|
s-store/sstore-soft
|
refs/heads/master
|
src/dtxn/net/demomessages.py
|
9
|
#!/usr/bin/python
import stupidcompiler
demo = stupidcompiler.MessageDefinition("Demo", "A simple test message.")
demo.addField(stupidcompiler.INT32, "integer", -42, "An integer.")
demo.addField(stupidcompiler.BOOL, "boolean", False, "A boolean.")
demo.addField(stupidcompiler.STRING, "str", None, "A string.")
messages = [demo]
if __name__ == "__main__":
stupidcompiler.main(messages, "net")
|
ewiger/tree_output
|
refs/heads/master
|
tree_output/houtput.py
|
1
|
# -*- coding: utf-8 -*-
'''
tree_output
===========
Python library to simplify building of tree output with command-line
interfaces.
Copyright (c) 2014 Yauhen Yakimovich
Licensed under the MIT License (MIT). Read a copy of LICENSE distributed with
this code.
'''
import sys
import json
import colorama
from colorama import Fore, Back, Style
from tree_output import version as tree_output_version
__version__ = tree_output_version
FOREMAP = {
'black': Fore.BLACK,
'red': Fore.RED,
'green': Fore.GREEN,
'yellow': Fore.YELLOW,
'blue': Fore.BLUE,
'magenta': Fore.MAGENTA,
'cyan': Fore.CYAN,
'white': Fore.WHITE,
'reset': Fore.RESET,
}
BACKMAP = {
'black': Back.BLACK,
'red': Back.RED,
'green': Back.GREEN,
'yellow': Back.YELLOW,
'blue': Back.BLUE,
'magenta': Back.MAGENTA,
'cyan': Back.CYAN,
'white': Back.WHITE,
'reset': Back.RESET,
}
STYLEMAP = {
'dim': Style.DIM,
'normal': Style.NORMAL,
'bright': Style.BRIGHT,
'reset_all': Style.RESET_ALL,
}
class HierarchicalOutput(object):
def __init__(self):
self.level = 0
@staticmethod
def factory(format='json'):
format = format.lower()
if format == 'json':
return JsonOutput()
elif format == 'ansi':
return AnsiOutput()
elif format == 'ascii':
return AsciiOutput()
elif format is None:
return NullOutput()
else:
raise Exception('Unknown format')
def emit(self, record, closed=False):
'''
Implement in format-specific adapter the aggregation of the output
record.
'''
if closed:
self.remove_level()
def add_level(self):
self.level += 1
def remove_level(self):
if self.level > 0:
self.level -= 1
class NullOutput(HierarchicalOutput):
def add_level(self):
'''Do nothing'''
def remove_level(self):
'''Do nothing'''
def emit(self, record, closed=False):
'''Do nothing'''
HierarchicalOutput.emit(self, record, closed)
class JsonOutput(HierarchicalOutput):
def __init__(self):
super(JsonOutput, self).__init__()
self.data = list()
self.root = self.data
self.parents = list()
self.parents.append(self.data)
def add_level(self):
super(JsonOutput, self).add_level()
self.parents.append(self.data)
sub_data = list()
self.data.append(sub_data)
self.data = sub_data
def remove_level(self):
super(JsonOutput, self).remove_level()
if len(self.parents) > 0:
self.data = self.parents.pop()
def emit(self, record, closed=False):
self.data.append(record)
HierarchicalOutput.emit(self, record, closed)
def __str__(self):
'''Represent as JSON'''
return json.dumps(self.root)
class AsciiOutput(HierarchicalOutput):
def __init__(self):
super(AsciiOutput, self).__init__()
self.branch = '+-- '
self.hanging_branch = '`-- '
self.pipe_branch = '| '
def emit(self, record, closed=False):
branch = self.branch
# Accept values from the record.
name = None
colors = dict()
if type(record) == dict:
name = record.get('name')
value = record.get('value')
if 'forecolor' in record:
colors['fore'] = record['forecolor']
if 'backcolor' in record:
colors['back'] = record['backcolor']
if 'style' in record:
colors['style'] = record['style']
else:
value = str(record)
# Do name output.
if name is not None:
self.output_named(name, value, branch=branch, colors=colors)
return
# Do value output.
if type(value) == list:
            for index in range(len(value)):
                if closed and index == len(value) - 1:
self.output(value[index], branch=self.hanging_branch,
colors=colors)
elif index == 0:
self.output(value[index], branch=branch, colors=colors)
branch = self.pipe_branch
else:
self.output(value[index], branch=branch, colors=colors)
else:
if closed:
self.output(value, branch=self.hanging_branch, colors=colors)
else:
self.output(value, branch=branch, colors=colors)
HierarchicalOutput.emit(self, record, closed)
def output_indent(self):
prefix = ''
if self.level > 0:
prefix = (self.pipe_branch + ' ') * (self.level)
if prefix:
sys.stdout.write(prefix)
def output_named(self, name, value, branch, colors):
self.output_indent()
self.color_write('[ %s ]: %s' % (name, value), branch=branch,
colors=colors)
def output(self, line, branch, colors):
if not line:
# Ignore color handling.
return
# Do padding on the left side according to level.
prefix = ''
if self.level > 0:
prefix = self.pipe_branch * (self.level)
sys.stdout.write(prefix)
# Write level char: branch, hanging branch or a pipe.
sys.stdout.write(branch)
# Write value
sys.stdout.write('%s\n' % line)
# Flush
sys.stdout.flush()
class AnsiOutput(AsciiOutput):
def __init__(self):
super(AsciiOutput, self).__init__()
self.branch = u'├──'
self.hanging_branch = u'└──'
self.pipe_branch = u'│ '
self.bracket_colors = {
'fore': 'red',
'style': 'bright',
}
self.branch_colors = {
'fore': 'cyan',
'style': 'bright',
}
colorama.init(autoreset=True)
def bake_colors(self, colors):
result = ''
if 'fore' in colors:
result += FOREMAP[colors['fore']]
if 'back' in colors:
result += BACKMAP[colors['back']]
if 'style' in colors:
result += STYLEMAP[colors['style']]
return result
def output_colors_reset(self):
sys.stdout.write(Fore.RESET + Back.RESET + Style.RESET_ALL)
def output_indent(self):
if self.level == 0:
return
sys.stdout.write(self.bake_colors(self.branch_colors))
super(AnsiOutput, self).output_indent()
self.output_colors_reset()
def output_named(self, name, value, branch, colors):
self.output_indent()
sys.stdout.write(self.bake_colors(self.branch_colors) + self.branch
+ u'─┐')
self.output_colors_reset()
line = self.bake_colors(self.bracket_colors) + '[ ' \
+ Fore.RESET + Back.RESET + Style.RESET_ALL \
+ self.bake_colors(colors) + name \
+ Fore.RESET + Back.RESET + Style.RESET_ALL \
+ self.bake_colors(self.bracket_colors) + ' ] ' \
+ Fore.RESET + Back.RESET + Style.RESET_ALL
if value:
line += u' ➜ ' + value
sys.stdout.write(' ' + line + '\n')
def output(self, line, branch, colors):
if not line:
# Just handle colors.
sys.stdout.write(self.bake_colors(colors))
return
# Do padding on the left side according to level.
self.output_indent()
# Write level char: branch, hanging branch or a pipe.
sys.stdout.write(self.bake_colors(self.branch_colors) + branch + ' ')
self.output_colors_reset()
# Write value
sys.stdout.write(self.bake_colors(colors) + '%s\n' % line)
self.output_colors_reset()
# Flush
sys.stdout.flush()
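# Usage sketch (illustrative): render a small two-level tree to stdout.
#   houtput = HierarchicalOutput.factory(format='ascii')
#   houtput.emit('root')
#   houtput.add_level()
#   houtput.emit('first child')
#   houtput.emit('last child', closed=True)  # closed=True also pops one level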
|
arenadata/ambari
|
refs/heads/branch-adh-1.6
|
ambari-server/src/main/resources/stacks/ADH/1.6/services/HIVE/package/alerts/alert_webhcat_server.py
|
7
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and provides the same set of functions.
import socket
import time
import urllib2
import traceback
import logging
from resource_management.core.environment import Environment
from resource_management.core.resources import Execute
from resource_management.core import shell
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import get_klist_path
from resource_management.libraries.functions.curl_krb_request import curl_krb_request
from os import getpid, sep
RESULT_CODE_OK = "OK"
RESULT_CODE_CRITICAL = "CRITICAL"
RESULT_CODE_UNKNOWN = "UNKNOWN"
OK_MESSAGE = "WebHCat status was OK ({0:.3f}s response from {1})"
CRITICAL_CONNECTION_MESSAGE = "Connection failed to {0}\n{1}"
CRITICAL_HTTP_MESSAGE = "HTTP {0} response from {1} \n{2}"
CRITICAL_WEBHCAT_STATUS_MESSAGE = 'WebHCat returned an unexpected status of "{0}"'
CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE = "Unable to determine WebHCat health from unexpected JSON response"
TEMPLETON_PORT_KEY = '{{webhcat-site/templeton.port}}'
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
WEBHCAT_PRINCIPAL_KEY = '{{webhcat-site/templeton.kerberos.principal}}'
WEBHCAT_KEYTAB_KEY = '{{webhcat-site/templeton.kerberos.keytab}}'
SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
WEBHCAT_OK_RESPONSE = 'ok'
WEBHCAT_PORT_DEFAULT = 50111
CONNECTION_TIMEOUT_KEY = 'connection.timeout'
CONNECTION_TIMEOUT_DEFAULT = 5.0
CURL_CONNECTION_TIMEOUT_DEFAULT = str(int(CONNECTION_TIMEOUT_DEFAULT))
# default keytab location
SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
# default smoke principal
SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
# default smoke user
SMOKEUSER_DEFAULT = 'ambari-qa'
logger = logging.getLogger('ambari_alerts')
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (TEMPLETON_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEYTAB_KEY, SMOKEUSER_PRINCIPAL_KEY,
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, SMOKEUSER_KEY)
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations (dictionary): a mapping of configuration key to value
parameters (dictionary): a mapping of script parameter key to value
host_name (string): the name of this host where the alert is running
"""
result_code = RESULT_CODE_UNKNOWN
if configurations is None:
return (result_code, ['There were no configurations supplied to the script.'])
webhcat_port = WEBHCAT_PORT_DEFAULT
if TEMPLETON_PORT_KEY in configurations:
webhcat_port = int(configurations[TEMPLETON_PORT_KEY])
security_enabled = False
if SECURITY_ENABLED_KEY in configurations:
security_enabled = configurations[SECURITY_ENABLED_KEY].lower() == 'true'
# parse script arguments
connection_timeout = CONNECTION_TIMEOUT_DEFAULT
curl_connection_timeout = CURL_CONNECTION_TIMEOUT_DEFAULT
if CONNECTION_TIMEOUT_KEY in parameters:
connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
curl_connection_timeout = str(int(connection_timeout))
# the alert will always run on the webhcat host
if host_name is None:
host_name = socket.getfqdn()
smokeuser = SMOKEUSER_DEFAULT
if SMOKEUSER_KEY in configurations:
smokeuser = configurations[SMOKEUSER_KEY]
# webhcat always uses http, never SSL
query_url = "http://{0}:{1}/templeton/v1/status?user.name={2}".format(host_name, webhcat_port, smokeuser)
# initialize
total_time = 0
json_response = {}
if security_enabled:
try:
# defaults
smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
# check script params
if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
# check configurations last as they should always take precedence
if SMOKEUSER_PRINCIPAL_KEY in configurations:
smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
if SMOKEUSER_KEYTAB_KEY in configurations:
smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
# Get the configured Kerberos executable search paths, if any
kerberos_executable_search_paths = None
if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
env = Environment.get_instance()
stdout, stderr, time_millis = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
query_url, "webhcat_alert_cc_", kerberos_executable_search_paths, True,
"WebHCat Server Status", smokeuser,
connection_timeout=curl_connection_timeout)
# check the response code
response_code = int(stdout)
# 0 indicates no connection
if response_code == 0:
label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
return (RESULT_CODE_CRITICAL, [label])
# any other response aside from 200 is a problem
if response_code != 200:
label = CRITICAL_HTTP_MESSAGE.format(response_code, query_url, traceback.format_exc())
return (RESULT_CODE_CRITICAL, [label])
# now that we have the http status and it was 200, get the content
stdout, stderr, total_time = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
query_url, "webhcat_alert_cc_", kerberos_executable_search_paths,
False, "WebHCat Server Status", smokeuser,
connection_timeout=curl_connection_timeout)
json_response = json.loads(stdout)
except:
return (RESULT_CODE_CRITICAL, [traceback.format_exc()])
else:
url_response = None
try:
# execute the query for the JSON that includes WebHCat status
start_time = time.time()
url_response = urllib2.urlopen(query_url, timeout=connection_timeout)
total_time = time.time() - start_time
json_response = json.loads(url_response.read())
except urllib2.HTTPError as httpError:
label = CRITICAL_HTTP_MESSAGE.format(httpError.code, query_url, traceback.format_exc())
return (RESULT_CODE_CRITICAL, [label])
except:
label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
return (RESULT_CODE_CRITICAL, [label])
finally:
if url_response is not None:
try:
url_response.close()
except:
pass
# if status is not in the response, we can't do any check; return CRIT
if 'status' not in json_response:
return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + str(json_response)])
# URL response received, parse it
try:
webhcat_status = json_response['status']
except:
return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + "\n" + traceback.format_exc()])
# proper JSON received, compare against known value
if webhcat_status.lower() == WEBHCAT_OK_RESPONSE:
result_code = RESULT_CODE_OK
label = OK_MESSAGE.format(total_time, query_url)
else:
result_code = RESULT_CODE_CRITICAL
label = CRITICAL_WEBHCAT_STATUS_MESSAGE.format(webhcat_status)
return (result_code, [label])
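# Minimal local sketch of how the alert framework would invoke execute(); the configuration
# values below are assumptions for an unsecured test cluster, keyed by the same
# {{site/property}} tokens returned by get_tokens().
if __name__ == '__main__':
    sample_configurations = {
        TEMPLETON_PORT_KEY: '50111',
        SECURITY_ENABLED_KEY: 'false',
        SMOKEUSER_KEY: 'ambari-qa',
    }
    sample_parameters = {CONNECTION_TIMEOUT_KEY: '5.0'}
    print(execute(configurations=sample_configurations,
                  parameters=sample_parameters,
                  host_name='localhost'))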
|
MostlyOpen/odoo_addons
|
refs/heads/master
|
myo_mfile/models/mfile.py
|
1
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from datetime import *
from openerp import api, fields, models
class MediaFile(models.Model):
_name = 'myo.mfile'
name = fields.Char('Name', required=True, translate=False)
alias = fields.Char('Alias', help='Common name by which the file is referred')
code = fields.Char(string='Code', required=False)
path = fields.Char(string='Path', compute='_compute_path_str', store=False, readonly=True)
description_old = fields.Text(string='Description', translate=False)
description = fields.Html(string='Description', translate=False)
notes_old = fields.Text(string='Notes')
notes = fields.Html(string='Notes', translate=False)
date_inclusion = fields.Datetime('Inclusion Date',
default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
active = fields.Boolean('Active',
help="If unchecked, it will allow you to hide the media file without removing it.",
default=1)
url = fields.Char('URL', help="URL of the File")
parent_id = fields.Many2one('myo.mfile', 'Parent File')
child_ids = fields.One2many('myo.mfile', 'parent_id', 'Child Files')
_order = 'name'
_sql_constraints = [('code_uniq', 'unique(code)', u'Error! The Code must be unique!')]
@api.one
def _compute_path_str(self):
if self.code:
if self.alias:
self.path = self.alias
else:
self.path = self.code
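# Usage sketch (requires a running Odoo environment with this module installed; the record
# values are placeholders):
#
#   mfile = env['myo.mfile'].create({
#       'name': 'consent_form.pdf',
#       'code': 'MF-0001',
#       'alias': 'Consent Form',
#   })
#   # _compute_path_str prefers the alias over the code when both are set
#   assert mfile.path == 'Consent Form'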
|
partofthething/home-assistant
|
refs/heads/dev
|
tests/components/axis/test_binary_sensor.py
|
5
|
"""Axis binary sensor platform tests."""
from homeassistant.components.axis.const import DOMAIN as AXIS_DOMAIN
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
DOMAIN as BINARY_SENSOR_DOMAIN,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from .test_device import NAME, setup_axis_integration
EVENTS = [
{
"operation": "Initialized",
"topic": "tns1:Device/tnsaxis:Sensor/PIR",
"source": "sensor",
"source_idx": "0",
"type": "state",
"value": "0",
},
{
"operation": "Initialized",
"topic": "tns1:PTZController/tnsaxis:PTZPresets/Channel_1",
"source": "PresetToken",
"source_idx": "0",
"type": "on_preset",
"value": "1",
},
{
"operation": "Initialized",
"topic": "tnsaxis:CameraApplicationPlatform/VMD/Camera1Profile1",
"type": "active",
"value": "1",
},
]
async def test_platform_manually_configured(hass):
"""Test that nothing happens when platform is manually configured."""
assert (
await async_setup_component(
hass,
BINARY_SENSOR_DOMAIN,
{BINARY_SENSOR_DOMAIN: {"platform": AXIS_DOMAIN}},
)
is True
)
assert AXIS_DOMAIN not in hass.data
async def test_no_binary_sensors(hass):
"""Test that no sensors in Axis results in no sensor entities."""
await setup_axis_integration(hass)
assert not hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)
async def test_binary_sensors(hass):
"""Test that sensors are loaded properly."""
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
device.api.event.update(EVENTS)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 2
pir = hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_pir_0")
assert pir.state == STATE_OFF
assert pir.name == f"{NAME} PIR 0"
assert pir.attributes["device_class"] == DEVICE_CLASS_MOTION
vmd4 = hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_vmd4_profile_1")
assert vmd4.state == STATE_ON
assert vmd4.name == f"{NAME} VMD4 Profile 1"
assert vmd4.attributes["device_class"] == DEVICE_CLASS_MOTION
|
krischer/jane
|
refs/heads/master
|
src/jane/documents/management/commands/reindex_all_documents.py
|
1
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from jane.documents import models, signals
class Command(BaseCommand):
help = "Reindex all documents in Jane's document database."
def add_arguments(self, parser):
parser.add_argument(
'document_type', type=str,
choices=[_i.name for _i in models.DocumentType.objects.all()],
help='The document type of the files to upload.')
def handle(self, *args, **kwargs):
# Cannot easily fail as the model type settings are enforced by
# argparse.
document_type = models.DocumentType.objects.get(
name=kwargs["document_type"])
for doc in models.Document.objects.filter(document_type=document_type):
signals.index_document(sender=None, instance=doc, created=None)
print('.', end='')
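# Invocation sketch (the document type below is a placeholder; valid choices are the
# DocumentType names already registered in the database):
#
#   python manage.py reindex_all_documents quakeml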
|
sanjeevtripurari/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/contrib/auth/management/commands/createsuperuser.py
|
130
|
"""
Management utility to create superusers.
"""
from __future__ import unicode_literals
import getpass
import sys
from optparse import make_option
from django.contrib.auth import get_user_model
from django.contrib.auth.management import get_default_username
from django.core import exceptions
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
from django.utils.six.moves import input
from django.utils.text import capfirst
class Command(BaseCommand):
def __init__(self, *args, **kwargs):
# Options are defined in an __init__ method to support swapping out
# custom user models in tests.
super(Command, self).__init__(*args, **kwargs)
self.UserModel = get_user_model()
self.username_field = self.UserModel._meta.get_field(self.UserModel.USERNAME_FIELD)
self.option_list = BaseCommand.option_list + (
make_option('--%s' % self.UserModel.USERNAME_FIELD, dest=self.UserModel.USERNAME_FIELD, default=None,
help='Specifies the login for the superuser.'),
make_option('--noinput', action='store_false', dest='interactive', default=True,
help=('Tells Django to NOT prompt the user for input of any kind. '
'You must use --%s with --noinput, along with an option for '
'any other required field. Superusers created with --noinput will '
'not be able to log in until they\'re given a valid password.' %
self.UserModel.USERNAME_FIELD)),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
) + tuple(
make_option('--%s' % field, dest=field, default=None,
help='Specifies the %s for the superuser.' % field)
for field in self.UserModel.REQUIRED_FIELDS
)
option_list = BaseCommand.option_list
help = 'Used to create a superuser.'
def handle(self, *args, **options):
username = options.get(self.UserModel.USERNAME_FIELD, None)
interactive = options.get('interactive')
verbosity = int(options.get('verbosity', 1))
database = options.get('database')
# If not provided, create the user with an unusable password
password = None
user_data = {}
# Do quick and dirty validation if --noinput
if not interactive:
try:
if not username:
raise CommandError("You must use --%s with --noinput." %
self.UserModel.USERNAME_FIELD)
username = self.username_field.clean(username, None)
for field_name in self.UserModel.REQUIRED_FIELDS:
if options.get(field_name):
field = self.UserModel._meta.get_field(field_name)
user_data[field_name] = field.clean(options[field_name], None)
else:
raise CommandError("You must use --%s with --noinput." % field_name)
except exceptions.ValidationError as e:
raise CommandError('; '.join(e.messages))
else:
# Prompt for username/password, and any other required fields.
# Enclose this whole thing in a try/except to trap for a
# keyboard interrupt and exit gracefully.
default_username = get_default_username()
try:
# Get a username
verbose_field_name = self.username_field.verbose_name
while username is None:
if not username:
input_msg = capfirst(verbose_field_name)
if default_username:
input_msg = "%s (leave blank to use '%s')" % (
input_msg, default_username)
raw_value = input(force_str('%s: ' % input_msg))
if default_username and raw_value == '':
raw_value = default_username
try:
username = self.username_field.clean(raw_value, None)
except exceptions.ValidationError as e:
self.stderr.write("Error: %s" % '; '.join(e.messages))
username = None
continue
try:
self.UserModel._default_manager.db_manager(database).get_by_natural_key(username)
except self.UserModel.DoesNotExist:
pass
else:
self.stderr.write("Error: That %s is already taken." %
verbose_field_name)
username = None
for field_name in self.UserModel.REQUIRED_FIELDS:
field = self.UserModel._meta.get_field(field_name)
user_data[field_name] = options.get(field_name)
while user_data[field_name] is None:
raw_value = input(force_str('%s: ' % capfirst(field.verbose_name)))
try:
user_data[field_name] = field.clean(raw_value, None)
except exceptions.ValidationError as e:
self.stderr.write("Error: %s" % '; '.join(e.messages))
user_data[field_name] = None
# Get a password
while password is None:
if not password:
password = getpass.getpass()
password2 = getpass.getpass(force_str('Password (again): '))
if password != password2:
self.stderr.write("Error: Your passwords didn't match.")
password = None
continue
if password.strip() == '':
self.stderr.write("Error: Blank passwords aren't allowed.")
password = None
continue
except KeyboardInterrupt:
self.stderr.write("\nOperation cancelled.")
sys.exit(1)
user_data[self.UserModel.USERNAME_FIELD] = username
user_data['password'] = password
self.UserModel._default_manager.db_manager(database).create_superuser(**user_data)
if verbosity >= 1:
self.stdout.write("Superuser created successfully.")
|
minggli/chatbot
|
refs/heads/dev
|
tests/test_vector.py
|
1
|
from chatbot import settings as s
from chatbot.ie import extracted_urls, TextMiner
from chatbot.nlp.embedding import WordEmbedding
WEB_METAKEY, BASE_URL = s.WEB_METAKEY, s.BASE_URL
urls = extracted_urls(base_url=BASE_URL)
web_scraper = TextMiner(urls=urls, attrs=WEB_METAKEY, display=True)
raw_data = web_scraper.extract().jsonify()
corpus = [json['doc'] for json in raw_data]
v = WordEmbedding()
v.fit(corpus)
vectors = v.vectorize()
unrepresented_words = list()
for k, w in enumerate(corpus):
if (vectors[k] == 1e-8).all():  # the vector is entirely the 1e-8 fallback value
unrepresented_words.append(w)
unrepresented_words.sort(key=lambda x: len(x), reverse=True)
for w in unrepresented_words:
print(w)
if len(w) < 5:
break
print('{0:.4f}%'.format(len(unrepresented_words) / len(corpus) * 100))
|
NavyaJayaram/MyRepository
|
refs/heads/master
|
YouTubeUsingAJS/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/langthaimodel.py
|
2929
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
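# Consumption sketch: elsewhere in this package the model dictionary above is typically passed
# to the single-byte charset prober; shown as comments because that module is not part of this
# file.
#
#   from .sbcharsetprober import SingleByteCharSetProber
#   prober = SingleByteCharSetProber(TIS620ThaiModel)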
|
Teamxrtc/webrtc-streaming-node
|
refs/heads/master
|
third_party/webrtc/src/chromium/src/tools/chrome_proxy/live_tests/chrome_proxy_benchmark.py
|
8
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from live_tests import chrome_proxy_measurements as measurements
from live_tests import pagesets
from telemetry import benchmark
class ChromeProxyLatency(benchmark.Benchmark):
tag = 'latency'
test = measurements.ChromeProxyLatency
page_set = pagesets.Top20StorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.latency.top_20'
class ChromeProxyLatencyDirect(benchmark.Benchmark):
tag = 'latency_direct'
test = measurements.ChromeProxyLatencyDirect
page_set = pagesets.Top20StorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.latency_direct.top_20'
class ChromeProxyDataSaving(benchmark.Benchmark):
tag = 'data_saving'
test = measurements.ChromeProxyDataSaving
page_set = pagesets.Top20StorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.data_saving.top_20'
class ChromeProxyDataSavingDirect(benchmark.Benchmark):
tag = 'data_saving_direct'
test = measurements.ChromeProxyDataSavingDirect
page_set = pagesets.Top20StorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.data_saving_direct.top_20'
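# Note on naming: Telemetry registers each benchmark under the string returned by its Name()
# classmethod, e.g.
#
#   ChromeProxyLatency.Name()     -> 'chrome_proxy_benchmark.latency.top_20'
#   ChromeProxyDataSaving.Name()  -> 'chrome_proxy_benchmark.data_saving.top_20'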
|
patcorwin/fossil
|
refs/heads/master
|
pdil/tool/fossil/card.py
|
1
|
'''
'''
import os
import math
from maya.api import OpenMaya
from pymel.core import PyNode, xform, rotate, nurbsPlane, polyCylinder, scale, delete, makeIdentity, joint, hide, pointConstraint, group, parentConstraint, aimConstraint, warning, dt, confirmDialog, duplicate, ls, importFile, mel, spaceLocator, distanceDimension, select
from pdil.add import simpleName, meters
from pdil import core
from .core import proxyskel
from .core import config
from . import moveCard
from . import util
try:
basestring
except NameError: # python 3 compatibility
basestring = str
def addOutputControlsAttrs(obj):
_addOutputControls(obj, "Center")
_addOutputControls(obj, "Left")
_addOutputControls(obj, "Right")
def _addOutputControls(obj, side):
'''
Adds attributes to card for tracking the created controls. Used in conjunction
with OutputControls.
:param PyNode obj: The card to add attributes to
:param str side: Either "Left", "Right" or "Center"
'''
if obj.hasAttr('output' + side):
return
mobj = core.capi.asMObject(obj)
cattr = OpenMaya.MFnCompoundAttribute()
mattr = OpenMaya.MFnMessageAttribute()
nattr = OpenMaya.MFnNumericAttribute()
extraNodes = cattr.create('output' + side, 'out' + side[0])
cattr.array = True
link = mattr.create( 'outputLink' + side, 'ol' + side[0] )
type = nattr.create('out' + side + 'Type', 'o' + side[0] + 't', OpenMaya.MFnNumericData.kInt, 0)
cattr.addChild(link)
cattr.addChild(type)
mobj.addAttribute(extraNodes)
def addJointArrayAttr(obj):
'''
.. todo:: Eventually this should be abstracted.
'''
mobj = core.capi.asMObject( obj )
cAttr = OpenMaya.MFnCompoundAttribute()
mList = cAttr.create( 'joints', 'jnts' )
cAttr.array = True
mAttr = OpenMaya.MFnMessageAttribute()
jMsg = mAttr.create( 'jmsg', 'jmsg' )
cAttr.addChild( jMsg )
mobj.addAttribute( mList )
def placeJoints(card, positions):
'''
Takes a list of (x, y) pairs and positions the joints. The card is centered at (0, 0) and
extends from (-1, -1) to (1, 1).
'''
width, height = card.size
wMod = width / 2.0
hMod = height / 2.0
for pos, jnt in zip( positions, card.joints ):
jnt.tz.set( -wMod * pos[0] )
jnt.ty.set( hMod * pos[1] )
if hasattr(card, 'center') and not card.center:
pass
else:
card.pivToStart()
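# Example (mirroring how arm() lays out a 3-joint limb further down; `armCard` is a placeholder
# for a card made with makeCard): the first element of each pair drives tz, the second ty.
#
#   placeJoints(armCard, [(0, 1), (0.5, 0), (0, -1)])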
def makeArrow():
'''
Creates an arrow, with the base vert of 60 and tip of 60.
'''
arrow = polyCylinder( r=1, h=10, sx=20, sy=2, sz=1, ax=[0, 1, 0] )[0]
scale( arrow.vtx[40:59], [1.75, 0, 1.75] )
arrow.ty.set( 5 )
makeIdentity(arrow, apply=True, t=True, r=True, s=True, n=False)
xform(arrow, piv=(0, 0, 0), ws=True)
delete( arrow, ch=True )
jnt = joint(None)
jnt.drawStyle.set( 2 )
arrow.getShape().setParent( jnt, add=True, shape=True )
delete( arrow )
return jnt
def pivTo(card, x, y):
'''
Move the pivot to the given point, using the same (x, y) coordinate space as placeJoints.
'''
width, height = card.size
wMod = width / 2.0
hMod = height / 2.0
piv = xform(card, q=True, ws=True, t=True)
xform( card, ws=True, piv=[0, piv[1] + hMod * y, piv[2] - wMod * x ] )
def pivToStart(card):
'''
Move the pivot to the start joint.
'''
piv = xform(card.start(), q=True, ws=True, piv=True)
xform( card, ws=True, piv=piv[:3])
def makeCard(jointCount=5, jointNames={'repeat': 'DEFAULT'}, rigInfo=None, size=(4, 6), suffix=''):
'''
.. todo:: Do not use defaults.
&&& names is really joints names, make it so.
'''
if isinstance(jointNames, basestring):
head, repeat, tail = util.parse(jointNames)
jointNames = {'head': head, 'repeat': repeat, 'tail': tail}
elif isinstance(jointNames, list):
jointNames = {'head': jointNames}
leadName = jointNames.get('head')[0] if jointNames.get('head') else jointNames.get('repeat', 'DEFAULT')
joints = []
width, height = size
# Base the card name off the lead joint
cardName = leadName
if not isinstance( cardName, basestring ):
cardName = cardName[0]
#jointNames = ' '.join(jointNames)
if not cardName.endswith('_card'):
if cardName.endswith('_'):
cardName += 'card'
else:
cardName += '_card'
# Make the actual card and tag with attrs
card = nurbsPlane(w=width, lr=height / float(width), ax=(1, 0, 0), n=cardName, d=1, u=1, v=1 )[0]
card.addAttr( 'fossilRigData', dt='string' )
card.addAttr( 'fossilRigState', dt='string' )
addOutputControlsAttrs(card)
addJointArrayAttr(card)
#card.addAttr( 'skeletonInfo', at='bool' )
#card.addAttr( 'buildOrder', at='long' )
#card.addAttr( 'nameInfo', dt='string' )
#card.addAttr( 'suffix', dt='string' )
# Reassign it so it gets the proper interface now that it has the attrs
card = PyNode(card)
rigData = {
'buildOrder': 10,
'mirrorCode': suffix,
'nameInfo': jointNames,
}
card.rigData = rigData
#card.buildOrder.set( 10 )
#card.suffix.set( suffix )
#card.nameInfo.set( jointNames ) # &&& I hate how I handle the names, want to put in rigInfo (I think that's the json attr...)
#card.rigParams = ''
#card.rigOptions = ''
arrow = makeArrow()
arrow.setParent( card )
arrow.rename('arrow')
card.scale >> arrow.inverseScale
arrow.t.set(0, 0, 0)
arrow.r.set(0, 0, -90)
hide(arrow)
card.setParent( proxyskel.masterGroup() )
# Place all the joints
delta = height / float(jointCount - 1) if jointCount > 1 else 0
for i in range(jointCount):
newJoint = card.addJoint()
joints.append(newJoint)
newJoint.ty.set( height / 2.0 - delta * i )
if len(joints) > 1:
for parentBpj, childBpj in zip( joints[0:-1], joints[1:] ):
proxyskel.pointer( parentBpj, childBpj )
elif joints:
proxyskel.makeProxy(joints[0], proxyskel.getProxyGroup())
joints[0].ty.set(0)
if joints:
card.setTempNames()
pivToStart(card)
return card
def nextLetter(c):
# Naively increment the letter, ex B -> C, or M -> N.
if not c:
return 'A'
return chr(ord(c) + 1)
def findUniqueNameInfo(nameScheme, alteration, cards=None):
scheme = (nameScheme[0], nameScheme[1] + alteration, nameScheme[2])
for c in cards:
if util.parse(c.nameInfo.get()) == scheme:
return findUniqueNameInfo(nameScheme, nextLetter(alteration), cards=cards)
else:
return scheme
class Orientation:
VERTICAL = 'vertical'
HORIZONTAL = 'horizontal'
def splitCard(tempJoint):
'''
Everything after and including the given joint will become a new card.
'''
oldCard = tempJoint.cardCon.node()
if oldCard.start() == tempJoint:
warning( 'Cannot split at the first joint' )
return
card = makeCard(jointCount=0, size=(1, 1))
newCvs = list(card.cv)
newCvs = [newCvs[0], newCvs[2], newCvs[1], newCvs[3]]
points = [ dt.Vector(xform(v, q=True, ws=True, t=True)) for v in oldCard.cv ]
points = [points[0], points[2], points[1], points[3]] # vtx and points must be rearranged in the same way
vtx = list(oldCard.cv)
vtx = [vtx[0], vtx[2], vtx[1], vtx[3]]
midA = (points[0] - points[2]) / 2.0 + points[2]
midB = (points[1] - points[3]) / 2.0 + points[3]
xform( vtx[0], ws=True, t=midA )
xform( vtx[1], ws=True, t=midB )
card.setParent( oldCard.getParent() )
card.t.set( oldCard.t.get() )
card.r.set( oldCard.r.get() )
card.s.set( oldCard.s.get() )
xform( newCvs[0], ws=True, t=points[0] )
xform( newCvs[1], ws=True, t=points[1] )
xform( newCvs[2], ws=True, t=midA )
xform( newCvs[3], ws=True, t=midB )
start, repeat, end = util.parse( oldCard.nameInfo.get())
index = oldCard.joints.index(tempJoint)
if index == len(start):
# New card is repeat + end
oldCard.nameInfo.set( ' '.join(start) )
card.nameInfo.set( repeat + '* ' + ' '.join(end) )
elif index == len(oldCard.joints) - len(end):
oldCard.nameInfo.set( ' '.join(start) + ' ' + repeat + '*' )
card.nameInfo.set( ' '.join(end) )
else:
# Terrible split!
oldCard.nameInfo.set( ' '.join(start) + ' ' + repeat + '*' )
card.nameInfo.set( repeat + 'X* ' + ' '.join(end) )
confirmDialog(m="You are splitting in the repeating Zone, you'll want to fix up names\nAn 'X' has been added to the new cards repeating section")
card.rename( card.nameInfo.get() )
oldCard.rename( oldCard.nameInfo.get() )
# Move the appropriate joints to the new card.
for j in oldCard.joints[index: ]:
prevConnection = j.message.listConnections(type=card.__class__, p=1)
j.message.disconnect( prevConnection[0] )
# Not sure why position is lost but I'm not sure it really matters
pos = xform(j, q=True, ws=True, t=True)
card.addJoint(j)
xform(j, ws=True, t=pos)
# Update .parentCard
movedJoints = set( card.joints )
for childCard in card.childrenCards:
for j in childCard.joints:
if j.parent in movedJoints:
childCard.parentCardLink = card
continue
# There might be a way to deal with moving controls, but the fact a split happend indicates they will be rebuilt.
def mirrorCard(card):
dup = duplicateCard(card)
mult = core.math.multiply( [card.tx, card.ry, card.rz], [-1, -1, -1])
mult >> dup.tx
card.rx >> dup.rx
mult.node().outputY >> dup.ry
mult.node().outputZ >> dup.rz
card.ty >> dup.ty
card.tz >> dup.tz
def duplicateCard(card):
d = duplicate( card )[0]
proxyskel.relink( card, d )
if card.parentCard:
d.parentCardLink = card.parentCard
return d
def getArrows():
return ls( 'arrow' )
def getConnectors():
'''
#-# I *think* the idea is to toggle the proxy connector display but I'm not certain
I also don't think this is useful. Maybe it was when I didn't have the connectors autohide with the card.
'''
#cards = ls( '*.skeletonInfo', o=1 )
for card in core.findNode.allCards():
for j in card.joints:
if not j.parent:
return j.proxy
def customUp(jnt, arrow=None):
if not arrow:
arrow = makeArrow()
arrow.setParent( jnt.getParent() )
arrow.rename( 'custom_arrow' )
core.dagObj.moveTo( arrow, jnt )
PyNode(jnt).customUp = arrow
return arrow
def customOrient(bpJoint):
newNodes = importFile( os.path.dirname(__file__) + '/Axis.ma', rnn=True, renameAll=True )
transform = ls(newNodes, type='transform')[0]
masterGroup = proxyskel.masterGroup()
for child in masterGroup.listRelatives():
if child.name() == 'customOrients':
customGroup = child
break
else:
customGroup = group(n='customOrients', p=masterGroup, em=True)
transform.setParent(customGroup)
transform.scale.set(3, 3, 3)
transform.t.setKeyable(False)
bpJoint.customOrient = transform
pointConstraint(bpJoint, transform)
transform.t.lock()
transform.rename( simpleName(bpJoint, 'orient_{0}') )
def cardIk(card):
#ctrl = mel.eval( 'curve -d 1 -p -0.5 1 -0.866026 -p -0.5 1 0.866025 -p 1 1 0 -p -0.5 1 -0.866026 -p 0 0 0 -p -0.5 -1 -0.866026 -p -0.5 -1 0.866025 -p 0 0 0 -p -0.5 1 0.866025 -p 1 1 0 -p 0 0 0 -p 1 -1 0 -p -0.5 -1 -0.866026 -p -0.5 -1 0.866025 -p 1 -1 0 -k 0 -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 -k 8 -k 9 -k 10 -k 11 -k 12 -k 13 -k 14 ;' )
ctrl = PyNode(mel.eval('curve -d 1 -p 0 4 0 -p -2.828427 2.828427 -2.47269e-007 -p -4 0 -3.49691e-007 -p -2.828427 -2.828427 -2.47269e-007 -p 0 -4 0 -p 2.828427 -2.828427 0 -p 4 0 0 -p 2.828427 2.828427 0 -p 0 4 0 -p -1.23634e-007 2.828427 2.828427 -p -1.74846e-007 0 4 -p -1.23634e-007 -2.828427 2.828427 -p 0 -4 0 -p 3.70903e-007 -2.828427 -2.828427 -p 5.24537e-007 0 -4 -p 3.70903e-007 2.828427 -2.828427 -p 0 4 0 -p 0 0 0 -p 0 -4 0 -p 0 0 0 -p -4 0 0 -p 4 0 0 -p 0 0 -4 -p 0 0 4 -k 0 -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 -k 8 -k 9 -k 10 -k 11 -k 12 -k 13 -k 14 -k 15 -k 16 -k 17 -k 18 -k 19 -k 20 -k 21 -k 22 -k 23 ;'))
ctrl.rename( card.name() + "_target" )
upCtrl = duplicate(ctrl)[0]
upCtrl.rename( card.name() + "_up" )
aim = spaceLocator()
aim.setParent(ctrl)
aim.t.set(0, 0, 0)
hide(aim)
up = spaceLocator()
up.setParent( upCtrl )
hide(up)
base = spaceLocator()
base.rename( 'cardIkBase' )
hide(base)
pointConstraint( card, base )
core.dagObj.moveTo( ctrl, card.joints[-1] )
#core.dagObj.moveTo( upCtrl, card.vtx[1] )
core.dagObj.moveTo( upCtrl, card.cv[1][0] )
aimConstraint( aim, card, wut='object', wuo=up, aim=[0, -1, 0], u=[0, 0, -1])
dist = distanceDimension( base, aim )
dist.getParent().setParent(ctrl)
hide(dist)
core.math.divide( dist.distance, dist.distance.get() / card.sy.get() ) >> card.sy
follower = spaceLocator()
follower.rename( 'cardIkFollower' )
follower.setParent( card )
follower.t.set(0, 0, 0)
hide(follower)
pointConstraint( up, follower, skip=['x', 'z'] )
sideDist = distanceDimension( follower, up )
sideDist.getParent().setParent(ctrl)
hide(sideDist)
core.math.divide( sideDist.distance, sideDist.distance.get() / card.sz.get() ) >> card.sz
# Orient controls with the card so moving in local space initially preserves orientation.
upCtrl.setRotation( card.getRotation(space='world'), space='world' )
ctrl.setRotation( card.getRotation(space='world'), space='world' )
distBetweenCtrls = (ctrl.getTranslation(space='world') - upCtrl.getTranslation(space='world') ).length()
if distBetweenCtrls < 8.0:
upCtrl.s.set( [distBetweenCtrls / 8.0] * 3 )
ctrl.s.set( [distBetweenCtrls / 8.0] * 3 )
select(ctrl)
def removeCardIk(card):
aimNode = card.rx.listConnections()
if not aimNode:
return
card.rx.disconnect()
card.ry.disconnect()
card.rz.disconnect()
card.sy.disconnect()
card.sz.disconnect()
aim = aimConstraint( aimNode, q=True, tl=True )[0]
up = aimConstraint( aimNode, q=True, wuo=True )
aimCtrl = aim.getParent()
upCtrl = up.getParent()
scaleA, scaleB = aimCtrl.listRelatives(ad=1, type='distanceDimShape')
delete(
scaleA.startPoint.listConnections()[0],
scaleB.startPoint.listConnections()[0],
aimCtrl,
upCtrl )
def reconnectRealBones():
'''
If the cards lose their connection to the bones, run this to reconnect them.
.. todo::
* I don't think there is anything actually preventing a helper from
being the child of another helper. Either fix that or account for it here
'''
failures = []
for card in core.findNode.allCards():
for jnt, names in card.getOutputMap(includeHelpers=True).items():
if names[0]:
realJoint = ls(names[0])
if len(realJoint) == 1:
jnt.real = realJoint[0]
else:
failures.append( jnt )
if len(names) > 1:
realJoint = ls(names[1])
if len(realJoint) == 1:
jnt.realMirror = realJoint[0]
else:
failures.append( jnt )
return failures
# Basic cards -----------------------------------------------------------------
#-# How can I turn this into a data driven thing?
'''
I think I want to the user to be able to select cards and save them as a "preset"
- Save .rigInfo
- Save the offset from the joint it is connected to
Is this it? It looks so simple!
'''
def spineCard(spineCount, orientation=Orientation.VERTICAL, isStart=True):
'''
Makes a spine with a Pelvis as the core joint and sub joints of the spine
and hips occupying the same space.
'''
hasHips = True
hasPelvis = True
spine = makeCard( spineCount, 'Spine*', size=meters(0.5, 1) )
spine.rigCommand = 'TranslateChain'
util.annotateSelectionHandle( spine.joints[0], 'Spine Start', (0, -2, 0) )
if hasPelvis:
pelvis = makeCard( 1, 'Pelvis', size=meters(0.2, 0.2) )
pelvis.fkControllerOptions = '-shape band -size 20 -color blue .65'
pelvis.start().orientTarget = '-world-'
pelvis.rz.set(90)
pelvis.start().t.lock()
moveCard.toObjByCenter( pelvis, spine.start() )
pelvis.rigCommand = 'TranslateChain'
#proxyskel.pointer( pelvis.start(), spine.start())
spine.start().setBPParent( pelvis.start() )
pointConstraint( spine.start(), pelvis)
util.annotateSelectionHandle( pelvis.start(), 'Pelvis (top joint)', (0, 0, -2) )
if hasHips:
hips = makeCard( 1, 'Hips', size=meters(0.2, 0.2) )
hips.fkControllerOptions = '-shape band -size 15 -color red .65'
hips.start().orientTarget = '-world-'
hips.ry.set(-90)
hips.start().t.lock()
moveCard.toObjByCenter( hips, spine.start() )
hips.rigCommand = 'TranslateChain'
#proxyskel.pointer( pelvis.start(), hips.start() )
hips.start().setBPParent( pelvis.start() )
pointConstraint( spine.start(), hips)
util.annotateSelectionHandle( hips.start(), 'Hips', (0, -2, 0) )
if orientation == Orientation.VERTICAL:
spine.rx.set( 180 )
else:
spine.rx.set( -90 )
moveCard.up( spine, meters(0.5) )
#spine.buildOrder.set( 0 ) # Probably not needed since (when?) proper build order is enforced
if isStart:
if hasPelvis:
pelvis.start().proxy.setParent( proxyskel.getProxyGroup() )
else:
spine.start().proxy.setParent( proxyskel.getProxyGroup() )
return spine, hips
def arm(clav, side):
leftArm = makeCard( 3, ['Shoulder', 'Elbow', 'Wrist'], size=meters(.2, 1), suffix=side )
leftArm.rigCommand = 'IkChain'
placeJoints( leftArm, [(0, 1), (0.5, 0), (0, -1)] )
rigData = leftArm.rigData
rigData['ikParams'] = {'name': 'Arm', 'endOrientType': 'True_Zero'}
leftArm.rigData = rigData
#clavicleEnd = getattr(clav, attrMap[side] )[0]
moveCard.to( leftArm, clav.end() )
moveCard.farther( leftArm, meters(.25) )
#proxyskel.pointer( clav.end(), leftArm.start() )
leftArm.start().setBPParent( clav.end() )
return leftArm
def handSetup( leftArm, numFingers, makeThumb ):
#hand = Container('Hand', meters(0.20, 0.20) )
hand = makeCard( 1, 'Hand', size=meters(0.20, 0.20) )
# It makes sense that the wrist is oriented to the hand
leftArm.end().customUp = hand.getUpArrow()
placeJoints( hand, [(0, -.7)] )
hand.joints[0].isHelper = True
leftArm.end().orientTarget = hand.joints[0]
hand.joints[0].setBPParent( leftArm.end() )
xform( hand, ws=True, t=xform(leftArm.end(), q=True, ws=True, t=True) )
moveCard.down( hand, meters(.1) )
#hand.setParent( leftArm.end() )
xform( hand, ws=True, piv=xform(leftArm.end(), q=True, ws=True, t=True) )
pointConstraint( leftArm.end(), core.dagObj.zero( hand ), mo=True)
[ hand.attr('t' + a).lock() for a in 'xyz' ]
mod = 0.15 / (numFingers - 1) if numFingers > 1 else 0
for i, finger in enumerate(['Index', 'Middle', 'Ring', 'Pinky'][:numFingers]):
card = makeCard( 4, finger + '*', suffix='left' )
moveCard.to( card, leftArm.end() )
moveCard.backward( card, meters(i * mod - 0.1) )
moveCard.down( card, meters(0.20) )
grp = group(card, n=finger + "_grp")
card.setParent( grp )
parentConstraint( hand, grp, mo=True )
card.ry.set(90)
card.joints[-1].isHelper = True
#proxy.pointer( leftArm.end(), card.start() )
card.start().setBPParent( leftArm.end() )
card.rigCommand = 'TranslateChain'
card.rigOptions = '-size 2 -visGroup fingers'
if makeThumb:
thumb = makeCard( 4, 'Thumb*', suffix='left' )
moveCard.to(thumb, leftArm.end())
thumb.ry.set(-90)
moveCard.to( thumb, leftArm.end() )
moveCard.forward( thumb, meters(0.1) )
moveCard.down( thumb, meters(0.1) )
moveCard.closer( thumb, meters(0.05) )
thumb.end().isHelper = True
grp = group(thumb, n="Thumb_grp")
parentConstraint( hand, grp, mo=True )
#proxy.pointer( leftArm.end(), thumb.start() )
thumb.start().setBPParent( leftArm.end() )
thumb.rigCommand = 'TranslateChain'
thumb.rigOptions = '-size 2 -visGroup fingers'
def leg( startJoint, dist ):
'''
dist, pos moves left
'''
suffix = 'left' if dist > 0 else 'right'
leftLeg = makeCard( 3, ['Hip', 'Knee', 'Ankle'], size=meters(.2, 1), suffix=suffix )
leftLeg.rigCommand = 'IkChain'
rigData = leftLeg.rigData
rigData['ikParams'] = {'name': 'Leg', 'endOrientType': 'True_Zero_Foot'}
leftLeg.rigData = rigData
placeJoints( leftLeg, [ (0, 1), (-0.23, 0.1), (0, -0.6) ] )
moveCard.to( leftLeg, startJoint )
moveCard.left( leftLeg, meters(dist) )
leftLeg.start().setBPParent(startJoint)
leftLeg.mirror = ''
return leftLeg
def hindleg(startJoint=None, dist=0.20):
suffix = 'left' if dist > 0 else 'right'
leg = makeCard( 4, ['Hip', 'Knee', 'Ankle', 'Toe'], size=meters(.2, 1), suffix=suffix )
leg.rigCommand = 'DogHindleg'
placeJoints( leg, [ (0, 1), (-1, 0.1), (1, -0.5), (0.1, -1) ] )
if startJoint:
moveCard.to( leg, startJoint )
leg.start().setBPParent( startJoint )
moveCard.left( leg, meters(dist) )
leg.start().postCommand = 'mirror;'
return leg
def foot(legCard):
foot = makeCard( 3, [ 'Ball', 'Toe', 'ToeEnd'], size=meters(.4, 0.2), suffix='left' )
placeJoints( foot, [(0.5, -1), (-0.7, -1), (-1, -1)] )
foot.joints[-1].isHelper = True
pivTo( foot, 1, 1 )
pointConstraint( legCard.end(), foot )
foot.t.lock()
foot.start().setBPParent( legCard.end() )
return foot
# Sort of special cards -------------------------------------------------------
def squashAndStretchCard(parent, count):
'''
.. todo::
* Use parent name by default
* Arrange joints in circle (as separately callable thing)
* Orient card along parent's X
'''
dist = 3.75 # I think this is due to meter being 0.15
card = makeCard( 1, 'Squash*', size=meters(.15, .15) )
angle = math.pi * 2.0 / count
for i in range(count):
card.addJoint()
card.joints[i].tz.set( math.cos(angle * i) * dist )
card.joints[i].ty.set( math.sin(angle * i) * dist )
moveCard.toObjByCenter( card, parent )
#card.setTempNames()
card.rigCommand = 'SquashStretch'
for j in card.joints:
j.setBPParent(parent)
#j.orientTarget = card.end()
card.end().isHelper = True
rot = xform(parent, q=True, ws=True, ro=True)
xform(card, ws=True, ro=rot)
rotate(card, [0, 0, -90], r=True, os=True)
# &&& I do not get what name scheme I'm doing here
cards = core.findNode.allCards()
cards.remove(card)
nameScheme = findUniqueNameInfo(util.parse(card.nameInfo.get()), '', cards=cards)
card.nameInfo.set( ' '.join(nameScheme[0]) + nameScheme[1] + '* ' + ' '.join(nameScheme[2]) )
return card
def weaponCard(parentName, name, asymmetric=True):
'''
I think parent might be a joint name and have it figure more stuff out than normal.
It seems logical that the anticipated things are Wrist and spine. Then the
rest of the joints can be listed out.
'''
card = makeCard( 1, name, size=meters(.15, .15) )
card.rigCommand = 'TranslateChain'
parent, direct = util.findTempJoint(parentName)
if asymmetric:
if parent.cardCon.node().isCardMirrored():
card.mirror = False
if not direct:
# Put card under root and set future parent
card.start().postCommand = 'reparent {extraNode0};'
card.start().extraNode[0] = parent
else:
card.start().setBPParent(parent)
else:
card.start().setBPParent(parent)
else:
card.start().setBPParent(parent)
def addTwistCard(jnt):
'''
Given a `BPJoint` to drive a twist, creates a card with sibling twist helper.
'''
names = jnt.card.nameList(usePrefix=False)
name = names[ jnt.cardCon.index() ]
# Strip off the suffix if one exists
if util.isMirrored( jnt ):
mirrorCode = jnt.card.rigData.get('mirrorCode', '') # &&& Can I always get the mirror code and always set it?
suffix = config.jointSideSuffix( mirrorCode )
name = name[: -len(suffix) ]
else:
mirrorCode = ''
name += 'Twist'
card = makeCard( jointCount=1, jointNames=[name], rigInfo=None, size=(1, 1) )
# &&& Can I prevent joints from being added to this card?
# Keep the card scaled along the length of the bone.
xform( card, piv=(0, .5, 0), ws=True )
aimConstraint( jnt, card, aim=[0, -1, 0], u=[1, 0, 0], wut='objectrotation', wuo=jnt.cardCon.node(), wu=[0, 0, 1] )
pointConstraint( jnt.parent, card )
dist, grp = core.dagObj.measure( jnt, jnt.parent )
grp.setParent( card )
dist.setParent( card )
dist.distance >> card.sy
# The twist needs to stay on axis.
card.start().tz.lock()
card.extraNode[0] = jnt
if mirrorCode:
card.suffix.set( mirrorCode )
card.rigCommand = 'TwistHelper'
card.sz.set( max(card.sy.get() / 4.0, 1.0) )
card.start().setBPParent(jnt.parent)
return card
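# Usage sketch (the joint name is hypothetical): given a blueprint wrist joint whose parent is
# the elbow, this builds a sibling card whose scale tracks the elbow-to-wrist distance:
#
#   twistCard = addTwistCard(wristBPJoint)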
def bipedSetup(spineCount=4, neckCount=1, numFingers=4, legType='Human', thumb=True, spineOrient=Orientation.VERTICAL):
spine, hips = spineCard(spineCount, spineOrient)
# Neck
neck = makeCard( neckCount, 'Neck*', size=meters(.15, .4) )
neck.rigCommand = 'TranslateChain'
neck.rx.set( 180 )
moveCard.to( neck, spine.end() )
moveCard.up( neck, meters(0.10) )
neck.start().setBPParent( spine.end() )
# Head
head = makeCard( 2, 'Head HeadTip', size=meters(.3, .3) )
head.rigCommand = 'TranslateChain'
head.rx.set( 180 )
moveCard.to( head, neck.end() )
moveCard.up( head, meters(0.10) )
head.end().isHelper = True
head.start().setBPParent( neck.end() )
spine.end().orientTarget = neck.start()
# Arms
clav = makeCard( 1, 'Clavicle', size=meters(.1, .1), suffix='left' )
clav.rigCommand = 'TranslateChain'
moveCard.to( clav, spine.end() )
moveCard.forward( clav, meters(0.10) )
moveCard.left( clav, meters(0.2) )
clav.ry.set(-90)
clav.mirror = ''
clav.start().setBPParent( spine.end() )
leftArm = arm( clav, 'left' )
handSetup( leftArm, numFingers, thumb )
# Legs
if legType == 'Human':
leftLeg = leg( hips.start(), 0.20 )
elif legType == 'Dogleg':
leftLeg = hindleg( hips.start(), 0.20 )
foot(leftLeg)
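# End-to-end sketch (must run inside Maya with the fossil environment loaded; counts are
# placeholders):
#
#   spine, hips = spineCard(4)                            # spine plus pelvis/hips helpers
#   bipedSetup(spineCount=4, numFingers=4, thumb=True)    # or build the whole biped at once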
|
caveman-dick/ansible
|
refs/heads/devel
|
lib/ansible/inventory/data.py
|
2
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.module_utils.six import iteritems
from ansible.plugins.cache import FactCache
from ansible.utils.vars import combine_vars
from ansible.utils.path import basedir
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class InventoryData(object):
"""
Holds inventory data (host and group objects).
Using its methods should guarantee expected relationships and data.
"""
def __init__(self):
# the inventory object holds a list of groups
self.groups = {}
self.hosts = {}
# provides 'groups' magic var, host object has group_names
self._groups_dict_cache = {}
# current localhost, implicit or explicit
self.localhost = None
self.current_source = None
# Always create the 'all' and 'ungrouped' groups,
for group in ('all', 'ungrouped'):
self.add_group(group)
self.add_child('all', 'ungrouped')
# prime cache
self.cache = FactCache()
def serialize(self):
data = dict()
return data
def deserialize(self, data):
pass
def _create_implicit_localhost(self, pattern):
if self.localhost:
new_host = self.localhost
else:
new_host = Host(pattern)
# use 'all' vars but not part of all group
new_host.vars = self.groups['all'].get_vars()
new_host.address = "127.0.0.1"
new_host.implicit = True
if "ansible_python_interpreter" not in new_host.vars:
py_interp = sys.executable
if not py_interp:
# sys.executable is not set in some cornercases. #13585
py_interp = '/usr/bin/python'
display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default. '
'You can correct this by setting ansible_python_interpreter for localhost')
new_host.set_variable("ansible_python_interpreter", py_interp)
if "ansible_connection" not in new_host.vars:
new_host.set_variable("ansible_connection", 'local')
self.localhost = new_host
return new_host
def reconcile_inventory(self):
''' Ensure inventory basic rules, run after updates '''
display.debug('Reconcile groups and hosts in inventory.')
self.current_source = None
group_names = set()
# set group vars from group_vars/ files and vars plugins
for g in self.groups:
group = self.groups[g]
group_names.add(group.name)
# ensure all groups inherit from 'all'
if group.name != 'all' and not group.get_ancestors():
self.add_child('all', group.name)
host_names = set()
# get host vars from host_vars/ files and vars plugins
for host in self.hosts.values():
host_names.add(host.name)
mygroups = host.get_groups()
# ensure hosts are always in 'all'
if 'all' not in mygroups and not host.implicit:
self.add_child('all', host.name)
if self.groups['ungrouped'] in mygroups:
# clear ungrouped of any incorrectly stored by parser
if set(mygroups).difference(set([self.groups['all'], self.groups['ungrouped']])):
host.remove_group(self.groups['ungrouped'])
elif not host.implicit:
# add ungrouped hosts to ungrouped, except implicit
length = len(mygroups)
                if length == 0 or (length == 1 and self.groups['all'] in mygroups):
self.add_child('ungrouped', host.name)
# special case for implicit hosts
if host.implicit:
host.vars = combine_vars(self.groups['all'].get_vars(), host.vars)
# warn if overloading identifier as both group and host
for conflict in group_names.intersection(host_names):
display.warning("Found both group and host with same name: %s" % conflict)
self._groups_dict_cache = {}
def get_host(self, hostname):
        ''' fetch host object using name; deal with implicit localhost '''
matching_host = self.hosts.get(hostname, None)
# if host is not in hosts dict
if matching_host is None and hostname in C.LOCALHOST:
# might need to create implicit localhost
matching_host = self._create_implicit_localhost(hostname)
return matching_host
def add_group(self, group):
''' adds a group to inventory if not there already '''
if group not in self.groups:
g = Group(group)
self.groups[group] = g
self._groups_dict_cache = {}
display.debug("Added group %s to inventory" % group)
else:
display.debug("group %s already in inventory" % group)
def add_host(self, host, group=None, port=None):
''' adds a host to inventory and possibly a group if not there already '''
g = None
if group:
if group in self.groups:
g = self.groups[group]
else:
raise AnsibleError("Could not find group %s in inventory" % group)
if host not in self.hosts:
h = Host(host, port)
self.hosts[host] = h
if self.current_source: # set to 'first source' in which host was encountered
self.set_variable(host, 'inventory_file', self.current_source)
self.set_variable(host, 'inventory_dir', basedir(self.current_source))
else:
self.set_variable(host, 'inventory_file', None)
self.set_variable(host, 'inventory_dir', None)
display.debug("Added host %s to inventory" % (host))
# set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
if host in C.LOCALHOST:
if self.localhost is None:
self.localhost = self.hosts[host]
display.vvvv("Set default localhost to %s" % h)
else:
display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
else:
h = self.hosts[host]
if g:
g.add_host(h)
self._groups_dict_cache = {}
display.debug("Added host %s to group %s" % (host, group))
def set_variable(self, entity, varname, value):
        ''' sets a variable for an inventory object '''
if entity in self.groups:
inv_object = self.groups[entity]
elif entity in self.hosts:
inv_object = self.hosts[entity]
else:
raise AnsibleError("Could not identify group or host named %s" % entity)
inv_object.set_variable(varname, value)
display.debug('set %s for %s' % (varname, entity))
def add_child(self, group, child):
''' Add host or group to group '''
if group in self.groups:
g = self.groups[group]
if child in self.groups:
g.add_child_group(self.groups[child])
elif child in self.hosts:
g.add_host(self.hosts[child])
else:
raise AnsibleError("%s is not a known host nor group" % child)
self._groups_dict_cache = {}
display.debug('Group %s now contains %s' % (group, child))
else:
raise AnsibleError("%s is not a known group" % group)
def get_groups_dict(self):
"""
We merge a 'magic' var 'groups' with group name keys and hostname list values into every host variable set. Cache for speed.
"""
if not self._groups_dict_cache:
for (group_name, group) in iteritems(self.groups):
self._groups_dict_cache[group_name] = [h.name for h in group.get_hosts()]
return self._groups_dict_cache
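# Minimal usage sketch: how the methods above compose when building an
# inventory by hand. Group/host/variable names are placeholders; this assumes
# a standard Ansible install so FactCache() can be constructed.
if __name__ == '__main__':
    inv = InventoryData()
    inv.add_group('webservers')
    inv.add_host('web1.example.com', group='webservers', port=22)
    inv.set_variable('web1.example.com', 'http_port', 8080)
    inv.add_child('all', 'webservers')
    inv.reconcile_inventory()
    # mapping of group name -> host names, e.g. {'webservers': ['web1.example.com'], ...}
    print(inv.get_groups_dict())
    # an implicit localhost is created on demand
    print(inv.get_host('localhost').vars.get('ansible_connection'))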
|
songmonit/CTTMSONLINE
|
refs/heads/master
|
addons/crm_profiling/crm_profiling.py
|
333
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.osv import orm
from openerp.tools.translate import _
def _get_answers(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs """
query = """
select distinct(answer)
from profile_question_yes_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_yes = [x[0] for x in cr.fetchall()]
query = """
select distinct(answer)
from profile_question_no_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_no = [x[0] for x in cr.fetchall()]
return [ans_yes, ans_no]
def _get_parents(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
    @return: List of parent IDs """
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from crm_segmentation
where parent_id is not null
and id IN %s""",(tuple(ids),))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = _get_parents(cr, uid, ids_to_check)
return ids_to_check
def test_prof(cr, uid, seg_id, pid, answers_ids=None):
""" return True if the partner pid fetch the segmentation rule seg_id
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
    @param seg_id: Segmentation's ID
@param pid: partner's ID
@param answers_ids: Answers's IDs
"""
ids_to_check = _get_parents(cr, uid, [seg_id])
[yes_answers, no_answers] = _get_answers(cr, uid, ids_to_check)
temp = True
for y_ans in yes_answers:
if y_ans not in answers_ids:
temp = False
break
if temp:
for ans in answers_ids:
if ans in no_answers:
temp = False
break
if temp:
return True
return False
def _recompute_categ(self, cr, uid, pid, answers_ids):
""" Recompute category
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param pid: partner's ID
@param answers_ids: Answers's IDs
"""
ok = []
cr.execute('''
select r.category_id
from res_partner_res_partner_category_rel r left join crm_segmentation s on (r.category_id = s.categ_id)
where r.partner_id = %s and (s.exclusif = false or s.exclusif is null)
''', (pid,))
for x in cr.fetchall():
ok.append(x[0])
query = '''
select id, categ_id
from crm_segmentation
where profiling_active = true'''
if ok != []:
query = query +''' and categ_id not in(%s)'''% ','.join([str(i) for i in ok ])
query = query + ''' order by id '''
cr.execute(query)
segm_cat_ids = cr.fetchall()
for (segm_id, cat_id) in segm_cat_ids:
if test_prof(cr, uid, segm_id, pid, answers_ids):
ok.append(cat_id)
return ok
class question(osv.osv):
""" Question """
_name="crm_profiling.question"
_description= "Question"
_columns={
'name': fields.char("Question", required=True),
'answers_ids': fields.one2many("crm_profiling.answer", "question_id", "Available Answers", copy=True),
}
class questionnaire(osv.osv):
""" Questionnaire """
_name="crm_profiling.questionnaire"
_description= "Questionnaire"
_columns = {
'name': fields.char("Questionnaire", required=True),
'description':fields.text("Description", required=True),
'questions_ids': fields.many2many('crm_profiling.question','profile_questionnaire_quest_rel',\
'questionnaire', 'question', "Questions"),
}
class answer(osv.osv):
_name="crm_profiling.answer"
_description="Answer"
_columns={
"name": fields.char("Answer", required=True),
"question_id": fields.many2one('crm_profiling.question',"Question"),
}
class partner(osv.osv):
_inherit="res.partner"
_columns={
"answers_ids": fields.many2many("crm_profiling.answer","partner_question_rel",\
"partner","answer","Answers"),
}
def _questionnaire_compute(self, cr, uid, answers, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
        @param answers: list of answer IDs selected in the questionnaire,
@param context: A standard dictionary for contextual values """
partner_id = context.get('active_id')
query = "select answer from partner_question_rel where partner=%s"
cr.execute(query, (partner_id,))
for x in cr.fetchall():
answers.append(x[0])
self.write(cr, uid, [partner_id], {'answers_ids': [[6, 0, answers]]}, context=context)
return {}
def write(self, cr, uid, ids, vals, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
        @param ids: List of partner IDs
@param context: A standard dictionary for contextual values """
if 'answers_ids' in vals:
vals['category_id']=[[6, 0, _recompute_categ(self, cr, uid, ids[0], vals['answers_ids'][0][2])]]
return super(partner, self).write(cr, uid, ids, vals, context=context)
class crm_segmentation(osv.osv):
""" CRM Segmentation """
_inherit="crm.segmentation"
_columns={
"answer_yes": fields.many2many("crm_profiling.answer","profile_question_yes_rel",\
"profile","answer","Included Answers"),
"answer_no": fields.many2many("crm_profiling.answer","profile_question_no_rel",\
"profile","answer","Excluded Answers"),
'parent_id': fields.many2one('crm.segmentation', 'Parent Profile'),
'child_ids': fields.one2many('crm.segmentation', 'parent_id', 'Child Profiles'),
'profiling_active': fields.boolean('Use The Profiling Rules', help='Check\
this box if you want to use this tab as part of the \
segmentation rule. If not checked, the criteria beneath will be ignored')
}
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive profiles.', ['parent_id'])
]
def process_continue(self, cr, uid, ids, start=False):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm segmentation’s IDs """
partner_obj = self.pool.get('res.partner')
categs = self.read(cr,uid,ids,['categ_id','exclusif','partner_id', \
'sales_purchase_active', 'profiling_active'])
for categ in categs:
if start:
if categ['exclusif']:
cr.execute('delete from res_partner_res_partner_category_rel where \
category_id=%s', (categ['categ_id'][0],))
partner_obj.invalidate_cache(cr, uid, ['category_id'])
id = categ['id']
cr.execute('select id from res_partner order by id ')
partners = [x[0] for x in cr.fetchall()]
if categ['sales_purchase_active']:
to_remove_list=[]
cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
line_ids = [x[0] for x in cr.fetchall()]
for pid in partners:
if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
if categ['profiling_active']:
to_remove_list = []
for pid in partners:
cr.execute('select distinct(answer) from partner_question_rel where partner=%s',(pid,))
answers_ids = [x[0] for x in cr.fetchall()]
if (not test_prof(cr, uid, id, pid, answers_ids)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
for partner in partner_obj.browse(cr, uid, partners):
category_ids = [categ_id.id for categ_id in partner.category_id]
if categ['categ_id'][0] not in category_ids:
cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) values (%s,%s)', (categ['categ_id'][0],partner.id))
partner_obj.invalidate_cache(cr, uid, ['category_id'], [partner.id])
self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
socialplanning/opencore
|
refs/heads/master
|
opencore/api/__init__.py
|
9480
|
#
|
shaheemirza/pupy
|
refs/heads/master
|
pupy/packages/windows/x86/psutil/_pswindows.py
|
66
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows platform implementation."""
import errno
import functools
import os
import sys
from collections import namedtuple
from . import _common
from . import _psutil_windows as cext
from ._common import conn_tmap, usage_percent, isfile_strict
from ._common import sockfam_to_enum, socktype_to_enum
from ._compat import PY3, xrange, lru_cache, long
from ._psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS,
BELOW_NORMAL_PRIORITY_CLASS,
HIGH_PRIORITY_CLASS,
IDLE_PRIORITY_CLASS,
NORMAL_PRIORITY_CLASS,
REALTIME_PRIORITY_CLASS)
if sys.version_info >= (3, 4):
import enum
else:
enum = None
# process priority constants, import from __init__.py:
# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
__extra__all__ = ["ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
"HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
"NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS",
"CONN_DELETE_TCB",
"AF_LINK",
]
# --- module level constants (gets pushed up to psutil module)
CONN_DELETE_TCB = "DELETE_TCB"
WAIT_TIMEOUT = 0x00000102 # 258 in decimal
ACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,
cext.ERROR_ACCESS_DENIED])
if enum is None:
AF_LINK = -1
else:
AddressFamily = enum.IntEnum('AddressFamily', {'AF_LINK': -1})
AF_LINK = AddressFamily.AF_LINK
TCP_STATUSES = {
cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
}
if enum is not None:
class Priority(enum.IntEnum):
ABOVE_NORMAL_PRIORITY_CLASS = ABOVE_NORMAL_PRIORITY_CLASS
BELOW_NORMAL_PRIORITY_CLASS = BELOW_NORMAL_PRIORITY_CLASS
HIGH_PRIORITY_CLASS = HIGH_PRIORITY_CLASS
IDLE_PRIORITY_CLASS = IDLE_PRIORITY_CLASS
NORMAL_PRIORITY_CLASS = NORMAL_PRIORITY_CLASS
REALTIME_PRIORITY_CLASS = REALTIME_PRIORITY_CLASS
globals().update(Priority.__members__)
scputimes = namedtuple('scputimes', ['user', 'system', 'idle'])
svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
pextmem = namedtuple(
'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
'pagefile', 'peak_pagefile', 'private'])
pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
ntpinfo = namedtuple(
'ntpinfo', ['num_handles', 'ctx_switches', 'user_time', 'kernel_time',
'create_time', 'num_threads', 'io_rcount', 'io_wcount',
'io_rbytes', 'io_wbytes'])
# set later from __init__.py
NoSuchProcess = None
AccessDenied = None
TimeoutExpired = None
@lru_cache(maxsize=512)
def _win32_QueryDosDevice(s):
return cext.win32_QueryDosDevice(s)
def _convert_raw_path(s):
# convert paths using native DOS format like:
# "\Device\HarddiskVolume1\Windows\systemew\file.txt"
# into: "C:\Windows\systemew\file.txt"
if PY3 and not isinstance(s, str):
s = s.decode('utf8')
rawdrive = '\\'.join(s.split('\\')[:3])
driveletter = _win32_QueryDosDevice(rawdrive)
return os.path.join(driveletter, s[len(rawdrive):])
def py2_strencode(s, encoding=sys.getfilesystemencoding()):
if PY3 or isinstance(s, str):
return s
else:
try:
return s.encode(encoding)
except UnicodeEncodeError:
# Filesystem codec failed, return the plain unicode
# string (this should never happen).
return s
# --- public functions
def virtual_memory():
"""System virtual memory as a namedtuple."""
mem = cext.virtual_mem()
totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
#
total = totphys
avail = availphys
free = availphys
used = total - avail
percent = usage_percent((total - avail), total, _round=1)
return svmem(total, avail, percent, used, free)
def swap_memory():
"""Swap system memory as a (total, used, free, sin, sout) tuple."""
mem = cext.virtual_mem()
total = mem[2]
free = mem[3]
used = total - free
percent = usage_percent(used, total, _round=1)
return _common.sswap(total, used, free, percent, 0, 0)
def disk_usage(path):
"""Return disk usage associated with path."""
try:
total, free = cext.disk_usage(path)
except WindowsError:
if not os.path.exists(path):
msg = "No such file or directory: '%s'" % path
raise OSError(errno.ENOENT, msg)
raise
used = total - free
percent = usage_percent(used, total, _round=1)
return _common.sdiskusage(total, used, free, percent)
def disk_partitions(all):
"""Return disk partitions."""
rawlist = cext.disk_partitions(all)
return [_common.sdiskpart(*x) for x in rawlist]
def cpu_times():
"""Return system CPU times as a named tuple."""
user, system, idle = cext.cpu_times()
return scputimes(user, system, idle)
def per_cpu_times():
"""Return system per-CPU times as a list of named tuples."""
ret = []
for cpu_t in cext.per_cpu_times():
user, system, idle = cpu_t
item = scputimes(user, system, idle)
ret.append(item)
return ret
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
return cext.cpu_count_logical()
def cpu_count_physical():
"""Return the number of physical CPUs in the system."""
return cext.cpu_count_phys()
def boot_time():
"""The system boot time expressed in seconds since the epoch."""
return cext.boot_time()
def net_connections(kind, _pid=-1):
"""Return socket connections. If pid == -1 return system-wide
connections (as opposed to connections opened by one process only).
"""
if kind not in conn_tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in conn_tmap])))
families, types = conn_tmap[kind]
rawlist = cext.net_connections(_pid, families, types)
ret = set()
for item in rawlist:
fd, fam, type, laddr, raddr, status, pid = item
status = TCP_STATUSES[status]
fam = sockfam_to_enum(fam)
type = socktype_to_enum(type)
if _pid == -1:
nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
else:
nt = _common.pconn(fd, fam, type, laddr, raddr, status)
ret.add(nt)
return list(ret)
def net_if_stats():
ret = cext.net_if_stats()
for name, items in ret.items():
name = py2_strencode(name)
isup, duplex, speed, mtu = items
if hasattr(_common, 'NicDuplex'):
duplex = _common.NicDuplex(duplex)
ret[name] = _common.snicstats(isup, duplex, speed, mtu)
return ret
def net_io_counters():
ret = cext.net_io_counters()
return dict([(py2_strencode(k), v) for k, v in ret.items()])
def net_if_addrs():
ret = []
for items in cext.net_if_addrs():
items = list(items)
items[0] = py2_strencode(items[0])
ret.append(items)
return ret
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
for item in rawlist:
user, hostname, tstamp = item
user = py2_strencode(user)
nt = _common.suser(user, None, hostname, tstamp)
retlist.append(nt)
return retlist
pids = cext.pids
pid_exists = cext.pid_exists
disk_io_counters = cext.disk_io_counters
ppid_map = cext.ppid_map # not meant to be public
def wrap_exceptions(fun):
"""Decorator which translates bare OSError and WindowsError
exceptions into NoSuchProcess and AccessDenied.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
if err.errno in ACCESS_DENIED_SET:
raise AccessDenied(self.pid, self._name)
if err.errno == errno.ESRCH:
raise NoSuchProcess(self.pid, self._name)
raise
return wrapper
class Process(object):
"""Wrapper class around underlying C implementation."""
__slots__ = ["pid", "_name", "_ppid"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
@wrap_exceptions
def name(self):
"""Return process name, which on Windows is always the final
part of the executable.
"""
# This is how PIDs 0 and 4 are always represented in taskmgr
# and process-hacker.
if self.pid == 0:
return "System Idle Process"
elif self.pid == 4:
return "System"
else:
try:
# Note: this will fail with AD for most PIDs owned
# by another user but it's faster.
return py2_strencode(os.path.basename(self.exe()))
except AccessDenied:
return py2_strencode(cext.proc_name(self.pid))
@wrap_exceptions
def exe(self):
# Note: os.path.exists(path) may return False even if the file
# is there, see:
# http://stackoverflow.com/questions/3112546/os-path-exists-lies
# see https://github.com/giampaolo/psutil/issues/414
# see https://github.com/giampaolo/psutil/issues/528
if self.pid in (0, 4):
raise AccessDenied(self.pid, self._name)
return py2_strencode(_convert_raw_path(cext.proc_exe(self.pid)))
@wrap_exceptions
def cmdline(self):
ret = cext.proc_cmdline(self.pid)
if PY3:
return ret
else:
return [py2_strencode(s) for s in ret]
def ppid(self):
try:
return ppid_map()[self.pid]
except KeyError:
raise NoSuchProcess(self.pid, self._name)
def _get_raw_meminfo(self):
try:
return cext.proc_memory_info(self.pid)
except OSError as err:
if err.errno in ACCESS_DENIED_SET:
# TODO: the C ext can probably be refactored in order
# to get this from cext.proc_info()
return cext.proc_memory_info_2(self.pid)
raise
@wrap_exceptions
def memory_info(self):
# on Windows RSS == WorkingSetSize and VSM == PagefileUsage
# fields of PROCESS_MEMORY_COUNTERS struct:
# http://msdn.microsoft.com/en-us/library/windows/desktop/
# ms684877(v=vs.85).aspx
t = self._get_raw_meminfo()
return _common.pmem(t[2], t[7])
@wrap_exceptions
def memory_info_ex(self):
return pextmem(*self._get_raw_meminfo())
def memory_maps(self):
try:
raw = cext.proc_memory_maps(self.pid)
except OSError as err:
# XXX - can't use wrap_exceptions decorator as we're
# returning a generator; probably needs refactoring.
if err.errno in ACCESS_DENIED_SET:
raise AccessDenied(self.pid, self._name)
if err.errno == errno.ESRCH:
raise NoSuchProcess(self.pid, self._name)
raise
else:
for addr, perm, path, rss in raw:
path = _convert_raw_path(path)
addr = hex(addr)
yield (addr, perm, path, rss)
@wrap_exceptions
def kill(self):
return cext.proc_kill(self.pid)
@wrap_exceptions
def send_signal(self, sig):
os.kill(self.pid, sig)
@wrap_exceptions
def wait(self, timeout=None):
if timeout is None:
timeout = cext.INFINITE
else:
# WaitForSingleObject() expects time in milliseconds
timeout = int(timeout * 1000)
ret = cext.proc_wait(self.pid, timeout)
if ret == WAIT_TIMEOUT:
# support for private module import
if TimeoutExpired is None:
raise RuntimeError("timeout expired")
raise TimeoutExpired(timeout, self.pid, self._name)
return ret
@wrap_exceptions
def username(self):
if self.pid in (0, 4):
return 'NT AUTHORITY\\SYSTEM'
return cext.proc_username(self.pid)
@wrap_exceptions
def create_time(self):
# special case for kernel process PIDs; return system boot time
if self.pid in (0, 4):
return boot_time()
try:
return cext.proc_create_time(self.pid)
except OSError as err:
if err.errno in ACCESS_DENIED_SET:
return ntpinfo(*cext.proc_info(self.pid)).create_time
raise
@wrap_exceptions
def num_threads(self):
return ntpinfo(*cext.proc_info(self.pid)).num_threads
@wrap_exceptions
def threads(self):
rawlist = cext.proc_threads(self.pid)
retlist = []
for thread_id, utime, stime in rawlist:
ntuple = _common.pthread(thread_id, utime, stime)
retlist.append(ntuple)
return retlist
@wrap_exceptions
def cpu_times(self):
try:
ret = cext.proc_cpu_times(self.pid)
except OSError as err:
if err.errno in ACCESS_DENIED_SET:
nt = ntpinfo(*cext.proc_info(self.pid))
ret = (nt.user_time, nt.kernel_time)
else:
raise
return _common.pcputimes(*ret)
@wrap_exceptions
def suspend(self):
return cext.proc_suspend(self.pid)
@wrap_exceptions
def resume(self):
return cext.proc_resume(self.pid)
@wrap_exceptions
def cwd(self):
if self.pid in (0, 4):
raise AccessDenied(self.pid, self._name)
# return a normalized pathname since the native C function appends
# "\\" at the and of the path
path = cext.proc_cwd(self.pid)
return py2_strencode(os.path.normpath(path))
@wrap_exceptions
def open_files(self):
if self.pid in (0, 4):
return []
ret = set()
        # Filenames come in native format like:
# "\Device\HarddiskVolume1\Windows\systemew\file.txt"
# Convert the first part in the corresponding drive letter
# (e.g. "C:\") by using Windows's QueryDosDevice()
raw_file_names = cext.proc_open_files(self.pid)
for _file in raw_file_names:
_file = _convert_raw_path(_file)
if isfile_strict(_file):
if not PY3:
_file = py2_strencode(_file)
ntuple = _common.popenfile(_file, -1)
ret.add(ntuple)
return list(ret)
@wrap_exceptions
def connections(self, kind='inet'):
return net_connections(kind, _pid=self.pid)
@wrap_exceptions
def nice_get(self):
value = cext.proc_priority_get(self.pid)
if enum is not None:
value = Priority(value)
return value
@wrap_exceptions
def nice_set(self, value):
return cext.proc_priority_set(self.pid, value)
# available on Windows >= Vista
if hasattr(cext, "proc_io_priority_get"):
@wrap_exceptions
def ionice_get(self):
return cext.proc_io_priority_get(self.pid)
@wrap_exceptions
def ionice_set(self, value, _):
if _:
raise TypeError("set_proc_ionice() on Windows takes only "
"1 argument (2 given)")
if value not in (2, 1, 0):
raise ValueError("value must be 2 (normal), 1 (low) or 0 "
"(very low); got %r" % value)
return cext.proc_io_priority_set(self.pid, value)
@wrap_exceptions
def io_counters(self):
try:
ret = cext.proc_io_counters(self.pid)
except OSError as err:
if err.errno in ACCESS_DENIED_SET:
nt = ntpinfo(*cext.proc_info(self.pid))
ret = (nt.io_rcount, nt.io_wcount, nt.io_rbytes, nt.io_wbytes)
else:
raise
return _common.pio(*ret)
@wrap_exceptions
def status(self):
suspended = cext.proc_is_suspended(self.pid)
if suspended:
return _common.STATUS_STOPPED
else:
return _common.STATUS_RUNNING
@wrap_exceptions
def cpu_affinity_get(self):
def from_bitmask(x):
return [i for i in xrange(64) if (1 << i) & x]
bitmask = cext.proc_cpu_affinity_get(self.pid)
return from_bitmask(bitmask)
@wrap_exceptions
def cpu_affinity_set(self, value):
def to_bitmask(l):
if not l:
raise ValueError("invalid argument %r" % l)
out = 0
for b in l:
out |= 2 ** b
return out
# SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER
# is returned for an invalid CPU but this seems not to be true,
        # therefore we check CPUs validity beforehand.
allcpus = list(range(len(per_cpu_times())))
for cpu in value:
if cpu not in allcpus:
if not isinstance(cpu, (int, long)):
raise TypeError(
"invalid CPU %r; an integer is required" % cpu)
else:
raise ValueError("invalid CPU %r" % cpu)
bitmask = to_bitmask(value)
cext.proc_cpu_affinity_set(self.pid, bitmask)
@wrap_exceptions
def num_handles(self):
try:
return cext.proc_num_handles(self.pid)
except OSError as err:
if err.errno in ACCESS_DENIED_SET:
return ntpinfo(*cext.proc_info(self.pid)).num_handles
raise
@wrap_exceptions
def num_ctx_switches(self):
ctx_switches = ntpinfo(*cext.proc_info(self.pid)).ctx_switches
# only voluntary ctx switches are supported
return _common.pctxsw(ctx_switches, 0)
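# Minimal usage sketch of the module-level API defined above. It is normally
# consumed through psutil's public interface; calling it directly like this
# assumes the _psutil_windows C extension is importable, i.e. Windows only.
if __name__ == '__main__':
    print("logical CPUs: %s" % cpu_count_logical())
    mem = virtual_memory()
    print("memory: %s%% used of %s bytes total" % (mem.percent, mem.total))
    print("boot time (seconds since epoch): %s" % boot_time())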
|
pschmitt/home-assistant
|
refs/heads/dev
|
homeassistant/components/mobile_app/config_flow.py
|
14
|
"""Config flow for Mobile App."""
import uuid
from homeassistant import config_entries
from homeassistant.components import person
from homeassistant.helpers import entity_registry
from .const import ATTR_APP_ID, ATTR_DEVICE_ID, ATTR_DEVICE_NAME, CONF_USER_ID, DOMAIN
@config_entries.HANDLERS.register(DOMAIN)
class MobileAppFlowHandler(config_entries.ConfigFlow):
"""Handle a Mobile App config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
placeholders = {
"apps_url": "https://www.home-assistant.io/integrations/mobile_app/#apps"
}
return self.async_abort(
reason="install_app", description_placeholders=placeholders
)
async def async_step_registration(self, user_input=None):
"""Handle a flow initialized during registration."""
if ATTR_DEVICE_ID in user_input:
            # Unique ID is a combination of app ID and device ID.
await self.async_set_unique_id(
f"{user_input[ATTR_APP_ID]}-{user_input[ATTR_DEVICE_ID]}"
)
else:
user_input[ATTR_DEVICE_ID] = str(uuid.uuid4()).replace("-", "")
# Register device tracker entity and add to person registering app
ent_reg = await entity_registry.async_get_registry(self.hass)
devt_entry = ent_reg.async_get_or_create(
"device_tracker",
DOMAIN,
user_input[ATTR_DEVICE_ID],
suggested_object_id=user_input[ATTR_DEVICE_NAME],
)
await person.async_add_user_device_tracker(
self.hass, user_input[CONF_USER_ID], devt_entry.entity_id
)
return self.async_create_entry(
title=user_input[ATTR_DEVICE_NAME], data=user_input
)
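# Sketch of the user_input shape that async_step_registration above consumes.
# Values are placeholders; the real payload comes from the mobile_app
# registration HTTP view, not from hand-built dictionaries.
EXAMPLE_REGISTRATION = {
    ATTR_APP_ID: "io.example.app",       # hypothetical app identifier
    ATTR_DEVICE_ID: "abcdef123456",      # optional; generated when absent
    ATTR_DEVICE_NAME: "Example Phone",
    CONF_USER_ID: "0123456789abcdef",    # Home Assistant user id
}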
|
apple/swift-lldb
|
refs/heads/stable
|
packages/Python/lldbsuite/test/benchmarks/expression/TestExpressionCmd.py
|
13
|
"""Test lldb's expression evaluations and collect statistics."""
from __future__ import print_function
import sys
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import configuration
from lldbsuite.test import lldbutil
class ExpressionEvaluationCase(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
self.source = 'main.cpp'
self.line_to_break = line_number(
self.source, '// Set breakpoint here.')
self.count = 25
@benchmarks_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_expr_cmd(self):
"""Test lldb's expression commands and collect statistics."""
self.build()
self.exe_name = 'a.out'
print()
self.run_lldb_repeated_exprs(self.exe_name, self.count)
print("lldb expr cmd benchmark:", self.stopwatch)
def run_lldb_repeated_exprs(self, exe_name, count):
import pexpect
exe = self.getBuildArtifact(exe_name)
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
'%s %s %s' %
(lldbtest_config.lldbExec, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline(
'breakpoint set -f %s -l %d' %
(self.source, self.line_to_break))
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
expr_cmd1 = 'expr ptr[j]->point.x'
expr_cmd2 = 'expr ptr[j]->point.y'
with self.stopwatch:
child.sendline(expr_cmd1)
child.expect_exact(prompt)
child.sendline(expr_cmd2)
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.child = None
|
TieWei/nova
|
refs/heads/enhanced/havana
|
nova/virt/baremetal/virtual_power_driver.py
|
7
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Virtual power driver
from oslo.config import cfg
from nova import context as nova_context
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import db
import nova.virt.powervm.common as connection
opts = [
cfg.StrOpt('virtual_power_ssh_host',
default='',
help='ip or name to virtual power host'),
cfg.IntOpt('virtual_power_ssh_port',
default=22,
help='Port to use for ssh to virtual power host'),
cfg.StrOpt('virtual_power_type',
default='virsh',
help='base command to use for virtual power(vbox,virsh)'),
cfg.StrOpt('virtual_power_host_user',
default='',
help='user to execute virtual power commands as'),
cfg.StrOpt('virtual_power_host_pass',
default='',
help='password for virtual power host_user'),
cfg.StrOpt('virtual_power_host_key',
help='ssh key for virtual power host_user'),
]
baremetal_vp = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_vp)
CONF.register_opts(opts, baremetal_vp)
_conn = None
_vp_cmd = None
_cmds = None
LOG = logging.getLogger(__name__)
def _normalize_mac(mac):
return mac.replace(':', '').lower()
class VirtualPowerManager(base.PowerManager):
"""Virtual Power Driver for Baremetal Nova Compute
This PowerManager class provides mechanism for controlling the power state
of VMs based on their name and MAC address. It uses ssh to connect to the
VM's host and issue commands.
Node will be matched based on mac address
NOTE: for use in dev/test environments only!
"""
def __init__(self, **kwargs):
global _conn
global _cmds
if _cmds is None:
LOG.debug("Setting up %s commands." %
CONF.baremetal.virtual_power_type)
_vpc = 'nova.virt.baremetal.virtual_power_driver_settings.%s' % \
CONF.baremetal.virtual_power_type
_cmds = importutils.import_class(_vpc)
self._vp_cmd = _cmds()
self.connection_data = _conn
node = kwargs.pop('node', {})
instance = kwargs.pop('instance', {})
self._node_name = instance.get('hostname', "")
context = nova_context.get_admin_context()
ifs = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
self._mac_addresses = [_normalize_mac(i['address']) for i in ifs]
self._connection = None
self._matched_name = ''
self.state = None
def _get_conn(self):
if not CONF.baremetal.virtual_power_ssh_host:
raise exception.NovaException(
_('virtual_power_ssh_host not defined. Can not Start'))
if not CONF.baremetal.virtual_power_host_user:
raise exception.NovaException(
_('virtual_power_host_user not defined. Can not Start'))
if not CONF.baremetal.virtual_power_host_pass:
# it is ok to not have a password if you have a keyfile
if CONF.baremetal.virtual_power_host_key is None:
raise exception.NovaException(
_('virtual_power_host_pass/key not set. Can not Start'))
_conn = connection.Connection(
CONF.baremetal.virtual_power_ssh_host,
CONF.baremetal.virtual_power_host_user,
CONF.baremetal.virtual_power_host_pass,
CONF.baremetal.virtual_power_ssh_port,
CONF.baremetal.virtual_power_host_key)
return _conn
def _set_connection(self):
if self._connection is None:
if self.connection_data is None:
self.connection_data = self._get_conn()
self._connection = connection.ssh_connect(self.connection_data)
def _get_full_node_list(self):
LOG.debug("Getting full node list.")
cmd = self._vp_cmd.list_cmd
full_list = self._run_command(cmd)
return full_list
def _check_for_node(self):
LOG.debug("Looking up Name for Mac address %s." % self._mac_addresses)
self._matched_name = ''
full_node_list = self._get_full_node_list()
for node in full_node_list:
cmd = self._vp_cmd.get_node_macs.replace('{_NodeName_}', node)
mac_address_list = self._run_command(cmd)
for mac in mac_address_list:
if _normalize_mac(mac) in self._mac_addresses:
self._matched_name = ('"%s"' % node)
break
return self._matched_name
def activate_node(self):
LOG.info("activate_node name %s" % self._node_name)
if self._check_for_node():
cmd = self._vp_cmd.start_cmd
self._run_command(cmd)
if self.is_power_on():
self.state = baremetal_states.ACTIVE
else:
self.state = baremetal_states.ERROR
return self.state
def reboot_node(self):
LOG.info("reset node: %s" % self._node_name)
if self._check_for_node():
cmd = self._vp_cmd.reboot_cmd
self._run_command(cmd)
if self.is_power_on():
self.state = baremetal_states.ACTIVE
else:
self.state = baremetal_states.ERROR
return self.state
def deactivate_node(self):
LOG.info("deactivate_node name %s" % self._node_name)
if self._check_for_node():
if self.is_power_on():
cmd = self._vp_cmd.stop_cmd
self._run_command(cmd)
if self.is_power_on():
self.state = baremetal_states.ERROR
else:
self.state = baremetal_states.DELETED
return self.state
def is_power_on(self):
LOG.debug("Checking if %s is running" % self._node_name)
if not self._check_for_node():
err_msg = _('Node "%(name)s" with MAC address %(mac)s not found.')
LOG.error(err_msg, {'name': self._node_name,
'mac': self._mac_addresses})
            # in our case the _node_name is the node_id
raise exception.NodeNotFound(node_id=self._node_name)
cmd = self._vp_cmd.list_running_cmd
running_node_list = self._run_command(cmd)
for node in running_node_list:
if self._matched_name in node:
return True
return False
def start_console(self):
pass
def stop_console(self):
pass
def _run_command(self, cmd, check_exit_code=True):
"""Run a remote command using an active ssh connection.
        :param cmd: String with the command to run.
If {_NodeName_} is in the command it will get replaced by
the _matched_name value.
base_cmd will also get prepended to the command.
"""
self._set_connection()
cmd = cmd.replace('{_NodeName_}', self._matched_name)
cmd = '%s %s' % (self._vp_cmd.base_cmd, cmd)
try:
stdout, stderr = processutils.ssh_execute(
self._connection, cmd, check_exit_code=check_exit_code)
result = stdout.strip().splitlines()
LOG.debug('Result for run_command: %s' % result)
except processutils.ProcessExecutionError:
result = []
LOG.exception("Error running command: %s" % cmd)
return result
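# Configuration sketch: _get_conn() above requires virtual_power_ssh_host,
# virtual_power_host_user and either virtual_power_host_pass or
# virtual_power_host_key. In a real deployment these live in the [baremetal]
# section of nova.conf; the overrides and values below are placeholders.
if __name__ == '__main__':
    CONF.set_override('virtual_power_ssh_host', '192.0.2.10', group='baremetal')
    CONF.set_override('virtual_power_host_user', 'stack', group='baremetal')
    CONF.set_override('virtual_power_host_key', '/home/stack/.ssh/id_rsa',
                      group='baremetal')
    CONF.set_override('virtual_power_type', 'virsh', group='baremetal')
    print(CONF.baremetal.virtual_power_ssh_host)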
|
pjamesjoyce/lcopt
|
refs/heads/development
|
lcopt/mass_balance.py
|
1
|
def recurse_mass(d, allow_no_mass = False, checkSecondary = True):
    """Walk a result tree keeping mass data: negate biosphere amounts, follow
    technosphere children only for kg/g activities, and flag each node with 'is_mass'."""
    to_return = {}
#cum_impact = 0
isBiosphere = d['tag'] == 'biosphere'
if not isBiosphere and checkSecondary:
isBiosphere = 'biosphere' in d['secondary_tags']
for k, v in d.items():
if k == 'amount' and isBiosphere:
#print (k, -v)
to_return[k] = -v
elif k == 'technosphere':
#print('technosphere')
#print(len(to_return), d['activity'], d['activity']['unit'])
if d['activity']['unit'] in ['kg', 'g'] or allow_no_mass:
for e in v:
#print (e['activity'])
#cum_impact += e['impact']
#if 'cum_impact' in e.keys():
# cum_impact += e['cum_impact']
if k in to_return.keys():
to_return[k].append(recurse_mass(e, False))
else:
to_return[k] = [recurse_mass(e, False)]
elif k in['biosphere', 'impact']:
pass
elif k == 'activity':
#print (k,v)
#activity_list = v.split('(')
activity = v['name'] # activity_list[0].strip()
unit = v['unit'] # activity_list[1].split(',')[0]
#print(activity, unit)
to_return['activity'] = str(activity)
to_return['unit'] = unit
if unit in ['kg', 'g'] or allow_no_mass:
to_return['is_mass'] = True
else:
to_return['is_mass'] = False
#elif k == 'impact':
# print('impact of {} = {}'.format(d['activity'], v))
else:
to_return[k] = v
#print('cum_impact of {} = {}'.format(d['activity'], cum_impact))
#to_return['cum_impact'] = cum_impact
return to_return
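# Minimal, hypothetical input for recurse_mass(); the keys mirror what the
# function reads above ('tag', 'secondary_tags', 'amount', 'activity',
# 'technosphere'), but real lcopt result trees carry additional fields.
if __name__ == '__main__':
    tree = {
        'tag': 'intermediate',
        'secondary_tags': [],
        'amount': 1.0,
        'activity': {'name': 'Widget production', 'unit': 'kg'},
        'technosphere': [
            {
                'tag': 'biosphere',
                'secondary_tags': [],
                'amount': 0.2,
                'activity': {'name': 'Carbon dioxide', 'unit': 'kg'},
                'technosphere': [],
            },
        ],
    }
    # biosphere amounts come back negated and each node gains an 'is_mass' flag
    print(recurse_mass(tree))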
|
botswana-harvard/microbiome
|
refs/heads/develop
|
microbiome/apps/mb_maternal/admin/maternal_off_study_admin.py
|
3
|
from django.contrib import admin
from ..models import MaternalOffStudy
from ..forms import MaternalOffStudyForm
from .base_maternal_model_admin import BaseMaternalModelAdmin
class MaternalOffStudyAdmin(BaseMaternalModelAdmin):
form = MaternalOffStudyForm
fields = (
'maternal_visit',
'report_datetime',
'offstudy_date',
'reason',
'reason_other',
'comment')
admin.site.register(MaternalOffStudy, MaternalOffStudyAdmin)
|
guerrerocarlos/odoo
|
refs/heads/8.0
|
addons/mrp/res_config.py
|
301
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class mrp_config_settings(osv.osv_memory):
_name = 'mrp.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_mrp_repair': fields.boolean("Manage repairs of products ",
help='Allows to manage all product repairs.\n'
'* Add/remove products in the reparation\n'
'* Impact for stocks\n'
'* Invoicing (products and/or services)\n'
'* Warranty concept\n'
'* Repair quotation report\n'
'* Notes for the technician and for the final customer.\n'
'-This installs the module mrp_repair.'),
'module_mrp_operations': fields.boolean("Allow detailed planning of work order",
help='This allows to add state, date_start,date_stop in production order operation lines (in the "Work Centers" tab).\n'
'-This installs the module mrp_operations.'),
'module_mrp_byproduct': fields.boolean("Produce several products from one manufacturing order",
help='You can configure by-products in the bill of material.\n'
'Without this module: A + B + C -> D.\n'
'With this module: A + B + C -> D + E.\n'
'-This installs the module mrp_byproduct.'),
'group_mrp_routings': fields.boolean("Manage routings and work orders ",
implied_group='mrp.group_mrp_routings',
help='Routings allow you to create and manage the manufacturing operations that should be followed '
'within your work centers in order to produce a product. They are attached to bills of materials '
'that will define the required raw materials.'),
'group_mrp_properties': fields.boolean("Allow several bill of materials per products using properties",
implied_group='product.group_mrp_properties',
help="""The selection of the right Bill of Material to use will depend on the properties specified on the sales order and the Bill of Material."""),
#FIXME: Should be removed as module product_manufacturer has been removed
'module_product_manufacturer': fields.boolean("Define manufacturers on products ",
help='This allows you to define the following for a product:\n'
'* Manufacturer\n'
'* Manufacturer Product Name\n'
'* Manufacturer Product Code\n'
'* Product Attributes.\n'
'-This installs the module product_manufacturer.'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
h2oai/h2o-3
|
refs/heads/master
|
h2o-py/tests/testdir_misc/pyunit_init_https.py
|
2
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.exceptions import H2OConnectionError
def test_https_startup():
try:
h2o.init(ip = "127.0.0.1", port="12345", https=True) # Port 12345 is used, as 54321 is expected to be occupied
assert False, "Expected to fail starting local H2O server with https=true"
except H2OConnectionError as err:
print(err) # HTTPS is not allowed during localhost startup
assert "Starting local server is not available with https enabled. You may start local instance of H2O with https manually (https://docs.h2o.ai/h2o/latest-stable/h2o-docs/welcome.html#new-user-quick-start)." == str(err)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_https_startup)
else:
test_https_startup()
|