repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
edhuckle/statsmodels | refs/heads/master | statsmodels/examples/ex_kernel_regression3.py | 34 | # -*- coding: utf-8 -*-
"""script to try out Censored kernel regression
Created on Wed Jan 02 13:43:44 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
    # reproducible demo data
    np.random.seed(500)
    nobs = [250, 1000][0]  # pick the first of two candidate sample sizes
    sig_fac = 1  # noise scale factor
    x = np.random.uniform(-2, 2, size=nobs)
    x.sort()
    # second regressor: x**2 plus a little jitter so it is not collinear
    x2 = x**2 + 0.02 * np.random.normal(size=nobs)
    y_true = np.sin(x*5)/x + 2*x - 3 * x2
    # heteroscedastic noise, variance increasing with x
    y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs)
    cens_side = ['left', 'right', 'random'][2]
    if cens_side == 'left':
        c_val = 0.5
        y_cens = np.clip(y, c_val, 100)
    elif cens_side == 'right':
        c_val = 3.5
        y_cens = np.clip(y, -100, c_val)
    elif cens_side == 'random':
        # observation-specific random censoring thresholds
        c_val = 3.5 + 3 * np.random.randn(nobs)
        y_cens = np.minimum(y, c_val)
    # NOTE(review): `c_val[:,None]` below only works for the 'random'
    # branch where c_val is an array; for 'left'/'right' it is a float
    # and the indexing would raise TypeError -- confirm intended usage.
    model = nparam.KernelCensoredReg(endog=[y_cens],
                    #exog=[np.column_stack((x, x**2))], reg_type='lc',
                    exog=[x, x2], reg_type='ll',
                    var_type='cc', bw='aic', #'cv_ls', #[0.23, 434697.22], #'cv_ls',
                    censor_val=c_val[:,None],
                    #defaults=nparam.EstimatorSettings(efficient=True)
                    )
    sm_bw = model.bw
    sm_mean, sm_mfx = model.fit()
    # model1 = nparam.KernelReg(endog=[y],
    #                  exog=[x], reg_type='lc',
    #                  var_type='c', bw='cv_ls')
    # mean1, mfx1 = model1.fit()
    # uncensored benchmark model on the censored endog
    model2 = nparam.KernelReg(endog=[y_cens],
                    exog=[x, x2], reg_type='ll',
                    var_type='cc', bw='aic',# 'cv_ls'
                    )
    mean2, mfx2 = model2.fit()
    print(model.bw)
    #print model1.bw
    print(model2.bw)
    # NOTE(review): the ix_rev built from argsort here is immediately
    # overwritten by model.sortix_rev below, so these three lines are
    # dead code kept only for comparison.
    ix = np.argsort(y_cens)
    ix_rev = np.zeros(nobs, int)
    ix_rev[ix] = np.arange(nobs)
    ix_rev = model.sortix_rev
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.plot(x, y, 'o', alpha=0.5)
    ax.plot(x, y_cens, 'o', alpha=0.5)
    ax.plot(x, y_true, lw=2, label='DGP mean')
    # sm_mean is in the model's internal sort order; undo it for plotting
    ax.plot(x, sm_mean[ix_rev], lw=2, label='model 0 mean')
    ax.plot(x, mean2, lw=2, label='model 2 mean')
    ax.legend()
    plt.show()
|
AndreiKrutikov/PsiDB | refs/heads/master | gtest/test/gtest_color_test.py | 3259 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# True iff we are running on Windows. The original line used a chained
# assignment (`IS_WINDOWS = os.name = 'nt'`) which *assigned* 'nt' to
# os.name on every platform and left IS_WINDOWS as the always-truthy
# string 'nt', disabling all the non-Windows test branches below.
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'  # environment variable read by gtest
COLOR_FLAG = 'gtest_color'     # command-line flag read by gtest
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is None:
    # absent keys are ignored, matching the original guarded delete
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""

  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  # None means "flag not given"; otherwise pass --gtest_color=<value>
  args = [] if color_flag is None else ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  # End-to-end checks: for each TERM / GTEST_COLOR env var / --gtest_color
  # flag combination, run the child binary and check whether it colorized.

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # dumb-ish terminals must not get color codes
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    # color-capable terminals get color by default
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' falls back to the TERM capability check
      self.assert_(not UsesColor('emacs', None, 'auto'))
    self.assert_(UsesColor('xterm', None, 'auto'))
    # 'yes' forces color even on a dumb terminal
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
    self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # the flag takes precedence over the environment variable
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    # unrecognized values disable color
    self.assert_(not UsesColor('xterm', None, 'unknown'))
# Run the test suite when executed directly.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
WebSQL/sdk | refs/heads/master | wsql_sdk/_lang/python3.py | 1 | """
This file is part of WSQL-SDK
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = "@bg"
from wsql_sdk._lang._python import *
# Import block injected at the top of every generated API module.
includes_for_api = """
from wsql import Error, handle_error
from wsql.cluster import transaction"""

# Output-file extension for this target language.
file_ext = ".py"
# NOTE(review): whitespace inside these literals looks collapsed --
# `indent` is expected to be a full Python indentation unit; confirm
# against upstream before relying on exact generated formatting.
indent = " "
doc_indent = indent
# blank lines emitted between generated top-level definitions
break_lines = 2
# expressions used to return a result set / a single row
return_array = """__cursor.fetchxall()"""
return_object = return_array + "[0]"
def temporary_table(name, columns):
    """create a temporary table

    Emits generated Python that drops/recreates an in-memory MySQL
    temporary table named after the argument and bulk-inserts the rows
    held in the Python variable of the same name ({0} doubles as both
    the variable name and the SQL table name).

    :param name: table / variable name
    :param columns: objects exposing ``.name`` and ``.type`` attributes
    :returns: str, the generated code fragment
    """
    columns_def = ', '.join('`{0.name}` {0.type}'.format(x) for x in columns)
    column_names = ', '.join('"{0.name}"'.format(x) for x in columns)
    column_names_sql = ', '.join('`{0.name}`'.format(x) for x in columns)
    place_holders = ', '.join(["%s"] * len(columns))
    # NOTE(review): the template's original indentation appears to have
    # been stripped; verify the emitted code's whitespace upstream.
    return """\
if not {0}:
return
__args = ((x.get(y, None) for y in ({1},)) for x in {0})
__cursor.execute(b"DROP TEMPORARY TABLE IF EXISTS `{0}`;")
__cursor.execute(b"CREATE TEMPORARY TABLE `{0}`({2}) ENGINE=MEMORY;")
__cursor.executemany(b"INSERT INTO `{0}` ({3}) VALUES ({4});", __args)"""\
.format(name, column_names, columns_def, column_names_sql, place_holders)
def transaction_open():
    """Emit the decorator line that opens a transaction scope."""
    decorator_line = " @transaction"
    return decorator_line
def transaction_close():
    """Close the transaction scope; nothing is emitted for Python."""
    return None
def procedure_open(name, args):
    """Emit the def-line of a generated procedure wrapper.

    Every procedure argument becomes a keyword argument defaulting to
    None, placed after the mandatory leading `connection` parameter.
    """
    extra = ''.join(', {0}=None'.format(arg) for arg in args)
    return "def {0}(connection{1}):".format(name, extra)
def procedure_close():
    """close procedure body

    Emits the epilogue that executes the accumulated __query through the
    connection and converts driver errors into API-level exceptions.
    NOTE(review): the template's original indentation appears stripped;
    verify emitted whitespace against upstream.
    """
    return """
try:
return connection.execute(__query)
except Error as e:
raise handle_error(exceptions, e)"""
def body_open():
    """Emit the def-line of the inner query function holding the logic."""
    line = " def __query(__connection):"
    return line
def body_close():
    """Close the main logic; nothing is emitted for Python."""
    return None
def cursor_open():
    """Emit the with-statement that opens a database cursor."""
    line = " with __connection.cursor() as __cursor:"
    return line
def cursor_close():
    """Close the cursor scope; the with-statement handles it, emit nothing."""
    return None
def procedure_call(name, args):
    """Emit the callproc statement invoking the stored procedure.

    :param name: procedure name
    :param args: objects exposing a ``.name`` attribute
    """
    arg_names = ', '.join(arg.name for arg in args)
    if len(args) == 1:
        # keep a one-element tuple a tuple
        arg_names += ","
    return ' __cursor.callproc(b"{0}", ({1}))'.format(name, arg_names)
|
dcbaker/alot | refs/heads/master | alot/db/utils.py | 1 | # Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import os
import email
import email.charset as charset
from email.header import Header
from email.iterators import typed_subpart_iterator
import tempfile
import re
import logging
import mailcap
from cStringIO import StringIO
from .. import crypto
from .. import helper
from ..errors import GPGProblem
from ..settings import settings
from ..helper import string_sanitize
from ..helper import string_decode
from ..helper import parse_mailcap_nametemplate
from ..helper import split_commandstring
charset.add_charset('utf-8', charset.QP, charset.QP, 'utf-8')
X_SIGNATURE_VALID_HEADER = 'X-Alot-OpenPGP-Signature-Valid'
X_SIGNATURE_MESSAGE_HEADER = 'X-Alot-OpenPGP-Signature-Message'
def add_signature_headers(mail, sigs, error_msg):
    '''Add pseudo headers to the mail indicating whether the signature
    verification was successful.

    :param mail: :class:`email.message.Message` the message to entitle
    :param sigs: list of :class:`gpgme.Signature`
    :param error_msg: `str` containing an error message, the empty
                      string indicating no error
    '''
    sig_from = ''

    if len(sigs) == 0:
        error_msg = error_msg or 'no signature found'
    else:
        try:
            key = crypto.get_key(sigs[0].fpr)
            for uid in key.uids:
                if crypto.check_uid_validity(key, uid.email):
                    sig_from = uid.uid
                    uid_trusted = True
                    break
            else:
                # No trusted uid found, we did not break but drop from the
                # for loop.
                uid_trusted = False
                sig_from = key.uids[0].uid
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; on any key-lookup failure fall back to
            # the raw fingerprint and treat the uid as untrusted
            sig_from = sigs[0].fpr
            uid_trusted = False

    mail.add_header(
        X_SIGNATURE_VALID_HEADER,
        'False' if error_msg else 'True',
    )
    mail.add_header(
        X_SIGNATURE_MESSAGE_HEADER,
        u'Invalid: {0}'.format(error_msg)
        if error_msg else
        u'Valid: {0}'.format(sig_from)
        if uid_trusted else
        u'Untrusted: {0}'.format(sig_from)
    )
def get_params(mail, failobj=None, header='content-type', unquote=True):
    '''Get Content-Type parameters as dict.

    RFC 2045 specifies that parameter names are case-insensitive, so
    we normalize them here.

    :param mail: :class:`email.message.Message`
    :param failobj: object to return if no such header is found
    :param header: the header to search for parameters, default
    :param unquote: unquote the values
    :returns: a `dict` containing the parameters
    '''
    failobj = failobj or []
    normalized = {}
    for name, value in mail.get_params(failobj, header, unquote):
        normalized[name.lower()] = value
    return normalized
def message_from_file(handle):
    '''Reads a mail from the given file-like object and returns an email
    object, very much like email.message_from_file. In addition to
    that OpenPGP encrypted data is detected and decrypted. If this
    succeeds, any mime messages found in the recovered plaintext
    message are added to the returned message object.

    :param handle: a file-like object
    :returns: :class:`email.message.Message` possibly augmented with
              decrypted data
    '''
    m = email.message_from_file(handle)

    # make sure noone smuggles a token in (data from m is untrusted)
    del m[X_SIGNATURE_VALID_HEADER]
    del m[X_SIGNATURE_MESSAGE_HEADER]

    p = get_params(m)
    app_pgp_sig = 'application/pgp-signature'
    app_pgp_enc = 'application/pgp-encrypted'

    # handle OpenPGP signed data
    if (m.is_multipart() and
            m.get_content_subtype() == 'signed' and
            p.get('protocol') == app_pgp_sig):
        # RFC 3156 is quite strict:
        # * exactly two messages
        # * the second is of type 'application/pgp-signature'
        # * the second contains the detached signature
        malformed = False
        if len(m.get_payload()) != 2:
            malformed = u'expected exactly two messages, got {0}'.format(
                len(m.get_payload()))
        else:
            ct = m.get_payload(1).get_content_type()
            if ct != app_pgp_sig:
                malformed = u'expected Content-Type: {0}, got: {1}'.format(
                    app_pgp_sig, ct)
            # TODO: RFC 3156 says the alg has to be lower case, but I've
            # seen a message with 'PGP-'. maybe we should be more
            # permissive here, or maybe not, this is crypto stuff...
            if not p.get('micalg', 'nothing').startswith('pgp-'):
                malformed = u'expected micalg=pgp-..., got: {0}'.format(
                    p.get('micalg', 'nothing'))

        sigs = []
        if not malformed:
            try:
                sigs = crypto.verify_detached(m.get_payload(0).as_string(),
                                              m.get_payload(1).get_payload())
            except GPGProblem as e:
                # NOTE: `unicode` makes this module Python-2 only
                malformed = unicode(e)

        add_signature_headers(m, sigs, malformed)

    # handle OpenPGP encrypted data
    elif (m.is_multipart() and
            m.get_content_subtype() == 'encrypted' and
            p.get('protocol') == app_pgp_enc and
            'Version: 1' in m.get_payload(0).get_payload()):
        # RFC 3156 is quite strict:
        # * exactly two messages
        # * the first is of type 'application/pgp-encrypted'
        # * the first contains 'Version: 1'
        # * the second is of type 'application/octet-stream'
        # * the second contains the encrypted and possibly signed data
        malformed = False

        ct = m.get_payload(0).get_content_type()
        if ct != app_pgp_enc:
            malformed = u'expected Content-Type: {0}, got: {1}'.format(
                app_pgp_enc, ct)

        want = 'application/octet-stream'
        ct = m.get_payload(1).get_content_type()
        if ct != want:
            malformed = u'expected Content-Type: {0}, got: {1}'.format(want,
                                                                       ct)
        if not malformed:
            try:
                sigs, d = crypto.decrypt_verify(m.get_payload(1).get_payload())
            except GPGProblem as e:
                # signature verification failures end up here too if
                # the combined method is used, currently this prevents
                # the interpretation of the recovered plain text
                # mail. maybe that's a feature.
                malformed = unicode(e)
            else:
                # parse decrypted message
                n = message_from_string(d)

                # add the decrypted message to m. note that n contains
                # all the attachments, no need to walk over n here.
                m.attach(n)

                # add any defects found
                m.defects.extend(n.defects)

                # there are two methods for both signed and encrypted
                # data, one is called 'RFC 1847 Encapsulation' by
                # RFC 3156, and one is the 'Combined method'.
                if len(sigs) == 0:
                    # 'RFC 1847 Encapsulation', the signature is a
                    # detached signature found in the recovered mime
                    # message of type multipart/signed.
                    if X_SIGNATURE_VALID_HEADER in n:
                        # copy the signature headers computed while
                        # parsing the inner message onto the outer one
                        for k in (X_SIGNATURE_VALID_HEADER,
                                  X_SIGNATURE_MESSAGE_HEADER):
                            m[k] = n[k]
                    else:
                        # an encrypted message without signatures
                        # should arouse some suspicion, better warn
                        # the user
                        add_signature_headers(m, [], 'no signature found')
                else:
                    # 'Combined method', the signatures are returned
                    # by the decrypt_verify function.

                    # note that if we reached this point, we know the
                    # signatures are valid. if they were not valid,
                    # the else block of the current try would not have
                    # been executed
                    add_signature_headers(m, sigs, '')

        if malformed:
            # surface the problem as an attached note instead of failing
            msg = u'Malformed OpenPGP message: {0}'.format(malformed)
            content = email.message_from_string(msg.encode('utf-8'))
            content.set_charset('utf-8')
            m.attach(content)

    return m
def message_from_string(s):
    '''Reads a mail from the given string. This is the equivalent of
    :func:`email.message_from_string` which does nothing but to wrap
    the given string in a StringIO object and to call
    :func:`email.message_from_file`.

    Please refer to the documentation of :func:`message_from_file` for
    details.
    '''
    buf = StringIO(s)
    return message_from_file(buf)
def extract_headers(mail, headers=None):
    """
    returns subset of this messages headers as human-readable format:
    all header values are decoded, the resulting string has
    one line "KEY: VALUE" for each requested header present in the mail.

    :param mail: the mail to use
    :type mail: :class:`email.Message`
    :param headers: headers to extract; defaults to all headers present
    :type headers: list of str
    """
    if headers is None:
        headers = mail.iterkeys()
    lines = []
    for key in headers:
        if key in mail:
            value = decode_header(mail.get(key, ''))
        else:
            # requested header absent: emit an empty value
            value = u''
        lines.append('%s: %s\n' % (key, value))
    return u''.join(lines)
def extract_body(mail, types=None, field_key='copiousoutput'):
    """
    returns a body text string for given mail.
    If types is `None`, `text/*` is used:
    the preferred type is text/plain when the prefer_plaintext config
    option is set, text/html otherwise. Non-plain parts are rendered
    through the matching mailcap entry.

    :param mail: the mail to use
    :type mail: :class:`email.Message`
    :param types: mime content types to use for body string
    :type types: list of str
    """
    preferred = 'text/plain' if settings.get(
        'prefer_plaintext') else 'text/html'
    has_preferred = False
    # see if the mail has our preferred type
    if types is None:
        has_preferred = list(typed_subpart_iterator(
            mail, *preferred.split('/')))

    body_parts = []
    for part in mail.walk():
        ctype = part.get_content_type()

        if types is not None:
            if ctype not in types:
                continue
        cd = part.get('Content-Disposition', '')
        if cd.startswith('attachment'):
            continue
        # if the mail has our preferred type, we only keep this type
        # note that if types != None, has_preferred always stays False
        if has_preferred and ctype != preferred:
            continue

        enc = part.get_content_charset() or 'ascii'
        raw_payload = part.get_payload(decode=True)
        if ctype == 'text/plain':
            raw_payload = string_decode(raw_payload, enc)
            body_parts.append(string_sanitize(raw_payload))
        else:
            # get mime handler
            _, entry = settings.mailcap_find_match(ctype, key=field_key)
            tempfile_name = None
            stdin = None
            if entry:
                handler_raw_commandstring = entry['view']
                # in case the mailcap defined command contains no '%s',
                # we pipe the files content to the handling command via stdin
                if '%s' in handler_raw_commandstring:
                    # open tempfile, respect mailcaps nametemplate
                    nametemplate = entry.get('nametemplate', '%s')
                    prefix, suffix = parse_mailcap_nametemplate(nametemplate)
                    with tempfile.NamedTemporaryFile(
                            delete=False, prefix=prefix, suffix=suffix) \
                            as tmpfile:
                        tmpfile.write(raw_payload)
                        tempfile_name = tmpfile.name
                else:
                    stdin = raw_payload

                # read parameter, create handler command
                # NOTE(review): assumes part.get_params() is not None
                # here -- confirm for parts lacking a Content-Type
                parms = tuple('='.join(p) for p in part.get_params())

                # create and call external command
                cmd = mailcap.subst(entry['view'], ctype,
                                    filename=tempfile_name, plist=parms)
                logging.debug('command: %s', cmd)
                logging.debug('parms: %s', str(parms))
                cmdlist = split_commandstring(cmd)
                # call handler
                rendered_payload, _, _ = helper.call_cmd(cmdlist, stdin=stdin)

                # remove tempfile
                if tempfile_name:
                    os.unlink(tempfile_name)

                if rendered_payload:  # handler had output
                    body_parts.append(string_sanitize(rendered_payload))
    return u'\n\n'.join(body_parts)
def decode_header(header, normalize=False):
    """
    decode a header value to a unicode string

    values are usually a mixture of different substrings
    encoded in quoted printable using different encodings.
    This turns it into a single unicode string

    :param header: the header value
    :type header: str
    :param normalize: replace trailing spaces after newlines
    :type normalize: bool
    :rtype: unicode
    """
    # If the value isn't ascii as RFC2822 prescribes,
    # we just return the unicode bytestring as is
    value = string_decode(header)  # convert to unicode
    try:
        value = value.encode('ascii')
    except UnicodeEncodeError:
        return value

    # some mailers send out incorrectly escaped headers
    # and double quote the escaped realname part again. remove those
    # RFC: 2047
    regex = r'"(=\?.+?\?.+?\?[^ ?]+\?=)"'
    value = re.sub(regex, r'\1', value)
    logging.debug("unquoted header: |%s|", value)

    # otherwise we interpret RFC2822 encoding escape sequences
    valuelist = email.header.decode_header(value)
    decoded_list = []
    for v, enc in valuelist:
        # each chunk may carry its own declared encoding
        v = string_decode(v, enc)
        decoded_list.append(string_sanitize(v))
    value = u' '.join(decoded_list)
    if normalize:
        # collapse folded-header continuation whitespace into one space
        value = re.sub(r'\n\s+', r' ', value)
    return value
def encode_header(key, value):
    """
    encodes a unicode string as a valid header value

    :param key: the header field this value will be stored in
    :type key: str
    :param value: the value to be encoded
    :type value: unicode
    """
    if key.lower() not in ['from', 'to', 'cc', 'bcc']:
        return Header(value)
    # handle list of "realname <email>" entries separately
    encoded = []
    for entry in value.split(','):
        match = re.search(r'\s*(.*)\s+<(.*\@.*\.\w*)>\s*$', entry)
        if match:  # If a realname part is contained
            name, address = match.groups()
            # try to encode as ascii, if that fails, revert to utf-8
            # name must be a unicode string here
            namepart = Header(name)
            # append address part encoded as ascii
            entry = '%s <%s>' % (namepart.encode(), address)
        encoded.append(entry)
    return Header(', '.join(encoded))
def is_subdir_of(subpath, superpath):
    """Return True if `subpath` equals or lies below `superpath`.

    Both paths are resolved with os.path.realpath first, so symlinks
    and relative paths are normalized before comparison.
    """
    # make both absolute
    superpath = os.path.realpath(superpath)
    subpath = os.path.realpath(subpath)

    if subpath == superpath:
        return True
    # require the prefix to end at a path separator: the previous
    # os.path.commonprefix test was character-based and wrongly
    # accepted e.g. '/a/bc' as being inside '/a/b'
    if not superpath.endswith(os.sep):
        superpath += os.sep
    return subpath.startswith(superpath)
|
iandees/esri-dump | refs/heads/master | esridump/cli.py | 2 | import argparse
import email.parser
from six.moves import urllib
import logging
import json
import sys
from esridump import EsriDumper
def _collect_headers(strings):
headers = {}
parser = email.parser.Parser()
for string in strings:
headers.update(dict(parser.parsestr(string)))
return headers
def _collect_params(strings):
params = {}
for string in strings:
params.update(dict(urllib.parse.parse_qsl(string)))
return params
def _parse_args(args):
    """Build the CLI parser and parse the given argument list.

    :param args: list of argument strings (typically sys.argv[1:])
    :returns: argparse.Namespace with the parsed options
    """
    parser = argparse.ArgumentParser(
        description="Convert a single Esri feature service URL to GeoJSON")
    parser.add_argument("url",
                        help="Esri layer URL")
    parser.add_argument("outfile",
                        type=argparse.FileType('w'),
                        help="Output file name (use - for stdout)")
    parser.add_argument("--proxy",
                        help="Proxy string to send requests through ie: https://example.com/proxy.ashx?<SERVER>")
    parser.add_argument("--jsonlines",
                        action='store_true',
                        default=False,
                        help="Output newline-delimited GeoJSON Features instead of a FeatureCollection")
    # -v and -q write to the same dest; the later one on the command line wins
    parser.add_argument("-v", "--verbose",
                        action='store_const',
                        dest='loglevel',
                        const=logging.DEBUG,
                        default=logging.INFO,
                        help="Turn on verbose logging")
    parser.add_argument("-q", "--quiet",
                        action='store_const',
                        dest='loglevel',
                        const=logging.WARNING,
                        default=logging.INFO,
                        help="Turn off most logging")
    parser.add_argument("-f", "--fields",
                        help="Specify a comma-separated list of fields to request from the server")
    parser.add_argument("--no-geometry",
                        dest='request_geometry',
                        action='store_false',
                        default=True,
                        help="Don't request geometry for the feature so the server returns attributes only")
    # repeatable options, collected into lists
    parser.add_argument("-H", "--header",
                        action='append',
                        dest='headers',
                        default=[],
                        help="Add an HTTP header to send when requesting from Esri server")
    parser.add_argument("-p", "--param",
                        action='append',
                        dest='params',
                        default=[],
                        help="Add a URL parameter to send when requesting from Esri server")
    parser.add_argument("-t", "--timeout",
                        type=int,
                        default=30,
                        help="HTTP timeout in seconds, default 30")
    parser.add_argument("--paginate-oid",
                        dest='paginate_oid',
                        action='store_true',
                        default=False,
                        help="Turn on paginate by OID regardless of normal pagination support")
    return parser.parse_args(args)
def main():
    """Entry point: parse CLI options and stream the Esri layer as GeoJSON."""
    args = _parse_args(sys.argv[1:])

    headers = _collect_headers(args.headers)
    params = _collect_params(args.params)

    # dedicated logger so the library's parent_logger hook picks it up
    logger = logging.getLogger('cli')
    logger.setLevel(args.loglevel)
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    requested_fields = args.fields.split(',') if args.fields else None

    dumper = EsriDumper(args.url,
                        extra_query_args=params,
                        extra_headers=headers,
                        fields=requested_fields,
                        request_geometry=args.request_geometry,
                        proxy=args.proxy,
                        timeout=args.timeout,
                        parent_logger=logger,
                        paginate_oid=args.paginate_oid)

    if args.jsonlines:
        # newline-delimited GeoJSON: one Feature per line
        for feature in dumper:
            args.outfile.write(json.dumps(feature))
            args.outfile.write('\n')
    else:
        # stream a FeatureCollection; the one-feature lookahead below only
        # writes a comma once the *next* feature is known to exist, so the
        # last feature is never followed by a trailing comma
        args.outfile.write('{"type":"FeatureCollection","features":[\n')
        feature_iter = iter(dumper)
        try:
            feature = next(feature_iter)
            while True:
                args.outfile.write(json.dumps(feature))
                feature = next(feature_iter)
                args.outfile.write(',\n')
        except StopIteration:
            # also reached immediately for an empty layer
            args.outfile.write('\n')
        args.outfile.write(']}')
|
dalejung/earthdragon | refs/heads/master | earthdragon/navel/lockable.py | 1 | from earthdragon.typelet import Int
from earthdragon.multidecorator import (
MultiDecorator,
require_self,
only_self,
first
)
from ..feature import Attr, features
class UnexpectedMutationError(Exception):
    """Raised when an attribute is mutated outside an unlocked scope."""
class Lockable:
    """Mixin that rejects attribute mutation outside an ``unlock`` scope.

    NOTE(review): relies on earthdragon's Attr/MultiDecorator hook
    machinery; hooks appear to be generators run up to ``yield`` before
    the wrapped call and resumed afterwards -- confirm against
    feature.Attr before changing.
    """

    # count of currently active unlock scopes; attribute writes are only
    # permitted while this is positive
    _in_flight = Int(default=0)

    @first
    @only_self
    def unlock(self):
        # entering the scope permits mutation; leaving revokes it again
        self._in_flight += 1
        yield
        self._in_flight -= 1

    # __init__ runs inside an unlock scope so constructors can set state
    __init__ = Attr()
    __init__.add_hook(unlock)

    @first
    @require_self
    def _lock_check(self, name, value):  # replicate setattr signature
        # the counter itself must stay writable, otherwise unlock could
        # never flip it
        if name in ['_in_flight']:
            return
        if self._in_flight <= 0:
            raise UnexpectedMutationError(name)
        yield

    __setattr__ = Attr()
    __setattr__.add_hook(_lock_check)


# decorator for methods that are allowed to mutate: wraps the call in an
# unlock scope
mutate = MultiDecorator()
mutate.add_hook(Lockable.unlock)
|
brain-research/mirage-rl-qprop | refs/heads/master | sandbox/rocky/tf/algos/npg.py | 386048 | |
bigdig/vnpy | refs/heads/master | vnpy/trader/constant.py | 2 | """
General constant string used in VN Trader.
"""
from enum import Enum
class Direction(Enum):
    """
    Direction of order/trade/position.

    Member values are the Chinese labels displayed in the VN Trader UI.
    """
    LONG = "多"   # long / buy side
    SHORT = "空"  # short / sell side
    NET = "净"    # net position (no long/short split)
class Offset(Enum):
    """
    Offset of order/trade.

    Member values are the Chinese labels displayed in the VN Trader UI.
    """
    NONE = ""                 # no offset specified
    OPEN = "开"               # open a new position
    CLOSE = "平"              # close an existing position
    CLOSETODAY = "平今"       # close a position opened today
    CLOSEYESTERDAY = "平昨"   # close a position opened before today
class Status(Enum):
    """
    Order status.

    Member values are the Chinese labels displayed in the VN Trader UI.
    """
    SUBMITTING = "提交中"   # submitting, not yet acknowledged
    NOTTRADED = "未成交"    # acknowledged, nothing filled
    PARTTRADED = "部分成交" # partially filled
    ALLTRADED = "全部成交"  # completely filled
    CANCELLED = "已撤销"    # cancelled
    REJECTED = "拒单"       # rejected by exchange/broker
class Product(Enum):
    """
    Product class.

    Member values are the Chinese labels displayed in the VN Trader UI.
    """
    EQUITY = "股票"    # stock
    FUTURES = "期货"   # futures
    OPTION = "期权"    # option
    INDEX = "指数"     # index
    FOREX = "外汇"     # foreign exchange
    SPOT = "现货"      # spot
    ETF = "ETF"
    BOND = "债券"      # bond
    WARRANT = "权证"   # warrant
    SPREAD = "价差"    # spread
    FUND = "基金"      # fund
class OrderType(Enum):
    """
    Order type.

    Member values are the Chinese/technical labels shown in the UI.
    """
    LIMIT = "限价"   # limit order
    MARKET = "市价"  # market order
    STOP = "STOP"    # stop order
    FAK = "FAK"      # fill-and-kill
    FOK = "FOK"      # fill-or-kill
    RFQ = "询价"     # request for quote
class OptionType(Enum):
    """
    Option type.

    Member values are the Chinese labels displayed in the VN Trader UI.
    """
    CALL = "看涨期权"  # call option
    PUT = "看跌期权"   # put option
class Exchange(Enum):
    """
    Exchange.

    Member values are the codes used throughout VN Trader to build the
    "symbol.exchange" vt_symbol identifiers.
    """
    # Chinese
    CFFEX = "CFFEX"         # China Financial Futures Exchange
    SHFE = "SHFE"           # Shanghai Futures Exchange
    CZCE = "CZCE"           # Zhengzhou Commodity Exchange
    DCE = "DCE"             # Dalian Commodity Exchange
    INE = "INE"             # Shanghai International Energy Exchange
    SSE = "SSE"             # Shanghai Stock Exchange
    SZSE = "SZSE"           # Shenzhen Stock Exchange
    SGE = "SGE"             # Shanghai Gold Exchange
    WXE = "WXE"             # Wuxi Steel Exchange
    CFETS = "CFETS"         # China Foreign Exchange Trade System

    # Global
    SMART = "SMART"         # Smart Router for US stocks
    NYSE = "NYSE"           # New York Stock Exchnage
    NASDAQ = "NASDAQ"       # Nasdaq Exchange
    ARCA = "ARCA"           # ARCA Exchange
    EDGEA = "EDGEA"         # Direct Edge Exchange
    ISLAND = "ISLAND"       # Nasdaq Island ECN
    BATS = "BATS"           # Bats Global Markets
    IEX = "IEX"             # The Investors Exchange
    NYMEX = "NYMEX"         # New York Mercantile Exchange
    COMEX = "COMEX"         # COMEX of CME
    GLOBEX = "GLOBEX"       # Globex of CME
    IDEALPRO = "IDEALPRO"   # Forex ECN of Interactive Brokers
    CME = "CME"             # Chicago Mercantile Exchange
    ICE = "ICE"             # Intercontinental Exchange
    SEHK = "SEHK"           # Stock Exchange of Hong Kong
    HKFE = "HKFE"           # Hong Kong Futures Exchange
    HKSE = "HKSE"           # Hong Kong Stock Exchange
    SGX = "SGX"             # Singapore Global Exchange
    CBOT = "CBT"            # Chicago Board of Trade
    CBOE = "CBOE"           # Chicago Board Options Exchange
    CFE = "CFE"             # CBOE Futures Exchange
    DME = "DME"             # Dubai Mercantile Exchange
    EUREX = "EUX"           # Eurex Exchange
    APEX = "APEX"           # Asia Pacific Exchange
    LME = "LME"             # London Metal Exchange
    BMD = "BMD"             # Bursa Malaysia Derivatives
    TOCOM = "TOCOM"         # Tokyo Commodity Exchange
    EUNX = "EUNX"           # Euronext Exchange
    KRX = "KRX"             # Korean Exchange
    OTC = "OTC"             # OTC Product (Forex/CFD/Pink Sheet Equity)
    IBKRATS = "IBKRATS"     # Paper Trading Exchange of IB

    # CryptoCurrency
    BITMEX = "BITMEX"
    OKEX = "OKEX"
    HUOBI = "HUOBI"
    BITFINEX = "BITFINEX"
    BINANCE = "BINANCE"
    BYBIT = "BYBIT"         # bybit.com
    COINBASE = "COINBASE"
    DERIBIT = "DERIBIT"
    GATEIO = "GATEIO"
    BITSTAMP = "BITSTAMP"

    # Special Function
    LOCAL = "LOCAL"         # For local generated data
class Currency(Enum):
    """
    Currency.

    ISO 4217-style codes used for pricing and account balances.
    """
    USD = "USD"  # US dollar
    HKD = "HKD"  # Hong Kong dollar
    CNY = "CNY"  # Chinese yuan
class Interval(Enum):
    """
    Interval of bar data.

    Member values are the short codes used when requesting history.
    """
    MINUTE = "1m"
    HOUR = "1h"
    DAILY = "d"
    WEEKLY = "w"
    TICK = "tick"  # raw tick stream, not aggregated bars
|
joshua-cogliati-inl/raven | refs/heads/devel | framework/utils/RAVENiterators.py | 2 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Oct 13, 2015
@author: alfoa
"""
from __future__ import division, print_function, unicode_literals, absolute_import
#External Modules------------------------------------------------------------------------------------
import numpy as np
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
#Internal Modules End--------------------------------------------------------------------------------
class ravenArrayIterator(object):
    """
    This class implements a custom RAVEN iterator, that avoids creating the cartesian product for N-Dimensional Grids
    """
    def __init__(self, shape=None, arrayIn=None):
        """
        Init method.
        @ In, shape, tuple or list, (required if 'arrayIn' is not provided), the shape of the N-D array for which an iterator needs to be created
        @ In, arrayIn, ndarray or cachedarray, (required if 'shape' is not provided) the array for which the iterator needs to be created
        @ Out, None
        """
        # exactly one of the two arguments must be supplied
        if shape is None and arrayIn is None:
            raise IOError("either shape or arrayIn need to be passed in")
        if shape is not None and arrayIn is not None:
            raise IOError("both shape and arrayIn are passed in")
        self.shape = shape if shape is not None else arrayIn.shape  # array shape. tuple(stepsInDim1, stepsInDim2,....stepsInDimN)
        self.ndim = len(self.shape)          # number of dimensions
        self.maxCnt = np.prod(self.shape)    # maximum number of combinations
        self.cnt = 0                         # counter used for the iterations
        self.finished = False                # has the iterator hit the end?
        self.iterator = None                 # the internal iterator object
        # if 'shape' is passed in, the iterator is implemented internally as a
        # list of per-dimension indices (no cartesian product is materialized);
        # if 'arrayIn' is passed in, numpy.nditer is used (arrayIn must
        # represent a full grid)
        if arrayIn is not None:
            self.iterator = np.nditer(arrayIn,flags=['multi_index'])
            self.multiIndex = self.iterator.multi_index
        else:
            self.iterator = [0]*self.ndim
            # NOTE: multiIndex aliases the same list object and therefore
            # tracks it as it is advanced in place
            self.multiIndex = self.iterator

    def iternext(self):
        """
        This method checks whether iterations are left, and perform a
        single internal iteration without returning the result.
        @ In, None
        @ Out, self.finished, bool, return if the iteration finished
        """
        self.cnt += 1
        #if self.cnt != 1:
        if type(self.iterator).__name__ == 'list':
            if self.cnt >= self.maxCnt:
                self.finished = True
            else:
                # odometer-style advance: increment the last dimension,
                # carrying into earlier dimensions on overflow
                for i in range(len(self.iterator)-1, -1, -1):
                    if self.iterator[i] + 1 >= self.shape[i]:
                        self.iterator[i] = 0
                        continue
                    else:
                        self.iterator[i]+=1
                        break
        else:
            self.iterator.iternext()
            self.finished = self.iterator.finished
            if not self.finished:
                self.multiIndex = self.iterator.multi_index
        return self.finished

    def reset(self):
        """
        This method resets the iterator to its initial status
        @ In, None
        @ Out, None
        """
        self.cnt, self.finished = 0, False
        if type(self.iterator).__name__ == 'list':
            # NOTE(review): a fresh list is bound here but self.multiIndex
            # still references the old list object -- confirm whether
            # callers rely on multiIndex after a list-mode reset
            self.iterator = [0]*self.ndim
        else:
            self.iterator.reset()
            self.multiIndex, self.finished = self.iterator.multi_index, self.iterator.finished

    def __iter__(self):
        """
        Method that returns the iterator object and is implicitly called at the start of loops.
        @ In, None
        @ Out, self, iterator object
        """
        return self

    def next(self):
        """
        Method to get the next pointer (value), if finished, raise a StopIteration exception
        (Python-2 alias of __next__)
        @ In, None
        @ Out, iterator, tuple, the n-d iterator
        """
        return self.__next__()

    def __next__(self):
        """
        See next(self)
        @ In, None
        @ Out, iterator, tuple, the n-d iterator
        """
        # advances first, so the initial all-zeros index is never yielded
        # by iteration (it is only available via the initial state)
        self.iternext()
        if self.finished:
            raise StopIteration
        if type(self.iterator).__name__ == 'list':
            return self.iterator
        else:
            return self.iterator.multi_index
|
chokribr/inveniotest | refs/heads/master | modules/miscutil/lib/upgrades/invenio_2013_01_08_new_goto_table.py | 24 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.dbquery import run_sql
# Upgrade-recipe dependency: this recipe runs only after the Invenio 1.1.0
# release schema is in place.
depends_on = ['invenio_release_1_1_0']
def info():
    """Return the one-line description shown by the Invenio upgrade engine."""
    description = "New goto table"
    return description
def do_upgrade():
    """Create the 'goto' table used to store URL redirection labels."""
    run_sql("""
CREATE TABLE IF NOT EXISTS goto (
  label varchar(150) NOT NULL,
  plugin varchar(150) NOT NULL,
  parameters text NOT NULL,
  creation_date datetime NOT NULL,
  modification_date datetime NOT NULL,
  PRIMARY KEY (label),
  KEY (creation_date),
  KEY (modification_date)
) ENGINE=MyISAM;
""")
def estimate():
    """Estimate running time of upgrade in seconds (optional)."""
    # Creating one small empty table is effectively instantaneous.
    return 1
|
rosmo/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_profile_http_compression.py | 38 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible tooling (module maturity / support status).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_profile_http_compression
short_description: Manage HTTP compression profiles on a BIG-IP
description:
- Manage HTTP compression profiles on a BIG-IP.
version_added: 2.7
options:
name:
description:
- Specifies the name of the compression profile.
type: str
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(httpcompression) profile.
type: str
description:
description:
- Description of the HTTP compression profile.
type: str
buffer_size:
description:
- Maximum number of compressed bytes that the system buffers before inserting
a Content-Length header (which specifies the compressed size) into the response.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: int
gzip_level:
description:
- Specifies the degree to which the system compresses the content.
- Higher compression levels cause the compression process to be slower.
- Valid values are between 1 (least compression and fastest) to 9 (most
compression and slowest).
type: int
choices:
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
gzip_memory_level:
description:
- Number of kilobytes of memory that the system uses for internal compression
buffers when compressing a server response.
type: int
choices:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
gzip_window_size:
description:
- Number of kilobytes in the window size that the system uses when compressing
a server response.
type: int
choices:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create an HTTP compression profile
bigip_profile_http_compression:
name: profile1
description: Custom HTTP Compression Profile
buffer_size: 131072
gzip_level: 6
gzip_memory_level: 16k
gzip_window_size: 64k
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the resource.
returned: changed
type: str
sample: My custom profile
buffer_size:
description: The new buffer size of the profile.
returned: changed
type: int
sample: 4096
gzip_memory_level:
description: The new GZIP memory level, in KB, of the profile.
returned: changed
type: int
sample: 16
gzip_level:
description: The new GZIP level of the profile. Smaller is less compression.
returned: changed
type: int
sample: 2
gzip_window_size:
description: The new GZIP window size, in KB, of the profile.
returned: changed
type: int
sample: 64
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for the HTTP compression profile module."""

    # Maps BIG-IP REST API attribute names to this module's parameter names.
    api_map = {
        'bufferSize': 'buffer_size',
        'defaultsFrom': 'parent',
        'gzipMemoryLevel': 'gzip_memory_level',
        'gzipLevel': 'gzip_level',
        'gzipWindowSize': 'gzip_window_size',
    }

    # Attributes (API-side names) sent to the device on create/update.
    api_attributes = [
        'description',
        'bufferSize',
        'defaultsFrom',
        'gzipMemoryLevel',
        'gzipLevel',
        'gzipWindowSize',
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'description',
        'buffer_size',
        'gzip_memory_level',
        'gzip_level',
        'gzip_window_size',
    ]

    # Values diffed between desired and current state to detect changes.
    updatables = [
        'description',
        'buffer_size',
        'gzip_memory_level',
        'gzip_level',
        'gzip_window_size',
    ]
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def description(self):
        # The API reports an absent description as the literal string 'none';
        # normalize both that and a true None to None.
        if self._values['description'] in [None, 'none']:
            return None
        return self._values['description']

    @property
    def gzip_memory_level(self):
        # Divided by 1024 to convert to the module's units; presumably the API
        # value is in bytes while the module works in KB — TODO confirm.
        if self._values['gzip_memory_level'] is None:
            return None
        return self._values['gzip_memory_level'] / 1024

    @property
    def gzip_window_size(self):
        # Same unit conversion as gzip_memory_level.
        if self._values['gzip_window_size'] is None:
            return None
        return self._values['gzip_window_size'] / 1024
class ModuleParameters(Parameters):
    """Parameters as supplied by the user to the Ansible module."""

    @property
    def parent(self):
        # Expand the parent profile name into a fully-qualified
        # /partition/name reference for the API.
        if self._values['parent'] is None:
            return None
        result = fq_name(self.partition, self._values['parent'])
        return result
class Changes(Parameters):
    def to_return(self):
        """Return a dict of the returnable values, filtered of empty entries."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Best-effort reporting: a failure to render a value must never
            # break module execution, so any error here is swallowed.
            pass
        return result
class UsableChanges(Changes):
    """Changes converted into the representation the device API expects."""

    @property
    def gzip_memory_level(self):
        # Inverse of the ApiParameters conversion: module units back to
        # the API's units (multiplied by 1024).
        if self._values['gzip_memory_level'] is None:
            return None
        return self._values['gzip_memory_level'] * 1024

    @property
    def gzip_window_size(self):
        if self._values['gzip_window_size'] is None:
            return None
        return self._values['gzip_window_size'] * 1024
class ReportableChanges(Changes):
    """Changes converted back to user-facing units for the module result.

    The module's RETURN documentation declares ``gzip_memory_level`` and
    ``gzip_window_size`` as integers (KB). With ``from __future__ import
    division`` in effect at the top of this file, ``/ 1024`` produced a
    float (e.g. ``16.0``), so floor division is used to match the
    documented return type. Values originate from ``UsableChanges``
    (``want * 1024``), so they are exact multiples of 1024 and floor
    division loses nothing.
    """

    @property
    def gzip_memory_level(self):
        if self._values['gzip_memory_level'] is None:
            return None
        # // keeps the documented int type under true division semantics.
        return self._values['gzip_memory_level'] // 1024

    @property
    def gzip_window_size(self):
        if self._values['gzip_window_size'] is None:
            return None
        return self._values['gzip_window_size'] // 1024
class Difference(object):
    """Computes per-parameter differences between desired and current state."""

    def __init__(self, want, have=None):
        self.want = want    # ModuleParameters (desired state)
        self.have = have    # ApiParameters (current device state)

    def compare(self, param):
        # Prefer a specialized property on this class when one exists;
        # otherwise fall back to a plain inequality comparison.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # 'have' lacks the attribute entirely; treat 'want' as a change.
            return attr1
        # Implicitly returns None when the values are equal (no change).

    @property
    def parent(self):
        # The parent profile is immutable on the device, so any difference
        # is an error rather than a change to apply.
        if self.want.parent != self.have.parent:
            raise F5ModuleError(
                "The parent profile cannot be changed"
            )
class ModuleManager(object):
    """Orchestrates create/update/delete of an HTTP compression profile."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)  # desired state
        self.have = ApiParameters()      # current device state (read on update)
        self.changes = UsableChanges()   # accumulated delta to apply/report

    def _set_changed_options(self):
        """On create: every user-supplied returnable counts as a change."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        """Diff desired vs. current state; return True when anything changed."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # A dict result means a composite change spanning several keys.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point: converge to the desired state and return the result dict."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Surface any deprecation warnings collected under '__warnings'.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Return True if the profile currently exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body is treated as "does not exist".
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Report "would change" without touching the device.
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        """POST the new profile to the device; raise F5ModuleError on failure."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update_on_device(self):
        """PATCH only the changed attributes onto the existing profile."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True
        raise F5ModuleError(resp.content)

    def read_current_from_device(self):  # lgtm [py/similar-function]
        """Fetch the profile's current configuration from the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http-compression/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the module argument spec, merged with the shared F5 spec."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            parent=dict(),
            buffer_size=dict(type='int'),
            description=dict(),
            gzip_level=dict(
                type='int',
                choices=[1, 2, 3, 4, 5, 6, 7, 8, 9]
            ),
            gzip_memory_level=dict(
                type='int',
                choices=[1, 2, 4, 8, 16, 32, 64, 128, 256]
            ),
            gzip_window_size=dict(
                type='int',
                choices=[1, 2, 4, 8, 16, 32, 64, 128]
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        self.argument_spec = {}
        # Shared F5 connection arguments first; module-specific keys override.
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: run the manager and exit via Ansible conventions."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        # Translate known module errors into a clean Ansible failure.
        module.fail_json(msg=str(ex))
# Standard entry point when Ansible executes the module as a script.
if __name__ == '__main__':
    main()
|
NaN-git/bicreditsnew | refs/heads/master | qa/rpc-tests/receivedby.py | 4 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcredit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework import BitcreditTestFramework
from bitcreditrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def get_sub_array_from_array(object_array, to_match):
    '''
    Finds and returns the first sub array (dict) from an array of arrays
    whose entries match every key/value pair in to_match.

    to_match should be a unique identifier of a sub array.
    Returns an empty list when nothing matches (legacy sentinel, kept for
    callers that test the result with len()).
    '''
    for item in object_array:
        # all() replaces the manual flag loop; also drops the unused
        # num_matched counter the original carried.
        if all(item[key] == value for key, value in to_match.items()):
            return item
    return []
def check_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Search object_array (a list of JSON-style dicts) for entries whose
    key/value pairs all equal those of to_match, and verify every matching
    entry also carries the key/value pairs given in expected.

    When should_not_find is True, instead verify that nothing in
    object_array matches to_match at all.
    """
    if should_not_find == True:
        expected = { }
    matches = 0
    for candidate in object_array:
        # Skip entries that differ from to_match on any key.
        if any(candidate[key] != value for key, value in to_match.items()):
            continue
        for key, value in expected.items():
            if candidate[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(candidate), str(key), str(value)))
        matches = matches + 1
    if matches == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if matches > 0 and should_not_find == True:
        raise AssertionError("Objects was matched %s"%(str(to_match)))
class ReceivedByTest(BitcreditTestFramework):
    """Functional test for the receivedby* wallet RPCs across two nodes.

    NOTE(review): Decimal is assumed to come from the star-import of 'util'
    at the top of the file — confirm before refactoring the imports.
    """

    def run_test(self):
        '''
        listreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        #Check not listed in listreceivedbyaddress because has 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           { },
                           True)
        #Bury Tx under 10 block so it will be returned by listreceivedbyaddress
        self.nodes[1].setgenerate(True, 10)
        self.sync_all()
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence < 10
        check_array_result(self.nodes[1].listreceivedbyaddress(5),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence > 10, should not find Tx
        check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)

        #Empty Tx: a fresh address with includeempty=True must appear with 0s
        addr = self.nodes[1].getnewaddress()
        check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})

        '''
        getreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        #Check balance is 0 because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))

        #Check balance is 0.1 (minconf=0 counts the unconfirmed tx)
        balance = self.nodes[1].getreceivedbyaddress(addr,0)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))

        #Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
        self.nodes[1].setgenerate(True, 10)
        self.sync_all()
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))

        '''
        listreceivedbyaccount + getreceivedbyaccount Test
        '''
        #set pre-state
        addrArr = self.nodes[1].getnewaddress()
        account = self.nodes[1].getaccount(addrArr)
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)

        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        # listreceivedbyaccount should return received_by_account_json because of 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           received_by_account_json)

        # getreceivedbyaddress should return same balance because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account:
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))

        self.nodes[1].setgenerate(True, 10)
        self.sync_all()
        # listreceivedbyaccount should return updated account balance
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})

        # getreceivedbyaddress should return updates balance
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account + Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))

        #Create a new account named "mynewaccount" that has a 0 balance
        self.nodes[1].getaccountaddress("mynewaccount")
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")

        # Test includeempty of listreceivedbyaccount
        if received_by_account_json["amount"] != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))

        # Test getreceivedbyaccount for 0 amount accounts
        balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
|
pytest-dev/pytest | refs/heads/main | src/_pytest/_io/wcwidth.py | 16 | import unicodedata
from functools import lru_cache
@lru_cache(100)
def wcwidth(c: str) -> int:
    """Return the number of terminal columns needed to display character *c*.

    -1 means the character is not printable; otherwise 0, 1 or 2.
    """
    code = ord(c)

    # Printable ASCII always occupies exactly one column.
    if 0x20 <= code < 0x7F:
        return 1

    # A handful of Cf/Zp/Zl characters (NUL, zero-width/bidi marks,
    # line/paragraph separators) that should render as zero-width.
    is_zero_width = (
        code == 0x0000
        or 0x200B <= code <= 0x200F
        or 0x2028 <= code <= 0x202E
        or 0x2060 <= code <= 0x2063
    )
    if is_zero_width:
        return 0

    cat = unicodedata.category(c)
    if cat == "Cc":
        # Control characters are not printable.
        return -1
    if cat in ("Me", "Mn"):
        # Enclosing/non-spacing combining marks take no column.
        return 0

    # Fullwidth/Wide east asian characters need two columns; the rest one.
    return 2 if unicodedata.east_asian_width(c) in ("F", "W") else 1
def wcswidth(s: str) -> int:
    """Return the number of terminal columns needed to display string *s*.

    Returns -1 as soon as any character in *s* is non-printable.
    """
    widths = []
    for ch in unicodedata.normalize("NFC", s):
        w = wcwidth(ch)
        if w < 0:
            return -1
        widths.append(w)
    return sum(widths)
|
crosswalk-project/chromium-crosswalk-efl | refs/heads/efl/crosswalk-10/39.0.2171.19 | tools/telemetry/telemetry/core/platform/linux_platform_backend.py | 25 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
import sys
from telemetry import decorators
from telemetry.core.platform import linux_based_platform_backend
from telemetry.core.platform import platform_backend
from telemetry.core.platform import posix_platform_backend
from telemetry.core.platform.power_monitor import msr_power_monitor
from telemetry.util import cloud_storage
from telemetry.util import support_binaries
# Candidate 'perfhost' binary names, one per supported Ubuntu release
# codename (precise, trusty).
_POSSIBLE_PERFHOST_APPLICATIONS = [
  'perfhost_precise',
  'perfhost_trusty',
]
class LinuxPlatformBackend(
    posix_platform_backend.PosixPlatformBackend,
    linux_based_platform_backend.LinuxBasedPlatformBackend):
  """Telemetry platform backend for desktop Linux hosts.

  NOTE(review): this file is Python 2 syntax ('except E, e' clauses and the
  0755 octal literal) and must not be run under Python 3 as-is.
  """

  def __init__(self):
    super(LinuxPlatformBackend, self).__init__()
    # MSR-based power monitoring; readings come through ReadMsr below.
    self._power_monitor = msr_power_monitor.MsrPowerMonitor(self)

  def StartRawDisplayFrameRateMeasurement(self):
    raise NotImplementedError()

  def StopRawDisplayFrameRateMeasurement(self):
    raise NotImplementedError()

  def GetRawDisplayFrameRateMeasurements(self):
    raise NotImplementedError()

  def IsThermallyThrottled(self):
    raise NotImplementedError()

  def HasBeenThermallyThrottled(self):
    raise NotImplementedError()

  def GetOSName(self):
    return 'linux'

  @decorators.Cache
  def GetOSVersionName(self):
    # Parses /etc/lsb-release, so only LSB-style distros are supported.
    if not os.path.exists('/etc/lsb-release'):
      raise NotImplementedError('Unknown Linux OS version')

    codename = None
    version = None
    for line in self.GetFileContents('/etc/lsb-release').splitlines():
      key, _, value = line.partition('=')
      if key == 'DISTRIB_CODENAME':
        codename = value.strip()
      elif key == 'DISTRIB_RELEASE':
        try:
          version = float(value)
        except ValueError:
          version = 0  # unparsable release number; treat as unknown
      if codename and version:
        break
    return platform_backend.OSVersion(codename, version)

  def CanFlushIndividualFilesFromSystemCache(self):
    return True

  def FlushEntireSystemCache(self):
    # Drops page cache, dentries and inodes; requires root privileges.
    p = subprocess.Popen(['/sbin/sysctl', '-w', 'vm.drop_caches=3'])
    p.wait()
    assert p.returncode == 0, 'Failed to flush system cache'

  def CanLaunchApplication(self, application):
    # ipfw additionally requires its kernel module to be loaded.
    if application == 'ipfw' and not self._IsIpfwKernelModuleInstalled():
      return False
    return super(LinuxPlatformBackend, self).CanLaunchApplication(application)

  def InstallApplication(self, application):
    if application == 'ipfw':
      self._InstallIpfw()
    elif application == 'avconv':
      self._InstallBinary(application, fallback_package='libav-tools')
    elif application in _POSSIBLE_PERFHOST_APPLICATIONS:
      self._InstallBinary(application)
    else:
      raise NotImplementedError(
          'Please teach Telemetry how to install ' + application)

  def CanMonitorPower(self):
    return self._power_monitor.CanMonitorPower()

  def CanMeasurePerApplicationPower(self):
    return self._power_monitor.CanMeasurePerApplicationPower()

  def StartMonitoringPower(self, browser):
    self._power_monitor.StartMonitoringPower(browser)

  def StopMonitoringPower(self):
    return self._power_monitor.StopMonitoringPower()

  def ReadMsr(self, msr_number, start=0, length=64):
    """Reads an MSR via the rdmsr utility and returns the selected bit field.

    start/length select a bit range from the register value; raises OSError
    when rdmsr fails or prints something unparsable.
    """
    cmd = ['/usr/sbin/rdmsr', '-d', str(msr_number)]
    (out, err) = subprocess.Popen(cmd,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE).communicate()
    if err:
      raise OSError(err)
    try:
      result = int(out)
    except ValueError:
      raise OSError('Cannot interpret rdmsr output: %s' % out)
    return result >> start & ((1 << length) - 1)

  def _IsIpfwKernelModuleInstalled(self):
    return 'ipfw_mod' in subprocess.Popen(
        ['lsmod'], stdout=subprocess.PIPE).communicate()[0]

  def _InstallIpfw(self):
    # Fetches the ipfw binary and kernel module from cloud storage, then
    # insmods the module and copies the binary to /usr/local/sbin via sudo.
    ipfw_bin = support_binaries.FindPath('ipfw', self.GetOSName())
    ipfw_mod = support_binaries.FindPath('ipfw_mod.ko', self.GetOSName())

    try:
      changed = cloud_storage.GetIfChanged(
          ipfw_bin, cloud_storage.INTERNAL_BUCKET)
      changed |= cloud_storage.GetIfChanged(
          ipfw_mod, cloud_storage.INTERNAL_BUCKET)
    except cloud_storage.CloudStorageError, e:
      logging.error(str(e))
      logging.error('You may proceed by manually building and installing'
                    'dummynet for your kernel. See: '
                    'http://info.iet.unipi.it/~luigi/dummynet/')
      sys.exit(1)

    if changed or not self.CanLaunchApplication('ipfw'):
      if not self._IsIpfwKernelModuleInstalled():
        subprocess.check_call(['sudo', 'insmod', ipfw_mod])
      os.chmod(ipfw_bin, 0755)
      subprocess.check_call(['sudo', 'cp', ipfw_bin, '/usr/local/sbin'])

    assert self.CanLaunchApplication('ipfw'), 'Failed to install ipfw. ' \
        'ipfw provided binaries are not supported for linux kernel < 3.13. ' \
        'You may proceed by manually building and installing dummynet for ' \
        'your kernel. See: http://info.iet.unipi.it/~luigi/dummynet/'

  def _InstallBinary(self, bin_name, fallback_package=None):
    bin_path = support_binaries.FindPath(bin_name, self.GetOSName())
    if not bin_path:
      raise Exception('Could not find the binary package %s' % bin_name)
    # Make the downloaded binary reachable from PATH for the current process.
    os.environ['PATH'] += os.pathsep + os.path.dirname(bin_path)

    try:
      cloud_storage.GetIfChanged(bin_path, cloud_storage.INTERNAL_BUCKET)
      os.chmod(bin_path, 0755)
    except cloud_storage.CloudStorageError, e:
      logging.error(str(e))
      if fallback_package:
        raise Exception('You may proceed by manually installing %s via:\n'
                        'sudo apt-get install %s' %
                        (bin_name, fallback_package))

    assert self.CanLaunchApplication(bin_name), 'Failed to install ' + bin_name
|
ity/pants | refs/heads/master | src/python/pants/backend/jvm/tasks/jvm_compile/zinc/zinc_compile.py | 1 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import re
import textwrap
from contextlib import closing
from xml.etree import ElementTree
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.javac_plugin import JavacPlugin
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.targets.scalac_plugin import ScalacPlugin
from pants.backend.jvm.tasks.jvm_compile.analysis_tools import AnalysisTools
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis import ZincAnalysis
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis_parser import ZincAnalysisParser
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.hash_utils import hash_file
from pants.base.workunit import WorkUnitLabel
from pants.java.distribution.distribution import Distribution, DistributionLocator
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_open
from pants.util.memo import memoized_method, memoized_property
# Well known metadata file required to register scalac plugins with nsc.
_SCALAC_PLUGIN_INFO_FILE = 'scalac-plugin.xml'

# Well known metadata file to register javac plugins.
_JAVAC_PLUGIN_INFO_FILE = 'META-INF/services/com.sun.source.util.Plugin'

# Well known metadata file to register annotation processors with a java 1.6+ compiler.
_PROCESSOR_INFO_FILE = 'META-INF/services/javax.annotation.processing.Processor'

# Module-level logger for this compile task.
logger = logging.getLogger(__name__)
class BaseZincCompile(JvmCompile):
"""An abstract base class for zinc compilation tasks."""
_ZINC_MAIN = 'org.pantsbuild.zinc.Main'
_supports_concurrent_execution = True
  @staticmethod
  def _write_scalac_plugin_info(resources_dir, scalac_plugin_target):
    """Writes the scalac-plugin.xml metadata file that registers a plugin with nsc."""
    scalac_plugin_info_file = os.path.join(resources_dir, _SCALAC_PLUGIN_INFO_FILE)
    with safe_open(scalac_plugin_info_file, 'w') as f:
      f.write(textwrap.dedent("""
        <plugin>
          <name>{}</name>
          <classname>{}</classname>
        </plugin>
      """.format(scalac_plugin_target.plugin, scalac_plugin_target.classname)).strip())
  @staticmethod
  def _write_javac_plugin_info(resources_dir, javac_plugin_target):
    """Writes the ServiceLoader metadata file that registers a javac plugin."""
    javac_plugin_info_file = os.path.join(resources_dir, _JAVAC_PLUGIN_INFO_FILE)
    with safe_open(javac_plugin_info_file, 'w') as f:
      f.write(javac_plugin_target.classname)
  @staticmethod
  def validate_arguments(log, whitelisted_args, args):
    """Validate that all arguments match whitelisted regexes.

    whitelisted_args maps regex pattern strings to a bool indicating whether
    the matched flag consumes a following value argument.
    """
    valid_patterns = {re.compile(p): v for p, v in whitelisted_args.items()}

    def validate(idx):
      # Returns how many positions to advance: 2 when the matched flag takes
      # a value, 1 otherwise. Unrecognized flags only warn and advance by 1.
      arg = args[idx]
      for pattern, has_argument in valid_patterns.items():
        if pattern.match(arg):
          return 2 if has_argument else 1
      log.warn("Zinc argument '{}' is not supported, and is subject to change/removal!".format(arg))
      return 1

    arg_index = 0
    while arg_index < len(args):
      arg_index += validate(arg_index)
  @staticmethod
  def _get_zinc_arguments(settings):
    """Extracts and formats the zinc arguments given in the jvm platform settings.

    This is responsible for the symbol substitution which replaces $JAVA_HOME with the path to an
    appropriate jvm distribution.

    :param settings: The jvm platform settings from which to extract the arguments.
    :type settings: :class:`JvmPlatformSettings`
    """
    zinc_args = [
      '-C-source', '-C{}'.format(settings.source_level),
      '-C-target', '-C{}'.format(settings.target_level),
    ]
    if settings.args:
      settings_args = settings.args
      if any('$JAVA_HOME' in a for a in settings.args):
        # Prefer a strictly matching JVM distribution; fall back to a
        # non-strict (compatible) match if none is found.
        try:
          distribution = JvmPlatform.preferred_jvm_distribution([settings], strict=True)
        except DistributionLocator.Error:
          distribution = JvmPlatform.preferred_jvm_distribution([settings], strict=False)
        logger.debug('Substituting "$JAVA_HOME" with "{}" in jvm-platform args.'
                     .format(distribution.home))
        settings_args = (a.replace('$JAVA_HOME', distribution.home) for a in settings.args)
      zinc_args.extend(settings_args)
    return zinc_args
@classmethod
def compiler_plugin_types(cls):
"""A tuple of target types which are compiler plugins."""
return (AnnotationProcessor, JavacPlugin, ScalacPlugin)
@classmethod
def get_jvm_options_default(cls, bootstrap_option_values):
return ('-Dfile.encoding=UTF-8', '-Dzinc.analysis.cache.limit=1000',
'-Djava.awt.headless=true', '-Xmx2g')
@classmethod
def get_args_default(cls, bootstrap_option_values):
return ('-C-encoding', '-CUTF-8', '-S-encoding', '-SUTF-8', '-S-g:vars')
@classmethod
def get_warning_args_default(cls):
return ('-C-deprecation', '-C-Xlint:all', '-C-Xlint:-serial', '-C-Xlint:-path',
'-S-deprecation', '-S-unchecked', '-S-Xlint')
@classmethod
def get_no_warning_args_default(cls):
return ('-C-nowarn', '-C-Xlint:none', '-S-nowarn', '-S-Xlint:none', )
@classmethod
def get_fatal_warnings_enabled_args_default(cls):
return ('-S-Xfatal-warnings', '-C-Werror')
@classmethod
def get_fatal_warnings_disabled_args_default(cls):
return ()
@classmethod
def register_options(cls, register):
super(BaseZincCompile, cls).register_options(register)
# TODO: disable by default because it breaks dependency parsing:
# https://github.com/pantsbuild/pants/issues/2224
# ...also, as of sbt 0.13.9, it is significantly slower for cold builds.
register('--name-hashing', advanced=True, type=bool, fingerprint=True,
help='Use zinc name hashing.')
register('--whitelisted-args', advanced=True, type=dict,
default={
'-S.*': False,
'-C.*': False,
'-file-filter': True,
'-msg-filter': True,
},
help='A dict of option regexes that make up pants\' supported API for zinc. '
'Options not listed here are subject to change/removal. The value of the dict '
'indicates that an option accepts an argument.')
register('--incremental', advanced=True, type=bool, default=True,
help='When set, zinc will use sub-target incremental compilation, which dramatically '
'improves compile performance while changing large targets. When unset, '
'changed targets will be compiled with an empty output directory, as if after '
'running clean-all.')
# TODO: Defaulting to false due to a few upstream issues for which we haven't pulled down fixes:
# https://github.com/sbt/sbt/pull/2085
# https://github.com/sbt/sbt/pull/2160
register('--incremental-caching', advanced=True, type=bool,
help='When set, the results of incremental compiles will be written to the cache. '
'This is unset by default, because it is generally a good precaution to cache '
'only clean/cold builds.')
cls.register_jvm_tool(register,
'zinc',
classpath=[
# NB: This is explicitly a `_2.10` JarDependency rather than a
# ScalaJarDependency. The latter would pick up the platform in a users'
# repo, whereas this binary is shaded and independent of the target
# platform version.
JarDependency('org.pantsbuild', 'zinc_2.10', '0.0.3')
],
main=cls._ZINC_MAIN,
custom_rules=[
# The compiler-interface and sbt-interface tool jars carry xsbt and
# xsbti interfaces that are used across the shaded tool jar boundary so
# we preserve these root packages wholesale along with the core scala
# APIs.
Shader.exclude_package('scala', recursive=True),
Shader.exclude_package('xsbt', recursive=True),
Shader.exclude_package('xsbti', recursive=True),
])
def sbt_jar(name, **kwargs):
return JarDependency(org='com.typesafe.sbt', name=name, rev='0.13.9', **kwargs)
cls.register_jvm_tool(register,
'compiler-interface',
classpath=[
sbt_jar(name='compiler-interface',
classifier='sources',
# We just want the single compiler-interface jar and not its
# dep on scala-lang
intransitive=True)
])
cls.register_jvm_tool(register,
'sbt-interface',
classpath=[
sbt_jar(name='sbt-interface',
# We just want the single sbt-interface jar and not its dep
# on scala-lang
intransitive=True)
])
@classmethod
def prepare(cls, options, round_manager):
super(BaseZincCompile, cls).prepare(options, round_manager)
ScalaPlatform.prepare_tools(round_manager)
@property
def incremental(self):
"""Zinc implements incremental compilation.
Setting this property causes the task infrastructure to clone the previous
results_dir for a target into the new results_dir for a target.
"""
return self.get_options().incremental
@property
def cache_incremental(self):
"""Optionally write the results of incremental compiles to the cache."""
return self.get_options().incremental_caching
def __init__(self, *args, **kwargs):
super(BaseZincCompile, self).__init__(*args, **kwargs)
self.set_distribution(jdk=True)
try:
# Zinc uses com.sun.tools.javac.Main for in-process java compilation.
# If not present Zinc attempts to spawn an external javac, but we want to keep
# everything in our selected distribution, so we don't allow it to do that.
self._tools_jar = self.dist.find_libs(['tools.jar'])
except Distribution.Error as e:
raise TaskError(e)
# A directory to contain per-target subdirectories with apt processor info files.
self._processor_info_dir = os.path.join(self.workdir, 'apt-processor-info')
# Validate zinc options.
ZincCompile.validate_arguments(self.context.log, self.get_options().whitelisted_args,
self._args)
def select(self, target):
raise NotImplementedError()
def select_source(self, source_file_path):
raise NotImplementedError()
def create_analysis_tools(self):
return AnalysisTools(self.dist.real_home, ZincAnalysisParser(), ZincAnalysis,
get_buildroot(), self.get_options().pants_workdir)
def zinc_classpath(self):
return self.tool_classpath('zinc') + self._tools_jar
def compiler_classpath(self):
return ScalaPlatform.global_instance().compiler_classpath(self.context.products)
def extra_compile_time_classpath_elements(self):
# Classpath entries necessary for our compiler plugins.
return self.scalac_plugin_jars
def javac_plugin_args(self, exclude):
"""param tuple exclude: names of plugins to exclude, even if requested."""
raise NotImplementedError()
@property
def scalac_plugin_jars(self):
"""The classpath entries for jars containing code for enabled scalac plugins."""
raise NotImplementedError()
@property
def scalac_plugin_args(self):
raise NotImplementedError()
def write_extra_resources(self, compile_context):
"""Override write_extra_resources to produce plugin and annotation processor files."""
target = compile_context.target
if isinstance(target, ScalacPlugin):
self._write_scalac_plugin_info(compile_context.classes_dir, target)
elif isinstance(target, JavacPlugin):
self._write_javac_plugin_info(compile_context.classes_dir, target)
elif isinstance(target, AnnotationProcessor) and target.processors:
processor_info_file = os.path.join(compile_context.classes_dir, _PROCESSOR_INFO_FILE)
self._write_processor_info(processor_info_file, target.processors)
def _write_processor_info(self, processor_info_file, processors):
with safe_open(processor_info_file, 'w') as f:
for processor in processors:
f.write('{}\n'.format(processor.strip()))
def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file,
log_file, settings, fatal_warnings, javac_plugins_to_exclude):
self._verify_zinc_classpath(classpath)
self._verify_zinc_classpath(upstream_analysis.keys())
zinc_args = []
zinc_args.extend([
'-log-level', self.get_options().level,
'-analysis-cache', analysis_file,
'-classpath', ':'.join(classpath),
'-d', classes_output_dir
])
if not self.get_options().colors:
zinc_args.append('-no-color')
if not self.get_options().name_hashing:
zinc_args.append('-no-name-hashing')
if log_file:
zinc_args.extend(['-capture-log', log_file])
zinc_args.extend(['-compiler-interface', self.tool_jar('compiler-interface')])
zinc_args.extend(['-sbt-interface', self.tool_jar('sbt-interface')])
zinc_args.extend(['-scala-path', ':'.join(self.compiler_classpath())])
zinc_args.extend(self.javac_plugin_args(javac_plugins_to_exclude))
zinc_args.extend(self.scalac_plugin_args)
if upstream_analysis:
zinc_args.extend(['-analysis-map',
','.join('{}:{}'.format(*kv) for kv in upstream_analysis.items())])
zinc_args.extend(args)
zinc_args.extend(self._get_zinc_arguments(settings))
if fatal_warnings:
zinc_args.extend(self.get_options().fatal_warnings_enabled_args)
else:
zinc_args.extend(self.get_options().fatal_warnings_disabled_args)
jvm_options = list(self._jvm_options)
zinc_args.extend(sources)
self.log_zinc_file(analysis_file)
if self.runjava(classpath=self.zinc_classpath(),
main=self._ZINC_MAIN,
jvm_options=jvm_options,
args=zinc_args,
workunit_name='zinc',
workunit_labels=[WorkUnitLabel.COMPILER]):
raise TaskError('Zinc compile failed.')
def _verify_zinc_classpath(self, classpath):
def is_outside(path, putative_parent):
return os.path.relpath(path, putative_parent).startswith(os.pardir)
for path in classpath:
if not os.path.isabs(path):
raise TaskError('Classpath entries provided to zinc should be absolute. '
'{} is not.'.format(path))
if is_outside(path, self.get_options().pants_workdir) and is_outside(path, self.dist.home):
raise TaskError('Classpath entries provided to zinc should be in working directory or '
'part of the JDK. {} is not.'.format(path))
if path != os.path.normpath(path):
raise TaskError('Classpath entries provided to zinc should be normalized '
'(i.e. without ".." and "."). {} is not.'.format(path))
def log_zinc_file(self, analysis_file):
self.context.log.debug('Calling zinc on: {} ({})'
.format(analysis_file,
hash_file(analysis_file).upper()
if os.path.exists(analysis_file)
else 'nonexistent'))
class ZincCompile(BaseZincCompile):
  """Compile Scala and Java code using Zinc."""
  # Task name as exposed in the pants goal/task registry.
  _name = 'zinc'
  @classmethod
  def register_options(cls, register):
    super(ZincCompile, cls).register_options(register)
    register('--javac-plugins', advanced=True, type=list, fingerprint=True,
             help='Use these javac plugins.')
    register('--javac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
             help='Map from javac plugin name to list of arguments for that plugin.')
    register('--scalac-plugins', advanced=True, type=list, fingerprint=True,
             help='Use these scalac plugins.')
    register('--scalac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
             help='Map from scalac plugin name to list of arguments for that plugin.')
    # Scalac plugin jars must already be available at compile time, because they need to be listed
    # on the scalac command line. We search for available plugins on the tool classpath provided
    # by //:scalac-plugin-jars. Therefore any in-repo plugins must be published, so they can be
    # pulled in as a tool.
    # TODO: Ability to use built in-repo plugins via their context jars.
    cls.register_jvm_tool(register, 'scalac-plugin-jars', classpath=[])
  @classmethod
  def product_types(cls):
    return ['runtime_classpath', 'classes_by_source', 'product_deps_by_src']
  def select(self, target):
    """Select JVM targets that own .java or .scala sources."""
    # Require that targets are marked for JVM compilation, to differentiate from
    # targets owned by the scalajs contrib module.
    if not isinstance(target, JvmTarget):
      return False
    return target.has_sources('.java') or target.has_sources('.scala')
  def select_source(self, source_file_path):
    """Select individual sources by extension; must agree with select() above."""
    return source_file_path.endswith('.java') or source_file_path.endswith('.scala')
  @memoized_method
  def javac_plugin_args(self, exclude):
    """Return -C-Xplugin flags for all enabled javac plugins not in `exclude`."""
    if not self.get_options().javac_plugins:
      return []
    exclude = exclude or []
    # Allow multiple flags and also comma-separated values in a single flag.
    active_plugins = set([p for val in self.get_options().javac_plugins
                          for p in val.split(',')]).difference(exclude)
    ret = []
    javac_plugin_args = self.get_options().javac_plugin_args
    for name in active_plugins:
      # Note: Args are separated by spaces, and there is no way to escape embedded spaces, as
      # javac's Main does a simple split on these strings.
      plugin_args = javac_plugin_args.get(name, [])
      for arg in plugin_args:
        if ' ' in arg:
          raise TaskError('javac plugin args must not contain spaces '
                          '(arg {} for plugin {})'.format(arg, name))
      ret.append('-C-Xplugin:{} {}'.format(name, ' '.join(plugin_args)))
    return ret
  @memoized_property
  def scalac_plugin_jars(self):
    """The classpath entries for jars containing code for enabled scalac plugins."""
    if self.get_options().scalac_plugins:
      return self.tool_classpath('scalac-plugin-jars')
    else:
      return []
  @memoized_property
  def scalac_plugin_args(self):
    """Return -S-Xplugin/-S-P flags for the requested scalac plugins and their args."""
    if not self.get_options().scalac_plugins:
      return []
    scalac_plugin_args = self.get_options().scalac_plugin_args
    active_plugins = self._find_scalac_plugins()
    ret = []
    for name, jar in active_plugins.items():
      ret.append('-S-Xplugin:{}'.format(jar))
      for arg in scalac_plugin_args.get(name, []):
        ret.append('-S-P:{}:{}'.format(name, arg))
    return ret
  def _find_scalac_plugins(self):
    """Returns a map from plugin name to plugin jar."""
    # Allow multiple flags and also comma-separated values in a single flag.
    plugin_names = set([p for val in self.get_options().scalac_plugins for p in val.split(',')])
    plugins = {}
    buildroot = get_buildroot()
    for jar in self.scalac_plugin_jars:
      with open_zip(jar, 'r') as jarfile:
        try:
          with closing(jarfile.open(_SCALAC_PLUGIN_INFO_FILE, 'r')) as plugin_info_file:
            plugin_info = ElementTree.parse(plugin_info_file).getroot()
            if plugin_info.tag != 'plugin':
              raise TaskError(
                'File {} in {} is not a valid scalac plugin descriptor'.format(
                  _SCALAC_PLUGIN_INFO_FILE, jar))
            name = plugin_info.find('name').text
            if name in plugin_names:
              if name in plugins:
                raise TaskError('Plugin {} defined in {} and in {}'.format(name, plugins[name], jar))
              # It's important to use relative paths, as the compiler flags get embedded in the zinc
              # analysis file, and we port those between systems via the artifact cache.
              plugins[name] = os.path.relpath(jar, buildroot)
        except KeyError:
          # Jars without a scalac-plugin.xml entry are simply not plugins; skip them.
          pass
    unresolved_plugins = plugin_names - set(plugins.keys())
    if unresolved_plugins:
      raise TaskError('Could not find requested plugins: {}'.format(list(unresolved_plugins)))
    return plugins
|
lawnmowerlatte/hyper | refs/heads/development | hyper/ssl_compat.py | 3 | # -*- coding: utf-8 -*-
"""
hyper/ssl_compat
~~~~~~~~~
Shoves pyOpenSSL into an API that looks like the standard Python 3.x ssl
module.
Currently exposes exactly those attributes, classes, and methods that we
actually use in hyper (all method signatures are complete, however). May be
expanded to something more general-purpose in the future.
"""
try:
import StringIO as BytesIO
except ImportError:
from io import BytesIO
import errno
import socket
import time
from OpenSSL import SSL as ossl
from service_identity.pyopenssl import verify_hostname as _verify
CERT_NONE = ossl.VERIFY_NONE
CERT_REQUIRED = ossl.VERIFY_PEER | ossl.VERIFY_FAIL_IF_NO_PEER_CERT
_OPENSSL_ATTRS = dict(
OP_NO_COMPRESSION='OP_NO_COMPRESSION',
PROTOCOL_TLSv1_2='TLSv1_2_METHOD',
PROTOCOL_SSLv23='SSLv23_METHOD',
)
for external, internal in _OPENSSL_ATTRS.items():
value = getattr(ossl, internal, None)
if value:
locals()[external] = value
# TODO: Find out the names of these other flags.
# Bit 31 plus bits 0-9, mirroring the stdlib ssl module's OP_ALL bug-workaround mask.
OP_ALL = 1 << 31
for _flag_bit in range(10):
    OP_ALL |= 1 << _flag_bit
HAS_NPN = True
def _proxy(method):
def inner(self, *args, **kwargs):
return getattr(self._conn, method)(*args, **kwargs)
return inner
# Referenced in hyper/http20/connection.py. These values come
# from the python ssl package, and must be defined in this file
# for hyper to work in python versions <2.7.9
SSL_ERROR_WANT_READ = 2
SSL_ERROR_WANT_WRITE = 3
# TODO missing some attributes
class SSLError(OSError):
    """Base SSL failure, mirroring the stdlib ``ssl.SSLError``."""
    pass
class CertificateError(SSLError):
    """Certificate validation failure, mirroring the stdlib ``ssl.CertificateError``."""
    pass
def verify_hostname(ssl_sock, server_hostname):
    """
    A method nearly compatible with the stdlib's match_hostname.
    """
    hostname = server_hostname
    if isinstance(hostname, bytes):
        hostname = hostname.decode('ascii')
    return _verify(ssl_sock._conn, hostname)
class SSLSocket(object):
    """A socket-like object wrapping a pyOpenSSL Connection.

    Emulates the subset of the stdlib ``ssl.SSLSocket`` API that hyper uses.
    """

    # Seconds before a retried SSL call gives up, and the sleep between retries.
    SSL_TIMEOUT = 3
    SSL_RETRY = .01

    def __init__(self, conn, server_side, do_handshake_on_connect,
                 suppress_ragged_eofs, server_hostname, check_hostname):
        self._conn = conn
        self._do_handshake_on_connect = do_handshake_on_connect
        self._suppress_ragged_eofs = suppress_ragged_eofs
        self._check_hostname = check_hostname
        if server_side:
            self._conn.set_accept_state()
        else:
            if server_hostname:
                # SNI: send the requested hostname in the ClientHello.
                self._conn.set_tlsext_host_name(
                    server_hostname.encode('utf-8')
                )
            self._server_hostname = server_hostname
            # FIXME does this override do_handshake_on_connect=False?
            self._conn.set_connect_state()
        if self.connected and self._do_handshake_on_connect:
            self.do_handshake()

    @property
    def connected(self):
        """True when the underlying transport has a peer (getpeername succeeds)."""
        try:
            self._conn.getpeername()
        except socket.error as e:
            if e.errno != errno.ENOTCONN:
                # It's an exception other than the one we expected if we're not
                # connected.
                raise
            return False
        return True

    # Lovingly stolen from CherryPy
    # (http://svn.cherrypy.org/tags/cherrypy-3.2.1/cherrypy/wsgiserver/ssl_pyopenssl.py).
    def _safe_ssl_call(self, suppress_ragged_eofs, call, *args, **kwargs):
        """Wrap the given call with SSL error-trapping."""
        start = time.time()
        while True:
            try:
                return call(*args, **kwargs)
            except (ossl.WantReadError, ossl.WantWriteError):
                # Sleep and try again. This is dangerous, because it means
                # the rest of the stack has no way of differentiating
                # between a "new handshake" error and "client dropped".
                # Note this isn't an endless loop: there's a timeout below.
                time.sleep(self.SSL_RETRY)
            except ossl.Error as e:
                if suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                    return b''
                raise socket.error(e.args[0])
            if time.time() - start > self.SSL_TIMEOUT:
                raise socket.timeout('timed out')

    def connect(self, address):
        self._conn.connect(address)
        if self._do_handshake_on_connect:
            self.do_handshake()

    def do_handshake(self):
        self._safe_ssl_call(False, self._conn.do_handshake)
        if self._check_hostname:
            verify_hostname(self, self._server_hostname)

    def recv(self, bufsize, flags=None):
        return self._safe_ssl_call(
            self._suppress_ragged_eofs,
            self._conn.recv,
            bufsize,
            flags
        )

    def recv_into(self, buffer, bufsize=None, flags=None):
        # A temporary recv_into implementation. Should be replaced when
        # PyOpenSSL has merged pyca/pyopenssl#121.
        if bufsize is None:
            bufsize = len(buffer)
        data = self.recv(bufsize, flags)
        data_len = len(data)
        buffer[0:data_len] = data
        return data_len

    def send(self, data, flags=None):
        return self._safe_ssl_call(False, self._conn.send, data, flags)

    def sendall(self, data, flags=None):
        return self._safe_ssl_call(False, self._conn.sendall, data, flags)

    def selected_npn_protocol(self):
        """Return the NPN-negotiated protocol as text, or None."""
        proto = self._conn.get_next_proto_negotiated()
        if isinstance(proto, bytes):
            proto = proto.decode('ascii')
        return proto if proto else None

    def selected_alpn_protocol(self):
        """Return the ALPN-negotiated protocol as text, or None."""
        proto = self._conn.get_alpn_proto_negotiated()
        if isinstance(proto, bytes):
            proto = proto.decode('ascii')
        return proto if proto else None

    def getpeercert(self):
        """Return the peer certificate as a dict emulating ssl.SSLSocket.getpeercert()."""
        def resolve_alias(alias):
            # Map short X509 attribute names to the long names the stdlib
            # reports; unknown attributes pass through unchanged.
            return dict(
                C='countryName',
                ST='stateOrProvinceName',
                L='localityName',
                O='organizationName',
                OU='organizationalUnitName',
                CN='commonName',
            ).get(alias, alias)

        def to_components(name):
            # The stdlib represents an X509 name as a tuple of RDNs, each RDN
            # being a tuple of (attribute, value) pairs. pyOpenSSL's
            # get_components() flattens the RDNs, so each becomes a
            # single-pair tuple here.
            # Bug fix: resolve_alias was previously (mis-parenthesized and)
            # called with both key and value, a TypeError at runtime.
            return tuple(
                [
                    ((resolve_alias(k.decode('utf-8')), v.decode('utf-8')),)
                    for k, v in name.get_components()
                ]
            )
        # The standard getpeercert() takes the nice X509 object tree returned
        # by OpenSSL and turns it into a dict according to some format it seems
        # to have made up on the spot. Here, we do our best to emulate that.
        cert = self._conn.get_peer_certificate()
        result = dict(
            issuer=to_components(cert.get_issuer()),
            subject=to_components(cert.get_subject()),
            # Bug fix: this previously used cert.get_subject(), putting an
            # X509Name object where the stdlib reports an integer version.
            version=cert.get_version(),
            serialNumber=cert.get_serial_number(),
            notBefore=cert.get_notBefore(),
            notAfter=cert.get_notAfter(),
        )
        # TODO extensions, including subjectAltName
        # (see _decode_certificate in _ssl.c)
        return result

    # a dash of magic to reduce boilerplate: plain socket methods are proxied
    # straight through to the wrapped connection.
    methods = ['accept', 'bind', 'close', 'getsockname', 'listen', 'fileno']
    for method in methods:
        locals()[method] = _proxy(method)
class SSLContext(object):
    """A pyOpenSSL-backed stand-in for the stdlib ``ssl.SSLContext``."""
    def __init__(self, protocol):
        self.protocol = protocol
        self._ctx = ossl.Context(protocol)
        self.options = OP_ALL
        self.check_hostname = False
        self.npn_protos = []
    @property
    def options(self):
        return self._options
    @options.setter
    def options(self, value):
        # Keep the python-visible value and the OpenSSL context in sync.
        self._options = value
        self._ctx.set_options(value)
    @property
    def verify_mode(self):
        return self._ctx.get_verify_mode()
    @verify_mode.setter
    def verify_mode(self, value):
        # TODO verify exception is raised on failure
        self._ctx.set_verify(
            value, lambda conn, cert, errnum, errdepth, ok: ok
        )
    def set_default_verify_paths(self):
        self._ctx.set_default_verify_paths()
    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        # TODO factor out common code
        if cafile is not None:
            cafile = cafile.encode('utf-8')
        if capath is not None:
            capath = capath.encode('utf-8')
        self._ctx.load_verify_locations(cafile, capath)
        if cadata is not None:
            # In-memory CA data is loaded via a file-like wrapper.
            self._ctx.load_verify_locations(BytesIO(cadata))
    def load_cert_chain(self, certfile, keyfile=None, password=None):
        self._ctx.use_certificate_file(certfile)
        if password is not None:
            self._ctx.set_passwd_cb(
                lambda max_length, prompt_twice, userdata: password
            )
        # Like the stdlib: the key may live in the cert file itself.
        self._ctx.use_privatekey_file(keyfile or certfile)
    def set_npn_protocols(self, protocols):
        self.protocols = list(map(lambda x: x.encode('ascii'), protocols))
        def cb(conn, protos):
            # Detect the overlapping set of protocols.
            overlap = set(protos) & set(self.protocols)
            # Return the first protocol in our preference list that the peer
            # also offered, or an empty selection when there is no overlap.
            for p in self.protocols:
                if p in overlap:
                    return p
            else:
                return b''
        self._ctx.set_npn_select_callback(cb)
    def set_alpn_protocols(self, protocols):
        protocols = list(map(lambda x: x.encode('ascii'), protocols))
        self._ctx.set_alpn_protos(protocols)
    def wrap_socket(self,
                    sock,
                    server_side=False,
                    do_handshake_on_connect=True,
                    suppress_ragged_eofs=True,
                    server_hostname=None):
        conn = ossl.Connection(self._ctx, sock)
        return SSLSocket(conn, server_side, do_handshake_on_connect,
                         suppress_ragged_eofs, server_hostname,
                         # TODO what if this is changed after the fact?
                         self.check_hostname)
|
lukaszkostrzewa/wiki | refs/heads/master | maintenance/cssjanus/csslex.py | 172 | #!/usr/bin/python
#
# Copyright 2007 Google Inc. All Rights Reserved.
"""CSS Lexical Grammar rules.
CSS lexical grammar from http://www.w3.org/TR/CSS21/grammar.html
"""
__author__ = ['elsigh@google.com (Lindsey Simon)',
              'msamuel@google.com (Mike Samuel)']
# public symbols
# NOTE(review): KEYWORD below is intentionally (?) not exported here.
__all__ = [ "NEWLINE", "HEX", "NON_ASCII", "UNICODE", "ESCAPE", "NMSTART", "NMCHAR", "STRING1", "STRING2", "IDENT", "NAME", "HASH", "NUM", "STRING", "URL", "SPACE", "WHITESPACE", "COMMENT", "QUANTITY", "PUNC" ]
# The comments below are mostly copied verbatim from the grammar.
# "@import" {return IMPORT_SYM;}
# "@page" {return PAGE_SYM;}
# "@media" {return MEDIA_SYM;}
# "@charset" {return CHARSET_SYM;}
KEYWORD = r'(?:\@(?:import|page|media|charset))'
# nl \n|\r\n|\r|\f ; a newline
NEWLINE = r'\n|\r\n|\r|\f'
# h [0-9a-f] ; a hexadecimal digit
HEX = r'[0-9a-f]'
# nonascii [\200-\377]
NON_ASCII = r'[\200-\377]'
# unicode \\{h}{1,6}(\r\n|[ \t\r\n\f])?
UNICODE = r'(?:(?:\\' + HEX + r'{1,6})(?:\r\n|[ \t\r\n\f])?)'
# escape {unicode}|\\[^\r\n\f0-9a-f]
ESCAPE = r'(?:' + UNICODE + r'|\\[^\r\n\f0-9a-f])'
# nmstart [_a-z]|{nonascii}|{escape}
NMSTART = r'(?:[_a-z]|' + NON_ASCII + r'|' + ESCAPE + r')'
# nmchar [_a-z0-9-]|{nonascii}|{escape}
NMCHAR = r'(?:[_a-z0-9-]|' + NON_ASCII + r'|' + ESCAPE + r')'
# ident -?{nmstart}{nmchar}*
IDENT = r'-?' + NMSTART + NMCHAR + '*'
# name {nmchar}+
NAME = NMCHAR + r'+'
# hash
HASH = r'#' + NAME
# string1 \"([^\n\r\f\\"]|\\{nl}|{escape})*\" ; "string"
# NOTE(review): the patterns below are a simplification of the grammar rule
# quoted above: any backslash-escaped character is accepted.
STRING1 = r'"(?:[^\"\\]|\\.)*"'
# string2 \'([^\n\r\f\\']|\\{nl}|{escape})*\' ; 'string'
STRING2 = r"'(?:[^\'\\]|\\.)*'"
# string {string1}|{string2}
STRING = '(?:' + STRING1 + r'|' + STRING2 + ')'
# num [0-9]+|[0-9]*"."[0-9]+
NUM = r'(?:[0-9]*\.[0-9]+|[0-9]+)'
# s [ \t\r\n\f]
SPACE = r'[ \t\r\n\f]'
# w {s}*
WHITESPACE = '(?:' + SPACE + r'*)'
# url special chars
URL_SPECIAL_CHARS = r'[!#$%&*-~]'
# url chars ({url_special_chars}|{nonascii}|{escape})*
URL_CHARS = r'(?:%s|%s|%s)*' % (URL_SPECIAL_CHARS, NON_ASCII, ESCAPE)
# url
URL = r'url\(%s(%s|%s)%s\)' % (WHITESPACE, STRING, URL_CHARS, WHITESPACE)
# comments
# see http://www.w3.org/TR/CSS21/grammar.html
COMMENT = r'/\*[^*]*\*+([^/*][^*]*\*+)*/'
# {E}{M} {return EMS;}
# {E}{X} {return EXS;}
# {P}{X} {return LENGTH;}
# {C}{M} {return LENGTH;}
# {M}{M} {return LENGTH;}
# {I}{N} {return LENGTH;}
# {P}{T} {return LENGTH;}
# {P}{C} {return LENGTH;}
# {D}{E}{G} {return ANGLE;}
# {R}{A}{D} {return ANGLE;}
# {G}{R}{A}{D} {return ANGLE;}
# {M}{S} {return TIME;}
# {S} {return TIME;}
# {H}{Z} {return FREQ;}
# {K}{H}{Z} {return FREQ;}
# % {return PERCENTAGE;}
UNIT = r'(?:em|ex|px|cm|mm|in|pt|pc|deg|rad|grad|ms|s|hz|khz|%)'
# {num}{UNIT|IDENT} {return NUMBER;}
QUANTITY = '%s(?:%s%s|%s)?' % (NUM, WHITESPACE, UNIT, IDENT)
# "<!--" {return CDO;}
# "-->" {return CDC;}
# "~=" {return INCLUDES;}
# "|=" {return DASHMATCH;}
# {w}"{" {return LBRACE;}
# {w}"+" {return PLUS;}
# {w}">" {return GREATER;}
# {w}"," {return COMMA;}
PUNC = r'<!--|-->|~=|\|=|[\{\+>,:;]'
|
felipenaselva/repo.felipe | refs/heads/master | plugin.program.addoninstaller/defaultORIG.py | 8 | # TVADDONS.ag / TVADDONS.ag - Addon Installer - Module By: Blazetamer (2013-2014)
# Thanks to Blazetamer, Eleazar Coding, Showgun, ....
siteTitle="TVADDONS.AG"; #siteTitle="XBMCHUB.COM";
addon_id='plugin.program.addoninstaller'; import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,os,sys,time,shutil,downloader,extract
base_url2='http://addons.tvaddons.ag'; #'http://addons.xbmchub.com'
tribeca_url2='http://tribeca.tvaddons.ag/tools/'; #tribeca_url2='http://tribeca.xbmchub.com/tools/';
tribeca_url=tribeca_url2+'installer/sources/'; base_url=base_url2+'/';
try: from addon.common.addon import Addon
except:
try: from t0mm0.common.addon import Addon
except: from t0mm0_common_addon import Addon
addon=Addon(addon_id,sys.argv)
try: from addon.common.net import Net
except:
try: from t0mm0.common.net import Net
except: from t0mm0_common_net import Net
net=Net(); settings=xbmcaddon.Addon(id=addon_id); ADDON=xbmcaddon.Addon(id=addon_id);
artPath=xbmc.translatePath(os.path.join('special://home','addons',addon_id,'resources','art2/'));
def getArtwork(n):
    """Resolve the art2 image file `n` inside this addon's install directory."""
    return xbmc.translatePath(os.path.join('special://home', 'addons', addon_id, 'art2', n))
def getArtworkJ(n):
    """Like getArtwork, but appends the .jpg extension to `n`."""
    return xbmc.translatePath(os.path.join('special://home', 'addons', addon_id, 'art2', n + '.jpg'))
def catArtwork(n): return 'http://addons.tvaddons.ag/images/categories/%s.png'%n
mainPath=xbmc.translatePath(os.path.join('special://home','addons',addon_id));
fanart=xbmc.translatePath(os.path.join(mainPath,'fanart.jpg')); #fanart=artPath+'fanart.jpg'; #fanart=xbmc.translatePath(os.path.join('special://home','addons',addon_id+'/'))+'fanart.jpg'; #fanart=getArtworkJ('fanart')
iconart=xbmc.translatePath(os.path.join(mainPath,'icon.png')); #print ['fanart',fanart,'iconart',iconart];
TxtAddonUpdater='Addon Updater'; ImgAddonUpdater=getArtworkJ('autoupdater');
#****************************************************************
def MAININDEX():
    """Build the addon's root menu: ensure the hub repo/notification addons are
    installed, then add one directory entry per enabled category setting."""
    hubpath=xbmc.translatePath(os.path.join('special://home','addons','repository.xbmchub'))
    hubnotespath=xbmc.translatePath(os.path.join('special://home','addons','plugin.program.xbmchub.notifications'))
    try:
        # Best-effort bootstrap of the hub repository + notifications addon;
        # failures (e.g. no network) are deliberately ignored.
        if not os.path.exists(hubpath): HUBINSTALL('TVADDONS.AG.Repository','http://offshoregit.com/xbmchub/xbmc-hub-repo/raw/master/repository.xbmchub/repository.xbmchub-1.0.6.zip','','addon','none')
        if not os.path.exists(hubnotespath): HUBINSTALL('TVADDONS.AG.Notifications','http://offshoregit.com/xbmchub/xbmc-hub-repo/raw/master/plugin.program.xbmchub.notifications/plugin.program.xbmchub.notifications-1.0.2.zip','','addon','none')
    except: pass
    addDir('Search by: Addon/Author',base_url+'search/?keyword=','searchaddon',getArtworkJ('Search')) #catArtwork('webinterface')) #
    # Each menu entry below is gated on the corresponding user setting.
    #if settings.getSetting('newest')=='true': addDir('Newest Addons',base_url,'innertabs',getArtworkJ('NewestAddons'))
    #if settings.getSetting('updated')=='true': addDir('Recently Updated',base_url,'innertabs',getArtworkJ('RecentlyUpdated'))
    #if settings.getSetting('toprepositories')=='true': addDir('Top Developers',base_url,'toprepolist',getArtworkJ('TopDevs'))
    if settings.getSetting('featured')=='true': addDir('Featured Addons',base_url+'category/featured/','addonlist',getArtworkJ('Featuredaddons')) #catArtwork('featured')) #
    if settings.getSetting('video')=='true': addDir('Video Addons',base_url+'category/video/','addonlist',getArtworkJ('VideoAddons')) #catArtwork('video')) #
    if settings.getSetting('audio')=='true': addDir('Audio Addons',base_url+'category/audio/','addonlist',getArtworkJ('AudioAddons')) #catArtwork('audio')) #
    #if settings.getSetting('picture')=='true': addDir('Picture Addons',base_url+'category/pictures/','addonlist',getArtworkJ('PictureAddons')) #catArtwork('pictures')) #
    if settings.getSetting('program')=='true': addDir('Program Addons',base_url+'category/programs/','addonlist',getArtworkJ('ProgramAddons')) #catArtwork('programs')) #
    if settings.getSetting('services')=='true': addDir('Service Addons',base_url+'category/services/','addonlist',getArtworkJ('ServiceAddons')) #catArtwork('services')) #
    if settings.getSetting('repositories')=='true': addDir('Repositories',base_url+'category/repositories/','addonlist',getArtworkJ('Repositories')) #catArtwork('repositories')) #
    #if settings.getSetting('world')=='true': addDir('World Section',tribeca_url+'world.php','worldlist',getArtworkJ('WorldSection')) #catArtwork('metadata')) #
    if settings.getSetting('world')=='true': addDir('World Section',base_url+'category/international/repositories','interlist',getArtworkJ('WorldSection')) #catArtwork('video')) #
    if settings.getSetting('adult')=='true': addDir('Adult Addons',tribeca_url+'xxx.php','adultlist',getArtworkJ('AdultAddons')) #catArtwork('pictures')) #
    ForPrimeWire();
    #addDir(TxtAddonUpdater,base_url+'category/featured/','autoupdate',ImgAddonUpdater);
    ##addDir(TxtAddonUpdater,'...','autoupdate2',ImgAddonUpdater);
    addDir('Installer Settings','none','settings',getArtworkJ('InstallerSettings')); #catArtwork('programs')) #
    AUTO_VIEW('addons')
def INTERNATIONAL(url):
    """List the international/world section: scrape `url` and add one entry per
    item, plus a 'Next Page' entry when pagination is detected."""
    # Relative paths from scraped pages are made absolute against the site root.
    if not '://' in url: url=base_url2+url
    link=OPEN_URL(url); match=GetListItems(link); CMi=[]; #AUTO_VIEW('list');
    #CMi.append(['Information',"XBMC.Action(Info)"]);
    # Repository pages drill into 'interrepolist'; everything else lists addons.
    if 'repository' in url: ToMode='interrepolist'
    else: ToMode='addonlist'
    for url,image,name, in match: iconimage=base_url+image; add2HELPDir(name,url,ToMode,iconimage,fanart,'','addon',CMi,True)
    nmatch=GetListNextPage(link);
    if len(nmatch) > 0: addDir('Next Page',(nmatch[0]),'interrepolist',getArtworkJ('NextPage'))
    AUTO_VIEW('list')
    return
def nolines(t): return t.replace('\r','').replace('\n','').replace('\t','').replace('\a','')
def ForPrimeWire():
    """Fetch the wizard links file and, when a 1CHANNEL entry is present, add a
    wizard-installer menu entry for it."""
    html=nolines(OPEN_URL(tribeca_url2+'wizard/links.txt')); #print html
    if ("1CHANNEL" in html.upper()):
        # Only the first matching name/url/img/fanart tuple is used.
        match=re.compile('name="(1CHANNEL.*?)"\s*url="(.+?)"\s*img="(.+?)"\s*fanart="(.+?)"', re.IGNORECASE).findall(html)[0]; #print match
        if len(match) > 0: (name2,url2,img2,fanart2)=match; addDir(name2,url2,'WizardTypeInstaller',img2);
def WizardTypeInstaller(name, url):
    """Install a wizard-style zip: extracted over the whole XBMC home folder."""
    MyAddonInstaller(name, url, xbmc.translatePath(os.path.join('special://', 'home')))
def AddonTypeInstaller(name, url):
    """Install a regular addon zip: extracted into the addons folder."""
    MyAddonInstaller(name, url, xbmc.translatePath(os.path.join('special://', 'home', 'addons')))
def MyAddonInstaller(name,url,ToPath):
if len(ToPath)==0: return
path=xbmc.translatePath(os.path.join('special://home','addons','packages'))
dp=xbmcgui.DialogProgress(); dp.create("Addon Installer","Downloading ",'','Please Wait')
lib=os.path.join(path,name+'.zip')
try: os.remove(lib)
except: pass
url=FireDrive(url)
if '[error]' in url: print url; dialog=xbmcgui.Dialog(); dialog.ok("Error!",url); return
else: print url
downloader.download(url,lib,dp)
addonfolder=ToPath
time.sleep(2)
dp.update(0,"","Extracting Zip Please Wait")
print '======================================='; print addonfolder; print '======================================='
extract.all(lib,addonfolder,dp)
time.sleep(2)
xbmc.executebuiltin("XBMC.UpdateLocalAddons()");
dialog=xbmcgui.Dialog(); dialog.ok("Addon Instaler",name+" has been installed","","")
##
#****************************************************************
def AutoUpdate(url): #Featured Addons
    """Scan an add-on feed at *url* and auto-update any entry already installed.

    Three feed formats are recognised by URL shape (featured PHP feed,
    repo/xxx PHP feed, or a site category page).  For every entry whose addon
    folder and addon.xml exist locally, AutoUpdate_ADDONINDEX() is called to
    compare versions and reinstall when the remote is newer.
    """
    print url; link=nolines(OPEN_URL(url))
    if "/featured-addons.php" in url:
        # feed format: name="..."url="..."
        match=re.compile('name="(.+?)"url="(.+?)"').findall(link)
        for name,url2 in match:
            # add-on id = last path component of the entry URL
            itemID=url2[0:-1].split('/')[-1]; print 'checking for addon: '+itemID;
            path=xbmc.translatePath(os.path.join('special://home/addons',itemID));
            AddonDotXml=xbmc.translatePath(os.path.join('special://home/addons',itemID,'addon.xml'));
            if (os.path.exists(path)==True) and (os.path.isfile(AddonDotXml)==True): print 'path and addon.xml found for: '+itemID; AutoUpdate_ADDONINDEX(name,url2,'addon',itemID);
            #add2HELPDir(name,url,'addonindex',fanart,fanart,'','addon')
    elif ("/featured-repos.php" in url) or ("/xxx.php" in url):
        # feed format: PHP-array style 'name' => '...', 'downloadUrl' => '...'
        match=re.compile("'name' => '(.+?)'.+?downloadUrl' => '(.+?)'").findall(link)
        for name,url2 in match:
            lang='Featured'; name=name.replace('&#8217;',"'"); name=name.capitalize();
            itemID=url2[0:-1].split('/')[-1]; print 'checking for addon: '+itemID;
            path=xbmc.translatePath(os.path.join('special://home/addons',itemID));
            AddonDotXml=xbmc.translatePath(os.path.join('special://home/addons',itemID,'addon.xml'));
            if (os.path.exists(path)==True) and (os.path.isfile(AddonDotXml)==True): print 'path and addon.xml found for: '+itemID; AutoUpdate_ADDONINDEX(name,url2,'addon',itemID);
    elif ("/category/programs/" in url) or ("/category/video/" in url) or ("/category/audio/" in url) or ("/category/" in url):
        # NOTE: the final "/category/" test subsumes the earlier three; kept for clarity.
        #match=re.compile('<li><a href="(.+?)"><span class="thumbnail"><img src="(.+?)" width="100%" alt="(.+?)"').findall(link)
        match=re.compile('<li><a href="(.+?)"><span class="thumbnail"><img src="(.+?)" (?:width="100%" |class="pic" )?alt="(.+?)"').findall(link)
        for url2,image,name, in match:
            iconimage=base_url+image;
            itemID=url2[0:-1].split('/')[-1]; print 'checking for addon: '+itemID;
            path=xbmc.translatePath(os.path.join('special://home/addons',itemID));
            AddonDotXml=xbmc.translatePath(os.path.join('special://home/addons',itemID,'addon.xml'));
            if (os.path.exists(path)==True) and (os.path.isfile(AddonDotXml)==True): print 'path and addon.xml found for: '+itemID; AutoUpdate_ADDONINDEX(name,url2,'addon',itemID);
    else: print "url type mismatch in attempt to catch the right items match regex string."; return
def AutoUpdate_ADDONINDEX(name,url,filetype,itemID):
    """Compare the locally installed version of *itemID* with the version on
    its detail page at *url*, and reinstall via ADDONINSTALL() when outdated.
    """
    description='No Description available'; print [name,url,filetype,itemID];
    path=xbmc.translatePath(os.path.join('special://home/addons',itemID))
    AddonDotXml=xbmc.translatePath(os.path.join('special://home/addons',itemID,'addon.xml'))
    LocalAddonDotXml=nolines(File_Open(AddonDotXml));
    # first version= attribute after the <addon tag is the add-on's own version
    LocalVersion=(re.compile('version=["\']([0-9a-zA-Z\.\-]+)["\']\s*').findall(LocalAddonDotXml.split('<addon')[1])[0]).strip(); print "LocalVersion: "+LocalVersion;
    try: link=OPEN_URL(url);
    except: print "failed to load url: "+url; return
    # scrape the detail page for download link, version, repo, artwork, author
    itemDirectDownload=re.compile('Direct Download:</strong><br /><a href="(.+?)"').findall(link)[0]
    itemAddonVersion=(re.compile('Version:</strong>(.+?)<br').findall(link)[0]).strip()
    print "RemoteVersion: "+itemAddonVersion;
    itemRepository=re.compile('Repository:</strong> <a href="(.+?)"').findall(link)[0]
    itemImage=base_url+(re.compile('<img src="(.+?)" alt=".+?" class="pic" /></span>').findall(link)[0])
    itemAuthor=re.compile('Author:</strong> <a href=".+?">(.+?)</a>').findall(link)[0]
    itemAddonName=re.compile('class="pic" /></span>\r\n\t\t\t\t<h2>(.+?)</h2>').findall(link)[0]
    ## ### ##
    ##DO SOMETHING HERE##
    #if not LocalVersion==itemAddonVersion:
    cV=compareVersions(LocalVersion,itemAddonVersion); print cV;
    if cV=='do_upgrade':
        try:
            # Auto=True -> ADDONINSTALL shows a toast instead of a modal dialog
            ADDONINSTALL(itemAddonName,itemDirectDownload,description,filetype,itemRepository,True,itemAddonVersion,LocalVersion)
            addHELPDir('AutoUpdated: '+itemAddonName+' - v'+itemAddonVersion,itemDirectDownload,'addoninstall',itemImage,fanart,description,'addon',itemRepository,itemAddonVersion,itemAuthor)
        except: print "error while trying to install: "+itemAddonName; return
    ## ### ##
## ### ##
def compareVersions(LocalV,RemoteV):
    """Compare two dotted version strings.

    Returns one of:
      'are_equal'                 -- the strings are identical
      'local_greater_than_remote' -- installed version is newer (skip update)
      'do_upgrade'                -- remote is newer, or versions not comparable
    """
    if LocalV==RemoteV: return 'are_equal'
    if ('.' in LocalV) and ('.' in RemoteV):
        # BUG FIX: the original compared components as *strings* (so '9' > '10')
        # and returned 'local_greater_than_remote' if ANY of the first four
        # components compared greater, even when an earlier component was
        # smaller (e.g. local 1.2 vs remote 2.1).  Compare numerically and
        # lexicographically instead.
        try:
            localParts=[int(p) for p in LocalV.split('.')]
            remoteParts=[int(p) for p in RemoteV.split('.')]
        except ValueError:
            return 'do_upgrade'  # non-numeric component: err on the side of upgrading
        if localParts > remoteParts: return 'local_greater_than_remote'
    return 'do_upgrade'
#****************************************************************
def GetListItems(link):
    """Extract (url, image, name) tuples for every add-on entry in *link*.

    Returns an empty list when nothing matches or *link* is not a string.
    """
    item_re = re.compile('<li><a href="(.+?)"><span class="thumbnail"><img src="(.+?)" (?:width="100%" |class="pic" )?alt="(.+?)"')
    try:
        return item_re.findall(link)
    except:
        return []
def GetListNextPage(link):
    """Return the 'next page' URL(s) found in *link*, or [] when absent/invalid."""
    next_re = re.compile('"page last" href="(.+?)"><dfn title="next Page">')
    try:
        return next_re.findall(link)
    except:
        return []
def List_Addons_Inner_Tabs(Name,url):
    """List the 'Newest' or 'Updated' tab of the site's front page.

    *Name* selects which tab's HTML fragment to parse; entries are added in
    mode 'addonindex' with their release date appended to the label.
    """
    if not '://' in url: url=base_url2+url
    link=OPEN_URL(url); print 'url: '+url; print 'length of html: '+str(len(link));
    # narrow the HTML down to the requested tab's <div>
    if 'newest' in Name.lower(): link=link.split('<div class="tabs-inner" id="newest">' )[-1].split('</div>')[0]
    elif 'updated' in Name.lower(): link=link.split('<div class="tabs-inner" id="updated">')[-1].split('</div>')[0]
    match=re.compile("<li><a href='(.+?)'><img src='(.+?)' width='60' height='60' alt='(.+?)' class='pic alignleft' /><b>\s*(.+?)\s*</b></a><span class='date'>\s*(\d\d\d\d-\d\d-\d\d)\s*</span></li").findall(link)
    for url,image,name,name2,released in match: iconimage=base_url+image; add2HELPDir('[COLOR FF0077D7]%s [COLOR FFFFFFFF][[COLOR FFFFFFFF]%s[/COLOR]][/COLOR][/COLOR]'%(name,released),url,'addonindex',iconimage,fanart,'','addon')
    AUTO_VIEW('list')
def List_Inter_Addons(url):
    """List international repository add-ons from a category page.

    NOTE(review): near-duplicate of List_Addons() below, differing only in the
    final AUTO_VIEW content type ('list' here vs 'addons' there).
    """
    if not '://' in url: url=base_url2+url
    link=OPEN_URL(url); match=GetListItems(link); CMi=[]; #AUTO_VIEW('list');
    #CMi.append(['Information',"XBMC.Action(Info)"]);
    if '/category/repositories/' in url: ToMode='addonlist'
    else: ToMode='addonindex'
    for url,image,name, in match: iconimage=base_url+image; add2HELPDir(name,url,ToMode,iconimage,fanart,'','addon',CMi,True)
    nmatch=GetListNextPage(link);
    if len(nmatch) > 0: addDir('Next Page',(nmatch[0]),'addonlist',getArtworkJ('NextPage'))
    AUTO_VIEW('list')
def List_Addons(url):
    """List add-ons from a category/search/author page on the site.

    Repository-category pages stay in 'addonlist' mode; everything else links
    to the add-on detail page mode ('addonindex').
    """
    if not '://' in url: url=base_url2+url
    link=OPEN_URL(url); match=GetListItems(link); CMi=[]; #AUTO_VIEW('list');
    #CMi.append(['Information',"XBMC.Action(Info)"]);
    if '/category/repositories/' in url: ToMode='addonlist'
    else: ToMode='addonindex'
    for url,image,name, in match: iconimage=base_url+image; add2HELPDir(name,url,ToMode,iconimage,fanart,'','addon',CMi,True)
    nmatch=GetListNextPage(link);
    if len(nmatch) > 0: addDir('Next Page',(nmatch[0]),'addonlist',getArtworkJ('NextPage'))
    AUTO_VIEW('addons')
    #AUTO_VIEW('list')
def List_Repo_Top_Developers(url):
    """List the site's 'Top developers' sidebar; each entry opens that
    author's add-on list (mode 'addonlist') and shows their upload count.
    """
    if not '://' in url: url=base_url2+url
    link=OPEN_URL(url); print 'url: '+url; #print link; #return
    # isolate the sidebar widget's <ul> so the regex only sees developer rows
    try: link=link.split('<span class="sidebar-widget-header"><h3 class="sidebar-widget-header">Top developers</h3></span>')[-1]; link=link.split('</ul>')[0];
    except: pass
    # the </li\n\r\a> rewrite inserts separators so the non-greedy regex cannot
    # run across adjacent list items
    match=re.compile("<li><img src='(.+?)' height='20' width='20' alt='(.+?)' /><a href='(.+?)' title='Show all addons from this author'>\s*(.+?)\s+\((\d+)\s+uploads\)</a></li").findall(link.replace('</li>','</li\n\r\a>')); #print match
    for (image,rank,url,name,uploads) in match: iconimage=base_url+image; add2HELPDir("[COLOR FF0077D7]%s [COLOR FFFFFFFF][[COLOR FFFFFFFF]%s[/COLOR]][/COLOR][/COLOR]"%(name,uploads),url,'addonlist',iconimage,fanart,'','addon')
    AUTO_VIEW('list')
#****************************************************************
def List_Tribeca_WorldAlone(url):
    """List stand-alone 'world' add-ons from the tribeca PHP feed at *url*.

    Each entry is added as a directly installable item (mode 'addoninstall')
    with its language appended to the label.
    """
    if '://' not in url:
        url = base_url2 + url
    page = OPEN_URL(url)
    for ch in ('\r', '\n', '\t'):
        page = page.replace(ch, '')
    entries = re.compile("'name' => '(.+?)','language' => '(.+?)'.+?downloadUrl' => '(.+?)'").findall(page)
    if not entries:
        return
    for title, language, download_url in entries:
        language = language.capitalize()
        addHELPDir(title + ' (' + language + ')', download_url, 'addoninstall', '', fanart, '', 'addon', 'none', '', '')
    #AUTO_VIEW('list',50)
def List_Tribeca_WorldList(url):
    """List 'world' add-ons from the tribeca PHP feed, then append the
    stand-alone world list via List_Tribeca_WorldAlone().
    """
    if '://' not in url:
        url = base_url2 + url
    page = OPEN_URL(url)
    for ch in ('\r', '\n', '\t'):
        page = page.replace(ch, '')
    entries = re.compile("'name' => '(.+?)','language' => '(.+?)'.+?downloadUrl' => '(.+?)'").findall(page)
    AUTO_VIEW('list')
    if not entries:
        return
    for title, language, download_url in entries:
        language = language.capitalize()
        addHELPDir(title + ' (' + language + ')', download_url, 'addoninstall', '', fanart, '', 'addon', 'none', '', '')
    List_Tribeca_WorldAlone(tribeca_url + 'world-solo.php')
    AUTO_VIEW('list', 50)
def List_Tribeca_Adult(url):
    """List adult add-ons from the tribeca PHP feed at *url* as directly
    installable items (mode 'addoninstall')."""
    page = OPEN_URL(url)
    for ch in ('\r', '\n', '\t'):
        page = page.replace(ch, '')
    entries = re.compile("'name' => '(.+?)'.+?downloadUrl' => '(.+?)'").findall(page)
    if not entries:
        return
    for title, download_url in entries:
        addHELPDir(title + ' (Adults Only)', download_url, 'addoninstall', '', fanart, '', 'addon', 'none', '', '')
    AUTO_VIEW('list', 50)
#****************************************************************
def ADDONINDEX(name,url,filetype):
    """Show the detail/install page for one add-on.

    Scrapes the add-on's page at *url* for artwork, description, download
    link, author, version and forum URL, then adds a single 'Install ...'
    item (mode 'addoninstall') with context-menu shortcuts.
    """
    link=OPEN_URL(url); description='Description not available at this time'; CMi=[];
    try: ifanart=re.compile('<div id="featured-image">\s*<img width="\d+" height="\d+" src="(cache/images/[0-9A-Za-z]+_fanart.jpg)" class=".*?" alt="" />\s*</div>').findall(link)[0]
    except: ifanart=fanart
    try: iconimage=re.compile('<span class="thumbnail"><img src="(.+?)" alt=".+?" class="pic" /></span>').findall(link)[0]
    except: iconimage=''
    # scraped paths are site-relative; only prefix when not already absolute/local
    if (not '://' in ifanart) and (not artPath in ifanart) and (not mainPath in ifanart): ifanart=base_url+ifanart
    if not '://' in iconimage: iconimage=base_url+iconimage
    print ['ifanart',ifanart,'iconimage',iconimage];
    name=re.compile('class="pic" /></span>[\r\a\n\t]*\s*<h2>(.+?)</h2>').findall(link)[0]; print ['name',name];
    # https github links are downgraded to http (presumably for the downloader) -- verify
    repourl=re.compile('Repository:</strong> <a href="(.+?)"').findall(link)[0]; repourl=repourl.replace('https://github','http://github'); print ['repourl',repourl];
    try: description=re.compile('Description:</h4><p>\s*(.+?)\s*</p>').findall(link.replace('\n','').replace('\t',''))[0]; print ['description',description];
    except: description='No Description available'
    addonurl=re.compile('Download:</strong><br /><a href="(.+?)"').findall(link)[0]; print ['addonurl',addonurl];
    (aurthorUrl,author)=re.compile('Author:</strong> <a href="(.+?)">\s*(.+?)\s*</a>').findall(link)[0]; print ['author',author,'aurthorUrl',aurthorUrl];
    version=re.compile('Version:</strong>\s*(.+?)\s*<br').findall(link)[0]; print ['version',version];
    releaseddate=re.compile('>Released:</strong>\s*(.+?)\s*<').findall(link)[0]; print ['version',version];
    try: forumUrl=re.compile('>Forum:</strong><br /><a href="(.+?)"').findall(link)[0]; print ['version',version];
    except: forumUrl=''
    #CMi.append(['Information',"XBMC.Action(Info)"])
    CMi.append(['Check Others by %s'%author,"XBMC.Container.Update(plugin://%s/?mode=addonlist&url=%s)"%(addon_id,aurthorUrl)])
    #CMi.append(['*Check Others by %s'%author,"XBMC.Container.Update(%s)"%(addon.build_plugin_url({'mode':'addonlist','url':aurthorUrl}))])
    CMi.append(['Visit Page',"XBMC.Container.Update(plugin://%s/?mode=BrowseUrl&url=%s)"%(addon_id,urllib.quote_plus(url))])
    if len(forumUrl) > 0: CMi.append(['Visit Forum',"XBMC.Container.Update(plugin://%s/?mode=BrowseUrl&url=%s)"%(addon_id,urllib.quote_plus(forumUrl))])
    #print CMi
    addHELPDir('[COLOR FF0077D7]Install [/COLOR][COLOR FFFFFFFF]%s[/COLOR][COLOR FF0077D7] (v%s) [%s][/COLOR] [COLOR FF0077D7][I]by[/I][/COLOR] [COLOR FFFFFFFF]%s[/COLOR]'%(name,version,releaseddate,author),addonurl,'addoninstall',iconimage,ifanart,description,'addon',repourl,version,author,CMi,True);
    AUTO_VIEW('addons')
#****************************************************************
def Note(header="",message="",sleep=5000): xbmc.executebuiltin("XBMC.Notification(%s,%s,%i)" % (header,message,sleep))
def File_Save(path,data): file=open(path,'w'); file.write(data); file.close()
def File_Open(path):
    """Return the contents of *path*, or '' when it is not a regular file."""
    if not os.path.isfile(path):
        return ''
    with open(path, 'r') as src:
        return src.read()
def OPEN_URL(url):
    """Fetch *url* with a desktop-browser User-Agent; returns '' on any failure.

    NOTE(review): the bare except hides network errors entirely; callers must
    tolerate an empty string.
    """
    try: req=urllib2.Request(url); req.add_header('User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'); response=urllib2.urlopen(req); link=response.read(); response.close(); return link
    except: return ""
def FireDrive(url):
    """Resolve a Firedrive file-page URL to a direct download link.

    Non-Firedrive URLs are returned unchanged.  Error results are signalled
    in-band: either a string starting with '[error]' or the original url with
    '#[error]' appended (callers check for the '[error]' substring).
    """
    if ('http://m.firedrive.com/file/' not in url) and ('https://m.firedrive.com/file/' not in url) and ('http://www.firedrive.com/file/' not in url) and ('http://firedrive.com/file/' not in url) and ('https://www.firedrive.com/file/' not in url) and ('https://firedrive.com/file/' not in url): return url ## contain with current url if not a filedrive url.
    #else:
    try:
        if 'https://' in url: url=url.replace('https://','http://')
        html=net.http_GET(url).content; #print html;
        if ">This file doesn't exist, or has been removed.<" in html: return "[error] This file doesn't exist, or has been removed."
        elif ">File Does Not Exist | Firedrive<" in html: return "[error] File Does Not Exist."
        elif "404: This file might have been moved, replaced or deleted.<" in html: return "[error] 404: This file might have been moved, replaced or deleted."
        # collect the hidden form fields and re-POST them to get the real link
        data={}; r=re.findall(r'<input\s+type="\D+"\s+name="(.+?)"\s+value="(.+?)"\s*/>',html);
        for name,value in r: data[name]=value
        #print data;
        if len(data)==0: return '[error] input data not found.'
        html=net.http_POST(url,data,headers={'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:30.0) Gecko/20100101 Firefox/30.0','Referer': url,'Host': 'www.firedrive.com'}).content
        r=re.search('<a\s+href="(.+?)"\s+target="_blank"\s+id=\'top_external_download\'\s+title=\'Download This File\'\s*>',html)
        if r: print urllib.unquote_plus(r.group(1)); return urllib.unquote_plus(r.group(1))
        else: return url+'#[error]'
    except: return url+'#[error]'
#****************************************************************
def HUBINSTALL(name,url,description,filetype,repourl):
    """First-launch installer: download the zip at *url* and extract it into
    the addons folder without any confirmation dialog.

    NOTE(review): 'addonfolder' is only assigned when filetype=='addon'; any
    other filetype raises NameError at the print/extract below -- confirm all
    callers pass 'addon'.
    """
    try: url=FireDrive(url)
    except: print "error in FireDrive() function."
    path=xbmc.translatePath(os.path.join('special://home','addons','packages')); dp=xbmcgui.DialogProgress();
    dp.create("First Launch:","Creating Database ",'','Only Shown on First Launch');
    lib=os.path.join(path,name+'.zip')
    try: os.remove(lib)  # drop any stale cached copy
    except: pass
    downloader.download(url,lib,dp)
    if filetype=='addon': addonfolder=xbmc.translatePath(os.path.join('special://','home','addons'))
    time.sleep(2)
    #dp.update(0,"","Installing selections.....")
    print '======================================='; print addonfolder; print '=======================================';
    extract.all(lib,addonfolder,'')
#****************************************************************
def DEPENDINSTALL(name,url,description,filetype,repourl):
    """Download and extract one dependency zip, then recursively install any
    further dependencies declared in its addon.xml.

    NOTE(review): recursion has no cycle guard beyond the os.path.exists()
    check on already-extracted dependencies.
    """
    #Split Script Depends============================
    # derive the add-on id from the zip filename (strip list repr artifacts)
    files=url.split('/'); dependname=files[-1:]; dependname=str(dependname);
    dependname=dependname.replace('[','').replace(']','').replace('"','').replace('[','').replace("'",'').replace(".zip",'');
    #StoprSplit======================================
    path=xbmc.translatePath(os.path.join('special://home','addons','packages')); dp=xbmcgui.DialogProgress();
    dp.create("Configuring Requirments:","Downloading and ",'','Installing '+name);
    lib=os.path.join(path,name+'.zip');
    try: os.remove(lib)
    except: pass
    downloader.download(url,lib,dp)
    if filetype=='addon': addonfolder=xbmc.translatePath(os.path.join('special://','home','addons'))
    time.sleep(2)
    #dp.update(0,"","Installing selections.....")
    print '======================================='; print addonfolder; print '======================================='
    extract.all(lib,addonfolder,'')
    #Start Script Depend Search==================================================================
    depends=xbmc.translatePath(os.path.join('special://home','addons',dependname,'addon.xml'));
    source=open(depends,mode='r'); link=source.read(); source.close();
    dmatch=re.compile('import addon="(.+?)"').findall(link)
    for requires in dmatch:
        if not 'xbmc.python' in requires:  # skip the Python API pseudo-dependency
            print 'Script Requires --- '+requires;
            dependspath=xbmc.translatePath(os.path.join('special://home','addons',requires))
            #if not os.path.exists(dependspath): DEPENDINSTALL(requires,'http://addonrepo.com/xbmchub/depends/'+requires+'.zip','','addon','none')
            if not os.path.exists(dependspath): DEPENDINSTALL(requires,tribeca_url2+'maintenance/modules/'+requires+'.zip','','addon','none')
#End Script Depend Search======================================================================
def ADDONINSTALL(name,url,description,filetype,repourl,Auto=False,v='',vO=''):
    """Download and install an add-on zip, its declared dependencies, and
    (optionally) its repository.

    Auto=True is used by the auto-updater: only the final notification style
    changes (toast instead of modal dialog); the confirmation dialog is still
    shown -- see the commented-out override below.
    v / vO -- remote and local version strings, used in the confirm prompt.
    """
    print ['name',name,'url',url,'description',description,'filetype',filetype,'repourl',repourl,'Auto',Auto,'v',v,'vO',vO];
    # strip the color-markup prefix added by ADDONINDEX() from the label
    try: name=name.split('[COLOR FF0077D7]Install [/COLOR][COLOR FFFFFFFF]')[1].split('[/COLOR][COLOR FF0077D7] (v')[0]
    except: pass
    #Start Depend Setup================================================================================
    # derive the add-on folder name from the zip URL (drop the -version suffix)
    print 'Installing Url is '+url; ##addonfile=url.split('-');
    newfile=str(url.split('-')[0:-1]); ##folder=newfile.split('/');
    addonname=str(newfile.split('/')[-1:]).replace('[','').replace(']','').replace('"','').replace('[','').replace("'",'');
    print ['newfile',newfile,'addonname',addonname];
    print 'SOURCE FILE IS '+addonname;
    #End of Depend Setup==================================================================================
    path=xbmc.translatePath(os.path.join('special://home','addons','packages')); vTag='';
    if len(v) > 0: vTag+=" v"+v
    if len(vO) > 0: vTag+=" [local v"+vO+"]"
    confirm=xbmcgui.Dialog().yesno("Please Confirm"," Do you wish to install the chosen add-on and"," its respective repository if needed? "," "+name+vTag,"Cancel","Install")
    #if Auto==True: confirm=True
    if confirm:
        dp=xbmcgui.DialogProgress(); dp.create("Download Progress:","Downloading your selection ",'','Please Wait');
        lib=os.path.join(path,name+'.zip')
        try: os.remove(lib)
        except: pass
        downloader.download(url,lib,dp)
        if filetype=='addon': addonfolder=xbmc.translatePath(os.path.join('special://','home','addons'))
        elif filetype=='media': addonfolder=xbmc.translatePath(os.path.join('special://','home'))
        elif filetype=='main': addonfolder=xbmc.translatePath(os.path.join('special://','home'))
        time.sleep(2)
        #dp.update(0,"","Installing selections.....")
        print '======================================='; print addonfolder; print '=======================================';
        extract.all(lib,addonfolder,dp)
        try:
            #Start Addon Depend Search==================================================================
            depends=xbmc.translatePath(os.path.join('special://home','addons',addonname,'addon.xml'));
            source=open(depends,mode='r'); link=source.read(); source.close();
            dmatch=re.compile('import addon="(.+?)"').findall(link)
            for requires in dmatch:
                if not 'xbmc.python' in requires:  # skip the Python API pseudo-dependency
                    print 'Requires --- '+requires; dependspath=xbmc.translatePath(os.path.join('special://home/addons',requires));
                    #if not os.path.exists(dependspath): DEPENDINSTALL(requires,'http://addonrepo.com/xbmchub/depends/'+requires+'.zip','','addon','none')
                    if not os.path.exists(dependspath): DEPENDINSTALL(requires,tribeca_url2+'maintenance/modules/'+requires+'.zip','','addon','none')
        except:pass
        #End Addon Depend Search======================================================================
        #dialog=xbmcgui.Dialog()
        #dialog.ok("Success!","Please Reboot To Take Effect"," Brought To You By %s "% siteTitle)
        ## #start repo dl# ##
        # install the add-on's repository too, unless repourl is the 'none' sentinel
        if 'none' not in repourl:
            path=xbmc.translatePath(os.path.join('special://home/addons','packages')); dp=xbmcgui.DialogProgress();
            dp.create("Updating Repo if needed:","Configuring Installation ",'',' ');
            lib=os.path.join(path,name+'.zip')
            try: os.remove(lib)
            except: pass
            downloader.download(repourl, lib, '')
            if filetype=='addon': addonfolder=xbmc.translatePath(os.path.join('special://','home/addons'))
            elif filetype=='media': addonfolder=xbmc.translatePath(os.path.join('special://','home'))
            elif filetype=='main': addonfolder=xbmc.translatePath(os.path.join('special://','home'))
            time.sleep(2)
            #dp.update(0,"","Checking Installation......")
            print '======================================='; print addonfolder; print '======================================='
            extract.all(lib,addonfolder,dp);
            xbmc.executebuiltin("XBMC.UpdateLocalAddons()");
            dialog=xbmcgui.Dialog();
            if Auto==True: Note("Success!",name+" "+v+" Installed")
            else: dialog.ok("Success!"," Your Selection(s) Have Been Installed."," Brought To You By %s "% siteTitle)
        else:
            xbmc.executebuiltin("XBMC.UpdateLocalAddons()");
            dialog=xbmcgui.Dialog();
            if Auto==True: Note("Success!",name+" "+v+" Installed")
            else: dialog.ok("Success!"," Your Selections Have Been Installed"," Brought To You By %s "% siteTitle)
            '''confirm=xbmcgui.Dialog().yesno("Success!"," Please Restart To Take Effect"," Brought To You By %s "% siteTitle," ","Later","Restart")
            if confirm: xbmc.executebuiltin('Quit')
            else: pass'''
    else: return
#****************************************************************
def AUTO_VIEW(content='',viewmode=''): # Set View
    """Set the container content type and the skin view mode.

    content  -- xbmcplugin content type ('addons', 'list', ...); also selects
                which per-content view-mode setting applies.
    viewmode -- explicit skin view-mode id; when empty it is resolved from
                the add-on settings (or the '500' fallback).
    """
    viewmode=str(viewmode); content=str(content);
    if len(viewmode)==0:
        if settings.getSetting('auto-view')=='true':
            # BUG FIX: the second branch was a plain `if`, so its `else`
            # overwrote the 'addons' view mode with default-view right after
            # setting it.  Use an if/elif/else chain.
            if content=='addons': viewmode=settings.getSetting('addon-view')
            elif content=='list': viewmode=settings.getSetting('list-view')
            else: viewmode=settings.getSetting('default-view')
        else: viewmode='500'
    if len(content) > 0: xbmcplugin.setContent(int(sys.argv[1]),str(content))
    #if settings.getSetting('auto-view')=='true': xbmc.executebuiltin("Container.SetViewMode(%s)" % str(viewmode))
    if len(viewmode) > 0: xbmc.executebuiltin("Container.SetViewMode(%s)" % str(viewmode))
# HELPDIR**************************************************************
def addDir(name,url,mode,thumb):
    """Add a folder item to the directory listing.

    The item's plugin URL encodes url/mode/name as query-string parameters.
    Returns the xbmcplugin.addDirectoryItem() result.
    """
    query = sys.argv[0] + "?url=" + urllib.quote_plus(url) + "&mode=" + str(mode) + "&name=" + urllib.quote_plus(name)
    item = xbmcgui.ListItem(name, iconImage=iconart, thumbnailImage=thumb)
    #item.setInfo(type="Video",infoLabels={"title":name,"Plot":description})
    try:
        item.setProperty("fanart_image", fanart)
    except:
        pass
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=query, listitem=item, isFolder=True)
def addHELPDir(name,url,mode,iconimage,fanart,description,filetype,repourl,version,author,contextmenuitems=[],contextreplace=False):
    """Add a non-folder (playable/action) item carrying full add-on metadata
    (repo url, version, author) in its plugin URL and ListItem properties.

    NOTE(review): mutable default `contextmenuitems=[]` is shared across
    calls; safe only because callers never mutate it here.
    """
    u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&fanart="+urllib.quote_plus(fanart)+"&description="+urllib.quote_plus(description)+"&filetype="+urllib.quote_plus(filetype)+"&repourl="+urllib.quote_plus(repourl)+"&author="+urllib.quote_plus(author)+"&version="+urllib.quote_plus(version); ok=True;
    liz=xbmcgui.ListItem(name,iconImage=iconart,thumbnailImage=iconimage); #"DefaultFolder.png"
    #if len(contextmenuitems) > 0:
    liz.addContextMenuItems(contextmenuitems,replaceItems=contextreplace)
    liz.setInfo(type="Video",infoLabels={"title":name,"plot":description});
    liz.setProperty("fanart_image",fanart); liz.setProperty("Addon.Description",description); liz.setProperty("Addon.Creator",author); liz.setProperty("Addon.Version",version)
    #properties={'Addon.Description':meta["plot"]}
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False); return ok
def add2HELPDir(name,url,mode,iconimage,fanart,description,filetype,contextmenuitems=[],contextreplace=False):
    """Add a folder item with basic add-on metadata in its plugin URL;
    lighter-weight sibling of addHELPDir() (no repo/version/author fields,
    isFolder=True).
    """
    u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&fanart="+urllib.quote_plus(fanart)+"&description="+urllib.quote_plus(description)+"&filetype="+urllib.quote_plus(filetype); ok=True;
    liz=xbmcgui.ListItem(name,iconImage=iconart,thumbnailImage=iconimage);
    #if len(contextmenuitems) > 0:
    liz.addContextMenuItems(contextmenuitems,replaceItems=contextreplace)
    liz.setInfo(type="Video",infoLabels={"title":name,"Plot":description});
    liz.setProperty("fanart_image",fanart);
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True); return ok
#****************************************************************
def _get_keyboard(default="",heading="",hidden=False): #Start Keyboard Function
    """Show the on-screen keyboard and return the entered text as unicode;
    returns *default* when the user cancels."""
    keyboard=xbmc.Keyboard(default,heading,hidden ); keyboard.doModal()
    if (keyboard.isConfirmed()): return unicode(keyboard.getText(),"utf-8")
    return default
def SEARCHADDON(url): #Start Search Function
    """Prompt for a search term and list matching add-ons.

    *url* is the search endpoint prefix; the quoted query and
    '&criteria=title' are appended.  Returns (False, 0) on cancel/empty
    input (historical interface), otherwise None.
    """
    searchUrl=url; vq=_get_keyboard(heading="Search add-ons")
    if (not vq): return False,0 # if blank or the user cancelled the keyboard, return
    title=urllib.quote_plus(vq); searchUrl+=title+'&criteria=title'; print "Searching URL: "+searchUrl; List_Addons(searchUrl); AUTO_VIEW('list') # we need to set the title to our query
#****************************************************************
def get_params():
    """Parse the plugin query string (sys.argv[2]) into a dict.

    Returns {} mapping names to their still-URL-encoded values, or []
    (the historical empty sentinel) when no query string is present.
    """
    param=[]
    paramstring=sys.argv[2]
    if len(paramstring)>=2:
        # BUG FIX (dead code removed): the original truncated `params[:-2]`
        # when the string ended in '/' -- an off-by-one that was harmless only
        # because `cleanedparams` had already been computed and the truncated
        # value was never used.
        cleanedparams=paramstring.replace('?','')
        param={}
        for pair in cleanedparams.split('&'):
            splitparams=pair.split('=')
            if len(splitparams)==2: param[splitparams[0]]=splitparams[1]
    return param
# --- Decode the plugin invocation parameters into module-level globals. ---
params=get_params(); url=None; name=None; mode=None; year=None; imdb_id=None
def grbPrm(n):
    """Return the URL-decoded parameter *n*, or '' when absent."""
    try: return urllib.unquote_plus(params[n])
    except: return ''
url=grbPrm("url"); filetype=grbPrm("filetype"); iconimage=grbPrm("iconimage"); fanart=grbPrm("fanart"); description=grbPrm("description"); name=grbPrm("name"); repourl=grbPrm("repourl"); author=grbPrm("author"); version=grbPrm("version");
# mode stays None (not '') when missing, so the dispatch below shows the main index
try: mode=urllib.unquote_plus(params["mode"])
except: pass
print "Mode: "+str(mode); print "URL: "+str(url); print "Name: "+str(name)
#****************************************************************
# --- Mode dispatch: route the parsed ``mode`` parameter to its handler. ---
#if mode==None or url==None or len(url)<1: STATUSCATS()
if mode==None or url==None or len(url)<1: MAININDEX()
try:
    if url: print url
except: pass
if mode=='settings': addon.show_settings() # Settings
elif mode=='autoupdate': items=AutoUpdate(url) #
elif mode=='autoupdate2': # Featured
    AutoUpdate(tribeca_url+'featured-addons.php')
    AutoUpdate(tribeca_url+'featured-repos.php')
elif mode=='interrepolist': items=List_Inter_Addons(url)
elif mode=='interlist': items=INTERNATIONAL(url)
elif mode=='innertabs': items=List_Addons_Inner_Tabs(name,url) # Newest / Updated
elif mode=='addonlist': items=List_Addons(url) # List Addons
elif mode=='worldlist': items=List_Tribeca_WorldList(url) # World Addons - Temp
elif mode=='toprepolist': items=List_Repo_Top_Developers(url) # Top Devs
elif mode=='searchaddon': SEARCHADDON(url) # Search
elif mode=='addonindex': ADDONINDEX(name,url,filetype) # Right Before Installing Addon(s)
elif mode=='addoninstall': ADDONINSTALL(name,url,description,filetype,repourl) # Installing Addon(s)
elif mode=='adultlist': items=List_Tribeca_Adult(url) # Adult Addons - Temp
elif mode=='WizardTypeInstaller': WizardTypeInstaller(name,url) #
elif mode=='AddonTypeInstaller': AddonTypeInstaller(name,url) #
elif mode=='dependinstall': DEPENDINSTALL(name,url,description,filetype,repourl) # Dependancies
elif mode=='BrowseUrl': xbmc.executebuiltin("XBMC.System.Exec(%s)" % url) #
# Always close the directory listing so the container renders.
xbmcplugin.endOfDirectory(int(sys.argv[1]))
|
diogocs1/comps | refs/heads/master | web/addons/account/wizard/account_state_open.py | 341 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_state_open(osv.osv_memory):
    """Transient wizard that re-triggers the 'open_test' workflow signal on
    the invoice selected in the context."""
    _name = 'account.state.open'
    _description = 'Account State Open'

    def change_inv_state(self, cr, uid, ids, context=None):
        """Re-open the first active invoice unless it is already reconciled.

        Raises osv.except_osv when the invoice is reconciled; always returns
        the close-window action.
        """
        if context is None:
            context = {}
        invoice_pool = self.pool.get('account.invoice')
        active_ids = context.get('active_ids')
        if isinstance(active_ids, list):
            record = invoice_pool.browse(cr, uid, active_ids[0], context=context)
            if record.reconciled:
                raise osv.except_osv(_('Warning!'), _('Invoice is already reconciled.'))
            record.signal_workflow('open_test')
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jk1/intellij-community | refs/heads/master | python/testData/inspections/PyPackageRequirementsInspection/PartiallySatisfiedRequirementsTxt/test1.py | 69 | <warning descr="Package requirements 'Markdown', 'Django==1.3.1' are not satisfied">print("Hello, World!")
</warning> |
rahul67/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/urls.py | 70 | from django.conf.urls import patterns, include
# Root URLconf for the Django test suite: mounts each test app's urls plus
# the built-in auth login/logout views used by several tests.
urlpatterns = patterns('',
    # test_client urls
    (r'^test_client/', include('test_client.urls')),
    (r'^test_client_regress/', include('test_client_regress.urls')),

    # File upload test views
    (r'^file_uploads/', include('file_uploads.urls')),

    # Always provide the auth system login and logout views
    (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
    (r'^accounts/logout/$', 'django.contrib.auth.views.logout'),

    # test urlconf for {% url %} template tag
    (r'^url_tag/', include('template_tests.urls')),

    # django built-in views
    (r'^views/', include('view_tests.urls')),

    # test urlconf for middleware tests
    (r'^middleware/', include('middleware.urls')),

    # admin widget tests
    (r'widget_admin/', include('admin_widgets.urls')),

    # admin custom URL tests
    (r'^custom_urls/', include('admin_custom_urls.urls')),

    # admin scripts tests
    (r'^admin_scripts/', include('admin_scripts.urls')),

)
|
neo1973/xbmc | refs/heads/master | tools/EventClients/examples/python/example_mouse.py | 262 | #!/usr/bin/python
# This is a simple example showing how you can send mouse movement
# events to XBMC.
# NOTE: Read the comments in 'example_button1.py' for a more detailed
# explanation.
import sys
sys.path.append("../../lib/python")
from xbmcclient import *
from socket import *
def main():
    """Send a HELO, a sweep of mouse-move packets, then BYE to XBMC's
    EventServer on localhost:9777 over UDP."""
    import time  # local import kept from the original example style

    host = "localhost"
    port = 9777
    addr = (host, port)
    sock = socket(AF_INET,SOCK_DGRAM)

    # First packet must be HELO and can contain an icon
    packet = PacketHELO("Example Mouse", ICON_PNG,
                        "../../icons/mouse.png")
    packet.send(sock, addr)

    # wait for notification window to close (in XBMC)
    time.sleep(2)

    # send mouse events to take cursor from top left to bottom right of the screen
    # here 0 to 65535 will map to XBMC's screen width and height.
    # Specifying absolute mouse coordinates is unsupported currently.
    for i in range(0, 65535, 2):
        packet = PacketMOUSE(i,i)
        packet.send(sock, addr)

    # ok we're done, close the connection
    packet = PacketBYE()
    packet.send(sock, addr)
if __name__=="__main__":
main()
|
ppiotr/Invenio | refs/heads/docextract | modules/webstyle/lib/webpage.py | 23 | ## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Web Page Functions"""
__revision__ = "$Id$"
from invenio.config import \
CFG_WEBSTYLE_CDSPAGEBOXLEFTBOTTOM, \
CFG_WEBSTYLE_CDSPAGEBOXLEFTTOP, \
CFG_WEBSTYLE_CDSPAGEBOXRIGHTBOTTOM, \
CFG_WEBSTYLE_CDSPAGEBOXRIGHTTOP, \
CFG_SITE_LANG, \
CFG_SITE_URL, \
CFG_SITE_NAME_INTL, \
CFG_SITE_NAME
from invenio.messages import gettext_set_language
from invenio.webuser import \
create_userinfobox_body, \
create_useractivities_menu, \
create_adminactivities_menu, \
getUid
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
from xml.dom.minidom import getDOMImplementation
def create_navtrailbox_body(title,
                            previous_links,
                            prolog="",
                            separator=""" > """,
                            epilog="",
                            language=CFG_SITE_LANG):
    """Render the navigation-trail box body via the webstyle template.

    :param title: current page title (last element of the trail)
    :param previous_links: trail content from the site title up to, but
        excluding, the current page
    :param prolog: markup emitted before the trail
    :param separator: markup emitted between trail elements
    :param epilog: markup emitted after the trail
    :param language: language code used for rendering
    :return: HTML text containing the navtrail
    """
    return webstyle_templates.tmpl_navtrailbox_body(
        ln=language,
        title=title,
        previous_links=previous_links,
        separator=separator,
        prolog=prolog,
        epilog=epilog)
def page(title, body, navtrail="", description="", keywords="",
         metaheaderadd="", uid=None,
         cdspageheaderadd="", cdspageboxlefttopadd="",
         cdspageboxleftbottomadd="", cdspageboxrighttopadd="",
         cdspageboxrightbottomadd="", cdspagefooteradd="", lastupdated="",
         language=CFG_SITE_LANG, verbose=1, titleprologue="",
         titleepilogue="", secure_page_p=0, req=None, errors=None, warnings=None, navmenuid="admin",
         navtrail_append_title_p=1, of="", rssurl=CFG_SITE_URL+"/rss", show_title_p=True,
         body_css_classes=None, show_header=True, show_footer=True):
    """page(): display CDS web page
    input: title of the page
           body of the page in html format
           description goes to the metadata in the header of the HTML page
           keywords goes to the metadata in the header of the html page
           metaheaderadd goes to further metadata in the header of the html page
           cdspageheaderadd is a message to be displayed just under the page header
           cdspageboxlefttopadd is a message to be displayed in the page body on left top
           cdspageboxleftbottomadd is a message to be displayed in the page body on left bottom
           cdspageboxrighttopadd is a message to be displayed in the page body on right top
           cdspageboxrightbottomadd is a message to be displayed in the page body on right bottom
           cdspagefooteradd is a message to be displayed on the top of the page footer
           lastupdated is a text containing the info on last update (optional)
           language is the language version of the page
           verbose is verbosity of the page (useful for debugging)
           titleprologue is to be printed right before page title
           titleepilogue is to be printed right after page title
           req is the mod_python request object
           log is the string of data that should be appended to the log file (errors automatically logged)
           secure_page_p is 0 or 1 and tells whether we are to use HTTPS friendly page elements or not
           navmenuid the section of the website this page belongs (search, submit, baskets, etc.)
           navtrail_append_title_p is 0 or 1 and tells whether page title is appended to navtrail
           of is an output format (use xx for xml output (e.g. AJAX))
           rssfeed is the url of the RSS feed for this page
           show_title_p is 0 or 1 and tells whether page title should be displayed in body of the page
           show_header is 0 or 1 and tells whether page header should be displayed or not
           show_footer is 0 or 1 and tells whether page footer should be displayed or not
    output: the final cds page with header, footer, etc.
    """
    # NOTE: 'errors', 'warnings' and 'verbose' are accepted for backward
    # compatibility but are not used in this function body.
    _ = gettext_set_language(language)
    if req is not None:
        # Derive user id and HTTPS-ness from the live request when given.
        if uid is None:
            uid = getUid(req)
        secure_page_p = req.is_https() and 1 or 0
    if uid is None:
        ## 0 means generic guest user.
        uid = 0
    if of == 'xx':
        #xml output (e.g. AJAX calls) => of=xx
        # Wrap the body in a CDATA section of a minimal XML document
        # instead of rendering the full HTML page.
        req.content_type = 'text/xml'
        impl = getDOMImplementation()
        output = impl.createDocument(None, "invenio-message", None)
        root = output.documentElement
        body_node = output.createElement('body')
        body_text = output.createCDATASection(unicode(body, 'utf_8'))
        body_node.appendChild(body_text)
        root.appendChild(body_node)
        return output.toprettyxml(encoding="utf-8" )
    else:
        # Regular HTML output: delegate full page assembly to the
        # webstyle template, passing both the configured static boxes
        # and the per-call additions.
        return webstyle_templates.tmpl_page(req, ln=language,
                          description = description,
                          keywords = keywords,
                          metaheaderadd = metaheaderadd,
                          userinfobox = create_userinfobox_body(req, uid, language),
                          useractivities_menu = create_useractivities_menu(req, uid, navmenuid, language),
                          adminactivities_menu = create_adminactivities_menu(req, uid, navmenuid, language),
                          navtrailbox = create_navtrailbox_body(navtrail_append_title_p \
                                                                and title or '',
                                                                navtrail,
                                                                language=language),
                          uid = uid,
                          secure_page_p = secure_page_p,
                          pageheaderadd = cdspageheaderadd,
                          boxlefttop = CFG_WEBSTYLE_CDSPAGEBOXLEFTTOP,
                          boxlefttopadd = cdspageboxlefttopadd,
                          boxleftbottomadd = cdspageboxleftbottomadd,
                          boxleftbottom = CFG_WEBSTYLE_CDSPAGEBOXLEFTBOTTOM,
                          boxrighttop = CFG_WEBSTYLE_CDSPAGEBOXRIGHTTOP,
                          boxrighttopadd = cdspageboxrighttopadd,
                          boxrightbottomadd = cdspageboxrightbottomadd,
                          boxrightbottom = CFG_WEBSTYLE_CDSPAGEBOXRIGHTBOTTOM,
                          titleprologue = titleprologue,
                          title = title,
                          titleepilogue = titleepilogue,
                          body = body,
                          lastupdated = lastupdated,
                          pagefooteradd = cdspagefooteradd,
                          navmenuid = navmenuid,
                          rssurl = rssurl,
                          show_title_p = show_title_p,
                          body_css_classes=body_css_classes,
                          show_header=show_header,
                          show_footer=show_footer)
def pageheaderonly(title, navtrail="", description="", keywords="", uid=0,
                   cdspageheaderadd="", language=CFG_SITE_LANG, req=None,
                   secure_page_p=0, verbose=1, navmenuid="admin",
                   navtrail_append_title_p=1, metaheaderadd="",
                   rssurl=CFG_SITE_URL+"/rss", body_css_classes=None):
    """Return just the beginning of page(), with full headers.
    Suitable for the search results page and any long-taking scripts.

    Parameters mirror those of page(); see its docstring for details.
    'verbose' is accepted for backward compatibility but unused here.
    """
    if req is not None:
        if uid is None:
            # Bug fix: resolve the user id from the request object.
            # The original called getUid(uid), passing the (None) uid
            # instead of the request — compare with page() above, which
            # does getUid(req).
            uid = getUid(req)
        secure_page_p = req.is_https() and 1 or 0
    return webstyle_templates.tmpl_pageheader(req,
                      ln = language,
                      headertitle = title,
                      description = description,
                      keywords = keywords,
                      metaheaderadd = metaheaderadd,
                      userinfobox = create_userinfobox_body(req, uid, language),
                      useractivities_menu = create_useractivities_menu(req, uid, navmenuid, language),
                      adminactivities_menu = create_adminactivities_menu(req, uid, navmenuid, language),
                      navtrailbox = create_navtrailbox_body(navtrail_append_title_p \
                                                            and title or '',
                                                            navtrail,
                                                            language=language),
                      uid = uid,
                      secure_page_p = secure_page_p,
                      pageheaderadd = cdspageheaderadd,
                      navmenuid = navmenuid,
                      rssurl = rssurl,
                      body_css_classes=body_css_classes)
def pagefooteronly(cdspagefooteradd="", lastupdated="",
                   language=CFG_SITE_LANG, req=None, verbose=1):
    """Render only the closing part of page(), with the full footer.
    Suitable for the search results page and any long-taking scripts.
    'verbose' is accepted for backward compatibility but unused here."""
    return webstyle_templates.tmpl_pagefooter(
        req,
        ln=language,
        lastupdated=lastupdated,
        pagefooteradd=cdspagefooteradd)
def create_error_box(req, title=None, verbose=1, ln=CFG_SITE_LANG, errors=None):
    """Build an HTML message box with internal diagnostic information,
    suitable for display when something bad has happened.

    Delegates rendering to the webstyle template, which inspects the
    request object and the given errors."""
    _ = gettext_set_language(ln)
    return webstyle_templates.tmpl_error_box(title=title,
                                             ln=ln,
                                             verbose=verbose,
                                             req=req,
                                             errors=errors)
def adderrorbox(header='', datalist=None):
    """Used to create a table around main data on a page, row based.

    :param header: text placed in the single header cell (spans all columns)
    :param datalist: list of HTML fragments, one per table cell
    :return: the assembled HTML table as a string
    """
    # Fix: avoid the mutable default argument ([]), which would be shared
    # across calls.
    if datalist is None:
        datalist = []
    try:
        perc = str(100 // len(datalist)) + '%'
    except ZeroDivisionError:
        # Fix: keep perc a CSS-valid string for type consistency (the
        # original fell back to the int 1).  With an empty datalist the
        # cell loop below emits nothing, so this value is never rendered.
        perc = '100%'
    # Build via a list + join instead of repeated string concatenation.
    parts = ['<table class="errorbox">']
    parts.append('<thead><tr><th class="errorboxheader" colspan="%s">%s</th></tr></thead>'
                 % (len(datalist), header))
    parts.append('<tbody>')
    for row in [datalist]:
        parts.append('<tr>')
        for data in row:
            parts.append('<td style="vertical-align: top; margin-top: 5px; width: %s;">' % (perc, ))
            parts.append(data)
            parts.append('</td>')
        parts.append('</tr>')
    parts.append('</tbody></table>')
    return ''.join(parts)
def error_page(title, req, ln=CFG_SITE_LANG):
    """Return a fully rendered "Internal Error" page whose body is an
    error box built from *title* and the request."""
    # load the right message language
    _ = gettext_set_language(ln)
    site_name = CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)
    error_body = create_error_box(req, title=str(title), verbose=0, ln=ln)
    return page(title=_("Error"),
                body=error_body,
                description="%s - Internal Error" % site_name,
                keywords="%s, Internal Error" % site_name,
                uid=getUid(req),
                language=ln,
                req=req)
def warning_page(title, req, ln=CFG_SITE_LANG):
    """Return a fully rendered "Warning" page whose body is *title*
    (already-formatted HTML supplied by the caller)."""
    # load the right message language
    _ = gettext_set_language(ln)
    site_name = CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)
    return page(title=_("Warning"),
                body=title,
                description="%s - Internal Error" % site_name,
                keywords="%s, Internal Error" % site_name,
                uid=getUid(req),
                language=ln,
                req=req)
def write_warning(msg, type='', prologue='<br />', epilogue='<br />', req=None):
    """Prints warning message and flushes output.

    If *msg* is empty, returns ''.  Otherwise renders the warning via the
    webstyle template; when *req* is None the rendered markup is returned,
    when *req* is given the markup is written to it (and None is returned).
    """
    # NOTE(review): 'type' shadows the builtin; kept for API compatibility.
    if msg:
        ret = webstyle_templates.tmpl_write_warning(
            msg = msg,
            type = type,
            prologue = prologue,
            epilogue = epilogue,
            )
        if req is None:
            return ret
        else:
            # Implicitly returns None on this branch — callers passing a
            # req must not rely on the return value.
            req.write(ret)
    else:
        return ''
|
tolbkni/fabric | refs/heads/master | fabric/contrib/django.py | 65 | """
.. versionadded:: 0.9.2
These functions streamline the process of initializing Django's settings module
environment variable. Once this is done, your fabfile may import from your
Django project, or Django itself, without requiring the use of ``manage.py``
plugins or having to set the environment variable yourself every time you use
your fabfile.
Currently, these functions only allow Fabric to interact with
local-to-your-fabfile Django installations. This is not as limiting as it
sounds; for example, you can use Fabric as a remote "build" tool as well as
using it locally. Imagine the following fabfile::
from fabric.api import run, local, hosts, cd
from fabric.contrib import django
django.project('myproject')
from myproject.myapp.models import MyModel
def print_instances():
for instance in MyModel.objects.all():
print(instance)
@hosts('production-server')
def print_production_instances():
with cd('/path/to/myproject'):
run('fab print_instances')
With Fabric installed on both ends, you could execute
``print_production_instances`` locally, which would trigger ``print_instances``
on the production server -- which would then be interacting with your
production Django database.
As another example, if your local and remote settings are similar, you can use
it to obtain e.g. your database settings, and then use those when executing a
remote (non-Fabric) command. This would allow you some degree of freedom even
if Fabric is only installed locally::
from fabric.api import run
from fabric.contrib import django
django.settings_module('myproject.settings')
from django.conf import settings
def dump_production_database():
run('mysqldump -u %s -p=%s %s > /tmp/prod-db.sql' % (
settings.DATABASE_USER,
settings.DATABASE_PASSWORD,
settings.DATABASE_NAME
))
The above snippet will work if run from a local, development environment, again
provided your local ``settings.py`` mirrors your remote one in terms of
database connection info.
"""
import os
def settings_module(module):
    """
    Set ``DJANGO_SETTINGS_MODULE`` shell environment variable to ``module``.

    Django refuses imports from itself or from a Django project unless the
    ``DJANGO_SETTINGS_MODULE`` shell environment variable points at the
    project's settings (see `the Django settings docs
    <http://docs.djangoproject.com/en/dev/topics/settings/>`_.)

    Call this near the top of your fabfile or Fabric-using code; any Django
    imports made afterwards will then resolve correctly.

    .. note::
        This sets a **shell** environment variable (via ``os.environ``);
        it is unrelated to Fabric's own internal "env" variables.
    """
    os.environ['DJANGO_SETTINGS_MODULE'] = module
def project(name):
    """
    Sets ``DJANGO_SETTINGS_MODULE`` to ``'<name>.settings'``.

    Handy shortcut for the common case where the project follows Django's
    default naming convention for the settings module's name and location.

    Uses `settings_module` -- see its documentation for details on why and
    how to use this functionality.
    """
    settings_module('{0}.settings'.format(name))
|
jclakkis/discus-inferno | refs/heads/master | flaskenv/lib/python2.7/posixpath.py | 4 | /usr/lib/python2.7/posixpath.py |
chinmaygarde/mojo | refs/heads/ios | tools/android/remove_strings.py | 183 | #!/usr/bin/python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Remove strings by name from a GRD file."""
import optparse
import re
import sys
def RemoveStrings(grd_path, string_names):
  """Removes strings with the given names from a GRD file. Overwrites the file.

  Args:
    grd_path: path to the GRD file.
    string_names: a list of string names to be removed.
  """
  with open(grd_path, 'r') as grd_file:
    content = grd_file.read()
  # One alternation of all names, each escaped so it matches literally.
  alternatives = '|'.join(re.escape(name) for name in string_names)
  message_re = r'<message [^>]*name="(%s)".*?</message>\s*' % alternatives
  # DOTALL lets '.*?' span multi-line <message> bodies.
  content = re.sub(message_re, '', content, flags=re.DOTALL)
  with open(grd_path, 'w') as grd_file:
    grd_file.write(content)
def ParseArgs(args):
  """Parse command-line arguments; return the list of GRD paths.

  Exits with a usage error (via OptionParser.error) when no GRD path
  was given.
  """
  parser = optparse.OptionParser(
      usage='usage: %prog GRD_PATH...',
      description='Remove strings from GRD files. Reads string '
      'names from stdin, and removes strings with those names from the listed '
      'GRD files.')
  _options, grd_paths = parser.parse_args(args=args)
  if not grd_paths:
    parser.error('must provide GRD_PATH argument(s)')
  return grd_paths
def main(args=None):
  """Read string names from stdin and strip them from the given GRD files.

  Args:
    args: command-line argument list (GRD paths); None means sys.argv.
  """
  grd_paths = ParseArgs(args)
  # Materialize the names into a list so they can be reused for every GRD
  # file.  The original filter(None, map(str.strip, ...)) yields a lazy
  # iterator on Python 3, which would be exhausted after the first file.
  strings_to_remove = [line.strip() for line in sys.stdin.readlines()
                       if line.strip()]
  for grd_path in grd_paths:
    RemoveStrings(grd_path, strings_to_remove)
if __name__ == '__main__':
main()
|
spasovski/zamboni | refs/heads/master | mkt/account/tests/test_api.py | 2 | # -*- coding: utf-8 -*-
import collections
import json
import uuid
from urlparse import urlparse
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.utils.http import urlencode
from mock import patch, Mock
from nose.tools import eq_, ok_
from amo.tests import TestCase, app_factory
from mkt.account.views import MineMixin
from mkt.api.tests.test_oauth import RestOAuth
from mkt.constants.apps import INSTALL_TYPE_REVIEWER
from mkt.site.fixtures import fixture
from mkt.webapps.models import Installed
from users.models import UserProfile
class TestPotatoCaptcha(object):
    """Mixin with a helper asserting that an endpoint protected by the
    "potato captcha" honeypot rejected the POSTed data with a 400."""
    def _test_bad_api_potato_data(self, response, data=None):
        # Parse the response lazily so callers may pass pre-parsed data.
        if not data:
            data = json.loads(response.content)
        eq_(400, response.status_code)
        ok_('non_field_errors' in data)
        eq_(data['non_field_errors'], [u'Form could not be submitted.'])
# Minimal stand-in base class so FakeResource's MRO gives MineMixin a
# base get_object to override (patched in TestMine).
class FakeResourceBase(object):
    pass
class FakeResource(MineMixin, FakeResourceBase):
    """Test double exercising MineMixin: captures the view kwargs and
    request that a real DRF view would carry."""
    def __init__(self, pk, request):
        self.kwargs = {'pk': pk}
        self.request = request
class TestMine(TestCase):
    """MineMixin should rewrite pk='mine' to the authenticated user's id
    and leave numeric pks untouched."""
    fixtures = fixture('user_2519')
    def setUp(self):
        self.request = Mock()
        self.request.amo_user = UserProfile.objects.get(id=2519)
    @patch.object(FakeResourceBase, 'get_object', create=True)
    def test_get_object(self, mocked_get_object):
        # Numeric pk: passed through unchanged.
        r = FakeResource(999, self.request)
        r.get_object()
        eq_(r.kwargs['pk'], 999)
        # 'mine': replaced by the amo_user's id.
        r = FakeResource('mine', self.request)
        r.get_object()
        eq_(r.kwargs['pk'], 2519)
class TestPermission(RestOAuth):
    """Exercise the account-permissions endpoint: only the owner may read
    it, and each permission flag mirrors a granted backend permission."""
    fixtures = fixture('user_2519', 'user_10482')
    def setUp(self):
        super(TestPermission, self).setUp()
        self.get_url = reverse('account-permissions', kwargs={'pk': 2519})
        self.user = UserProfile.objects.get(pk=2519)
    def test_verbs(self):
        # NOTE(review): ('get') is a plain string, not a 1-tuple — confirm
        # _allowed_verbs treats it as intended.
        self._allowed_verbs(self.get_url, ('get'))
    def test_other(self):
        self.get_url = reverse('account-permissions', kwargs={'pk': 10482})
        eq_(self.client.get(self.get_url).status_code, 403)
    def test_no_permissions(self):
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200, res.content)
        self.assertSetEqual(
            ['admin', 'developer', 'localizer', 'lookup', 'curator',
             'reviewer', 'webpay', 'stats', 'revenue_stats'],
            res.json['permissions'].keys()
        )
        ok_(not all(res.json['permissions'].values()))
    def test_some_permission(self):
        self.grant_permission(self.user, 'Localizers:%')
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        ok_(res.json['permissions']['localizer'])
    def test_mine(self):
        self.get_url = reverse('account-permissions', kwargs={'pk': 'mine'})
        self.test_some_permission()
    def test_mine_anon(self):
        self.get_url = reverse('account-permissions', kwargs={'pk': 'mine'})
        res = self.anon.get(self.get_url)
        eq_(res.status_code, 403)
    def test_publisher(self):
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        ok_(not res.json['permissions']['curator'])
    def test_publisher_ok(self):
        self.grant_permission(self.user, 'Collections:Curate')
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        ok_(res.json['permissions']['curator'])
    def test_webpay(self):
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        ok_(not res.json['permissions']['webpay'])
    def test_webpay_ok(self):
        # 'webpay' requires both permissions to be granted.
        self.grant_permission(self.user, 'ProductIcon:Create')
        self.grant_permission(self.user, 'Transaction:NotifyFailure')
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        ok_(res.json['permissions']['webpay'])
    def test_stats(self):
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        ok_(not res.json['permissions']['stats'])
    def test_stats_ok(self):
        self.grant_permission(self.user, 'Stats:View')
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        ok_(res.json['permissions']['stats'])
    def test_revenue_stats(self):
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        ok_(not res.json['permissions']['revenue_stats'])
    def test_revenue_stats_ok(self):
        self.grant_permission(self.user, 'RevenueStats:View')
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        ok_(res.json['permissions']['revenue_stats'])
class TestAccount(RestOAuth):
    """Exercise account-settings: readable/writable only by the owner, and
    only display_name may be modified."""
    fixtures = fixture('user_2519', 'user_10482', 'webapp_337141')
    def setUp(self):
        super(TestAccount, self).setUp()
        self.url = reverse('account-settings', kwargs={'pk': 2519})
        self.user = UserProfile.objects.get(pk=2519)
    def test_verbs(self):
        self._allowed_verbs(self.url, ('get', 'patch', 'put'))
    def test_not_allowed(self):
        eq_(self.anon.get(self.url).status_code, 403)
    def test_allowed(self):
        res = self.client.get(self.url)
        eq_(res.status_code, 200, res.content)
        data = json.loads(res.content)
        eq_(data['display_name'], self.user.display_name)
    def test_other(self):
        # Another user's settings must be forbidden.
        url = reverse('account-settings', kwargs={'pk': 10482})
        eq_(self.client.get(url).status_code, 403)
    def test_own(self):
        url = reverse('account-settings', kwargs={'pk': 'mine'})
        res = self.client.get(url)
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(data['display_name'], self.user.display_name)
    def test_patch(self):
        res = self.client.patch(self.url,
                                data=json.dumps({'display_name': 'foo'}))
        eq_(res.status_code, 200)
        user = UserProfile.objects.get(pk=self.user.pk)
        eq_(user.display_name, 'foo')
    def test_put(self):
        res = self.client.put(self.url,
                              data=json.dumps({'display_name': 'foo'}))
        eq_(res.status_code, 200)
        user = UserProfile.objects.get(pk=self.user.pk)
        eq_(user.display_name, 'foo')
        eq_(user.username, self.user.username)  # Did not change.
    def test_patch_extra_fields(self):
        # Read-only fields in the payload are silently ignored.
        res = self.client.patch(self.url,
                                data=json.dumps({'display_name': 'foo',
                                                 'username': 'bob'}))
        eq_(res.status_code, 200)
        user = UserProfile.objects.get(pk=self.user.pk)
        eq_(user.display_name, 'foo')  # Got changed successfully.
        eq_(user.username, self.user.username)  # Did not change.
    def test_patch_other(self):
        url = reverse('account-settings', kwargs={'pk': 10482})
        res = self.client.patch(url, data=json.dumps({'display_name': 'foo'}))
        eq_(res.status_code, 403)
class TestInstalled(RestOAuth):
    """Exercise the installed-apps listing: ownership filtering, reviewer
    installs excluded, and limit/offset pagination metadata."""
    fixtures = fixture('user_2519', 'user_10482', 'webapp_337141')
    def setUp(self):
        super(TestInstalled, self).setUp()
        self.list_url = reverse('installed-apps')
        self.user = UserProfile.objects.get(pk=2519)
    def test_verbs(self):
        self._allowed_verbs(self.list_url, ('get'))
    def test_not_allowed(self):
        eq_(self.anon.get(self.list_url).status_code, 403)
    def test_installed(self):
        ins = Installed.objects.create(user=self.user, addon_id=337141)
        res = self.client.get(self.list_url)
        eq_(res.status_code, 200, res.content)
        data = json.loads(res.content)
        eq_(data['meta']['total_count'], 1)
        eq_(data['objects'][0]['id'], ins.addon.pk)
        eq_(data['objects'][0]['user'],
            {'developed': False, 'purchased': False, 'installed': True})
    def test_installed_pagination(self):
        ins1 = Installed.objects.create(user=self.user, addon=app_factory())
        ins2 = Installed.objects.create(user=self.user, addon=app_factory())
        ins3 = Installed.objects.create(user=self.user, addon=app_factory())
        # First page: two objects, a next link, no previous link.
        res = self.client.get(self.list_url, {'limit': 2})
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(len(data['objects']), 2)
        eq_(data['objects'][0]['id'], ins1.addon.id)
        eq_(data['objects'][1]['id'], ins2.addon.id)
        eq_(data['meta']['total_count'], 3)
        eq_(data['meta']['limit'], 2)
        eq_(data['meta']['previous'], None)
        eq_(data['meta']['offset'], 0)
        # NOTE(review): 'next' shadows the builtin.
        next = urlparse(data['meta']['next'])
        eq_(next.path, self.list_url)
        eq_(QueryDict(next.query).dict(), {u'limit': u'2', u'offset': u'2'})
        # Second page: remaining object, a previous link, no next link.
        res = self.client.get(self.list_url, {'limit': 2, 'offset': 2})
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(len(data['objects']), 1)
        eq_(data['objects'][0]['id'], ins3.addon.id)
        eq_(data['meta']['total_count'], 3)
        eq_(data['meta']['limit'], 2)
        prev = urlparse(data['meta']['previous'])
        # NOTE(review): probably meant prev.path here — next.path was
        # already asserted above; confirm and fix in a follow-up.
        eq_(next.path, self.list_url)
        eq_(QueryDict(prev.query).dict(), {u'limit': u'2', u'offset': u'0'})
        eq_(data['meta']['offset'], 2)
        eq_(data['meta']['next'], None)
    def not_there(self):
        # Helper (not a test): assert the listing is empty for self.user.
        res = self.client.get(self.list_url)
        eq_(res.status_code, 200, res.content)
        data = json.loads(res.content)
        eq_(data['meta']['total_count'], 0)
    def test_installed_other(self):
        Installed.objects.create(user_id=10482, addon_id=337141)
        self.not_there()
    def test_installed_reviewer(self):
        Installed.objects.create(user=self.user, addon_id=337141,
                                 install_type=INSTALL_TYPE_REVIEWER)
        self.not_there()
class FakeUUID(object):
    # Deterministic uuid4 stand-in so the login token asserted in
    # TestLoginHandler._test_login is stable.
    hex = '000000'
# SECRET_KEY is pinned so the HMAC-style token asserted below is stable.
@patch.object(settings, 'SECRET_KEY', 'gubbish')
class TestLoginHandler(TestCase):
    """Exercise the Persona/BrowserID login endpoint."""
    def setUp(self):
        super(TestLoginHandler, self).setUp()
        self.url = reverse('account-login')
    def post(self, data):
        return self.client.post(self.url, json.dumps(data),
                                content_type='application/json')
    @patch.object(uuid, 'uuid4', FakeUUID)
    @patch('requests.post')
    def _test_login(self, http_request):
        # Shared helper: fake a successful BrowserID verifier response
        # and assert the login succeeds with the expected token.
        FakeResponse = collections.namedtuple('FakeResponse',
                                              'status_code content')
        http_request.return_value = FakeResponse(200, json.dumps(
            {'status': 'okay', 'email': 'cvan@mozilla.com'}))
        res = self.post({'assertion': 'fake-assertion',
                         'audience': 'fakeamo.org'})
        eq_(res.status_code, 201)
        data = json.loads(res.content)
        eq_(data['token'],
            'cvan@mozilla.com,95c9063d9f249aacfe5697fc83192ed6480c01463e2a80b3'
            '5af5ecaef11754700f4be33818d0e83a0cfc2cab365d60ba53b3c2b9f8f6589d1'
            'c43e9bbb876eef0,000000')
        return data
    def test_login_new_user_success(self):
        data = self._test_login()
        ok_(not any(data['permissions'].values()))
    def test_login_existing_user_success(self):
        profile = UserProfile.objects.create(email='cvan@mozilla.com')
        profile.create_django_user(
            backend='django_browserid.auth.BrowserIDBackend')
        self.grant_permission(profile, 'Apps:Review')
        data = self._test_login()
        eq_(data['permissions'],
            {'admin': False,
             'developer': False,
             'localizer': False,
             'lookup': False,
             'curator': False,
             'reviewer': True,
             'webpay': False,
             'stats': False,
             'revenue_stats': False})
    @patch('requests.post')
    def test_login_failure(self, http_request):
        # Verifier says the assertion is bad => 403.
        FakeResponse = collections.namedtuple('FakeResponse',
                                              'status_code content')
        http_request.return_value = FakeResponse(200, json.dumps(
            {'status': 'busted'}))
        res = self.post({'assertion': 'fake-assertion',
                         'audience': 'fakeamo.org'})
        eq_(res.status_code, 403)
    def test_login_old_user_new_email(self):
        """
        Login is based on (and reports) the email in UserProfile.
        """
        profile = UserProfile.objects.create(email='cvan@mozilla.com')
        profile.create_django_user(
            backend='django_browserid.auth.BrowserIDBackend')
        profile.user.email = 'old_email@example.com'
        profile.user.save()
        self._test_login()
    def test_login_empty(self):
        res = self.post({})
        data = json.loads(res.content)
        eq_(res.status_code, 400)
        assert 'assertion' in data
class TestFeedbackHandler(TestPotatoCaptcha, RestOAuth):
    """Exercise the account-feedback endpoint: mail is sent with the
    feedback, the honeypot field is enforced, and anonymous posts work."""
    def setUp(self):
        super(TestFeedbackHandler, self).setUp()
        self.url = reverse('account-feedback')
        self.user = UserProfile.objects.get(pk=2519)
        self.default_data = {
            'chromeless': 'no',
            'feedback': u'Hér€ is whàt I rælly think.',
            'platform': u'Desktøp',
            'from_url': '/feedback',
            'sprout': 'potato'
        }
        self.headers = {
            'HTTP_USER_AGENT': 'Fiiia-fox',
            'REMOTE_ADDR': '48.151.623.42'
        }
    def _call(self, anonymous=False, data=None):
        # POST default_data (optionally overridden) as the authenticated
        # or anonymous client; returns (response, parsed json).
        post_data = self.default_data.copy()
        client = self.anon if anonymous else self.client
        if data:
            post_data.update(data)
        res = client.post(self.url, data=json.dumps(post_data),
                          **self.headers)
        return res, json.loads(res.content)
    def _test_success(self, res, data):
        eq_(201, res.status_code)
        fields = self.default_data.copy()
        # PotatoCaptcha field shouldn't be present in returned data.
        del fields['sprout']
        ok_('sprout' not in data)
        # Rest of the fields should all be here.
        for name in fields.keys():
            eq_(fields[name], data[name])
        eq_(len(mail.outbox), 1)
        assert self.default_data['feedback'] in mail.outbox[0].body
        assert self.headers['REMOTE_ADDR'] in mail.outbox[0].body
    def test_send(self):
        res, data = self._call()
        self._test_success(res, data)
        eq_(unicode(self.user), data['user'])
        email = mail.outbox[0]
        eq_(email.from_email, self.user.email)
        assert self.user.username in email.body
        assert self.user.name in email.body
        assert unicode(self.user.pk) in email.body
        assert self.user.email in email.body
    def test_send_urlencode(self):
        # Same flow with a form-encoded body instead of JSON.
        self.headers['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
        post_data = self.default_data.copy()
        res = self.client.post(self.url, data=urlencode(post_data),
                               **self.headers)
        data = json.loads(res.content)
        self._test_success(res, data)
        eq_(unicode(self.user), data['user'])
        eq_(mail.outbox[0].from_email, self.user.email)
    def test_send_without_platform(self):
        # Missing platform falls back to the ?dev= query parameter.
        del self.default_data['platform']
        self.url += '?dev=platfoo'
        res, data = self._call()
        self._test_success(res, data)
        assert 'platfoo' in mail.outbox[0].body
    def test_send_anonymous(self):
        res, data = self._call(anonymous=True)
        self._test_success(res, data)
        assert not data['user']
        assert 'Anonymous' in mail.outbox[0].body
        eq_(settings.NOBODY_EMAIL, mail.outbox[0].from_email)
    def test_send_potato(self):
        # Filling either honeypot field must fail the captcha check.
        tuber_res, tuber_data = self._call(data={'tuber': 'potat-toh'},
                                           anonymous=True)
        potato_res, potato_data = self._call(data={'sprout': 'potat-toh'},
                                             anonymous=True)
        self._test_bad_api_potato_data(tuber_res, tuber_data)
        self._test_bad_api_potato_data(potato_res, potato_data)
    def test_missing_optional_field(self):
        res, data = self._call(data={'platform': None})
        eq_(201, res.status_code)
    def test_send_bad_data(self):
        """
        One test to ensure that Feedback API is doing its validation duties.
        """
        res, data = self._call(data={'feedback': None})
        eq_(400, res.status_code)
        assert 'feedback' in data
class TestNewsletter(RestOAuth):
    """Exercise the newsletter signup endpoint: validation, auth
    requirement, and the exact basket.subscribe call made on success."""
    def setUp(self):
        super(TestNewsletter, self).setUp()
        self.url = reverse('account-newsletter')
    @patch('basket.subscribe')
    def test_signup_bad(self, subscribe):
        res = self.client.post(self.url,
                               data=json.dumps({'email': '!not_an_email'}))
        eq_(res.status_code, 400)
        ok_(not subscribe.called)
    @patch('basket.subscribe')
    def test_signup_empty(self, subscribe):
        res = self.client.post(self.url)
        eq_(res.status_code, 400)
        ok_(not subscribe.called)
    @patch('basket.subscribe')
    def test_signup_anonymous(self, subscribe):
        res = self.anon.post(self.url)
        eq_(res.status_code, 403)
        ok_(not subscribe.called)
    @patch('basket.subscribe')
    def test_signup(self, subscribe):
        res = self.client.post(self.url,
                               data=json.dumps({'email': 'bob@example.com'}))
        eq_(res.status_code, 204)
        subscribe.assert_called_with(
            'bob@example.com', 'marketplace', lang='en-US', country='us',
            trigger_welcome='Y', optin='Y', format='H')
    @patch('basket.subscribe')
    def test_signup_plus(self, subscribe):
        # '+' addressing must survive validation and be passed through.
        res = self.client.post(
            self.url,
            data=json.dumps({'email': 'bob+totally+real@example.com'}))
        subscribe.assert_called_with(
            'bob+totally+real@example.com', 'marketplace', lang='en-US',
            country='us', trigger_welcome='Y', optin='Y', format='H')
        eq_(res.status_code, 204)
|
wdurhamh/statsmodels | refs/heads/master | statsmodels/sandbox/panel/random_panel.py | 33 | # -*- coding: utf-8 -*-
"""Generate a random process with panel structure
Created on Sat Dec 17 22:15:27 2011
Author: Josef Perktold
Notes
-----
* written with unbalanced panels in mind, but not flexible enough yet
* need more shortcuts and options for balanced panel
* need to add random intercept or coefficients
* only one-way (repeated measures) so far
"""
import numpy as np
from . import correlation_structures as cs
class PanelSample(object):
    '''data generating process for panel with within correlation

    allows various within correlation structures, but no random intercept yet

    Parameters
    ----------
    nobs : int
        total number of observations
    k_vars : int
        number of explanatory variables to create in exog, including constant
    n_groups int
        number of groups in balanced sample
    exog : None or ndarray
        default is None, in which case a exog is created
    within : bool
        If True (default), then the exog vary within a group. If False, then
        only variation across groups is used.
        TODO: this option needs more work
    corr_structure : ndarray or ??
        Default is np.eye.
    corr_args : tuple
        arguments for the corr_structure
    scale : float
        scale of noise, standard deviation of normal distribution
    seed : None or int
        If seed is given, then this is used to create the random numbers for
        the sample.

    Notes
    -----
    The behavior for panel robust covariance estimators seems to differ by
    a large amount by whether exog have mostly within group or across group
    variation. I do not understand why this should be the case from the theory,
    and this would warrant more investigation.

    This is just used in one example so far and needs more usage to see what
    will be useful to add.

    '''
    def __init__(self, nobs, k_vars, n_groups, exog=None, within=True,
                 corr_structure=np.eye, corr_args=(), scale=1, seed=None):
        # Round nobs down to a multiple of n_groups so every group has
        # the same number of observations.
        nobs_i = nobs//n_groups
        nobs = nobs_i * n_groups #make balanced
        self.nobs = nobs
        self.nobs_i = nobs_i
        self.n_groups = n_groups
        self.k_vars = k_vars
        self.corr_structure = corr_structure
        # Group label per observation, and the start index of each group.
        self.groups = np.repeat(np.arange(n_groups), nobs_i)
        self.group_indices = np.arange(n_groups+1) * nobs_i #check +1
        if exog is None:
            if within:
                # same within-group trend repeated for every group
                #t = np.tile(np.linspace(-1,1,nobs_i), n_groups)
                t = np.tile(np.linspace(0, 2, nobs_i), n_groups)
                #rs2 = np.random.RandomState(9876)
                #t = 1 + 0.3 * rs2.randn(nobs_i * n_groups)
                #mix within and across variation
                #t += np.repeat(np.linspace(-1,1,nobs_i), n_groups)
            else:
                #no within group variation,
                t = np.repeat(np.linspace(-1,1,nobs_i), n_groups)
            # polynomial-in-t design matrix: columns t**0 .. t**(k_vars-1)
            exog = t[:,None]**np.arange(k_vars)
        self.exog = exog
        #self.y_true = exog.sum(1) #all coefficients equal 1,
        #moved to make random coefficients
        #initialize
        self.y_true = None
        self.beta = None
        if seed is None:
            seed = np.random.randint(0, 999999)
        self.seed = seed
        self.random_state = np.random.RandomState(seed)
        #this makes overwriting difficult, move to method?
        # per-observation noise std and within-group covariance matrix
        self.std = scale * np.ones(nobs_i)
        corr = self.corr_structure(nobs_i, *corr_args)
        self.cov = cs.corr2cov(corr, self.std)
        self.group_means = np.zeros(n_groups)
    def get_y_true(self):
        # Mean response: row sums if no beta is set (all coefficients 1),
        # otherwise the linear prediction exog @ beta.
        if self.beta is None:
            self.y_true = self.exog.sum(1)
        else:
            self.y_true = np.dot(self.exog, self.beta)
    def generate_panel(self):
        '''
        generate endog for a random panel dataset with within correlation

        '''
        # NOTE(review): local 'random' is bound but unused; draws below use
        # self.random_state directly.
        random = self.random_state
        if self.y_true is None:
            self.get_y_true()
        nobs_i = self.nobs_i
        n_groups = self.n_groups
        use_balanced = True
        if use_balanced: #much faster for balanced case
            # one multivariate-normal draw per group, flattened to nobs
            noise = self.random_state.multivariate_normal(np.zeros(nobs_i),
                                                          self.cov,
                                                          size=n_groups).ravel()
            #need to add self.group_means
            noise += np.repeat(self.group_means, nobs_i)
        else:
            # group-by-group draws (kept for the unbalanced case)
            noise = np.empty(self.nobs, np.float64)
            noise.fill(np.nan)
            for ii in range(self.n_groups):
                #print ii,
                idx, idxupp = self.group_indices[ii:ii+2]
                #print idx, idxupp
                mean_i = self.group_means[ii]
                noise[idx:idxupp] = self.random_state.multivariate_normal(
                    mean_i * np.ones(self.nobs_i), self.cov)
        endog = self.y_true + noise
        return endog
if __name__ == '__main__':
    # No script-level demo yet; the module is meant to be imported.
    pass
|
mineo/pacman | refs/heads/master | test/pacman/tests/upgrade032.py | 13 | self.description = "Install packages explicitly"
# Local database starts with pkg1 installed as a dependency (reason = 1).
lp1 = pmpkg("pkg1")
lp1.reason = 1
self.addpkg2db("local", lp1)

# Two package files to install: an upgrade of pkg1 and a brand-new pkg2.
p1 = pmpkg("pkg1", "1.0-2")
p2 = pmpkg("pkg2", "1.0-2")
for p in (p1, p2):
    self.addpkg(p)

# fix: the bare tuple `for p in p1, p2` inside a list comprehension is
# Python-2-only syntax (SyntaxError on Python 3); parenthesizing it is
# valid on both.
self.args = "-U --asexplicit %s" % " ".join([p.filename() for p in (p1, p2)])

self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=pkg1|1.0-2")
self.addrule("PKG_VERSION=pkg2|1.0-2")
# --asexplicit must reset the install reason to explicit (0) for both.
self.addrule("PKG_REASON=pkg1|0")
self.addrule("PKG_REASON=pkg2|0")
|
gardner/youtube-dl | refs/heads/master | youtube_dl/extractor/hostingbulk.py | 75 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
urlencode_postdata,
)
class HostingBulkIE(InfoExtractor):
    """Extractor for hostingbulk.com hosted videos.

    Fetches the video page (forcing English so the file-deleted check
    matches), scrapes the metadata, then replays the hidden download
    form and takes the redirect target as the media URL.
    """
    _VALID_URL = r'''(?x)
        https?://(?:www\.)?hostingbulk\.com/
        (?:embed-)?(?P<id>[A-Za-z0-9]{12})(?:-\d+x\d+)?\.html'''
    _FILE_DELETED_REGEX = r'<b>File Not Found</b>'
    _TEST = {
        'url': 'http://hostingbulk.com/n0ulw1hv20fm.html',
        'md5': '6c8653c8ecf7ebfa83b76e24b7b2fe3f',
        'info_dict': {
            'id': 'n0ulw1hv20fm',
            'ext': 'mp4',
            'title': 'md5:5afeba33f48ec87219c269e054afd622',
            'filesize': 6816081,
            # raw string fixes the invalid '\.' escape warning
            'thumbnail': r're:^http://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        url = 'http://hostingbulk.com/{0}.html'.format(video_id)

        # Force English via cookie so _FILE_DELETED_REGEX matches
        # regardless of the visitor's default language.
        request = compat_urllib_request.Request(
            url, headers={'Cookie': 'lang=english'})
        webpage = self._download_webpage(request, video_id)

        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
            raise ExtractorError('Video %s does not exist' % video_id,
                                 expected=True)

        title = self._html_search_regex(r'<h3>(.*?)</h3>', webpage, 'title')
        filesize = int_or_none(
            self._search_regex(
                r'<small>\((\d+)\sbytes?\)</small>',
                webpage,
                'filesize',
                fatal=False
            )
        )
        thumbnail = self._search_regex(
            r'<img src="([^"]+)".+?class="pic"',
            webpage, 'thumbnail', fatal=False)

        fields = self._hidden_inputs(webpage)

        # Replay the hidden form; the response redirect carries the
        # actual video URL.
        request = compat_urllib_request.Request(url, urlencode_postdata(fields))
        request.add_header('Content-type', 'application/x-www-form-urlencoded')
        # typo fix in the progress note: "Submiting" -> "Submitting"
        response = self._request_webpage(request, video_id,
                                         'Submitting download request')
        video_url = response.geturl()

        formats = [{
            'format_id': 'sd',
            'filesize': filesize,
            'url': video_url,
        }]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
|
cgstudiomap/cgstudiomap | refs/heads/develop | main/eggs/Shapely-1.5.13-py2.7.egg/shapely/geometry/proxy.py | 17 | """Proxy for coordinates stored outside Shapely geometries
"""
from shapely.geometry.base import deserialize_wkb, EMPTY
from shapely.geos import lgeos
class CachingGeometryProxy(object):
    """Cache a GEOS geometry built from external coordinates, rebuilding
    it whenever the context's repr changes."""

    context = None
    factory = None
    __geom__ = EMPTY
    _gtag = None

    def __init__(self, context):
        self.context = context

    @property
    def _is_empty(self):
        return self.__geom__ in (EMPTY, None)

    def empty(self, val=EMPTY):
        # Destroy the GEOS geometry (if any) before rebinding the handle.
        geom = self.__geom__
        if geom and not self._is_empty:
            lgeos.GEOSGeom_destroy(geom)
        self.__geom__ = val

    @property
    def _geom(self):
        """Keeps the GEOS geometry in synch with the context."""
        current_tag = self.gtag()
        if self._is_empty or current_tag != self._gtag:
            self.empty()
            self.__geom__, n = self.factory(self.context)
            self._gtag = current_tag
        return self.__geom__

    def gtag(self):
        # Cheap change-detection tag for the context.
        return hash(repr(self.context))
class PolygonProxy(CachingGeometryProxy):

    @property
    def _geom(self):
        """Keeps the GEOS polygon in synch with the (shell, holes) context."""
        current_tag = self.gtag()
        if self._is_empty or current_tag != self._gtag:
            self.empty()
            parts = self.context
            # polygon factories take the exterior ring and the hole list
            self.__geom__, n = self.factory(parts[0], parts[1])
            self._gtag = current_tag
        return self.__geom__
|
laperry1/android_external_chromium_org | refs/heads/cm-12.1 | third_party/libxml/src/gentest.py | 298 | #!/usr/bin/python -u
#
# generate a tester program for the API
#
import sys
import os
import string
try:
import libxml2
except:
print "libxml2 python bindings not available, skipping testapi.c generation"
sys.exit(0)
if len(sys.argv) > 1:
srcPref = sys.argv[1] + '/'
else:
srcPref = ''
#
# Modules we want to skip in API test
#
skipped_modules = [ "SAX", "xlink", "threads", "globals",
"xmlmemory", "xmlversion", "xmlexports",
#deprecated
"DOCBparser",
]
#
# defines for each module
#
modules_defines = {
"HTMLparser": "LIBXML_HTML_ENABLED",
"catalog": "LIBXML_CATALOG_ENABLED",
"xmlreader": "LIBXML_READER_ENABLED",
"relaxng": "LIBXML_SCHEMAS_ENABLED",
"schemasInternals": "LIBXML_SCHEMAS_ENABLED",
"xmlschemas": "LIBXML_SCHEMAS_ENABLED",
"xmlschemastypes": "LIBXML_SCHEMAS_ENABLED",
"xpath": "LIBXML_XPATH_ENABLED",
"xpathInternals": "LIBXML_XPATH_ENABLED",
"xinclude": "LIBXML_XINCLUDE_ENABLED",
"xpointer": "LIBXML_XPTR_ENABLED",
"xmlregexp" : "LIBXML_REGEXP_ENABLED",
"xmlautomata" : "LIBXML_AUTOMATA_ENABLED",
"xmlsave" : "LIBXML_OUTPUT_ENABLED",
"DOCBparser" : "LIBXML_DOCB_ENABLED",
"xmlmodule" : "LIBXML_MODULES_ENABLED",
"pattern" : "LIBXML_PATTERN_ENABLED",
"schematron" : "LIBXML_SCHEMATRON_ENABLED",
}
#
# defines for specific functions
#
function_defines = {
"htmlDefaultSAXHandlerInit": "LIBXML_HTML_ENABLED",
"xmlSAX2EndElement" : "LIBXML_SAX1_ENABLED",
"xmlSAX2StartElement" : "LIBXML_SAX1_ENABLED",
"xmlSAXDefaultVersion" : "LIBXML_SAX1_ENABLED",
"UTF8Toisolat1" : "LIBXML_OUTPUT_ENABLED",
"xmlCleanupPredefinedEntities": "LIBXML_LEGACY_ENABLED",
"xmlInitializePredefinedEntities": "LIBXML_LEGACY_ENABLED",
"xmlSetFeature": "LIBXML_LEGACY_ENABLED",
"xmlGetFeature": "LIBXML_LEGACY_ENABLED",
"xmlGetFeaturesList": "LIBXML_LEGACY_ENABLED",
"xmlIOParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDoc": "LIBXML_SAX1_ENABLED",
"xmlParseMemory": "LIBXML_SAX1_ENABLED",
"xmlRecoverDoc": "LIBXML_SAX1_ENABLED",
"xmlParseFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseFileWithData": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDoc": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDTD": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseFile": "LIBXML_SAX1_ENABLED",
"xmlParseEntity": "LIBXML_SAX1_ENABLED",
"xmlParseExternalEntity": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemoryWithData": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemory": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemoryRecover": "LIBXML_SAX1_ENABLED",
"xmlSetupParserForBuffer": "LIBXML_SAX1_ENABLED",
"xmlStopParser": "LIBXML_PUSH_ENABLED",
"xmlAttrSerializeTxtContent": "LIBXML_OUTPUT_ENABLED",
"xmlSAXParseFile": "LIBXML_SAX1_ENABLED",
"xmlSAXParseEntity": "LIBXML_SAX1_ENABLED",
"xmlNewTextChild": "LIBXML_TREE_ENABLED",
"xmlNewDocRawNode": "LIBXML_TREE_ENABLED",
"xmlNewProp": "LIBXML_TREE_ENABLED",
"xmlReconciliateNs": "LIBXML_TREE_ENABLED",
"xmlValidateNCName": "LIBXML_TREE_ENABLED",
"xmlValidateNMToken": "LIBXML_TREE_ENABLED",
"xmlValidateName": "LIBXML_TREE_ENABLED",
"xmlNewChild": "LIBXML_TREE_ENABLED",
"xmlValidateQName": "LIBXML_TREE_ENABLED",
"xmlSprintfElementContent": "LIBXML_OUTPUT_ENABLED",
"xmlValidGetPotentialChildren" : "LIBXML_VALID_ENABLED",
"xmlValidGetValidElements" : "LIBXML_VALID_ENABLED",
"docbDefaultSAXHandlerInit" : "LIBXML_DOCB_ENABLED",
"xmlTextReaderPreservePattern" : "LIBXML_PATTERN_ENABLED",
}
#
# Some functions really need to be skipped for the tests.
#
skipped_functions = [
# block on I/O
"xmlFdRead", "xmlReadFd", "xmlCtxtReadFd",
"htmlFdRead", "htmlReadFd", "htmlCtxtReadFd",
"xmlReaderNewFd", "xmlReaderForFd",
"xmlIORead", "xmlReadIO", "xmlCtxtReadIO",
"htmlIORead", "htmlReadIO", "htmlCtxtReadIO",
"xmlReaderNewIO", "xmlBufferDump", "xmlNanoFTPConnect",
"xmlNanoFTPConnectTo", "xmlNanoHTTPMethod", "xmlNanoHTTPMethodRedir",
# Complex I/O APIs
"xmlCreateIOParserCtxt", "xmlParserInputBufferCreateIO",
"xmlRegisterInputCallbacks", "xmlReaderForIO",
"xmlOutputBufferCreateIO", "xmlRegisterOutputCallbacks",
"xmlSaveToIO", "xmlIOHTTPOpenW",
# library state cleanup, generate false leak informations and other
# troubles, heavillyb tested otherwise.
"xmlCleanupParser", "xmlRelaxNGCleanupTypes", "xmlSetListDoc",
"xmlSetTreeDoc", "xmlUnlinkNode",
# hard to avoid leaks in the tests
"xmlStrcat", "xmlStrncat", "xmlCatalogAddLocal", "xmlNewTextWriterDoc",
"xmlXPathNewValueTree", "xmlXPathWrapString",
# unimplemented
"xmlTextReaderReadInnerXml", "xmlTextReaderReadOuterXml",
"xmlTextReaderReadString",
# destructor
"xmlListDelete", "xmlOutputBufferClose", "xmlNanoFTPClose", "xmlNanoHTTPClose",
# deprecated
"xmlCatalogGetPublic", "xmlCatalogGetSystem", "xmlEncodeEntities",
"xmlNewGlobalNs", "xmlHandleEntity", "xmlNamespaceParseNCName",
"xmlNamespaceParseNSDef", "xmlNamespaceParseQName",
"xmlParseNamespace", "xmlParseQuotedString", "xmlParserHandleReference",
"xmlScanName",
"xmlDecodeEntities",
# allocators
"xmlMemFree",
# verbosity
"xmlCatalogSetDebug", "xmlShellPrintXPathError", "xmlShellPrintNode",
# Internal functions, no user space should really call them
"xmlParseAttribute", "xmlParseAttributeListDecl", "xmlParseName",
"xmlParseNmtoken", "xmlParseEntityValue", "xmlParseAttValue",
"xmlParseSystemLiteral", "xmlParsePubidLiteral", "xmlParseCharData",
"xmlParseExternalID", "xmlParseComment", "xmlParsePITarget", "xmlParsePI",
"xmlParseNotationDecl", "xmlParseEntityDecl", "xmlParseDefaultDecl",
"xmlParseNotationType", "xmlParseEnumerationType", "xmlParseEnumeratedType",
"xmlParseAttributeType", "xmlParseAttributeListDecl",
"xmlParseElementMixedContentDecl", "xmlParseElementChildrenContentDecl",
"xmlParseElementContentDecl", "xmlParseElementDecl", "xmlParseMarkupDecl",
"xmlParseCharRef", "xmlParseEntityRef", "xmlParseReference",
"xmlParsePEReference", "xmlParseDocTypeDecl", "xmlParseAttribute",
"xmlParseStartTag", "xmlParseEndTag", "xmlParseCDSect", "xmlParseContent",
"xmlParseElement", "xmlParseVersionNum", "xmlParseVersionInfo",
"xmlParseEncName", "xmlParseEncodingDecl", "xmlParseSDDecl",
"xmlParseXMLDecl", "xmlParseTextDecl", "xmlParseMisc",
"xmlParseExternalSubset", "xmlParserHandlePEReference",
"xmlSkipBlankChars",
]
#
# These functions have side effects on the global state
# and hence generate errors on memory allocation tests
#
skipped_memcheck = [ "xmlLoadCatalog", "xmlAddEncodingAlias",
"xmlSchemaInitTypes", "xmlNanoFTPProxy", "xmlNanoFTPScanProxy",
"xmlNanoHTTPScanProxy", "xmlResetLastError", "xmlCatalogConvert",
"xmlCatalogRemove", "xmlLoadCatalogs", "xmlCleanupCharEncodingHandlers",
"xmlInitCharEncodingHandlers", "xmlCatalogCleanup",
"xmlSchemaGetBuiltInType",
"htmlParseFile", "htmlCtxtReadFile", # loads the catalogs
"xmlTextReaderSchemaValidate", "xmlSchemaCleanupTypes", # initialize the schemas type system
"xmlCatalogResolve", "xmlIOParseDTD" # loads the catalogs
]
#
# Extra code needed for some test cases
#
extra_pre_call = {
"xmlSAXUserParseFile": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlSAXUserParseMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemoryRecover": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParserInputBufferCreateFd":
"if (fd >= 0) fd = -1;",
}
extra_post_call = {
"xmlAddChild":
"if (ret_val == NULL) { xmlFreeNode(cur) ; cur = NULL ; }",
"xmlAddEntity":
"if (ret_val != NULL) { xmlFreeNode(ret_val) ; ret_val = NULL; }",
"xmlAddChildList":
"if (ret_val == NULL) { xmlFreeNodeList(cur) ; cur = NULL ; }",
"xmlAddSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddNextSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddPrevSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlDocSetRootElement":
"if (doc == NULL) { xmlFreeNode(root) ; root = NULL ; }",
"xmlReplaceNode":
"""if (cur != NULL) {
xmlUnlinkNode(cur);
xmlFreeNode(cur) ; cur = NULL ; }
if (old != NULL) {
xmlUnlinkNode(old);
xmlFreeNode(old) ; old = NULL ; }
ret_val = NULL;""",
"xmlTextMerge":
"""if ((first != NULL) && (first->type != XML_TEXT_NODE)) {
xmlUnlinkNode(second);
xmlFreeNode(second) ; second = NULL ; }""",
"xmlBuildQName":
"""if ((ret_val != NULL) && (ret_val != ncname) &&
(ret_val != prefix) && (ret_val != memory))
xmlFree(ret_val);
ret_val = NULL;""",
"xmlNewDocElementContent":
"""xmlFreeDocElementContent(doc, ret_val); ret_val = NULL;""",
"xmlDictReference": "xmlDictFree(dict);",
# Functions which deallocates one of their parameters
"xmlXPathConvertBoolean": """val = NULL;""",
"xmlXPathConvertNumber": """val = NULL;""",
"xmlXPathConvertString": """val = NULL;""",
"xmlSaveFileTo": """buf = NULL;""",
"xmlSaveFormatFileTo": """buf = NULL;""",
"xmlIOParseDTD": "input = NULL;",
"xmlRemoveProp": "cur = NULL;",
"xmlNewNs": "if ((node == NULL) && (ret_val != NULL)) xmlFreeNs(ret_val);",
"xmlCopyNamespace": "if (ret_val != NULL) xmlFreeNs(ret_val);",
"xmlCopyNamespaceList": "if (ret_val != NULL) xmlFreeNsList(ret_val);",
"xmlNewTextWriter": "if (ret_val != NULL) out = NULL;",
"xmlNewTextWriterPushParser": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;} if (ret_val != NULL) ctxt = NULL;",
"xmlNewIOInputStream": "if (ret_val != NULL) input = NULL;",
"htmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"htmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseExtParsedEnt": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlDOMWrapAdoptNode": "if ((node != NULL) && (node->parent == NULL)) {xmlUnlinkNode(node);xmlFreeNode(node);node = NULL;}",
"xmlBufferSetAllocationScheme": "if ((buf != NULL) && (scheme == XML_BUFFER_ALLOC_IMMUTABLE) && (buf->content != NULL) && (buf->content != static_buf_content)) { xmlFree(buf->content); buf->content = NULL;}"
}
modules = []
def is_skipped_module(name):
    # Membership test against the configured skip list; int() keeps the
    # original 1/0 return convention.
    return int(name in skipped_modules)
def is_skipped_function(name):
    # Explicit skip list first.
    if name in skipped_functions:
        return 1
    # Do not test destructors: any function with 'Free' in its name.
    if 'Free' in name:
        return 1
    return 0
def is_skipped_memcheck(name):
    # Membership test against the memcheck skip list; int() keeps the
    # original 1/0 return convention.
    return int(name in skipped_memcheck)
# type name -> list of functions that need that (unsupported) type
missing_types = {}
def add_missing_type(name, func):
    """Record that function `func` uses the unsupported type `name`."""
    # setdefault replaces the original bare try/except, which shadowed
    # the builtin `list` and silently swallowed unrelated exceptions.
    missing_types.setdefault(name, []).append(func)
# parameter types for which a stub generator/destructor pair has already
# been emitted into testapi.c (see is_known_param_type)
generated_param_types = []
def add_generated_param_type(name):
    # remember the emitted stub so it is only generated once
    generated_param_types.append(name)
# return types for which a desret_ handler has already been emitted
generated_return_types = []
def add_generated_return_type(name):
    # remember the emitted handler so it is only generated once
    generated_return_types.append(name)
# module name -> list of functions that could not be tested
missing_functions = {}
missing_functions_nr = 0
def add_missing_functions(name, module):
    """Record untestable function `name` under `module`; bump the global counter."""
    global missing_functions_nr
    missing_functions_nr = missing_functions_nr + 1
    # setdefault replaces the original bare try/except, which shadowed
    # the builtin `list` and silently swallowed unrelated exceptions.
    missing_functions.setdefault(module, []).append(name)
#
# Provide the type generators and destructors for the parameters
#
def type_convert(str, name, info, module, function, pos):
    """Map a C type string to the generator type name used by the test
    generator.

    Pointer syntax is normalized (" *" -> "_ptr", spaces -> "_"), then a
    series of special cases keyed on the argument name, its doc info
    string, the module and the function refine the result (e.g. const
    char * file/URI arguments become 'filepath' or 'fileoutput').

    NOTE(review): the first parameter shadows the builtin `str`; kept
    as-is for compatibility.
    """
#    res = string.replace(str, " ", " ")
#    res = string.replace(str, " ", " ")
#    res = string.replace(str, " ", " ")
    res = string.replace(str, " *", "_ptr")
#    res = string.replace(str, "*", "_ptr")
    res = string.replace(res, " ", "_")
    if res == 'const_char_ptr':
        # strings that look like file names or URIs get path generators;
        # writing functions (Save/Create/Write/Fetch) get output paths
        if string.find(name, "file") != -1 or \
           string.find(name, "uri") != -1 or \
           string.find(name, "URI") != -1 or \
           string.find(info, "filename") != -1 or \
           string.find(info, "URI") != -1 or \
           string.find(info, "URL") != -1:
            if string.find(function, "Save") != -1 or \
               string.find(function, "Create") != -1 or \
               string.find(function, "Write") != -1 or \
               string.find(function, "Fetch") != -1:
                return('fileoutput')
            return('filepath')
    if res == 'void_ptr':
        # opaque void *: recognize the nanoftp/nanohttp contexts,
        # everything else is treated as user data
        if module == 'nanoftp' and name == 'ctx':
            return('xmlNanoFTPCtxtPtr')
        if function == 'xmlNanoFTPNewCtxt' or \
           function == 'xmlNanoFTPConnectTo' or \
           function == 'xmlNanoFTPOpen':
            return('xmlNanoFTPCtxtPtr')
        if module == 'nanohttp' and name == 'ctx':
            return('xmlNanoHTTPCtxtPtr')
        if function == 'xmlNanoHTTPMethod' or \
           function == 'xmlNanoHTTPMethodRedir' or \
           function == 'xmlNanoHTTPOpen' or \
           function == 'xmlNanoHTTPOpenRedir':
            return('xmlNanoHTTPCtxtPtr');
        if function == 'xmlIOHTTPOpen':
            return('xmlNanoHTTPCtxtPtr')
        if string.find(name, "data") != -1:
            return('userdata')
        if string.find(name, "user") != -1:
            return('userdata')
    if res == 'xmlDoc_ptr':
        res = 'xmlDocPtr'
    if res == 'xmlNode_ptr':
        res = 'xmlNodePtr'
    if res == 'xmlDict_ptr':
        res = 'xmlDictPtr'
    if res == 'xmlNodePtr' and pos != 0:
        # node arguments consumed/linked by the call need a dedicated
        # generator that hands over ownership
        if (function == 'xmlAddChild' and pos == 2) or \
           (function == 'xmlAddChildList' and pos == 2) or \
           (function == 'xmlAddNextSibling' and pos == 2) or \
           (function == 'xmlAddSibling' and pos == 2) or \
           (function == 'xmlDocSetRootElement' and pos == 2) or \
           (function == 'xmlReplaceNode' and pos == 2) or \
           (function == 'xmlTextMerge') or \
           (function == 'xmlAddPrevSibling' and pos == 2):
            return('xmlNodePtr_in');
    if res == 'const xmlBufferPtr':
        res = 'xmlBufferPtr'
    if res == 'xmlChar_ptr' and name == 'name' and \
       string.find(function, "EatName") != -1:
        return('eaten_name')
    if res == 'void_ptr*':
        res = 'void_ptr_ptr'
    if res == 'char_ptr*':
        res = 'char_ptr_ptr'
    if res == 'xmlChar_ptr*':
        res = 'xmlChar_ptr_ptr'
    if res == 'const_xmlChar_ptr*':
        res = 'const_xmlChar_ptr_ptr'
    if res == 'const_char_ptr*':
        res = 'const_char_ptr_ptr'
    if res == 'FILE_ptr' and module == 'debugXML':
        res = 'debug_FILE_ptr';
    if res == 'int' and name == 'options':
        if module == 'parser' or module == 'xmlreader':
            res = 'parseroptions'

    return res
# parameter types with hand-written generators already in testapi.c
known_param_types = []

def is_known_param_type(name, rtype):
    """Return 1 if a generator for parameter type `name` exists or can be
    stubbed out (unknown pointer types get a NULL-returning stub written
    to testapi.c), else 0."""
    global test
    # already hand-written in the testapi.c template?
    for type in known_param_types:
        if type == name:
            return 1
    # already auto-generated during this run?
    for type in generated_param_types:
        if type == name:
            return 1

    if name[-3:] == 'Ptr' or name[-4:] == '_ptr':
        if rtype[0:6] == 'const ':
            crtype = rtype[6:]
        else:
            crtype = rtype

        define = 0
        # NOTE(review): `module` is not a parameter here; it relies on
        # the global leaked by the enclosing script loop - confirm.
        if modules_defines.has_key(module):
            test.write("#ifdef %s\n" % (modules_defines[module]))
            define = 1
        # emit a stub generator (always NULL) and a no-op destructor
        test.write("""
#define gen_nb_%s 1
static %s gen_%s(int no ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
    return(NULL);
}
static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
}
""" % (name, crtype, name, name, rtype))
        if define == 1:
            test.write("#endif\n\n")
        add_generated_param_type(name)
        return 1

    return 0
#
# Provide the type destructors for the return values
#
# return types with hand-written desret_ handlers already in testapi.c
known_return_types = []
def is_known_return_type(name):
    # 1 when a desret_ handler for `name` exists, else 0 (int kept for
    # compatibility with the original convention)
    return int(name in known_return_types)
#
# Copy the beginning of the C test program result
#
try:
input = open("testapi.c", "r")
except:
input = open(srcPref + "testapi.c", "r")
test = open('testapi.c.new', 'w')
def compare_and_save():
    """Close the freshly generated testapi.c.new and move it over
    testapi.c, but only when the content actually changed (avoids
    touching the file and triggering needless rebuilds)."""
    global test
    test.close()
    try:
        input = open("testapi.c", "r").read()
    except:
        # no previous testapi.c: treat as changed
        input = ''
    test = open('testapi.c.new', "r").read()
    if input != test:
        try:
            # NOTE(review): shells out and assumes POSIX rm/mv; the
            # fallback handles platforms where the first command fails
            os.system("rm testapi.c; mv testapi.c.new testapi.c")
        except:
            os.system("mv testapi.c.new testapi.c")
        print("Updated testapi.c")
    else:
        print("Generated testapi.c is identical")
line = input.readline()
while line != "":
if line == "/* CUT HERE: everything below that line is generated */\n":
break;
if line[0:15] == "#define gen_nb_":
type = string.split(line[15:])[0]
known_param_types.append(type)
if line[0:19] == "static void desret_":
type = string.split(line[19:], '(')[0]
known_return_types.append(type)
test.write(line)
line = input.readline()
input.close()
if line == "":
print "Could not find the CUT marker in testapi.c skipping generation"
test.close()
sys.exit(0)
print("Scanned testapi.c: found %d parameters types and %d return types\n" % (
len(known_param_types), len(known_return_types)))
test.write("/* CUT HERE: everything below that line is generated */\n")
#
# Open the input API description
#
doc = libxml2.readFile(srcPref + 'doc/libxml2-api.xml', None, 0)
if doc == None:
print "Failed to load doc/libxml2-api.xml"
sys.exit(1)
ctxt = doc.xpathNewContext()
#
# Generate a list of all function parameters and select only
# those used in the api tests
#
argtypes = {}
args = ctxt.xpathEval("/api/symbols/function/arg")
for arg in args:
mod = arg.xpathEval('string(../@file)')
func = arg.xpathEval('string(../@name)')
if (mod not in skipped_modules) and (func not in skipped_functions):
type = arg.xpathEval('string(@type)')
if not argtypes.has_key(type):
argtypes[type] = func
# similarly for return types
rettypes = {}
rets = ctxt.xpathEval("/api/symbols/function/return")
for ret in rets:
mod = ret.xpathEval('string(../@file)')
func = ret.xpathEval('string(../@name)')
if (mod not in skipped_modules) and (func not in skipped_functions):
type = ret.xpathEval('string(@type)')
if not rettypes.has_key(type):
rettypes[type] = func
#
# Generate constructors and return type handling for all enums
# which are used as function parameters
#
enums = ctxt.xpathEval("/api/symbols/typedef[@type='enum']")
for enum in enums:
module = enum.xpathEval('string(@file)')
name = enum.xpathEval('string(@name)')
#
# Skip any enums which are not in our filtered lists
#
if (name == None) or ((name not in argtypes) and (name not in rettypes)):
continue;
define = 0
if argtypes.has_key(name) and is_known_param_type(name, name) == 0:
values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
i = 0
vals = []
for value in values:
vname = value.xpathEval('string(@name)')
if vname == None:
continue;
i = i + 1
if i >= 5:
break;
vals.append(vname)
if vals == []:
print "Didn't find any value for enum %s" % (name)
continue
if modules_defines.has_key(module):
test.write("#ifdef %s\n" % (modules_defines[module]))
define = 1
test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
(name, name))
i = 1
for value in vals:
test.write(" if (no == %d) return(%s);\n" % (i, value))
i = i + 1
test.write(""" return(0);
}
static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
}
""" % (name, name));
known_param_types.append(name)
if (is_known_return_type(name) == 0) and (name in rettypes):
if define == 0 and modules_defines.has_key(module):
test.write("#ifdef %s\n" % (modules_defines[module]))
define = 1
test.write("""static void desret_%s(%s val ATTRIBUTE_UNUSED) {
}
""" % (name, name))
known_return_types.append(name)
if define == 1:
test.write("#endif\n\n")
#
# Load the interfaces
#
headers = ctxt.xpathEval("/api/files/file")
for file in headers:
name = file.xpathEval('string(@name)')
if (name == None) or (name == ''):
continue
#
# Some module may be skipped because they don't really consists
# of user callable APIs
#
if is_skipped_module(name):
continue
#
# do not test deprecated APIs
#
desc = file.xpathEval('string(description)')
if string.find(desc, 'DEPRECATED') != -1:
print "Skipping deprecated interface %s" % name
continue;
test.write("#include <libxml/%s.h>\n" % name)
modules.append(name)
#
# Generate the callers signatures
#
for module in modules:
test.write("static int test_%s(void);\n" % module);
#
# Generate the top caller
#
test.write("""
/**
* testlibxml2:
*
* Main entry point of the tester for the full libxml2 module,
* it calls all the tester entry point for each module.
*
* Returns the number of error found
*/
static int
testlibxml2(void)
{
int test_ret = 0;
""")
for module in modules:
test.write(" test_ret += test_%s();\n" % module)
test.write("""
printf("Total: %d functions, %d tests, %d errors\\n",
function_tests, call_tests, test_ret);
return(test_ret);
}
""")
#
# How to handle a function
#
nb_tests = 0
def generate_test(module, node):
    """Emit a test_<name>() C function for one API function `node` of
    `module` into the global `test` file.

    The generated C code loops over every combination of generated
    argument values, calls the function, destroys the result and the
    arguments, and compares xmlMemBlocks() before/after to detect leaks.
    Functions with unsupported types get a stub that only notes the
    missing support.

    NOTE(review): the indentation inside the emitted C string templates
    below was reconstructed; confirm against the checked-in testapi.c.
    """
    global test
    global nb_tests
    nb_cond = 0
    no_gen = 0

    name = node.xpathEval('string(@name)')
    if is_skipped_function(name):
        return

    #
    # check we know how to handle the args and return values
    # and store the informations for the generation
    #
    try:
        args = node.xpathEval("arg")
    except:
        args = []
    t_args = []
    n = 0
    for arg in args:
        n = n + 1
        rtype = arg.xpathEval("string(@type)")
        if rtype == 'void':
            break;
        info = arg.xpathEval("string(@info)")
        nam = arg.xpathEval("string(@name)")
        type = type_convert(rtype, nam, info, module, name, n)
        if is_known_param_type(type, rtype) == 0:
            add_missing_type(type, name);
            no_gen = 1
        # crtype drops a leading 'const ' for pointer types so the
        # variable can be declared mutable and cast back at call time
        if (type[-3:] == 'Ptr' or type[-4:] == '_ptr') and \
            rtype[0:6] == 'const ':
            crtype = rtype[6:]
        else:
            crtype = rtype
        t_args.append((nam, type, rtype, crtype, info))

    try:
        rets = node.xpathEval("return")
    except:
        rets = []
    t_ret = None
    for ret in rets:
        rtype = ret.xpathEval("string(@type)")
        info = ret.xpathEval("string(@info)")
        type = type_convert(rtype, 'return', info, module, name, 0)
        if rtype == 'void':
            break
        if is_known_return_type(type) == 0:
            add_missing_type(type, name);
            no_gen = 1
        t_ret = (type, rtype, info)
        break

    test.write("""
static int
test_%s(void) {
    int test_ret = 0;

""" % (name))

    if no_gen == 1:
        # unsupported argument or return type: emit a stub test
        add_missing_functions(name, module)
        test.write("""
    /* missing type support */
    return(test_ret);
}

""")
        return

    try:
        conds = node.xpathEval("cond")
        for cond in conds:
            test.write("#if %s\n" % (cond.get_content()))
            nb_cond = nb_cond + 1
    except:
        pass

    define = 0
    if function_defines.has_key(name):
        test.write("#ifdef %s\n" % (function_defines[name]))
        define = 1

    # Declare the memory usage counter
    no_mem = is_skipped_memcheck(name)
    if no_mem == 0:
        test.write("    int mem_base;\n");

    # Declare the return value
    if t_ret != None:
        test.write("    %s ret_val;\n" % (t_ret[1]))

    # Declare the arguments
    for arg in t_args:
        (nam, type, rtype, crtype, info) = arg;
        # add declaration
        test.write("    %s %s; /* %s */\n" % (crtype, nam, info))
        test.write("    int n_%s;\n" % (nam))
    test.write("\n")

    # Cascade loop on of each argument list of values
    for arg in t_args:
        (nam, type, rtype, crtype, info) = arg;
        #
        test.write("    for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
                   nam, nam, type, nam))

    # log the memory usage
    if no_mem == 0:
        test.write("        mem_base = xmlMemBlocks();\n");

    # prepare the call
    i = 0;
    for arg in t_args:
        (nam, type, rtype, crtype, info) = arg;
        #
        test.write("        %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
        i = i + 1;

    # do the call, and cleanup the result
    if extra_pre_call.has_key(name):
        test.write("        %s\n"% (extra_pre_call[name]))
    if t_ret != None:
        test.write("\n        ret_val = %s(" % (name))
        need = 0
        for arg in t_args:
            (nam, type, rtype, crtype, info) = arg
            if need:
                test.write(", ")
            else:
                need = 1
            if rtype != crtype:
                # cast back the const-ness stripped at declaration
                test.write("(%s)" % rtype)
            test.write("%s" % nam);
        test.write(");\n")
        if extra_post_call.has_key(name):
            test.write("        %s\n"% (extra_post_call[name]))
        test.write("        desret_%s(ret_val);\n" % t_ret[0])
    else:
        test.write("\n        %s(" % (name));
        need = 0;
        for arg in t_args:
            (nam, type, rtype, crtype, info) = arg;
            if need:
                test.write(", ")
            else:
                need = 1
            if rtype != crtype:
                test.write("(%s)" % rtype)
            test.write("%s" % nam)
        test.write(");\n")
        if extra_post_call.has_key(name):
            test.write("        %s\n"% (extra_post_call[name]))

    test.write("        call_tests++;\n");

    # Free the arguments
    i = 0;
    for arg in t_args:
        (nam, type, rtype, crtype, info) = arg;
        # This is a hack to prevent generating a destructor for the
        # 'input' argument in xmlTextReaderSetup. There should be
        # a better, more generic way to do this!
        if string.find(info, 'destroy') == -1:
            test.write("        des_%s(n_%s, " % (type, nam))
            if rtype != crtype:
                test.write("(%s)" % rtype)
            test.write("%s, %d);\n" % (nam, i))
        i = i + 1;

    test.write("        xmlResetLastError();\n");
    # Check the memory usage
    if no_mem == 0:
        test.write("""        if (mem_base != xmlMemBlocks()) {
            printf("Leak of %%d blocks found in %s",
                   xmlMemBlocks() - mem_base);
            test_ret++;
""" % (name));
        for arg in t_args:
            (nam, type, rtype, crtype, info) = arg;
            test.write("""            printf(" %%d", n_%s);\n""" % (nam))
        test.write("""            printf("\\n");\n""")
        test.write("        }\n")

    for arg in t_args:
        test.write("    }\n")

    test.write("    function_tests++;\n")
    #
    # end of conditional
    #
    while nb_cond > 0:
        test.write("#endif\n")
        nb_cond = nb_cond -1
    if define == 1:
        test.write("#endif\n")

    nb_tests = nb_tests + 1;

    test.write("""
    return(test_ret);
}

""")
#
# Generate all module callers
#
for module in modules:
# gather all the functions exported by that module
try:
functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
except:
print "Failed to gather functions from module %s" % (module)
continue;
# iterate over all functions in the module generating the test
i = 0
nb_tests_old = nb_tests
for function in functions:
i = i + 1
generate_test(module, function);
# header
test.write("""static int
test_%s(void) {
int test_ret = 0;
if (quiet == 0) printf("Testing %s : %d of %d functions ...\\n");
""" % (module, module, nb_tests - nb_tests_old, i))
# iterate over all functions in the module generating the call
for function in functions:
name = function.xpathEval('string(@name)')
if is_skipped_function(name):
continue
test.write(" test_ret += test_%s();\n" % (name))
# footer
test.write("""
if (test_ret != 0)
printf("Module %s: %%d errors\\n", test_ret);
return(test_ret);
}
""" % (module))
#
# Generate direct module caller
#
test.write("""static int
test_module(const char *module) {
""");
for module in modules:
test.write(""" if (!strcmp(module, "%s")) return(test_%s());\n""" % (
module, module))
test.write(""" return(0);
}
""");
print "Generated test for %d modules and %d functions" %(len(modules), nb_tests)
compare_and_save()
missing_list = []
for missing in missing_types.keys():
if missing == 'va_list' or missing == '...':
continue;
n = len(missing_types[missing])
missing_list.append((n, missing))
def compare_missing(a, b):
return b[0] - a[0]
missing_list.sort(compare_missing)
print "Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list))
lst = open("missing.lst", "w")
lst.write("Missing support for %d types" % (len(missing_list)))
lst.write("\n")
for miss in missing_list:
lst.write("%s: %d :" % (miss[1], miss[0]))
i = 0
for n in missing_types[miss[1]]:
i = i + 1
if i > 5:
lst.write(" ...")
break
lst.write(" %s" % (n))
lst.write("\n")
lst.write("\n")
lst.write("\n")
lst.write("Missing support per module");
for module in missing_functions.keys():
lst.write("module %s:\n %s\n" % (module, missing_functions[module]))
lst.close()
|
shusenl/scikit-learn | refs/heads/master | benchmarks/bench_mnist.py | 76 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rat
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
|
MycChiu/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/kernel_tests/dirichlet_test.py | 35 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import dirichlet as dirichlet_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class DirichletTest(test.TestCase):
def testSimpleShapes(self):
with self.test_session():
alpha = np.random.rand(3)
dist = dirichlet_lib.Dirichlet(alpha)
self.assertEqual(3, dist.event_shape_tensor().eval())
self.assertAllEqual([], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
def testComplexShapes(self):
with self.test_session():
alpha = np.random.rand(3, 2, 2)
dist = dirichlet_lib.Dirichlet(alpha)
self.assertEqual(2, dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
def testConcentrationProperty(self):
alpha = [[1., 2, 3]]
with self.test_session():
dist = dirichlet_lib.Dirichlet(alpha)
self.assertEqual([1, 3], dist.concentration.get_shape())
self.assertAllClose(alpha, dist.concentration.eval())
def testPdfXProper(self):
alpha = [[1., 2, 3]]
with self.test_session():
dist = dirichlet_lib.Dirichlet(alpha, validate_args=True)
dist.prob([.1, .3, .6]).eval()
dist.prob([.2, .3, .5]).eval()
# Either condition can trigger.
with self.assertRaisesOpError("samples must be positive"):
dist.prob([-1., 1.5, 0.5]).eval()
with self.assertRaisesOpError("samples must be positive"):
dist.prob([0., .1, .9]).eval()
with self.assertRaisesOpError(
"sample last-dimension must sum to `1`"):
dist.prob([.1, .2, .8]).eval()
def testPdfZeroBatches(self):
with self.test_session():
alpha = [1., 2]
x = [.5, .5]
dist = dirichlet_lib.Dirichlet(alpha)
pdf = dist.prob(x)
self.assertAllClose(1., pdf.eval())
self.assertEqual((), pdf.get_shape())
def testPdfZeroBatchesNontrivialX(self):
with self.test_session():
alpha = [1., 2]
x = [.3, .7]
dist = dirichlet_lib.Dirichlet(alpha)
pdf = dist.prob(x)
self.assertAllClose(7. / 5, pdf.eval())
self.assertEqual((), pdf.get_shape())
def testPdfUniformZeroBatches(self):
with self.test_session():
# Corresponds to a uniform distribution
alpha = [1., 1, 1]
x = [[.2, .5, .3], [.3, .4, .3]]
dist = dirichlet_lib.Dirichlet(alpha)
pdf = dist.prob(x)
self.assertAllClose([2., 2.], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenSameRank(self):
with self.test_session():
alpha = [[1., 2]]
x = [[.5, .5], [.3, .7]]
dist = dirichlet_lib.Dirichlet(alpha)
pdf = dist.prob(x)
self.assertAllClose([1., 7. / 5], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
alpha = [1., 2]
x = [[.5, .5], [.2, .8]]
pdf = dirichlet_lib.Dirichlet(alpha).prob(x)
self.assertAllClose([1., 8. / 5], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenSameRank(self):
with self.test_session():
alpha = [[1., 2], [2., 3]]
x = [[.5, .5]]
pdf = dirichlet_lib.Dirichlet(alpha).prob(x)
self.assertAllClose([1., 3. / 2], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
alpha = [[1., 2], [2., 3]]
x = [.5, .5]
pdf = dirichlet_lib.Dirichlet(alpha).prob(x)
self.assertAllClose([1., 3. / 2], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testMean(self):
with self.test_session():
alpha = [1., 2, 3]
expected_mean = stats.dirichlet.mean(alpha)
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
self.assertEqual(dirichlet.mean().get_shape(), [3])
self.assertAllClose(dirichlet.mean().eval(), expected_mean)
def testCovarianceFromSampling(self):
alpha = np.array([[1., 2, 3],
[2.5, 4, 0.01]], dtype=np.float32)
with self.test_session() as sess:
dist = dirichlet_lib.Dirichlet(alpha) # batch_shape=[2], event_shape=[3]
x = dist.sample(int(250e3), seed=1)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean[None, ...]
sample_cov = math_ops.reduce_mean(math_ops.matmul(
x_centered[..., None], x_centered[..., None, :]), 0)
sample_var = array_ops.matrix_diag_part(sample_cov)
sample_stddev = math_ops.sqrt(sample_var)
[
sample_mean_,
sample_cov_,
sample_var_,
sample_stddev_,
analytic_mean,
analytic_cov,
analytic_var,
analytic_stddev,
] = sess.run([
sample_mean,
sample_cov,
sample_var,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(sample_mean_, analytic_mean, atol=0., rtol=0.04)
self.assertAllClose(sample_cov_, analytic_cov, atol=0., rtol=0.06)
self.assertAllClose(sample_var_, analytic_var, atol=0., rtol=0.03)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0., rtol=0.02)
def testVariance(self):
with self.test_session():
alpha = [1., 2, 3]
denominator = np.sum(alpha)**2 * (np.sum(alpha) + 1)
expected_covariance = np.diag(stats.dirichlet.var(alpha))
expected_covariance += [[0., -2, -3], [-2, 0, -6],
[-3, -6, 0]] / denominator
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
self.assertEqual(dirichlet.covariance().get_shape(), (3, 3))
self.assertAllClose(dirichlet.covariance().eval(), expected_covariance)
def testMode(self):
with self.test_session():
alpha = np.array([1.1, 2, 3])
expected_mode = (alpha - 1) / (np.sum(alpha) - 3)
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
self.assertEqual(dirichlet.mode().get_shape(), [3])
self.assertAllClose(dirichlet.mode().eval(), expected_mode)
def testModeInvalid(self):
with self.test_session():
alpha = np.array([1., 2, 3])
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha,
allow_nan_stats=False)
with self.assertRaisesOpError("Condition x < y.*"):
dirichlet.mode().eval()
def testModeEnableAllowNanStats(self):
with self.test_session():
alpha = np.array([1., 2, 3])
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha,
allow_nan_stats=True)
expected_mode = np.zeros_like(alpha) + np.nan
self.assertEqual(dirichlet.mode().get_shape(), [3])
self.assertAllClose(dirichlet.mode().eval(), expected_mode)
def testEntropy(self):
with self.test_session():
alpha = [1., 2, 3]
expected_entropy = stats.dirichlet.entropy(alpha)
dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
self.assertEqual(dirichlet.entropy().get_shape(), ())
self.assertAllClose(dirichlet.entropy().eval(), expected_entropy)
def testSample(self):
with self.test_session():
alpha = [1., 2]
dirichlet = dirichlet_lib.Dirichlet(alpha)
n = constant_op.constant(100000)
samples = dirichlet.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000, 2))
self.assertTrue(np.all(sample_values > 0.0))
self.assertLess(
stats.kstest(
# Beta is a univariate distribution.
sample_values[:, 0],
stats.beta(
a=1., b=2.).cdf)[0],
0.01)
if __name__ == "__main__":
test.main()
|
soldag/home-assistant | refs/heads/dev | tests/components/moon/test_sensor.py | 7 | """The test for the moon sensor platform."""
from datetime import datetime
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
DAY1 = datetime(2017, 1, 1, 1, tzinfo=dt_util.UTC)
DAY2 = datetime(2017, 1, 18, 1, tzinfo=dt_util.UTC)
async def test_moon_day1(hass):
"""Test the Moon sensor."""
config = {"sensor": {"platform": "moon", "name": "moon_day1"}}
await async_setup_component(hass, HA_DOMAIN, {})
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert hass.states.get("sensor.moon_day1")
with patch(
"homeassistant.components.moon.sensor.dt_util.utcnow", return_value=DAY1
):
await async_update_entity(hass, "sensor.moon_day1")
assert hass.states.get("sensor.moon_day1").state == "waxing_crescent"
async def test_moon_day2(hass):
"""Test the Moon sensor."""
config = {"sensor": {"platform": "moon", "name": "moon_day2"}}
await async_setup_component(hass, HA_DOMAIN, {})
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert hass.states.get("sensor.moon_day2")
with patch(
"homeassistant.components.moon.sensor.dt_util.utcnow", return_value=DAY2
):
await async_update_entity(hass, "sensor.moon_day2")
assert hass.states.get("sensor.moon_day2").state == "waning_gibbous"
async def async_update_entity(hass, entity_id):
"""Run an update action for an entity."""
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
|
spiffytech/offlineimap | refs/heads/master | offlineimap/mbnames.py | 17 | # Mailbox name generator
# Copyright (C) 2002 John Goerzen
# <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os.path
import re # for folderfilter
from threading import Lock
boxes = {}
config = None
accounts = None
mblock = Lock()
def init(conf, accts):
global config, accounts
config = conf
accounts = accts
def add(accountname, foldername):
if not accountname in boxes:
boxes[accountname] = []
if not foldername in boxes[accountname]:
boxes[accountname].append(foldername)
def write():
# See if we're ready to write it out.
for account in accounts:
if account not in boxes:
return
genmbnames()
def genmbnames():
"""Takes a configparser object and a boxlist, which is a list of hashes
containing 'accountname' and 'foldername' keys."""
mblock.acquire()
try:
localeval = config.getlocaleval()
if not config.getdefaultboolean("mbnames", "enabled", 0):
return
file = open(os.path.expanduser(config.get("mbnames", "filename")), "wt")
file.write(localeval.eval(config.get("mbnames", "header")))
folderfilter = lambda accountname, foldername: 1
if config.has_option("mbnames", "folderfilter"):
folderfilter = localeval.eval(config.get("mbnames", "folderfilter"),
{'re': re})
itemlist = []
for accountname in boxes.keys():
for foldername in boxes[accountname]:
if folderfilter(accountname, foldername):
itemlist.append(config.get("mbnames", "peritem", raw=1) % \
{'accountname': accountname,
'foldername': foldername})
file.write(localeval.eval(config.get("mbnames", "sep")).join(itemlist))
file.write(localeval.eval(config.get("mbnames", "footer")))
file.close()
finally:
mblock.release()
|
mitchellzen/pops | refs/heads/master | satchmo/apps/satchmo_utils/widgets.py | 7 | from django import forms
from django.utils.safestring import mark_safe
from l10n.l10n_settings import get_l10n_default_currency_symbol
from livesettings import config_value
from satchmo_utils.numbers import round_decimal
import logging
from django.utils.html import escape
from decimal import Decimal
log = logging.getLogger('satchmo_utils.widgets')
def _render_decimal(value, places=2, min_places=2):
# Check to make sure this is a Decimal before we try to round
# and format. If it's not, just pass it on.
# The admin validation will handle making sure only valid values get
# saved.
bad_decimal = False
try:
Decimal(value)
except:
bad_decimal = True
if value is not None and not bad_decimal:
roundfactor = "0." + "0"*(places-1) + "1"
if value < 0:
roundfactor = "-" + roundfactor
value = round_decimal(val=value, places=places, roundfactor=roundfactor, normalize=True)
log.debug('value: %s' % type(value))
parts = ("%f" % value).split('.')
n = parts[0]
d = ""
if len(parts) > 0:
d = parts[1]
elif min_places:
d = "0" * min_places
while len(d) < min_places:
d = "%s0" % d
while len(d) > min_places and d[-1] == '0':
d = d[:-1]
if len(d) > 0:
value = "%s.%s" % (n, d)
else:
value = n
return value
class BaseCurrencyWidget(forms.TextInput):
"""
A Text Input widget that shows the currency amount
"""
def __init__(self, attrs={}):
final_attrs = {'class': 'vCurrencyField'}
if attrs is not None:
final_attrs.update(attrs)
super(BaseCurrencyWidget, self).__init__(attrs=final_attrs)
class CurrencyWidget(BaseCurrencyWidget):
def render(self, name, value, attrs=None):
if value != '':
value = _render_decimal(value, places=8)
rendered = super(CurrencyWidget, self).render(name, value, attrs)
curr = get_l10n_default_currency_symbol()
curr = curr.replace("_", " ")
return mark_safe('<span class="currency">%s</span>%s' % (curr, rendered))
class TruncatedCurrencyWidget(BaseCurrencyWidget):
"""
A Text Input widget that shows the currency amount - stripped to two digits by default.
"""
def render(self, name, value, attrs=None):
value = _render_decimal(value, places=2)
rendered = super(TruncatedCurrencyWidget, self).render(name, value, attrs)
curr = get_l10n_default_currency_symbol()
curr = curr.replace("_", " ")
return mark_safe('<span class="currency">%s</span>%s' % (curr, rendered))
class StrippedDecimalWidget(forms.TextInput):
"""
A textinput widget that strips out the trailing zeroes.
"""
def __init__(self, attrs={}):
final_attrs = {'class': 'vDecimalField'}
if attrs is not None:
final_attrs.update(attrs)
super(StrippedDecimalWidget, self).__init__(attrs=final_attrs)
def render(self, name, value, attrs=None):
value = _render_decimal(value, places=8, min_places=0)
return super(StrippedDecimalWidget, self).render(name, value, attrs)
|
ramcn/demo3 | refs/heads/master | venv/lib/python3.4/site-packages/setuptools/tests/test_easy_install.py | 44 | # -*- coding: utf-8 -*-
"""Easy install Tests
"""
from __future__ import absolute_import
import sys
import os
import shutil
import tempfile
import site
import contextlib
import tarfile
import logging
import itertools
import distutils.errors
import pytest
try:
from unittest import mock
except ImportError:
import mock
from setuptools import sandbox
from setuptools import compat
from setuptools.compat import StringIO, BytesIO, urlparse
from setuptools.sandbox import run_setup
import setuptools.command.easy_install as ei
from setuptools.command.easy_install import PthDistributions
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from pkg_resources import working_set
from pkg_resources import Distribution as PRDistribution
import setuptools.tests.server
import pkg_resources
from .py26compat import tarfile_open
from . import contexts
from .textwrap import DALS
class FakeDist(object):
def get_entry_map(self, group):
if group != 'console_scripts':
return {}
return {'name': 'ep'}
def as_requirement(self):
return 'spec'
SETUP_PY = DALS("""
from setuptools import setup
setup(name='foo')
""")
class TestEasyInstallTest:
def test_install_site_py(self):
dist = Distribution()
cmd = ei.easy_install(dist)
cmd.sitepy_installed = False
cmd.install_dir = tempfile.mkdtemp()
try:
cmd.install_site_py()
sitepy = os.path.join(cmd.install_dir, 'site.py')
assert os.path.exists(sitepy)
finally:
shutil.rmtree(cmd.install_dir)
def test_get_script_args(self):
header = ei.CommandSpec.best().from_environment().as_header()
expected = header + DALS("""
# EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name'
__requires__ = 'spec'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('spec', 'console_scripts', 'name')()
)
""")
dist = FakeDist()
args = next(ei.ScriptWriter.get_args(dist))
name, script = itertools.islice(args, 2)
assert script == expected
def test_no_find_links(self):
# new option '--no-find-links', that blocks find-links added at
# the project level
dist = Distribution()
cmd = ei.easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.no_find_links = True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
assert cmd.package_index.scanned_urls == {}
# let's try without it (default behavior)
cmd = ei.easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
keys = sorted(cmd.package_index.scanned_urls.keys())
assert keys == ['link1', 'link2']
def test_write_exception(self):
"""
Test that `cant_write_to_target` is rendered as a DistutilsError.
"""
dist = Distribution()
cmd = ei.easy_install(dist)
cmd.install_dir = os.getcwd()
with pytest.raises(distutils.errors.DistutilsError):
cmd.cant_write_to_target()
class TestPTHFileWriter:
def test_add_from_cwd_site_sets_dirty(self):
'''a pth file manager should set dirty
if a distribution is in site but also the cwd
'''
pth = PthDistributions('does-not_exist', [os.getcwd()])
assert not pth.dirty
pth.add(PRDistribution(os.getcwd()))
assert pth.dirty
def test_add_from_site_is_ignored(self):
location = '/test/location/does-not-have-to-exist'
# PthDistributions expects all locations to be normalized
location = pkg_resources.normalize_path(location)
pth = PthDistributions('does-not_exist', [location, ])
assert not pth.dirty
pth.add(PRDistribution(location))
assert not pth.dirty
@pytest.yield_fixture
def setup_context(tmpdir):
with (tmpdir/'setup.py').open('w') as f:
f.write(SETUP_PY)
with tmpdir.as_cwd():
yield tmpdir
@pytest.mark.usefixtures("user_override")
@pytest.mark.usefixtures("setup_context")
class TestUserInstallTest:
# prevent check that site-packages is writable. easy_install
# shouldn't be writing to system site-packages during finalize
# options, but while it does, bypass the behavior.
prev_sp_write = mock.patch(
'setuptools.command.easy_install.easy_install.check_site_dir',
mock.Mock(),
)
# simulate setuptools installed in user site packages
@mock.patch('setuptools.command.easy_install.__file__', site.USER_SITE)
@mock.patch('site.ENABLE_USER_SITE', True)
@prev_sp_write
def test_user_install_not_implied_user_site_enabled(self):
self.assert_not_user_site()
@mock.patch('site.ENABLE_USER_SITE', False)
@prev_sp_write
def test_user_install_not_implied_user_site_disabled(self):
self.assert_not_user_site()
@staticmethod
def assert_not_user_site():
# create a finalized easy_install command
dist = Distribution()
dist.script_name = 'setup.py'
cmd = ei.easy_install(dist)
cmd.args = ['py']
cmd.ensure_finalized()
assert not cmd.user, 'user should not be implied'
def test_multiproc_atexit(self):
pytest.importorskip('multiprocessing')
log = logging.getLogger('test_easy_install')
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
log.info('this should not break')
@pytest.fixture()
def foo_package(self, tmpdir):
egg_file = tmpdir / 'foo-1.0.egg-info'
with egg_file.open('w') as f:
f.write('Name: foo\n')
return str(tmpdir)
@pytest.yield_fixture()
def install_target(self, tmpdir):
target = str(tmpdir)
with mock.patch('sys.path', sys.path + [target]):
python_path = os.path.pathsep.join(sys.path)
with mock.patch.dict(os.environ, PYTHONPATH=python_path):
yield target
def test_local_index(self, foo_package, install_target):
"""
The local index must be used when easy_install locates installed
packages.
"""
dist = Distribution()
dist.script_name = 'setup.py'
cmd = ei.easy_install(dist)
cmd.install_dir = install_target
cmd.args = ['foo']
cmd.ensure_finalized()
cmd.local_index.scan([foo_package])
res = cmd.easy_install('foo')
actual = os.path.normcase(os.path.realpath(res.location))
expected = os.path.normcase(os.path.realpath(foo_package))
assert actual == expected
@contextlib.contextmanager
def user_install_setup_context(self, *args, **kwargs):
"""
Wrap sandbox.setup_context to patch easy_install in that context to
appear as user-installed.
"""
with self.orig_context(*args, **kwargs):
import setuptools.command.easy_install as ei
ei.__file__ = site.USER_SITE
yield
def patched_setup_context(self):
self.orig_context = sandbox.setup_context
return mock.patch(
'setuptools.sandbox.setup_context',
self.user_install_setup_context,
)
@pytest.yield_fixture
def distutils_package():
distutils_setup_py = SETUP_PY.replace(
'from setuptools import setup',
'from distutils.core import setup',
)
with contexts.tempdir(cd=os.chdir):
with open('setup.py', 'w') as f:
f.write(distutils_setup_py)
yield
class TestDistutilsPackage:
def test_bdist_egg_available_on_distutils_pkg(self, distutils_package):
run_setup('setup.py', ['bdist_egg'])
class TestSetupRequires:
def test_setup_requires_honors_fetch_params(self):
"""
When easy_install installs a source distribution which specifies
setup_requires, it should honor the fetch parameters (such as
allow-hosts, index-url, and find-links).
"""
# set up a server which will simulate an alternate package index.
p_index = setuptools.tests.server.MockServer()
p_index.start()
netloc = 1
p_index_loc = urlparse(p_index.url)[netloc]
if p_index_loc.endswith(':0'):
# Some platforms (Jython) don't find a port to which to bind,
# so skip this test for them.
return
with contexts.quiet():
# create an sdist that has a build-time dependency.
with TestSetupRequires.create_sdist() as dist_file:
with contexts.tempdir() as temp_install_dir:
with contexts.environment(PYTHONPATH=temp_install_dir):
ei_params = [
'--index-url', p_index.url,
'--allow-hosts', p_index_loc,
'--exclude-scripts',
'--install-dir', temp_install_dir,
dist_file,
]
with sandbox.save_argv(['easy_install']):
# attempt to install the dist. It should fail because
# it doesn't exist.
with pytest.raises(SystemExit):
easy_install_pkg.main(ei_params)
# there should have been two or three requests to the server
# (three happens on Python 3.3a)
assert 2 <= len(p_index.requests) <= 3
assert p_index.requests[0].path == '/does-not-exist/'
@staticmethod
@contextlib.contextmanager
def create_sdist():
"""
Return an sdist with a setup_requires dependency (of something that
doesn't exist)
"""
with contexts.tempdir() as dir:
dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz')
script = DALS("""
import setuptools
setuptools.setup(
name="setuptools-test-fetcher",
version="1.0",
setup_requires = ['does-not-exist'],
)
""")
make_trivial_sdist(dist_path, script)
yield dist_path
def test_setup_requires_overrides_version_conflict(self):
"""
Regression test for issue #323.
Ensures that a distribution's setup_requires requirements can still be
installed and used locally even if a conflicting version of that
requirement is already on the path.
"""
pr_state = pkg_resources.__getstate__()
fake_dist = PRDistribution('does-not-matter', project_name='foobar',
version='0.0')
working_set.add(fake_dist)
try:
with contexts.tempdir() as temp_dir:
test_pkg = create_setup_requires_package(temp_dir)
test_setup_py = os.path.join(test_pkg, 'setup.py')
with contexts.quiet() as (stdout, stderr):
# Don't even need to install the package, just
# running the setup.py at all is sufficient
run_setup(test_setup_py, ['--name'])
lines = stdout.readlines()
assert len(lines) > 0
assert lines[-1].strip(), 'test_pkg'
finally:
pkg_resources.__setstate__(pr_state)
def create_setup_requires_package(path):
"""Creates a source tree under path for a trivial test package that has a
single requirement in setup_requires--a tarball for that requirement is
also created and added to the dependency_links argument.
"""
test_setup_attrs = {
'name': 'test_pkg', 'version': '0.0',
'setup_requires': ['foobar==0.1'],
'dependency_links': [os.path.abspath(path)]
}
test_pkg = os.path.join(path, 'test_pkg')
test_setup_py = os.path.join(test_pkg, 'setup.py')
os.mkdir(test_pkg)
with open(test_setup_py, 'w') as f:
f.write(DALS("""
import setuptools
setuptools.setup(**%r)
""" % test_setup_attrs))
foobar_path = os.path.join(path, 'foobar-0.1.tar.gz')
make_trivial_sdist(
foobar_path,
DALS("""
import setuptools
setuptools.setup(
name='foobar',
version='0.1'
)
"""))
return test_pkg
def make_trivial_sdist(dist_path, setup_py):
"""Create a simple sdist tarball at dist_path, containing just a
setup.py, the contents of which are provided by the setup_py string.
"""
setup_py_file = tarfile.TarInfo(name='setup.py')
try:
# Python 3 (StringIO gets converted to io module)
MemFile = BytesIO
except AttributeError:
MemFile = StringIO
setup_py_bytes = MemFile(setup_py.encode('utf-8'))
setup_py_file.size = len(setup_py_bytes.getvalue())
with tarfile_open(dist_path, 'w:gz') as dist:
dist.addfile(setup_py_file, fileobj=setup_py_bytes)
class TestScriptHeader:
    """Tests for ScriptWriter shebang (#!) header generation."""
    # Sample executables used to exercise quoting/encoding edge cases.
    non_ascii_exe = '/Users/José/bin/python'
    exe_with_spaces = r'C:\Program Files\Python33\python.exe'
    @pytest.mark.skipif(
        sys.platform.startswith('java') and ei.is_sh(sys.executable),
        reason="Test cannot run under java when executable is sh"
    )
    def test_get_script_header(self):
        """Headers reflect the executable, options, and required quoting."""
        expected = '#!%s\n' % ei.nt_quote_arg(os.path.normpath(sys.executable))
        actual = ei.ScriptWriter.get_script_header('#!/usr/local/bin/python')
        assert actual == expected
        # Interpreter options (-x) are carried over into the new header.
        expected = '#!%s -x\n' % ei.nt_quote_arg(os.path.normpath
            (sys.executable))
        actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python -x')
        assert actual == expected
        # NOTE(review): the requested shebang has no -x, yet the expected
        # value includes one -- confirm this matches ScriptWriter behavior.
        actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
            executable=self.non_ascii_exe)
        expected = '#!%s -x\n' % self.non_ascii_exe
        assert actual == expected
        # Pre-quoted executables with spaces are preserved verbatim.
        actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
            executable='"'+self.exe_with_spaces+'"')
        expected = '#!"%s"\n' % self.exe_with_spaces
        assert actual == expected
    @pytest.mark.xfail(
        compat.PY3 and os.environ.get("LC_CTYPE") in ("C", "POSIX"),
        reason="Test fails in this locale on Python 3"
    )
    @mock.patch.dict(sys.modules, java=mock.Mock(lang=mock.Mock(System=
        mock.Mock(getProperty=mock.Mock(return_value="")))))
    @mock.patch('sys.platform', 'java1.5.0_13')
    def test_get_script_header_jython_workaround(self, tmpdir):
        """On Jython, a script executable gets an /usr/bin/env wrapper."""
        # Create a mock sys.executable that uses a shebang line
        header = DALS("""
            #!/usr/bin/python
            # -*- coding: utf-8 -*-
            """)
        exe = tmpdir / 'exe.py'
        with exe.open('w') as f:
            f.write(header)
        exe = str(exe)
        header = ei.ScriptWriter.get_script_header('#!/usr/local/bin/python',
            executable=exe)
        assert header == '#!/usr/bin/env %s\n' % exe
        # Warnings land on stdout before 2.7 and on stderr afterwards.
        expect_out = 'stdout' if sys.version_info < (2,7) else 'stderr'
        with contexts.quiet() as (stdout, stderr):
            # When options are included, generate a broken shebang line
            # with a warning emitted
            candidate = ei.ScriptWriter.get_script_header('#!/usr/bin/python -x',
                executable=exe)
            assert candidate == '#!%s -x\n' % exe
            output = locals()[expect_out]
            assert 'Unable to adapt shebang line' in output.getvalue()
        with contexts.quiet() as (stdout, stderr):
            candidate = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
                executable=self.non_ascii_exe)
            assert candidate == '#!%s -x\n' % self.non_ascii_exe
            output = locals()[expect_out]
            assert 'Unable to adapt shebang line' in output.getvalue()
class TestCommandSpec:
    """Tests for the CommandSpec shebang-command abstraction."""
    def test_custom_launch_command(self):
        """
        Show how a custom CommandSpec could be used to specify a #! executable
        which takes parameters.
        """
        cmd = ei.CommandSpec(['/usr/bin/env', 'python3'])
        assert cmd.as_header() == '#!/usr/bin/env python3\n'
    def test_from_param_for_CommandSpec_is_passthrough(self):
        """
        from_param should return an instance of a CommandSpec
        """
        cmd = ei.CommandSpec(['python'])
        cmd_new = ei.CommandSpec.from_param(cmd)
        # Identity, not equality: an existing spec must not be copied.
        assert cmd is cmd_new
    def test_from_environment_with_spaces_in_executable(self):
        """An executable path containing spaces is quoted in the header."""
        with mock.patch('sys.executable', TestScriptHeader.exe_with_spaces):
            cmd = ei.CommandSpec.from_environment()
            assert len(cmd) == 1
            assert cmd.as_header().startswith('#!"')
    def test_from_simple_string_uses_shlex(self):
        """
        In order to support `executable = /usr/bin/env my-python`, make sure
        from_param invokes shlex on that input.
        """
        cmd = ei.CommandSpec.from_param('/usr/bin/env my-python')
        # shlex splitting yields two words; neither needs quoting.
        assert len(cmd) == 2
        assert '"' not in cmd.as_header()
    def test_sys_executable(self):
        """
        CommandSpec.from_string(sys.executable) should contain just that param.
        """
        writer = ei.ScriptWriter.best()
        cmd = writer.command_spec_class.from_string(sys.executable)
        assert len(cmd) == 1
        assert cmd[0] == sys.executable
class TestWindowsScriptWriter:
    """Tests for the Windows-specific script header writer."""
    def test_header(self):
        """The generated header is a '#!...' line without escaped quotes."""
        hdr = ei.WindowsScriptWriter.get_script_header('')
        assert hdr.startswith('#!')
        assert hdr.endswith('\n')
        # lstrip/rstrip remove character *sets*; adequate here since we
        # only peel leading '#'/'!' and the trailing newline.
        hdr = hdr.lstrip('#!')
        hdr = hdr.rstrip('\n')
        # header should not start with an escaped quote
        assert not hdr.startswith('\\"')
|
XiaosongWei/crosswalk-test-suite | refs/heads/master | webapi/tct-fullscreen-nonw3c-tests/inst.wgt.py | 372 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run *cmd* in a shell, echoing its output live.

    Returns a tuple (exit_code, output_lines).
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    # stderr is folded into stdout so everything is captured in order.
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() stays None until the child exits; keep draining stdout
        # until we see both EOF ('') and a real exit status.
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations so they run as the configured user with the
    D-Bus environment exported; all other commands pass through untouched.
    """
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Return (exit_code, output) of `id -u <user>` executed on the device."""
    if PARAMETERS.mode == "SDB":
        remote_cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        remote_cmd = 'ssh %s "id -u %s"' % (
            PARAMETERS.device, PARAMETERS.user)
    return doCMD(remote_cmd)
def getPKGID(pkg_name=None):
    """Look up the package id for *pkg_name* in the `pkgcmd -l` listing.

    Returns None when the listing cannot be read or the name is absent.
    """
    if PARAMETERS.mode == "SDB":
        list_cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        list_cmd = 'ssh %s "%s"' % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(list_cmd)
    if return_code != 0:
        return None
    marker = "[" + pkg_name + "]"
    for line in output:
        if marker in line:
            fields = line.split()
            # The id follows the literal "pkgid" token, wrapped in brackets.
            return fields[fields.index("pkgid") + 1].strip("[]")
    return None
def doRemoteCMD(cmd=None):
    """Run *cmd* on the device via sdb or ssh; return (exit_code, output)."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        full_cmd = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        full_cmd = 'ssh %s "%s"' % (PARAMETERS.device, wrapped)
    return doCMD(full_cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the device and report success.

    Returns True when the underlying push/scp command exits with 0,
    False otherwise.  The previous version returned the opposite, which
    made every caller's `if not doRemoteCopy(...)` failure check flag
    successful copies as failures (and vice versa).
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush device filesystem buffers before the result is consumed.
    doRemoteCMD("sync")
    if return_code != 0:
        return False
    return True
def uninstPKGs():
    """Uninstall every .wgt widget found under the script directory and
    remove the deployed package tree from the device.

    Returns True only if every step succeeded.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media assets are not packages; skip them.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                # The package id is derived from the widget file's basename.
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t wgt -q -n %s" % pkg_id)
                # pkgcmd reports errors in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Push every .wgt widget to the device and install it via pkgcmd.

    Returns True only if every copy and install step succeeded.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media assets are not packages; skip them.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                if not doRemoteCopy(
                        os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
                # The pushed widget is no longer needed once installed.
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # pkgcmd reports errors in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Do some special copy/delete... steps
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''
    return action_status
def main():
    """Parse options, locate the target device, then (un)install packages."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # Auto-pick the first attached device from `sdb devices`.
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0:
        # The uid feeds the per-user D-Bus socket path used by updateCMD().
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
            userid)
    else:
        print "[Error] cmd commands error : %s" % str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
sergey-lance/Udacity-FSWDN-MultiuserBlog | refs/heads/master | main.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Undacity FSWDN
# Project: MultiuserBlog.
# by Sergey Ryzhikov (sergey-inform@ya.ru)
# License: GPLv2
#
import sys
import os
import jinja2
import webapp2
from webapp2_extras.routes import RedirectRoute, PathPrefixRoute
from webapp2_extras import auth
from webapp2_extras import sessions
import hmac
import logging
try:
from urlparse import urlparse # python2.7
except ImportError:
from urllib.parse import urlparse # python3
# Name of the GET/POST parameter that carries the CSRF token.
CSRF_PARAM_NAME = 'token'
## Template engine
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(template_dir),
    autoescape=True, # autoescaping mitigates XSS in rendered templates
)
jinja_env.globals['uri_for'] = webapp2.uri_for # make uri_for() available in templates
def render_str(template, **params):
    """Render the named jinja2 template with the given parameters."""
    return jinja_env.get_template(template).render(params)
class RequestHandler(webapp2.RequestHandler):
    """Base handler: template rendering, session access, CSRF helpers."""
    prevent_embedding = True # by default prevent embedding the page in <iframe> on another sites
    def write(self, *a, **kw):
        """Write directly to the response body."""
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        """Render *template*, injecting user info, flashes and CSRF helpers."""
        params['user'] = self.user_info
        params['flashes'] = self.session.get_flashes()
        params['csrf_helpers'] = {
            'token': self.csrf_token, # property
            'gen_token': self.gen_csrf_token, # function
            'token_for': self.get_csrf_token_for, # function
            'uri_for': self.get_csrf_uri_for, # function
        }
        return render_str(template, **params)
    def render(self, template, **kw):
        """Render *template* to the response, adding clickjacking headers."""
        if self.prevent_embedding:
            self.response.headers.add('X-Frame-Options', 'DENY')
        self.write(self.render_str(template, **kw))
    @webapp2.cached_property
    def auth(self):
        """Shortcut to access the auth instance as a property."""
        return auth.get_auth()
    @webapp2.cached_property
    def user_info(self):
        """Shortcut to access a subset of the user attributes that are stored
        in the session.
        The list of attributes to store in the session is specified in
        config['webapp2_extras.auth']['user_attributes'].
        :returns
        A dictionary with most user information
        """
        return self.auth.get_user_by_session()
    @webapp2.cached_property
    def user(self):
        """Shortcut to access the current logged in user.
        Unlike user_info, it fetches information from the persistence layer and
        returns an instance of the underlying model.
        :returns
        The instance of the user model associated to the logged in user.
        """
        user_info = self.user_info
        if user_info:
            return self.user_model.get_by_id(user_info['user_id'])
        else:
            return None
    @webapp2.cached_property
    def user_model(self):
        """Returns the implementation of the user model.
        It is consistent with config['webapp2_extras.auth']['user_model'], if set.
        """
        return self.auth.store.user_model
    @webapp2.cached_property
    def session(self):
        """Shortcut to access the current session."""
        return self.session_store.get_session(backend="datastore")
    # this is needed for webapp2 sessions to work
    def dispatch(self):
        """Wrap dispatch so session changes are always persisted."""
        self.session_store = sessions.get_store(request=self.request)
        try:
            webapp2.RequestHandler.dispatch(self)
        finally:
            self.session_store.save_sessions(self.response)
    ## CSRF handlers
    def gen_csrf_token(self, uri=None):
        """ Generate a token to prevent CSRF attacks.
        Token is unique for user session and URI:
        token = hash(session.token + rquest.path)
        uri: to generate token for another URI
        """
        if not self.user_info: # no user session...
            return None # so csrf make no sense
        if uri is None:
            uri = self.request.path
        else:
            uri = urlparse(uri).path
        secret = self.user_info['token'] + uri
        # NOTE(review): hmac.new without digestmod defaults to MD5; the
        # commented-out digestmod below suggests SHA-256 was intended.
        token = hmac.new(
            key=bytearray(secret, 'utf-8'),
            # digestmod=hashlib.sha256
        ).hexdigest()
        return token
    def check_csrf_token(self, token):
        """Return True when *token* matches (or no session token exists)."""
        # NOTE(review): '==' is not constant-time; hmac.compare_digest
        # would resist timing attacks. Also note the check passes when
        # csrf_token is None (no logged-in user).
        if self.csrf_token == token \
            or self.csrf_token is None: # nothing to match
            return True
        return False
    @property
    def csrf_token(self):
        """ Get CSRF token as a request property.
        """
        return self.gen_csrf_token(self.request.path)
    def get_csrf_token_for(self, route_name, *a, **kva):
        """ Generate token for specified route.
        """
        uri = webapp2.uri_for(route_name, *a, **kva)
        uri_path = urlparse(uri).path # the same as request.path
        return self.gen_csrf_token(uri=uri_path)
    def get_csrf_uri_for(self, route_name, *a, **kva):
        """ A handy function to generate csrf-aware URI's like /bebe?param=1&token=ab12cd34...
        """
        # NOTE(review): *a is not forwarded to get_csrf_token_for here,
        # so the token may be computed for a different URI than the one
        # built below when positional route args are used -- confirm.
        token = self.get_csrf_token_for(route_name, **kva)
        kva[CSRF_PARAM_NAME] = token
        return webapp2.uri_for(route_name, *a, **kva)
def csrf_check(handler):
    """ Decorator for CSRF token check.
    Look for parameter with name 'CSRF_PARAM_NAME'
    in POST for posts and in GET for other request types.
    Aborts request (401) if token is missing or not valid.
    """
    from functools import wraps

    # wraps() preserves the handler's __name__/__doc__ so routing,
    # debugging and introspection still see the original handler.
    @wraps(handler)
    def _check_csrf_token(self, *args, **kwargs):
        req = self.request
        try:
            if req.method == 'POST':
                token = req.POST[CSRF_PARAM_NAME]
            else:
                token = req.GET[CSRF_PARAM_NAME]
        except KeyError:
            self.abort(401, explanation='CSRF token required.')
        if self.check_csrf_token(token):
            return handler(self, *args, **kwargs)
        else:
            self.abort(401, explanation='CSRF token doesn\'t match.')
    return _check_csrf_token
## Application configuration
appconfig = {
    'webapp2_extras.auth': {
        'user_model': 'models.User',
        'user_attributes': ['name', 'avatar'] # will be cached in session (no access to storage)
    },
    'webapp2_extras.sessions': {
        # NOTE(review): placeholder secret -- must be replaced before deploy.
        'secret_key': 'BEBEBEChangeItOnProductionServerBEBEBE',
        'cookie_args': {'httponly': True}, # enforce session cookies not to be accessible by JS
    }
}
## Routing
Route = webapp2.Route
# NOTE(review): debug=True exposes tracebacks; disable in production.
app = webapp2.WSGIApplication([
    RedirectRoute('/', redirect_to='/blog/', name='home'),
    RedirectRoute('/blog/', 'handlers.BlogFrontpage', strict_slash=True, name='blog-frontpage'),
    PathPrefixRoute('/blog', [
        Route('/newpost', 'handlers.BlogNewpost', name='blog-newpost'),
        PathPrefixRoute(r'/<post_id:\d+>', [
            Route('', 'handlers.BlogOnePost', name='blog-onepost'),
            Route('/edit', 'handlers.BlogEdit', name='blog-edit'),
            Route('/delete', 'handlers.BlogDelete', name='blog-delete'),
            Route('/like', 'handlers.BlogLike', name='blog-like'),
            Route('/comment', 'handlers.PostComment', name='blog-comment'),
            PathPrefixRoute(r'/comments/<comment_id:\d+>', [
                Route('/edit', 'handlers.EditComment', name='comment-edit'),
                Route('/delete', 'handlers.DeleteComment', name='comment-delete'),
            ]),
        ]),
    ]),
    Route('/login', 'auth.LoginHandler', name="login"),
    Route('/logout', 'auth.LogoutHandler', name="logout"),
    Route('/signup', 'auth.SignupHandler', name="signup"),
    Route('/welcome', 'handlers.WelcomeHandler', name="welcome"),
    Route('/flashtest', 'handlers.FlashTest', name="flashtest"), # for debug
], debug=True, config=appconfig)
|
jabesq/home-assistant | refs/heads/dev | homeassistant/components/googlehome/sensor.py | 7 | """Support for Google Home alarm sensor."""
from datetime import timedelta
import logging
from homeassistant.const import DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
from . import CLIENT, DOMAIN as GOOGLEHOME_DOMAIN, NAME
# Poll cadence for alarm/timer state.
SCAN_INTERVAL = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
ICON = 'mdi:alarm'
# Map of condition key -> human-readable entity name suffix.
SENSOR_TYPES = {
    'timer': 'Timer',
    'alarm': 'Alarm',
}
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the googlehome sensor platform."""
    # This platform is only set up through discovery by the googlehome
    # component; direct YAML configuration is unsupported.
    if discovery_info is None:
        _LOGGER.warning(
            "To use this you need to configure the 'googlehome' component")
        return
    await hass.data[CLIENT].update_info(discovery_info['host'])
    data = hass.data[GOOGLEHOME_DOMAIN][discovery_info['host']]
    info = data.get('info', {})
    devices = []
    # One entity per supported condition (timer / alarm).
    for condition in SENSOR_TYPES:
        device = GoogleHomeAlarm(hass.data[CLIENT], condition,
                                 discovery_info, info.get('name', NAME))
        devices.append(device)
    async_add_entities(devices, True)
class GoogleHomeAlarm(Entity):
    """Representation of a GoogleHomeAlarm."""
    def __init__(self, client, condition, config, name):
        """Initialize the GoogleHomeAlarm sensor."""
        self._host = config['host']       # device host, keys shared hass.data
        self._client = client             # googlehome API client
        self._condition = condition       # 'timer' or 'alarm'
        self._name = None
        self._state = None                # ISO timestamp of next firing
        self._available = True
        self._name = "{} {}".format(name, SENSOR_TYPES[self._condition])
    async def async_update(self):
        """Update the data."""
        await self._client.update_alarms(self._host)
        data = self.hass.data[GOOGLEHOME_DOMAIN][self._host]
        alarms = data.get('alarms')[self._condition]
        # No pending alarms of this type -> mark the entity unavailable.
        if not alarms:
            self._available = False
            return
        self._available = True
        # fire_time is in milliseconds since epoch; report the soonest one.
        time_date = dt_util.utc_from_timestamp(min(element['fire_time']
                                                   for element in alarms)
                                               / 1000)
        self._state = time_date.isoformat()
    @property
    def state(self):
        """Return the state."""
        return self._state
    @property
    def name(self):
        """Return the name."""
        return self._name
    @property
    def device_class(self):
        """Return the device class."""
        return DEVICE_CLASS_TIMESTAMP
    @property
    def available(self):
        """Return the availability state."""
        return self._available
    @property
    def icon(self):
        """Return the icon."""
        return ICON
|
langurmonkey/gaiasky | refs/heads/master | assets/scripts/tests/post-runnable-test.py | 2 | # This script tests posting and parking runnables that run on the main loop thread
# Created by Toni Sagrista
from py4j.clientserver import ClientServer, JavaParameters, PythonParameters
"""
Prints to both gaia sky and python
"""
def lprint(string):
    """Print *string* both through the Gaia Sky interface and locally."""
    gs.print(string)
    print(string)
class PrintRunnable(object):
    """Runnable posted to Gaia Sky; prints a greeting when executed."""
    def run(self):
        lprint("Hello from Python!")
    class Java:
        # py4j marker: exposes this object as a java.lang.Runnable proxy
        # so it can be handed to gs.postRunnable()/parkRunnable().
        implements = ["java.lang.Runnable"]
class FrameCounterRunnable(object):
    """Runnable executed once per frame; reports every 30th invocation."""
    def __init__(self):
        # Number of frames seen so far.
        self.n = 0
    def run(self):
        self.n += 1
        if self.n % 30 == 0:
            lprint("Number of frames: %d" % self.n)
    class Java:
        # py4j marker: exposes this object as a java.lang.Runnable proxy.
        implements = ["java.lang.Runnable"]
# Connect to the running Gaia Sky instance over py4j.
gateway = ClientServer(java_parameters=JavaParameters(auto_convert=True),
                       python_parameters=PythonParameters())
gs = gateway.entry_point
# We post a simple runnable which prints "Hello from Python!" through the event interface once
gs.postRunnable(PrintRunnable())
# We park a runnable which counts the frames and prints the current number
# of frames every 30 of them
gs.parkRunnable("frame_counter", FrameCounterRunnable())
gs.sleep(15.0)
# We unpark the frame counter
gs.unparkRunnable("frame_counter")
lprint("Exiting script")
gateway.shutdown()
|
Batterfii/django | refs/heads/master | tests/migrations/migrations_test_apps/lookuperror_c/models.py | 415 | from django.db import models
class C1(models.Model):
    # Deliberately empty model; defines no fields of its own.
    pass
class C2(models.Model):
    # Cross-app FK declared by string; resolved at app-loading time.
    a1 = models.ForeignKey('lookuperror_a.A1', models.CASCADE)
class C3(models.Model):
    # Deliberately empty model; defines no fields of its own.
    pass
|
ryuunosukeyoshi/PartnerPoi-Bot | refs/heads/master | lib/youtube_dl/extractor/dhm.py | 64 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import parse_duration
class DHMIE(InfoExtractor):
    """Extractor for the Deutsches Historisches Museum film archive."""
    IE_DESC = 'Filmarchiv - Deutsches Historisches Museum'
    _VALID_URL = r'https?://(?:www\.)?dhm\.de/filmarchiv/(?:[^/]+/)+(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.dhm.de/filmarchiv/die-filme/the-marshallplan-at-work-in-west-germany/',
        'md5': '11c475f670209bf6acca0b2b7ef51827',
        'info_dict': {
            'id': 'the-marshallplan-at-work-in-west-germany',
            'ext': 'flv',
            'title': 'MARSHALL PLAN AT WORK IN WESTERN GERMANY, THE',
            'description': 'md5:1fabd480c153f97b07add61c44407c82',
            'duration': 660,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://www.dhm.de/filmarchiv/02-mapping-the-wall/peter-g/rolle-1/',
        'md5': '09890226332476a3e3f6f2cb74734aa5',
        'info_dict': {
            'id': 'rolle-1',
            'ext': 'flv',
            'title': 'ROLLE 1',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }]
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        # The embedded player config points at an XSPF playlist.
        playlist_url = self._search_regex(
            r"file\s*:\s*'([^']+)'", webpage, 'playlist url')
        entries = self._extract_xspf_playlist(playlist_url, playlist_id)
        title = self._search_regex(
            [r'dc:title="([^"]+)"', r'<title> »([^<]+)</title>'],
            webpage, 'title').strip()
        description = self._html_search_regex(
            r'<p><strong>Description:</strong>(.+?)</p>',
            webpage, 'description', default=None)
        duration = parse_duration(self._search_regex(
            r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)',
            webpage, 'duration', default=None))
        # Enrich the first playlist entry with page-level metadata.
        entries[0].update({
            'title': title,
            'description': description,
            'duration': duration,
        })
        return self.playlist_result(entries, playlist_id)
|
cgvarela/grpc | refs/heads/master | src/python/grpcio_test/grpc_test/_adapter/_future_invocation_asynchronous_event_service_test.py | 14 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""One of the tests of the Face layer of RPC Framework."""
import unittest
from grpc_test._adapter import _face_test_case
from grpc_test.framework.face.testing import future_invocation_asynchronous_event_service_test_case as test_case
class FutureInvocationAsynchronousEventServiceTest(
    _face_test_case.FaceTestCase,
    test_case.FutureInvocationAsynchronousEventServiceTestCase,
    unittest.TestCase):
  """Binds the shared Face-layer test case to this adapter's fixtures.

  All test behavior is inherited from the mixin base classes.
  """
  pass
if __name__ == '__main__':
unittest.main(verbosity=2)
|
rogersb11/SourceDrops | refs/heads/n7100-kk | scripts/rt-tester/rt-tester.py | 11005 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
    """Print commandline usage to stdout."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    """Print *str* unless quiet mode is enabled.

    NOTE(review): the parameter name shadows the builtin str.
    """
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare status value *val* against test tuple *top* = (field, op, arg).

    Returns 1 on match, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: select the decimal digit at position *arg*.
        # NOTE: relies on Python 2 integer division ('/' on two ints).
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode: translate a symbolic opcode name to its numeric id.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)
# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)
# Select the input source
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0
# Read the test patterns
# Each line has the form  cmd:opcode:threadid:data
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
        progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        # 't' checks once; 'w' polls the sysfs status until it matches.
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
        sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
diegocortassa/TACTIC | refs/heads/master | src/pyasm/prod/render/render_context.py | 1 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['BaseRenderContext', 'AssetRenderContext', 'ShotRenderContext']
import os
from pyasm.common import *
from pyasm.prod.biz import FrameRange
class BaseRenderContext(Base):
'''The context under which a render take place. This includes all
of the settings and specific flags for a particular render'''
def __init__(self, policy=None):
self.sobject = None
self.snapshot = None
self.snapshot_xml = None
self.snapshot_sobject = None
self.context = None
self.policy = policy
# by default, just render the first frame
self.frame_range = FrameRange(1,1,1)
# FIXME: this is maya specific
self.camera = "persp"
self.layer_names = []
self.override = ""
# FIXME: not general enough
self.shot = None
def set_policy(self, policy):
self.policy = policy
def set_override(self, override):
'''set overrides to render parameters'''
self.override = override
def get_override(self):
return self.override
# information from policy
def get_resolution(self):
return self.policy.get_resolution()
def get_layer_names(self):
return self.layer_names
def add_layer(self, layer_name):
self.layer_names.append(layer_name)
def get_input_path(self):
'''gets the input file to be rendered'''
snapshot = self.get_snapshot()
lib_dir = snapshot.get_lib_dir()
# FIXME: big assumption that snapshot type == file_type
# FIXME: maya files only ????
filename = snapshot.get_file_name_by_type("maya")
if not filename:
filename = snapshot.get_file_name_by_type("xsi")
if not filename:
filename = snapshot.get_file_name_by_type("main")
if not filename:
raise TacticException("Cannot render snapshot [%s] because file is not supported" % snapshot.get_code() )
input_path = "%s/%s" % (lib_dir,filename)
return input_path
def get_output_prefix(self):
# FIXME: should get this from naming conventions
return "image_test"
def get_output_ext(self):
# FIXME: should take this from render policy
return "png"
def get_output_padding(self):
return 4
def get_output_pattern(self):
# ie: "image.jpg.####"
return "%s.%s.####" % (self.get_output_prefix(), self.get_output_ext() )
def get_render_dir(self):
ticket = Environment.get_security().get_ticket_key()
tmpdir = Environment.get_tmp_dir()
render_dir = "%s/temp/%s" % (tmpdir, ticket)
System().makedirs(render_dir)
return render_dir
def set_shot(self, shot):
self.shot = shot
# setting the shot always sets the frames
self.frame_range = shot.get_frame_range()
def get_shot(self):
return self.shot
def set_sobject(self, sobject):
'''set the sobject that is being rendered'''
self.sobject = sobject
def get_sobject(self):
return self.sobject
def set_camera(self, camera):
print "Overriding camera: ", camera
self.camera = camera
def get_camera(self):
return self.camera
def set_frame_range(self, frame_range):
self.frame_range = frame_range
# if the policy sets a frame by, then use it
frame_by = self.policy.get_value("frame_by")
if frame_by:
self.frame_range.set_frame_by(int(frame_by))
def set_frame_range_values(self, start, end, by):
frame_range = FrameRange(start, end, by)
self.set_frame_range(frame_range)
def get_frame_range(self):
return self.frame_range
def set_snapshot(self, snapshot):
assert snapshot != None
self.snapshot = snapshot
self.snapshot_xml = snapshot.get_value("snapshot")
#self.sobject = self.snapshot.get_sobject()
self.snapshot_sobject = self.snapshot.get_sobject()
def set_snapshot_xml(self, snapshot_xml):
self.snapshot_xml = snapshot_xml
def get_snapshot(self):
return self.snapshot
def get_snapshot_xml(self):
return self.snapshot_xml
def set_context(self, context):
self.context = context
def get_context(self):
return self.context
def set_policy(self, policy):
self.policy = policy
def get_extra_settings(self):
# these extra settings are determined by the policy
return self.policy.get_value("extra_settings")
def get_name(self):
return self.__class__.__name__
def get_xml_data(self):
    '''Create an XML document describing this render context, suitable
    for storing in the render queue.'''
    xml = Xml()
    xml.create_doc("data")
    root = xml.get_root_node()
    # a snapshot implies a concrete (sobject, snapshot) pair; otherwise
    # fall back to just the sobject, if one was set
    if self.snapshot:
        # NOTE(review): uses self.sobject here (not self.snapshot_sobject
        # cached by set_snapshot()) -- confirm this is intended
        element = xml.create_text_element("search_key", self.sobject.get_search_key())
        root.appendChild(element)
        element = xml.create_text_element("snapshot_code", self.snapshot.get_code())
        root.appendChild(element)
    elif self.sobject:
        element = xml.create_text_element("search_key", self.sobject.get_search_key())
        root.appendChild(element)
    # add information about the frames
    element = xml.create_text_element("prefix", self.get_output_prefix() )
    root.appendChild(element)
    element = xml.create_text_element("ext", self.get_output_ext() )
    root.appendChild(element)
    element = xml.create_text_element("padding", str(self.get_output_padding() ))
    root.appendChild(element)
    element = xml.create_text_element("file_range", self.frame_range.get_key() )
    root.appendChild(element)
    element = xml.create_text_element("pattern", self.get_output_pattern() )
    root.appendChild(element)
    # add layer information
    # NOTE(review): assumes self.layer_names is initialized elsewhere -- confirm
    for layer_name in self.layer_names:
        element = xml.create_text_element("layer_name", layer_name )
        root.appendChild(element)
    return xml.to_string()
class AssetRenderContext(BaseRenderContext):
    '''Convenience class to render assets thumbnails.

    Locates the latest publish snapshot for the asset (via a render_stage
    sobject when one exists) and builds the snapshot XML used to stage the
    asset with a camera.
    '''
    def __init__(self, sobject):
        super(AssetRenderContext,self).__init__()
        self.set_sobject(sobject)
        self.set_context("publish")
        # check if there is an associated render_stage sobject.
        search = Search("prod/render_stage")
        search.add_sobject_filter(sobject)
        search.add_filter("context", self.context)
        render_stage = search.get_sobject()
        if render_stage != None:
            snapshot = Snapshot.get_latest_by_sobject(render_stage, self.context)
        else:
            loader_context = ProdLoaderContext()
            snapshot = loader_context.get_snapshot_by_sobject( \
                sobject, self.context)
        # BUGFIX: test for a missing snapshot *before* set_snapshot().
        # set_snapshot() asserts snapshot != None, so in the original code
        # this RenderException was unreachable (the assert fired first).
        if snapshot == None:
            raise RenderException("snapshot for [%s] [%s] does not exist" % \
                (sobject.get_search_type(), sobject.get_id() ))
        self.set_snapshot(snapshot)
        # TODO: should look for cameras and render all of them
        snapshot_xml = snapshot.get_snapshot_xml()
        instances = snapshot_xml.get_values("snapshot/ref/@instance")
        for instance in instances:
            if instance.startswith("camera"):
                # HACK: hard-codes the camera node under the instance
                #self.camera = instance
                self.camera = "%s:%s" % (instance, "camera100")
        # NOTE(review): if no camera instance is found, this relies on
        # BaseRenderContext having initialized self.camera -- confirm
        camera = self.camera
        # set up the asset with a camera
        if camera == "persp":
            # default perspective camera: frame the asset with a fixed tilt
            self.set_snapshot_xml('''
            <snapshot>
            <ref snapshot_code='%s'/>
            <mel>
            select -clear
            xform -ro -25 -45 0 %s
            viewFit %s
            setAttr %s.preScale 1.5
            </mel>
            </snapshot>
            ''' % (snapshot.get_code(), camera, camera, camera)
            )
        else:
            # published camera: reference the snapshot as-is
            self.set_snapshot_xml('''
            <snapshot>
            <ref snapshot_code='%s'/>
            </snapshot>
            ''' % (snapshot.get_code())
            )
        # extra commands to add a light set
        #<ref search_type='prod/asset?prod=prod2' search_id='36' version='-1' context='publish'/>
        #viewFit -f 10 %s
class ShotRenderContext(BaseRenderContext):
    '''Render context for shots; currently inherits base behaviour unchanged.'''
    pass
|
crawfordsm/zSALT | refs/heads/master | zsalt/redshift.py | 1 | import sys
import pyfits
import numpy as np
from PySpectrograph import Spectrum
from PySpectrograph.Utilities.fit import interfit
import pylab as pl
def ncor(x, y):
    """Calculate the normalized cross-correlation of two equal-length arrays.

    Returns a scalar float in [-1, 1], or 0 when either input has zero
    power (avoids division by zero).
    """
    # .item() extracts the scalar: np.correlate returns a length-1 array in
    # its default 'valid' mode, and comparing/returning the raw array is
    # deprecated in modern NumPy
    sx = np.correlate(x, x).item()
    sy = np.correlate(y, y).item()
    d = sx * sy
    if d <= 0:
        return 0
    return np.correlate(x, y).item() / d ** 0.5
def xcor_redshift(spectra, template, sub=False, z1=0, z2=1, zstep=0.001):
    """Measure the redshift of a spectra by cross correlating it
    with a template.

    Returns the array of trial redshifts and the matching array of
    normalized correlation values.

    NOTE(review): the ``sub`` parameter is currently unused -- confirm
    whether continuum subtraction was meant to be optional.
    """
    # one trial redshift per step, one correlation score per trial
    zvalue=np.arange(z1,z2,zstep)
    cc_arr=np.zeros(len(zvalue))
    # correlate continuum-subtracted fluxes so the score tracks spectral
    # features rather than the overall continuum shape
    sflux=continuum_subtract(spectra)
    tflux=continuum_subtract(template)
    for i,z in enumerate(zvalue):
        # shift the template to redshift z and resample it onto the
        # observed wavelength grid
        nflux=np.interp(spectra.wavelength, template.wavelength*(1+z), tflux)
        cc_arr[i]=ncor(sflux, nflux)
    return zvalue, cc_arr
def continuum_subtract(spec, function='polynomial', order=7):
    """Fit a function to a spectra and subtract the continuum.

    Returns the residual flux array (spec.flux minus the fitted continuum).
    """
    # NOTE(review): presumably interfit() performs the fit and the instance
    # is then callable to evaluate it -- confirm against PySpectrograph docs
    wc=interfit(spec.wavelength, spec.flux, function=function, order=order)
    wc.interfit()
    return spec.flux-wc(spec.wavelength)
def loadtext(infile):
    """Load a two-column (wavelength, flux) ASCII file into a Spectrum."""
    warr, farr=np.loadtxt(infile, usecols=(0,1), unpack=True)
    spec=Spectrum.Spectrum(warr, farr, stype='continuum')
    return spec
def loadiraf(hdu):
    """Build a Spectrum from an IRAF-style FITS HDU using its linear WCS."""
    farr=hdu[0].data
    xarr=np.arange(len(farr))
    # linear wavelength solution from the header keywords
    # NOTE(review): the usual FITS convention is CRVAL1 + CDELT1*(x + 1 - CRPIX1);
    # here CRPIX1 is *added* -- confirm this matches the files being read
    warr=hdu[0].header['CRVAL1']+hdu[0].header['CDELT1']*(xarr+hdu[0].header['CRPIX1'])
    # drop low-signal pixels (flux <= 10); threshold is ad hoc
    mask=(farr>10)
    spec=Spectrum.Spectrum(warr[mask], farr[mask], stype='continuum')
    return spec
def loadsdss(hdu):
    """Build a Spectrum from an SDSS spec HDU (log10-wavelength WCS)."""
    # first image row holds the flux
    farr=hdu[0].data[0]
    xarr=np.arange(len(farr))
    # SDSS stores log10(wavelength): CRVAL1 + CD1_1 per (1-indexed) pixel
    warr=10**(hdu[0].header['CRVAL1']+hdu[0].header['CD1_1']*(xarr+1))
    spec=Spectrum.Spectrum(warr, farr, stype='continuum')
    return spec
if __name__=='__main__':
    # usage: redshift.py <spectrum(.fits|.txt)> <sdss_template.fits>
    if sys.argv[1].count('fits'):
        hdu=pyfits.open(sys.argv[1])
        spec=loadiraf(hdu)
    else:
        spec=loadtext(sys.argv[1])
    thdu=pyfits.open(sys.argv[2])
    template=loadsdss(thdu)
    # best redshift = peak of the cross-correlation over the trial grid
    z_arr, cc_arr=xcor_redshift(spec, template, z1=0.0001, z2=1.20, zstep=0.0001)
    z=z_arr[cc_arr.argmax()]
    #z_arr, cc_arr=xcor_redshift(spec, template, z1=z-0.05, z2=z+0.05, zstep=0.0001)
    #z=z_arr[cc_arr.argmax()]
    # print as a function call so the script also runs under Python 3
    print(z)
    pl.figure()
    pl.plot(z_arr, cc_arr)
    pl.figure()
    # lightly smooth the observed spectrum for display
    cflux=np.convolve(spec.flux, np.ones(10), mode='same')
    pl.plot(spec.wavelength, cflux)
    # overlay the template shifted to the best redshift, scaled to match
    nflux=np.interp(spec.wavelength, (1+z)*template.wavelength, template.flux)
    #pl.plot((1+z)*template.wavelength, template.flux*spec.flux.mean()/template.flux.mean())
    pl.plot(spec.wavelength, nflux*cflux.mean()/nflux.mean())
    pl.show()
|
westinedu/similarinterest | refs/heads/master | djangoappengine/mapreduce/__init__.py | 12133432 | |
elventear/ansible-modules-core | refs/heads/devel | source_control/__init__.py | 12133432 | |
siutanwong/scikit-learn | refs/heads/master | sklearn/neighbors/tests/__init__.py | 12133432 | |
Alexander-M-Waldman/local_currency_site | refs/heads/master | lib/python2.7/site-packages/django/db/backends/base/__init__.py | 12133432 | |
jaredly/pyjamas | refs/heads/master | pgen/lib2to3/compiler/parser.py | 6 | from lib2to3.pgen2.driver import load_grammar
from lib2to3.pgen2.driver import Driver
import os
gpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "Grammar.txt")
g = load_grammar(gpath)
def suite(text):
    """Parse *text* with the module-level grammar and return the parse tree."""
    driver = Driver(g)
    return driver.parse_string(text)
# dummy
def st2tuple(tree, line_info=1):
    """Compatibility shim: return *tree* unchanged (``line_info`` is ignored)."""
    return tree
|
eldarion/formly | refs/heads/master | formly/tests/factories.py | 1 | import factory
from formly.models import (
Field,
FieldChoice,
FieldResult,
OrdinalChoice,
OrdinalScale,
Page,
Survey,
SurveyResult,
)
class OrdinalChoiceFactory(factory.django.DjangoModelFactory):
class Meta:
model = OrdinalChoice
label = factory.Sequence(lambda n: "label-{}".format(n))
class OrdinalScaleFactory(factory.django.DjangoModelFactory):
class Meta:
model = OrdinalScale
name = factory.Sequence(lambda n: "scale-{}".format(n))
kind = OrdinalScale.ORDINAL_KIND_RATING
class SurveyFactory(factory.django.DjangoModelFactory):
    # Factory for Survey; SurveyFactory(user=<User>) sets the creator.
    class Meta:
        model = Survey

    class Params:
        # convenience alias: the `user` param feeds `creator` below
        user = None

    name = factory.Sequence(lambda n: "survey-{}".format(n))
    creator = factory.LazyAttribute(lambda o: o.user)
class SurveyResultFactory(factory.django.DjangoModelFactory):
class Meta:
model = SurveyResult
class PageFactory(factory.django.DjangoModelFactory):
class Meta:
model = Page
class FieldFactory(factory.django.DjangoModelFactory):
    # Factory for Field; defaults to a text field with a sequential ordinal.
    class Meta:
        model = Field

    label = factory.Sequence(lambda n: "field-label-{}".format(n))
    field_type = Field.TEXT_FIELD
    ordinal = factory.Sequence(lambda n: n)
class FieldChoiceFactory(factory.django.DjangoModelFactory):
class Meta:
model = FieldChoice
label = factory.Sequence(lambda n: "choice-label-{}".format(n))
class FieldResultFactory(factory.django.DjangoModelFactory):
class Meta:
model = FieldResult
|
40223231/2015cd_midterm | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/_collections.py | 603 | # "High performance data structures
# "
# copied from pypy repo
#
# Copied and completed from the sandbox of CPython
# (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger)
#
# edited for Brython line 558 : catch ImportError instead of AttributeError
import operator
#try:
# from thread import get_ident as _thread_ident
#except ImportError:
def _thread_ident():
    # fallback when the `thread` module is unavailable (e.g. Brython):
    # pretend everything runs on a single thread with id -1
    return -1
n = 30
LFTLNK = n
RGTLNK = n+1
BLOCKSIZ = n+2
# The deque's size limit is d.maxlen. The limit can be zero or positive, or
# None. After an item is added to a deque, we check to see if the size has
# grown past the limit. If it has, we get the size back down to the limit by
# popping an item off of the opposite end. The methods that can trigger this
# are append(), appendleft(), extend(), and extendleft().
#class deque(object):
class deque:
def __new__(cls, iterable=(), *args, **kw):
#fixme
#self = super(deque, cls).__new__(cls, *args, **kw)
self=object.__new__(cls, *args, **kw)
self.clear()
return self
def __init__(self, iterable=(), maxlen=None):
object.__init__(self)
self.clear()
if maxlen is not None:
if maxlen < 0:
raise ValueError("maxlen must be non-negative")
self._maxlen = maxlen
add = self.append
for elem in iterable:
add(elem)
@property
def maxlen(self):
return self._maxlen
def clear(self):
self.right = self.left = [None] * BLOCKSIZ
self.rightndx = n//2 # points to last written element
self.leftndx = n//2+1
self.length = 0
self.state = 0
def append(self, x):
self.state += 1
self.rightndx += 1
if self.rightndx == n:
newblock = [None] * BLOCKSIZ
self.right[RGTLNK] = newblock
newblock[LFTLNK] = self.right
self.right = newblock
self.rightndx = 0
self.length += 1
self.right[self.rightndx] = x
if self.maxlen is not None and self.length > self.maxlen:
self.popleft()
def appendleft(self, x):
self.state += 1
self.leftndx -= 1
if self.leftndx == -1:
newblock = [None] * BLOCKSIZ
self.left[LFTLNK] = newblock
newblock[RGTLNK] = self.left
self.left = newblock
self.leftndx = n-1
self.length += 1
self.left[self.leftndx] = x
if self.maxlen is not None and self.length > self.maxlen:
self.pop()
def extend(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.append(elem)
def extendleft(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.appendleft(elem)
def pop(self):
if self.left is self.right and self.leftndx > self.rightndx:
#raise IndexError, "pop from an empty deque" # does not work in brython
raise IndexError("pop from an empty deque")
x = self.right[self.rightndx]
self.right[self.rightndx] = None
self.length -= 1
self.rightndx -= 1
self.state += 1
if self.rightndx == -1:
prevblock = self.right[LFTLNK]
if prevblock is None:
# the deque has become empty; recenter instead of freeing block
self.rightndx = n//2
self.leftndx = n//2+1
else:
prevblock[RGTLNK] = None
self.right[LFTLNK] = None
self.right = prevblock
self.rightndx = n-1
return x
def popleft(self):
if self.left is self.right and self.leftndx > self.rightndx:
#raise IndexError, "pop from an empty deque"
raise IndexError("pop from an empty deque")
x = self.left[self.leftndx]
self.left[self.leftndx] = None
self.length -= 1
self.leftndx += 1
self.state += 1
if self.leftndx == n:
prevblock = self.left[RGTLNK]
if prevblock is None:
# the deque has become empty; recenter instead of freeing block
self.rightndx = n//2
self.leftndx = n//2+1
else:
prevblock[LFTLNK] = None
self.left[RGTLNK] = None
self.left = prevblock
self.leftndx = 0
return x
def count(self, value):
c = 0
for item in self:
if item == value:
c += 1
return c
def remove(self, value):
# Need to be defensive for mutating comparisons
for i in range(len(self)):
if self[i] == value:
del self[i]
return
raise ValueError("deque.remove(x): x not in deque")
def rotate(self, n=1):
length = len(self)
if length == 0:
return
halflen = (length+1) >> 1
if n > halflen or n < -halflen:
n %= length
if n > halflen:
n -= length
elif n < -halflen:
n += length
while n > 0:
self.appendleft(self.pop())
n -= 1
while n < 0:
self.append(self.popleft())
n += 1
def reverse(self):
"reverse *IN PLACE*"
leftblock = self.left
rightblock = self.right
leftindex = self.leftndx
rightindex = self.rightndx
for i in range(self.length // 2):
# Validate that pointers haven't met in the middle
assert leftblock != rightblock or leftindex < rightindex
# Swap
(rightblock[rightindex], leftblock[leftindex]) = (
leftblock[leftindex], rightblock[rightindex])
# Advance left block/index pair
leftindex += 1
if leftindex == n:
leftblock = leftblock[RGTLNK]
assert leftblock is not None
leftindex = 0
# Step backwards with the right block/index pair
rightindex -= 1
if rightindex == -1:
rightblock = rightblock[LFTLNK]
assert rightblock is not None
rightindex = n - 1
def __repr__(self):
threadlocalattr = '__repr' + str(_thread_ident())
if threadlocalattr in self.__dict__:
return 'deque([...])'
else:
self.__dict__[threadlocalattr] = True
try:
if self.maxlen is not None:
return 'deque(%r, maxlen=%s)' % (list(self), self.maxlen)
else:
return 'deque(%r)' % (list(self),)
finally:
del self.__dict__[threadlocalattr]
def __iter__(self):
return deque_iterator(self, self._iter_impl)
def _iter_impl(self, original_state, giveup):
if self.state != original_state:
giveup()
block = self.left
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
for elem in block[l:r]:
yield elem
if self.state != original_state:
giveup()
block = block[RGTLNK]
def __reversed__(self):
return deque_iterator(self, self._reversed_impl)
def _reversed_impl(self, original_state, giveup):
if self.state != original_state:
giveup()
block = self.right
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
for elem in reversed(block[l:r]):
yield elem
if self.state != original_state:
giveup()
block = block[LFTLNK]
def __len__(self):
#sum = 0
#block = self.left
#while block:
# sum += n
# block = block[RGTLNK]
#return sum + self.rightndx - self.leftndx + 1 - n
return self.length
def __getref(self, index):
if index >= 0:
block = self.left
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
span = r-l
if index < span:
return block, l+index
index -= span
block = block[RGTLNK]
else:
block = self.right
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
negative_span = l-r
if index >= negative_span:
return block, r+index
index -= negative_span
block = block[LFTLNK]
raise IndexError("deque index out of range")
def __getitem__(self, index):
block, index = self.__getref(index)
return block[index]
def __setitem__(self, index, value):
block, index = self.__getref(index)
block[index] = value
def __delitem__(self, index):
    '''Delete the item at *index* (may be negative) by rotating it to an
    end of the deque, popping it, and rotating back.'''
    length = len(self)
    if index >= 0:
        if index >= length:
            raise IndexError("deque index out of range")
        self.rotate(-index)
        self.popleft()
        self.rotate(index)
    else:
        # Map the negative index to its distance from the right end.
        # This is the bitwise-not ~index, written arithmetically so it
        # also works where bitwise operators are unavailable (Brython).
        # BUGFIX: the previous `index ^ (2**31)` is NOT equivalent to
        # ~index for Python's arbitrary-precision negative ints, so
        # negative-index deletion removed the wrong element.
        index = -index - 1
        if index >= length:
            raise IndexError("deque index out of range")
        self.rotate(index)
        self.pop()
        self.rotate(-index)
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
def __hash__(self):
#raise TypeError, "deque objects are unhashable"
raise TypeError("deque objects are unhashable")
def __copy__(self):
return self.__class__(self, self.maxlen)
# XXX make comparison more efficient
def __eq__(self, other):
if isinstance(other, deque):
return list(self) == list(other)
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, deque):
return list(self) != list(other)
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, deque):
return list(self) < list(other)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, deque):
return list(self) <= list(other)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, deque):
return list(self) > list(other)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, deque):
return list(self) >= list(other)
else:
return NotImplemented
def __iadd__(self, other):
self.extend(other)
return self
class deque_iterator(object):
    # Iterator over a deque that detects mutation during iteration via the
    # deque's `state` counter.
    # NOTE(review): written for Python 2 -- it defines `next()` and calls
    # `self._gen.next()`, not `__next__`, so it is not a valid Python 3
    # iterator as-is; confirm the target runtime (Brython) accepts this.
    def __init__(self, deq, itergen):
        self.counter = len(deq)
        def giveup():
            # invoked by the generator when the deque changed underneath us
            self.counter = 0
            #raise RuntimeError, "deque mutated during iteration"
            raise RuntimeError("deque mutated during iteration")
        self._gen = itergen(deq.state, giveup)
    def next(self):
        res = self._gen.next()
        self.counter -= 1
        return res
    def __iter__(self):
        return self
class defaultdict(dict):
    '''Minimal `collections.defaultdict` replacement.

    Missing keys are populated on first access with `default_factory()`.
    '''
    def __init__(self, *args, **kwds):
        # peel off the optional first positional argument: the factory
        if len(args) > 0:
            default_factory = args[0]
            args = args[1:]
            if not callable(default_factory) and default_factory is not None:
                raise TypeError("first argument must be callable")
        else:
            default_factory = None
        # BUGFIX: forward the remaining arguments properly.  The original
        # `dict.__init__(self, args, kwds)` (and the redundant
        # `self.update(args, kwds)`) passed the tuple and the dict as two
        # positional arguments, which dict rejects with a TypeError.
        dict.__init__(self, *args, **kwds)
        self.default_factory = default_factory

    # explicit override so missing keys route through __missing__
    # (needed for Brython, per the original note)
    def __getitem__(self, key):
        if self.__contains__(key):
            return dict.__getitem__(self, key)
        return self.__missing__(key)

    def __missing__(self, key):
        # from defaultdict docs: no factory means plain KeyError
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value

    def __repr__(self, recurse=set()):
        # `recurse` is shared across calls on purpose: it is the memo that
        # breaks infinite recursion for self-referencing dicts
        if id(self) in recurse:
            return "defaultdict(...)"
        try:
            recurse.add(id(self))
            return "defaultdict(%s, %s)" % (repr(self.default_factory),
                                            super(defaultdict, self).__repr__())
        finally:
            recurse.remove(id(self))

    def copy(self):
        return type(self)(self.default_factory, self)

    def __copy__(self):
        return self.copy()

    def __reduce__(self):
        #
        # __reduce__ must return a 5-tuple as follows:
        #
        #   - factory function
        #   - tuple of args for the factory function
        #   - additional state (here None)
        #   - sequence iterator (here None)
        #   - dictionary iterator (yielding successive (key, value) pairs
        # This API is used by pickle.py and copy.py.
        #
        # BUGFIX: use items() -- iteritems() does not exist in Python 3.
        return (type(self), (self.default_factory,), None, None,
                iter(self.items()))
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessable by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not min(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not min(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new dict which maps field names to their values'
return dict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
if verbose:
print(template)
# Execute the template string in a temporary namespace
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec(template,namespace)
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
if __name__ == '__main__':
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0]+p[1])
x,y=p
print(x,y)
print(p.x+p.y)
print(p)
|
VitalPet/sale-workflow | refs/heads/8.0 | sale_partner_order_policy/__openerp__.py | 22 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Sale Partner Order Policy module for Odoo
# Copyright (C) 2014 Akretion (http://www.akretion.com).
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Partner Order Policy',
'version': '1.0',
'category': 'Sales Management',
'license': 'AGPL-3',
'summary': "Adds customer create invoice method on partner form",
'description': """
This module adds a new field on the partner form in the *Accouting* tab:
*Customer Create Invoice*. The value of this field will be used when you
create a new Sale Order with this partner as customer.
Beware that this module depends not only on *sale*, but also on *stock*.
As there is only one create invoice method when the *stock* module is not
installed, you should not install this module if the *stock* module is not
installed.
This module has been written by Alexis de Lattre
<alexis.delattre@akretion.com>
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com',
'depends': ['sale_stock'],
'data': ['partner_view.xml'],
'demo': ['partner_demo.xml'],
'installable': True,
}
|
scrollback/kuma | refs/heads/master | vendor/packages/pylint/test/input/func_e99xx.py | 6 | """test string format error
"""
__revision__ = 1
PARG_1 = PARG_2 = PARG_3 = 1
def pprint():
    """Test string format

    NOTE: every malformed format expression below is INTENTIONAL -- this
    file is a pylint test fixture and the trailing comments name the
    expected message ids.  Do not "fix" these lines.
    """
    print "%s %s" % {'PARG_1': 1, 'PARG_2': 2} # E9906
    print "%s" % (PARG_1, PARG_2) # E9905
    print "%(PARG_1)d %d" % {'PARG_1': 1, 'PARG_2': 2} # E9902
    print "%(PARG_1)d %(PARG_2)d" % {'PARG_1': 1} # E9904
    print "%(PARG_1)d %(PARG_2)d" % {'PARG_1': 1, 'PARG_2':2, 'PARG_3':3} # W9901
    print "%(PARG_1)d %(PARG_2)d" % {'PARG_1': 1, 2:3} # W9900 E9904
    print "%(PARG_1)d %(PARG_2)d" % (2, 3) # 9903
    print "%(PARG_1)d %(PARG_2)d" % [2, 3] # 9903
    print "%2z" % PARG_1
    print "strange format %2" % PARG_2
|
anudr01d/anudr01d.github.io | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/__init__.py | 363 | # -*- coding: utf-8 -*-
"""
pygments.formatters
~~~~~~~~~~~~~~~~~~~
Pygments formatters.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os.path
import fnmatch
from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
from pygments.util import ClassNotFound
ns = globals()
for fcls in FORMATTERS:
ns[fcls.__name__] = fcls
del fcls
__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
'get_all_formatters'] + [cls.__name__ for cls in FORMATTERS]
_formatter_alias_cache = {}
_formatter_filename_cache = []
def _init_formatter_cache():
    # populate the alias and filename-pattern caches once; subsequent
    # calls are no-ops (a non-empty alias cache means we already ran)
    if _formatter_alias_cache:
        return
    for cls in get_all_formatters():
        for alias in cls.aliases:
            _formatter_alias_cache[alias] = cls
        for fn in cls.filenames:
            _formatter_filename_cache.append((fn, cls))
def find_formatter_class(name):
    """Return the formatter class registered under *name*, or None."""
    _init_formatter_cache()
    return _formatter_alias_cache.get(name)
def get_formatter_by_name(name, **options):
    """Instantiate the formatter aliased *name*, passing *options* along.

    Raises ClassNotFound when no formatter is registered under the alias.
    """
    _init_formatter_cache()
    formatter_cls = _formatter_alias_cache.get(name, None)
    if formatter_cls is None:
        raise ClassNotFound("No formatter found for name %r" % name)
    return formatter_cls(**options)
def get_formatter_for_filename(fn, **options):
    """Instantiate the formatter whose filename pattern matches *fn*."""
    _init_formatter_cache()
    basename = os.path.basename(fn)
    for pattern, formatter_cls in _formatter_filename_cache:
        if fnmatch.fnmatch(basename, pattern):
            return formatter_cls(**options)
    raise ClassNotFound("No formatter found for file name %r" % basename)
def get_all_formatters():
    """Return a generator for all formatters."""
    # built-in formatters first, then any registered through plugins
    for formatter in FORMATTERS:
        yield formatter
    for _, formatter in find_plugin_formatters():
        yield formatter
|
arameshkumar/base-nuxeo-drive | refs/heads/master | nuxeo-drive-client/nxdrive/tests/test_blacklist_queue.py | 2 | '''
Created on 2 juil. 2015
@author: Remi Cattiau
'''
import unittest
from nxdrive.engine.blacklist_queue import BlacklistQueue
from time import sleep
class BlacklistQueueTest(unittest.TestCase):
def testDelay(self):
sleep_time = 3
# Push two items with a delay of 1s
queue = BlacklistQueue(delay=1)
queue.push(1, "Item1")
queue.push(2, "Item2")
# Verify no item is returned back before 1s
item = queue.get()
self.assertIsNone(item)
sleep(sleep_time)
# Verfiy we get the two items now
item = queue.get()
self.assertIsNotNone(item)
self.assertEqual(item.get(), "Item1")
self.assertEqual(item.get_id(), 1)
item = queue.get()
self.assertIsNotNone(item)
self.assertEqual(item.get(), "Item2")
self.assertEqual(item.get_id(), 2)
self.assertEqual(item._count, 1)
# Repush item without increasing delay
queue.repush(item, increase_wait=False)
item = queue.get()
self.assertIsNone(item)
sleep(sleep_time)
# We should get the repushed item after 1s wait
item = queue.get()
self.assertIsNotNone(item)
self.assertEqual(item.get(), "Item2")
self.assertEqual(item.get_id(), 2)
self.assertEqual(item._count, 2)
# Repush item with increase
queue.repush(item, increase_wait=True)
sleep(sleep_time)
item = queue.get()
self.assertIsNone(item)
sleep(sleep_time)
item = queue.get()
self.assertIsNotNone(item)
self.assertEqual(item.get(), "Item2")
self.assertEqual(item.get_id(), 2)
self.assertEqual(item._count, 3)
item = queue.get()
self.assertIsNone(item)
|
abought/osf.io | refs/heads/develop | api_tests/nodes/serializers/test_serializers.py | 4 | # -*- coding: utf-8 -*-
from urlparse import urlparse
from nose.tools import * # flake8: noqa
from dateutil.parser import parse as parse_date
from tests.base import DbTestCase, assert_datetime_equal
from tests.utils import make_drf_request
from tests.factories import UserFactory, NodeFactory, RegistrationFactory, ProjectFactory
from framework.auth import Auth
from api.nodes.serializers import NodeSerializer
from api.registrations.serializers import RegistrationSerializer
from api.base.settings.defaults import API_BASE
class TestNodeSerializer(DbTestCase):
def setUp(self):
super(TestNodeSerializer, self).setUp()
self.user = UserFactory()
def test_node_serialization(self):
parent = ProjectFactory(creator=self.user)
node = NodeFactory(creator=self.user, parent=parent)
req = make_drf_request()
result = NodeSerializer(node, context={'request': req}).data
data = result['data']
assert_equal(data['id'], node._id)
assert_equal(data['type'], 'nodes')
# Attributes
attributes = data['attributes']
assert_equal(attributes['title'], node.title)
assert_equal(attributes['description'], node.description)
assert_equal(attributes['public'], node.is_public)
assert_equal(attributes['tags'], [str(each) for each in node.tags])
assert_equal(attributes['category'], node.category)
assert_equal(attributes['registration'], node.is_registration)
assert_equal(attributes['fork'], node.is_fork)
assert_equal(attributes['collection'], node.is_collection)
# Relationships
relationships = data['relationships']
assert_in('children', relationships)
assert_in('contributors', relationships)
assert_in('files', relationships)
assert_in('parent', relationships)
assert_in('affiliated_institutions', relationships)
parent_link = relationships['parent']['links']['related']['href']
assert_equal(
urlparse(parent_link).path,
'/{}nodes/{}/'.format(API_BASE, parent._id)
)
assert_in('registrations', relationships)
# Not a fork, so forked_from is removed entirely
assert_not_in('forked_from', relationships)
def test_fork_serialization(self):
node = NodeFactory(creator=self.user)
fork = node.fork_node(auth=Auth(user=node.creator))
result = NodeSerializer(fork, context={'request': make_drf_request()}).data
data = result['data']
# Relationships
relationships = data['relationships']
forked_from = relationships['forked_from']['links']['related']['href']
assert_equal(
urlparse(forked_from).path,
'/{}nodes/{}/'.format(API_BASE, node._id)
)
class TestNodeRegistrationSerializer(DbTestCase):
def test_serialization(self):
user = UserFactory()
req = make_drf_request()
reg = RegistrationFactory(creator=user)
result = RegistrationSerializer(reg, context={'request': req}).data
data = result['data']
assert_equal(data['id'], reg._id)
assert_equal(data['type'], 'registrations')
should_not_relate_to_registrations = [
'registered_from',
'registered_by',
'registration_schema'
]
# Attributes
attributes = data['attributes']
assert_datetime_equal(
parse_date(attributes['date_registered']),
reg.registered_date
)
assert_equal(attributes['withdrawn'], reg.is_retracted)
# Relationships
relationships = data['relationships']
relationship_urls = {}
for relationship in relationships:
relationship_urls[relationship]=relationships[relationship]['links']['related']['href']
assert_in('registered_by', relationships)
registered_by = relationships['registered_by']['links']['related']['href']
assert_equal(
urlparse(registered_by).path,
'/{}users/{}/'.format(API_BASE, user._id)
)
assert_in('registered_from', relationships)
registered_from = relationships['registered_from']['links']['related']['href']
assert_equal(
urlparse(registered_from).path,
'/{}nodes/{}/'.format(API_BASE, reg.registered_from._id)
)
for relationship in relationship_urls:
if relationship in should_not_relate_to_registrations:
assert_not_in('/{}registrations/'.format(API_BASE), relationship_urls[relationship])
else:
assert_in('/{}registrations/'.format(API_BASE), relationship_urls[relationship],
'For key {}'.format(relationship))
|
menzenski/tagger-tester | refs/heads/master | razmetka/tag/tag.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import subprocess32
from nltk.tag.stanford import StanfordPOSTagger
from .config import DATA_DIR_NAME, PATH_TO_DATA_DIR
from .files import write_to_directory
class FilePair(object):
    """Pair of files: one for training and one for testing."""

    def __init__(self, idx, testfile, trainfile, separator='_', props=''):
        """Initialize the FilePair object.

        Parameters
        ----------
        idx (int) : index number to keep files straight
        testfile (str) : filename containing the file to tag/test
        trainfile (str) : filename containing the training file
        separator (basestring) : the character used to separate words
            from their POS tags in the training file and the output.
            Default is underscore '_'; slash '/' is also common.
        props (str) : prefix for naming/saving the properties file
        """
        # one-digit numbers should be prefaced with a leading zero
        self.idx = str(idx).rjust(2, '0')
        self.testfile = testfile
        self.trainfile = trainfile
        if props == '':
            self.props_name = 'props_{}.props'.format(self.idx)
        else:
            self.props_name = '{}{}.props'.format(props, self.idx)
        self.sep = separator
        # Template for the Stanford POS tagger .props file; filled in by
        # write_props().
        self.prop_template = (
            "model = {p_model}\n"
            "trainFile = {p_train_file}\n"
            "tagSeparator = {p_tag_separator}\n"
            "encoding = {p_encoding}\n"
            "verbose = {p_verbose}\n"
            "verboseResults = {p_verbose_results}\n"
            "tokenize = {p_tokenize}\n"
            "arch = {p_arch}\n"
            "learnClosedClassTags = {p_learn_closed_class_tags}\n"
            "closedClassTagThreshold = {p_closed_class_tag_threshold}\n"
        )

    def write_props(self, props_name=None, model=None, train_file=None,
                    tag_separator=None, encoding="UTF-8", verbose="true",
                    verbose_results="true", tokenize="false", arch="generic",
                    learn_closed_class_tags='', closed_class_tag_threshold=5):
        """Write a props file to disk.

        Any argument left at None is derived from this pair's index,
        training file, and separator.
        """
        if props_name is None:
            props_name = self.props_name
        if model is None:
            model_name = 'model_{}.model'.format(self.idx)
            model = os.path.join(PATH_TO_DATA_DIR, model_name)
        if train_file is None:
            train_file = os.path.join(PATH_TO_DATA_DIR, self.trainfile)
        if tag_separator is None:
            tag_separator = self.sep
        output_string = self.prop_template.format(
            p_model=model, p_train_file=train_file,
            p_tag_separator=tag_separator, p_encoding=encoding,
            p_verbose=verbose, p_verbose_results=verbose_results,
            p_tokenize=tokenize, p_arch=arch,
            p_learn_closed_class_tags=learn_closed_class_tags,
            p_closed_class_tag_threshold=closed_class_tag_threshold
        )
        write_to_directory(dir_name=DATA_DIR_NAME, file_name=props_name,
                           a_string=output_string)
|
fiduswriter/fiduswriter | refs/heads/master | fiduswriter/fixturemedia/management/__init__.py | 12133432 | |
gnuhub/intellij-community | refs/heads/master | python/testData/refactoring/move/relativeImportsToModulesInSameMovedPackageNotUpdated/before/src/pkg/subpkg/subsubpkg/__init__.py | 12133432 | |
gfreed/android_external_chromium-org | refs/heads/android-4.4 | tools/find_runtime_symbols/reduce_debugline.py | 161 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reduces result of 'readelf -wL' to just a list of starting addresses.
It lists up all addresses where the corresponding source files change. The
list is sorted in ascending order. See tests/reduce_debugline_test.py for
examples.
This script assumes that the result of 'readelf -wL' ends with an empty line.
Note: the option '-wL' has the same meaning with '--debug-dump=decodedline'.
"""
import re
import sys
# Matches a "filename:" header line, optionally prefixed with "CU: ".
# Raw string avoids the invalid escape sequence '\:' (a DeprecationWarning
# on Python 3.6+); ':' needs no escaping in a regex.
_FILENAME_PATTERN = re.compile(r'(CU: |)(.+):')


def reduce_decoded_debugline(input_file):
  """Reduces decoded 'readelf -wL' output to (address, filename) pairs.

  Args:
      input_file: An iterable of lines as produced by 'readelf -wL'.

  Returns:
      A list of (int address, str filename) tuples, sorted by address,
      containing one entry per run of consecutive addresses that map to
      the same source file.
  """
  filename = ''
  starting_dict = {}
  started = False

  for line in input_file:
    line = line.strip()
    unpacked = line.split(None, 2)
    if len(unpacked) == 3 and unpacked[2].startswith('0x'):
      # Address row: record only the first address after a filename change
      # (or after the address run was interrupted).
      if not started and filename:
        started = True
        starting_dict[int(unpacked[2], 16)] = filename
    else:
      started = False
      if line.endswith(':'):
        matched = _FILENAME_PATTERN.match(line)
        if matched:
          filename = matched.group(2)

  # Collapse consecutive addresses that map to the same file.
  starting_list = []
  prev_filename = ''
  for address in sorted(starting_dict):
    curr_filename = starting_dict[address]
    if prev_filename != curr_filename:
      starting_list.append((address, starting_dict[address]))
      prev_filename = curr_filename
  return starting_list
def main():
  """Reads 'readelf -wL' output from stdin and prints the reduced list.

  Addresses are zero-padded to 8 or 16 hex digits depending on whether
  the largest address exceeds 32 bits. Returns a nonzero exit status if
  any command-line arguments are supplied.
  """
  if len(sys.argv) != 1:
    # Write directly to stderr instead of the Python 2-only
    # 'print >> sys.stderr' so the file also parses under Python 3.
    sys.stderr.write('Unsupported arguments\n')
    return 1

  starting_list = reduce_decoded_debugline(sys.stdin)
  bits64 = starting_list[-1][0] > 0xffffffff
  for address, filename in starting_list:
    # Parenthesized single-argument print behaves identically on
    # Python 2 and 3.
    if bits64:
      print('%016x %s' % (address, filename))
    else:
      print('%08x %s' % (address, filename))


if __name__ == '__main__':
  sys.exit(main())
|
Ingenico-ePayments/connect-sdk-python3 | refs/heads/master | tests/unit/test_client.py | 1 | import base64
import unittest
from datetime import timedelta
from unittest.mock import Mock, MagicMock
from ingenico.connect.sdk.connection import Connection
from ingenico.connect.sdk.defaultimpl.default_marshaller import DefaultMarshaller
from ingenico.connect.sdk.factory import Factory
from ingenico.connect.sdk.pooled_connection import PooledConnection
from ingenico.connect.sdk.request_header import RequestHeader
from tests.unit.test_factory import PROPERTIES_URI, API_KEY_ID, SECRET_API_KEY
class ClientTest(unittest.TestCase):
    """Tests for the Client class.

    Verifies that Client.with_client_meta_info only returns a modified
    client when the meta info actually changes, and that connection
    maintenance calls (idle/expired cleanup) are propagated to the
    underlying connection only when it is a pooled connection.
    """

    def test_with_client_meta_info(self):
        """Tests if the function withClientMetaInfo alters a client when it needs to and does nothing if not required"""
        client1 = Factory.create_client_from_file(PROPERTIES_URI, API_KEY_ID, SECRET_API_KEY)
        # Clearing meta info that was never set should be a no-op.
        client2 = client1.with_client_meta_info(None)
        client_meta_info = DefaultMarshaller.INSTANCE().marshal({"test": "test"})
        client3 = client1.with_client_meta_info(client_meta_info)
        # Re-applying identical meta info should not create a new client;
        # clearing it afterwards should.
        client4 = client3.with_client_meta_info(client_meta_info)
        client5 = client3.with_client_meta_info(None)

        self.assertIsNone(client1._client_headers)
        self.assertIs(client1, client2)
        self.assertIsNot(client1, client3)
        self.assertClientHeaders(client3, client_meta_info)
        self.assertIs(client3, client4)
        self.assertIsNot(client3, client5)
        self.assertIsNone(client5._client_headers)

    def assertClientHeaders(self, client, client_meta_info):
        """Checks that the 'ClientMetaInfo' header with client_meta_info is stored properly in the client"""
        headers = client._client_headers
        # The header value is the base64-encoded UTF-8 meta info string.
        header_value = base64.b64encode(client_meta_info.encode("utf-8"))
        expected = RequestHeader("X-GCS-ClientMetaInfo", header_value)
        found = False
        for header in headers:
            if str(expected) == str(header):
                found = True
        self.assertTrue(found, "header {0} was not found in {1}".format(expected, headers))

    def test_close_idle_connection_not_pooled(self):
        """Tests that the setting to close an idle connection in a client does not propagate
        to the connection for an unpooled connection
        """
        mock = MagicMock(spec=Connection(), autospec=True)
        function_mock = Mock(name="close_idle_connections_mock")
        mock.attach_mock(function_mock, "close_idle_connections")
        session = Factory.create_session_from_file(
            configuration_file_name=PROPERTIES_URI, connection=mock,
            api_key_id=API_KEY_ID, secret_api_key=SECRET_API_KEY)
        client = Factory.create_client_from_session(session)
        client.close_idle_connections(timedelta(seconds=5))  # seconds
        function_mock.assert_not_called()

    def test_close_idle_connection_pooled(self):
        """Tests that the setting to close an idle connection in a client propagates to the connection
        for a pooled connection
        """
        pooled_mock = MagicMock(spec=PooledConnection(), autospec=True)
        function_mock = Mock(name="close_idle_connections_mock")
        pooled_mock.attach_mock(function_mock, "close_idle_connections")
        session = Factory.create_session_from_file(
            configuration_file_name=PROPERTIES_URI, connection=pooled_mock,
            api_key_id=API_KEY_ID, secret_api_key=SECRET_API_KEY)
        client = Factory.create_client_from_session(session)
        client.close_idle_connections(timedelta(seconds=5))  # seconds
        function_mock.assert_called_once_with(timedelta(seconds=5))

    def test_close_expired_connections_not_pooled(self):
        """Tests that the setting to close an expired connection in a client does not propagate to the connection
        for an unpooled connection
        """
        mock = MagicMock(spec=Connection(), autospec=True)
        function_mock = Mock(name="close_expired_connections_mock")
        mock.attach_mock(function_mock, "close_expired_connections")
        session = Factory.create_session_from_file(
            configuration_file_name=PROPERTIES_URI,
            api_key_id=API_KEY_ID, secret_api_key=SECRET_API_KEY, connection=mock)
        client = Factory.create_client_from_session(session)
        client.close_expired_connections()
        function_mock.assert_not_called()

    def test_close_expired_connections_pooled(self):
        """Tests that the setting to close an expired connection in a client propagates to the connection
        for a pooled connection
        """
        pooled_mock = MagicMock(spec=PooledConnection(), autospec=True)
        function_mock = Mock(name="close_expired_connections_mock")
        pooled_mock.attach_mock(function_mock, "close_expired_connections")
        session = Factory.create_session_from_file(
            configuration_file_name=PROPERTIES_URI, connection=pooled_mock,
            api_key_id=API_KEY_ID, secret_api_key=SECRET_API_KEY)
        client = Factory.create_client_from_session(session)
        client.close_expired_connections()
        function_mock.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
|
angelbot/geoincentives | refs/heads/master | coffin/views/generic/list_detail.py | 5 | from coffin.template import loader
from django.views.generic import list_detail as _list_detail
import functools
# Django's generic list/detail views, re-bound so they resolve templates
# through Coffin's Jinja2-aware loader instead of Django's default one.
# The public call signatures are unchanged.
object_list = functools.partial(_list_detail.object_list, template_loader=loader)
object_detail = functools.partial(_list_detail.object_detail, template_loader=loader)
|
fireduck64/electrum | refs/heads/master | gui/qt/contact_list.py | 3 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import webbrowser
from electrum.i18n import _
from electrum.bitcoin import is_address
from electrum.util import block_explorer_URL, format_satoshis, format_time, age
from electrum.plugins import run_hook
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from util import MyTreeWidget, pr_tooltips, pr_icons
class ContactList(MyTreeWidget):
    """Tree widget listing the wallet's contacts (name + address/key)."""

    filter_columns = [0, 1]  # Key, Value

    def __init__(self, parent):
        MyTreeWidget.__init__(self, parent, self.create_menu, [_('Name'), _('Address')], 0, [0])
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setSortingEnabled(True)

    def on_permit_edit(self, item, column):
        # openalias items shouldn't be editable
        return item.text(1) != "openalias"

    def on_edited(self, item, column, prior):
        """Persist an in-place edit; renaming removes the old entry first."""
        if column == 0:  # Remove old contact if renamed
            self.parent.contacts.pop(prior)
        self.parent.set_contact(unicode(item.text(0)), unicode(item.text(1)))

    def import_contacts(self):
        """Prompt for a file and merge its contacts into the wallet."""
        wallet_folder = self.parent.get_wallet_folder()
        filename = unicode(QFileDialog.getOpenFileName(self.parent, "Select your wallet file", wallet_folder))
        if not filename:
            return
        self.parent.contacts.import_file(filename)
        self.on_update()

    def create_menu(self, position):
        """Build the right-click context menu for the current selection."""
        menu = QMenu()
        selected = self.selectedItems()
        if not selected:
            menu.addAction(_("New contact"), lambda: self.parent.new_contact_dialog())
            menu.addAction(_("Import file"), lambda: self.parent.import_contacts())
        else:
            # NOTE(review): 'names' appears unused below — candidate for removal.
            names = [unicode(item.text(0)) for item in selected]
            keys = [unicode(item.text(1)) for item in selected]
            column = self.currentColumn()
            column_title = self.headerItem().text(column)
            column_data = '\n'.join([unicode(item.text(column)) for item in selected])
            menu.addAction(_("Copy %s")%column_title, lambda: self.parent.app.clipboard().setText(column_data))
            if column in self.editable_columns:
                # NOTE(review): 'item' here is the variable leaked from the
                # list comprehensions above (Python 2 scoping), i.e. the LAST
                # selected row — confirm that editing the last row is intended
                # for multi-selections.
                menu.addAction(_("Edit %s")%column_title, lambda: self.editItem(item, column))
            menu.addAction(_("Pay to"), lambda: self.parent.payto_contacts(keys))
            menu.addAction(_("Delete"), lambda: self.parent.delete_contacts(keys))
            URLs = [block_explorer_URL(self.config, 'addr', key) for key in filter(is_address, keys)]
            if URLs:
                menu.addAction(_("View on block explorer"), lambda: map(webbrowser.open, URLs))

        run_hook('create_contact_menu', menu, selected)
        menu.exec_(self.viewport().mapToGlobal(position))

    def on_update(self):
        """Rebuild the tree from the wallet's contacts, preserving the
        currently selected key where possible."""
        item = self.currentItem()
        current_key = item.data(0, Qt.UserRole).toString() if item else None
        self.clear()
        for key in sorted(self.parent.contacts.keys()):
            _type, name = self.parent.contacts[key]
            item = QTreeWidgetItem([name, key])
            item.setData(0, Qt.UserRole, key)
            self.addTopLevelItem(item)
            if key == current_key:
                self.setCurrentItem(item)
        run_hook('update_contacts_tab', self)
|
prachidamle/cattle | refs/heads/master | tests/integration/cattletest/core/test_machines.py | 3 | from common_fixtures import * # NOQA
from cattle import ApiError
from test_physical_host import disable_go_machine_service # NOQA
@pytest.fixture(scope='module')
def update_ping_settings(request, super_client):
    """Tune cattle's ping settings so the ping logic fully runs in tests.

    These settings need changed because they control how the logic of the
    ping handlers behave in cattle. We need to update them so that we can
    ensure the ping logic will fully run. The original values are restored
    when the module's tests finish.
    """
    settings = super_client.list_setting()
    originals = []

    def update_setting(new_value, s):
        # Record the setting *as passed in* (was the enclosing loop variable
        # 'setting', which only happened to be identical), so the restore
        # list stays correct even if this helper is called outside the loop.
        originals.append((s, {'value': s.value}))
        s = super_client.update(s, {'value': new_value})
        wait_setting_active(super_client, s)

    for setting in settings:
        if setting.name == 'agent.ping.resources.every' and setting.value != 1:
            update_setting('1', setting)
        if setting.name == 'agent.resource.monitor.cache.resource.seconds' \
                and setting.value != 0:
            update_setting('0', setting)

    def revert_settings():
        for s in originals:
            super_client.update(s[0], s[1])

    request.addfinalizer(revert_settings)
@pytest.fixture(scope='module')
def machine_context(admin_user_client):
    # Shared project context (with a registered host) for the machine tests.
    return create_context(admin_user_client, create_project=True,
                          add_host=True)


@pytest.fixture(scope='module')
def admin_client(machine_context):
    # API client bound to the machine_context project.
    return machine_context.client


@pytest.fixture(scope='module')
def admin_account(machine_context):
    # The project (account) that owns the machines created in these tests.
    return machine_context.project
def test_machine_lifecycle(super_client, admin_client, admin_account,
                           update_ping_settings):
    """End-to-end machine lifecycle: create a machine, let a simulated agent
    register its host, verify a ping does not clobber the physical host, and
    check that removal cascades to the agent and host."""
    name = random_str()
    machine = admin_client.create_machine(name=name,
                                          virtualboxConfig={})

    machine = admin_client.wait_success(machine)
    assert machine.state == 'active'
    assert machine.virtualboxConfig is not None

    external_id = super_client.reload(machine).externalId
    assert external_id is not None

    # Create an agent with the externalId specified. The agent simulator will
    # mimic how the go-machine-service would use this external_id to bootstrap
    # an agent onto the physical host with the proper PHYSICAL_HOST_UUID set.
    scope = 'io.cattle.platform.agent.connection.simulator' \
            '.AgentConnectionSimulator'
    uri = 'sim://{}'.format(random_str())
    data = {scope: {}}
    data[scope]['addPhysicalHost'] = True
    data[scope]['externalId'] = external_id
    account_id = get_plain_id(super_client, admin_account)
    data[scope]['agentResourcesAccountId'] = account_id
    data['agentResourcesAccountId'] = account_id

    agent = super_client.create_agent(uri=uri, data=data)
    agent = super_client.wait_success(agent)
    wait_for(lambda: len(agent.hosts()) == 1)
    hosts = agent.hosts()

    assert len(hosts) == 1
    host = hosts[0]
    assert host.physicalHostId == machine.id
    assert machine.accountId == host.accountId

    # Need to force a ping because they cause physical hosts to be created
    # under non-machine use cases. Ensures the machine isn't overridden.
    ping = one(super_client.list_task, name='agent.ping')
    ping.execute()
    time.sleep(.1)  # The ping needs time to execute

    agent = super_client.reload(agent)
    hosts = agent.hosts()
    assert len(hosts) == 1
    host = hosts[0]
    physical_hosts = host.physicalHost()
    assert physical_hosts.id == machine.id

    # Removing the machine must cascade: its agent and host go away too.
    machine = admin_client.wait_success(machine.remove())
    assert machine.state == 'removed'

    agent = super_client.wait_success(super_client.reload(machine).agent())
    assert agent.state == 'removed'

    host = admin_client.wait_success(admin_client.reload(host))
    assert host.state == 'removed'
def _create_machine_with_config(admin_client, driver, config_field, config,
                                **extra_fields):
    """Create a machine with the given driver config, wait for it to become
    active, and assert the config round-trips. Returns the active machine."""
    name = "test-%s" % random_str()
    host = admin_client.create_machine(name=name,
                                       **dict({config_field: config},
                                              **extra_fields))
    host = admin_client.wait_success(host)
    assert host.state == 'active'
    assert config == getattr(host, config_field)
    assert host.driver == driver
    return host


def test_machine_driver_config(admin_client):
    """Each supported driver's config must be stored and returned verbatim,
    and the machine's driver field must reflect the config that was set."""
    vbox_config = {
        "memory": "2048",
        "diskSize": "40000",
        "boot2dockerUrl": "http://localhost/random",
    }
    ca = "ca-1"
    key = "key-1"
    # virtualbox also carries auth fields alongside the driver config.
    host = _create_machine_with_config(admin_client, 'virtualbox',
                                       'virtualboxConfig', vbox_config,
                                       authCertificateAuthority=ca,
                                       authKey=key)
    assert ca == host.authCertificateAuthority
    assert key == host.authKey

    digoc_config = {
        "image": "img1",
        "region": "reg1",
        "size": "40000",
        "accessToken": "ac-1",
        "ipv6": True,
        "privateNetworking": True,
        "backups": True
    }
    _create_machine_with_config(admin_client, 'digitalocean',
                                'digitaloceanConfig', digoc_config)

    ec2_config = {
        "accessKey": "accesskey1",
        "ami": "ami1",
        "iamInstanceProfile": "profile1",
        "instanceType": "type1",
        "monitoring": False,
        "privateAddressOnly": False,
        "region": "us-east-1",
        "requestSpotInstance": False,
        "rootSize": "60GB",
        "secretKey": "secretkey1",
        "securityGroup": "docker-machine",
        "sessionToken": "sessiontoken1",
        "spotPrice": "spotPrice1",
        "sshUser": "sshUser1",
        "subnetId": "5678",
        "usePrivateAddress": False,
        "vpcId": "1234",
        "zone": "us-east-1a",
    }
    _create_machine_with_config(admin_client, 'amazonec2',
                                'amazonec2Config', ec2_config)

    packet_config = {
        "apiKey": "apikey1",
        "billingCycle": "hourly",
        "facilityCode": "ewr1",
        "os": "centos_7",
        "plan": "baremetal_1",
        "projectId": "projectId",
    }
    _create_machine_with_config(admin_client, 'packet',
                                'packetConfig', packet_config)

    azure_config = {
        "dockerPort": "dockerPort",
        "dockerSwarmMasterPort": "dockerSwarmMasterPort1",
        "image": "image",
        "location": "location",
        "password": "password",
        "publishSettingsFile": "publishSettingsFile",
        "size": "size",
        "sshPort": "sshPort1",
        "subscriptionCert": "subscriptionCert",
        "subscriptionId": "subscriptionId",
        "username": "username",
    }
    _create_machine_with_config(admin_client, 'azure',
                                'azureConfig', azure_config)

    rackspace_config = {
        "apiKey": "apiKey",
        "dockerInstall": "dockerInstall",
        "endpointType": "endpointType",
        "flavorId": "flavorId",
        "imageId": "imageId",
        "region": "region",
        "sshPort": "sshPort",
        "sshUser": "sshUser",
        "username": "username",
    }
    _create_machine_with_config(admin_client, 'rackspace',
                                'rackspaceConfig', rackspace_config)
def test_machine_validation(admin_client):
    """A machine must be created with exactly one driver configuration."""
    name = "test-%s" % random_str()

    # Supplying two driver configs at once is rejected.
    try:
        admin_client.create_machine(name=name,
                                    virtualboxConfig={},
                                    digitaloceanConfig={"accessToken": "a"})
    except ApiError as err:
        assert err.error.status == 422
        assert err.error.code == 'DriverConfigExactlyOneRequired'
    else:
        assert False, "Should not have been able to set two drivers."

    # Supplying no driver config at all is rejected too.
    try:
        admin_client.create_machine(name=name)
    except ApiError as err:
        assert err.error.status == 422
        assert err.error.code == 'DriverConfigExactlyOneRequired'
    else:
        assert False, "Should have been required to set a driver."

    # A second driver config explicitly set to None is acceptable.
    machine = admin_client.create_machine(name=name,
                                          virtualboxConfig={},
                                          digitaloceanConfig=None)
    assert machine is not None
def test_digitalocean_config_validation(admin_client):
    """The digitalocean driver refuses a config without an accessToken."""
    name = "test-%s" % random_str()

    try:
        admin_client.create_machine(name=name,
                                    digitaloceanConfig={})
    except ApiError as err:
        assert err.error.status == 422
        assert err.error.code == 'MissingRequired'
    else:
        assert False, 'Should have got MissingRequired for accessToken'
|
chromium2014/src | refs/heads/master | tools/perf/profile_creators/small_profile_creator.py | 10 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import profile_creator
import page_sets
class SmallProfileCreator(profile_creator.ProfileCreator):
  """
  Runs a browser through a series of operations to fill in a small test profile.
  """

  def __init__(self):
    super(SmallProfileCreator, self).__init__()
    self._page_set = page_sets.Typical25PageSet()

    # Open all links in the same tab save for the last _NUM_TABS links which
    # are each opened in a new tab.
    self._NUM_TABS = 5

  def TabForPage(self, page, browser):
    # Choose the tab a page loads in: the first tab for most pages, a
    # fresh tab for the final ones.
    idx = page.page_set.pages.index(page)
    # The last _NUM_TABS pages open a new tab.
    # NOTE(review): with '<=', only the last (_NUM_TABS - 1) pages actually
    # get a new tab — confirm whether '<' was intended to match the comment.
    if idx <= (len(page.page_set.pages) - self._NUM_TABS):
      return browser.tabs[0]
    else:
      return browser.tabs.New()

  def MeasurePage(self, _, tab, results):
    # Nothing is measured; profile creation only needs the page to finish
    # loading so its state lands in the profile.
    tab.WaitForDocumentReadyStateToBeComplete()
|
openstack/manila | refs/heads/master | manila/policies/share_replica.py | 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from manila.policies import base
BASE_POLICY_NAME = 'share_replica:%s'

DEPRECATED_REASON = """
The share replica API now supports system scope and default roles.
"""


def _deprecated_rule(action, check_str):
    """Build the Wallaby-deprecated predecessor of a share-replica rule.

    All the deprecated rules differ only in action name and check string,
    so they are generated here instead of being spelled out nine times.
    """
    return policy.DeprecatedRule(
        name=BASE_POLICY_NAME % action,
        check_str=check_str,
        deprecated_reason=DEPRECATED_REASON,
        deprecated_since=versionutils.deprecated.WALLABY
    )


deprecated_replica_create = _deprecated_rule('create', base.RULE_DEFAULT)
deprecated_replica_get_all = _deprecated_rule('get_all', base.RULE_DEFAULT)
deprecated_replica_show = _deprecated_rule('show', base.RULE_DEFAULT)
deprecated_replica_delete = _deprecated_rule('delete', base.RULE_DEFAULT)
deprecated_replica_force_delete = _deprecated_rule('force_delete',
                                                   base.RULE_ADMIN_API)
deprecated_replica_promote = _deprecated_rule('promote', base.RULE_DEFAULT)
deprecated_replica_resync = _deprecated_rule('resync', base.RULE_ADMIN_API)
deprecated_replica_reset_state = _deprecated_rule('reset_replica_state',
                                                  base.RULE_ADMIN_API)
deprecated_replica_reset_status = _deprecated_rule('reset_status',
                                                   base.RULE_ADMIN_API)


def _replica_rule(action, check_str, description, operations,
                  deprecated_rule):
    """Build a documented share-replica policy rule.

    Every rule shares the same name template and scope types; only the
    check string, description, operations and deprecated predecessor vary.
    """
    return policy.DocumentedRuleDefault(
        name=BASE_POLICY_NAME % action,
        check_str=check_str,
        scope_types=['system', 'project'],
        description=description,
        operations=operations,
        deprecated_rule=deprecated_rule
    )


# Path used by every action-style (POST .../action) operation below.
_ACTION_PATH = '/share-replicas/{share_replica_id}/action'

share_replica_policies = [
    _replica_rule(
        'create', base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
        "Create share replica.",
        [{'method': 'POST', 'path': '/share-replicas'}],
        deprecated_replica_create),
    _replica_rule(
        'get_all', base.SYSTEM_OR_PROJECT_READER,
        "Get all share replicas.",
        [{'method': 'GET', 'path': '/share-replicas'},
         {'method': 'GET', 'path': '/share-replicas/detail'},
         {'method': 'GET',
          'path': '/share-replicas/detail?share_id={share_id}'}],
        deprecated_replica_get_all),
    _replica_rule(
        'show', base.SYSTEM_OR_PROJECT_READER,
        "Get details of a share replica.",
        [{'method': 'GET', 'path': '/share-replicas/{share_replica_id}'}],
        deprecated_replica_show),
    _replica_rule(
        'delete', base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
        "Delete a share replica.",
        [{'method': 'DELETE', 'path': '/share-replicas/{share_replica_id}'}],
        deprecated_replica_delete),
    _replica_rule(
        'force_delete', base.SYSTEM_ADMIN_OR_PROJECT_ADMIN,
        "Force delete a share replica.",
        [{'method': 'POST', 'path': _ACTION_PATH}],
        deprecated_replica_force_delete),
    _replica_rule(
        'promote', base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
        "Promote a non-active share replica to active.",
        [{'method': 'POST', 'path': _ACTION_PATH}],
        deprecated_replica_promote),
    _replica_rule(
        'resync', base.SYSTEM_ADMIN_OR_PROJECT_ADMIN,
        "Resync a share replica that is out of sync.",
        [{'method': 'POST', 'path': _ACTION_PATH}],
        deprecated_replica_resync),
    _replica_rule(
        'reset_replica_state', base.SYSTEM_ADMIN_OR_PROJECT_ADMIN,
        "Reset share replica's replica_state attribute.",
        [{'method': 'POST', 'path': _ACTION_PATH}],
        deprecated_replica_reset_state),
    _replica_rule(
        'reset_status', base.SYSTEM_ADMIN_OR_PROJECT_ADMIN,
        "Reset share replica's status.",
        [{'method': 'POST', 'path': _ACTION_PATH}],
        deprecated_replica_reset_status),
]
def list_rules():
    """Return all share-replica policy rules for oslo.policy registration."""
    return share_replica_policies
|
kingvuplus/ts-gui-3 | refs/heads/master | lib/python/Components/Renderer/Listbox.py | 118 | from Renderer import Renderer
from enigma import eListbox
# the listbox renderer is the listbox, but no listbox content.
# the content will be provided by the source (or converter).
# the source should emit the 'changed' signal whenever
# it has a new listbox content.
# the source needs to have the 'content' property for the
# used listbox content
# it should expose exactly the non-content related functions
# of the eListbox class. more or less.
class Listbox(Renderer, object):
def __init__(self):
Renderer.__init__(self)
self.__content = None
self.__wrap_around = True
self.__selection_enabled = True
self.__scrollbarMode = "showOnDemand"
GUI_WIDGET = eListbox
def contentChanged(self):
self.content = self.source.content
def setContent(self, content):
self.__content = content
if self.instance is not None:
self.instance.setContent(self.__content)
content = property(lambda self: self.__content, setContent)
def postWidgetCreate(self, instance):
if self.__content is not None:
instance.setContent(self.__content)
instance.selectionChanged.get().append(self.selectionChanged)
self.wrap_around = self.wrap_around # trigger
self.selection_enabled = self.selection_enabled # trigger
self.scrollbarMode = self.scrollbarMode # trigger
def preWidgetRemove(self, instance):
instance.setContent(None)
instance.selectionChanged.get().remove(self.selectionChanged)
def setWrapAround(self, wrap_around):
self.__wrap_around = wrap_around
if self.instance is not None:
self.instance.setWrapAround(self.__wrap_around)
wrap_around = property(lambda self: self.__wrap_around, setWrapAround)
def selectionChanged(self):
self.source.selectionChanged(self.index)
def getIndex(self):
if self.instance is None:
return 0
return self.instance.getCurrentIndex()
def moveToIndex(self, index):
if self.instance is None:
return
self.instance.moveSelectionTo(index)
index = property(getIndex, moveToIndex)
def move(self, direction):
if self.instance is not None:
self.instance.moveSelection(direction)
def setSelectionEnabled(self, enabled):
self.__selection_enabled = enabled
if self.instance is not None:
self.instance.setSelectionEnable(enabled)
selection_enabled = property(lambda self: self.__selection_enabled, setSelectionEnabled)
def setScrollbarMode(self, mode):
self.__scrollbarMode = mode
if self.instance is not None:
self.instance.setScrollbarMode(int(
{ "showOnDemand": 0,
"showAlways": 1,
"showNever": 2,
}[mode]))
scrollbarMode = property(lambda self: self.__scrollbarMode, setScrollbarMode)
def changed(self, what):
if hasattr(self.source, "selectionEnabled"):
self.selection_enabled = self.source.selectionEnabled
if hasattr(self.source, "scrollbarMode"):
self.scrollbarMode = self.source.scrollbarMode
if len(what) > 1 and isinstance(what[1], str) and what[1] == "style":
return
self.content = self.source.content
def entry_changed(self, index):
	# Tell the widget that a single entry needs to be redrawn.
	if self.instance is not None:
		self.instance.entryChanged(index)
|
genonfire/bbgo | refs/heads/master | blogs/migrations/__init__.py | 12133432 | |
ggiscan/OnlineClerk | refs/heads/master | core/Interactor/core/tests/__init__.py | 12133432 | |
mrfuxi/django | refs/heads/master | tests/migrations/migrations_test_apps/alter_fk/author_app/__init__.py | 12133432 | |
nion-software/nionswift | refs/heads/master | nion/swift/test/ToolbarPanel_test.py | 12133432 | |
saurabh6790/trufil_app | refs/heads/master | stock/report/serial_no_status/__init__.py | 12133432 | |
NDManh/numbbo | refs/heads/master | code-postprocessing/bbob_pproc/readalign.py | 3 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper routines to read in data files.
The terms horizontal and vertical below refer to the horizontal
(fixed-target) and vertical (fixed-budget) views. When considering
convergence graphs of function values over times, we can view it as:
* costs for different fixed targets represented by horizontal cuts.
* function values for different fixed budgets represented by vertical
cuts.
COCO collects experimental data with respect to these two complementary
views. This module provides data structures and methods for dealing with
the experimental data.
"""
from __future__ import absolute_import
import numpy
import warnings
from pdb import set_trace
#GLOBAL VARIABLES
idxEvals = 0 # index of the column where to find the evaluations
# Single objective case
idxFSingle = 2 # index of the column where to find the function values
nbPtsFSingle = 5 # nb of target function values for each decade.
# Bi-objective case
idxFBi = 1 # index of the column where to find the function values
nbPtsFBi = 10 # nb of target function values for each decade.
#CLASS DEFINITIONS
class MultiReader(list):
    """List of data arrays to be aligned.
    The main purpose of this class is to be used as a single container
    of the data arrays to be aligned by :py:meth:`alignData()` in the
    parent module.
    A data array is defined as an array where rows correspond to
    recordings at different moments of an experiment. Elements of these
    rows correspond to different measures.
    These data arrays can be aligned along the time or the function
    values for instance.
    This class is part abstract. Some methods have to be defined by
    inheriting classes depending on wanted alignment:
    * :py:meth:`isFinished()`, True when all the data is read.
    * :py:meth:`getInitialValue()`, returns the initial alignment
      value.
    * :py:meth:`newCurrentValue()`, returns the next alignment value.
    * :py:meth:`align()`, process all the elements of self to make
      them aligned.
    Some attributes have to be defined as well :py:attr:`idx`,
    the index of the column with alignment values in the data array,
    :py:attr:`idxData`, the index of the column with the actual data.
    """
    # TODO: this class and all inheriting class may have to be redesigned for
    # other kind of problems to work.
    # idx: index of the column in the data array of the alignment value.
    # idxData: index of the column in the data array for the data of concern.
    def __init__(self, data, isHArray=False):
        # Wrap every non-empty data array in a SingleReader; empty arrays
        # are silently dropped.
        for i in data:
            if len(i) > 0: # ie. if the data array is not empty.
                self.append(self.SingleReader(i, isHArray))
    def currentLine(self):
        """Aggregates currentLines information."""
        # One entry per reader: the data column of its current line.
        return numpy.array(list(i.currentLine[self.idxData] for i in self))
    def currentValues(self):
        """Gets the list of the current alignment values."""
        return list(i.currentLine[self.idx] for i in self)
    def nextValues(self):
        """Gets the list of the next alignment values."""
        # Exhausted readers are excluded; the list may therefore be empty.
        return list(i.nextLine[self.idx] for i in self if not i.isFinished)
    #def isFinished(self):
        """When all the data is read."""
        #pass
    #def getInitialValue(self):
        """Returns the initial alignment value."""
        #pass
    #def newCurrentValue(self):
        """Returns the next alignment value."""
        #pass
    #def align(self, currentValue):
        """Process all the elements of self to make them aligned."""
        #pass
    class SingleReader:
        """Single data array reader class.
        Iterates over the rows of one data array while exposing the
        current and the upcoming row (``currentLine`` / ``nextLine``).
        Python 2 iterator protocol (``it.next()``) is used throughout.
        """
        def __init__(self, data, isHArray=False):
            if len(data) == 0:
                raise ValueError, 'Empty data array.'
            self.data = numpy.array(data)
            self.it = self.data.__iter__()
            # isNearlyFinished: the last row has been fetched into nextLine.
            # isFinished: next() was called once more after that.
            self.isNearlyFinished = False
            self.isFinished = False
            self.currentLine = None
            self.nextLine = self.it.next()
            if isHArray:
                # Horizontal arrays: every column after the first holds evals.
                self.idxEvals = range(1, numpy.shape(data)[1])
            else:
                self.idxEvals = idxEvals
        def next(self):
            """Returns the next (last if undefined) line of the array data."""
            if not self.isFinished:
                if not self.isNearlyFinished: # the next line is still defined
                    self.currentLine = self.nextLine.copy()
                    # Update nextLine
                    try:
                        self.nextLine = self.it.next()
                    except StopIteration:
                        self.isNearlyFinished = True
                else:
                    # Sentinel: mark the evaluation column(s) as NaN once the
                    # array is exhausted so alignment loops terminate.
                    self.isFinished = True
                    self.currentLine[self.idxEvals] = numpy.nan
            #TODO: the line above was not valid for the MultiArrayReader
            return self.currentLine
class VMultiReader(MultiReader):
    """List of data arrays to be aligned vertically.
    Aligned vertically means, all number of function evaluations are the
    closest from below or equal to the alignment number of function
    evaluations.
    """
    idx = idxEvals # the alignment value is the number of function evaluations.
    def __init__(self, data, isBiobjective):
        # BUG FIX: MultiReader.__init__'s second positional argument is
        # isHArray, not isBiobjective.  Passing isBiobjective here made every
        # SingleReader treat all trailing columns as evaluation counts for
        # bi-objective data; raw data arrays are never horizontal arrays.
        super(VMultiReader, self).__init__(data)
        # the data of concern are the function values.
        self.idxData = idxFBi if isBiobjective else idxFSingle
    def isFinished(self):
        """True once every underlying reader is exhausted."""
        return all(i.isFinished for i in self)
    def getInitialValue(self):
        """Advance each reader by one line and return the smallest
        evaluation count seen: the first alignment value."""
        for i in self:
            i.next()
        res = self.currentValues()
        return min(res)
    def newCurrentValue(self):
        """Return the next alignment value (smallest upcoming evaluation
        count), or None when no reader has a next line."""
        res = self.nextValues()
        if res:
            return min(res)
        return None
    def align(self, currentValue):
        """Advance every reader up to ``currentValue`` evaluations and
        return the aligned row [currentValue, data of reader 1, ...]."""
        for i in self:
            while not i.isFinished:
                if i.nextLine[self.idx] > currentValue:
                    break
                i.next()
        return numpy.insert(self.currentLine(), 0, currentValue)
class HMultiReader(MultiReader):
    """List of data arrays to be aligned horizontally.
    Aligned horizontally means all the function values are lesser than
    (or equal to) the current alignment function value.
    """
    idxData = idxEvals # the data of concern are the number of function evals.
    def __init__(self, data, isBiobjective):
        # BUG FIX: MultiReader.__init__'s second positional argument is
        # isHArray, not isBiobjective; raw data arrays are never horizontal
        # arrays, so no second argument must be forwarded here.
        super(HMultiReader, self).__init__(data)
        # the alignment value is the function value.
        self.idx = idxFBi if isBiobjective else idxFSingle
        self.nbPtsF = nbPtsFBi if isBiobjective else nbPtsFSingle
        self.idxCurrentF = numpy.inf # Minimization
        # idxCurrentF is a float for the extreme case where it is infinite.
        # else it is an integer and then is the 'i' in 10**(i/nbPtsF)
    def isFinished(self):
        """Is finished when we found the last alignment value reached."""
        currentValue = numpy.power(10, self.idxCurrentF / self.nbPtsF)
        if currentValue == 0:
            return True
        # Skip lines whose function value is still above the current target.
        for i in self:
            while i.nextLine[self.idx] > currentValue and not i.isNearlyFinished:
                i.next()
        return not any(i.nextLine[self.idx] <= currentValue for i in self)
    def getInitialValue(self):
        """Advance each reader once and return the smallest 10^(i/nbPtsF)
        target larger than the largest current function value."""
        for i in self:
            i.next()
        fvalues = self.currentValues()
        self.idxCurrentF = numpy.ceil(numpy.log10(max(fvalues) if max(fvalues) > 0 else 1e-19) * self.nbPtsF)
        # Returns the smallest 10^i/nbPtsF value larger than max(Fvalues)
        return numpy.power(10, self.idxCurrentF / self.nbPtsF)
    def newCurrentValue(self):
        """Step one target index down and return the new target value."""
        self.idxCurrentF -= 1
        return numpy.power(10, self.idxCurrentF / self.nbPtsF)
    def align(self, currentValue):
        """Advance the readers until each current function value is at or
        below ``currentValue``; return [currentValue, evals of reader 1, ...]."""
        fvalues = []
        for i in self:
            while not i.isFinished:
                if i.currentLine[self.idx] <= currentValue:
                    break
                i.next()
            if i.currentLine[self.idx] <= currentValue:
                fvalues.append(i.currentLine[self.idx])
        # This should not happen
        if not fvalues:
            # BUG FIX: the message previously contained an unfilled %g
            # placeholder; interpolate the missed target value.
            raise ValueError('Value %g is not reached.' % currentValue)
        if max(fvalues) <= 0.:
            self.idxCurrentF = -numpy.inf
            currentValue = 0.
        else:
            self.idxCurrentF = min(self.idxCurrentF,
                                   numpy.ceil(numpy.log10(max(fvalues)) * self.nbPtsF))
            # Above line may return: Warning: divide by zero encountered in
            # log10 in the case of negative fvalues.
            # In the case of negative values for fvalues, self.idxCurrentF
            # should be -numpy.inf at the condition that
            # numpy.power(10, -inf) == 0 is true
            # The update of idxCurrentF is done so all the intermediate
            # function value trigger reached are not written, only the smallest
            currentValue = numpy.power(10, self.idxCurrentF / self.nbPtsF)
        return numpy.insert(self.currentLine(), 0, currentValue)
class ArrayMultiReader(MultiReader):
    """Class of *aligned* data arrays to be aligned together.

    Whereas :py:class:`MultiReader` aligns *raw* data arrays, this class
    aligns arrays that are themselves already aligned: their first column
    holds the alignment value, the remaining columns hold aligned data.
    """
    # In an already-aligned array the alignment value sits in column 0.
    idx = 0

    def __init__(self, data, isHArray=False):
        MultiReader.__init__(self, data, isHArray)

    def currentLine(self):
        """Concatenate the data columns (everything but the alignment
        column) of every reader's current line into one 1-D array."""
        chunks = [reader.currentLine[1:] for reader in self]
        return numpy.hstack(chunks)
class VArrayMultiReader(ArrayMultiReader, VMultiReader):
    """Wrapper class of *aligned* data arrays to be aligned vertically."""
    def __init__(self, data):
        # Only ArrayMultiReader's initialization is needed; the vertical
        # alignment behavior comes from VMultiReader through the MRO.
        ArrayMultiReader.__init__(self, data)
        #TODO: Should this use super?
class HArrayMultiReader(ArrayMultiReader, HMultiReader):
    """Wrapper class of *aligned* data arrays to be aligned horizontally."""
    def __init__(self, data, isBiobjective):
        # isHArray=True: every column after the first holds evaluation data.
        ArrayMultiReader.__init__(self, data, isHArray=True)
        #TODO: Should this use super?
        # NOTE(review): HMultiReader.__init__ is not called, so self.idx
        # stays at the class attribute 0 from ArrayMultiReader -- looks
        # intentional for aligned arrays, but confirm.
        self.nbPtsF = nbPtsFBi if isBiobjective else nbPtsFSingle
        self.idxCurrentF = numpy.inf #Minimization
#FUNCTION DEFINITIONS
def alignData(data, isBiobjective):
    """Aligns the data from a list of data arrays.
    This method returns an array for which the alignment value is the
    first column and the aligned values are in subsequent columns.
    """
    #TODO: is template dependent.
    idxF = idxFBi if isBiobjective else idxFSingle
    res = []
    currentValue = data.getInitialValue()
    #set_trace()
    # Ensure at least one aligned row is emitted even when the readers are
    # already exhausted after producing the initial value.
    if data.isFinished():
        res.append(data.align(currentValue))
    while not data.isFinished():
        res.append(data.align(currentValue))
        currentValue = data.newCurrentValue()
    # Also return, per reader, the evaluation count and function value of
    # its last line (see hack note below).
    return (numpy.vstack(res), numpy.array(list(i.nextLine[idxEvals] for i in data)),
            numpy.array(list(i.nextLine[idxF] for i in data)))
    # Hack: at this point nextLine contains all information on the last line
    # of the data.
def alignArrayData(data):
    """Aligns the data from a list of aligned arrays.
    This method returns an array for which the alignment value is the first
    column and the aligned values are in subsequent columns.
    """
    #TODO: is template dependent.
    rows = []
    value = data.getInitialValue()
    # When the readers are exhausted right away, still emit a single row.
    if data.isFinished():
        rows.append(data.align(value))
    while not data.isFinished():
        rows.append(data.align(value))
        value = data.newCurrentValue()
    return numpy.vstack(rows)
    # Hack: at this point nextLine contains all information on the last line
    # of the data.
def split(dataFiles, dim=None):
    """Split a list of data files into arrays corresponding to data sets.

    Data sets inside a file are separated by comment lines starting with
    '%'.  Returns a list of 2-D numpy arrays, one per data set.
    """
    dataSets = []
    for fil in dataFiles:
        with open(fil, 'r') as f:
            # This doesnt work with windows.
            # content = numpy.loadtxt(fil, comments='%')
            lines = f.readlines()
        content = []
        # Save values in array content. Check for nan and inf.
        for line in lines:
            # skip if comment
            if line.startswith('%'):
                # A comment line terminates the current data set, if any.
                if content:
                    dataSets.append(numpy.vstack(content))
                content = []
                continue
            # else remove end-of-line sign
            # and split into single strings
            data = line.strip('\n').split()
            # NOTE(review): a complete data row is expected to have dim + 5
            # columns -- confirm against the experiment logger's format.
            if dim and len(data) != dim + 5:
                warnings.warn('Incomplete line %s in ' % (line) +
                              'data file %s: ' % (fil))
                continue
            # Map textual Inf/NaN markers to numpy values, everything else
            # to float.  (Python 2: xrange.)
            for id in xrange(len(data)):
                if data[id] in ('Inf', 'inf'):
                    data[id] = numpy.inf
                elif data[id] in ('-Inf', '-inf'):
                    data[id] = -numpy.inf
                elif data[id] in ('NaN', 'nan'):
                    data[id] = numpy.nan
                else:
                    data[id] = float(data[id])
            content.append(numpy.array(data))
        #Check that it always have the same length?
        # Flush the trailing data set of the file.
        if content:
            dataSets.append(numpy.vstack(content))
    return dataSets
|
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/tangible/hair/sullustan/shared_sul_hair_s02_f.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	# Factory for this auto-generated object template: builds a Tangible,
	# points it at the .iff asset and names it via the hair STF entry.
	# (File header: hand edits outside the MODIFICATIONS markers may be
	# lost on regeneration.)
	result = Tangible()
	result.template = "object/tangible/hair/sullustan/shared_sul_hair_s02_f.iff"
	result.attribute_template_id = -1
	result.stfName("hair_name","hair")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
lucywyman/slides-ii | refs/heads/master | v/lib/python2.7/site-packages/pip/commands/uninstall.py | 395 | from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
    """
    Uninstall packages.
    pip is able to uninstall most installed packages. Known exceptions are:
    - Pure distutils packages installed with ``python setup.py install``, which
      leave behind no metadata to determine what files were installed.
    - Script wrappers installed by ``python setup.py develop``.
    """
    name = 'uninstall'
    usage = """
      %prog [options] <package> ...
      %prog [options] -r <requirements file> ..."""
    summary = 'Uninstall packages.'

    def __init__(self, *args, **kw):
        # Register the command-line options specific to `pip uninstall`.
        super(UninstallCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='file',
            help='Uninstall all the packages listed in the given requirements file. '
            'This option can be used multiple times.')
        self.cmd_opts.add_option(
            '-y', '--yes',
            dest='yes',
            action='store_true',
            help="Don't ask for confirmation of uninstall deletions.")
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        # Collect the requirements to remove -- from positional arguments
        # and from any -r requirements files -- then uninstall them.
        session = self._build_session(options)
        # Uninstalling needs no build/src/download directories.
        requirement_set = RequirementSet(
            build_dir=None,
            src_dir=None,
            download_dir=None,
            session=session,
        )
        for name in args:
            requirement_set.add_requirement(
                InstallRequirement.from_line(name))
        for filename in options.requirements:
            for req in parse_requirements(filename,
                    options=options, session=session):
                requirement_set.add_requirement(req)
        if not requirement_set.has_requirements:
            raise InstallationError('You must give at least one requirement '
                'to %(name)s (see "pip help %(name)s")' % dict(name=self.name))
        requirement_set.uninstall(auto_confirm=options.yes)
|
grantsewell/nzbToMedia | refs/heads/master | libs/babelfish/country.py | 42 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
from collections import namedtuple
from functools import partial
from pkg_resources import resource_stream # @UnresolvedImport
from .converters import ConverterManager
from . import basestr
COUNTRIES = {}
COUNTRY_MATRIX = []
#: The namedtuple used in the :data:`COUNTRY_MATRIX`
IsoCountry = namedtuple('IsoCountry', ['name', 'alpha2'])
# Populate COUNTRIES (alpha2 -> name) and COUNTRY_MATRIX from the bundled
# semicolon-separated ISO-3166-1 data file; the first line is a header.
f = resource_stream('babelfish', 'data/iso-3166-1.txt')
f.readline()
for l in f:
    iso_country = IsoCountry(*l.decode('utf-8').strip().split(';'))
    COUNTRIES[iso_country.alpha2] = iso_country.name
    COUNTRY_MATRIX.append(iso_country)
f.close()
class CountryConverterManager(ConverterManager):
    """:class:`~babelfish.converters.ConverterManager` for country converters"""
    entry_point = 'babelfish.country_converters'
    internal_converters = ['name = babelfish.converters.countryname:CountryNameConverter']
# Shared manager instance used by :class:`Country` for code conversions.
country_converters = CountryConverterManager()
class CountryMeta(type):
    """The :class:`Country` metaclass
    Dynamically redirect :meth:`Country.frommycode` to :meth:`Country.fromcode` with the ``mycode`` `converter`
    """
    def __getattr__(cls, name):
        # Only attribute names of the form from<converter> are synthesized;
        # anything else goes through the normal lookup (and may raise).
        if not name.startswith('from'):
            return type.__getattribute__(cls, name)
        converter_name = name[4:]
        return partial(cls.fromcode, converter=converter_name)
class Country(CountryMeta(str('CountryBase'), (object,), {})):
    """A country on Earth
    A country is represented by a 2-letter code from the ISO-3166 standard
    :param string country: 2-letter ISO-3166 country code
    """
    def __init__(self, country):
        # Validate against the module-level COUNTRIES mapping.
        if country not in COUNTRIES:
            raise ValueError('%r is not a valid country' % country)
        #: ISO-3166 2-letter country code
        self.alpha2 = country
    @classmethod
    def fromcode(cls, code, converter):
        """Create a :class:`Country` by its `code` using `converter` to
        :meth:`~babelfish.converters.CountryReverseConverter.reverse` it
        :param string code: the code to reverse
        :param string converter: name of the :class:`~babelfish.converters.CountryReverseConverter` to use
        :return: the corresponding :class:`Country` instance
        :rtype: :class:`Country`
        """
        return cls(country_converters[converter].reverse(code))
    def __getstate__(self):
        # Pickle only the alpha2 code; everything else is derived.
        return self.alpha2
    def __setstate__(self, state):
        self.alpha2 = state
    def __getattr__(self, name):
        # Unknown attributes are treated as converter names, e.g.
        # country.name delegates to the registered 'name' converter.
        return country_converters[name].convert(self.alpha2)
    def __hash__(self):
        return hash(self.alpha2)
    def __eq__(self, other):
        # A Country also compares equal to its own alpha2 string.
        if isinstance(other, basestr):
            return str(self) == other
        if not isinstance(other, Country):
            return False
        return self.alpha2 == other.alpha2
    def __ne__(self, other):
        return not self == other
    def __repr__(self):
        return '<Country [%s]>' % self
    def __str__(self):
        return self.alpha2
|
StefanKjartansson/django-icelandic-addresses | refs/heads/master | ice_addresses/south_migrations/__init__.py | 12133432 | |
benjaminrigaud/django | refs/heads/master | tests/requests/__init__.py | 12133432 | |
ojengwa/oh-mainline | refs/heads/master | vendor/packages/scrapy/scrapyd/tests/__init__.py | 12133432 | |
jillson/chrononaut | refs/heads/master | adventure/migrations/__init__.py | 12133432 | |
yencarnacion/jaikuengine | refs/heads/master | .google_appengine/lib/django_1_3/django/contrib/comments/views/__init__.py | 12133432 | |
Garrett-R/scikit-learn | refs/heads/master | examples/cluster/plot_digits_linkage.py | 369 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
    """Double the dataset by appending a randomly shifted copy of each image.

    Every row of ``X`` is interpreted as an 8x8 image and translated by a
    small random offset (.3 * N(0, 1) per axis); labels are duplicated.
    """
    # Having a larger dataset shows more clearly the behavior of the
    # methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods are strongly
    # super-linear in n_samples
    def _shift(image_row):
        offset = .3 * np.random.normal(size=2)
        return ndimage.shift(image_row.reshape((8, 8)),
                             offset,
                             mode='constant').ravel()
    X_aug = np.concatenate([X, np.apply_along_axis(_shift, 1, X)])
    y_aug = np.concatenate([y, y], axis=0)
    return X_aug, y_aug
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
    # Draw each sample at its 2-D embedded position as its digit glyph,
    # colored by cluster assignment.
    # NOTE(review): parameter X is unused, and the glyph text uses the
    # module-level ground-truth array `y`, not an argument.
    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
    # Rescale the embedding to the unit square for plotting.
    X_red = (X_red - x_min) / (x_max - x_min)
    plt.figure(figsize=(6, 4))
    for i in range(X_red.shape[0]):
        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
                 color=plt.cm.spectral(labels[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})
    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title, size=17)
    plt.axis('off')
    plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
|
Split-Screen/android_kernel_huawei_msm8928 | refs/heads/pac-5.1 | tools/perf/scripts/python/sched-migration.py | 11215 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
	# Render a pid as "comm:pid" using the global pid -> comm map.
	return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
	"""Placeholder runqueue event used when nothing specific happened."""
	def __repr__(self):
		return "unknown"

	@staticmethod
	def color():
		# Unknown events have no dedicated color.
		return None
class RunqueueEventSleep:
	"""Runqueue event: a task left the runqueue to sleep."""
	def __init__(self, sleeper):
		# pid of the task that went to sleep
		self.sleeper = sleeper

	def __repr__(self):
		return "%s gone to sleep" % thread_name(self.sleeper)

	@staticmethod
	def color():
		# Blue.
		return (0, 0, 0xff)
class RunqueueEventWakeup:
	# Runqueue event: a task was woken up.  Yellow.
	@staticmethod
	def color():
		return (0xff, 0xff, 0)
	def __init__(self, wakee):
		# pid of the woken task
		self.wakee = wakee
	def __repr__(self):
		return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
	# Runqueue event: a newly forked task appeared.  Green.
	@staticmethod
	def color():
		return (0, 0xff, 0)
	def __init__(self, child):
		# pid of the new child task
		self.child = child
	def __repr__(self):
		return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
	# Runqueue event: a task migrated onto this CPU.  Cyan-ish.
	@staticmethod
	def color():
		return (0, 0xf0, 0xff)
	def __init__(self, new):
		# pid of the migrated-in task
		self.new = new
	def __repr__(self):
		return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
	# Runqueue event: a task migrated away from this CPU.  Magenta.
	@staticmethod
	def color():
		return (0xff, 0, 0xff)
	def __init__(self, old):
		# pid of the migrated-out task
		self.old = old
	def __repr__(self):
		return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
	"""Snapshot of one CPU runqueue: the tuple of runnable pids plus the
	event that produced this state.  Mutating operations return a new
	snapshot (except the record-only case in __migrate_in)."""
	def __init__(self, tasks = (0,), event = RunqueueEventUnknown()):
		# BUG FIX: the default used to be the mutable list [0]; a tuple
		# removes any risk of cross-call sharing.  pid 0 is the idle task.
		self.tasks = tuple(tasks)
		self.event = event

	def sched_switch(self, prev, prev_state, next):
		event = RunqueueEventUnknown()

		# prev stayed runnable and both tasks already tracked: no change.
		if taskState(prev_state) == "R" and next in self.tasks \
			and prev in self.tasks:
			return self

		if taskState(prev_state) != "R":
			event = RunqueueEventSleep(prev)

		next_tasks = list(self.tasks[:])
		if prev in self.tasks:
			if taskState(prev_state) != "R":
				next_tasks.remove(prev)
		elif taskState(prev_state) == "R":
			next_tasks.append(prev)

		if next not in next_tasks:
			next_tasks.append(next)

		return RunqueueSnapshot(next_tasks, event)

	def migrate_out(self, old):
		if old not in self.tasks:
			return self
		next_tasks = [task for task in self.tasks if task != old]
		return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

	def __migrate_in(self, new, event):
		if new in self.tasks:
			# Task already present: only record the event on this snapshot.
			self.event = event
			return self
		next_tasks = self.tasks[:] + tuple([new])
		return RunqueueSnapshot(next_tasks, event)

	def migrate_in(self, new):
		return self.__migrate_in(new, RunqueueMigrateIn(new))

	def wake_up(self, new):
		return self.__migrate_in(new, RunqueueEventWakeup(new))

	def wake_up_new(self, new):
		return self.__migrate_in(new, RunqueueEventFork(new))

	def load(self):
		""" Provide the number of tasks on the runqueue.
		    Don't count idle"""
		return len(self.tasks) - 1

	def __repr__(self):
		# BUG FIX: this used to call self.origin_tostring(), which does not
		# exist anywhere, so repr() always raised AttributeError.  Show the
		# last event alongside the task tuple instead.
		ret = self.tasks.__repr__()
		ret += " [%s]" % self.event.__repr__()
		return ret
class TimeSlice:
	# One interval of time during which the per-CPU runqueue states are
	# constant.  `rqs` maps cpu -> RunqueueSnapshot; `total_load` is the
	# sum of the runqueue loads; `event_cpus` lists the CPUs whose event
	# created this slice.
	def __init__(self, start, prev):
		self.start = start
		self.prev = prev
		self.end = start
		# cpus that triggered the event
		self.event_cpus = []
		if prev is not None:
			# Inherit the state of the previous slice.
			self.total_load = prev.total_load
			self.rqs = prev.rqs.copy()
		else:
			self.rqs = defaultdict(RunqueueSnapshot)
			self.total_load = 0

	def __update_total_load(self, old_rq, new_rq):
		# Adjust total_load by the load delta of one runqueue change.
		diff = new_rq.load() - old_rq.load()
		self.total_load += diff

	def sched_switch(self, ts_list, prev, prev_state, next, cpu):
		old_rq = self.prev.rqs[cpu]
		new_rq = old_rq.sched_switch(prev, prev_state, next)
		if old_rq is new_rq:
			# Snapshot unchanged: nothing to record.
			return

		self.rqs[cpu] = new_rq
		self.__update_total_load(old_rq, new_rq)
		ts_list.append(self)
		self.event_cpus = [cpu]

	def migrate(self, ts_list, new, old_cpu, new_cpu):
		if old_cpu == new_cpu:
			return
		# Remove the task from the source CPU ...
		old_rq = self.prev.rqs[old_cpu]
		out_rq = old_rq.migrate_out(new)
		self.rqs[old_cpu] = out_rq
		self.__update_total_load(old_rq, out_rq)
		# ... and add it to the destination CPU.
		new_rq = self.prev.rqs[new_cpu]
		in_rq = new_rq.migrate_in(new)
		self.rqs[new_cpu] = in_rq
		self.__update_total_load(new_rq, in_rq)

		ts_list.append(self)

		if old_rq is not out_rq:
			self.event_cpus.append(old_cpu)
		self.event_cpus.append(new_cpu)

	def wake_up(self, ts_list, pid, cpu, fork):
		old_rq = self.prev.rqs[cpu]
		if fork:
			new_rq = old_rq.wake_up_new(pid)
		else:
			new_rq = old_rq.wake_up(pid)

		if new_rq is old_rq:
			return

		self.rqs[cpu] = new_rq
		self.__update_total_load(old_rq, new_rq)
		ts_list.append(self)
		self.event_cpus = [cpu]

	def next(self, t):
		# Close this slice at time t and start the next one.
		self.end = t
		return TimeSlice(t, self)
class TimeSliceList(UserList):
	# Ordered list of TimeSlice objects plus the drawing helpers used by
	# the wx GUI (RootFrame in SchedGui).
	def __init__(self, arg = []):
		self.data = arg

	def get_time_slice(self, ts):
		# Return a fresh slice starting at ts, chained to the last one.
		if len(self.data) == 0:
			slice = TimeSlice(ts, TimeSlice(-1, None))
		else:
			slice = self.data[-1].next(ts)
		return slice

	def find_time_slice(self, ts):
		# Binary search for the slice containing timestamp ts; -1 if none.
		start = 0
		end = len(self.data)
		found = -1
		searching = True
		while searching:
			if start == end or start == end - 1:
				searching = False

			i = (end + start) / 2  # Python 2: integer floor division
			if self.data[i].start <= ts and self.data[i].end >= ts:
				found = i
				end = i
				continue

			if self.data[i].end < ts:
				start = i

			elif self.data[i].start > ts:
				end = i

		return found

	def set_root_win(self, win):
		self.root_win = win

	def mouse_down(self, cpu, t):
		# GUI click handler: describe the runqueue under the cursor.
		idx = self.find_time_slice(t)
		if idx == -1:
			return

		ts = self[idx]
		rq = ts.rqs[cpu]
		raw = "CPU: %d\n" % cpu
		raw += "Last event : %s\n" % rq.event.__repr__()
		raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
		raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
		raw += "Load = %d\n" % rq.load()
		for t in rq.tasks:
			raw += "%s \n" % thread_name(t)

		self.root_win.update_summary(raw)

	def update_rectangle_cpu(self, slice, cpu):
		# Paint one CPU's band for one slice; redder means higher share
		# of the total load, top stripe colored by the triggering event.
		rq = slice.rqs[cpu]

		if slice.total_load != 0:
			load_rate = rq.load() / float(slice.total_load)
		else:
			load_rate = 0

		red_power = int(0xff - (0xff * load_rate))
		color = (0xff, red_power, red_power)

		top_color = None

		if cpu in slice.event_cpus:
			top_color = rq.event.color()

		self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

	def fill_zone(self, start, end):
		# Repaint every slice overlapping [start, end].
		i = self.find_time_slice(start)
		if i == -1:
			return

		for i in xrange(i, len(self.data)):
			timeslice = self.data[i]
			if timeslice.start > end:
				return

			for cpu in timeslice.rqs:
				self.update_rectangle_cpu(timeslice, cpu)

	def interval(self):
		# (first start, last end) of the recorded window.
		if len(self.data) == 0:
			return (0, 0)

		return (self.data[0].start, self.data[-1].end)

	def nr_rectangles(self):
		# Highest CPU number seen in the final slice.
		last_ts = self.data[-1]
		max_cpu = 0
		for cpu in last_ts.rqs:
			if cpu > max_cpu:
				max_cpu = cpu
		return max_cpu
class SchedEventProxy:
	# Receives decoded scheduler tracepoints and turns them into TimeSlices.
	def __init__(self):
		# cpu -> pid currently running there; -1 means not yet known.
		self.current_tsk = defaultdict(lambda : -1)
		self.timeslices = TimeSliceList()

	def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
			next_comm, next_pid, next_prio):
		""" Ensure the task we sched out this cpu is really the one
		    we logged. Otherwise we may have missed traces """

		on_cpu_task = self.current_tsk[headers.cpu]
		if on_cpu_task != -1 and on_cpu_task != prev_pid:
			print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
				(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

		# Keep the pid -> comm map current for thread_name().
		threads[prev_pid] = prev_comm
		threads[next_pid] = next_comm
		self.current_tsk[headers.cpu] = next_pid

		ts = self.timeslices.get_time_slice(headers.ts())
		ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

	def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
		ts = self.timeslices.get_time_slice(headers.ts())
		ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

	def wake_up(self, headers, comm, pid, success, target_cpu, fork):
		# Failed wakeups carry no runqueue change.
		if success == 0:
			return
		ts = self.timeslices.get_time_slice(headers.ts())
		ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
	# perf-script hook: create the proxy that accumulates all events.
	global parser
	parser = SchedEventProxy()
def trace_end():
	# perf-script hook: open the wx GUI over the collected time slices.
	app = wx.App(False)
	timeslices = parser.timeslices
	frame = RootFrame(timeslices, "Migration")
	app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio, orig_cpu,
	dest_cpu):
	# Tracepoint handler: forward task migrations to the proxy.
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
				common_pid, common_comm)
	parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	prev_comm, prev_pid, prev_prio, prev_state,
	next_comm, next_pid, next_prio):
	# Tracepoint handler: forward context switches to the proxy.
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
				common_pid, common_comm)
	parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
			 next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio, success,
	target_cpu):
	# Tracepoint handler: wakeup of a freshly forked task (fork=1).
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
				common_pid, common_comm)
	parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio, success,
	target_cpu):
	# Tracepoint handler: regular task wakeup (fork=0).
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
				common_pid, common_comm)
	parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
                    common_pid, common_comm):
    # Catch-all invoked for events without a dedicated handler; ignored.
    pass
|
tumbl3w33d/ansible | refs/heads/devel | lib/ansible/modules/cloud/google/gcp_compute_target_pool.py | 13 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_pool
description:
- Represents a TargetPool resource, used for Load Balancing.
short_description: Creates a GCP TargetPool
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
backup_pool:
description:
- This field is applicable only when the containing target pool is serving a forwarding
rule as the primary pool, and its failoverRatio field is properly set to a value
between [0, 1].
- 'backupPool and failoverRatio together define the fallback behavior of the primary
target pool: if the ratio of the healthy instances in the primary pool is at
or below failoverRatio, traffic arriving at the load-balanced IP will be directed
to the backup pool.'
- In case where failoverRatio and backupPool are not set, or all the instances
in the backup pool are unhealthy, the traffic will be directed back to the primary
pool in the "force" mode, where traffic will be spread to the healthy instances
with the best effort, or to all instances when no instance is healthy.
- 'This field represents a link to a TargetPool resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_target_pool task and then set this backup_pool field to "{{
name-of-resource }}"'
required: false
type: dict
description:
description:
- An optional description of this resource.
required: false
type: str
failover_ratio:
description:
- This field is applicable only when the containing target pool is serving a forwarding
rule as the primary pool (i.e., not as a backup pool to some other target pool).
The value of the field must be in [0, 1].
- 'If set, backupPool must also be set. They together define the fallback behavior
of the primary target pool: if the ratio of the healthy instances in the primary
pool is at or below this number, traffic arriving at the load-balanced IP will
be directed to the backup pool.'
- In case where failoverRatio is not set or all the instances in the backup pool
are unhealthy, the traffic will be directed back to the primary pool in the
"force" mode, where traffic will be spread to the healthy instances with the
best effort, or to all instances when no instance is healthy.
required: false
type: str
health_check:
description:
- A reference to a HttpHealthCheck resource.
- A member instance in this pool is considered healthy if and only if the health
checks pass. If not specified it means all member instances will be considered
healthy at all times.
- 'This field represents a link to a HttpHealthCheck resource in GCP. It can be
specified in two ways. First, you can place a dictionary with key ''selfLink''
and value of your resource''s selfLink Alternatively, you can add `register:
name-of-resource` to a gcp_compute_http_health_check task and then set this
health_check field to "{{ name-of-resource }}"'
required: false
type: dict
instances:
description:
- A list of virtual machine instances serving this pool.
- They must live in zones contained in the same region as this pool.
required: false
type: list
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
session_affinity:
description:
- 'Session affinity option. Must be one of these values: - NONE: Connections from
the same client IP may go to any instance in the pool.'
- "- CLIENT_IP: Connections from the same client IP will go to the same instance
in the pool while that instance remains healthy."
- "- CLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol
will go to the same instance in the pool while that instance remains healthy."
- 'Some valid choices include: "NONE", "CLIENT_IP", "CLIENT_IP_PROTO"'
required: false
type: str
region:
description:
- The region where the target pool resides.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/targetPools)'
- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/network/target-pools)'
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a target pool
gcp_compute_target_pool:
name: test_object
region: us-west1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
backupPool:
description:
- This field is applicable only when the containing target pool is serving a forwarding
rule as the primary pool, and its failoverRatio field is properly set to a value
between [0, 1].
- 'backupPool and failoverRatio together define the fallback behavior of the primary
target pool: if the ratio of the healthy instances in the primary pool is at or
below failoverRatio, traffic arriving at the load-balanced IP will be directed
to the backup pool.'
- In case where failoverRatio and backupPool are not set, or all the instances in
the backup pool are unhealthy, the traffic will be directed back to the primary
pool in the "force" mode, where traffic will be spread to the healthy instances
with the best effort, or to all instances when no instance is healthy.
returned: success
type: dict
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
failoverRatio:
description:
- This field is applicable only when the containing target pool is serving a forwarding
rule as the primary pool (i.e., not as a backup pool to some other target pool).
The value of the field must be in [0, 1].
- 'If set, backupPool must also be set. They together define the fallback behavior
of the primary target pool: if the ratio of the healthy instances in the primary
pool is at or below this number, traffic arriving at the load-balanced IP will
be directed to the backup pool.'
- In case where failoverRatio is not set or all the instances in the backup pool
are unhealthy, the traffic will be directed back to the primary pool in the "force"
mode, where traffic will be spread to the healthy instances with the best effort,
or to all instances when no instance is healthy.
returned: success
type: str
healthCheck:
description:
- A reference to a HttpHealthCheck resource.
- A member instance in this pool is considered healthy if and only if the health
checks pass. If not specified it means all member instances will be considered
healthy at all times.
returned: success
type: dict
id:
description:
- The unique identifier for the resource.
returned: success
type: int
instances:
description:
- A list of virtual machine instances serving this pool.
- They must live in zones contained in the same region as this pool.
returned: success
type: list
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
sessionAffinity:
description:
- 'Session affinity option. Must be one of these values: - NONE: Connections from
the same client IP may go to any instance in the pool.'
- "- CLIENT_IP: Connections from the same client IP will go to the same instance
in the pool while that instance remains healthy."
- "- CLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol
will go to the same instance in the pool while that instance remains healthy."
returned: success
type: str
region:
description:
- The region where the target pool resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
    """Main function.

    Reconciles the desired TargetPool state (module params) with GCP:
    fetches the current resource, then creates / updates / deletes as
    needed and exits with the resulting resource plus a 'changed' flag.
    """
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            backup_pool=dict(type='dict'),
            description=dict(type='str'),
            failover_ratio=dict(type='str'),
            health_check=dict(type='dict'),
            instances=dict(type='list', elements='dict'),
            name=dict(required=True, type='str'),
            session_affinity=dict(type='str'),
            region=dict(required=True, type='str'),
        )
    )

    # Default to the compute scope when the playbook supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#targetPool'

    # Look up the current remote state (None/{} when it does not exist).
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            # Resource exists: update only when it actually differs.
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            # Absent and already missing: nothing to do.
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)
def create(module, link, kind):
    """POST the resource to *link* and wait for the async op to finish."""
    session = GcpSession(module, 'compute')
    response = session.post(link, resource_to_request(module))
    return wait_for_operation(module, response)
def update(module, link, kind):
    """PUT the full resource to *link* and wait for the async op to finish."""
    session = GcpSession(module, 'compute')
    response = session.put(link, resource_to_request(module))
    return wait_for_operation(module, response)
def delete(module, link, kind):
    """DELETE the resource at *link* and wait for the async op to finish."""
    session = GcpSession(module, 'compute')
    response = session.delete(link)
    return wait_for_operation(module, response)
def resource_to_request(module):
    """Build the API request body from the module parameters.

    Resource references are flattened to their selfLink, the
    healthCheck field is re-shaped for the API (see encode_request),
    and empty values are stripped (False is kept — it is meaningful).
    """
    payload = {
        u'kind': 'compute#targetPool',
        u'backupPool': replace_resource_dict(module.params.get(u'backup_pool', {}), 'selfLink'),
        u'description': module.params.get('description'),
        u'failoverRatio': module.params.get('failover_ratio'),
        u'healthCheck': replace_resource_dict(module.params.get(u'health_check', {}), 'selfLink'),
        u'instances': replace_resource_dict(module.params.get('instances', []), 'selfLink'),
        u'name': module.params.get('name'),
        u'sessionAffinity': module.params.get('session_affinity'),
    }
    payload = encode_request(payload, module)

    # Drop unset/empty fields but preserve explicit False values.
    return {k: v for k, v in payload.items() if v or v is False}
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET *link* and return the decoded resource (None when absent)."""
    session = GcpSession(module, 'compute')
    response = session.get(link)
    return return_if_object(module, response, kind, allow_not_found)
def self_link(module):
    """Return the canonical URL of this target pool resource."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetPools/{name}"
    return template.format(**module.params)
def collection(module):
    """Return the URL of the regional targetPools collection."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetPools"
    return template.format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an HTTP *response* into a dict, failing the module on errors.

    Returns None for 404 (when allow_not_found) and for 204 No Content.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        # json.decoder.JSONDecodeError exists on Python 3 only; fall back to
        # its base class ValueError on Python 2.
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    # Normalize the API shape (healthChecks list -> healthCheck).
    result = decode_response(result, module)

    # Surface any API-reported errors embedded in the body.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
def is_different(module, response):
    """Return True when the desired state differs from the remote state.

    Only keys present on *both* sides are compared, so output-only
    response fields and unset parameters never cause a spurious diff.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    # resource_to_request ran encode_request; decode it back so request and
    # response use the same (healthCheck) shape before comparing.
    request = decode_response(request, module)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    # backupPool, name and sessionAffinity are rebuilt from the module
    # parameters (mirroring resource_to_request) rather than read back
    # from the response.
    return {
        u'backupPool': replace_resource_dict(module.params.get(u'backup_pool', {}), 'selfLink'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'description': response.get(u'description'),
        u'failoverRatio': response.get(u'failoverRatio'),
        u'healthCheck': response.get(u'healthCheck'),
        u'id': response.get(u'id'),
        u'instances': response.get(u'instances'),
        u'name': module.params.get('name'),
        u'sessionAffinity': module.params.get('session_affinity'),
    }
def async_op_url(module, extra_data=None):
    """Build the polling URL for a regional Operation.

    *extra_data* (typically {'op_id': ...}) supplies template values not
    present in module.params; module.params values take precedence.
    """
    template = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
    merged = dict(extra_data or {}, **module.params)
    return template.format(**merged)
def wait_for_operation(module, response):
    """Block until the async operation in *response* finishes, then fetch
    and return the resulting target pool (decoded), or {} when gone."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}

    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)

    # The completed operation's targetLink points at the affected resource.
    response = fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetPool')
    if response:
        return decode_response(response, module)
    else:
        return {}
def wait_for_completion(status, op_result, module):
    """Poll the operation once a second until its status is DONE.

    Fails the module immediately if the operation reports errors.
    Returns the final operation document.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        # allow_not_found=False: a vanished operation is an error here.
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module when *response* carries errors under *err_path*."""
    found = navigate_hash(response, err_path)
    if found is not None:
        module.fail_json(msg=found)
# Mask the fact healthChecks array is actually a single object of type
# HttpHealthCheck.
#
# Google Compute Engine API defines healthChecks as a list but it can only
# take [0, 1] elements. To make it simpler to declare we'll map that to a
# single object and encode/decode as appropriate.
def encode_request(request, module):
    """Wrap the single 'healthCheck' value into the API's 'healthChecks' list."""
    if 'healthCheck' in request:
        # pop() moves the value across in one step.
        request['healthChecks'] = [request.pop('healthCheck')]
    return request
# Mask healthChecks into a single element.
# @see encode_request for details
def decode_response(response, module):
    """Collapse the API's 'healthChecks' list into a single 'healthCheck'.

    Non-targetPool documents (e.g. compute#operation) pass through
    untouched. The 'healthChecks' key is always removed once handled.
    """
    if response['kind'] != 'compute#targetPool':
        return response

    # Map healthChecks[0] => healthCheck
    if 'healthChecks' in response:
        # BUGFIX: was `if not response['healthChecks']:`, which indexed into
        # an *empty* list (IndexError) and dropped the health check whenever
        # one was actually present. Only unwrap when the list is non-empty.
        if response['healthChecks']:
            response['healthCheck'] = response['healthChecks'][0]
        del response['healthChecks']

    return response
if __name__ == '__main__':
main()
|
cp16net/virgo-base | refs/heads/master | tools/gyp/test/mac/gyptest-installname.py | 244 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled
correctly.
"""
import TestGyp
import re
import subprocess
import sys
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

  CHDIR = 'installname'
  test.run_gyp('test.gyp', chdir=CHDIR)
  test.build('test.gyp', test.ALL, chdir=CHDIR)

  def GetInstallname(p):
    # Extract the LC_ID_DYLIB install name from the built binary via otool.
    p = test.built_file_path(p, chdir=CHDIR)
    r = re.compile(r'cmd LC_ID_DYLIB.*?name (.*?) \(offset \d+\)', re.DOTALL)
    proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE)
    o = proc.communicate()[0]
    assert not proc.returncode
    m = r.search(o)
    assert m
    return m.group(1)

  # Default dylib install name: /usr/local/lib/<basename>.
  if (GetInstallname('libdefault_installname.dylib') !=
      '/usr/local/lib/libdefault_installname.dylib'):
    test.fail_test()

  # Default framework install name: /Library/Frameworks/... .
  if (GetInstallname('My Framework.framework/My Framework') !=
      '/Library/Frameworks/My Framework.framework/'
      'Versions/A/My Framework'):
    test.fail_test()

  # LD_DYLIB_INSTALL_NAME replaces the computed name entirely.
  if (GetInstallname('libexplicit_installname.dylib') !=
      'Trapped in a dynamiclib factory'):
    test.fail_test()

  # DYLIB_INSTALL_NAME_BASE changes only the directory prefix.
  if (GetInstallname('libexplicit_installname_base.dylib') !=
      '@executable_path/../../../libexplicit_installname_base.dylib'):
    test.fail_test()

  if (GetInstallname('My Other Framework.framework/My Other Framework') !=
      '@executable_path/../../../My Other Framework.framework/'
      'Versions/A/My Other Framework'):
    test.fail_test()

  if (GetInstallname('libexplicit_installname_with_base.dylib') !=
      '/usr/local/lib/libexplicit_installname_with_base.dylib'):
    test.fail_test()

  if (GetInstallname('libexplicit_installname_with_explicit_base.dylib') !=
      '@executable_path/../libexplicit_installname_with_explicit_base.dylib'):
    test.fail_test()

  # LD_DYLIB_INSTALL_NAME wins over DYLIB_INSTALL_NAME_BASE.
  if (GetInstallname('libboth_base_and_installname.dylib') !=
      'Still trapped in a dynamiclib factory'):
    test.fail_test()

  if (GetInstallname('install_name_with_info_plist.framework/'
                     'install_name_with_info_plist') !=
      '/Library/Frameworks/install_name_with_info_plist.framework/'
      'Versions/A/install_name_with_info_plist'):
    test.fail_test()

  # Make sure no shell error from expanding DYLIB_INSTALL_NAME_BASE leaked
  # into the build output.
  if ('DYLIB_INSTALL_NAME_BASE:standardizepath: command not found' in
      test.stdout()):
    test.fail_test()

  test.pass_test()
openpolis/open_municipio | refs/heads/dev-senigallia | open_municipio/idioticon/templatetags/__init__.py | 37 | __author__ = 'guglielmo'
|
AuthenticEshkinKot/criu | refs/heads/master | test/others/rpc/test.py | 9 | #!/usr/bin/python2
import socket, os, imp, sys
import rpc_pb2 as rpc
import argparse
# NOTE: Python 2 script (see the /usr/bin/python2 shebang) — the print
# statements below are intentional.
parser = argparse.ArgumentParser(description="Test dump/restore using CRIU RPC")
parser.add_argument('socket', type = str, help = "CRIU service socket")
parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed")
args = vars(parser.parse_args())

# Connect to service socket
s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
s.connect(args['socket'])

# Create criu msg, set its type to dump request
# and set dump options. Checkout more options in protobuf/rpc.proto
req = rpc.criu_req()
req.type = rpc.DUMP
req.opts.leave_running = True
req.opts.log_level = 4
# CRIU expects an open directory fd, not a path string.
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)

# Send request
s.send(req.SerializeToString())

# Recv response
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))

if resp.type != rpc.DUMP:
    print 'Unexpected msg type'
    sys.exit(-1)
else:
    if resp.success:
        print 'Success'
    else:
        print 'Fail'
        sys.exit(-1)
    # With leave_running the task keeps running; 'restored' is reported
    # by the service in the dump response.
    if resp.dump.restored:
        print 'Restored'
|
TouK/vumi | refs/heads/touk-develop | vumi/dispatchers/simple/tests/__init__.py | 12133432 | |
UCL/pyoracc | refs/heads/master | pyoracc/atf/common/__init__.py | 12133432 | |
qnub/django-cms | refs/heads/develop | cms/test_utils/project/emailuserapp/__init__.py | 12133432 | |
eriser/omaha | refs/heads/master | installers/__init__.py | 12133432 | |
idjaw/horizon | refs/heads/master | horizon/forms/fields.py | 14 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import netaddr
import six
from django.core.exceptions import ValidationError # noqa
from django.core import urlresolvers
from django.forms import fields
from django.forms.util import flatatt # noqa
from django.forms import widgets
from django.utils.encoding import force_text
from django.utils.functional import Promise # noqa
from django.utils import html
from django.utils.translation import ugettext_lazy as _
ip_allowed_symbols_re = re.compile(r'^[a-fA-F0-9:/\.]+$')
IPv4 = 1
IPv6 = 2
class IPField(fields.Field):
    """Form field for entering IP/range values, with validation.

    Supports IPv4/IPv6 in the format:
    .. xxx.xxx.xxx.xxx
    .. xxx.xxx.xxx.xxx/zz
    .. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
    .. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/zz
    and all compressed forms. Also the short forms
    are supported:
    xxx/yy
    xxx.xxx/yy

    .. attribute:: version

        Specifies which IP version to validate,
        valid values are 1 (fields.IPv4), 2 (fields.IPv6) or
        both - 3 (fields.IPv4 | fields.IPv6).
        Defaults to IPv4 (1)

    .. attribute:: mask

        Boolean flag to validate subnet masks along with IP address.
        E.g: 10.0.0.1/32

    .. attribute:: mask_range_from
        Subnet range limitation, e.g. 16
        That means the input mask will be checked to be in the range
        16:max_value. Useful to limit the subnet ranges
        to A/B/C-class networks.
    """
    invalid_format_message = _("Incorrect format for IP address")
    invalid_version_message = _("Invalid version for IP address")
    invalid_mask_message = _("Invalid subnet mask")
    # Maximum prefix lengths per IP version.
    max_v4_mask = 32
    max_v6_mask = 128

    def __init__(self, *args, **kwargs):
        # Pop the custom kwargs before handing the rest to fields.Field.
        self.mask = kwargs.pop("mask", None)
        self.min_mask = kwargs.pop("mask_range_from", 0)
        self.version = kwargs.pop('version', IPv4)

        super(IPField, self).__init__(*args, **kwargs)

    def validate(self, value):
        super(IPField, self).validate(value)
        # Optional empty value: nothing more to check.
        if not value and not self.required:
            return

        try:
            if self.mask:
                # Mask-aware fields accept CIDR notation.
                self.ip = netaddr.IPNetwork(value)
            else:
                self.ip = netaddr.IPAddress(value)
        except Exception:
            raise ValidationError(self.invalid_format_message)

        # self.version is a bitmask (IPv4=1, IPv6=2), so both versions can be
        # allowed simultaneously (IPv4 | IPv6 == 3).
        if not any([self.version & IPv4 > 0 and self.ip.version == 4,
                    self.version & IPv6 > 0 and self.ip.version == 6]):
            raise ValidationError(self.invalid_version_message)

        if self.mask:
            # Prefix length must fall within [min_mask, max for the version].
            if self.ip.version == 4 and \
                    not self.min_mask <= self.ip.prefixlen <= self.max_v4_mask:
                raise ValidationError(self.invalid_mask_message)

            if self.ip.version == 6 and \
                    not self.min_mask <= self.ip.prefixlen <= self.max_v6_mask:
                raise ValidationError(self.invalid_mask_message)

    def clean(self, value):
        super(IPField, self).clean(value)
        # validate() stored the parsed address on self.ip; return its
        # canonical string form ("" for an empty optional field).
        return str(getattr(self, "ip", ""))
class MultiIPField(IPField):
    """Extends IPField to allow comma-separated lists of addresses."""
    def validate(self, value):
        # Validate each comma-separated entry individually with the parent
        # rules, collecting the accepted ones for clean() to re-join.
        self.addresses = []
        if value:
            addresses = value.split(',')
            for ip in addresses:
                super(MultiIPField, self).validate(ip)
                self.addresses.append(ip)
        else:
            super(MultiIPField, self).validate(value)

    def clean(self, value):
        super(MultiIPField, self).clean(value)
        # Return the validated addresses as a normalized CSV string.
        return str(','.join(getattr(self, "addresses", [])))
class SelectWidget(widgets.Select):
    """Customizable select widget, that allows to render
    data-xxx attributes from choices. This widget also
    allows user to specify additional html attributes
    for choices.

    .. attribute:: data_attrs

        Specifies object properties to serialize as
        data-xxx attribute. If passed ('id', ),
        this will be rendered as:
        <option data-id="123">option_value</option>
        where 123 is the value of choice_value.id

    .. attribute:: transform

        A callable used to render the display value
        from the option object.

    .. attribute:: transform_html_attrs

        A callable used to render additional HTML attributes
        for the option object. It returns a dictionary
        containing the html attributes and their values.
        For example, to define a title attribute for the
        choices::

            helpText = { 'Apple': 'This is a fruit',
                      'Carrot': 'This is a vegetable' }

            def get_title(data):
                text = helpText.get(data, None)
                if text:
                    return {'title': text}
                else:
                    return {}

            ....
            ....

            widget=forms.SelectWidget( attrs={'class': 'switchable',
                                         'data-slug': 'source'},
                                    transform_html_attrs=get_title )

            self.fields[<field name>].choices =
                ([
                    ('apple','Apple'),
                    ('carrot','Carrot')
                ])
    """
    def __init__(self, attrs=None, choices=(), data_attrs=(), transform=None,
                 transform_html_attrs=None):
        self.data_attrs = data_attrs
        self.transform = transform
        self.transform_html_attrs = transform_html_attrs
        super(SelectWidget, self).__init__(attrs, choices)

    def render_option(self, selected_choices, option_value, option_label):
        option_value = force_text(option_value)
        other_html = (u' selected="selected"'
                      if option_value in selected_choices else '')

        # Let the caller-supplied hook contribute extra HTML attributes
        # (e.g. title) derived from the option label.
        if callable(self.transform_html_attrs):
            html_attrs = self.transform_html_attrs(option_label)
            other_html += flatatt(html_attrs)

        # When the label is a rich object (not a plain/lazy string),
        # serialize the requested properties as data-xxx attributes.
        if not isinstance(option_label, (six.string_types, Promise)):
            for data_attr in self.data_attrs:
                data_value = html.conditional_escape(
                    force_text(getattr(option_label,
                                       data_attr, "")))
                other_html += ' data-%s="%s"' % (data_attr, data_value)

            if callable(self.transform):
                option_label = self.transform(option_label)

        return u'<option value="%s"%s>%s</option>' % (
            html.escape(option_value), other_html,
            html.conditional_escape(force_text(option_label)))
class DynamicSelectWidget(widgets.Select):
    """A subclass of the ``Select`` widget which renders extra attributes for
    use in callbacks to handle dynamic changes to the available choices.
    """
    _data_add_url_attr = "data-add-item-url"

    def render(self, *args, **kwargs):
        # Inject the "add item" URL as a data attribute so client-side JS
        # can offer an inline-add action next to the select.
        add_item_url = self.get_add_item_url()
        if add_item_url is not None:
            self.attrs[self._data_add_url_attr] = add_item_url
        return super(DynamicSelectWidget, self).render(*args, **kwargs)

    def get_add_item_url(self):
        # add_item_link may be a callable returning a URL directly...
        if callable(self.add_item_link):
            return self.add_item_link()
        try:
            # ...or a URL pattern name to reverse (with optional args).
            if self.add_item_link_args:
                return urlresolvers.reverse(self.add_item_link,
                                            args=self.add_item_link_args)
            else:
                return urlresolvers.reverse(self.add_item_link)
        except urlresolvers.NoReverseMatch:
            # Fall back to treating the value as a literal URL.
            return self.add_item_link
class DynamicChoiceField(fields.ChoiceField):
    """A subclass of ``ChoiceField`` with additional properties that make
    dynamically updating its elements easier.

    Notably, the field declaration takes an extra argument, ``add_item_link``
    which may be a string or callable defining the URL that should be used
    for the "add" link associated with the field.
    """
    widget = DynamicSelectWidget

    def __init__(self,
                 add_item_link=None,
                 add_item_link_args=None,
                 *args,
                 **kwargs):
        super(DynamicChoiceField, self).__init__(*args, **kwargs)
        # Pass the link settings through to the widget instance created by
        # the parent __init__ (see DynamicSelectWidget.get_add_item_url).
        self.widget.add_item_link = add_item_link
        self.widget.add_item_link_args = add_item_link_args
class DynamicTypedChoiceField(DynamicChoiceField, fields.TypedChoiceField):
    """Simple mix of ``DynamicChoiceField`` and ``TypedChoiceField``."""
    # Behavior comes entirely from the two parents via MRO.
    pass
elifesciences/elife-tools | refs/heads/develop | elifetools/tests/fixtures/test_full_award_groups/__init__.py | 12133432 | |
ericholscher/django | refs/heads/master | django/contrib/sitemaps/tests/urls/__init__.py | 12133432 | |
deepsrijit1105/edx-platform | refs/heads/master | openedx/core/djangoapps/self_paced/__init__.py | 12133432 | |
sanoma/django-arctic | refs/heads/develop | example/dashboard/__init__.py | 12133432 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.