repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
rayNymous/nupic | refs/heads/master | tests/unit/nupic/research/monitor_mixin/metric_test.py | 35 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
from nupic.research.monitor_mixin.metric import Metric
from nupic.research.monitor_mixin.trace import CountsTrace, BoolsTrace
class MetricTest(unittest.TestCase):
  """Unit tests for Metric.createFromTrace."""

  def setUp(self):
    # Shared fixture: a counts trace with six data points. The monitor host
    # argument is not exercised by these tests, so the test case itself is
    # passed as a stand-in.
    self.trace = CountsTrace(self, "# active cells")
    self.trace.data = [1, 2, 3, 4, 5, 0]

  def testCreateFromTrace(self):
    # Statistics over all six data points: [1, 2, 3, 4, 5, 0].
    metric = Metric.createFromTrace(self.trace)
    self.assertEqual(metric.title, self.trace.title)
    self.assertEqual(metric.min, 0)
    self.assertEqual(metric.max, 5)
    self.assertEqual(metric.sum, 15)
    self.assertEqual(metric.mean, 2.5)
    self.assertEqual(metric.standardDeviation, 1.707825127659933)

  def testCreateFromTraceExcludeResets(self):
    # With excludeResets, data points at indices where the resets trace is
    # True (indices 0 and 3 here) are expected to be omitted, leaving
    # [2, 3, 5, 0] for the statistics.
    resetTrace = BoolsTrace(self, "resets")
    resetTrace.data = [True, False, False, True, False, False]
    metric = Metric.createFromTrace(self.trace, excludeResets=resetTrace)
    self.assertEqual(metric.title, self.trace.title)
    self.assertEqual(metric.min, 0)
    self.assertEqual(metric.max, 5)
    self.assertEqual(metric.sum, 10)
    self.assertEqual(metric.mean, 2.5)
    self.assertEqual(metric.standardDeviation, 1.8027756377319946)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
fujunwei/chromium-crosswalk | refs/heads/master | third_party/boringssl/update_gypi_and_asm.py | 3 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enumerates the BoringSSL source in src/ and generates two gypi files:
boringssl.gypi and boringssl_tests.gypi."""
import os
import subprocess
import sys
# OS_ARCH_COMBOS maps from OS and platform to the OpenSSL assembly "style" for
# that platform and the extension used by asm files.
# Entry layout: (osname, arch, perlasm_style, extra perlasm flags, asm ext).
OS_ARCH_COMBOS = [
    ('linux', 'arm', 'elf', [], 'S'),
    ('linux', 'aarch64', 'linux64', [], 'S'),
    ('linux', 'x86', 'elf', ['-fPIC', '-DOPENSSL_IA32_SSE2'], 'S'),
    ('linux', 'x86_64', 'elf', [], 'S'),
    ('mac', 'x86', 'macosx', ['-fPIC', '-DOPENSSL_IA32_SSE2'], 'S'),
    ('mac', 'x86_64', 'macosx', [], 'S'),
    ('win', 'x86', 'win32n', ['-DOPENSSL_IA32_SSE2'], 'asm'),
    ('win', 'x86_64', 'nasm', [], 'asm'),
]

# NON_PERL_FILES enumerates assembly files that are not processed by the
# perlasm system. They are merged verbatim into the per-(OS, arch) file lists.
NON_PERL_FILES = {
    ('linux', 'arm'): [
        'src/crypto/poly1305/poly1305_arm_asm.S',
        'src/crypto/chacha/chacha_vec_arm.S',
        'src/crypto/cpu-arm-asm.S',
    ],
}

# Banner written at the top of every generated gypi file.
FILE_HEADER = """# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is created by update_gypi_and_asm.py. Do not edit manually.
"""
def FindCMakeFiles(directory):
  """Returns list of all CMakeLists.txt files recursively in directory."""
  found = []
  for dirpath, _, names in os.walk(directory):
    found.extend(os.path.join(dirpath, name)
                 for name in names if name == 'CMakeLists.txt')
  return found
def NoTests(dent, is_dir):
  """Filter function that can be passed to FindCFiles in order to remove test
  sources."""
  if is_dir:
    # Prune 'test' directories entirely.
    return dent != 'test'
  # Reject test sources and example programs.
  looks_like_test = 'test.' in dent or dent.startswith('example_')
  return not looks_like_test
def OnlyTests(dent, is_dir):
  """Filter function that can be passed to FindCFiles in order to remove
  non-test sources."""
  if is_dir:
    # Descend into every directory; only files are filtered.
    return True
  return dent.startswith('example_') or '_test.' in dent
def FindCFiles(directory, filter_func):
  """Recurses through directory and returns a list of paths to all the C source
  files that pass filter_func.

  Args:
    directory: root directory to walk.
    filter_func: callable(name, is_dir) -> bool. Files for which it returns
        False are skipped; directories for which it returns False are not
        descended into.
  """
  cfiles = []
  for (path, dirnames, filenames) in os.walk(directory):
    for filename in filenames:
      if not filename.endswith('.c') and not filename.endswith('.cc'):
        continue
      if not filter_func(filename, False):
        continue
      cfiles.append(os.path.join(path, filename))
    # Prune rejected directories in place so os.walk does not descend into
    # them. The previous `del dirnames[i]` inside an enumerate loop skipped
    # the element following each deletion, so consecutive rejected
    # directories (e.g. two 'test' siblings) were not all pruned.
    dirnames[:] = [dirname for dirname in dirnames
                   if filter_func(dirname, True)]
  return cfiles
def ExtractPerlAsmFromCMakeFile(cmakefile):
  """Parses the contents of the CMakeLists.txt file passed as an argument and
  returns a list of all the perlasm() directives found in the file."""
  directives = []
  base_dir = os.path.dirname(cmakefile)
  with open(cmakefile) as cmake:
    for raw_line in cmake:
      stripped = raw_line.strip()
      if not stripped.startswith('perlasm('):
        continue
      if not stripped.endswith(')'):
        raise ValueError('Bad perlasm line in %s' % cmakefile)
      # Drop the 'perlasm(' prefix and the ')' suffix, then split on
      # whitespace: perlasm(<output> <input> [extra args...]).
      params = stripped[len('perlasm('):-1].split()
      if len(params) < 2:
        raise ValueError('Bad perlasm line in %s' % cmakefile)
      directives.append({
          'extra_args': params[2:],
          'input': os.path.join(base_dir, params[1]),
          'output': os.path.join(base_dir, params[0]),
      })
  return directives
def ReadPerlAsmOperations():
  """Returns a list of all perlasm() directives found in CMake config files in
  src/."""
  operations = []
  for cmakefile in FindCMakeFiles('src'):
    operations.extend(ExtractPerlAsmFromCMakeFile(cmakefile))
  return operations
def PerlAsm(output_filename, input_filename, perlasm_style, extra_args):
  """Runs a perlasm script and writes its output into output_filename."""
  # Create the destination directory if it does not exist yet.
  out_dir = os.path.dirname(output_filename)
  if not os.path.isdir(out_dir):
    os.makedirs(out_dir)
  cmd = ['perl', input_filename, perlasm_style] + extra_args
  generated = subprocess.check_output(cmd)
  with open(output_filename, 'w+') as out_file:
    out_file.write(generated)
def ArchForAsmFilename(filename):
  """Returns the architectures that a given asm file should be compiled for
  based on substrings in the filename."""
  # Order matters: more specific markers are checked before less specific
  # ones ('x86_64' before 'x86'; 'armx'/'armv8' before plain 'arm').
  if 'x86_64' in filename or 'avx2' in filename:
    return ['x86_64']
  if ('x86' in filename and 'x86_64' not in filename) or '586' in filename:
    return ['x86']
  if 'armx' in filename:
    # Shared perlasm source built for both 32- and 64-bit ARM.
    return ['arm', 'aarch64']
  if 'armv8' in filename:
    return ['aarch64']
  if 'arm' in filename:
    return ['arm']
  raise ValueError('Unknown arch for asm filename: ' + filename)
def WriteAsmFiles(perlasms):
  """Generates asm files from perlasm directives for each supported OS x
  platform combination.

  Returns a dict mapping (osname, arch) to the list of asm file paths
  (generated plus the checked-in NON_PERL_FILES) for that combination.
  """
  asmfiles = {}
  for osarch in OS_ARCH_COMBOS:
    (osname, arch, perlasm_style, extra_args, asm_ext) = osarch
    key = (osname, arch)
    # Generated files live under a per-combination directory, e.g. linux-arm/.
    outDir = '%s-%s' % key
    for perlasm in perlasms:
      filename = os.path.basename(perlasm['input'])
      output = perlasm['output']
      if not output.startswith('src'):
        raise ValueError('output missing src: %s' % output)
      # Mirror the src/ layout under the per-OS/arch output directory
      # (strip the leading 'src/' prefix).
      output = os.path.join(outDir, output[4:])
      output = output.replace('${ASM_EXT}', asm_ext)
      # Only run perlasm for files that target this architecture.
      if arch in ArchForAsmFilename(filename):
        PerlAsm(output, perlasm['input'], perlasm_style,
                perlasm['extra_args'] + extra_args)
        asmfiles.setdefault(key, []).append(output)
  # Merge in assembly files that are checked in rather than generated.
  # (iteritems: this script targets Python 2.)
  for (key, non_perl_asm_files) in NON_PERL_FILES.iteritems():
    asmfiles.setdefault(key, []).extend(non_perl_asm_files)
  return asmfiles
def PrintVariableSection(out, name, files):
  """Writes a gypi variable entry named |name| listing |files| (sorted) to
  |out|."""
  out.write(' \'%s\': [\n' % name)
  for path in sorted(files):
    out.write(' \'%s\',\n' % path)
  out.write(' ],\n')
def main():
  """Regenerates boringssl.gypi and boringssl_tests.gypi and the per-platform
  asm files. Returns 0 on success (process exit code)."""
  # Library (non-test) sources for the crypto and ssl components.
  crypto_c_files = FindCFiles(os.path.join('src', 'crypto'), NoTests)
  ssl_c_files = FindCFiles(os.path.join('src', 'ssl'), NoTests)
  # Generate err_data.c
  with open('err_data.c', 'w+') as err_data:
    subprocess.check_call(['go', 'run', 'err_data_generate.go'],
                          cwd=os.path.join('src', 'crypto', 'err'),
                          stdout=err_data)
  crypto_c_files.append('err_data.c')
  with open('boringssl.gypi', 'w+') as gypi:
    gypi.write(FILE_HEADER + '{\n \'variables\': {\n')
    PrintVariableSection(
        gypi, 'boringssl_lib_sources', crypto_c_files + ssl_c_files)
    perlasms = ReadPerlAsmOperations()
    # One sources variable per (OS, arch) combination; sorted for stable
    # output. (iteritems: this script targets Python 2.)
    for ((osname, arch), asm_files) in sorted(
        WriteAsmFiles(perlasms).iteritems()):
      PrintVariableSection(gypi, 'boringssl_%s_%s_sources' %
                           (osname, arch), asm_files)
    gypi.write(' }\n}\n')
  test_c_files = FindCFiles(os.path.join('src', 'crypto'), OnlyTests)
  test_c_files += FindCFiles(os.path.join('src', 'ssl'), OnlyTests)
  with open('boringssl_tests.gypi', 'w+') as test_gypi:
    test_gypi.write(FILE_HEADER + '{\n \'targets\': [\n')
    test_names = []
    # One executable gyp target per test source file.
    for test in sorted(test_c_files):
      test_name = 'boringssl_%s' % os.path.splitext(os.path.basename(test))[0]
      test_gypi.write(""" {
'target_name': '%s',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'%s',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},\n""" % (test_name, test))
      test_names.append(test_name)
    test_names.sort()
    # Emit the list of all test targets as a variable for consumers.
    test_gypi.write(""" ],
'variables': {
'boringssl_test_targets': [\n""")
    for test in test_names:
      test_gypi.write(""" '%s',\n""" % test)
    test_gypi.write(' ],\n }\n}\n')
  return 0
# Script entry point: regenerate the gypi files and exit with main()'s status.
if __name__ == '__main__':
  sys.exit(main())
|
geoalchimista/chflux | refs/heads/master | chflux/io/parsers.py | 1 | """PyChamberFlux I/O module containing a collection of data parsers."""
import pandas as pd
# A collection of parsers for timestamps stored in multiple columns.
# Supports only the ISO 8601 format (year-month-day).
# Does not support month-first (American) or day-first (European) format.
# Keys name the columns present, in order: year, month, day, hour, minute,
# second, fractional second.
timestamp_parsers = {
    # date only
    'ymd': lambda s: pd.to_datetime(s, format='%Y %m %d'),
    # down to minute
    'ymdhm': lambda s: pd.to_datetime(s, format='%Y %m %d %H %M'),
    # down to second
    'ymdhms': lambda s: pd.to_datetime(s, format='%Y %m %d %H %M %S'),
    # fractional seconds via '%f' (presumably intended for sub-second,
    # possibly nanosecond, input — verify pandas' precision for this format)
    'ymdhmsf': lambda s: pd.to_datetime(s, format='%Y %m %d %H %M %S %f')
}
def parse_timestamp():
    """Placeholder for a timestamp parser; not yet implemented."""
    pass
|
benreynwar/rfgnocchi | refs/heads/master | xilinx/__init__.py | 12133432 | |
theshteves/tweet-the-wolf | refs/heads/master | pips/cryptography/hazmat/primitives/kdf/pbkdf2.py | 70 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import PBKDF2HMACBackend
from cryptography.hazmat.primitives import constant_time
from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
@utils.register_interface(KeyDerivationFunction)
class PBKDF2HMAC(object):
    """PBKDF2 key derivation using HMAC as the pseudorandom function.

    Instances are single-use: derive() (or verify(), which calls it) may
    only be invoked once.
    """

    def __init__(self, algorithm, length, salt, iterations, backend):
        # The backend must implement the PBKDF2-HMAC interface...
        if not isinstance(backend, PBKDF2HMACBackend):
            raise UnsupportedAlgorithm(
                "Backend object does not implement PBKDF2HMACBackend.",
                _Reasons.BACKEND_MISSING_INTERFACE
            )
        # ...and must support the requested hash algorithm specifically.
        if not backend.pbkdf2_hmac_supported(algorithm):
            raise UnsupportedAlgorithm(
                "{0} is not supported for PBKDF2 by this backend.".format(
                    algorithm.name),
                _Reasons.UNSUPPORTED_HASH
            )
        self._used = False  # set to True after the first derive()
        self._algorithm = algorithm
        self._length = length  # desired derived-key length in bytes
        if not isinstance(salt, bytes):
            raise TypeError("salt must be bytes.")
        self._salt = salt
        self._iterations = iterations
        self._backend = backend

    def derive(self, key_material):
        """Derives and returns a key from *key_material* (bytes).

        Raises AlreadyFinalized on a second call and TypeError for
        non-bytes input.
        """
        if self._used:
            raise AlreadyFinalized("PBKDF2 instances can only be used once.")
        # Mark used before delegating so a failed backend call still
        # finalizes the instance.
        self._used = True
        if not isinstance(key_material, bytes):
            raise TypeError("key_material must be bytes.")
        return self._backend.derive_pbkdf2_hmac(
            self._algorithm,
            self._length,
            self._salt,
            self._iterations,
            key_material
        )

    def verify(self, key_material, expected_key):
        """Derives a key from *key_material* and compares it to
        *expected_key* using a constant-time comparison, raising InvalidKey
        on mismatch."""
        derived_key = self.derive(key_material)
        if not constant_time.bytes_eq(derived_key, expected_key):
            raise InvalidKey("Keys do not match.")
|
machinecoin-project/machinecoin | refs/heads/0.17 | test/functional/p2p_segwit.py | 2 | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from binascii import hexlify
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_witness_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
wait_until,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
connect_nodes,
disconnect_nodes,
get_bip9_status,
hex_str_to_bytes,
sync_blocks,
sync_mempools,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
# BIP9 signalling period length in blocks (used when advancing states below).
VB_PERIOD = 144
# Top bits that mark a block version as using BIP9 versionbits signalling.
VB_TOP_BITS = 0x20000000
# Sigop-cost limit used by the sigops subtest.
MAX_SIGOP_COST = 80000
class UTXO():
    """A spendable anyone-can-spend output tracked for use in later tests."""

    def __init__(self, sha256, n, value):
        # Outpoint (txid hash as int, output index) plus the output amount.
        self.sha256, self.n, self.nValue = sha256, n, value
def get_p2pkh_script(pubkeyhash):
    """Get the script associated with a P2PKH."""
    # Standard pay-to-pubkey-hash template around the given hash160.
    ops = [CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash,
           CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)]
    return CScript(ops)
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
    """Add signature for a P2PK witness program."""
    sighash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
    # DER signature with the sighash type byte appended.
    signature = key.sign(sighash) + chr(hashtype).encode('latin-1')
    tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
    tx_to.rehash()
def get_virtual_size(witness_block):
    """Calculate the virtual size of a witness block.

    Virtual size is base + witness/4."""
    stripped_len = len(witness_block.serialize(with_witness=False))
    full_len = len(witness_block.serialize(with_witness=True))
    # The "+3" rounds the division up.
    return int((3 * stripped_len + full_len + 3) / 4)
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
    """Send a transaction to the node and check that it's accepted to the mempool

    - Submit the transaction over the p2p interface
    - use the getrawmempool rpc to check for acceptance."""
    message = msg_witness_tx(tx) if with_witness else msg_tx(tx)
    p2p.send_message(message)
    p2p.sync_with_ping()
    assert_equal(tx.hash in node.getrawmempool(), accepted)
    if reason is not None and not accepted:
        # On rejection, also verify the node sent the expected reject reason.
        with mininode_lock:
            assert_equal(p2p.last_message["reject"].reason, reason)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
    """Send a block to the node and check that it's accepted

    - Submit the block over the p2p interface
    - use the getbestblockhash rpc to check for acceptance."""
    message = msg_witness_block(block) if with_witness else msg_block(block)
    p2p.send_message(message)
    p2p.sync_with_ping()
    assert_equal(node.getbestblockhash() == block.hash, accepted)
    if reason is not None and not accepted:
        # On rejection, also verify the node sent the expected reject reason.
        with mininode_lock:
            assert_equal(p2p.last_message["reject"].reason, reason)
class TestP2PConn(P2PInterface):
    """P2P connection that records every getdata request received and offers
    helpers for announcing transactions/blocks and fetching blocks."""

    def __init__(self):
        super().__init__()
        # Hashes of all inventory items the node has requested from us.
        self.getdataset = set()

    def on_getdata(self, message):
        # Record each requested inventory hash for later inspection.
        for inv in message.inv:
            self.getdataset.add(inv.hash)

    def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
        """Announce a tx via inv and wait for (or assert the absence of) a
        getdata response."""
        with mininode_lock:
            self.last_message.pop("getdata", None)
        self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
        if success:
            self.wait_for_getdata(timeout)
        else:
            # No request expected: wait out the timeout, then check that no
            # getdata arrived.
            time.sleep(timeout)
            assert not self.last_message.get("getdata")

    def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
        """Announce a block (by header, or by inv followed by the header the
        node asks for) and wait for the resulting getdata."""
        with mininode_lock:
            self.last_message.pop("getdata", None)
            self.last_message.pop("getheaders", None)
        msg = msg_headers()
        msg.headers = [CBlockHeader(block)]
        if use_header:
            self.send_message(msg)
        else:
            # An inv announcement is answered with getheaders first; reply
            # with the header, then expect getdata.
            self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
            self.wait_for_getheaders()
            self.send_message(msg)
        self.wait_for_getdata()

    def request_block(self, blockhash, inv_type, timeout=60):
        """Request a block by hash with the given inv type and return the
        received block."""
        with mininode_lock:
            self.last_message.pop("block", None)
        self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
        self.wait_for_block(blockhash, timeout)
        return self.last_message["block"].block
class SegWitTest(MachinecoinTestFramework):
def set_test_params(self):
    """Configure a clean 3-node chain with per-node segwit parameters."""
    self.setup_clean_chain = True
    self.num_nodes = 3
    # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
    # Node 0: segwit available; node 1: additionally fRequireStandard
    # (-acceptnonstdtxn=0); node 2: segwit deployment window closed
    # (timeout 0), so it never activates.
    self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def skip_test_if_missing_module(self):
    # The test funds transactions via wallet RPCs, so a wallet is required.
    self.skip_if_no_wallet()
def setup_network(self):
    """Start the nodes and connect node0 to both peers (no node1<->node2
    link)."""
    self.setup_nodes()
    connect_nodes(self.nodes[0], 1)
    connect_nodes(self.nodes[0], 2)
    self.sync_all()
# Helper functions
def build_next_block(self, version=4):
    """Build a block on top of node0's tip."""
    tip = self.nodes[0].getbestblockhash()
    height = self.nodes[0].getblockcount() + 1
    # One past the median time so the block's timestamp is valid.
    block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
    block = create_block(int(tip, 16), create_coinbase(height), block_time)
    block.version = version
    block.rehash()
    return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
    """Add list of transactions to block, adds witness commitment, then solves."""
    block.vtx.extend(tx_list)
    # The witness commitment must be (re)computed after changing vtx.
    add_witness_commitment(block, nonce)
    block.solve()
def run_test(self):
    """Run every subtest, advancing segwit's BIP9 state
    (defined -> started -> locked_in -> active) between groups."""
    # Setup the p2p connections
    # self.test_node sets NODE_WITNESS|NODE_NETWORK
    self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
    # self.old_node sets only NODE_NETWORK
    self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
    # self.std_node is for testing node1 (fRequireStandard=true)
    self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
    assert self.test_node.nServices & NODE_WITNESS != 0
    # Keep a place to store utxo's that can be used in later tests
    self.utxo = []
    # Segwit status 'defined'
    self.segwit_status = 'defined'
    self.test_non_witness_transaction()
    self.test_unnecessary_witness_before_segwit_activation()
    self.test_v0_outputs_arent_spendable()
    self.test_block_relay()
    self.advance_to_segwit_started()
    # Segwit status 'started'
    self.test_getblocktemplate_before_lockin()
    self.advance_to_segwit_lockin()
    # Segwit status 'locked_in'
    self.test_unnecessary_witness_before_segwit_activation()
    self.test_witness_tx_relay_before_segwit_activation()
    self.test_block_relay()
    self.test_standardness_v0()
    self.advance_to_segwit_active()
    # Segwit status 'active'
    self.test_p2sh_witness()
    self.test_witness_commitments()
    self.test_block_malleability()
    self.test_witness_block_size()
    self.test_submit_block()
    self.test_extra_witness_data()
    self.test_max_witness_push_length()
    self.test_max_witness_program_length()
    self.test_witness_input_length()
    self.test_block_relay()
    self.test_tx_relay_after_segwit_activation()
    self.test_standardness_v0()
    self.test_segwit_versions()
    self.test_premature_coinbase_witness_spend()
    self.test_uncompressed_pubkey()
    self.test_signature_version_1()
    self.test_non_standard_witness_blinding()
    self.test_non_standard_witness()
    self.test_upgrade_after_activation()
    self.test_witness_sigops()
# Individual tests
def subtest(func):  # noqa: N805
    """Wraps the subtests for logging and state assertions.

    Used as a decorator inside the class body, so the wrapped function is
    received in place of self (hence the noqa)."""
    def func_wrapper(self, *args, **kwargs):
        self.log.info("Subtest: {} (Segwit status = {})".format(func.__name__, self.segwit_status))
        # Assert segwit status is as expected
        assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
        func(self, *args, **kwargs)
        # Each subtest should leave some utxos for the next subtest
        assert self.utxo
        sync_blocks(self.nodes)
        # Assert segwit status is as expected at end of subtest
        assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
    return func_wrapper
@subtest
def test_non_witness_transaction(self):
    """See if sending a regular transaction works, and create a utxo to use in later tests."""
    # Mine a block with an anyone-can-spend coinbase,
    # let it mature, then try to spend it.
    block = self.build_next_block(version=1)
    block.solve()
    self.test_node.send_message(msg_block(block))
    self.test_node.sync_with_ping()  # make sure the block was processed
    txid = block.vtx[0].sha256
    self.nodes[0].generate(99)  # let the block mature
    # Create a transaction that spends the coinbase
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
    tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
    tx.calc_sha256()
    # Check that serializing it with or without witness is the same
    # This is a sanity check of our testing framework.
    assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
    self.test_node.send_message(msg_witness_tx(tx))
    self.test_node.sync_with_ping()  # make sure the tx was processed
    assert(tx.hash in self.nodes[0].getrawmempool())
    # Save this transaction for later
    self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
    self.nodes[0].generate(1)
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
    """Verify that blocks with witnesses are rejected before activation."""
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
    # Attach a (gratuitous) witness to an otherwise ordinary spend.
    tx.wit.vtxinwit.append(CTxInWitness())
    tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
    # Verify the hash with witness differs from the txid
    # (otherwise our testing framework must be broken!)
    tx.rehash()
    assert(tx.sha256 != tx.calc_sha256(with_witness=True))
    # Construct a segwit-signaling block that includes the transaction.
    block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
    self.update_witness_block_with_transactions(block, [tx])
    # Sending witness data before activation is not allowed (anti-spam
    # rule).
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
    wait_until(lambda: 'reject' in self.test_node.last_message and self.test_node.last_message["reject"].reason == b"unexpected-witness")
    # But it should not be permanently marked bad...
    # Resend without witness information.
    self.test_node.send_message(msg_block(block))
    self.test_node.sync_with_ping()
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)
    # Update our utxo list; we spent the first entry.
    self.utxo.pop(0)
    self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
    """Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.

    This is true regardless of segwit activation.
    Also test that we don't ask for blocks from unupgraded peers."""
    blocktype = 2 | MSG_WITNESS_FLAG
    # test_node has set NODE_WITNESS, so all getdata requests should be for
    # witness blocks.
    # Test announcing a block via inv results in a getdata, and that
    # announcing a version 4 or random VB block with a header results in a getdata
    block1 = self.build_next_block()
    block1.solve()
    self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
    assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
    test_witness_block(self.nodes[0], self.test_node, block1, True)
    block2 = self.build_next_block(version=4)
    block2.solve()
    self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
    assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
    test_witness_block(self.nodes[0], self.test_node, block2, True)
    # A versionbits block with an unrelated bit set should behave the same.
    block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
    block3.solve()
    self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
    assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
    test_witness_block(self.nodes[0], self.test_node, block3, True)
    # Check that we can getdata for witness blocks or regular blocks,
    # and the right thing happens.
    if self.segwit_status != 'active':
        # Before activation, we should be able to request old blocks with
        # or without witness, and they should be the same.
        chain_height = self.nodes[0].getblockcount()
        # Pick 10 random blocks on main chain, and verify that getdata's
        # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
        all_heights = list(range(chain_height + 1))
        random.shuffle(all_heights)
        all_heights = all_heights[0:10]
        for height in all_heights:
            block_hash = self.nodes[0].getblockhash(height)
            rpc_block = self.nodes[0].getblock(block_hash, False)
            block_hash = int(block_hash, 16)
            block = self.test_node.request_block(block_hash, 2)
            wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
            assert_equal(block.serialize(True), wit_block.serialize(True))
            assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
    else:
        # After activation, witness blocks and non-witness blocks should
        # be different. Verify rpc getblock() returns witness blocks, while
        # getdata respects the requested type.
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [])
        # This gives us a witness commitment.
        assert(len(block.vtx[0].wit.vtxinwit) == 1)
        assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Now try to retrieve it...
        rpc_block = self.nodes[0].getblock(block.hash, False)
        non_wit_block = self.test_node.request_block(block.sha256, 2)
        wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
        assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
        assert_equal(wit_block.serialize(False), non_wit_block.serialize())
        assert_equal(wit_block.serialize(True), block.serialize(True))
        # Test size, vsize, weight
        rpc_details = self.nodes[0].getblock(block.hash, True)
        assert_equal(rpc_details["size"], len(block.serialize(True)))
        assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
        weight = 3 * len(block.serialize(False)) + len(block.serialize(True))
        assert_equal(rpc_details["weight"], weight)
    # Upgraded node should not ask for blocks from unupgraded
    block4 = self.build_next_block(version=4)
    block4.solve()
    self.old_node.getdataset = set()
    # Blocks can be requested via direct-fetch (immediately upon processing the announcement)
    # or via parallel download (with an indeterminate delay from processing the announcement)
    # so to test that a block is NOT requested, we could guess a time period to sleep for,
    # and then check. We can avoid the sleep() by taking advantage of transaction getdata's
    # being processed after block getdata's, and announce a transaction as well,
    # and then check to see if that particular getdata has been received.
    # Since 0.14, inv's will only be responded to with a getheaders, so send a header
    # to announce this block.
    msg = msg_headers()
    msg.headers = [CBlockHeader(block4)]
    self.old_node.send_message(msg)
    self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
    assert(block4.sha256 not in self.old_node.getdataset)
@subtest
def test_v0_outputs_arent_spendable(self):
    """Test that v0 outputs aren't spendable before segwit activation.

    ~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
    backdated so that it applies to all blocks, going back to the genesis
    block.

    Consequently, version 0 witness outputs are never spendable without
    witness, and so can't be spent before segwit activation (the point at which
    blocks are permitted to contain witnesses)."""
    # node2 doesn't need to be connected for this test.
    # (If it's connected, node0 may propogate an invalid block to it over
    # compact blocks and the nodes would have inconsistent tips.)
    disconnect_nodes(self.nodes[0], 2)
    # Create two outputs, a p2wsh and p2sh-p2wsh
    witness_program = CScript([OP_TRUE])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])
    p2sh_pubkey = hash160(script_pubkey)
    p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
    value = self.utxo[0].nValue // 3
    tx = CTransaction()
    tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
    tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
    # Third output: plain anyone-can-spend, used to carry value forward.
    tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
    tx.rehash()
    txid = tx.sha256
    # Add it to a block
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    # Verify that segwit isn't activated. A block serialized with witness
    # should be rejected prior to activation.
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason=b'unexpected-witness')
    # Now send the block without witness. It should be accepted
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
    # Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
    p2wsh_tx = CTransaction()
    p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
    p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
    p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
    p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
    p2wsh_tx.rehash()
    p2sh_p2wsh_tx = CTransaction()
    p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
    p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
    p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
    p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
    p2sh_p2wsh_tx.rehash()
    for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        # When the block is serialized with a witness, the block will be rejected because witness
        # data isn't allowed in blocks that don't commit to witness data.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason=b'unexpected-witness')
        # When the block is serialized without witness, validation fails because the transaction is
        # invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
        # without a witness is invalid).
        # Note: The reject reason for this failure could be
        # 'block-validation-failed' (if script check threads > 1) or
        # 'non-mandatory-script-verify-flag (Witness program was passed an
        # empty witness)' (otherwise).
        # TODO: support multiple acceptable reject reasons.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
    connect_nodes(self.nodes[0], 2)
    self.utxo.pop(0)
    self.utxo.append(UTXO(txid, 2, value))
@subtest
def advance_to_segwit_started(self):
    """Mine enough blocks for segwit's vb state to be 'started'."""
    height = self.nodes[0].getblockcount()
    # Will need to rewrite the tests here if we are past the first period
    assert(height < VB_PERIOD - 1)
    # Advance to end of period, status should now be 'started'
    self.nodes[0].generate(VB_PERIOD - height - 1)
    assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
    self.segwit_status = 'started'
    @subtest
    def test_getblocktemplate_before_lockin(self):
        """Check getblocktemplate segwit signalling/commitment before lock-in.

        A segwit-aware node (node0) should signal the versionbit; a
        non-segwit node (node2) should not.  Neither should produce a
        default witness commitment unless the "segwit" rule is requested,
        and then only the segwit-aware node provides one.
        """
        # Node0 is segwit aware, node2 is not.
        for node in [self.nodes[0], self.nodes[2]]:
            gbt_results = node.getblocktemplate()
            block_version = gbt_results['version']
            # If we're not indicating segwit support, we will still be
            # signalling for segwit activation.
            assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
            # If we don't specify the segwit rule, then we won't get a default
            # commitment.
            assert('default_witness_commitment' not in gbt_results)
        # Workaround:
        # Can either change the tip, or change the mempool and wait 5 seconds
        # to trigger a recomputation of getblocktemplate.
        txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
        # Using mocktime lets us avoid sleep()
        sync_mempools(self.nodes)
        self.nodes[0].setmocktime(int(time.time()) + 10)
        self.nodes[2].setmocktime(int(time.time()) + 10)
        for node in [self.nodes[0], self.nodes[2]]:
            gbt_results = node.getblocktemplate({"rules": ["segwit"]})
            block_version = gbt_results['version']
            if node == self.nodes[2]:
                # If this is a non-segwit node, we should still not get a witness
                # commitment, nor a version bit signalling segwit.
                assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
                assert('default_witness_commitment' not in gbt_results)
            else:
                # For segwit-aware nodes, check the version bit and the witness
                # commitment are correct.
                assert(block_version & (1 << VB_WITNESS_BIT) != 0)
                assert('default_witness_commitment' in gbt_results)
                witness_commitment = gbt_results['default_witness_commitment']
                # Check that default_witness_commitment is present.
                # Coinbase wtxid is defined as all-zeros; the only other tx
                # in the template is the one sent above.
                witness_root = CBlock.get_merkle_root([ser_uint256(0),
                                                       ser_uint256(txid)])
                script = get_witness_script(witness_root, 0)
                assert_equal(witness_commitment, bytes_to_hex_str(script))
        # undo mocktime
        self.nodes[0].setmocktime(0)
        self.nodes[2].setmocktime(0)
@subtest
def advance_to_segwit_lockin(self):
"""Mine enough blocks to lock in segwit, but don't activate."""
height = self.nodes[0].getblockcount()
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD - 1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.segwit_status = 'locked_in'
    @subtest
    def test_witness_tx_relay_before_segwit_activation(self):
        """Test relay of a witness-bearing tx before segwit activation.

        A tx that doesn't need a witness but carries one should be rejected
        (premature witness) without poisoning the recently-rejected filter,
        and non-NODE_WITNESS peers should only be asked for the non-witness
        serialization.
        """
        # Generate a transaction that doesn't require a witness, but send it
        # with a witness. Should be rejected for premature-witness, but should
        # not be added to recently rejected list.
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
        tx.rehash()
        tx_hash = tx.sha256
        tx_value = tx.vout[0].nValue
        # Verify that if a peer doesn't set nServices to include NODE_WITNESS,
        # the getdata is just for the non-witness portion.
        self.old_node.announce_tx_and_wait_for_getdata(tx)
        # inv type 1 == MSG_TX (plain, non-witness transaction request)
        assert(self.old_node.last_message["getdata"].inv[0].type == 1)
        # Since we haven't delivered the tx yet, inv'ing the same tx from
        # a witness transaction ought not result in a getdata.
        self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
        # Delivering this transaction with witness should fail (no matter who
        # it's from)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
        # But eliminating the witness should fix it
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
        # Cleanup: mine the first transaction and update utxo
        self.nodes[0].generate(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx_hash, 0, tx_value))
    @subtest
    def test_standardness_v0(self):
        """Test V0 txout standardness.
        V0 segwit outputs and inputs are always standard.
        V0 segwit inputs may only be mined after activation, but not before."""
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        p2sh_pubkey = hash160(witness_program)
        p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
        # First prepare a p2sh output (so that spending it will pass standardness)
        p2sh_tx = CTransaction()
        p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
        p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
        p2sh_tx.rehash()
        # Mine it on test_node to create the confirmed output.
        test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # Now test standardness of v0 P2WSH outputs.
        # Start by creating a transaction with two outputs.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
        tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
        tx.vout.append(CTxOut(8000, script_pubkey))  # Might burn this later
        tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER  # Just to have the option to bump this tx from the mempool
        tx.rehash()
        # This is always accepted, since the mempool policy is to consider segwit as always active
        # and thus allow segwit outputs
        test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
        # Now create something that looks like a P2PKH output. This won't be spendable.
        # (v0 program of a hash160 — 20 bytes — so it parses as P2WPKH-shaped.)
        script_pubkey = CScript([OP_0, hash160(witness_hash)])
        tx2 = CTransaction()
        # tx was accepted, so we spend the second output.
        tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
        tx2.vout = [CTxOut(7000, script_pubkey)]
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
        # Now update self.utxo for later tests.
        tx3 = CTransaction()
        # tx and tx2 were both accepted. Don't bother trying to reclaim the
        # P2PKH output; just send tx's first output back to an anyone-can-spend.
        sync_mempools([self.nodes[0], self.nodes[1]])
        tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
        tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
        tx3.wit.vtxinwit.append(CTxInWitness())
        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx3.rehash()
        if self.segwit_status != 'active':
            # Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
            # in blocks and the tx is impossible to mine right now.
            assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
            # Create the same output as tx3, but by replacing tx
            # (BIP125 replacement of tx, whose nSequence opted in above).
            tx3_out = tx3.vout[0]
            tx3 = tx
            tx3.vout = [tx3_out]
            tx3.rehash()
            assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height % VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
self.segwit_status = 'active'
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older machinecoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the script_sig, should also fail.
spend_tx.vin[0].script_sig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
    @subtest
    def test_witness_commitments(self):
        """Test witness commitments.
        This test can only be run after segwit has activated."""
        # First try a correct witness commitment.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Test the test -- witness serialization should be different
        assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
        # This empty block should be valid.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Try to tweak the nonce
        block_2 = self.build_next_block()
        add_witness_commitment(block_2, nonce=28)
        block_2.solve()
        # The commitment should have changed!
        assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
        # This should also be valid.
        test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
        # Now test commitments with actual transactions
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        # Let's construct a witness program
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        # tx2 will spend tx1, and send back to a regular anyone-can-spend address
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        block_3 = self.build_next_block()
        self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
        # Add an extra OP_RETURN output that matches the witness commitment template,
        # even though it has extra data after the incorrect commitment.
        # This block should fail.
        block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        block_3.solve()
        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
        # Add a different commitment with different nonce, but in the
        # right location, and with some funds burned(!).
        # This should succeed (nValue shouldn't affect finding the
        # witness commitment).
        add_witness_commitment(block_3, nonce=0)
        block_3.vtx[0].vout[0].nValue -= 1
        block_3.vtx[0].vout[-1].nValue += 1
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        assert(len(block_3.vtx[0].vout) == 4)  # 3 OP_returns
        block_3.solve()
        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
        # Finally test that a block with no witness transactions can
        # omit the commitment.
        block_4 = self.build_next_block()
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
        tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
        tx3.rehash()
        block_4.vtx.append(tx3)
        block_4.hashMerkleRoot = block_4.calc_merkle_root()
        block_4.solve()
        test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
        # Update available utxo's for use in later test.
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
    @subtest
    def test_block_malleability(self):
        """Test that witness-malleated blocks aren't permanently marked bad.

        Neither an oversized coinbase witness nor a wrong witness reserved
        value should cause the (otherwise-valid) block hash to be banned.
        """
        # Make sure that a block that has too big a virtual size
        # because of a too-large coinbase witness is not permanently
        # marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
        assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
        # We can't send over the p2p network, because this is too big to relay
        # TODO: repeat this test with a block that can be relayed
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert(self.nodes[0].getbestblockhash() != block.hash)
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
        assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        # The same block hash must now be accepted once the witness is fixed.
        assert(self.nodes[0].getbestblockhash() == block.hash)
        # Now make sure that malleating the witness reserved value doesn't
        # result in a block permanently marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Change the nonce -- should not cause the block to be permanently
        # failed
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Changing the witness reserved value doesn't change the block hash
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    @subtest
    def test_witness_block_size(self):
        """Test that witness-bearing blocks are limited by virtual size.

        Builds a block whose virtual size is exactly MAX_BLOCK_BASE_SIZE + 1
        (rejected), then shrinks the witness by one byte (accepted).
        """
        # TODO: Test that non-witness carrying blocks can't exceed 1MB
        # Skipping this test for now; this is covered in p2p-fullblocktest.py
        # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
        block = self.build_next_block()
        assert(len(self.utxo) > 0)
        # Create a P2WSH transaction.
        # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
        # This should give us plenty of room to tweak the spending tx's
        # virtual size.
        NUM_DROPS = 200  # 201 max ops per script!
        NUM_OUTPUTS = 50
        witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
        witness_hash = uint256_from_str(sha256(witness_program))
        script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
        prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
        value = self.utxo[0].nValue
        parent_tx = CTransaction()
        parent_tx.vin.append(CTxIn(prevout, b""))
        child_value = int(value / NUM_OUTPUTS)
        for i in range(NUM_OUTPUTS):
            parent_tx.vout.append(CTxOut(child_value, script_pubkey))
        parent_tx.vout[0].nValue -= 50000
        assert(parent_tx.vout[0].nValue > 0)
        parent_tx.rehash()
        child_tx = CTransaction()
        for i in range(NUM_OUTPUTS):
            child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
        child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
        for i in range(NUM_OUTPUTS):
            child_tx.wit.vtxinwit.append(CTxInWitness())
            child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
        child_tx.rehash()
        self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
        vsize = get_virtual_size(block)
        additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
        i = 0
        while additional_bytes > 0:
            # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
            # The +1 on the final iteration pushes the block one weight unit
            # over the limit; 55 caps each push below MAX_SCRIPT_ELEMENT_SIZE.
            extra_bytes = min(additional_bytes + 1, 55)
            block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
            additional_bytes -= extra_bytes
            i += 1
        block.vtx[0].vout.pop()  # Remove old commitment
        add_witness_commitment(block)
        block.solve()
        vsize = get_virtual_size(block)
        assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
        # Make sure that our test case would exceed the old max-network-message
        # limit
        assert(len(block.serialize(True)) > 2 * 1024 * 1024)
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now resize the second transaction to make the block fit.
        cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
        block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
        block.vtx[0].vout.pop()
        add_witness_commitment(block)
        block.solve()
        assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update available utxo's
        self.utxo.pop(0)
        self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
    @subtest
    def test_submit_block(self):
        """Test that submitblock adds the nonce automatically when possible."""
        block = self.build_next_block()
        # Try using a custom nonce and then don't supply it.
        # This shouldn't possibly work.
        add_witness_commitment(block, nonce=1)
        block.vtx[0].wit = CTxWitness()  # drop the nonce
        block.solve()
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert(self.nodes[0].getbestblockhash() != block.hash)
        # Now redo commitment with the standard nonce, but let machinecoind fill it in.
        add_witness_commitment(block, nonce=0)
        block.vtx[0].wit = CTxWitness()
        block.solve()
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
        # This time, add a tx with non-empty witness, but don't supply
        # the commitment.
        block_2 = self.build_next_block()
        add_witness_commitment(block_2)
        block_2.solve()
        # Drop commitment and nonce -- submitblock should not fill in.
        block_2.vtx[0].vout.pop()
        block_2.vtx[0].wit = CTxWitness()
        self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
        # Tip should not advance!
        assert(self.nodes[0].getbestblockhash() != block_2.hash)
    @subtest
    def test_extra_witness_data(self):
        """Test extra witness data in a transaction.

        Extra witness items (or extra scriptSig data) on an input that
        spends a witness output must be rejected; extras on a non-witness
        input are fine.
        """
        block = self.build_next_block()
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        # First try extra witness data on a tx that doesn't require a witness
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
        tx.vout.append(CTxOut(1000, CScript([OP_TRUE])))  # non-witness output
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
        tx.rehash()
        self.update_witness_block_with_transactions(block, [tx])
        # Extra witness data should not be allowed.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Try extra signature data. Ok if we're not spending a witness output.
        block.vtx[1].wit.vtxinwit = []
        block.vtx[1].vin[0].scriptSig = CScript([OP_0])
        block.vtx[1].rehash()
        add_witness_commitment(block)
        block.solve()
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Now try extra witness/signature data on an input that DOES require a
        # witness
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))  # witness output
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b""))  # non-witness
        tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
        tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
        tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2])
        # This has extra witness data, so it should fail.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now get rid of the extra witness, but add extra scriptSig data
        tx2.vin[0].scriptSig = CScript([OP_TRUE])
        tx2.vin[1].scriptSig = CScript([OP_TRUE])
        tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
        tx2.wit.vtxinwit[1].scriptWitness.stack = []
        tx2.rehash()
        add_witness_commitment(block)
        block.solve()
        # This has extra signature data for a witness input, so it should fail.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now get rid of the extra scriptsig on the witness input, and verify
        # success (even with extra scriptsig data in the non-witness input)
        tx2.vin[0].scriptSig = b""
        tx2.rehash()
        add_witness_commitment(block)
        block.solve()
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update utxo for later tests
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest
    def test_max_witness_push_length(self):
        """Test that witness stack can only allow up to 520 byte pushes."""
        block = self.build_next_block()
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.append(CTxInWitness())
        # First try a 521-byte stack element
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
        tx2.rehash()
        self.update_witness_block_with_transactions(block, [tx, tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now reduce the length of the stack element to exactly the limit.
        tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
        add_witness_commitment(block)
        block.solve()
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update the utxo for later tests
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest
    def test_max_witness_program_length(self):
        """Test that witness outputs greater than 10kB can't be spent."""
        MAX_PROGRAM_LENGTH = 10000
        # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
        long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
        assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
        long_witness_hash = sha256(long_witness_program)
        long_script_pubkey = CScript([OP_0, long_witness_hash])
        block = self.build_next_block()
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
        tx.rehash()
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.append(CTxInWitness())
        # 44 filler items to satisfy the program's 44 DROP-type opcodes.
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
        tx2.rehash()
        self.update_witness_block_with_transactions(block, [tx, tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Try again with one less byte in the witness program
        witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
        assert(len(witness_program) == MAX_PROGRAM_LENGTH)
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
        tx.rehash()
        tx2.vin[0].prevout.hash = tx.sha256
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
        tx2.rehash()
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx, tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest
    def test_witness_input_length(self):
        """Test that vin length must match vtxinwit length."""
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        # Create a transaction that splits our utxo into many outputs
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        value = self.utxo[0].nValue
        for i in range(10):
            tx.vout.append(CTxOut(int(value / 10), script_pubkey))
        tx.vout[0].nValue -= 1000
        assert(tx.vout[0].nValue >= 0)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Try various ways to spend tx that should all break.
        # This "broken" transaction serializer will not normalize
        # the length of vtxinwit.
        class BrokenCTransaction(CTransaction):
            def serialize_with_witness(self):
                # Same wire format as CTransaction, but serializes
                # self.wit as-is, even if its length mismatches vin.
                flags = 0
                if not self.wit.is_null():
                    flags |= 1
                r = b""
                r += struct.pack("<i", self.nVersion)
                if flags:
                    dummy = []
                    r += ser_vector(dummy)
                    r += struct.pack("<B", flags)
                r += ser_vector(self.vin)
                r += ser_vector(self.vout)
                if flags & 1:
                    r += self.wit.serialize()
                r += struct.pack("<I", self.nLockTime)
                return r
        tx2 = BrokenCTransaction()
        for i in range(10):
            tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
        tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
        # First try using a too long vtxinwit
        for i in range(11):
            tx2.wit.vtxinwit.append(CTxInWitness())
            tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now try using a too short vtxinwit
        tx2.wit.vtxinwit.pop()
        tx2.wit.vtxinwit.pop()
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now make one of the intermediate witnesses be incorrect
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
        tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Fix the broken witness and the block should be accepted.
        tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest
    def test_tx_relay_after_segwit_activation(self):
        """Test transaction relay after segwit activation.
        After segwit activates, verify that mempool:
        - rejects transactions with unnecessary/extra witnesses
        - accepts transactions with valid witnesses
        and that witness transactions are relayed to non-upgraded peers."""
        # Generate a transaction that doesn't require a witness, but send it
        # with a witness. Should be rejected because we can't use a witness
        # when spending a non-witness output.
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
        tx.rehash()
        tx_hash = tx.sha256
        # Verify that unnecessary witnesses are rejected.
        self.test_node.announce_tx_and_wait_for_getdata(tx)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
        # Verify that removing the witness succeeds.
        self.test_node.announce_tx_and_wait_for_getdata(tx)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
        # Now try to add extra witness data to a valid witness tx.
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
        tx2.rehash()
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
        tx3.wit.vtxinwit.append(CTxInWitness())
        # Add too-large for IsStandard witness and check that it does not enter reject filter
        p2sh_program = CScript([OP_TRUE])
        p2sh_pubkey = hash160(p2sh_program)
        witness_program2 = CScript([b'a' * 400000])
        tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
        tx3.rehash()
        # Node will not be blinded to the transaction: the second announce
        # below still triggers a getdata, proving tx3 wasn't reject-filtered.
        self.std_node.announce_tx_and_wait_for_getdata(tx3)
        test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, b'tx-size')
        self.std_node.announce_tx_and_wait_for_getdata(tx3)
        test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, b'tx-size')
        # Remove witness stuffing, instead add extra witness push on stack
        tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
        tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
        tx3.rehash()
        test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
        # Get rid of the extra witness, and verify acceptance.
        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        # Also check that old_node gets a tx announcement, even though this is
        # a witness transaction.
        self.old_node.wait_for_inv([CInv(1, tx2.sha256)])  # wait until tx2 was inv'ed
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
        self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
        # Test that getrawtransaction returns correct witness information
        # hash, size, vsize
        raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
        assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
        assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
        weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
        vsize = math.ceil(weight / 4)
        assert_equal(raw_tx["vsize"], vsize)
        assert_equal(raw_tx["weight"], weight)
        assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
        assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
        assert(vsize != raw_tx["size"])
        # Cleanup: mine the transactions and update utxo for next test
        self.nodes[0].generate(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
    """Test validity of future segwit version transactions.
    Future segwit version transactions are non-standard, but valid in blocks.
    Can run this before and after segwit activation."""
    NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
    # Make sure we have one utxo per witness version to spend; if not,
    # split the first utxo into NUM_SEGWIT_VERSIONS anyone-can-spend outputs.
    if len(self.utxo) < NUM_SEGWIT_VERSIONS:
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
        for i in range(NUM_SEGWIT_VERSIONS):
            tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
        tx.rehash()
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.utxo.pop(0)
        for i in range(NUM_SEGWIT_VERSIONS):
            self.utxo.append(UTXO(tx.sha256, i, split_value))
    sync_blocks(self.nodes)
    temp_utxo = []
    tx = CTransaction()
    witness_program = CScript([OP_TRUE])
    witness_hash = sha256(witness_program)
    assert_equal(len(self.nodes[1].getrawmempool()), 0)
    for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
        # First try to spend to a future version segwit script_pubkey.
        script_pubkey = CScript([CScriptOp(version), witness_hash])
        tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
        tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
        tx.rehash()
        # Non-standard to the policy-enforcing node (std_node), but the
        # permissive node (test_node) accepts it to its mempool.
        test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
        self.utxo.pop(0)
        temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
    self.nodes[0].generate(1)  # Mine all the transactions
    sync_blocks(self.nodes)
    assert(len(self.nodes[0].getrawmempool()) == 0)
    # Finally, verify that version 0 -> version 1 transactions
    # are non-standard
    script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
    tx2 = CTransaction()
    tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
    tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
    tx2.rehash()
    # Gets accepted to test_node, because standardness of outputs isn't
    # checked with fRequireStandard
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=False)
    temp_utxo.pop()  # last entry in temp_utxo was the output we just spent
    temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    # Spend everything in temp_utxo back to an OP_TRUE output.
    tx3 = CTransaction()
    total_value = 0
    for i in temp_utxo:
        tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
        tx3.wit.vtxinwit.append(CTxInWitness())
        total_value += i.nValue
    tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
    tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
    tx3.rehash()
    # Spending a higher version witness output is not allowed by policy,
    # even with fRequireStandard=false.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
    self.test_node.sync_with_ping()
    with mininode_lock:
        # The reject message should cite the upgradable-witness-version rule.
        assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
    # Building a block with the transaction must be valid, however.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx2, tx3])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    sync_blocks(self.nodes)
    # Add utxo to our list
    self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
    """Test that a coinbase paying to a segwit output is subject to the
    standard 100-block coinbase maturity rule."""
    block = self.build_next_block()
    # Change the output of the block to be a witness output.
    witness_program = CScript([OP_TRUE])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])
    block.vtx[0].vout[0].scriptPubKey = script_pubkey
    # This next line will rehash the coinbase and update the merkle
    # root, and solve.
    self.update_witness_block_with_transactions(block, [])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    # Build the transaction that spends the new coinbase output.
    spend_tx = CTransaction()
    spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
    spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
    spend_tx.wit.vtxinwit.append(CTxInWitness())
    spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
    spend_tx.rehash()
    # Now test a premature spend.
    self.nodes[0].generate(98)
    sync_blocks(self.nodes)
    block2 = self.build_next_block()
    self.update_witness_block_with_transactions(block2, [spend_tx])
    # 98 blocks on top is still one short of the maturity window -> reject.
    test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
    # Advancing one more block should allow the spend.
    self.nodes[0].generate(1)
    block2 = self.build_next_block()
    self.update_witness_block_with_transactions(block2, [spend_tx])
    test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
    sync_blocks(self.nodes)
@subtest
def test_uncompressed_pubkey(self):
    """Test uncompressed pubkey validity in segwit transactions.
    Uncompressed pubkeys are no longer supported in default relay policy,
    but (for now) are still valid in blocks."""
    # Segwit transactions using uncompressed pubkeys are not accepted
    # under default policy, but should still pass consensus.
    key = CECKey()
    key.set_secretbytes(b"9")
    key.set_compressed(False)
    pubkey = CPubKey(key.get_pubkey())
    assert_equal(len(pubkey), 65)  # This should be an uncompressed pubkey
    utxo = self.utxo.pop(0)
    # Test 1: P2WPKH
    # First create a P2WPKH output that uses an uncompressed pubkey
    pubkeyhash = hash160(pubkey)
    script_pkh = CScript([OP_0, pubkeyhash])
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
    tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
    tx.rehash()
    # Confirm it in a block.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    # Now try to spend it. Send it to a P2WSH output, which we'll
    # use in the next test.
    witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
    witness_hash = sha256(witness_program)
    script_wsh = CScript([OP_0, witness_hash])
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
    # P2WPKH inputs sign the implied P2PKH script (BIP143).
    script = get_p2pkh_script(pubkeyhash)
    sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
    signature = key.sign(sig_hash) + b'\x01'  # 0x1 is SIGHASH_ALL
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
    tx2.rehash()
    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    # But passes consensus.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    # Test 2: P2WSH
    # Try to spend the P2WSH output created in last test.
    # Send it to a P2SH(P2WSH) output, which we'll use in the next test.
    p2sh_witness_hash = hash160(script_wsh)
    script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
    script_sig = CScript([script_wsh])
    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
    tx3.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    # But passes consensus.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx3])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    # Test 3: P2SH(P2WSH)
    # Try to spend the P2SH output created in the last test.
    # Send it to a P2PKH output, which we'll use in the next test.
    script_pubkey = get_p2pkh_script(pubkeyhash)
    tx4 = CTransaction()
    tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
    tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
    tx4.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    # But passes consensus.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx4])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    # Test 4: Uncompressed pubkeys should still be valid in non-segwit
    # transactions.
    tx5 = CTransaction()
    tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
    tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
    (sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
    signature = key.sign(sig_hash) + b'\x01'  # 0x1 is SIGHASH_ALL
    tx5.vin[0].scriptSig = CScript([signature, pubkey])
    tx5.rehash()
    # Should pass policy and consensus.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx5])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
    """Test segwit v0 (BIP143) signature hashing.
    Covers every hashtype/ANYONECANPAY combination, incorrect input-amount
    commitments, a large randomized multi-input/multi-output sighash sweep,
    and witness-v0 P2WPKH spending rules (signature must be in the witness,
    not the scriptSig)."""
    key = CECKey()
    key.set_secretbytes(b"9")
    pubkey = CPubKey(key.get_pubkey())
    witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])
    # First create a witness output for use in the tests.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
    # Mine this transaction in preparation for following tests.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    sync_blocks(self.nodes)
    self.utxo.pop(0)
    # Test each hashtype
    prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
    for sigflag in [0, SIGHASH_ANYONECANPAY]:
        for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
            hashtype |= sigflag
            block = self.build_next_block()
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
            tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
            tx.wit.vtxinwit.append(CTxInWitness())
            # Too-large input value: BIP143 commits to the input amount,
            # so signing the wrong amount must invalidate the signature.
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
            # Too-small input value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
            block.vtx.pop()  # remove last tx
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
            # Now try correct value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
            block.vtx.pop()
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
            prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
    # Test combinations of signature hashes.
    # Split the utxo into a lot of outputs.
    # Randomly choose up to 10 to spend, sign with different hashtypes, and
    # output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
    # Ensure that we've tested a situation where we use SIGHASH_SINGLE with
    # an input index > number of outputs.
    NUM_SIGHASH_TESTS = 500
    temp_utxos = []
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
    split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
    for i in range(NUM_SIGHASH_TESTS):
        tx.vout.append(CTxOut(split_value, script_pubkey))
    tx.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
    for i in range(NUM_SIGHASH_TESTS):
        temp_utxos.append(UTXO(tx.sha256, i, split_value))
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    block = self.build_next_block()
    used_sighash_single_out_of_bounds = False
    for i in range(NUM_SIGHASH_TESTS):
        # Ping regularly to keep the connection alive
        if (not i % 100):
            self.test_node.sync_with_ping()
        # Choose random number of inputs to use.
        num_inputs = random.randint(1, 10)
        # Create a slight bias for producing more utxos
        num_outputs = random.randint(1, 11)
        random.shuffle(temp_utxos)
        assert(len(temp_utxos) > num_inputs)
        tx = CTransaction()
        total_value = 0
        for i in range(num_inputs):
            tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
            tx.wit.vtxinwit.append(CTxInWitness())
            total_value += temp_utxos[i].nValue
        split_value = total_value // num_outputs
        for i in range(num_outputs):
            tx.vout.append(CTxOut(split_value, script_pubkey))
        for i in range(num_inputs):
            # Now try to sign each input, using a random hashtype.
            anyonecanpay = 0
            if random.randint(0, 1):
                anyonecanpay = SIGHASH_ANYONECANPAY
            hashtype = random.randint(1, 3) | anyonecanpay
            sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
            if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
                used_sighash_single_out_of_bounds = True
        tx.rehash()
        for i in range(num_outputs):
            temp_utxos.append(UTXO(tx.sha256, i, split_value))
        temp_utxos = temp_utxos[num_inputs:]
        block.vtx.append(tx)
        # Test the block periodically, if we're close to maxblocksize
        if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
            self.update_witness_block_with_transactions(block, [])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
            block = self.build_next_block()
    if (not used_sighash_single_out_of_bounds):
        self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
    # Test the transactions we've added to the block
    if (len(block.vtx) > 1):
        self.update_witness_block_with_transactions(block, [])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    # Now test witness version 0 P2PKH transactions
    pubkeyhash = hash160(pubkey)
    script_pkh = CScript([OP_0, pubkeyhash])
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
    tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
    tx.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
    script = get_p2pkh_script(pubkeyhash)
    sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
    signature = key.sign(sig_hash) + b'\x01'  # 0x1 is SIGHASH_ALL
    # Check that we can't have a scriptSig
    tx2.vin[0].scriptSig = CScript([signature, pubkey])
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx, tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
    # Move the signature to the witness.
    block.vtx.pop()
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
    tx2.vin[0].scriptSig = b""
    tx2.rehash()
    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    temp_utxos.pop(0)
    # Update self.utxos for later tests by creating two outputs
    # that consolidate all the coins in temp_utxos.
    output_value = sum(i.nValue for i in temp_utxos) // 2
    tx = CTransaction()
    index = 0
    # Just spend to our usual anyone-can-spend output
    tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
    for i in temp_utxos:
        # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
        # the signatures as we go.
        tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
        tx.wit.vtxinwit.append(CTxInWitness())
        sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
        index += 1
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    for i in range(len(tx.vout)):
        self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
    """Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
    # Create a p2sh output -- this is so we can pass the standardness
    # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
    # in P2SH).
    p2sh_program = CScript([OP_TRUE])
    p2sh_pubkey = hash160(p2sh_program)
    script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
    # Now check that unnecessary witnesses can't be used to blind a node
    # to a transaction, eg by violating standardness checks.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
    self.nodes[0].generate(1)
    sync_blocks(self.nodes)
    # We'll add an unnecessary witness to this transaction that would cause
    # it to be non-standard, to test that violating policy with a witness
    # doesn't blind a node to a transaction. Transactions
    # rejected for having a witness shouldn't be added
    # to the rejection cache.
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
    tx2.rehash()
    # This will be rejected due to a policy check:
    # No witness is allowed, since it is not a witness program but a p2sh program
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, b'bad-witness-nonstandard')
    # If we send without witness, it should be accepted.
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
    # Now create a new anyone-can-spend utxo for the next test.
    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
    tx3.rehash()
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
    self.nodes[0].generate(1)
    sync_blocks(self.nodes)
    # Update our utxo list; we spent the first entry.
    self.utxo.pop(0)
    self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
    """Test detection of non-standard P2WSH witness"""
    # A minimal one-byte stack element used as padding throughout.
    pad = chr(1).encode('latin-1')
    # Create scripts for tests
    scripts = []
    scripts.append(CScript([OP_DROP] * 100))
    scripts.append(CScript([OP_DROP] * 99))
    scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
    scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
    p2wsh_scripts = []
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    # For each script, generate a pair of P2WSH and P2SH-P2WSH output.
    outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
    for i in scripts:
        p2wsh = CScript([OP_0, sha256(i)])
        p2sh = hash160(p2wsh)
        p2wsh_scripts.append(p2wsh)
        tx.vout.append(CTxOut(outputvalue, p2wsh))
        tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
    tx.rehash()
    txid = tx.sha256
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
    self.nodes[0].generate(1)
    sync_blocks(self.nodes)
    # Creating transactions for tests
    p2wsh_txs = []
    p2sh_txs = []
    for i in range(len(scripts)):
        p2wsh_tx = CTransaction()
        p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
        p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
        p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2wsh_tx.rehash()
        p2wsh_txs.append(p2wsh_tx)
        p2sh_tx = CTransaction()
        p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
        p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
        p2sh_tx.wit.vtxinwit.append(CTxInWitness())
        p2sh_tx.rehash()
        p2sh_txs.append(p2sh_tx)
    # Testing native P2WSH
    # Witness stack size, excluding witnessScript, over 100 is non-standard
    p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
    # Stack element size over 80 bytes is non-standard
    p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
    # Standard nodes should accept if element size is not over 80 bytes
    p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
    # witnessScript size at 3600 bytes is standard
    p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
    # witnessScript size at 3601 bytes is non-standard
    p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
    # Repeating the same tests with P2SH-P2WSH
    p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, b'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
    p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, b'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
    p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
    p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
    p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, b'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
    self.nodes[0].generate(1)  # Mine and clean up the mempool of non-standard node
    # Valid but non-standard transactions in a block should be accepted by standard node
    sync_blocks(self.nodes)
    assert_equal(len(self.nodes[0].getrawmempool()), 0)
    assert_equal(len(self.nodes[1].getrawmempool()), 0)
    self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
    """Test the behavior of starting up a segwit-aware node after the softfork has activated."""
    # Restart node 2 with the segwit-aware parameters and reconnect it.
    self.stop_node(2)
    self.start_node(2, extra_args=["-vbparams=segwit:0:999999999999"])
    connect_nodes(self.nodes[0], 2)
    sync_blocks(self.nodes)
    # The restarted peer must consider segwit active.
    assert(get_bip9_status(self.nodes[2], 'segwit')['status'] == "active")
    # Walk the chain from the tip down to genesis, checking that node 2's
    # view of every block matches node 0's exactly.
    for height in range(self.nodes[2].getblockcount(), -1, -1):
        block_hash = self.nodes[2].getblockhash(height)
        assert_equal(block_hash, self.nodes[0].getblockhash(height))
        assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
@subtest
def test_witness_sigops(self):
    """Test sigop counting is correct inside witnesses."""
    # Keep this under MAX_OPS_PER_SCRIPT (201)
    witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])
    # 5 CHECKMULTISIGs at 20 sigops each, plus 193 CHECKSIGs.
    sigops_per_script = 20 * 5 + 193 * 1
    # We'll produce 2 extra outputs, one with a program that would take us
    # over max sig ops, and one with a program that would exactly reach max
    # sig ops
    outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
    extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
    # We chose the number of checkmultisigs/checksigs to make this work:
    assert(extra_sigops_available < 100)  # steer clear of MAX_OPS_PER_SCRIPT
    # This script, when spent with the first
    # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
    # would push us just over the block sigop limit.
    witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
    witness_hash_toomany = sha256(witness_program_toomany)
    script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
    # If we spend this script instead, we would exactly reach our sigop
    # limit (for witness sigops).
    witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
    witness_hash_justright = sha256(witness_program_justright)
    script_pubkey_justright = CScript([OP_0, witness_hash_justright])
    # First split our available utxo into a bunch of outputs
    split_value = self.utxo[0].nValue // outputs
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    for i in range(outputs):
        tx.vout.append(CTxOut(split_value, script_pubkey))
    tx.vout[-2].scriptPubKey = script_pubkey_toomany
    tx.vout[-1].scriptPubKey = script_pubkey_justright
    tx.rehash()
    block_1 = self.build_next_block()
    self.update_witness_block_with_transactions(block_1, [tx])
    test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
    tx2 = CTransaction()
    # If we try to spend the first n-1 outputs from tx, that should be
    # too many sigops.
    total_value = 0
    for i in range(outputs - 1):
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
        total_value += tx.vout[i].nValue
    # Replace the last input's witness with the over-limit program.
    tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
    tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
    tx2.rehash()
    block_2 = self.build_next_block()
    self.update_witness_block_with_transactions(block_2, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
    # Try dropping the last input in tx2, and add an output that has
    # too many sigops (contributing to legacy sigop count).
    checksig_count = (extra_sigops_available // 4) + 1
    script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
    tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
    tx2.vin.pop()
    tx2.wit.vtxinwit.pop()
    tx2.vout[0].nValue -= tx.vout[-2].nValue
    tx2.rehash()
    block_3 = self.build_next_block()
    self.update_witness_block_with_transactions(block_3, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
    # If we drop the last checksig in this output, the tx should succeed.
    block_4 = self.build_next_block()
    tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
    tx2.rehash()
    self.update_witness_block_with_transactions(block_4, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
    # Reset the tip back down for the next test
    sync_blocks(self.nodes)
    for x in self.nodes:
        x.invalidateblock(block_4.hash)
    # Try replacing the last input of tx2 to be spending the last
    # output of tx
    block_5 = self.build_next_block()
    tx2.vout.pop()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
    tx2.rehash()
    self.update_witness_block_with_transactions(block_5, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
    # TODO: test p2sh sigop counting
# Script entry point: run the full segwit functional test suite.
if __name__ == '__main__':
    SegWitTest().main()
|
litchfield/django | refs/heads/master | tests/select_related/__init__.py | 12133432 | |
tomkralidis/geonode | refs/heads/master | geonode/documents/management/commands/__init__.py | 12133432 | |
TomHeatwole/osf.io | refs/heads/develop | website/addons/forward/tests/__init__.py | 12133432 | |
xujun10110/sleepy-puppy-1 | refs/heads/master | sleepypuppy/collector/__init__.py | 12133432 | |
Just-D/chromium-1 | refs/heads/master | third_party/cython/src/Cython/Compiler/Tests/TestVisitor.py | 133 | from Cython.Compiler.ModuleNode import ModuleNode
from Cython.Compiler.Symtab import ModuleScope
from Cython.TestUtils import TransformTest
from Cython.Compiler.Visitor import MethodDispatcherTransform
from Cython.Compiler.ParseTreeTransforms import (
NormalizeTree, AnalyseDeclarationsTransform,
AnalyseExpressionsTransform, InterpretCompilerDirectives)
class TestMethodDispatcherTransform(TransformTest):
    """Tests for MethodDispatcherTransform's dispatch of builtin-method and
    binary-operator handlers based on analysed expression types."""
    # Cached parsed-and-analysed tree; built lazily by _build_tree().
    _tree = None

    def _build_tree(self):
        # Build (once) an analysed Cython AST for a small snippet containing
        # a typed bytes multiplication and a dict.get() call.
        if self._tree is None:
            context = None

            def fake_module(node):
                # Wrap the parsed body in a ModuleNode so the analysis
                # transforms have a module scope to work with.
                scope = ModuleScope('test', None, None)
                return ModuleNode(node.pos, doc=None, body=node,
                                  scope=scope, full_module_name='test',
                                  directive_comments={})
            pipeline = [
                fake_module,
                NormalizeTree(context),
                InterpretCompilerDirectives(context, {}),
                AnalyseDeclarationsTransform(context),
                AnalyseExpressionsTransform(context),
            ]
            self._tree = self.run_pipeline(pipeline, u"""
                cdef bytes s = b'asdfg'
                cdef dict d = {1:2}
                x = s * 3
                d.get('test')
            """)
        return self._tree

    def test_builtin_method(self):
        # The dict.get handler should fire exactly once for d.get('test').
        calls = [0]

        class Test(MethodDispatcherTransform):
            def _handle_simple_method_dict_get(self, node, func, args, unbound):
                calls[0] += 1
                return node
        tree = self._build_tree()
        Test(None)(tree)
        self.assertEqual(1, calls[0])

    def test_binop_method(self):
        # s * 3 on a typed bytes variable should dispatch to the bytes
        # __mul__ handler, never the generic object handler.
        calls = {'bytes': 0, 'object': 0}

        class Test(MethodDispatcherTransform):
            def _handle_simple_method_bytes___mul__(self, node, func, args, unbound):
                calls['bytes'] += 1
                return node

            def _handle_simple_method_object___mul__(self, node, func, args, unbound):
                calls['object'] += 1
                return node
        tree = self._build_tree()
        Test(None)(tree)
        self.assertEqual(1, calls['bytes'])
        self.assertEqual(0, calls['object'])
|
ferabra/edx-platform | refs/heads/master | common/djangoapps/config_models/admin.py | 84 | """
Admin site models for managing :class:`.ConfigurationModel` subclasses
"""
from django.forms import models
from django.contrib import admin
from django.contrib.admin import ListFilter
from django.core.cache import get_cache, InvalidCacheBackendError
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
# Use the dedicated 'configuration' cache when one is configured;
# otherwise fall back to Django's default cache backend.
try:
    cache = get_cache('configuration')  # pylint: disable=invalid-name
except InvalidCacheBackendError:
    from django.core.cache import cache
# pylint: disable=protected-access
class ConfigurationModelAdmin(admin.ModelAdmin):
    """
    :class:`~django.contrib.admin.ModelAdmin` for :class:`.ConfigurationModel` subclasses.

    Configuration rows are treated as append-only history: editing and
    deleting existing rows is disabled, and "reverting" creates a new row
    copying an older configuration.
    """
    date_hierarchy = 'change_date'

    def get_actions(self, request):
        # Replace the default action list (including delete) with a single
        # "revert" action.
        return {
            'revert': (ConfigurationModelAdmin.revert, 'revert', _('Revert to the selected configuration'))
        }

    def get_list_display(self, request):
        # Show every model field in the change list.
        return self.model._meta.get_all_field_names()

    # Don't allow deletion of configuration
    def has_delete_permission(self, request, obj=None):
        return False

    # Make all fields read-only when editing an object
    def get_readonly_fields(self, request, obj=None):
        if obj:  # editing an existing object
            return self.model._meta.get_all_field_names()
        return self.readonly_fields

    def add_view(self, request, form_url='', extra_context=None):
        # Prepopulate new configuration entries with the value of the current config
        get = request.GET.copy()
        get.update(models.model_to_dict(self.model.current()))
        request.GET = get
        return super(ConfigurationModelAdmin, self).add_view(request, form_url, extra_context)

    # Hide the save buttons in the change view
    def change_view(self, request, object_id, form_url='', extra_context=None):
        extra_context = extra_context or {}
        extra_context['readonly'] = True
        return super(ConfigurationModelAdmin, self).change_view(
            request,
            object_id,
            form_url,
            extra_context=extra_context
        )

    def save_model(self, request, obj, form, change):
        # Record who made the change, then invalidate the cached "current"
        # configuration so the new row takes effect immediately.
        obj.changed_by = request.user
        super(ConfigurationModelAdmin, self).save_model(request, obj, form, change)
        cache.delete(obj.cache_key_name(*(getattr(obj, key_name) for key_name in obj.KEY_FIELDS)))
        cache.delete(obj.key_values_cache_key_name())

    def revert(self, request, queryset):
        """
        Admin action to revert a configuration back to the selected value
        """
        if queryset.count() != 1:
            self.message_user(request, _("Please select a single configuration to revert to."))
            return
        target = queryset[0]
        # Clearing the pk makes save_model insert a *new* row that copies
        # the selected historical configuration.
        target.id = None
        self.save_model(request, target, None, False)
        self.message_user(request, _("Reverted configuration."))
        return HttpResponseRedirect(
            reverse(
                'admin:{}_{}_change'.format(
                    self.model._meta.app_label,
                    self.model._meta.module_name,
                ),
                args=(target.id,),
            )
        )
class ShowHistoryFilter(ListFilter):
    """
    Admin change view filter to show only the most recent (i.e. the "current") row for each
    unique key value.
    """
    title = _('Status')
    parameter_name = 'show_history'

    def __init__(self, request, params, model, model_admin):
        super(ShowHistoryFilter, self).__init__(request, params, model, model_admin)
        # Claim our query-string parameter so the changelist does not treat
        # it as an (invalid) field lookup.
        if self.parameter_name in params:
            self.used_parameters[self.parameter_name] = params.pop(self.parameter_name)

    def has_output(self):
        """ Should this filter be shown? """
        return True

    def choices(self, cl):
        """ Returns choices ready to be output in the template. """
        showing_all = self.used_parameters.get(self.parameter_name) == "1"
        current_choice = {
            'display': _('Current Configuration'),
            'selected': not showing_all,
            'query_string': cl.get_query_string({}, [self.parameter_name]),
        }
        history_choice = {
            'display': _('All (Show History)'),
            'selected': showing_all,
            'query_string': cl.get_query_string({self.parameter_name: "1"}, []),
        }
        return (current_choice, history_choice)

    def queryset(self, request, queryset):
        """ Filter the queryset. No-op since it's done by KeyedConfigurationModelAdmin """
        return queryset

    def expected_parameters(self):
        """ List the query string params used by this filter """
        return [self.parameter_name]
class KeyedConfigurationModelAdmin(ConfigurationModelAdmin):
    """
    :class:`~django.contrib.admin.ModelAdmin` for :class:`.ConfigurationModel` subclasses that
    use extra keys (i.e. they have KEY_FIELDS set).
    """
    date_hierarchy = None
    list_filter = (ShowHistoryFilter, )

    def queryset(self, request):
        """
        Annote the queryset with an 'is_active' property that's true iff that row is the most
        recently added row for that particular set of KEY_FIELDS values.
        Filter the queryset to show only is_active rows by default.
        """
        if request.GET.get(ShowHistoryFilter.parameter_name) == '1':
            qs = self.model.objects.with_active_flag()
        else:
            # Show only the most recent row for each key.
            qs = self.model.objects.current_set()
        ordering = self.get_ordering(request)
        return qs.order_by(*ordering) if ordering else qs

    def get_list_display(self, request):
        """ Add a link to each row for creating a new row using the chosen row as a template """
        return self.model._meta.get_all_field_names() + ['edit_link']

    def add_view(self, request, form_url='', extra_context=None):
        # Prepopulate new configuration entries with the values of the chosen
        # source row, if one was passed in the query string.
        if 'source' in request.GET:
            query = request.GET.copy()
            source_pk = int(query.pop('source')[0])
            template_obj = get_object_or_404(self.model, pk=source_pk)
            query.update(models.model_to_dict(template_obj))
            request.GET = query
        # Call our grandparent's add_view, skipping the parent code
        # because the parent code has a different way to prepopulate new configuration entries
        # with the value of the latest config, which doesn't make sense for keyed models.
        # pylint: disable=bad-super-call
        return super(ConfigurationModelAdmin, self).add_view(request, form_url, extra_context)

    def edit_link(self, inst):
        """ Edit link for the change view """
        if not inst.is_active:
            return u'--'
        update_url = reverse('admin:{}_{}_add'.format(self.model._meta.app_label, self.model._meta.module_name))
        update_url += "?source={}".format(inst.pk)
        return u'<a href="{}">{}</a>'.format(update_url, _('Update'))
    edit_link.allow_tags = True
    edit_link.short_description = _('Update')
|
Jeongseob/xen-coboost-sched | refs/heads/master | tools/python/xen/xend/server/SrvXendLog.py | 52 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd
#============================================================================
from xen.web import static
from xen.xend import XendLogging
from xen.web.SrvDir import SrvDir
class SrvXendLog(SrvDir):
    """Xend log.
    """

    def __init__(self):
        SrvDir.__init__(self)
        # Serve the current xend log file as plain text with no encoding.
        log_path = XendLogging.getLogFilename()
        self.logfile = static.File(log_path, defaultType="text/plain")
        self.logfile.type = "text/plain"
        self.logfile.encoding = None

    def render_GET(self, req):
        # Delegate rendering to the wrapped static file resource.
        return self.logfile.render(req)
|
ProfessionalIT/professionalit-webiste | refs/heads/master | sdk/google_appengine/lib/django-1.5/django/contrib/localflavor/us/__init__.py | 12133432 | |
mathieui/pleaseshare | refs/heads/master | pleaseshare/torrent/__init__.py | 12133432 | |
CINPLA/expipe-dev | refs/heads/master | exdir-browser/exdirbrowser/models/__init__.py | 12133432 | |
longzhi/Zappa | refs/heads/master | zappa/core.py | 1 | from __future__ import print_function
import boto3
import botocore
import glob
import json
import logging
import os
import random
import requests
import shutil
import string
import subprocess
import tarfile
import tempfile
import time
import troposphere
import troposphere.apigateway
import zipfile
from builtins import int, bytes
from botocore.exceptions import ClientError
from distutils.dir_util import copy_tree
from io import BytesIO, open
from lambda_packages import lambda_packages
from setuptools import find_packages
from tqdm import tqdm
# Zappa imports
from .utilities import (copytree,
add_event_source,
remove_event_source,
human_size,
get_topic_name,
contains_python_files_or_subdirs,
get_venv_from_python_version)
##
# Logging Config
##
logging.basicConfig(format='%(levelname)s:%(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
##
# Policies And Template Mappings
##
ASSUME_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"apigateway.amazonaws.com",
"lambda.amazonaws.com",
"events.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}"""
ATTACH_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:AttachNetworkInterface",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeInstances",
"ec2:DescribeNetworkInterfaces",
"ec2:DetachNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ResetNetworkInterfaceAttribute"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": [
"kinesis:*"
],
"Resource": "arn:aws:kinesis:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sns:*"
],
"Resource": "arn:aws:sns:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sqs:*"
],
"Resource": "arn:aws:sqs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"dynamodb:*"
],
"Resource": "arn:aws:dynamodb:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"route53:*"
],
"Resource": "*"
}
]
}"""
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#apigateway_region
API_GATEWAY_REGIONS = ['us-east-1', 'us-east-2',
'us-west-1', 'us-west-2',
'eu-central-1',
'eu-west-1', 'eu-west-2',
'ap-northeast-1', 'ap-northeast-2',
'ap-southeast-1', 'ap-southeast-2']
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#lambda_region
LAMBDA_REGIONS = ['us-east-1', 'us-east-2',
'us-west-1', 'us-west-2',
'eu-central-1',
'eu-west-1', 'eu-west-2',
'ap-northeast-1', 'ap-northeast-2',
'ap-southeast-1', 'ap-southeast-2']
# We never need to include these.
# Related: https://github.com/Miserlou/Zappa/pull/56
# Related: https://github.com/Miserlou/Zappa/pull/581
ZIP_EXCLUDES = [
'*.exe', '*.DS_Store', '*.Python', '*.git', '.git/*', '*.zip', '*.tar.gz',
'*.hg', '*.egg-info', 'pip', 'docutils*', 'setuputils*'
]
##
# Classes
##

class Zappa(object):
    """
    Zappa!

    Makes it easy to run Python web applications on AWS Lambda/API Gateway.
    """
    ##
    # Configurables
    ##

    # HTTP methods routed through API Gateway; a single ANY method proxies all verbs.
    http_methods = [
        'ANY'
    ]
    # Name of the IAM role the deployed Lambda executes under.
    role_name = "ZappaLambdaExecution"
    # Optional extra IAM statements — presumably merged into the policies
    # below elsewhere in this class; confirm before relying on it.
    extra_permissions = None
    # Default trust/permissions policies (module-level JSON constants).
    assume_policy = ASSUME_POLICY
    attach_policy = ATTACH_POLICY
    # Valid API Gateway CloudWatch logging levels.
    cloudwatch_log_levels = ['OFF', 'ERROR', 'INFO']

    ##
    # Credentials
    ##

    boto_session = None
    credentials_arn = None
def __init__(self,
             boto_session=None,
             profile_name=None,
             aws_region=None,
             load_credentials=True,
             desired_role_name=None,
             runtime='python2.7'
             ):
    """
    Configure the deployer and (optionally) build all AWS service clients.

    boto_session -- existing boto3 session to reuse (passed to load_credentials).
    profile_name -- AWS credentials profile name (passed to load_credentials).
    aws_region   -- target region; None means use boto3's resolved default.
    load_credentials -- when True, resolve credentials and create the clients.
    desired_role_name -- overrides the default Lambda execution role name.
    runtime      -- Lambda runtime identifier ('python2.7' or a python3 one).
    """
    # Set aws_region to None to use the system's region instead
    if aws_region is None:
        # https://github.com/Miserlou/Zappa/issues/413
        self.aws_region = boto3.Session().region_name
        logger.debug("Set region from boto: %s", self.aws_region)
    else:
        self.aws_region = aws_region

    if desired_role_name:
        self.role_name = desired_role_name

    self.runtime = runtime

    # Pick the manylinux wheel ABI tag matching the target runtime.
    if self.runtime == 'python2.7':
        self.manylinux_wheel_file_suffix = 'cp27mu-manylinux1_x86_64.whl'
    else:
        self.manylinux_wheel_file_suffix = 'cp36m-manylinux1_x86_64.whl'

    # Some common invokations, such as DB migrations,
    # can take longer than the default.
    # Note that this is set to 300s, but if connected to
    # APIGW, Lambda will max out at 30s.
    # Related: https://github.com/Miserlou/Zappa/issues/205
    long_config_dict = {
        'region_name': aws_region,
        'connect_timeout': 5,
        'read_timeout': 300
    }
    long_config = botocore.client.Config(**long_config_dict)

    if load_credentials:
        self.load_credentials(boto_session, profile_name)

        # One client per service; the lambda client gets the long read
        # timeout so slow invocations don't trip the socket.
        self.s3_client = self.boto_session.client('s3')
        self.lambda_client = self.boto_session.client('lambda', config=long_config)
        self.events_client = self.boto_session.client('events')
        self.apigateway_client = self.boto_session.client('apigateway')
        self.logs_client = self.boto_session.client('logs')
        self.iam_client = self.boto_session.client('iam')
        self.iam = self.boto_session.resource('iam')
        self.cloudwatch = self.boto_session.client('cloudwatch')
        self.route53 = self.boto_session.client('route53')
        self.sns_client = self.boto_session.client('sns')
        self.cf_client = self.boto_session.client('cloudformation')

    # CloudFormation template state built up during deployment.
    self.cf_template = troposphere.Template()
    self.cf_api_resources = []
    self.cf_parameters = {}
def cache_param(self, value):
    """Returns a troposphere Ref to a value cached as a parameter."""
    cached = self.cf_parameters.get(value)
    if cached is None:
        # Parameters are named 'A', 'B', 'C', ... in insertion order.
        next_name = chr(ord('A') + len(self.cf_parameters))
        cached = self.cf_template.add_parameter(
            troposphere.Parameter(next_name, Type="String", Default=value)
        )
        self.cf_parameters[value] = cached
    return troposphere.Ref(cached)
##
# Packaging
##
def copy_editable_packages(self, egg_links, temp_package_path):
    """Copy the top-level packages behind each .egg-link into the staging area."""
    for link_file in egg_links:
        with open(link_file, 'rb') as handle:
            # The first line of an .egg-link file is the source checkout path.
            source_path = handle.read().decode('utf-8').splitlines()[0].strip()
            top_level_pkgs = {
                name.split(".")[0]
                for name in find_packages(source_path, exclude=['test', 'tests'])
            }
            for pkg_name in top_level_pkgs:
                copytree(
                    os.path.join(source_path, pkg_name),
                    os.path.join(temp_package_path, pkg_name),
                    symlinks=False,
                )

    if temp_package_path:
        # now remove any egg-links as they will cause issues if they still exist
        for stale_link in glob.glob(os.path.join(temp_package_path, "*.egg-link")):
            os.remove(stale_link)
def get_deps_list(self, pkg_name, installed_distros=None):
    """
    For a given package, returns a list of required packages. Recursive.

    pkg_name -- distribution name to look up (matched case-insensitively).
    installed_distros -- iterable of pkg_resources-style distributions; when
        None, the current environment's installed distributions are used.
    Returns a de-duplicated list of (project_name, version) tuples, including
    the package itself; empty if the package is not installed.
    """
    # Only import pip (and scan the environment) when the caller did not
    # supply the distro list — previously this imported pip on every
    # recursive call and treated an explicit empty list as "not provided".
    if installed_distros is None:
        import pip
        installed_distros = pip.get_installed_distributions()
    deps = []
    for package in installed_distros:
        if package.project_name.lower() == pkg_name.lower():
            deps = [(package.project_name, package.version)]
            # Walk the dependency tree, reusing the same distro list.
            for req in package.requires():
                deps += self.get_deps_list(pkg_name=req.project_name,
                                           installed_distros=installed_distros)
    return list(set(deps))  # de-dupe before returning
def create_handler_venv(self):
    """
    Takes the installed zappa and brings it into a fresh virtualenv-like folder. All dependencies are then downloaded.

    Returns the path of the new 'handler_venv' folder.
    """
    import pip

    # We will need the currenv venv to pull Zappa from
    current_venv = self.get_current_venv()

    # Make a new folder for the handler packages
    ve_path = os.path.join(os.getcwd(), 'handler_venv')

    # Windows venvs use 'Lib', POSIX ones 'lib/<pythonX.Y>'.
    if os.sys.platform == 'win32':
        current_site_packages_dir = os.path.join(current_venv, 'Lib', 'site-packages')
        venv_site_packages_dir = os.path.join(ve_path, 'Lib', 'site-packages')
    else:
        current_site_packages_dir = os.path.join(current_venv, 'lib', get_venv_from_python_version(), 'site-packages')
        venv_site_packages_dir = os.path.join(ve_path, 'lib', get_venv_from_python_version(), 'site-packages')

    if not os.path.isdir(venv_site_packages_dir):
        os.makedirs(venv_site_packages_dir)

    # Copy zappa* to the new virtualenv
    zappa_things = [z for z in os.listdir(current_site_packages_dir) if z.lower()[:5] == 'zappa']
    for z in zappa_things:
        copytree(os.path.join(current_site_packages_dir, z), os.path.join(venv_site_packages_dir, z))

    # Use pip to download zappa's dependencies. Copying from current venv causes issues with things like PyYAML that installs as yaml
    zappa_deps = self.get_deps_list('zappa')
    pkg_list = ['{0!s}=={1!s}'.format(dep, version) for dep, version in zappa_deps]

    # Need to manually add setuptools
    pkg_list.append('setuptools')
    pip.main(["install", "--quiet", "--target", venv_site_packages_dir] + pkg_list)

    return ve_path
# staticmethod as per https://github.com/Miserlou/Zappa/issues/780
@staticmethod
def get_current_venv():
    """
    Returns the path to the current virtualenv

    Checks $VIRTUAL_ENV first, then falls back to a pyenv '.python-version'
    file; returns None when neither is present.
    """
    if 'VIRTUAL_ENV' in os.environ:
        venv = os.environ['VIRTUAL_ENV']
    elif os.path.exists('.python-version'):  # pragma: no cover
        try:
            subprocess.check_output('pyenv help', stderr=subprocess.STDOUT)
        except OSError:
            # NOTE(review): this only warns and then continues — the
            # 'pyenv which' call below will still fail if pyenv is missing.
            print("This directory seems to have pyenv's local venv"
                  "but pyenv executable was not found.")
        with open('.python-version', 'r') as f:
            # [:-1] strips the trailing newline from the version file.
            env_name = f.read()[:-1]
        # Derive the venv root from the interpreter path pyenv reports.
        bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')
        venv = bin_path[:bin_path.rfind(env_name)] + env_name
    else:  # pragma: no cover
        return None
    return venv
def create_lambda_zip(self,
                      prefix='lambda_package',
                      handler_file=None,
                      slim_handler=False,
                      minify=True,
                      exclude=None,
                      use_precompiled_packages=True,
                      include=None,
                      venv=None
                      ):
    """
    Create a Lambda-ready zip file of the current virtualenvironment and working directory.

    Returns path to that file.

    prefix -- basename prefix for the generated zip.
    handler_file -- optional handler module copied to the zip root.
    slim_handler -- when True, skip bundling the project files themselves.
    minify -- apply ZIP_EXCLUDES (and `exclude`) while copying.
    exclude -- extra glob patterns to leave out of the archive.
    use_precompiled_packages -- swap in lambda-compatible binary packages.
    include -- NOTE(review): accepted but never referenced in this body;
        confirm whether it should add files to the archive.
    venv -- virtualenv to package; defaults to the active one.
    """
    # Pip is a weird package.
    # Calling this function in some environments without this can cause.. funkiness.
    import pip

    if not venv:
        venv = self.get_current_venv()

    cwd = os.getcwd()
    zip_fname = prefix + '-' + str(int(time.time())) + '.zip'
    zip_path = os.path.join(cwd, zip_fname)

    # Files that should be excluded from the zip
    if exclude is None:
        exclude = list()

    # Exclude the zip itself
    exclude.append(zip_path)

    def splitpath(path):
        # Split an absolute path into its normalized components, root first.
        parts = []
        (path, tail) = os.path.split(path)
        while path and tail:
            parts.append(tail)
            (path, tail) = os.path.split(path)
        parts.append(os.path.join(path, tail))
        return list(map(os.path.normpath, parts))[::-1]
    split_venv = splitpath(venv)
    split_cwd = splitpath(cwd)

    # Ideally this should be avoided automatically,
    # but this serves as an okay stop-gap measure.
    if split_venv[-1] == split_cwd[-1]:  # pragma: no cover
        print(
            "Warning! Your project and virtualenv have the same name! You may want "
            "to re-create your venv with a new name, or explicitly define a "
            "'project_name', as this may cause errors."
        )

    # First, do the project..
    temp_project_path = os.path.join(tempfile.gettempdir(), str(int(time.time())))
    os.makedirs(temp_project_path)

    if not slim_handler:
        # Slim handler does not take the project files.
        if minify:
            # Related: https://github.com/Miserlou/Zappa/issues/744
            excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]
            copytree(cwd, temp_project_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))
        else:
            copytree(cwd, temp_project_path, symlinks=False)

    # If a handler_file is supplied, copy that to the root of the package,
    # because that's where AWS Lambda looks for it. It can't be inside a package.
    if handler_file:
        filename = handler_file.split(os.sep)[-1]
        shutil.copy(handler_file, os.path.join(temp_project_path, filename))

    # Then, do site site-packages..
    egg_links = []
    temp_package_path = os.path.join(tempfile.gettempdir(), str(int(time.time() + 1)))
    if os.sys.platform == 'win32':
        site_packages = os.path.join(venv, 'Lib', 'site-packages')
    else:
        site_packages = os.path.join(venv, 'lib', get_venv_from_python_version(), 'site-packages')
    egg_links.extend(glob.glob(os.path.join(site_packages, '*.egg-link')))

    if minify:
        excludes = ZIP_EXCLUDES + exclude
        copytree(site_packages, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))
    else:
        copytree(site_packages, temp_package_path, symlinks=False)

    # We may have 64-bin specific packages too.
    site_packages_64 = os.path.join(venv, 'lib64', get_venv_from_python_version(), 'site-packages')
    if os.path.exists(site_packages_64):
        egg_links.extend(glob.glob(os.path.join(site_packages_64, '*.egg-link')))
        if minify:
            excludes = ZIP_EXCLUDES + exclude
            copytree(site_packages_64, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))
        else:
            copytree(site_packages_64, temp_package_path, symlinks=False)

    if egg_links:
        self.copy_editable_packages(egg_links, temp_package_path)

    # Merge the staged site-packages over the project tree.
    copy_tree(temp_package_path, temp_project_path, update=True)

    package_to_keep = []
    if os.path.isdir(site_packages):
        package_to_keep += os.listdir(site_packages)
    if os.path.isdir(site_packages_64):
        package_to_keep += os.listdir(site_packages_64)

    # Then the pre-compiled packages..
    if use_precompiled_packages:
        print("Downloading and installing dependencies..")
        installed_packages_name_set = [package.project_name.lower() for package in
                                       pip.get_installed_distributions() if package.project_name in package_to_keep or
                                       package.location in [site_packages, site_packages_64]]

        # First, try lambda packages
        for name, details in lambda_packages.items():
            if name.lower() in installed_packages_name_set:
                tar = tarfile.open(details['path'], mode="r:gz")
                for member in tar.getmembers():
                    # If we can, trash the local version.
                    if member.isdir():
                        shutil.rmtree(os.path.join(temp_project_path, member.name), ignore_errors=True)
                        continue
                    tar.extract(member, temp_project_path)

        progress = tqdm(total=len(installed_packages_name_set), unit_scale=False, unit='pkg')

        # Then try to use manylinux packages from PyPi..
        # Related: https://github.com/Miserlou/Zappa/issues/398
        try:
            for installed_package_name in installed_packages_name_set:
                if installed_package_name not in lambda_packages:
                    wheel_url = self.get_manylinux_wheel(installed_package_name)
                    if wheel_url:
                        resp = requests.get(wheel_url, timeout=2, stream=True)
                        resp.raw.decode_content = True
                        zipresp = resp.raw
                        with zipfile.ZipFile(BytesIO(zipresp.read())) as zfile:
                            zfile.extractall(temp_project_path)
                progress.update()
        except Exception:
            pass  # XXX - What should we do here?
        progress.close()

    # Then zip it all up..
    print("Packaging project as zip..")
    try:
        # import zlib
        compression_method = zipfile.ZIP_DEFLATED
    except ImportError:  # pragma: no cover
        compression_method = zipfile.ZIP_STORED

    zipf = zipfile.ZipFile(zip_path, 'w', compression_method)
    for root, dirs, files in os.walk(temp_project_path):

        for filename in files:

            # Skip .pyc files for Django migrations
            # https://github.com/Miserlou/Zappa/issues/436
            # https://github.com/Miserlou/Zappa/issues/464
            if filename[-4:] == '.pyc' and root[-10:] == 'migrations':
                continue

            # If there is a .pyc file in this package,
            # we can skip the python source code as we'll just
            # use the compiled bytecode anyway..
            if filename[-3:] == '.py' and root[-10:] != 'migrations':
                abs_filname = os.path.join(root, filename)
                abs_pyc_filename = abs_filname + 'c'
                if os.path.isfile(abs_pyc_filename):

                    # but only if the pyc is older than the py,
                    # otherwise we'll deploy outdated code!
                    py_time = os.stat(abs_filname).st_mtime
                    pyc_time = os.stat(abs_pyc_filename).st_mtime

                    if pyc_time > py_time:
                        continue

            # Make sure that the files are all correctly chmodded
            # Related: https://github.com/Miserlou/Zappa/issues/484
            # Related: https://github.com/Miserlou/Zappa/issues/682
            os.chmod(os.path.join(root, filename), 0o755)

            # Actually put the file into the proper place in the zip
            # Related: https://github.com/Miserlou/Zappa/pull/716
            zipi = zipfile.ZipInfo(os.path.join(root.replace(temp_project_path, '').lstrip(os.sep), filename))
            zipi.create_system = 3
            zipi.external_attr = 0o755 << int(16)  # Is this P2/P3 functional?
            with open(os.path.join(root, filename), 'rb') as f:
                zipf.writestr(zipi, f.read(), compression_method)

        # Create python init file if it does not exist
        # Only do that if there are sub folders or python files
        # Related: https://github.com/Miserlou/Zappa/issues/766
        if '__init__.py' not in files and contains_python_files_or_subdirs(dirs, files):
            tmp_init = os.path.join(temp_project_path, '__init__.py')
            open(tmp_init, 'a').close()
            os.chmod(tmp_init, 0o755)
            # NOTE(review): the relative directory appears twice in this
            # arcname (joined with itself) — looks like a bug; confirm the
            # intended in-archive path for the synthesized __init__.py.
            zipf.write(tmp_init,
                       os.path.join(root.replace(temp_project_path, ''),
                                    os.path.join(root.replace(temp_project_path, ''), '__init__.py')))

    # And, we're done!
    zipf.close()

    # Trash the temp directory
    shutil.rmtree(temp_project_path)
    shutil.rmtree(temp_package_path)
    if os.path.isdir(venv) and slim_handler:
        # Remove the temporary handler venv folder
        shutil.rmtree(venv)

    return zip_fname
def get_manylinux_wheel(self, package):
    """
    For a given package name, returns a link to the download URL,
    else returns None.
    Related: https://github.com/Miserlou/Zappa/issues/398
    Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae
    """
    metadata_url = 'https://pypi.python.org/pypi/{}/json'.format(package)
    try:
        response = requests.get(metadata_url, timeout=1.5)
        release_data = response.json()
        latest_version = release_data['info']['version']
        # Scan the latest release's files for a wheel matching our ABI tag.
        matches = (
            release_file['url']
            for release_file in release_data['releases'][latest_version]
            if release_file['filename'].endswith(self.manylinux_wheel_file_suffix)
        )
        return next(matches, None)
    except Exception:  # pragma: no cover
        # Any network or parsing problem simply means "no wheel available".
        return None
##
# S3
##
def upload_to_s3(self, source_path, bucket_name):
    r"""
    Given a file, upload it to S3.
    Credentials should be stored in environment variables or ~/.aws/credentials (%USERPROFILE%\.aws\credentials on Windows).

    Creates the bucket when the HEAD request fails.
    The object key is the file's basename.
    Returns True on success, false on failure.
    """
    try:
        self.s3_client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError:
        # This is really stupid S3 quirk. Technically, us-east-1 one has no S3,
        # it's actually "US Standard", or something.
        # More here: https://github.com/boto/boto3/issues/125
        if self.aws_region == 'us-east-1':
            self.s3_client.create_bucket(
                Bucket=bucket_name,
            )
        else:
            self.s3_client.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': self.aws_region},
            )

    # Refuse to upload missing or empty archives.
    if not os.path.isfile(source_path) or os.stat(source_path).st_size == 0:
        print("Problem with source file {}".format(source_path))
        return False

    dest_path = os.path.split(source_path)[1]
    try:
        source_size = os.stat(source_path).st_size
        print("Uploading {0} ({1})..".format(dest_path, human_size(source_size)))
        progress = tqdm(total=float(os.path.getsize(source_path)), unit_scale=True, unit='B')

        # Attempt to upload to S3 using the S3 meta client with the progress bar.
        # If we're unable to do that, try one more time using a session client,
        # which cannot use the progress bar.
        # Related: https://github.com/boto/boto3/issues/611
        try:
            self.s3_client.upload_file(
                source_path, bucket_name, dest_path,
                Callback=progress.update
            )
        except Exception as e:  # pragma: no cover
            self.s3_client.upload_file(source_path, bucket_name, dest_path)

        progress.close()
    except (KeyboardInterrupt, SystemExit):  # pragma: no cover
        raise
    except Exception as e:  # pragma: no cover
        print(e)
        return False
    return True
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
    """
    Copies src file to destination within a bucket.
    """
    try:
        self.s3_client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as err:  # pragma: no cover
        # A 404 from HEAD means the bucket itself is missing; nothing to copy.
        if int(err.response['Error']['Code']) == 404:
            return False

    source = {"Bucket": bucket_name, "Key": src_file_name}
    try:
        self.s3_client.copy(CopySource=source, Bucket=bucket_name, Key=dst_file_name)
    except botocore.exceptions.ClientError:  # pragma: no cover
        return False
    return True
def remove_from_s3(self, file_name, bucket_name):
    """
    Given a file name and a bucket, remove it from S3.
    There's no reason to keep the file hosted on S3 once its been made into a Lambda function, so we can delete it from S3.
    Returns True on success, False on failure.
    """
    try:
        self.s3_client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as err:  # pragma: no cover
        # A 404 from HEAD means the bucket does not exist at all.
        if int(err.response['Error']['Code']) == 404:
            return False

    try:
        self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
    except botocore.exceptions.ClientError:  # pragma: no cover
        return False
    return True
##
# Lambda
##
def create_lambda_function(self,
                           bucket,
                           s3_key,
                           function_name,
                           handler,
                           description='Zappa Deployment',
                           timeout=30,
                           memory_size=512,
                           publish=True,
                           vpc_config=None,
                           dead_letter_config=None,
                           runtime='python2.7',
                           environment_variables=None,
                           aws_kms_key_arn=None
                           ):
    """
    Given a bucket and key of a valid Lambda-zip, a function name and a handler, register that Lambda function.

    Returns the new function's ARN.
    """
    # Normalize the optional arguments to the empty values boto expects.
    vpc_config = vpc_config or {}
    dead_letter_config = dead_letter_config or {}
    environment_variables = environment_variables or {}
    aws_kms_key_arn = aws_kms_key_arn or ''

    if not self.credentials_arn:
        self.get_credentials_arn()

    response = self.lambda_client.create_function(
        FunctionName=function_name,
        Runtime=runtime,
        Role=self.credentials_arn,
        Handler=handler,
        Code={
            'S3Bucket': bucket,
            'S3Key': s3_key,
        },
        Description=description,
        Timeout=timeout,
        MemorySize=memory_size,
        Publish=publish,
        VpcConfig=vpc_config,
        DeadLetterConfig=dead_letter_config,
        Environment={'Variables': environment_variables},
        KMSKeyArn=aws_kms_key_arn
    )
    return response['FunctionArn']
def update_lambda_function(self, bucket, s3_key, function_name, publish=True):
    """
    Point an existing Lambda function at new zipped code in S3.

    Returns the updated function's ARN.
    """
    print("Updating Lambda function code..")
    return self.lambda_client.update_function_code(
        FunctionName=function_name,
        S3Bucket=bucket,
        S3Key=s3_key,
        Publish=publish
    )['FunctionArn']
def update_lambda_configuration(self,
                                lambda_arn,
                                function_name,
                                handler,
                                description='Zappa Deployment',
                                timeout=30,
                                memory_size=512,
                                publish=True,
                                vpc_config=None,
                                runtime='python2.7',
                                environment_variables=None,
                                aws_kms_key_arn=None
                                ):
    """
    Given an existing function ARN, update the configuration variables.

    Note: `lambda_arn` and `publish` are accepted for interface
    compatibility but are not used by this call.
    """
    print("Updating Lambda function configuration..")

    # Normalize the optional arguments to the empty values boto expects.
    vpc_config = vpc_config or {}
    environment_variables = environment_variables or {}
    aws_kms_key_arn = aws_kms_key_arn or ''

    if not self.credentials_arn:
        self.get_credentials_arn()

    response = self.lambda_client.update_function_configuration(
        FunctionName=function_name,
        Runtime=runtime,
        Role=self.credentials_arn,
        Handler=handler,
        Description=description,
        Timeout=timeout,
        MemorySize=memory_size,
        VpcConfig=vpc_config,
        Environment={'Variables': environment_variables},
        KMSKeyArn=aws_kms_key_arn
    )
    return response['FunctionArn']
def invoke_lambda_function(self,
                           function_name,
                           payload,
                           invocation_type='Event',
                           log_type='Tail',
                           client_context=None,
                           qualifier=None
                           ):
    """
    Directly invoke a named Lambda function with a payload.
    Returns the response.

    client_context -- optional base64-encoded context passed to the function.
    qualifier -- optional version or alias to invoke.
    """
    # Previously `client_context` and `qualifier` were accepted but
    # silently dropped; forward them when the caller provides them.
    invoke_kwargs = {
        'FunctionName': function_name,
        'InvocationType': invocation_type,
        'LogType': log_type,
        'Payload': payload,
    }
    if client_context is not None:
        invoke_kwargs['ClientContext'] = client_context
    if qualifier is not None:
        invoke_kwargs['Qualifier'] = qualifier
    return self.lambda_client.invoke(**invoke_kwargs)
def rollback_lambda_function_version(self, function_name, versions_back=1, publish=True):
    """
    Rollback the lambda function code 'versions_back' number of revisions.

    Downloads the older version's zip from the URL Lambda provides and
    re-uploads it as the current code. Returns the Function ARN, or False
    when there aren't enough revisions or the download fails.
    """
    response = self.lambda_client.list_versions_by_function(FunctionName=function_name)

    # Take into account $LATEST
    if len(response['Versions']) < versions_back + 1:
        print("We do not have {} revisions. Aborting".format(str(versions_back)))
        return False

    # Numeric versions only, sorted newest first.
    revisions = [int(revision['Version']) for revision in response['Versions'] if revision['Version'] != '$LATEST']
    revisions.sort(reverse=True)

    response = self.lambda_client.get_function(FunctionName='function:{}:{}'.format(function_name, revisions[versions_back]))
    # 'Code.Location' is a pre-signed URL to the stored deployment package.
    response = requests.get(response['Code']['Location'])

    if response.status_code != 200:
        print("Failed to get version {} of {} code".format(versions_back, function_name))
        return False

    response = self.lambda_client.update_function_code(FunctionName=function_name, ZipFile=response.content, Publish=publish)  # pragma: no cover

    return response['FunctionArn']
def get_lambda_function(self, function_name):
    """
    Returns the lambda function ARN, given a name
    This requires the "lambda:GetFunction" role.
    """
    function_info = self.lambda_client.get_function(FunctionName=function_name)
    return function_info['Configuration']['FunctionArn']
def get_lambda_function_versions(self, function_name):
    """
    Simply returns the versions available for a Lambda function, given a function name.
    """
    try:
        listing = self.lambda_client.list_versions_by_function(
            FunctionName=function_name
        )
    except Exception:
        # An unknown function (or API trouble) is reported as "no versions".
        return []
    return listing.get('Versions', [])
def delete_lambda_function(self, function_name):
    """
    Given a function name, delete it from AWS Lambda.

    Returns the response.
    """
    print("Deleting Lambda function..")
    return self.lambda_client.delete_function(FunctionName=function_name)
##
# API Gateway
##
def create_api_gateway_routes(self,
                              lambda_arn,
                              api_name=None,
                              api_key_required=False,
                              authorization_type='NONE',
                              authorizer=None,
                              cors_options=None,
                              description=None
                              ):
    """
    Create the API Gateway for this Zappa deployment.

    Registers the RestApi, an optional Authorizer, methods for the root ("/")
    resource, and a greedy "{proxy+}" resource on self.cf_template, all
    proxying to the given Lambda ARN.

    Returns the new RestAPI CF resource.
    """
    restapi = troposphere.apigateway.RestApi('Api')
    restapi.Name = api_name or lambda_arn.split(':')[-1]
    if not description:
        description = 'Created automatically by Zappa.'
    restapi.Description = description
    self.cf_template.add_resource(restapi)

    root_id = troposphere.GetAtt(restapi, 'RootResourceId')
    invocations_uri = 'arn:aws:apigateway:' + self.boto_session.region_name + ':lambda:path/2015-03-31/functions/' + lambda_arn + '/invocations'

    ##
    # The Resources
    ##
    authorizer_resource = None
    if authorizer:
        # The authorizer may use its own Lambda; fall back to the app's Lambda.
        authorizer_lambda_arn = authorizer.get('arn', lambda_arn)
        lambda_uri = 'arn:aws:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations'.format(
            region_name=self.boto_session.region_name,
            lambda_arn=authorizer_lambda_arn
        )
        authorizer_resource = self.create_authorizer(
            restapi, lambda_uri, authorizer
        )

    # Root resource ("/"), depth 0.
    self.create_and_setup_methods(restapi, root_id, api_key_required, invocations_uri,
                                  authorization_type, authorizer_resource, 0)

    if cors_options is not None:
        self.create_and_setup_cors(restapi, root_id, invocations_uri, 0, cors_options)

    # Greedy "{proxy+}" resource, depth 1: catches every sub-path.
    resource = troposphere.apigateway.Resource('ResourceAnyPathSlashed')
    self.cf_api_resources.append(resource.title)
    resource.RestApiId = troposphere.Ref(restapi)
    resource.ParentId = root_id
    resource.PathPart = "{proxy+}"
    self.cf_template.add_resource(resource)

    self.create_and_setup_methods(restapi, resource, api_key_required, invocations_uri,
                                  authorization_type, authorizer_resource, 1) # pragma: no cover

    if cors_options is not None:
        self.create_and_setup_cors(restapi, resource, invocations_uri, 1, cors_options) # pragma: no cover
    return restapi
def create_authorizer(self, restapi, uri, authorizer):
    """
    Create Authorizer for API gateway.

    'authorizer' is a settings dict; keys read here: 'type', 'name',
    'validation_expression', 'token_header', 'result_ttl', 'arn' (read by the
    caller) and, for COGNITO_USER_POOLS, 'provider_arns'.

    Returns the troposphere Authorizer resource (also added to the template).
    """
    authorizer_type = authorizer.get("type", "TOKEN").upper()
    identity_validation_expression = authorizer.get('validation_expression', None)

    authorizer_resource = troposphere.apigateway.Authorizer("Authorizer")
    authorizer_resource.RestApiId = troposphere.Ref(restapi)
    authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer")
    authorizer_resource.Type = authorizer_type
    authorizer_resource.AuthorizerUri = uri
    # API Gateway reads the token from this request header.
    authorizer_resource.IdentitySource = "method.request.header.%s" % authorizer.get('token_header', 'Authorization')
    if identity_validation_expression:
        authorizer_resource.IdentityValidationExpression = identity_validation_expression

    if authorizer_type == 'TOKEN':
        # TOKEN authorizers invoke a Lambda, which requires the Zappa role ARN.
        if not self.credentials_arn:
            self.get_credentials_arn()
        authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get('result_ttl', 300)
        authorizer_resource.AuthorizerCredentials = self.credentials_arn
    if authorizer_type == 'COGNITO_USER_POOLS':
        authorizer_resource.ProviderARNs = authorizer.get('provider_arns')

    self.cf_api_resources.append(authorizer_resource.title)
    self.cf_template.add_resource(authorizer_resource)

    return authorizer_resource
def create_and_setup_methods(self,
                             restapi,
                             resource,
                             api_key_required,
                             uri,
                             authorization_type,
                             authorizer_resource,
                             depth
                             ):
    """
    Set up the methods, integration responses and method responses for a given API Gateway resource.

    One Method (named "<http_method><depth>") is created per entry in
    self.http_methods, each with an AWS_PROXY integration to 'uri'.
    'resource' may be a troposphere Resource or a raw resource id (root).
    """
    for method_name in self.http_methods:
        method = troposphere.apigateway.Method(method_name + str(depth))
        method.RestApiId = troposphere.Ref(restapi)
        # The root resource id is passed directly; child resources by Ref.
        if type(resource) is troposphere.apigateway.Resource:
            method.ResourceId = troposphere.Ref(resource)
        else:
            method.ResourceId = resource
        method.HttpMethod = method_name.upper()
        method.AuthorizationType = authorization_type
        if authorizer_resource:
            method.AuthorizerId = troposphere.Ref(authorizer_resource)
        method.ApiKeyRequired = api_key_required
        method.MethodResponses = []
        self.cf_template.add_resource(method)
        self.cf_api_resources.append(method.title)

        if not self.credentials_arn:
            self.get_credentials_arn()
        credentials = self.credentials_arn  # This must be a Role ARN

        # AWS_PROXY integrations always POST to the Lambda invoke endpoint,
        # regardless of the client-facing HTTP method.
        integration = troposphere.apigateway.Integration()
        integration.CacheKeyParameters = []
        integration.CacheNamespace = 'none'
        integration.Credentials = credentials
        integration.IntegrationHttpMethod = 'POST'
        integration.IntegrationResponses = []
        integration.PassthroughBehavior = 'NEVER'
        integration.Type = 'AWS_PROXY'
        integration.Uri = uri
        method.Integration = integration
def create_and_setup_cors(self, restapi, resource, uri, depth, config):
    """
    Set up an OPTIONS method with a MOCK integration that returns the
    configured CORS headers for a given API Gateway resource.

    'config' may be True (use all defaults) or a dict with optional keys
    'allowed_headers', 'allowed_methods' and 'allowed_origin'.
    """
    if config is True:
        # Bare `cors: true` means "use the defaults below".
        config = {}
    method_name = "OPTIONS"
    method = troposphere.apigateway.Method(method_name + str(depth))
    method.RestApiId = troposphere.Ref(restapi)
    # The root resource id is passed directly; child resources by Ref.
    if type(resource) is troposphere.apigateway.Resource:
        method.ResourceId = troposphere.Ref(resource)
    else:
        method.ResourceId = resource
    method.HttpMethod = method_name.upper()
    method.AuthorizationType = "NONE"
    method_response = troposphere.apigateway.MethodResponse()
    method_response.ResponseModels = {
        "application/json": "Empty"
    }
    # Header values are wrapped in single quotes because API Gateway expects
    # static mapping values to be quoted string literals.
    response_headers = {
        "Access-Control-Allow-Headers": "'%s'" % ",".join(config.get(
            "allowed_headers", ["Content-Type", "X-Amz-Date",
                                "Authorization", "X-Api-Key",
                                "X-Amz-Security-Token"])),
        "Access-Control-Allow-Methods": "'%s'" % ",".join(config.get(
            "allowed_methods", ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"])),
        "Access-Control-Allow-Origin": "'%s'" % config.get(
            "allowed_origin", "*")
    }
    method_response.ResponseParameters = {
        "method.response.header.%s" % key: True for key in response_headers
    }
    method_response.StatusCode = "200"
    method.MethodResponses = [
        method_response
    ]
    self.cf_template.add_resource(method)
    self.cf_api_resources.append(method.title)

    # MOCK integration: API Gateway answers the preflight itself, without
    # invoking the Lambda.
    integration = troposphere.apigateway.Integration()
    integration.Type = 'MOCK'
    integration.PassthroughBehavior = 'NEVER'
    integration.RequestTemplates = {
        "application/json": "{\"statusCode\": 200}"
    }
    integration_response = troposphere.apigateway.IntegrationResponse()
    integration_response.ResponseParameters = {
        "method.response.header.%s" % key: value for key, value in response_headers.items()
    }
    integration_response.ResponseTemplates = {
        "application/json": ""
    }
    integration_response.StatusCode = "200"
    integration.IntegrationResponses = [
        integration_response
    ]

    integration.Uri = uri
    method.Integration = integration
def deploy_api_gateway(self,
                       api_id,
                       stage_name,
                       stage_description="",
                       description="",
                       cache_cluster_enabled=False,
                       cache_cluster_size='0.5',
                       variables=None,
                       cloudwatch_log_level='OFF',
                       cloudwatch_data_trace=False,
                       cloudwatch_metrics_enabled=False
                       ):
    """
    Deploy the API Gateway!

    Creates a deployment for the given stage, then patches the stage's
    CloudWatch logging/metrics settings.

    Return the deployed API URL.
    """
    print("Deploying API Gateway..")

    self.apigateway_client.create_deployment(
        restApiId=api_id,
        stageName=stage_name,
        stageDescription=stage_description,
        description=description,
        cacheClusterEnabled=cache_cluster_enabled,
        cacheClusterSize=cache_cluster_size,
        variables=variables or {}
    )

    # Fall back to 'OFF' for unrecognized log levels.
    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = 'OFF'

    self.apigateway_client.update_stage(
        restApiId=api_id,
        stageName=stage_name,
        patchOperations=[
            self.get_patch_op('logging/loglevel', cloudwatch_log_level),
            self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
            self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
        ]
    )

    return "https://{}.execute-api.{}.amazonaws.com/{}".format(api_id, self.boto_session.region_name, stage_name)
def add_binary_support(self, api_id):
    """
    Add binary support
    """
    # Only patch the API if the wildcard media type isn't registered yet.
    rest_api = self.apigateway_client.get_rest_api(restApiId=api_id)
    if "*/*" not in rest_api.get("binaryMediaTypes", []):
        # '*~1*' is the path-escaped form of '*/*'.
        patch = {'op': "add", 'path': '/binaryMediaTypes/*~1*'}
        self.apigateway_client.update_rest_api(
            restApiId=api_id,
            patchOperations=[patch]
        )
def remove_binary_support(self, api_id):
    """
    Remove binary support
    """
    # Only patch the API if the wildcard media type is currently registered.
    rest_api = self.apigateway_client.get_rest_api(restApiId=api_id)
    if "*/*" in rest_api.get("binaryMediaTypes", []):
        # '*~1*' is the path-escaped form of '*/*'.
        patch = {'op': 'remove', 'path': '/binaryMediaTypes/*~1*'}
        self.apigateway_client.update_rest_api(
            restApiId=api_id,
            patchOperations=[patch]
        )
def get_api_keys(self, api_id, stage_name):
    """
    Generator that allows to iterate per API keys associated to an api_id and a stage_name.

    Yields the key id for each API key whose stageKeys contain
    "<api_id>/<stage_name>".
    """
    # NOTE(review): only the first 500 keys are examined -- keys beyond the
    # first page are silently skipped (no pagination here).
    response = self.apigateway_client.get_api_keys(limit=500)
    stage_key = '{}/{}'.format(api_id, stage_name)
    for api_key in response.get('items'):
        if stage_key in api_key.get('stageKeys'):
            yield api_key.get('id')
def create_api_key(self, api_id, stage_name):
    """
    Create new API key and link it with an api_id and a stage_name

    The key is named "<stage_name>_<api_id>" (remove_api_key relies on this
    naming scheme to find it again). Prints the new key id; returns None.
    """
    response = self.apigateway_client.create_api_key(
        name='{}_{}'.format(stage_name, api_id),
        description='Api Key for {}'.format(api_id),
        enabled=True,
        stageKeys=[
            {
                'restApiId': '{}'.format(api_id),
                'stageName': '{}'.format(stage_name)
            },
        ]
    )
    print('Created a new x-api-key: {}'.format(response['id']))
def remove_api_key(self, api_id, stage_name):
    """
    Remove a generated API key for api_id and stage_name
    """
    # Keys are created with the name "<stage_name>_<api_id>" (see
    # create_api_key), so look the key up by that name.
    response = self.apigateway_client.get_api_keys(
        limit=1,
        nameQuery='{}_{}'.format(stage_name, api_id)
    )
    for api_key in response.get('items'):
        self.apigateway_client.delete_api_key(
            apiKey="{}".format(api_key['id'])
        )
def add_api_stage_to_api_key(self, api_key, api_id, stage_name):
    """
    Add api stage to Api key
    """
    # Associate the "<api_id>/<stage_name>" stage with the key via a patch op.
    stage_patch = {
        'op': 'add',
        'path': '/stages',
        'value': '{}/{}'.format(api_id, stage_name)
    }
    self.apigateway_client.update_api_key(
        apiKey=api_key,
        patchOperations=[stage_patch]
    )
def get_patch_op(self, keypath, value, op='replace'):
    """
    Return an object that describes a change of configuration on the given staging.
    Setting will be applied on all available HTTP methods.
    """
    # API Gateway patch values must be strings; booleans become 'true'/'false'.
    patch_value = str(value).lower() if isinstance(value, bool) else value
    return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': patch_value}
def get_rest_apis(self, project_name):
    """
    Generator that allows to iterate per every available apis.

    Yields only the REST APIs whose name equals project_name.
    """
    # NOTE(review): only the first 500 APIs are examined (no pagination).
    all_apis = self.apigateway_client.get_rest_apis(
        limit=500
    )

    for api in all_apis['items']:
        if api['name'] != project_name:
            continue
        yield api
def undeploy_api_gateway(self, lambda_name, domain_name=None):
    """
    Delete a deployed REST API Gateway.

    Prefers deleting the CloudFormation stack; falls back to deleting
    matching REST APIs directly for pre-CF deployments.
    """
    print("Deleting API Gateway..")

    # NOTE(review): api_id is looked up but never used in this method.
    api_id = self.get_api_id(lambda_name)

    if domain_name:

        # XXX - Remove Route53 smartly here?
        # XXX - This doesn't raise, but doesn't work either.

        try:
            self.apigateway_client.delete_base_path_mapping(
                domainName=domain_name,
                basePath='(none)'
            )
        except Exception as e:
            # We may not have actually set up the domain.
            pass

    was_deleted = self.delete_stack(lambda_name, wait=True)

    if not was_deleted:
        # try erasing it with the older method
        for api in self.get_rest_apis(lambda_name):
            self.apigateway_client.delete_rest_api(
                restApiId=api['id']
            )
def update_stage_config(self,
                        project_name,
                        stage_name,
                        cloudwatch_log_level,
                        cloudwatch_data_trace,
                        cloudwatch_metrics_enabled
                        ):
    """
    Update CloudWatch metrics configuration.

    Applies logging/metrics patch operations to the given stage on every
    REST API whose name matches project_name.
    """
    # Fall back to 'OFF' for unrecognized log levels.
    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = 'OFF'

    for api in self.get_rest_apis(project_name):
        self.apigateway_client.update_stage(
            restApiId=api['id'],
            stageName=stage_name,
            patchOperations=[
                self.get_patch_op('logging/loglevel', cloudwatch_log_level),
                self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
                self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
            ]
        )
def delete_stack(self, name, wait=False):
    """
    Delete the CF stack managed by Zappa.

    Only deletes stacks carrying a 'ZappaProject' tag equal to 'name', so an
    unrelated stack that happens to share the name is left alone.

    Returns True if deletion was initiated (or completed, when wait=True),
    False otherwise.
    """
    try:
        stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
    # Narrowed from a bare `except:` -- describe_stacks signals a missing
    # stack via ClientError (same handling as stack_outputs/update_stack).
    except botocore.client.ClientError: # pragma: no cover
        print('No Zappa stack named {0}'.format(name))
        return False

    tags = {x['Key']:x['Value'] for x in stack['Tags']}
    if tags.get('ZappaProject') == name:
        self.cf_client.delete_stack(StackName=name)
        if wait:
            waiter = self.cf_client.get_waiter('stack_delete_complete')
            print('Waiting for stack {0} to be deleted..'.format(name))
            waiter.wait(StackName=name)
        return True
    else:
        print('ZappaProject tag not found on {0}, doing nothing'.format(name))
        return False
def create_stack_template(self,
                          lambda_arn,
                          lambda_name,
                          api_key_required,
                          iam_authorization,
                          authorizer,
                          cors_options=None,
                          description=None
                          ):
    """
    Build the entire CF stack.
    Just used for the API Gateway, but could be expanded in the future.

    Returns the populated troposphere Template (also kept on self.cf_template).
    """
    auth_type = "NONE"
    if iam_authorization and authorizer:
        # logger.warn is a deprecated alias; use warning().
        logger.warning("Both IAM Authorization and Authorizer are specified, this is not possible. "
                       "Setting Auth method to IAM Authorization")
        authorizer = None
        auth_type = "AWS_IAM"
    elif iam_authorization:
        auth_type = "AWS_IAM"
    elif authorizer:
        auth_type = authorizer.get("type", "CUSTOM")

    # Build a fresh template so repeated calls don't accumulate resources.
    self.cf_template = troposphere.Template()
    self.cf_template.add_description('Automatically generated with Zappa')
    self.cf_api_resources = []
    self.cf_parameters = {}

    # Registers the RestApi and all its resources/methods on self.cf_template.
    self.create_api_gateway_routes(lambda_arn, lambda_name, api_key_required,
                                   auth_type, authorizer, cors_options, description)
    return self.cf_template
def update_stack(self, name, working_bucket, wait=False, update_only=False):
    """
    Update or create the CF stack managed by Zappa.

    Serializes self.cf_template to a timestamped JSON file, uploads it to
    'working_bucket', then creates or updates the stack from that URL.
    With wait=True, polls the stack status (with a tqdm progress bar) until
    it completes or fails. The temporary template file is removed from disk
    and from S3 afterwards.
    """
    capabilities = []

    template = name + '-template-' + str(int(time.time())) + '.json'
    with open(template, 'wb') as out:
        out.write(bytes(self.cf_template.to_json(indent=None, separators=(',',':')), "utf-8"))

    self.upload_to_s3(template, working_bucket)

    url = 'https://s3.amazonaws.com/{0}/{1}'.format(working_bucket, template)

    tags = [{'Key':'ZappaProject','Value':name}]

    # Probe for an existing stack to decide between create and update.
    update = True
    try:
        self.cf_client.describe_stacks(StackName=name)
    except botocore.client.ClientError:
        update = False

    if update_only and not update:
        print('CloudFormation stack missing, re-deploy to enable updates')
        return

    if not update:
        self.cf_client.create_stack(StackName=name,
                                    Capabilities=capabilities,
                                    TemplateURL=url,
                                    Tags=tags)
        print('Waiting for stack {0} to create (this can take a bit)..'.format(name))
    else:
        try:
            self.cf_client.update_stack(StackName=name,
                                        Capabilities=capabilities,
                                        TemplateURL=url,
                                        Tags=tags)
            print('Waiting for stack {0} to update..'.format(name))
        except botocore.client.ClientError as e:
            # A no-op update is not an error; just skip the wait loop.
            if e.response['Error']['Message'] == 'No updates are to be performed.':
                wait = False
            else:
                raise

    if wait:
        total_resources = len(self.cf_template.resources)
        current_resources = 0
        sr = self.cf_client.get_paginator('list_stack_resources')
        progress = tqdm(total=total_resources, unit='res')
        # Poll every 3 seconds until the stack reaches a terminal status.
        while True:
            time.sleep(3)
            result = self.cf_client.describe_stacks(StackName=name)
            if not result['Stacks']:
                continue  # might need to wait a bit

            if result['Stacks'][0]['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
                break

            # Something has gone wrong.
            # Is raising enough? Should we also remove the Lambda function?
            if result['Stacks'][0]['StackStatus'] in [
                'DELETE_COMPLETE',
                'DELETE_IN_PROGRESS',
                'ROLLBACK_IN_PROGRESS',
                'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
                'UPDATE_ROLLBACK_COMPLETE'
            ]:
                raise EnvironmentError("Stack creation failed. "
                                       "Please check your CloudFormation console. "
                                       "You may also need to `undeploy`.")

            # Count completed resources across all pages to drive the bar.
            count = 0
            for result in sr.paginate(StackName=name):
                done = (1 for x in result['StackResourceSummaries']
                        if 'COMPLETE' in x['ResourceStatus'])
                count += sum(done)
            if count:
                # We can end up in a situation where we have more resources being created
                # than anticipated.
                if (count - current_resources) > 0:
                    progress.update(count - current_resources)
            current_resources = count
        progress.close()

    try:
        os.remove(template)
    except OSError:
        pass

    self.remove_from_s3(template, working_bucket)
def stack_outputs(self, name):
    """
    Given a name, describes the CloudFormation stack and returns a dict of
    the stack Outputs (OutputKey -> OutputValue), else returns an empty dict.
    """
    try:
        stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
        return {x['OutputKey']: x['OutputValue'] for x in stack['Outputs']}
    except botocore.client.ClientError:
        # Stack doesn't exist (or can't be described): no outputs.
        return {}
def get_api_url(self, lambda_name, stage_name):
    """
    Given a lambda_name and stage_name, return a valid API URL.
    """
    api_id = self.get_api_id(lambda_name)
    # No API id means nothing was deployed; there is no URL to build.
    if not api_id:
        return None
    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name)
def get_api_id(self, lambda_name):
    """
    Given a lambda_name, return the API id.

    First looks up the 'Api' resource of the project's CloudFormation stack;
    falls back to scanning API Gateway by name for pre-CF deployments.
    Returns None when no API can be found.
    """
    try:
        response = self.cf_client.describe_stack_resource(StackName=lambda_name,
                                                          LogicalResourceId='Api')
        return response['StackResourceDetail'].get('PhysicalResourceId', None)
    except: # pragma: no cover
        try:
            # Try the old method (project was probably made on an older, non CF version)
            # NOTE(review): only the first 500 APIs are scanned (no pagination).
            response = self.apigateway_client.get_rest_apis(limit=500)

            for item in response['items']:
                if item['name'] == lambda_name:
                    return item['id']

            logger.exception('Could not get API ID.')
            return None
        except: # pragma: no cover
            # We don't even have an API deployed. That's okay!
            return None
def create_domain_name(self,
                       domain_name,
                       certificate_name,
                       certificate_body=None,
                       certificate_private_key=None,
                       certificate_chain=None,
                       certificate_arn=None,
                       lambda_name=None,
                       stage=None):
    """
    Creates the API GW domain and returns the resulting DNS name.

    Uses an uploaded certificate (body/key/chain) when no certificate_arn is
    given, otherwise references the existing ACM certificate. Also maps the
    domain's root base path to the given deployed API stage.

    Raises LookupError when no API id can be found for lambda_name.
    """
    # This is a Let's Encrypt or custom certificate
    if not certificate_arn:
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateBody=certificate_body,
            certificatePrivateKey=certificate_private_key,
            certificateChain=certificate_chain
        )
    # This is an AWS ACM-hosted Certificate
    else:
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateArn=certificate_arn
        )

    api_id = self.get_api_id(lambda_name)
    if not api_id:
        raise LookupError("No API URL to certify found - did you deploy?")

    # Map the domain root ('') to the deployed stage.
    self.apigateway_client.create_base_path_mapping(
        domainName=domain_name,
        basePath='',
        restApiId=api_id,
        stage=stage
    )

    return agw_response['distributionDomainName']
def update_route53_records(self, domain_name, dns_name):
    """
    Updates Route53 Records following GW domain creation

    Upserts either an A alias record (when domain_name is the hosted zone
    apex) or a CNAME record pointing at the API Gateway distribution DNS
    name. Returns the change_resource_record_sets response.
    """
    zone_id = self.get_hosted_zone_id_for_domain(domain_name)

    # Apex domains can't be CNAMEs; they need an alias record instead.
    is_apex = self.route53.get_hosted_zone(Id=zone_id)['HostedZone']['Name'][:-1] == domain_name
    if is_apex:
        record_set = {
            'Name': domain_name,
            'Type': 'A',
            'AliasTarget': {
                'HostedZoneId': 'Z2FDTNDATAQYW2',  # This is a magic value that means "CloudFront"
                'DNSName': dns_name,
                'EvaluateTargetHealth': False
            }
        }
    else:
        record_set = {
            'Name': domain_name,
            'Type': 'CNAME',
            'ResourceRecords': [
                {
                    'Value': dns_name
                }
            ],
            'TTL': 60
        }

    # Related: https://github.com/boto/boto3/issues/157
    # and: http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html
    # and policy: https://spin.atomicobject.com/2016/04/28/route-53-hosted-zone-managment/
    # pure_zone_id = zone_id.split('/hostedzone/')[1]

    # XXX: ClientError: An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
    # Tried to create an alias that targets d1awfeji80d0k2.cloudfront.net., type A in zone Z1XWOQP59BYF6Z,
    # but the alias target name does not lie within the target zone
    response = self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            'Changes': [
                {
                    'Action': 'UPSERT',
                    'ResourceRecordSet': record_set
                }
            ]
        }
    )

    return response
def update_domain_name(self,
                       domain_name,
                       certificate_name=None,
                       certificate_body=None,
                       certificate_private_key=None,
                       certificate_chain=None,
                       certificate_arn=None,
                       lambda_name=None,
                       stage=None,
                       route53=True):
    """
    This doesn't quite do what it seems like it should do.

    Unfortunately, there is currently no way to programatically rotate the
    certificate for a currently deployed domain.

    So, what we can do instead is delete the record of it and then recreate it.

    The problem is that this causes a period of downtime. This could take up to 40 minutes,
    in theory, but in practice this seems to only take (way) less than a minute, making it
    at least somewhat acceptable.

    Related issues:     https://github.com/Miserlou/Zappa/issues/590
                        https://github.com/Miserlou/Zappa/issues/588
                        https://github.com/Miserlou/Zappa/pull/458
    """
    print("Updating domain name!")

    # Timestamp suffix keeps each recreated certificate name unique.
    certificate_name = certificate_name + str(time.time())

    # NOTE(review): the fetched domain config is unused -- delete/recreate
    # relies entirely on the arguments passed in.
    api_gateway_domain = self.apigateway_client.get_domain_name(domainName=domain_name)
    self.apigateway_client.delete_domain_name(domainName=domain_name)
    dns_name = self.create_domain_name(domain_name,
                                       certificate_name,
                                       certificate_body,
                                       certificate_private_key,
                                       certificate_chain,
                                       certificate_arn,
                                       lambda_name,
                                       stage)
    if route53:
        self.update_route53_records(domain_name, dns_name)

    return
def get_domain_name(self, domain_name):
    """
    Scan our hosted zones for the record of a given name.

    Returns the record entry, else None.
    """
    # Make sure api gateway domain is present
    try:
        self.apigateway_client.get_domain_name(domainName=domain_name)
    except Exception:
        return None

    try:
        zones = self.route53.list_hosted_zones()
        for zone in zones['HostedZones']:
            records = self.route53.list_resource_record_sets(HostedZoneId=zone['Id'])
            for record in records['ResourceRecordSets']:
                # Record names come back with a trailing dot; strip it to compare.
                if record['Type'] in ('CNAME', 'A') and record['Name'][:-1] == domain_name:
                    return record

    except Exception as e:
        return None

    ##
    # Old, automatic logic.
    # If re-introduced, should be moved to a new function.
    # Related ticket: https://github.com/Miserlou/Zappa/pull/458
    ##

    # We may be in a position where Route53 doesn't have a domain, but the API Gateway does.
    # We need to delete this before we can create the new Route53.
    # try:
    #     api_gateway_domain = self.apigateway_client.get_domain_name(domainName=domain_name)
    #     self.apigateway_client.delete_domain_name(domainName=domain_name)
    # except Exception:
    #     pass

    return None
##
# IAM
##
def get_credentials_arn(self):
    """
    Given our role name, get and set the credentials_arn.
    """
    # Resolve the IAM role and cache its ARN on the instance for later callers.
    role = self.iam.Role(self.role_name)
    arn = role.arn
    self.credentials_arn = arn
    return role, arn
def create_iam_roles(self):
    """
    Create and defines the IAM roles and policies necessary for Zappa.

    If the IAM role already exists, it will be updated if necessary.

    Returns (credentials_arn, updated), where 'updated' is True when any
    role or policy change was made.
    """
    attach_policy_obj = json.loads(self.attach_policy)
    assume_policy_obj = json.loads(self.assume_policy)

    # Fold any user-supplied extra permission statements into the attach policy.
    if self.extra_permissions:
        for permission in self.extra_permissions:
            attach_policy_obj['Statement'].append(dict(permission))
        self.attach_policy = json.dumps(attach_policy_obj)

    updated = False

    # Create the role if needed
    try:
        role, credentials_arn = self.get_credentials_arn()

    except botocore.client.ClientError:
        print("Creating " + self.role_name + " IAM Role..")

        role = self.iam.create_role(
            RoleName=self.role_name,
            AssumeRolePolicyDocument=self.assume_policy
        )
        self.credentials_arn = role.arn
        updated = True

    # create or update the role's policies if needed
    policy = self.iam.RolePolicy(self.role_name, 'zappa-permissions')
    try:
        if policy.policy_document != attach_policy_obj:
            print("Updating zappa-permissions policy on " + self.role_name + " IAM Role.")

            policy.put(PolicyDocument=self.attach_policy)
            updated = True

    except botocore.client.ClientError:
        # Policy doesn't exist yet -- create it.
        print("Creating zappa-permissions policy on " + self.role_name + " IAM Role.")
        policy.put(PolicyDocument=self.attach_policy)
        updated = True

    # NOTE(review): the assume-role policy is only rewritten when BOTH the
    # documents differ AND the service-principal sets differ -- confirm that
    # 'and' (rather than 'or') is the intended condition here.
    if role.assume_role_policy_document != assume_policy_obj and \
            set(role.assume_role_policy_document['Statement'][0]['Principal']['Service']) != set(assume_policy_obj['Statement'][0]['Principal']['Service']):
        print("Updating assume role policy on " + self.role_name + " IAM Role.")
        self.iam_client.update_assume_role_policy(
            RoleName=self.role_name,
            PolicyDocument=self.assume_policy
        )
        updated = True

    return self.credentials_arn, updated
def _clear_policy(self, lambda_name):
    """
    Remove obsolete policy statements to prevent policy from bloating over the limit after repeated updates.

    Best-effort: a missing function/policy is logged at debug level, any
    other client error at error level; nothing is raised.
    """
    try:
        policy_response = self.lambda_client.get_policy(
            FunctionName=lambda_name
        )
        if policy_response['ResponseMetadata']['HTTPStatusCode'] == 200:
            statement = json.loads(policy_response['Policy'])['Statement']
            # Drop every existing statement; fresh ones are re-added on schedule.
            for s in statement:
                delete_response = self.lambda_client.remove_permission(
                    FunctionName=lambda_name,
                    StatementId=s['Sid']
                )
                if delete_response['ResponseMetadata']['HTTPStatusCode'] != 204:
                    # Bug fix: the original called .format() with no argument,
                    # which raises IndexError instead of logging the response.
                    logger.error('Failed to delete an obsolete policy statement: {}'.format(delete_response))
        else:
            logger.debug('Failed to load Lambda function policy: {}'.format(policy_response))
    except ClientError as e:
        if e.args[0].find('ResourceNotFoundException') > -1:
            logger.debug('No policy found, must be first run.')
        else:
            logger.error('Unexpected client error {}'.format(e.args[0]))
##
# CloudWatch Events
##
def create_event_permission(self, lambda_name, principal, source_arn):
    """
    Create permissions to link to an event.

    Grants 'principal' (e.g. 'events.amazonaws.com') permission to invoke
    the Lambda from 'source_arn', under a random 8-character statement id.
    Returns the add_permission response, or None on a non-201 status.

    Related: http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-configure-event-source.html
    """
    logger.debug('Adding new permission to invoke Lambda function: {}'.format(lambda_name))

    permission_response = self.lambda_client.add_permission(
        FunctionName=lambda_name,
        StatementId=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8)),
        Action='lambda:InvokeFunction',
        Principal=principal,
        SourceArn=source_arn,
    )

    if permission_response['ResponseMetadata']['HTTPStatusCode'] != 201:
        print('Problem creating permission to invoke Lambda function')
        return None  # XXX: Raise?

    return permission_response
def schedule_events(self, lambda_arn, lambda_name, events, default=True):
    """
    Given a Lambda ARN, name and a list of events, schedule this as CloudWatch Events.

    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.

    Expressions can be in rate or cron format:
        http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html

    Existing rules/permissions are removed first, so this is a full re-sync.
    """

    # The two stream sources - DynamoDB and Kinesis - are working differently than the other services (pull vs push)
    # and do not require event permissions. They do require additional permissions on the Lambda roles though.
    # http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html
    pull_services = ['dynamodb', 'kinesis']

    # XXX: Not available in Lambda yet.
    # We probably want to execute the latest code.
    # if default:
    #     lambda_arn = lambda_arn + ":$LATEST"

    # Start from a clean slate (excluding pull-based stream sources).
    self.unschedule_events(lambda_name=lambda_name, lambda_arn=lambda_arn, events=events,
                           excluded_source_services=pull_services)
    for event in events:
        function = event['function']
        expression = event.get('expression', None)
        event_source = event.get('event_source', None)
        name = self.get_scheduled_event_name(event, function, lambda_name)
        description = event.get('description', function)

        #   - If 'cron' or 'rate' in expression, use ScheduleExpression
        #   - Else, use EventPattern
        #       - ex https://github.com/awslabs/aws-lambda-ddns-function

        if not self.credentials_arn:
            self.get_credentials_arn()

        if expression:
            # Time-based schedule: a CloudWatch Events rule + Lambda target.
            rule_response = self.events_client.put_rule(
                Name=name,
                ScheduleExpression=expression,
                State='ENABLED',
                Description=description,
                RoleArn=self.credentials_arn
            )

            if 'RuleArn' in rule_response:
                logger.debug('Rule created. ARN {}'.format(rule_response['RuleArn']))

            # Specific permissions are necessary for any trigger to work.
            self.create_event_permission(lambda_name, 'events.amazonaws.com', rule_response['RuleArn'])

            # Create the CloudWatch event ARN for this function.
            target_response = self.events_client.put_targets(
                Rule=name,
                Targets=[
                    {
                        'Id': 'Id' + ''.join(random.choice(string.digits) for _ in range(12)),
                        'Arn': lambda_arn,
                    }
                ]
            )

            if target_response['ResponseMetadata']['HTTPStatusCode'] == 200:
                print("Scheduled {}!".format(name))
            else:
                print("Problem scheduling {}.".format(name))

        elif event_source:
            service = self.service_from_arn(event_source['arn'])
            if service not in pull_services:
                # Push-based sources need invoke permission before wiring up.
                svc = ','.join(event['event_source']['events'])
                self.create_event_permission(
                    lambda_name,
                    service + '.amazonaws.com',
                    event['event_source']['arn']
                )
            else:
                svc = service

            rule_response = add_event_source(
                event_source,
                lambda_arn,
                function,
                self.boto_session
            )

            if rule_response == 'successful':
                print("Created {} event schedule for {}!".format(svc, function))
            elif rule_response == 'failed':
                print("Problem creating {} event schedule for {}!".format(svc, function))
            elif rule_response == 'exists':
                print("{} event schedule for {} already exists - Nothing to do here.".format(svc, function))
            elif rule_response == 'dryrun':
                print("Dryrun for creating {} event schedule for {}!!".format(svc, function))
        else:
            print("Could not create event {} - Please define either an expression or an event source".format(name))
@staticmethod
def get_scheduled_event_name(event, function, lambda_name):
    # Use the custom event name when one was supplied, otherwise the function path.
    name = event.get('name', function)
    if name != function:
        # a custom event name has been provided, make sure function name is included as postfix,
        # otherwise zappa's handler won't be able to locate the function.
        name = '-'.join((name, function))
    # prefix scheduled event names with lambda name. So we can look them up later via the prefix.
    return Zappa.get_event_name(lambda_name, name)
@staticmethod
def get_event_name(lambda_name, name):
"""
Returns an AWS-valid Lambda event name.
"""
return '{prefix:.{width}}-{postfix}'.format(prefix=lambda_name, width=max(0, 63 - len(name)), postfix=name)[:64]
def delete_rule(self, rule_name):
    """
    Delete a CWE rule.

    This  deletes them, but they will still show up in the AWS console.
    Annoying.
    """
    logger.debug('Deleting existing rule {}'.format(rule_name))

    # All targets must be removed before
    # we can actually delete the rule.
    try:
        targets = self.events_client.list_targets_by_rule(Rule=rule_name)
    except botocore.exceptions.ClientError as e:
        # This avoids misbehavior if low permissions, related: https://github.com/Miserlou/Zappa/issues/286
        error_code = e.response['Error']['Code']
        if error_code == 'AccessDeniedException':
            raise
        else:
            # Any other client error (e.g. rule doesn't exist) ends the delete quietly.
            logger.debug('No target found for this rule: {} {}'.format(rule_name, e.args[0]))
            return

    if 'Targets' in targets and targets['Targets']:
        self.events_client.remove_targets(Rule=rule_name, Ids=[x['Id'] for x in targets['Targets']])
    else:  # pragma: no cover
        logger.debug('No target to delete')

    # Delete our rule.
    self.events_client.delete_rule(Name=rule_name)
def get_event_rule_names_for_lambda(self, lambda_arn):
    """
    Get all of the rule names associated with a lambda function.

    Follows NextToken pagination and returns the full list of rule names.
    """
    response = self.events_client.list_rule_names_by_target(TargetArn=lambda_arn)
    rule_names = response['RuleNames']
    # Iterate when the results are paginated
    while 'NextToken' in response:
        response = self.events_client.list_rule_names_by_target(TargetArn=lambda_arn,
                                                                NextToken=response['NextToken'])
        rule_names.extend(response['RuleNames'])
    return rule_names
def get_event_rules_for_lambda(self, lambda_arn):
    """
    Get all of the rule details associated with this function.
    """
    # Describe every rule targeting this Lambda, in lookup order.
    names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
    return [self.events_client.describe_rule(Name=rule_name) for rule_name in names]
def unschedule_events(self, events, lambda_arn=None, lambda_name=None, excluded_source_services=None):
    """
    Given a list of events, unschedule these CloudWatch Events.

    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.
    """
    # Fix: this docstring was previously placed *after* the first statement,
    # which made it an inert string literal rather than the real docstring.
    excluded_source_services = excluded_source_services or []

    # Drop the Lambda's resource policy statements before removing rules.
    self._clear_policy(lambda_name)

    rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
    for rule_name in rule_names:
        self.delete_rule(rule_name)
        print('Unscheduled ' + rule_name + '.')

    non_cwe = [e for e in events if 'event_source' in e]
    for event in non_cwe:
        # TODO: This WILL miss non CW events that have been deployed but changed names. Figure out a way to remove
        # them no matter what.
        # These are non CWE event sources.
        function = event['function']
        name = event.get('name', function)
        event_source = event.get('event_source', function)
        service = self.service_from_arn(event_source['arn'])
        # DynamoDB and Kinesis streams take quite a while to setup after they are created and do not need to be
        # re-scheduled when a new Lambda function is deployed. Therefore, they should not be removed during zappa
        # update or zappa schedule.
        if service not in excluded_source_services:
            remove_event_source(
                event_source,
                lambda_arn,
                function,
                self.boto_session
            )
            print("Removed event " + name + " (" + str(event_source['events']) + ").")
###
# Async / SNS
##
    def create_async_sns_topic(self, lambda_name, lambda_arn):
        """
        Create the SNS-based async topic.

        Creates an SNS topic named for the function, subscribes the Lambda to
        it, grants SNS permission to invoke the function, and registers the
        topic as an event source routed through zappa's async task router.

        :param lambda_name: name of the Lambda function
        :param lambda_arn: ARN of the Lambda function
        :return: the ARN of the created (or pre-existing) topic
        """
        topic_name = get_topic_name(lambda_name)
        # Create SNS topic
        topic_arn = self.sns_client.create_topic(
            Name=topic_name)['TopicArn']
        # Create subscription
        self.sns_client.subscribe(
            TopicArn=topic_arn,
            Protocol='lambda',
            Endpoint=lambda_arn
        )
        # Add Lambda permission for SNS to invoke function
        self.create_event_permission(
            lambda_name=lambda_name,
            principal='sns.amazonaws.com',
            source_arn=topic_arn
        )
        # Add rule for SNS topic as a event source.
        # Published messages are dispatched to zappa's task router, not the
        # user handler directly.
        add_event_source(
            event_source={
                "arn": topic_arn,
                "events": ["sns:Publish"]
            },
            lambda_arn=lambda_arn,
            target_function="zappa.async.route_task",
            boto_session=self.boto_session
        )
        return topic_arn
def remove_async_sns_topic(self, lambda_name):
"""
Remove the async SNS topic.
"""
topic_name = get_topic_name(lambda_name)
removed_arns = []
for sub in self.sns_client.list_subscriptions()['Subscriptions']:
if topic_name in sub['TopicArn']:
self.sns_client.delete_topic(TopicArn=sub['TopicArn'])
removed_arns.append(sub['TopicArn'])
return removed_arns
##
# CloudWatch Logging
##
def fetch_logs(self, lambda_name, filter_pattern='', limit=10000, start_time=0):
"""
Fetch the CloudWatch logs for a given Lambda name.
"""
log_name = '/aws/lambda/' + lambda_name
streams = self.logs_client.describe_log_streams(
logGroupName=log_name,
descending=True,
orderBy='LastEventTime'
)
all_streams = streams['logStreams']
all_names = [stream['logStreamName'] for stream in all_streams]
events = []
response = {}
while not response or 'nextToken' in response:
extra_args = {}
if 'nextToken' in response:
extra_args['nextToken'] = response['nextToken']
# Amazon uses millisecond epoch for some reason.
# Thanks, Jeff.
start_time = start_time * 1000
end_time = int(time.time()) * 1000
response = self.logs_client.filter_log_events(
logGroupName=log_name,
logStreamNames=all_names,
startTime=start_time,
endTime=end_time,
filterPattern=filter_pattern,
limit=limit,
interleaved=True, # Does this actually improve performance?
**extra_args
)
if response and 'events' in response:
events += response['events']
return sorted(events, key=lambda k: k['timestamp'])
def remove_log_group(self, group_name):
"""
Filter all log groups that match the name given in log_filter.
"""
print("Removing log group: {}".format(group_name))
try:
self.logs_client.delete_log_group(logGroupName=group_name)
except botocore.exceptions.ClientError as e:
print("Couldn't remove '{}' because of: {}".format(group_name, e))
def remove_lambda_function_logs(self, lambda_function_name):
"""
Remove all logs that are assigned to a given lambda function id.
"""
self.remove_log_group('/aws/lambda/{}'.format(lambda_function_name))
def remove_api_gateway_logs(self, project_name):
"""
Removed all logs that are assigned to a given rest api id.
"""
for rest_api in self.get_rest_apis(project_name):
for stage in self.apigateway_client.get_stages(restApiId=rest_api['id'])['item']:
self.remove_log_group('API-Gateway-Execution-Logs_{}/{}'.format(rest_api['id'], stage['stageName']))
##
# Route53 Domain Name Entries
##
def get_hosted_zone_id_for_domain(self, domain):
"""
Get the Hosted Zone ID for a given domain.
"""
all_zones = self.route53.list_hosted_zones()
return self.get_best_match_zone(all_zones, domain)
@staticmethod
def get_best_match_zone(all_zones, domain):
"""Return zone id which name is closer matched with domain name."""
# Related: https://github.com/Miserlou/Zappa/issues/459
public_zones = [zone for zone in all_zones['HostedZones'] if not zone['Config']['PrivateZone']]
zones = {zone['Name'][:-1]: zone['Id'] for zone in public_zones if zone['Name'][:-1] in domain}
if zones:
keys = max(zones.keys(), key=lambda a: len(a)) # get longest key -- best match.
return zones[keys]
else:
return None
def set_dns_challenge_txt(self, zone_id, domain, txt_challenge):
"""
Set DNS challenge TXT.
"""
print("Setting DNS challenge..")
resp = self.route53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=self.get_dns_challenge_change_batch('UPSERT', domain, txt_challenge)
)
return resp
def remove_dns_challenge_txt(self, zone_id, domain, txt_challenge):
"""
Remove DNS challenge TXT.
"""
print("Deleting DNS challenge..")
resp = self.route53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=self.get_dns_challenge_change_batch('DELETE', domain, txt_challenge)
)
return resp
@staticmethod
def get_dns_challenge_change_batch(action, domain, txt_challenge):
"""
Given action, domain and challege, return a change batch to use with
route53 call.
:param action: DELETE | UPSERT
:param domain: domain name
:param txt_challenge: challenge
:return: change set for a given action, domain and TXT challenge.
"""
return {
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': '_acme-challenge.{0}'.format(domain),
'Type': 'TXT',
'TTL': 60,
'ResourceRecords': [{
'Value': '"{0}"'.format(txt_challenge)
}]
}
}]
}
##
# Utility
##
    def shell(self):
        """
        Spawn a PDB shell.
        """
        # Deferred import: pdb is only needed when a developer actually
        # drops into the debugger.
        import pdb
        pdb.set_trace()
    def load_credentials(self, boto_session=None, profile_name=None):
        """
        Load AWS credentials.

        An optional boto_session can be provided, but that's usually for testing.

        An optional profile_name can be provided for config files that have multiple sets
        of credentials.

        Side effects: sets self.boto_session and may overwrite self.aws_region
        with the session's region; prints warnings for regions not known to
        support Lambda / API Gateway.
        """
        # Automatically load credentials from config or environment
        if not boto_session:
            # If provided, use the supplied profile name.
            if profile_name:
                self.boto_session = boto3.Session(profile_name=profile_name, region_name=self.aws_region)
            elif os.environ.get('AWS_ACCESS_KEY_ID') and os.environ.get('AWS_SECRET_ACCESS_KEY'):
                # Explicit env-var credentials take precedence over boto3's
                # default resolution chain.
                region_name = os.environ.get('AWS_DEFAULT_REGION') or self.aws_region
                session_kw = {
                    "aws_access_key_id": os.environ.get('AWS_ACCESS_KEY_ID'),
                    "aws_secret_access_key": os.environ.get('AWS_SECRET_ACCESS_KEY'),
                    "region_name": region_name,
                }
                # If we're executing in a role, AWS_SESSION_TOKEN will be present, too.
                if os.environ.get("AWS_SESSION_TOKEN"):
                    session_kw["aws_session_token"] = os.environ.get("AWS_SESSION_TOKEN")
                self.boto_session = boto3.Session(**session_kw)
            else:
                # Fall back to boto3's default credential lookup.
                self.boto_session = boto3.Session(region_name=self.aws_region)
            logger.debug("Loaded boto session from config: %s", boto_session)
        else:
            logger.debug("Using provided boto session: %s", boto_session)
            self.boto_session = boto_session
        # use provided session's region in case it differs
        self.aws_region = self.boto_session.region_name
        if self.boto_session.region_name not in LAMBDA_REGIONS:
            print("Warning! AWS Lambda may not be available in this AWS Region!")
        if self.boto_session.region_name not in API_GATEWAY_REGIONS:
            print("Warning! AWS API Gateway may not be available in this AWS Region!")
@staticmethod
def service_from_arn(arn):
return arn.split(':')[2]
|
HackerTool/Sark | refs/heads/master | sark/qt.py | 3 | import os
import sys
import idaapi
from . import exceptions
# This nasty piece of code is here to force the loading of IDA's PySide.
# Without it, Python attempts to load PySide from the site-packages directory,
# and failing, as it does not play nicely with IDA.
old_path = sys.path[:]
try:
    # Put IDA's own directory first so its bundled PySide shadows any
    # site-packages copy for the duration of this import.
    ida_python_path = os.path.dirname(idaapi.__file__)
    sys.path.insert(0, ida_python_path)
    from PySide import QtGui, QtCore
finally:
    # Always restore the original search path, even if the import fails.
    sys.path = old_path
def capture_widget(widget, path=None):
    """Grab an image of a Qt widget

    Args:
        widget: The Qt Widget to capture
        path (optional): The path to save to. If not provided - will return image data.

    Returns:
        If a path is provided, the image will be saved to it.
        If not, the PNG buffer will be returned.
    """
    pixmap = QtGui.QPixmap.grabWidget(widget)

    if path:
        # Save straight to disk; nothing to return.
        pixmap.save(path)
        return

    # No path: render into an in-memory PNG buffer and hand back its bytes.
    image_buffer = QtCore.QBuffer()
    image_buffer.open(QtCore.QIODevice.ReadWrite)
    pixmap.save(image_buffer, "PNG")
    return image_buffer.data().data()
def form_to_widget(tform):
    """Convert an IDA TForm into its PySide widget.

    The throwaway Ctx class provides the context object that
    FormToPySideWidget reads its QtGui module from.
    """
    class Ctx(object):
        QtGui = QtGui
    return idaapi.PluginForm.FormToPySideWidget(tform, ctx=Ctx())
def get_widget(title):
    """Get the Qt widget of the IDA window with the given title."""
    tform = idaapi.find_tform(title)
    if tform:
        return form_to_widget(tform)
    # No such form is open.
    raise exceptions.FormNotFound("No form titled {!r} found.".format(title))
def resize_widget(widget, width, height):
    """Resize a Qt widget."""
    # Note: setGeometry also moves the widget to (0, 0), not just resizes it.
    widget.setGeometry(0, 0, width, height)
def get_window():
    """Get IDA's top level window."""
    # get_current_tform() can come back empty sometimes when closing IDBs
    # and not IDA; the "Output window" form is used as a fallback anchor.
    tform = idaapi.get_current_tform() or idaapi.find_tform("Output window")
    widget = form_to_widget(tform)
    return widget.window()
class MenuManager(object):
    """IDA Menu Manipulation

    Use this class to add your own top-level menus.
    While this is discouraged by the SDK:

    > You should not change top level menu, or the Edit,Plugins submenus
    (documentation for `attach_action_to_menu`, kernwin.hpp)

    Adding top-level menus is useful sometimes.
    Nonetheless, you should be careful and make sure to remove all your menus
    when you are done. Leaving them handing would force users to restart IDA
    to remove them.

    Usage of this class should be as follows:

    >>> # Use the manager to add top-level menus
    >>> menu_manager = MenuManager()
    >>> menu_manager.add_menu("My Menu")
    >>> # Use the standard API to add menu items
    >>> idaapi.attach_action_to_menu("My Menu/", ":My-Action:", idaapi.SETMENU_APP)
    >>> # When a menu is not needed, remove it
    >>> menu_manager.remove_menu("My Menu")
    >>> # When you are done with the manager (and want to remove all menus you added,)
    >>> # clear it before deleting.
    >>> menu_manager.clear()
    """
    def __init__(self):
        super(MenuManager, self).__init__()
        # IDA's menu bar, found on the top-level window.
        self._window = get_window()
        self._menu = self._window.findChild(QtGui.QMenuBar)
        # Maps menu name -> QMenu, for menus created by *this* manager only.
        self._menus = {}
    def add_menu(self, name):
        """Add a top-level menu.

        The menu manager only allows one menu of the same name. However, it does
        not make sure that there are no pre-existing menus of that name.
        """
        if name in self._menus:
            raise exceptions.MenuAlreadyExists("Menu name {!r} already exists.".format(name))
        menu = self._menu.addMenu(name)
        self._menus[name] = menu
    def remove_menu(self, name):
        """Remove a top-level menu.

        Only removes menus created by the same menu manager.
        """
        if name not in self._menus:
            raise exceptions.MenuNotFound(
                "Menu {!r} was not found. It might be deleted, or belong to another menu manager.".format(name))
        # Removing the menu's action detaches it from the menu bar.
        self._menu.removeAction(self._menus[name].menuAction())
        del self._menus[name]
    def clear(self):
        """Clear all menus created by this manager."""
        # NOTE: itervalues() — this module is Python 2 code.
        for menu in self._menus.itervalues():
            self._menu.removeAction(menu.menuAction())
        self._menus = {}
|
rhertzog/django | refs/heads/master | tests/resolve_url/urls.py | 100 | from django.conf.urls import url
def some_view(request):
    # Intentionally empty: only the URL name matters to resolve_url tests.
    pass


urlpatterns = [
    url(r'^some-url/$', some_view, name='some-view'),
]
|
WeblateOrg/weblate | refs/heads/main | weblate/api/apps.py | 2 | #
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.apps import AppConfig
class ApiConfig(AppConfig):
    """Django app configuration for Weblate's REST API application."""

    name = "weblate.api"
    label = "api"
    verbose_name = "API"
|
kangxu/crosswalk-test-suite | refs/heads/master | apptools/apptools-ios-tests/apptools/manifest_name.py | 3 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, Yun <yunx.liu@intel.com>
import unittest
import os
import comm
from xml.etree import ElementTree
import json
import shutil
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """Build a crosswalk test app with various manifest 'name' values and
    assert that the build succeeds for each of them."""

    def _build_with_name(self, name):
        """Create the test app, set the manifest 'name' field, run the build,
        clean up, and return the build command's exit status.

        FIX: the manifest file handles were previously opened inline
        (json.dump(d, open(path, "w"))) and never explicitly closed, relying
        on CPython refcounting to flush before os.system ran the build.
        Context managers make the close/flush explicit.
        """
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        manifest_path = comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json"
        with open(manifest_path, "r") as jsonfile:
            jsonDict = json.loads(jsonfile.read())
        jsonDict["name"] = name
        with open(manifest_path, "w") as jsonfile:
            json.dump(jsonDict, jsonfile)
        buildcmd = comm.PackTools + "crosswalk-app build"
        buildstatus = os.system(buildcmd)
        comm.clear("org.xwalk.test")
        return buildstatus

    def test_name_number(self):
        self.assertEqual(self._build_with_name("000"), 0)

    def test_name_symbol(self):
        self.assertEqual(self._build_with_name("[]*&^%!@#$%^&*()<>"), 0)

    def test_name_chinese(self):
        # The Chinese name comes from a fixture manifest rather than an
        # in-place JSON edit, so this case keeps its own flow.
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        shutil.copyfile(comm.ConstPath + "/../testapp/manifest_name_chinese/manifest.json", comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json")
        buildcmd = comm.PackTools + "crosswalk-app build"
        buildstatus = os.system(buildcmd)
        comm.clear("org.xwalk.test")
        self.assertEqual(buildstatus, 0)

    def test_name_special_characters(self):
        self.assertEqual(self._build_with_name("/n"), 0)

    def test_name_blank(self):
        self.assertEqual(self._build_with_name(""), 0)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
plotly/plotly.py | refs/heads/master | packages/python/plotly/plotly/validators/parcoords/line/_color.py | 1 | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``parcoords.line.color`` property (generated code)."""

    def __init__(self, plotly_name="color", parent_name="parcoords.line", **kwargs):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # The pops below let callers override the generated defaults.
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            colorscale_path=kwargs.pop("colorscale_path", "parcoords.line.colorscale"),
            **kwargs
        )
|
def is_fp_closed(obj):
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    """
    missing = object()

    # Check via the official file-like-object way.
    closed = getattr(obj, 'closed', missing)
    if closed is not missing:
        return closed

    # Check if the object is a container for another file-like object that
    # gets released on exhaustion (e.g. HTTPResponse).
    fp = getattr(obj, 'fp', missing)
    if fp is not missing:
        return fp is None

    raise ValueError("Unable to determine whether fp is closed.")
|
gameduell/duell | refs/heads/master | bin/mac/python2.7.9/lib/python2.7/heapq.py | 50 | # -*- coding: latin-1 -*-
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
an usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedule other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, imap, izip, tee, chain
from operator import itemgetter
def cmp_lt(x, y):
    """Return True if x sorts strictly before y.

    Prefers __lt__ when the object defines it, falling back to a negated
    __le__ otherwise.  (Python 3 only ever calls __lt__.)
    """
    if hasattr(x, '__lt__'):
        return x < y
    return not y <= x
def heappush(heap, item):
    """Push item onto heap, maintaining the heap invariant."""
    heap.append(item)
    # The new item starts as the last leaf; sift it toward the root.
    _siftdown(heap, 0, len(heap)-1)

def heappop(heap):
    """Pop the smallest item off the heap, maintaining the heap invariant."""
    lastelt = heap.pop()    # raises appropriate IndexError if heap is empty
    if heap:
        returnitem = heap[0]
        # Move the last leaf to the root, then sift it down into place.
        heap[0] = lastelt
        _siftup(heap, 0)
    else:
        returnitem = lastelt
    return returnitem

def heapreplace(heap, item):
    """Pop and return the current smallest value, and add the new item.

    This is more efficient than heappop() followed by heappush(), and can be
    more appropriate when using a fixed-size heap.  Note that the value
    returned may be larger than item!  That constrains reasonable uses of
    this routine unless written as part of a conditional replacement:

        if item > heap[0]:
            item = heapreplace(heap, item)
    """
    returnitem = heap[0]    # raises appropriate IndexError if heap is empty
    heap[0] = item
    _siftup(heap, 0)
    return returnitem

def heappushpop(heap, item):
    """Fast version of a heappush followed by a heappop."""
    # Only displace the root when it is strictly smaller than the new item;
    # otherwise the new item would just be popped right back out.
    if heap and cmp_lt(heap[0], item):
        item, heap[0] = heap[0], item
        _siftup(heap, 0)
    return item

def heapify(x):
    """Transform list into a heap, in-place, in O(len(x)) time."""
    n = len(x)
    # Transform bottom-up.  The largest index there's any point to looking at
    # is the largest with a child index in-range, so must have 2*i + 1 < n,
    # or i < (n-1)/2.  If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
    # j-1 is the largest, which is n//2 - 1.  If n is odd = 2*j+1, this is
    # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
    # NOTE: xrange — this module is Python 2 source.
    for i in reversed(xrange(n//2)):
        _siftup(x, i)

def _heappushpop_max(heap, item):
    """Maxheap version of a heappush followed by a heappop."""
    if heap and cmp_lt(item, heap[0]):
        item, heap[0] = heap[0], item
        _siftup_max(heap, 0)
    return item

def _heapify_max(x):
    """Transform list into a maxheap, in-place, in O(len(x)) time."""
    n = len(x)
    for i in reversed(range(n//2)):
        _siftup_max(x, i)

def nlargest(n, iterable):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, reverse=True)[:n]
    """
    if n < 0:
        return []
    it = iter(iterable)
    # Seed a min-heap with the first n items; each later item that beats the
    # current minimum displaces it.
    result = list(islice(it, n))
    if not result:
        return result
    heapify(result)
    _heappushpop = heappushpop    # local alias: faster lookup in the loop
    for elem in it:
        _heappushpop(result, elem)
    result.sort(reverse=True)
    return result

def nsmallest(n, iterable):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable)[:n]
    """
    if n < 0:
        return []
    it = iter(iterable)
    # Mirror of nlargest: a max-heap of the n smallest seen so far.
    result = list(islice(it, n))
    if not result:
        return result
    _heapify_max(result)
    _heappushpop = _heappushpop_max
    for elem in it:
        _heappushpop(result, elem)
    result.sort()
    return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
    """Restore the heap invariant for the leaf at `pos`.

    'heap' is a heap at all indices >= startpos, except possibly for pos.
    Moves parents down along the root path until `heap[pos]`'s value fits.
    """
    item = heap[pos]
    while pos > startpos:
        parentpos = (pos - 1) >> 1
        parent = heap[parentpos]
        if not cmp_lt(item, parent):
            # Parent is no larger: invariant holds here.
            break
        # Pull the parent down and keep climbing.
        heap[pos] = parent
        pos = parentpos
    heap[pos] = item
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom __cmp__ methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
    """Sink the out-of-order value at `pos` to a leaf, then sift it back up.

    The two-phase approach (bubble the smaller child up all the way, then
    _siftdown the displaced value) minimizes comparisons — see the long
    commentary above this function.
    """
    end = len(heap)
    start = pos
    item = heap[pos]
    child = 2 * pos + 1    # leftmost child position
    while child < end:
        right = child + 1
        # Prefer whichever child is smaller.
        if right < end and not cmp_lt(heap[child], heap[right]):
            child = right
        # Promote the smaller child and descend into its slot.
        heap[pos] = heap[child]
        pos = child
        child = 2 * pos + 1
    # `pos` is now an empty leaf; place the item and let _siftdown settle it.
    heap[pos] = item
    _siftdown(heap, start, pos)
def _siftdown_max(heap, startpos, pos):
    'Maxheap variant of _siftdown'
    newitem = heap[pos]
    # Follow the path to the root, moving parents down until finding a place
    # newitem fits.  (Identical to _siftdown but with the comparison flipped.)
    while pos > startpos:
        parentpos = (pos - 1) >> 1
        parent = heap[parentpos]
        if cmp_lt(parent, newitem):
            heap[pos] = parent
            pos = parentpos
            continue
        break
    heap[pos] = newitem

def _siftup_max(heap, pos):
    'Maxheap variant of _siftup'
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    # Bubble up the larger child until hitting a leaf.
    childpos = 2*pos + 1    # leftmost child position
    while childpos < endpos:
        # Set childpos to index of larger child.
        rightpos = childpos + 1
        if rightpos < endpos and not cmp_lt(heap[rightpos], heap[childpos]):
            childpos = rightpos
        # Move the larger child up.
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2*pos + 1
    # The leaf at pos is empty now.  Put newitem there, and bubble it up
    # to its final resting place (by sifting its parents down).
    heap[pos] = newitem
    _siftdown_max(heap, startpos, pos)

# If available, use C implementation
try:
    from _heapq import *
except ImportError:
    # No C accelerator: the pure-Python definitions above remain in effect.
    pass
def merge(*iterables):
    '''Merge multiple sorted inputs into a single sorted output.

    Similar to sorted(itertools.chain(*iterables)) but returns a generator,
    does not pull the data into memory all at once, and assumes that each of
    the input streams is already sorted (smallest to largest).

    >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
    [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]

    '''
    # Local aliases: avoid repeated global lookups in the inner loop.
    _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
    _len = len
    h = []
    h_append = h.append
    # Prime the heap with one [value, index, next-method] entry per non-empty
    # iterator; the index breaks ties so entries stay comparable.
    # NOTE: .next — this module is Python 2 source (Py3 uses __next__).
    for itnum, it in enumerate(map(iter, iterables)):
        try:
            next = it.next
            h_append([next(), itnum, next])
        except _StopIteration:
            pass
    heapify(h)
    while _len(h) > 1:
        try:
            while 1:
                # The heap root always carries the overall smallest value.
                v, itnum, next = s = h[0]
                yield v
                s[0] = next()               # raises StopIteration when exhausted
                _heapreplace(h, s)          # restore heap condition
        except _StopIteration:
            _heappop(h)                     # remove empty iterator
    if h:
        # fast case when only a single iterator remains
        v, itnum, next = h[0]
        yield v
        for v in next.__self__:
            yield v
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key)[:n]

    Returns a list (Python 2 ``map`` returns a list).
    """
    # Short-cut for n==1 is to use min() when len(iterable)>0
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        if key is None:
            return [min(chain(head, it))]
        return [min(chain(head, it), key=key)]
    # When n>=size, it's faster to use sorted()
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        # iterable has no len() (e.g. a generator); fall through to the
        # heap-based paths below.
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key)[:n]
    # When key is none, use simpler decoration.  count() serves as a
    # tie-breaker so decorated pairs are always comparable, and it also
    # keeps the selection stable (earlier items win ties).
    if key is None:
        it = izip(iterable, count())  # decorate
        result = _nsmallest(n, it)
        return map(itemgetter(0), result)  # undecorate
    # General case, slowest method: decorate with (key(x), index, x).
    in1, in2 = tee(iterable)
    it = izip(imap(key, in1), count(), in2)  # decorate
    result = _nsmallest(n, it)
    return map(itemgetter(2), result)  # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key, reverse=True)[:n]

    Returns a list (Python 2 ``map`` returns a list).
    """
    # Short-cut for n==1 is to use max() when len(iterable)>0
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        if key is None:
            return [max(chain(head, it))]
        return [max(chain(head, it), key=key)]
    # When n>=size, it's faster to use sorted()
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        # iterable has no len(); fall through to the heap-based paths.
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key, reverse=True)[:n]
    # When key is none, use simpler decoration.  count(0, -1) decorates
    # with a *descending* index so that, under reverse comparison, earlier
    # items still win ties (stable selection).
    if key is None:
        it = izip(iterable, count(0,-1))  # decorate
        result = _nlargest(n, it)
        return map(itemgetter(0), result)  # undecorate
    # General case, slowest method: decorate with (key(x), -index, x).
    in1, in2 = tee(iterable)
    it = izip(imap(key, in1), count(0,-1), in2)  # decorate
    result = _nlargest(n, it)
    return map(itemgetter(2), result)  # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print sort
import doctest
doctest.testmod()
|
edx/ecommerce-worker | refs/heads/master | ecommerce_worker/celery_app.py | 1 | from __future__ import absolute_import
import os
from celery import Celery
from ecommerce_worker.configuration import CONFIGURATION_MODULE
# Set the default configuration module, if one is not already defined.
os.environ.setdefault(CONFIGURATION_MODULE, 'ecommerce_worker.configuration.local')

# Celery application instance shared by all ecommerce_worker tasks.
app = Celery('ecommerce_worker')

# Settings are pulled from the module named by the env var above.
# See http://celery.readthedocs.org/en/latest/userguide/application.html#config-from-envvar.
app.config_from_envvar(CONFIGURATION_MODULE)
|
JazzeYoung/VeryDeepAutoEncoder | refs/heads/master | pylearn2/pylearn2/datasets/wiskott.py | 45 | """
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as N
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
class Wiskott(dense_design_matrix.DenseDesignMatrix):
    """
    The Wiskott fish dataset, exposed as a dense design matrix of
    intensity-inverted 64x64 single-channel frames.
    """

    def __init__(self):
        fname = ("${PYLEARN2_DATA_PATH}/wiskott/wiskott"
                 "_fish_layer0_15_standard_64x64_shuffled.npy")
        # Invert pixel intensities, exactly as the original loader did.
        X = 1. - load(fname)
        converter = dense_design_matrix.DefaultViewConverter((64, 64, 1))
        super(Wiskott, self).__init__(X=X, view_converter=converter)
        # Loaded data must be free of NaNs.
        assert not N.any(N.isnan(self.X))
|
msmbuilder/msmbuilder-legacy | refs/heads/master | MSMBuilder/reduce/tICA.py | 1 | from __future__ import print_function, division, absolute_import
import numpy as np
import scipy.linalg
from time import time
import logging
from mdtraj import io
from mdtraj.utils.six.moves import cPickle
from msmbuilder.metrics import Vectorized
from msmbuilder.reduce import AbstractDimReduction
logger = logging.getLogger(__name__)
class tICA(AbstractDimReduction):
    """
    tICA is a class for calculating the matrices required to do time-structure
    based independent component analysis (tICA). It can be
    used to calculate both the time-lag correlation matrix and covariance
    matrix. The advantage it has is that you can calculate the matrix for a
    large dataset by "training" smaller pieces of the dataset at a time.

    Notes
    -----
    It can be shown that the time-lag correlation matrix is the same as:

    C = E[Outer(X[t], X[t+lag])] - Outer(E[X[t]], E[X[t+lag]])

    Because of this it is possible to calculate running sums corresponding
    to variables A, B, D:

    A = E[X[t]]
    B = E[X[t+lag]]
    D = E[Outer(X[t], X[t+lag])]

    Then at the end we can calculate C:

    C = D - Outer(A, B)

    Finally we can get a symmetrized C' from our estimate of C, for
    example by adding the transpose:

    C' = (C + C^T) / 2

    There is, in fact, an MLE estimator for each matrix C, and S:

    S = E[Outer(X[t], X[t])]

    The MLE estimators are:

    \mu = 1 / (2(N - lag)) \sum_{t=1}^{N - lag} X[t] + X[t + lag]
    C = 1 / (2(N - lag)) * \sum_{t=1}^{N - lag} Outer(X[t] - \mu, X[t + lag] - \mu) + Outer(X[t + lag] - \mu, X[t] - \mu)
    S = 1 / (2(N - lag)) * \sum_{t=1}^{N - lag} Outer(X[t] - \mu, X[t] - \mu) + Outer(X[t + lag] - \mu, X[t + lag] - \mu)
    """

    def __init__(self, lag, calc_cov_mat=True, prep_metric=None, size=None):
        """
        Create an empty tICA object.

        To add data to the object, use the train method.

        Parameters
        ----------
        lag: int
            The lag to use in calculating the time-lag correlation
            matrix. If zero, then only the covariance matrix is
            calculated
        calc_cov_mat: bool, optional
            if lag > 0, then will also calculate the covariance matrix
        prep_metric: msmbuilder.metrics.Vectorized subclass instance, optional
            metric to use to prepare trajectories. If not specified, then
            you must pass prepared trajectories to the train method, via
            the kwarg "prep_trajectory"
        size: int, optional
            the size is the number of coordinates for the vector
            representation of the protein. If None, then the first
            trained vector will be used to initialize it.

        Notes
        -----
        To load an already constructed tICA object, use `tICA.load()`.
        """
        self.corrs = None
        self.sum_t = None
        self.sum_t_dt = None
        # The above containers hold a running sum that is used to
        # calculate the time-lag correlation matrix as well as the
        # covariance matrix
        self.corrs_lag0 = None  # needed for calculating the covariance
                                # matrix
        # NOTE(review): initialize() actually creates corrs_lag0_t and
        # corrs_lag0_t_dt rather than corrs_lag0 — this attribute appears
        # vestigial; confirm before removing.
        self.sum_all = None
        self.trained_frames = 0
        self.total_frames = 0
        # Track how many frames we've trained
        self.lag = int(lag)
        if self.lag < 0:
            raise Exception("lag must be non-negative.")
        elif self.lag == 0:  # If we have lag=0 then we don't need to
                             # calculate the covariance matrix twice
            self.calc_cov_mat = False
        else:
            self.calc_cov_mat = calc_cov_mat
        if prep_metric is None:
            self.prep_metric = None
            logger.warn("no metric specified, you must pass prepared"
                        " trajectories to the train and project methods")
        else:
            if not isinstance(prep_metric, Vectorized):
                raise Exception("prep_metric must be an instance of a "
                                "subclass of msmbuilder.metrics.Vectorized")
            self.prep_metric = prep_metric
        self.size = size
        if not self.size is None:
            self.initialize(size)
        # containers for the solutions:
        self.timelag_corr_mat = None
        self.cov_mat = None
        self.vals = None
        self.vecs = None
        self._sorted = False

    def initialize(self, size):
        """
        initialize the containers for the calculation

        Parameters
        ----------
        size : int
            The size of the square matrix will be (size, size)
        """
        self.size = size
        self.corrs = np.zeros((size, size), dtype=float)
        self.sum_t = np.zeros(size, dtype=float)
        self.sum_t_dt = np.zeros(size, dtype=float)
        self.sum_all = np.zeros(size, dtype=float)
        if self.calc_cov_mat:
            # Separate lag-0 accumulators for the t and t+dt windows; both
            # are needed by the MLE covariance estimate.
            self.corrs_lag0_t = np.zeros((size, size), dtype=float)
            self.corrs_lag0_t_dt = np.zeros((size, size), dtype=float)

    def train(self, trajectory=None, prep_trajectory=None):
        """
        add a trajectory to the calculation

        Parameters:
        -----------
        trajectory: msmbuilder.Trajectory, optional
            trajectory object
        prep_trajectory: np.ndarray, optional
            prepared trajectory object

        Remarks:
        --------
        must input one of trajectory or prep_trajectory (if both
        are given, then prep_trajectory is used.)
        """
        if not prep_trajectory is None:
            data_vector = prep_trajectory
        elif not trajectory is None:
            data_vector = self.prep_metric.prepare_trajectory(trajectory)
        else:
            raise Exception("need to input one of trajectory or prep_trajectory")
        a = time()  # For debugging we are tracking the time each step takes
        if self.size is None:
            # then we haven't started yet, so set up the containers
            self.initialize(size=data_vector.shape[1])
        if data_vector.shape[1] != self.size:
            raise Exception("Input vector is not the right size. axis=1 should "
                            "be length %d. Vector has shape %s" %
                            (self.size, str(data_vector.shape)))
        if data_vector.shape[0] <= self.lag:
            # Too few frames to form even one (t, t+lag) pair; skip it.
            logger.warn("Data vector is too short (%d) "
                        "for this lag (%d)", data_vector.shape[0], self.lag)
            return
        b = time()
        if self.lag != 0:
            # Accumulate sum over t of Outer(X[t], X[t+lag]) via a single
            # matrix product, plus the two window sums.
            self.corrs += data_vector[:-self.lag].T.dot(data_vector[self.lag:])
            self.sum_t += data_vector[:-self.lag].sum(axis=0)
            self.sum_t_dt += data_vector[self.lag:].sum(axis=0)
        else:
            # lag == 0: corrs is just the Gram matrix; note calc_cov_mat is
            # forced to False in __init__ for this case, so the branch below
            # never runs here.
            self.corrs += data_vector.T.dot(data_vector)
            self.sum_t += data_vector.sum(axis=0)
            self.sum_t_dt += data_vector.sum(axis=0)
        if self.calc_cov_mat:
            self.corrs_lag0_t += data_vector[:-self.lag].T.dot(data_vector[:-self.lag])
            self.corrs_lag0_t_dt += data_vector[self.lag:].T.dot(data_vector[self.lag:])
            self.sum_all += data_vector.sum(axis=0)
            self.total_frames += data_vector.shape[0]
        self.trained_frames += data_vector.shape[0] - self.lag
        # this accounts for us having finite trajectories, so we really are
        # only calculating expectation values over N - \Delta t total samples
        c = time()
        logger.debug("Setup: %f, Corrs: %f" % (b - a, c - b))
        # Probably should just get rid of this..

    def get_current_estimate(self):
        """Calculate the current estimate of the time-lag correlation
        matrix and the covariance matrix (if asked for).

        Returns ``(timelag_corr_mat, cov_mat)`` when ``calc_cov_mat`` is
        True, otherwise just ``timelag_corr_mat``.  Also caches the results
        on ``self.timelag_corr_mat`` / ``self.cov_mat``.

        These estimates come from an MLE argument assuming that the data {X_t, X_t+dt}
        are distributed as a multivariate normal. Of course, this assumption
        is not very true, but this is merely one way to enforce that the
        timelag correlation matrix is symmetric.

        The MLE has nice properties, as well, such as the eigenvalues that result
        from solving the tICA equation are always bounded between -1 and 1, which
        is not the case when one merely symmetrizes the timelag correlation matrix
        while estimating the covariance matrix and mean in the usual manner.

        See Shukla, D et. al. In Preparation for details, or email Christian
        Schwantes (schwancr@stanford.edu).
        """
        two_N = 2. * float(self.trained_frames)
        # ^^ denominator in all of these expressions...
        mle_mean = (self.sum_t + self.sum_t_dt) / two_N
        outer_means = np.outer(mle_mean, mle_mean)
        # Symmetrize the raw sum before removing the mean contribution.
        time_lag_corr = (self.corrs + self.corrs.T) / two_N
        timelag_corr_mat = time_lag_corr - outer_means
        self.timelag_corr_mat = timelag_corr_mat
        if self.calc_cov_mat:
            cov_mat = (self.corrs_lag0_t + self.corrs_lag0_t_dt) / two_N
            cov_mat -= np.outer(mle_mean, mle_mean)
            self.cov_mat = cov_mat
            return timelag_corr_mat, cov_mat
        return timelag_corr_mat

    def _sort(self):
        """
        sort the eigenvectors by their eigenvalues.
        """
        if self.vals is None:
            self.solve()
        ind = np.argsort(self.vals)[::-1]
        # in order of decreasing value
        self.vals = self.vals[ind]
        self.vecs = self.vecs[:, ind]
        self._sorted = True

    def solve(self, pca_cutoff=0):
        """
        Solve the eigenvalue problem. We can translate into the
        PCA space and remove directions that have zero variance.

        If there are directions with zero variance, then the tICA
        eigenvalues will be complex or greater than one.

        Parameters:
        -----------
        pca_cutoff : float, optional
            pca_cutoff to throw out PCs with variance less than this
            cutoff. Default is zero, but you should really check
            your covariance matrix to see if you need this.
        """
        if self.timelag_corr_mat is None or self.cov_mat is None:
            self.get_current_estimate()
        # should really add check if we're just doing PCA, but I
        # don't know why anyone would use this class to do PCA...
        # maybe I should just remove that ability...
        if pca_cutoff <= 0:
            lhs = self.timelag_corr_mat
            rhs = self.cov_mat
        else:
            # Project both matrices onto the PCs with variance above the
            # cutoff before solving the generalized eigenproblem.
            pca_vals, pca_vecs = np.linalg.eigh(self.cov_mat)
            good_ind = np.where(pca_vals > pca_cutoff)[0]
            pca_vals = pca_vals[good_ind]
            pca_vecs = pca_vecs[:, good_ind]
            lhs = pca_vecs.T.dot(self.timelag_corr_mat).dot(pca_vecs)
            rhs = pca_vecs.T.dot(self.cov_mat).dot(pca_vecs)
        # Generalized eigenproblem: lhs v = lambda rhs v
        vals, vecs = scipy.linalg.eig(lhs, b=rhs)
        if pca_cutoff <= 0:
            self.vals = vals
            self.vecs = vecs
        else:
            # Map eigenvectors back from PCA space to the original basis.
            self.vals = vals
            self.vecs = pca_vecs.dot(vecs)
        if np.abs(self.vals.imag).max() > 1E-10:
            logger.warn("you have non-real eigenvalues. This usually means "
                        "you need to throw out some coordinates by doing tICA "
                        "in PCA space.")
        else:
            self.vals = self.vals.real
        if np.abs(self.vecs.imag).max() > 1E-10:
            logger.warn("you have non-real eigenvector entries...")
        else:
            self.vecs = self.vecs.real
        self._sort()

    def project(self, trajectory=None, prep_trajectory=None, which=None):
        """
        project a trajectory (or prepared trajectory) onto a subset of
        the tICA eigenvectors.

        Parameters:
        -----------
        trajectory : mdtraj.Trajectory, optional
            trajectory object (can also pass a prepared trajectory instead)
        prep_trajectory : np.ndarray, optional
            prepared trajectory
        which : np.ndarray
            which eigenvectors to project onto

        Returns:
        --------
        proj_trajectory : np.ndarray
            projected trajectory (n_points, n_tICs)
        """
        if not self._sorted:
            self._sort()
        if prep_trajectory is None:
            if trajectory is None:
                raise Exception("must pass one of trajectory or prep_trajectory")
            prep_trajectory = self.prep_metric.prepare_trajectory(trajectory)
        if which is None:
            raise Exception("must pass 'which' to indicate which tICs to project onto")
        which = np.array(which).flatten().astype(int)
        proj_trajectory = prep_trajectory.dot(self.vecs[:, which])
        return proj_trajectory

    def save(self, output):
        """
        save the results to file

        Parameters:
        -----------
        output : str
            output filename (.h5)
        """
        # Serialize metric used to calculate tICA input.
        metric_string = cPickle.dumps(self.prep_metric)
        io.saveh(output, timelag_corr_mat=self.timelag_corr_mat,
                 cov_mat=self.cov_mat, lag=np.array([self.lag]), vals=self.vals,
                 vecs=self.vecs, metric_string=np.array([metric_string]))

    @classmethod
    def load(cls, tica_fn):
        """
        load a tICA solution to use in projecting data.

        Parameters:
        -----------
        tica_fn : str
            filename pointing to tICA solutions
        """
        # the only variables we need to save are the two matrices
        # and the eigenvectors / values as well as the lag time
        logger.warn("NOTE: You can only use the tICA solution, you will "
                    "not be able to continue adding data")
        f = io.loadh(tica_fn)
        metric = cPickle.loads(f["metric_string"][0])
        tica_obj = cls(f['lag'][0], prep_metric=metric)
        # lag entry is an array... with a single item
        tica_obj.timelag_corr_mat = f['timelag_corr_mat']
        tica_obj.cov_mat = f['cov_mat']
        tica_obj.vals = f['vals']
        tica_obj.vecs = f['vecs']
        tica_obj._sort()
        return tica_obj
|
kajgan/e2 | refs/heads/master | lib/python/Screens/ServiceInfo.py | 2 | from Components.HTMLComponent import HTMLComponent
from Components.GUIComponent import GUIComponent
from Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from ServiceReference import ServiceReference
from enigma import eListboxPythonMultiContent, eListbox, gFont, iServiceInformation, eServiceCenter
from Tools.Transponder import ConvertToHumanReadable, getChannelNumber
import skin
# Text alignment flag (mirrors enigma's RT_HALIGN_LEFT).
RT_HALIGN_LEFT = 0

# Value-formatting modes understood by ServiceInfoListEntry.
TYPE_TEXT = 0
TYPE_VALUE_HEX = 1
TYPE_VALUE_DEC = 2
TYPE_VALUE_HEX_DEC = 3
TYPE_SLIDER = 4
TYPE_VALUE_ORBIT_DEC = 5
def to_unsigned(x):
    """Reinterpret the integer *x* as an unsigned 32-bit value."""
    return x % 0x100000000
def ServiceInfoListEntry(a, b, valueType=TYPE_TEXT, param=4):
    """Build one multi-content list row showing label ``a`` and value ``b``.

    ``b`` is formatted according to ``valueType`` (one of the TYPE_*
    constants); ``param`` is the zero-pad width used by the hex formats.
    Geometry comes from the skin's "ServiceInfo" parameters.

    Fix: removed a leftover debug statement (``print "b:", b``) that wrote
    every value to stdout each time a row was built.
    """
    if not isinstance(b, str):
        if valueType == TYPE_VALUE_HEX:
            b = ("0x%0" + str(param) + "x") % to_unsigned(b)
        elif valueType == TYPE_VALUE_DEC:
            b = str(b)
        elif valueType == TYPE_VALUE_HEX_DEC:
            b = ("0x%0" + str(param) + "x (%dd)") % (to_unsigned(b), b)
        elif valueType == TYPE_VALUE_ORBIT_DEC:
            # Orbital position is in tenths of a degree (0..3600);
            # values past 180.0E wrap around to west.
            direction = 'E'
            if b > 1800:
                b = 3600 - b
                direction = 'W'
            b = ("%d.%d%s") % (b // 10, b % 10, direction)
        else:
            b = str(b)
    x, y, w, h = skin.parameters.get("ServiceInfo", (0, 0, 300, 30))
    xa, ya, wa, ha = skin.parameters.get("ServiceInfoLeft", (0, 0, 300, 25))
    xb, yb, wb, hb = skin.parameters.get("ServiceInfoRight", (300, 0, 600, 25))
    return [
        # PyObject *type, *px, *py, *pwidth, *pheight, *pfnt, *pstring, *pflags;
        (eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 0, RT_HALIGN_LEFT, ""),
        (eListboxPythonMultiContent.TYPE_TEXT, xa, ya, wa, ha, 0, RT_HALIGN_LEFT, a),
        (eListboxPythonMultiContent.TYPE_TEXT, xb, yb, wb, hb, 0, RT_HALIGN_LEFT, b)
    ]
class ServiceInfoList(HTMLComponent, GUIComponent):
    """List widget that renders label/value rows built by
    ServiceInfoListEntry via an eListboxPythonMultiContent."""

    def __init__(self, source):
        GUIComponent.__init__(self)
        self.l = eListboxPythonMultiContent()
        self.list = source
        self.l.setList(self.list)
        # Font and row height come from the skin, with fallback defaults.
        font = skin.fonts.get("ServiceInfo", ("Regular", 23, 25))
        self.l.setFont(0, gFont(font[0], font[1]))
        self.l.setItemHeight(font[2])

    GUI_WIDGET = eListbox

    def postWidgetCreate(self, instance):
        self.instance.setContent(self.l)
# Screen modes for the ServiceInfo screen below.
TYPE_SERVICE_INFO = 1
TYPE_TRANSPONDER_INFO = 2
class ServiceInfo(Screen):
    """Screen showing information about the current service or, when a
    service reference is passed, about that reference's transponder.

    Color buttons switch between service, PID, multiplex (transponder)
    and tuner-status views.
    """

    def __init__(self, session, serviceref=None):
        Screen.__init__(self, session)
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
        {
            "ok": self.close,
            "cancel": self.close,
            "red": self.information,
            "green": self.pids,
            "yellow": self.transponder,
            "blue": self.tuner
        }, -1)

        if serviceref:
            # Transponder-only mode: static info for the given reference.
            Screen.setTitle(self, _("Transponder Information"))
            self.type = TYPE_TRANSPONDER_INFO
            self.skinName="ServiceInfoSimple"
            info = eServiceCenter.getInstance().info(serviceref)
            self.transponder_info = info.getInfoObject(serviceref, iServiceInformation.sTransponderData)
            # info is a iStaticServiceInformation, not a iServiceInformation
            self.info = None
            self.feinfo = None
        else:
            # Live mode: inspect the currently playing service.
            Screen.setTitle(self, _("Service Information"))
            self.type = TYPE_SERVICE_INFO
            self["key_red"] = self["red"] = Label(_("Service"))
            self["key_green"] = self["green"] = Label(_("PIDs"))
            self["key_yellow"] = self["yellow"] = Label(_("Multiplex"))
            self["key_blue"] = self["blue"] = Label(_("Tuner status"))
            service = session.nav.getCurrentService()
            if service is not None:
                self.info = service.info()
                self.feinfo = service.frontendInfo()
            else:
                self.info = None
                self.feinfo = None

        tlist = [ ]
        self["infolist"] = ServiceInfoList(tlist)
        self.onShown.append(self.information)

    def information(self):
        """Fill the list with general service info (or transponder info
        when in TYPE_TRANSPONDER_INFO mode)."""
        if self.type == TYPE_SERVICE_INFO:
            if self.session.nav.getCurrentlyPlayingServiceOrGroup():
                name = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getServiceName()
                refstr = self.session.nav.getCurrentlyPlayingServiceReference().toString()
            else:
                name = _("N/A")
                refstr = _("N/A")
            aspect = "-"
            videocodec = "-"
            resolution = "-"
            if self.info:
                # Index into the codec-name tuple by sVideoType.
                videocodec = ("MPEG2", "MPEG4", "MPEG1", "MPEG4-II", "VC1", "VC1-SM", "-" )[self.info and self.info.getInfo(iServiceInformation.sVideoType)]
                width = self.info.getInfo(iServiceInformation.sVideoWidth)
                height = self.info.getInfo(iServiceInformation.sVideoHeight)
                if width > 0 and height > 0:
                    resolution = "%dx%d" % (width,height)
                    resolution += ("i", "p", "")[self.info.getInfo(iServiceInformation.sProgressive)]
                    # Frame rate is reported in mHz; round to whole Hz.
                    resolution += str((self.info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000)
                aspect = self.getServiceInfoValue(iServiceInformation.sAspect)
                # These aspect codes correspond to 4:3 formats.
                if aspect in ( 1, 2, 5, 6, 9, 0xA, 0xD, 0xE ):
                    aspect = "4:3"
                else:
                    aspect = "16:9"
            Labels = ( (_("Name"), name, TYPE_TEXT),
                    (_("Provider"), self.getServiceInfoValue(iServiceInformation.sProvider), TYPE_TEXT),
                    (_("Videoformat"), aspect, TYPE_TEXT),
                    (_("Videosize"), resolution, TYPE_TEXT),
                    (_("Videocodec"), videocodec, TYPE_TEXT),
                    (_("Namespace"), self.getServiceInfoValue(iServiceInformation.sNamespace), TYPE_VALUE_HEX, 8),
                    (_("Service reference"), refstr, TYPE_TEXT))
            self.fillList(Labels)
        else:
            if self.transponder_info:
                tp_info = ConvertToHumanReadable(self.transponder_info)
                # Map raw transponder-data keys to translated labels.
                conv = { "tuner_type" : _("Transponder type"),
                         "system" : _("System"),
                         "modulation" : _("Modulation"),
                         "orbital_position" : _("Orbital position"),
                         "frequency" : _("Frequency"),
                         "symbol_rate" : _("Symbol rate"),
                         "bandwidth" : _("Bandwidth"),
                         "polarization" : _("Polarization"),
                         "inversion" : _("Inversion"),
                         "pilot" : _("Pilot"),
                         "rolloff" : _("Roll-off"),
                         "fec_inner" : _("FEC"),
                         "code_rate_lp" : _("Coderate LP"),
                         "code_rate_hp" : _("Coderate HP"),
                         "constellation" : _("Constellation"),
                         "transmission_mode": _("Transmission mode"),
                         "guard_interval" : _("Guard interval"),
                         "hierarchy_information": _("Hierarchy information") }
                Labels = [(conv[i], tp_info[i], i == "orbital_position" and TYPE_VALUE_ORBIT_DEC or TYPE_VALUE_DEC) for i in tp_info.keys() if i in conv]
                self.fillList(Labels)

    def pids(self):
        """Fill the list with the service's PIDs and DVB identifiers."""
        if self.type == TYPE_SERVICE_INFO:
            Labels = ( (_("Video PID"), self.getServiceInfoValue(iServiceInformation.sVideoPID), TYPE_VALUE_HEX_DEC, 4),
                       (_("Audio PID"), self.getServiceInfoValue(iServiceInformation.sAudioPID), TYPE_VALUE_HEX_DEC, 4),
                       (_("PCR PID"), self.getServiceInfoValue(iServiceInformation.sPCRPID), TYPE_VALUE_HEX_DEC, 4),
                       (_("PMT PID"), self.getServiceInfoValue(iServiceInformation.sPMTPID), TYPE_VALUE_HEX_DEC, 4),
                       (_("TXT PID"), self.getServiceInfoValue(iServiceInformation.sTXTPID), TYPE_VALUE_HEX_DEC, 4),
                       (_("TSID"), self.getServiceInfoValue(iServiceInformation.sTSID), TYPE_VALUE_HEX_DEC, 4),
                       (_("ONID"), self.getServiceInfoValue(iServiceInformation.sONID), TYPE_VALUE_HEX_DEC, 4),
                       (_("SID"), self.getServiceInfoValue(iServiceInformation.sSID), TYPE_VALUE_HEX_DEC, 4))
            self.fillList(Labels)

    def showFrontendData(self, real):
        """Show frontend data; ``real`` selects live values vs settings."""
        if self.type == TYPE_SERVICE_INFO:
            frontendData = self.feinfo and self.feinfo.getAll(real)
            Labels = self.getFEData(frontendData)
            self.fillList(Labels)

    def transponder(self):
        if self.type == TYPE_SERVICE_INFO:
            self.showFrontendData(True)

    def tuner(self):
        if self.type == TYPE_SERVICE_INFO:
            self.showFrontendData(False)

    def getFEData(self, frontendDataOrg):
        """Return label rows for the frontend data, per tuner type
        (DVB-S / DVB-C / DVB-T); empty list when no data."""
        if frontendDataOrg and len(frontendDataOrg):
            frontendData = ConvertToHumanReadable(frontendDataOrg)
            if frontendDataOrg["tuner_type"] == "DVB-S":
                # Frequencies above 11.7 GHz are in the Ku high band.
                if frontendData["frequency"] > 11699999 :
                    band = "High"
                else:
                    band = "Low"
                return ((_("NIM"), chr(ord('A') + frontendData["tuner_number"]), TYPE_TEXT),
                        (_("Type"), frontendData["tuner_type"], TYPE_TEXT),
                        (_("System"), frontendData["system"], TYPE_TEXT),
                        (_("Modulation"), frontendData["modulation"], TYPE_TEXT),
                        (_("Orbital position"), frontendData["orbital_position"], TYPE_VALUE_DEC),
                        (_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC),
                        (_("Symbol rate"), frontendData["symbol_rate"], TYPE_VALUE_DEC),
                        (_("Polarization"), frontendData["polarization"], TYPE_TEXT),
                        (_("Band"), band, TYPE_TEXT),
                        (_("Inversion"), frontendData["inversion"], TYPE_TEXT),
                        (_("FEC"), frontendData["fec_inner"], TYPE_TEXT),
                        (_("Pilot"), frontendData.get("pilot", None), TYPE_TEXT),
                        (_("Roll-off"), frontendData.get("rolloff", None), TYPE_TEXT))
            elif frontendDataOrg["tuner_type"] == "DVB-C":
                return ((_("NIM"), chr(ord('A') + frontendData["tuner_number"]), TYPE_TEXT),
                        (_("Type"), frontendData["tuner_type"], TYPE_TEXT),
                        (_("Modulation"), frontendData["modulation"], TYPE_TEXT),
                        (_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC),
                        (_("Symbol rate"), frontendData["symbol_rate"], TYPE_VALUE_DEC),
                        (_("Inversion"), frontendData["inversion"], TYPE_TEXT),
                        (_("FEC"), frontendData["fec_inner"], TYPE_TEXT))
            elif frontendDataOrg["tuner_type"] == "DVB-T":
                return ((_("NIM"), chr(ord('A') + frontendData["tuner_number"]), TYPE_TEXT),
                        (_("Type"), frontendData["tuner_type"], TYPE_TEXT),
                        (_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC),
                        (_("Channel"), getChannelNumber(frontendData["frequency"], frontendData["tuner_number"]), TYPE_VALUE_DEC),
                        (_("Inversion"), frontendData["inversion"], TYPE_TEXT),
                        (_("Bandwidth"), frontendData["bandwidth"], TYPE_VALUE_DEC),
                        (_("Code rate LP"), frontendData["code_rate_lp"], TYPE_TEXT),
                        (_("Code rate HP"), frontendData["code_rate_hp"], TYPE_TEXT),
                        (_("Constellation"), frontendData["constellation"], TYPE_TEXT),
                        (_("Transmission mode"), frontendData["transmission_mode"], TYPE_TEXT),
                        (_("Guard interval"), frontendData["guard_interval"], TYPE_TEXT),
                        (_("Hierarchy info"), frontendData["hierarchy_information"], TYPE_TEXT))
        return [ ]

    def fillList(self, Labels):
        """Render the (label, value, type[, param]) tuples into the list;
        rows whose value is None are skipped."""
        tlist = [ ]
        for item in Labels:
            if item[1] is None:
                continue;
            value = item[1]
            if len(item) < 4:
                tlist.append(ServiceInfoListEntry(item[0]+":", value, item[2]))
            else:
                tlist.append(ServiceInfoListEntry(item[0]+":", value, item[2], item[3]))
        self["infolist"].l.setList(tlist)

    def getServiceInfoValue(self, what):
        """Fetch a value from the service info object; -2 means 'string
        valued' and -1 means 'not available'."""
        if self.info is None:
            return ""
        v = self.info.getInfo(what)
        if v == -2:
            v = self.info.getInfoString(what)
        elif v == -1:
            v = _("N/A")
        return v
|
lewislone/mStocks | refs/heads/master | packets-analysis/lib/dpkt-1.7/dpkt/rip.py | 15 | # $Id: rip.py 23 2006-11-08 15:45:33Z dugsong $
"""Routing Information Protocol."""
import dpkt
# RIP v2 - RFC 2453
# http://tools.ietf.org/html/rfc2453
REQUEST = 1
RESPONSE = 2
class RIP(dpkt.Packet):
    """RIP v2 packet (RFC 2453).

    After ``unpack()``, ``self.rtes`` holds the parsed route entries and
    ``self.auth`` the optional authentication entry (address family
    0xFFFF), or None when absent.
    """
    __hdr__ = (
        ('cmd', 'B', REQUEST),
        ('v', 'B', 2),
        ('rsvd', 'H', 0)
    )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        l = []
        self.auth = None
        # The payload is a sequence of fixed-size 20-byte entries; an entry
        # whose address family is 0xFFFF carries authentication data
        # instead of a route (RFC 2453 section 4.1).
        while self.data:
            rte = RTE(self.data[:20])
            if rte.family == 0xFFFF:
                self.auth = Auth(self.data[:20])
            else:
                l.append(rte)
            self.data = self.data[20:]
        self.data = self.rtes = l

    def __len__(self):
        # Bug fix: the original bound a local named ``len`` and then called
        # len(self.auth), raising "TypeError: 'int' object is not callable"
        # whenever an auth entry was present.  Use a separate accumulator.
        n = self.__hdr_len__
        if self.auth:
            n += len(self.auth)
        n += sum(map(len, self.rtes))
        return n

    def __str__(self):
        auth = ''
        if self.auth:
            auth = str(self.auth)
        return self.pack_hdr() + \
               auth + \
               ''.join(map(str, self.rtes))
class RTE(dpkt.Packet):
    """RIP v2 Route Table Entry: a fixed 20-byte record
    (RFC 2453 section 4)."""
    __hdr__ = (
        ('family', 'H', 2),
        ('route_tag', 'H', 0),
        ('addr', 'I', 0),
        ('subnet', 'I', 0),
        ('next_hop', 'I', 0),
        ('metric', 'I', 1)
    )
class Auth(dpkt.Packet):
    """RIP v2 authentication entry: family 0xFFFF followed by the auth
    type and 16 bytes of authentication data (RFC 2453 section 4.1)."""
    __hdr__ = (
        ('rsvd', 'H', 0xFFFF),
        ('type', 'H', 2),
        ('auth', '16s', 0)
    )
if __name__ == '__main__':
    # Inline self-test: round-trip a sample RIPv2 response through
    # unpack/pack and check the parsed fields.
    import unittest

    class RIPTestCase(unittest.TestCase):
        def testPack(self):
            r = RIP(self.s)
            self.failUnless(self.s == str(r))

        def testUnpack(self):
            r = RIP(self.s)
            self.failUnless(r.auth == None)
            self.failUnless(len(r.rtes) == 2)
            rte = r.rtes[1]
            self.failUnless(rte.family == 2)
            self.failUnless(rte.route_tag == 0)
            self.failUnless(rte.metric == 1)

        # Raw RIPv2 response containing two route entries and no auth.
        s = '\x02\x02\x00\x00\x00\x02\x00\x00\x01\x02\x03\x00\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x02\x00\x00\xc0\xa8\x01\x08\xff\xff\xff\xfc\x00\x00\x00\x00\x00\x00\x00\x01'

    unittest.main()
|
koniiiik/django | refs/heads/master | tests/gis_tests/gis_migrations/migrations/0001_initial.py | 269 | from django.db import connection, migrations, models
from ...models import models as gis_models
# Base operations shared by all databases; the raster-dependent model is
# appended conditionally below.
ops = [
    migrations.CreateModel(
        name='Neighborhood',
        fields=[
            ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ('name', models.CharField(max_length=100, unique=True)),
            ('geom', gis_models.MultiPolygonField(srid=4326)),
        ],
        options={
            'required_db_features': ['gis_enabled'],
        },
        bases=(models.Model,),
    ),
    migrations.CreateModel(
        name='Household',
        fields=[
            ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ('neighborhood', models.ForeignKey(
                'gis_migrations.Neighborhood',
                models.SET_NULL,
                to_field='id',
                null=True,
            )),
            ('address', models.CharField(max_length=100)),
            ('zip_code', models.IntegerField(null=True, blank=True)),
            ('geom', gis_models.PointField(srid=4326, geography=True)),
        ],
        options={
            'required_db_features': ['gis_enabled'],
        },
        bases=(models.Model,),
    ),
    migrations.CreateModel(
        name='Family',
        fields=[
            ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ('name', models.CharField(max_length=100, unique=True)),
        ],
        options={
        },
        bases=(models.Model,),
    ),
    migrations.AddField(
        model_name='household',
        name='family',
        field=models.ForeignKey('gis_migrations.Family', models.SET_NULL, blank=True, null=True),
        preserve_default=True,
    )
]
# Only create the raster-backed model when the connection actually
# supports GIS rasters; otherwise the migration would fail at load time.
if connection.features.gis_enabled and connection.features.supports_raster:
    ops += [
        migrations.CreateModel(
            name='Heatmap',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100, unique=True)),
                ('rast', gis_models.fields.RasterField(srid=4326)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
class Migration(migrations.Migration):
    """
    Used for gis-specific migration tests.
    """

    # ``ops`` is assembled at module level so the raster model can be
    # included conditionally based on database capabilities.
    operations = ops
|
asnorkin/sentiment_analysis | refs/heads/master | site/lib/python2.7/site-packages/requests/__init__.py | 2 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2016 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.14.0'
__build__ = 0x021400
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
import warnings
# urllib3's DependencyWarnings should be silenced.
from .packages.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Minimal backport for Python < 2.7, which lacks logging.NullHandler:
    # a handler that silently discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())

# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
|
nichung/wwwflaskBlogrevA | refs/heads/master | env/lib/python2.7/site-packages/flask/testsuite/ext.py | 563 | # -*- coding: utf-8 -*-
"""
flask.testsuite.ext
~~~~~~~~~~~~~~~~~~~
Tests the extension import thing.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
try:
from imp import reload as reload_module
except ImportError:
reload_module = reload
from flask.testsuite import FlaskTestCase
from flask._compat import PY2
class ExtImportHookTestCase(FlaskTestCase):
    def setup(self):
        # we clear this out for various reasons. The most important one is
        # that a real flaskext could be in there which would disable our
        # fake package. Secondly we want to make sure that the flaskext
        # import hook does not break on reloading.
        for entry, value in list(sys.modules.items()):
            if (entry.startswith('flask.ext.') or
                    entry.startswith('flask_') or
                    entry.startswith('flaskext.') or
                    entry == 'flaskext') and value is not None:
                sys.modules.pop(entry, None)
        from flask import ext
        reload_module(ext)

        # reloading must not add more hooks: count the ExtensionImporter
        # instances on sys.meta_path and assert there is exactly one.
        import_hooks = 0
        for item in sys.meta_path:
            cls = type(item)
            if cls.__module__ == 'flask.exthook' and \
               cls.__name__ == 'ExtensionImporter':
                import_hooks += 1
        self.assert_equal(import_hooks, 1)

    def teardown(self):
        # The flask.ext namespace must never contain dotted (submodule)
        # names after a test run.
        from flask import ext
        for key in ext.__dict__:
            self.assert_not_in('.', key)
    # New-style extensions: flask.ext.X resolves to the flask_X package.
    def test_flaskext_new_simple_import_normal(self):
        from flask.ext.newext_simple import ext_id
        self.assert_equal(ext_id, 'newext_simple')

    def test_flaskext_new_simple_import_module(self):
        from flask.ext import newext_simple
        self.assert_equal(newext_simple.ext_id, 'newext_simple')
        self.assert_equal(newext_simple.__name__, 'flask_newext_simple')

    def test_flaskext_new_package_import_normal(self):
        from flask.ext.newext_package import ext_id
        self.assert_equal(ext_id, 'newext_package')

    def test_flaskext_new_package_import_module(self):
        from flask.ext import newext_package
        self.assert_equal(newext_package.ext_id, 'newext_package')
        self.assert_equal(newext_package.__name__, 'flask_newext_package')

    def test_flaskext_new_package_import_submodule_function(self):
        from flask.ext.newext_package.submodule import test_function
        self.assert_equal(test_function(), 42)

    def test_flaskext_new_package_import_submodule(self):
        from flask.ext.newext_package import submodule
        self.assert_equal(submodule.__name__, 'flask_newext_package.submodule')
        self.assert_equal(submodule.test_function(), 42)
def test_flaskext_old_simple_import_normal(self):
from flask.ext.oldext_simple import ext_id
self.assert_equal(ext_id, 'oldext_simple')
def test_flaskext_old_simple_import_module(self):
from flask.ext import oldext_simple
self.assert_equal(oldext_simple.ext_id, 'oldext_simple')
self.assert_equal(oldext_simple.__name__, 'flaskext.oldext_simple')
def test_flaskext_old_package_import_normal(self):
from flask.ext.oldext_package import ext_id
self.assert_equal(ext_id, 'oldext_package')
def test_flaskext_old_package_import_module(self):
from flask.ext import oldext_package
self.assert_equal(oldext_package.ext_id, 'oldext_package')
self.assert_equal(oldext_package.__name__, 'flaskext.oldext_package')
def test_flaskext_old_package_import_submodule(self):
from flask.ext.oldext_package import submodule
self.assert_equal(submodule.__name__, 'flaskext.oldext_package.submodule')
self.assert_equal(submodule.test_function(), 42)
def test_flaskext_old_package_import_submodule_function(self):
from flask.ext.oldext_package.submodule import test_function
self.assert_equal(test_function(), 42)
def test_flaskext_broken_package_no_module_caching(self):
for x in range(2):
with self.assert_raises(ImportError):
import flask.ext.broken
def test_no_error_swallowing(self):
try:
import flask.ext.broken
except ImportError:
exc_type, exc_value, tb = sys.exc_info()
self.assert_true(exc_type is ImportError)
if PY2:
message = 'No module named missing_module'
else:
message = 'No module named \'missing_module\''
self.assert_equal(str(exc_value), message)
self.assert_true(tb.tb_frame.f_globals is globals())
# reraise() adds a second frame so we need to skip that one too.
# On PY3 we even have another one :(
next = tb.tb_next.tb_next
if not PY2:
next = next.tb_next
self.assert_in('flask_broken/__init__.py', next.tb_frame.f_code.co_filename)
def suite():
    """Assemble and return the test suite for the extension import hook."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(ExtImportHookTestCase))
    return tests
|
tvibliani/odoo | refs/heads/8.0 | addons/website_blog/tests/test_website_blog_flow.py | 269 | # -*- coding: utf-8 -*-
from openerp.addons.website_blog.tests.common import TestWebsiteBlogCommon
class TestWebsiteBlogFlow(TestWebsiteBlogCommon):

    def test_website_blog_followers(self):
        """ Test the flow of followers and notifications for blogs. Intended
        flow :

        - people subscribe to a blog
        - when creating a new post, nobody except the creator follows it
        - people subscribed to the blog do not receive comments on posts
        - when published, a notification is sent to all blog followers
        - if someone subscribes to the post or comments on it, they become
          followers and receive notifications for future comments. """

        # Create a new blog, subscribe the employee to the blog
        test_blog = self.env['blog.blog'].sudo(self.user_blogmanager).create({
            'name': 'New Blog',
            'description': 'Presentation of new Odoo features'
        })
        # fixed typo in message: 'blog create' -> 'blog creator'
        self.assertIn(
            self.user_blogmanager.partner_id, test_blog.message_follower_ids,
            'website_blog: blog creator should be in the blog followers')
        test_blog.message_subscribe([self.user_employee.partner_id.id, self.user_public.partner_id.id])

        # Create a new post, blog followers should not follow the post
        test_blog_post = self.env['blog.post'].sudo(self.user_blogmanager).create({
            'name': 'New Post',
            'blog_id': test_blog.id,
        })
        self.assertNotIn(
            self.user_employee.partner_id, test_blog_post.message_follower_ids,
            'website_blog: subscribing to a blog should not subscribe to its posts')
        self.assertNotIn(
            self.user_public.partner_id, test_blog_post.message_follower_ids,
            'website_blog: subscribing to a blog should not subscribe to its posts')

        # Publish the blog post
        test_blog_post.write({'website_published': True})

        # Check publish message has been sent to blog followers
        publish_message = next((m for m in test_blog_post.blog_id.message_ids if m.subtype_id.id == self.ref('website_blog.mt_blog_blog_published')), None)
        # fixed typo in message: 'peuple' -> 'people'
        self.assertEqual(
            set(publish_message.notified_partner_ids._ids),
            set([self.user_employee.partner_id.id, self.user_public.partner_id.id]),
            'website_blog: people following a blog should be notified of a published post')

        # Armand posts a comment message -> becomes a follower of the post
        test_blog_post.sudo().message_post(
            body='Armande BlogUser Commented',
            type='comment',
            author_id=self.user_employee.partner_id.id,
            subtype='mt_comment',
        )
        self.assertIn(
            self.user_employee.partner_id, test_blog_post.message_follower_ids,
            'website_blog: people commenting a post should follow it afterwards')
|
Germanika/plover | refs/heads/bozzy | plover/gui_qt/about_dialog.py | 2 |
import re
from PyQt5.QtWidgets import QDialog
import plover
from plover.gui_qt.about_dialog_ui import Ui_AboutDialog
class AboutDialog(QDialog, Ui_AboutDialog):
    """'About' dialog for Plover: shows name, version, license and credits.

    The dialog body is a single rich-text (HTML) document assembled from
    the metadata exported by the ``plover`` package.
    """

    # role name under which the GUI registers this dialog
    ROLE = 'about'

    def __init__(self, engine):
        # ``engine`` is part of the common dialog constructor signature;
        # it is not used by this dialog.
        super(AboutDialog, self).__init__()
        self.setupUi(self)
        credits = []
        for c in plover.__credits__:
            # turn "<some-url>" into a clickable HTML link
            c = re.sub(r'<([^>]*)>', r'<a href="\1">\1</a>', c)
            # preserve the original line breaks in the rendered HTML
            c = c.replace('\n', '<br/>')
            credits.append(c)
        self.text.setHtml(
            '''
            <style>
            h1 {text-align:center;}
            h2 {text-align:center;}
            p {text-align:center;}
            </style>
            <p><img src="%(icon)s"/></p>
            <h1>%(name)s %(version)s</h1>
            <p>%(description)s</p>
            <p><i>Copyright %(copyright)s</i></p>
            <p>License: <a href="%(license_url)s">%(license)s</a></p>
            <p>Project Homepage: <a href='%(url)s'>%(url)s</a></p>
            <h2>Credits:</h2>
            <p>%(credits)s</p>
            ''' % {
                'icon'       : ':/plover.png',
                'name'       : plover.__name__.capitalize(),
                'version'    : plover.__version__,
                'description': plover.__long_description__,
                'copyright'  : plover.__copyright__.replace('(C)', '©'),
                'license'    : plover.__license__,
                'license_url': 'https://www.gnu.org/licenses/gpl-2.0-standalone.html',
                'url'        : plover.__download_url__,
                'credits'    : '<br/>'.join(credits),
            })
|
gmorph/MAVProxy | refs/heads/master | MAVProxy/modules/mavproxy_gasheli.py | 10 | """
helicopter monitoring and control module gas helicopters
"""
import os, sys, math, time
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import mp_settings
class GasHeliModule(mp_module.MPModule):
    """Monitoring and control module for gas helicopters.

    Maintains IGN/THR/RPM status fields on the MAVProxy console and
    implements timed engine start/stop sequences using RC overrides on
    the ignition channel and DO_REPEAT_SERVO on the starter channel.
    """

    def __init__(self, mpstate):
        super(GasHeliModule, self).__init__(mpstate, "gas_heli", "Gas Heli", public=False)
        # console status fields, updated from incoming telemetry
        self.console.set_status('IGN', 'IGN', row=4)
        self.console.set_status('THR', 'THR', row=4)
        self.console.set_status('RPM', 'RPM: 0', row=4)
        self.add_command('gasheli', self.cmd_gasheli,
                         'gas helicopter control',
                         ['<start|stop>',
                          'set (GASHELISETTINGS)'])
        self.gasheli_settings = mp_settings.MPSettings(
            [('ignition_chan', int, 0),              # RC channel for ignition (valid 1-8, 0 = unset)
             ('ignition_disable_time', float, 0.5),  # seconds ignition override is held while starting
             ('ignition_stop_time', float, 3),       # seconds ignition override is held when stopping
             ('starter_chan', int, 0),               # channel driving the starter (valid 1-14, 0 = unset)
             ('starter_time', float, 3.0),           # seconds the starter is run
             ('starter_pwm_on', int, 2000),          # PWM sent to engage the starter
             ('starter_pwm_off', int, 1000),         # PWM for starter at rest (unused in this module)
             ])
        self.add_completion_function('(GASHELISETTINGS)', self.gasheli_settings.completion)
        # state flags for the asynchronous start/stop sequences (see idle_task)
        self.starting_motor = False
        self.stopping_motor = False
        self.motor_t1 = None      # timestamp when the current start/stop sequence began
        self.old_override = 0     # saved RC override, restored when the sequence ends

    def mavlink_packet(self, msg):
        '''handle an incoming mavlink packet'''
        type = msg.get_type()
        master = self.master  # NOTE(review): 'master' is unused here
        # add some status fields
        if type in [ 'RC_CHANNELS_RAW' ]:
            # ignition state is inferred from RC channel 6
            rc6 = msg.chan6_raw
            if rc6 > 1500:
                ign_colour = 'green'
            else:
                ign_colour = 'red'
            self.console.set_status('IGN', 'IGN', fg=ign_colour, row=4)
        if type in [ 'SERVO_OUTPUT_RAW' ]:
            # throttle state is taken from servo output 8
            rc8 = msg.servo8_raw
            if rc8 < 1200:
                thr_colour = 'red'
            elif rc8 < 1300:
                thr_colour = 'orange'
            else:
                thr_colour = 'green'
            self.console.set_status('THR', 'THR', fg=thr_colour, row=4)
        if type in [ 'RPM' ]:
            rpm = msg.rpm1
            if rpm < 3000:
                rpm_colour = 'red'
            elif rpm < 10000:
                rpm_colour = 'orange'
            else:
                rpm_colour = 'green'
            self.console.set_status('RPM', 'RPM: %u' % rpm, fg=rpm_colour, row=4)

    def valid_starter_settings(self):
        '''check that the ignition and starter channels are configured'''
        if self.gasheli_settings.ignition_chan <= 0 or self.gasheli_settings.ignition_chan > 8:
            print("Invalid ignition channel %d" % self.gasheli_settings.ignition_chan)
            return False
        if self.gasheli_settings.starter_chan <= 0 or self.gasheli_settings.starter_chan > 14:
            print("Invalid starter channel %d" % self.gasheli_settings.starter_chan)
            return False
        return True

    def idle_task(self):
        '''run periodic tasks'''
        if self.starting_motor:
            if self.gasheli_settings.ignition_disable_time > 0:
                elapsed = time.time() - self.motor_t1
                if elapsed >= self.gasheli_settings.ignition_disable_time:
                    # restore the pilot's ignition control
                    self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
                    self.starting_motor = False
        if self.stopping_motor:
            elapsed = time.time() - self.motor_t1
            if elapsed >= self.gasheli_settings.ignition_stop_time:
                # hand back control to RC
                self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
                self.stopping_motor = False

    def start_motor(self):
        '''start motor'''
        if not self.valid_starter_settings():
            return

        self.motor_t1 = time.time()
        self.stopping_motor = False

        if self.gasheli_settings.ignition_disable_time > 0:
            # hold the ignition channel low (1000us) while cranking;
            # idle_task() restores the saved override afterwards
            self.old_override = self.module('rc').get_override_chan(self.gasheli_settings.ignition_chan-1)
            self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, 1000)
            self.starting_motor = True
        else:
            # nothing more to do
            self.starting_motor = False

        # setup starter run
        # NOTE(review): the cycle time is passed as starter_time*2 with a
        # single repeat, which appears intended to keep the output high for
        # starter_time seconds -- confirm against DO_REPEAT_SERVO semantics
        self.master.mav.command_long_send(self.target_system,
                                          self.target_component,
                                          mavutil.mavlink.MAV_CMD_DO_REPEAT_SERVO, 0,
                                          self.gasheli_settings.starter_chan,
                                          self.gasheli_settings.starter_pwm_on,
                                          1,
                                          self.gasheli_settings.starter_time*2,
                                          0, 0, 0)
        print("Starting motor")

    def stop_motor(self):
        '''stop motor'''
        if not self.valid_starter_settings():
            return

        self.motor_t1 = time.time()
        self.starting_motor = False
        self.stopping_motor = True

        # hold the ignition channel low (1000us) until the engine dies;
        # idle_task() hands control back after ignition_stop_time
        self.old_override = self.module('rc').get_override_chan(self.gasheli_settings.ignition_chan-1)
        self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, 1000)
        print("Stopping motor")

    def cmd_gasheli(self, args):
        '''handle the gasheli command'''
        usage = "Usage: gasheli <start|stop|set>"
        if len(args) < 1:
            print(usage)
            return
        if args[0] == "start":
            self.start_motor()
        elif args[0] == "stop":
            self.stop_motor()
        elif args[0] == "set":
            self.gasheli_settings.command(args[1:])
        else:
            print(usage)
def init(mpstate):
    """MAVProxy module entry point: build and return the gas heli module."""
    module = GasHeliModule(mpstate)
    return module
|
brucetsao/arduino-ameba | refs/heads/master | build/windows/work/hardware/tools/gcc-arm-none-eabi-4.8.3-2014q1/arm-none-eabi/share/gdb/python/gdb/__init__.py | 110 | # Copyright (C) 2010-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import traceback
import os
import sys
import _gdb
if sys.version_info[0] > 2:
# Python 3 moved "reload"
from imp import reload
from _gdb import *
class _GdbFile (object):
# These two are needed in Python 3
encoding = "UTF-8"
errors = "strict"
def close(self):
# Do nothing.
return None
def isatty(self):
return False
def writelines(self, iterable):
for line in iterable:
self.write(line)
def flush(self):
flush()
class GdbOutputFile (_GdbFile):
    def write(self, s):
        # route through gdb's own write() on its stdout stream
        write(s, stream=STDOUT)

# Replace sys.stdout so Python output goes through gdb.
sys.stdout = GdbOutputFile()
class GdbOutputErrorFile (_GdbFile):
    def write(self, s):
        # route through gdb's own write() on its stderr stream
        write(s, stream=STDERR)

# Replace sys.stderr so Python error output goes through gdb.
sys.stderr = GdbOutputErrorFile()
# Default prompt hook does nothing.
prompt_hook = None

# Ensure that sys.argv is set to something.
# We do not use PySys_SetArgvEx because it did not appear until 2.6.6.
sys.argv = ['']

# Initial pretty printers (modules may append to this list later).
pretty_printers = []

# Initial type printers.
type_printers = []

# Convenience variable holding GDB's python directory.
PYTHONDIR = os.path.dirname(os.path.dirname(__file__))

# Auto-load all functions/commands.

# Packages to auto-load.
packages = [
    'function',
    'command'
]
# pkgutil.iter_modules is not available prior to Python 2.6.  Instead,
# manually iterate the list, collating the Python files in each module
# path.  Construct the module name, and import.
def auto_load_packages():
    """Import (or reload) every submodule of the packages listed in the
    module-level ``packages`` list under GDB's Python directory.

    A failure in one module is reported on stderr but does not stop the
    remaining modules from loading.
    """
    for package in packages:
        location = os.path.join(os.path.dirname(__file__), package)
        if not os.path.exists(location):
            continue
        # On Python 3 filter() is lazy, which is fine: it is traversed once.
        py_files = filter(lambda x: x.endswith('.py')
                                    and x != '__init__.py',
                          os.listdir(location))
        for py_file in py_files:
            # Construct from foo.py, gdb.module.foo
            modname = "%s.%s.%s" % (__name__, package, py_file[:-3])
            try:
                if modname in sys.modules:
                    # reload modules with duplicate names
                    reload(__import__(modname))
                else:
                    __import__(modname)
            except Exception:
                # Was a bare ``except:``: narrowing to Exception keeps
                # KeyboardInterrupt/SystemExit propagating while still
                # reporting any import failure without aborting the loop.
                sys.stderr.write(traceback.format_exc() + "\n")

auto_load_packages()
def GdbSetPythonDirectory(dir):
    """Update sys.path, reload gdb and auto-load packages.

    :param dir: the new GDB Python directory to use for imports
    """
    global PYTHONDIR

    try:
        sys.path.remove(PYTHONDIR)
    except ValueError:
        # the old directory was not on sys.path; nothing to remove
        pass
    sys.path.insert(0, dir)

    PYTHONDIR = dir

    # note that reload overwrites the gdb module without deleting existing
    # attributes
    reload(__import__(__name__))
    auto_load_packages()
|
andymckay/zamboni | refs/heads/master | wsgi/__init__.py | 12133432 | |
eestay/edx-platform | refs/heads/master | lms/djangoapps/dashboard/management/__init__.py | 12133432 | |
yanbober/SmallReptileTraining | refs/heads/master | DistributedBaseSpider/__init__.py | 12133432 | |
tomduijf/home-assistant | refs/heads/master | tests/util/__init__.py | 12133432 | |
tjlaboss/openmc | refs/heads/develop | tests/regression_tests/tallies/__init__.py | 12133432 | |
FireBladeNooT/Medusa_1_6 | refs/heads/master | medusa/post_processor.py | 1 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
import fnmatch
import os
import re
import stat
import subprocess
import adba
from six import text_type
from . import app, common, db, failed_history, helpers, history, logger, notifiers, show_name_helpers
from .helper.common import episode_num, remove_extension
from .helper.exceptions import (EpisodeNotFoundException, EpisodePostProcessingFailedException,
ShowDirectoryNotFoundException)
from .helpers import is_subtitle, verify_freespace
from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser
from .subtitles import from_code, from_ietf_code
class PostProcessor(object):
    """A class which will process a media file according to the post processing settings in the config."""

    # Result codes for _checkForExistingFile(): how an existing file's size
    # compares with the file being post-processed.
    EXISTS_LARGER = 1
    EXISTS_SAME = 2
    EXISTS_SMALLER = 3
    DOESNT_EXIST = 4

    # File names to ignore wherever they are encountered.
    IGNORED_FILESTRINGS = ['.AppleDouble', '.DS_Store']
def __init__(self, file_path, nzb_name=None, process_method=None, is_priority=None):
    """
    Create a new post processor with the given file path and optionally an NZB name.

    :param file_path: The path to the file to be processed
    :param nzb_name: The name of the NZB which resulted in this file being downloaded (optional)
    :param process_method: processing method to use; defaults to app.PROCESS_METHOD when falsy
    :param is_priority: whether this download was flagged as a priority download
    """
    # absolute path to the folder that is being processed
    self.folder_path = os.path.dirname(os.path.abspath(file_path))

    # full path to file
    self.file_path = file_path

    # file name only
    self.file_name = os.path.basename(file_path)

    # relative path to the file that is being processed
    self.rel_path = self._get_rel_path()

    # name of the NZB that resulted in this folder
    self.nzb_name = nzb_name

    self.process_method = process_method if process_method else app.PROCESS_METHOD

    self.in_history = False
    self.release_group = None
    self.release_name = None
    self.is_proper = False
    self.is_priority = is_priority

    # accumulated log output for this run (appended to by _log)
    self.log = ''

    self.version = None

    # cached adba.Episode object, built lazily by _add_to_anidb_mylist
    self.anidbEpisode = None

    self.manually_searched = False
def _log(self, message, level=logger.INFO):
    """
    A wrapper for the internal logger which also keeps track of messages and saves them to a string for later.

    :param message: The string to log (unicode)
    :param level: The log level to use (optional)
    """
    logger.log(message, level)
    # accumulate the message so the whole run can be shown later
    self.log += message + '\n'
def _get_rel_path(self):
    """Return the relative path to the file if possible, else the parent dir.

    :return: relative path to file or parent dir to file
    :rtype: text_type
    """
    if app.TV_DOWNLOAD_DIR:
        try:
            rel_path = os.path.relpath(self.file_path, app.TV_DOWNLOAD_DIR)
            # check if we really found the relative path
            if not rel_path.startswith('..'):
                return rel_path
        except ValueError:
            # os.path.relpath raises ValueError when the paths cannot be
            # related (e.g. different drives on Windows); fall through
            pass

    return self.file_path
def _checkForExistingFile(self, existing_file):
    """
    Check if a file exists already and if it does whether it's bigger or smaller than
    the file we are post processing.

    :param existing_file: The file to compare to
    :return:
        DOESNT_EXIST if the file doesn't exist
        EXISTS_LARGER if the file exists and is larger than the file we are post processing
        EXISTS_SMALLER if the file exists and is smaller than the file we are post processing
        EXISTS_SAME if the file exists and is the same size as the file we are post processing
    """
    if not existing_file:
        self._log(u"There is no existing file so there's no worries about replacing it", logger.DEBUG)
        return PostProcessor.DOESNT_EXIST

    if not os.path.isfile(existing_file):
        self._log(u"File {0} doesn't exist so there's no worries about replacing it".format
                  (existing_file), logger.DEBUG)
        return PostProcessor.DOESNT_EXIST

    # Stat each file once instead of up to four times in the chained
    # comparisons; this also guarantees the three branches see a single,
    # consistent size even if a file is still being written to.
    existing_size = os.path.getsize(existing_file)
    new_size = os.path.getsize(self.file_path)

    if existing_size > new_size:
        self._log(u'File {0} is larger than {1}'.format(existing_file, self.file_path), logger.DEBUG)
        return PostProcessor.EXISTS_LARGER
    if existing_size == new_size:
        self._log(u'File {0} is same size as {1}'.format(existing_file, self.file_path), logger.DEBUG)
        return PostProcessor.EXISTS_SAME
    self._log(u'File {0} is smaller than {1}'.format(existing_file, self.file_path), logger.DEBUG)
    return PostProcessor.EXISTS_SMALLER
@staticmethod
def _search_files(path, pattern='*', subfolders=None, base_name_only=None, sort=True):
"""
Search for files in a given path.
:param path: path to file or folder (NOTE: folder paths must end with slashes)
:type path: text_type
:param pattern: pattern used to match the files
:type pattern: text_type
:param subfolders: search for files in subfolders
:type subfolders: bool
:param base_name_only: only match files with the same file name
:type base_name_only: bool
:param sort: return files sorted by size
:type sort: bool
:return: list with found files or empty list
:rtype: list
"""
directory = os.path.dirname(path)
if base_name_only and os.path.isfile(path):
pattern = os.path.basename(path).rpartition('.')[0] + pattern
found_files = []
for root, _, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
found_files.append(os.path.join(root, filename))
if not subfolders:
break
if sort:
found_files = sorted(found_files, key=os.path.getsize, reverse=True)
return found_files
def list_associated_files(self, file_path, base_name_only=False, subtitles_only=False, subfolders=False):
    """
    For a given file path search for files in the same directory and return their absolute paths.

    :param file_path: The file to check for associated files
    :param base_name_only: False add extra '.' (conservative search) to file_path minus extension
    :param subtitles_only: list only subtitles
    :param subfolders: check subfolders while listing files
    :return: A list containing all files which are associated to the given file
    """
    # file path to the video file that is being processed (without extension)
    processed_file_name = os.path.basename(file_path).rpartition('.')[0].lower()

    file_list = self._search_files(file_path, subfolders=subfolders, base_name_only=base_name_only)

    # loop through all the files in the folder, and check if they are the same name
    # even when the cases don't match
    filelist = []
    for found_file in file_list:
        file_name = os.path.basename(found_file).lower()
        if file_name.startswith(processed_file_name):
            # only add subtitles with valid languages to the list
            if is_subtitle(found_file):
                code = file_name.rsplit('.', 2)[1].replace('_', '-')
                language = from_code(code, unknown='') or from_ietf_code(code, unknown='und')
                if not language:
                    continue
            filelist.append(found_file)

    file_path_list = []
    extensions_to_delete = []
    for associated_file_path in filelist:
        # Exclude the video file we are post-processing
        if associated_file_path == file_path:
            continue

        # Exclude non-subtitle files with the 'only subtitles' option
        if subtitles_only and not is_subtitle(associated_file_path):
            continue

        # Exclude .rar files from associated list
        if re.search(r'(^.+\.(rar|r\d+)$)', associated_file_path):
            continue

        # Add the extensions that the user doesn't allow to the 'extensions_to_delete' list
        if app.MOVE_ASSOCIATED_FILES:
            allowed_extensions = app.ALLOWED_EXTENSIONS.split(',')
            found_extension = associated_file_path.rpartition('.')[2]
            if found_extension and found_extension not in allowed_extensions:
                self._log(u'Associated file extension not found in allowed extensions: .{0}'.format
                          (found_extension.upper()), logger.DEBUG)
                if os.path.isfile(associated_file_path):
                    extensions_to_delete.append(associated_file_path)

        if os.path.isfile(associated_file_path):
            file_path_list.append(associated_file_path)

    if file_path_list:
        self._log(u'Found the following associated files for {0}: {1}'.format
                  (file_path, file_path_list), logger.DEBUG)
        if extensions_to_delete:
            # Rebuild the 'file_path_list' list only with the extensions the user allows
            file_path_list = [associated_file for associated_file in file_path_list
                              if associated_file not in extensions_to_delete]
            self._delete(extensions_to_delete)
    else:
        self._log(u'No associated files for {0} were found during this pass'.format(file_path), logger.DEBUG)

    return file_path_list
def _delete(self, file_path, associated_files=False):
    """
    Delete the file (or list of files) and optionally all associated files.

    :param file_path: The file to delete, or a list of files
    :param associated_files: True to delete all files which differ only by extension, False to leave them
    """
    if not file_path:
        return

    # Check if file_path is a list, if not, make it one
    if not isinstance(file_path, list):
        file_list = [file_path]
    else:
        file_list = file_path

    # figure out which files we want to delete
    if associated_files:
        file_list = file_list + self.list_associated_files(file_path, base_name_only=True, subfolders=True)

    if not file_list:
        self._log(u'There were no files associated with {0}, not deleting anything'.format
                  (file_path), logger.DEBUG)
        return

    # delete the file and any other files which we want to delete
    for cur_file in file_list:
        if os.path.isfile(cur_file):
            self._log(u'Deleting file {0}'.format(cur_file), logger.DEBUG)
            # check first the read-only attribute
            file_attribute = os.stat(cur_file)[0]
            if not file_attribute & stat.S_IWRITE:
                # File is read-only, so make it writeable
                self._log(u'Read only mode on file {0}. Will try to make it writeable'.format
                          (cur_file), logger.DEBUG)
                try:
                    os.chmod(cur_file, stat.S_IWRITE)
                except Exception:
                    # best effort: removal below may still fail and raise
                    self._log(u'Cannot change permissions of {0}'.format(cur_file), logger.WARNING)

            os.remove(cur_file)

            # do the library update for synoindex
            notifiers.synoindex_notifier.deleteFile(cur_file)
def _combined_file_operation(self, file_path, new_path, new_base_name, associated_files=False,
                             action=None, subtitles=False):
    """
    Perform a generic operation (move or copy) on a file.

    Can rename the file as well as change its location, and optionally move associated files too.

    :param file_path: The full path of the media file to act on
    :param new_path: Destination path where we want to move/copy the file to
    :param new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
    :param associated_files: Boolean, whether we should copy similarly-named files too
    :param action: function that takes an old path and new path and does an operation with them (move/copy)
    :param subtitles: Boolean, whether we should process subtitles too
    """
    if not action:
        self._log(u'Must provide an action for the combined file operation', logger.ERROR)
        return

    file_list = [file_path]
    if associated_files:
        file_list = file_list + self.list_associated_files(file_path)
    elif subtitles:
        file_list = file_list + self.list_associated_files(file_path, subtitles_only=True)

    if not file_list:
        self._log(u'There were no files associated with {0}, not moving anything'.format
                  (file_path), logger.DEBUG)
        return

    # base name with file path (without extension and ending dot)
    old_base_name = file_path.rpartition('.')[0]
    old_base_name_length = len(old_base_name)

    for cur_file_path in file_list:
        # remember if the extension changed
        changed_extension = None
        # file extension without leading dot (for example: de.srt)
        extension = cur_file_path[old_base_name_length + 1:]
        # initially set current extension as new extension
        new_extension = extension

        # split the extension in two parts. E.g.: ('de', '.srt')
        split_extension = os.path.splitext(extension)
        # check if it's a subtitle and also has a subtitle language
        if is_subtitle(cur_file_path) and all(split_extension):
            sub_lang = split_extension[0].lower()
            if sub_lang == 'pt-br':
                # normalize the casing of the Brazilian Portuguese tag
                sub_lang = 'pt-BR'
            new_extension = sub_lang + split_extension[1]
            changed_extension = True

        # replace nfo with nfo-orig to avoid conflicts
        if extension == 'nfo' and app.NFO_RENAME:
            new_extension = 'nfo-orig'
            changed_extension = True

        # rename file with new base name
        if new_base_name:
            new_file_name = new_base_name + '.' + new_extension
        else:
            # current file name including extension
            new_file_name = os.path.basename(cur_file_path)
            # if we're not renaming we still need to change the extension sometimes
            if changed_extension:
                new_file_name = new_file_name.replace(extension, new_extension)

        if app.SUBTITLES_DIR and is_subtitle(cur_file_path):
            # subtitles go into their own configured subdirectory
            subs_new_path = os.path.join(new_path, app.SUBTITLES_DIR)
            dir_exists = helpers.makeDir(subs_new_path)
            if not dir_exists:
                logger.log(u'Unable to create subtitles folder {0}'.format(subs_new_path), logger.ERROR)
            else:
                helpers.chmodAsParent(subs_new_path)
            new_file_path = os.path.join(subs_new_path, new_file_name)
        else:
            new_file_path = os.path.join(new_path, new_file_name)

        action(cur_file_path, new_file_path)
def _move(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
    """
    Move file and set proper permissions.

    :param file_path: The full path of the media file to move
    :param new_path: Destination path where we want to move the file to
    :param new_base_name: The base filename (no extension) to use during the move. Use None to keep the same name.
    :param associated_files: Boolean, whether we should move similarly-named files too
    :param subtitles: Boolean, whether subtitle files should be moved as well
    """
    def move_action(cur_file_path, new_file_path):
        # log the attempt first so failures can be correlated with it
        self._log(u'Moving file from {0} to {1} '.format(cur_file_path, new_file_path), logger.DEBUG)
        try:
            helpers.moveFile(cur_file_path, new_file_path)
            helpers.chmodAsParent(new_file_path)
        except (IOError, OSError) as e:
            self._log(u'Unable to move file {0} to {1}: {2!r}'.format
                      (cur_file_path, new_file_path, e), logger.ERROR)
            raise

    self._combined_file_operation(file_path, new_path, new_base_name, associated_files,
                                  action=move_action, subtitles=subtitles)
def _copy(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
    """
    Copy file and set proper permissions.

    :param file_path: The full path of the media file to copy
    :param new_path: Destination path where we want to copy the file to
    :param new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
    :param associated_files: Boolean, whether we should copy similarly-named files too
    :param subtitles: Boolean, whether subtitle files should be copied as well
    """
    def copy_action(cur_file_path, new_file_path):
        # log the attempt first so failures can be correlated with it
        self._log(u'Copying file from {0} to {1}'.format(cur_file_path, new_file_path), logger.DEBUG)
        try:
            helpers.copyFile(cur_file_path, new_file_path)
            helpers.chmodAsParent(new_file_path)
        except (IOError, OSError) as e:
            self._log(u'Unable to copy file {0} to {1}: {2!r}'.format
                      (cur_file_path, new_file_path, e), logger.ERROR)
            raise

    self._combined_file_operation(file_path, new_path, new_base_name, associated_files,
                                  action=copy_action, subtitles=subtitles)
def _hardlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
    """
    Hardlink file and set proper permissions.

    :param file_path: The full path of the media file to move
    :param new_path: Destination path where we want to create a hard linked file
    :param new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
    :param associated_files: Boolean, whether we should move similarly-named files too
    :param subtitles: Boolean, whether subtitle files should be linked as well
    """
    def hardlink_action(cur_file_path, new_file_path):
        # log the attempt first so failures can be correlated with it
        self._log(u'Hard linking file from {0} to {1}'.format(cur_file_path, new_file_path), logger.DEBUG)
        try:
            helpers.hardlinkFile(cur_file_path, new_file_path)
            helpers.chmodAsParent(new_file_path)
        except (IOError, OSError) as e:
            self._log(u'Unable to link file {0} to {1}: {2!r}'.format
                      (cur_file_path, new_file_path, e), logger.ERROR)
            raise

    self._combined_file_operation(file_path, new_path, new_base_name, associated_files,
                                  action=hardlink_action, subtitles=subtitles)
def _moveAndSymlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
    """
    Move file, symlink source location back to destination, and set proper permissions.

    :param file_path: The full path of the media file to move
    :param new_path: Destination path where we want to move the file to create a symbolic link to
    :param new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
    :param associated_files: Boolean, whether we should move similarly-named files too
    :param subtitles: Boolean, whether subtitle files should be processed as well
    """
    def move_and_symlink_action(cur_file_path, new_file_path):
        # log the attempt first so failures can be correlated with it
        self._log(u'Moving then symbolic linking file from {0} to {1}'.format
                  (cur_file_path, new_file_path), logger.DEBUG)
        try:
            helpers.moveAndSymlinkFile(cur_file_path, new_file_path)
            helpers.chmodAsParent(new_file_path)
        except (IOError, OSError) as e:
            self._log(u'Unable to link file {0} to {1}: {2!r}'.format
                      (cur_file_path, new_file_path, e), logger.ERROR)
            raise

    self._combined_file_operation(file_path, new_path, new_base_name, associated_files,
                                  action=move_and_symlink_action, subtitles=subtitles)
@staticmethod
def _build_anidb_episode(connection, file_path):
    """
    Look up anidb properties for an episode.

    :param connection: anidb connection handler
    :param file_path: file to check
    :return: adba.Episode object for the given file
    """
    # paramsF/paramsA select which file and anime attributes anidb returns
    ep = adba.Episode(connection, filePath=file_path,
                      paramsF=['quality', 'anidb_file_name', 'crc32'],
                      paramsA=['epno', 'english_name', 'short_name_list', 'other_name', 'synonym_list'])
    return ep
def _add_to_anidb_mylist(self, file_path):
"""
Add an episode to anidb mylist.
:param file_path: file to add to mylist
"""
if helpers.set_up_anidb_connection():
if not self.anidbEpisode: # seems like we could parse the name before, now lets build the anidb object
self.anidbEpisode = self._build_anidb_episode(app.ADBA_CONNECTION, file_path)
self._log(u'Adding the file to the anidb mylist', logger.DEBUG)
try:
self.anidbEpisode.add_to_mylist(status=1) # status = 1 sets the status of the file to "internal HDD"
except Exception as e:
self._log(u'Exception message: {0!r}'.format(e))
    def _find_info(self):
        """
        For a given file try to find the showid, season, and episode.

        Tries the NZB name, the file name and the relative path (in that
        order), merging whatever each attempt yields.

        :return: A (show, season, episodes, quality, version) tuple; show may
            be None and episodes may be [] when nothing could be determined.
        """
        show = season = quality = version = None
        episodes = []
        attempt_list = [
            # try to analyze the nzb name
            lambda: self._analyze_name(self.nzb_name),
            # try to analyze the file name
            lambda: self._analyze_name(self.file_name),
            # try to analyze the file path
            lambda: self._analyze_name(self.rel_path)
        ]
        # Try every possible method to get our info
        for att_num, attempt in enumerate(attempt_list):
            try:
                cur_show, cur_season, cur_episodes, cur_quality, cur_version = attempt()
            except (InvalidNameException, InvalidShowException) as error:
                logger.log(u'{0}'.format(error), logger.DEBUG)
                continue
            if not cur_show:
                continue
            show = cur_show
            # For every attempt except the last one, skip 'Unknown' qualities
            # so a later (possibly better) source can still supply a quality.
            if att_num < (len(attempt_list) - 1):
                if common.Quality.qualityStrings[cur_quality] == 'Unknown':
                    continue
            quality = cur_quality
            # we only get current version from anime
            if cur_version is not None:
                version = cur_version
            if cur_season is not None:
                season = cur_season
            if cur_episodes:
                episodes = cur_episodes
            # for air-by-date shows we need to look up the season/episode from database
            # (season == -1 is the air-by-date sentinel set by _analyze_name)
            if season == -1 and show and episodes:
                self._log(u'Looks like this is an air-by-date or sports show, '
                          u'attempting to convert the date to season/episode', logger.DEBUG)
                try:
                    # episodes[0] is expected to be a date object here
                    airdate = episodes[0].toordinal()
                except AttributeError:
                    self._log(u'Could not convert to a valid airdate: {0}'.format(episodes[0]), logger.DEBUG)
                    episodes = []
                    continue
                # Ignore season 0 when searching for episode
                # (conflict between special and regular episode, same air date)
                main_db_con = db.DBConnection()
                sql_result = main_db_con.select(
                    'SELECT season, episode '
                    'FROM tv_episodes '
                    'WHERE showid = ? '
                    'AND indexer = ? '
                    'AND airdate = ? '
                    'AND season != 0',
                    [show.indexerid, show.indexer, airdate])
                if sql_result:
                    season = int(sql_result[0]['season'])
                    episodes = [int(sql_result[0]['episode'])]
                else:
                    # Found no result, trying with season 0
                    sql_result = main_db_con.select(
                        'SELECT season, episode '
                        'FROM tv_episodes '
                        'WHERE showid = ? '
                        'AND indexer = ? '
                        'AND airdate = ?',
                        [show.indexerid, show.indexer, airdate])
                    if sql_result:
                        season = int(sql_result[0]['season'])
                        episodes = [int(sql_result[0]['episode'])]
                    else:
                        self._log(u'Unable to find episode with date {0} for show {1}, skipping'.format
                                  (episodes[0], show.indexerid), logger.DEBUG)
                        # we don't want to leave dates in the episode list
                        # if we couldn't convert them to real episode numbers
                        episodes = []
                        continue
            # If there's no season, we assume it's the first season
            elif season is None and show:
                main_db_con = db.DBConnection()
                numseasons_result = main_db_con.select(
                    'SELECT COUNT(DISTINCT season) '
                    'FROM tv_episodes '
                    'WHERE showid = ? '
                    'AND indexer = ? '
                    'AND season != 0',
                    [show.indexerid, show.indexer])
                if int(numseasons_result[0][0]) == 1 and season is None:
                    self._log(u"Episode doesn't have a season number, but this show appears "
                              u"to have only 1 season, setting season number to 1...", logger.DEBUG)
                    season = 1
            # Stop at the first complete result; otherwise keep trying the
            # remaining name sources and return whatever was accumulated.
            if show and season and episodes:
                return show, season, episodes, quality, version
        return show, season, episodes, quality, version
def _analyze_name(self, name):
"""
Take a name and try to figure out a show, season, and episode from it.
:param name: A string which we want to analyze to determine show info from (unicode)
:return: A (indexer_id, season, [episodes]) tuple. The first two may be None and episodes may be []
if none were found.
"""
to_return = (None, None, [], None, None)
if not name:
return to_return
logger.log(u'Analyzing name: {0}'.format(name), logger.DEBUG)
# parse the name to break it into show name, season, and episode
try:
parse_result = NameParser().parse(name)
except (InvalidNameException, InvalidShowException) as error:
logger.log(u'{0}'.format(error), logger.DEBUG)
return to_return
# show object
show = parse_result.show
if parse_result.is_air_by_date:
season = -1
episodes = [parse_result.air_date]
else:
season = parse_result.season_number
episodes = parse_result.episode_numbers
to_return = (show, season, episodes, parse_result.quality, parse_result.version)
self._finalize(parse_result)
return to_return
def _finalize(self, parse_result):
"""
Store parse result if it is complete and final.
:param parse_result: Result of parsers
"""
self.release_group = parse_result.release_group
# remember whether it's a proper
self.is_proper = bool(parse_result.proper_tags)
# if the result is complete then remember that for later
# if the result is complete then set release name
if parse_result.series_name and ((parse_result.season_number is not None and parse_result.episode_numbers) or
parse_result.air_date) and parse_result.release_group:
if not self.release_name:
self.release_name = remove_extension(os.path.basename(parse_result.original_name))
else:
logger.log(u"Parse result not sufficient (all following have to be set). Won't save release name",
logger.DEBUG)
logger.log(u'Parse result (series_name): {0}'.format(parse_result.series_name), logger.DEBUG)
logger.log(u'Parse result (season_number): {0}'.format(parse_result.season_number), logger.DEBUG)
logger.log(u'Parse result (episode_numbers): {0}'.format(parse_result.episode_numbers), logger.DEBUG)
logger.log(u'Parse result (ab_episode_numbers): {0}'.format(parse_result.ab_episode_numbers), logger.DEBUG)
logger.log(u'Parse result (air_date): {0}'.format(parse_result.air_date), logger.DEBUG)
logger.log(u'Parse result (release_group): {0}'.format(parse_result.release_group), logger.DEBUG)
def _get_ep_obj(self, show, season, episodes):
"""
Retrieve the TVEpisode object requested.
:param show: The show object belonging to the show we want to process
:param season: The season of the episode (int)
:param episodes: A list of episodes to find (list of ints)
:return: If the episode(s) can be found then a TVEpisode object with the correct related eps will
be instantiated and returned. If the episode can't be found then None will be returned.
"""
root_ep = None
for cur_episode in episodes:
self._log(u'Retrieving episode object for {0} {1}'.format
(show.name, episode_num(season, cur_episode)), logger.DEBUG)
# now that we've figured out which episode this file is just load it manually
try:
cur_ep = show.get_episode(season, cur_episode)
if not cur_ep:
raise EpisodeNotFoundException()
except EpisodeNotFoundException as e:
raise EpisodePostProcessingFailedException(u'Unable to create episode: {0!r}'.format(e))
# associate all the episodes together under a single root episode
if root_ep is None:
root_ep = cur_ep
root_ep.related_episodes = []
elif cur_ep not in root_ep.related_episodes:
root_ep.related_episodes.append(cur_ep)
return root_ep
def _get_quality(self, ep_obj):
"""
Determine the quality of the file that is being post processed.
First by checking if it is directly available in the TVEpisode's status or
otherwise by parsing through the data available.
:param ep_obj: The TVEpisode object related to the file we are post processing
:return: A quality value found in common.Quality
"""
ep_quality = common.Quality.UNKNOWN
# Try getting quality from the episode (snatched) status first
if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:
_, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)
if ep_quality != common.Quality.UNKNOWN:
self._log(u'The snatched status has a quality in it, using that: {0}'.format
(common.Quality.qualityStrings[ep_quality]), logger.DEBUG)
return ep_quality
# NZB name is the most reliable if it exists, followed by file name and lastly folder name
name_list = [self.nzb_name, self.file_name, self.rel_path]
for cur_name in name_list:
# Skip names that are falsey
if not cur_name:
continue
ep_quality = common.Quality.nameQuality(cur_name, ep_obj.show.is_anime, extend=False)
self._log(u"Looking up quality for '{0}', got {1}".format
(cur_name, common.Quality.qualityStrings[ep_quality]), logger.DEBUG)
if ep_quality != common.Quality.UNKNOWN:
self._log(u"Looks like '{0}' has quality {1}, using that".format
(cur_name, common.Quality.qualityStrings[ep_quality]), logger.DEBUG)
return ep_quality
# Try using other methods to get the file quality
ep_quality = common.Quality.nameQuality(self.file_path, ep_obj.show.is_anime)
self._log(u"Trying other methods to get quality for '{0}', got {1}".format
(self.file_name, common.Quality.qualityStrings[ep_quality]), logger.DEBUG)
if ep_quality != common.Quality.UNKNOWN:
self._log(u"Looks like '{0}' has quality {1}, using that".format
(self.file_name, common.Quality.qualityStrings[ep_quality]), logger.DEBUG)
return ep_quality
return ep_quality
    def _priority_from_history(self, show_id, season, episodes, quality):
        """Evaluate if the file should be marked as priority.

        Sets ``self.in_history`` (and possibly ``self.manually_searched``) as a
        side effect when the snatch history shows we wanted this download;
        returns nothing.
        """
        main_db_con = db.DBConnection()
        for episode in episodes:
            # First: check if the episode status is snatched
            # NOTE(review): the LIKE '%02'/'%09'/'%12' patterns appear to match
            # composite statuses ending in the snatched status codes -- confirm
            # against the status constants in common.py.
            tv_episodes_result = main_db_con.select(
                'SELECT status '
                'FROM tv_episodes '
                'WHERE showid = ? '
                'AND season = ? '
                'AND episode = ? '
                "AND (status LIKE '%02' "
                "OR status LIKE '%09' "
                "OR status LIKE '%12')",
                [show_id, season, episode])
            if tv_episodes_result:
                # Second: get the quality of the last snatched episode
                # and compare it to the quality we are post-processing
                history_result = main_db_con.select(
                    'SELECT quality, manually_searched '
                    'FROM history '
                    'WHERE showid = ? '
                    'AND season = ? '
                    'AND episode = ? '
                    "AND (action LIKE '%02' "
                    "OR action LIKE '%09' "
                    "OR action LIKE '%12') "
                    'ORDER BY date DESC',
                    [show_id, season, episode])
                if history_result and history_result[0]['quality'] == quality:
                    # Third: make sure the file we are post-processing hasn't been
                    # previously processed, as we wouldn't want it in that case
                    if history_result[0]['manually_searched']:
                        self.manually_searched = True
                    # NOTE(review): action LIKE '%04' presumably matches the
                    # "downloaded" action code -- confirm against common.py.
                    download_result = main_db_con.select(
                        'SELECT resource '
                        'FROM history '
                        'WHERE showid = ? '
                        'AND season = ? '
                        'AND episode = ? '
                        'AND quality = ? '
                        "AND action LIKE '%04' "
                        'ORDER BY date DESC',
                        [show_id, season, episode, quality])
                    if download_result:
                        download_name = os.path.basename(download_result[0]['resource'])
                        # If the file name we are processing differs from the file
                        # that was previously processed, we want this file
                        if self.file_name != download_name:
                            self.in_history = True
                            return
                    else:
                        # There aren't any other files processed before for this
                        # episode and quality, we can safely say we want this file
                        self.in_history = True
                        return
def _run_extra_scripts(self, ep_obj):
"""
Execute any extra scripts defined in the config.
:param ep_obj: The object to use when calling the extra script
"""
if not app.EXTRA_SCRIPTS:
return
file_path = self.file_path
if isinstance(file_path, text_type):
try:
file_path = file_path.encode(app.SYS_ENCODING)
except UnicodeEncodeError:
# ignore it
pass
ep_location = ep_obj.location
if isinstance(ep_location, text_type):
try:
ep_location = ep_location.encode(app.SYS_ENCODING)
except UnicodeEncodeError:
# ignore it
pass
for cur_script_name in app.EXTRA_SCRIPTS:
if isinstance(cur_script_name, text_type):
try:
cur_script_name = cur_script_name.encode(app.SYS_ENCODING)
except UnicodeEncodeError:
# ignore it
pass
# generate a safe command line string to execute the script and provide all the parameters
script_cmd = [piece for piece in re.split(r'(\'.*?\'|".*?"| )', cur_script_name) if piece.strip()]
script_cmd[0] = os.path.abspath(script_cmd[0])
self._log(u'Absolute path to script: {0}'.format(script_cmd[0]), logger.DEBUG)
script_cmd += [
ep_location, file_path, str(ep_obj.show.indexerid),
str(ep_obj.season), str(ep_obj.episode), str(ep_obj.airdate)
]
# use subprocess to run the command and capture output
self._log(u'Executing command: {0}'.format(script_cmd))
try:
p = subprocess.Popen(
script_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=app.PROG_DIR
)
out, _ = p.communicate()
self._log(u'Script result: {0}'.format(out), logger.DEBUG)
except Exception as e:
self._log(u'Unable to run extra_script: {0!r}'.format(e))
def _is_priority(self, ep_obj, new_ep_quality):
"""
Determine if the episode is a priority download or not (if it is expected).
Episodes which are expected (snatched) or larger than the existing episode are priority, others are not.
:param ep_obj: The TVEpisode object in question
:param new_ep_quality: The quality of the episode that is being processed
:return: True if the episode is priority, False otherwise.
"""
if self.is_priority:
return True
_, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)
# if Medusa downloaded this on purpose we likely have a priority download
if self.in_history:
# If manual searched, then by pass any quality checks
if self.manually_searched:
self._log(u"This episode was manually snatched. Marking it as priority", logger.DEBUG)
return True
# We only want it if the new quality is higher
# Assuming the new quality is a wanted quality
if new_ep_quality > old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
self._log(u"Medusa snatched this episode and it is a higher quality. Marking it as priority",
logger.DEBUG)
return True
# if it's a proper of equal or higher quality
if self.is_proper and new_ep_quality >= old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
self._log(u"Medusa snatched this episode and it is a proper of equal or higher quality. "
u"Marking it as priority", logger.DEBUG)
return True
self._log(u"This episode is not in history. Not marking it as priority", logger.DEBUG)
return False
def flag_kodi_clean_library(self):
"""Set flag to clean Kodi's library if Kodi is enabled."""
if app.USE_KODI:
self._log(u'Setting to clean Kodi library as we are going to replace the file')
app.KODI_LIBRARY_CLEAN_PENDING = True
def process(self):
"""
Post-process a given file.
:return: True on success, False on failure
"""
self._log(u'Processing {0} ({1})'.format(self.file_path, self.nzb_name or 'Torrent'))
if os.path.isdir(self.file_path):
self._log(u'File {0} seems to be a directory'.format(self.file_path))
return False
if not os.path.exists(self.file_path):
raise EpisodePostProcessingFailedException(u"File {0} doesn't exist, did unrar fail?".format
(self.file_path))
for ignore_file in self.IGNORED_FILESTRINGS:
if ignore_file in self.file_path:
self._log(u'File {0} is ignored type, skipping'.format(self.file_path))
return False
# reset per-file stuff
self.in_history = False
# reset the anidb episode object
self.anidbEpisode = None
# try to find the file info
(show, season, episodes, quality, version) = self._find_info()
if not show:
raise EpisodePostProcessingFailedException(u"This show isn't in your list, you need to add it "
u"before post-processing an episode")
elif season is None or not episodes:
raise EpisodePostProcessingFailedException(u'Not enough information to determine what episode this is')
# retrieve/create the corresponding TVEpisode objects
ep_obj = self._get_ep_obj(show, season, episodes)
old_ep_status, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)
# get the quality of the episode we're processing
if quality and common.Quality.qualityStrings[quality] != 'Unknown':
self._log(u'The episode file has a quality in it, using that: {0}'.format
(common.Quality.qualityStrings[quality]), logger.DEBUG)
new_ep_quality = quality
else:
new_ep_quality = self._get_quality(ep_obj)
logger.log(u'Quality of the episode we are processing: {0}'.format
(common.Quality.qualityStrings[new_ep_quality]), logger.DEBUG)
# check snatched history to see if we should set download as priority
self._priority_from_history(show.indexerid, season, episodes, new_ep_quality)
# see if this is a priority download (is it snatched, in history, PROPER, or BEST)
priority_download = self._is_priority(ep_obj, new_ep_quality)
self._log(u'This episode is a priority download: {0}'.format(priority_download), logger.DEBUG)
# get the version of the episode we're processing (default is -1)
if version != -1:
self._log(u'Episode has a version in it, using that: v{0}'.format(version), logger.DEBUG)
new_ep_version = version
# check for an existing file
existing_file_status = self._checkForExistingFile(ep_obj.location)
if not priority_download:
if existing_file_status == PostProcessor.EXISTS_SAME:
self._log(u'File exists and the new file has the same size, aborting post-processing')
return True
if existing_file_status != PostProcessor.DOESNT_EXIST:
if self.is_proper and new_ep_quality == old_ep_quality:
self._log(u'New file is a PROPER, marking it safe to replace')
self.flag_kodi_clean_library()
else:
allowed_qualities, preferred_qualities = show.current_qualities
should_replace, replace_msg = common.Quality.should_replace(old_ep_status, old_ep_quality,
new_ep_quality, allowed_qualities,
preferred_qualities)
if not should_replace:
raise EpisodePostProcessingFailedException(
u'File exists. Marking it unsafe to replace. Reason: {0}'.format(replace_msg))
else:
self._log(u'File exists. Marking it safe to replace. Reason: {0}'.format(replace_msg))
self.flag_kodi_clean_library()
# Check if the processed file season is already in our indexer. If not,
# the file is most probably mislabled/fake and will be skipped.
# Only proceed if the file season is > 0
if int(ep_obj.season) > 0:
main_db_con = db.DBConnection()
max_season = main_db_con.select(
"SELECT MAX(season) FROM tv_episodes WHERE showid = ? and indexer = ?",
[show.indexerid, show.indexer])
# If the file season (ep_obj.season) is bigger than
# the indexer season (max_season[0][0]), skip the file
if int(ep_obj.season) > int(max_season[0][0]):
self._log(u'File has season {0}, while the indexer is on season {1}. '
u'The file may be incorrectly labeled or fake, aborting.'.format
(ep_obj.season, max_season[0][0]))
return False
# if the file is priority then we're going to replace it even if it exists
else:
# Set to clean Kodi if file exists and it is priority_download
if existing_file_status != PostProcessor.DOESNT_EXIST:
self.flag_kodi_clean_library()
self._log(u"This download is marked a priority download so I'm going to replace "
u"an existing file if I find one")
# try to find out if we have enough space to perform the copy or move action.
if not helpers.isFileLocked(self.file_path, False):
if not verify_freespace(self.file_path, ep_obj.show._location, [ep_obj] + ep_obj.related_episodes):
self._log(u'Not enough space to continue post-processing, exiting', logger.WARNING)
return False
else:
self._log(u'Unable to determine needed filespace as the source file is locked for access')
# delete the existing file (and company)
for cur_ep in [ep_obj] + ep_obj.related_episodes:
try:
self._delete(cur_ep.location, associated_files=True)
# clean up any left over folders
if cur_ep.location:
helpers.delete_empty_folders(os.path.dirname(cur_ep.location), keep_dir=ep_obj.show._location)
except (OSError, IOError):
raise EpisodePostProcessingFailedException(u'Unable to delete the existing files')
# set the status of the episodes
# for curEp in [ep_obj] + ep_obj.related_episodes:
# curEp.status = common.Quality.compositeStatus(common.SNATCHED, new_ep_quality)
# if the show directory doesn't exist then make it if allowed
if not os.path.isdir(ep_obj.show._location) and app.CREATE_MISSING_SHOW_DIRS:
self._log(u"Show directory doesn't exist, creating it", logger.DEBUG)
try:
os.mkdir(ep_obj.show._location) # pylint: disable=protected-access
helpers.chmodAsParent(ep_obj.show._location) # pylint: disable=protected-access
# do the library update for synoindex
notifiers.synoindex_notifier.addFolder(ep_obj.show._location) # pylint: disable=protected-access
except (OSError, IOError):
raise EpisodePostProcessingFailedException(u'Unable to create the show directory: {0}'.format
(ep_obj.show._location)) # pylint: disable=protected-access
# get metadata for the show (but not episode because it hasn't been fully processed)
ep_obj.show.write_metadata(True)
# update the ep info before we rename so the quality & release name go into the name properly
sql_l = []
for cur_ep in [ep_obj] + ep_obj.related_episodes:
with cur_ep.lock:
if self.release_name:
self._log(u'Found release name {0}'.format(self.release_name), logger.DEBUG)
cur_ep.release_name = self.release_name
elif self.file_name:
# If we can't get the release name we expect, save the original release name instead
self._log(u'Using original release name {0}'.format(self.file_name), logger.DEBUG)
cur_ep.release_name = self.file_name
else:
cur_ep.release_name = u''
cur_ep.status = common.Quality.compositeStatus(common.DOWNLOADED, new_ep_quality)
cur_ep.subtitles = u''
cur_ep.subtitles_searchcount = 0
cur_ep.subtitles_lastsearch = u'0001-01-01 00:00:00'
cur_ep.is_proper = self.is_proper
cur_ep.version = new_ep_version
if self.release_group:
cur_ep.release_group = self.release_group
else:
cur_ep.release_group = u''
sql_l.append(cur_ep.get_sql())
# Just want to keep this consistent for failed handling right now
release_name = show_name_helpers.determineReleaseName(self.folder_path, self.nzb_name)
if release_name is not None:
failed_history.log_success(release_name)
else:
self._log(u"Couldn't determine release name, aborting", logger.WARNING)
# find the destination folder
try:
proper_path = ep_obj.proper_path()
proper_absolute_path = os.path.join(ep_obj.show.location, proper_path)
dest_path = os.path.dirname(proper_absolute_path)
except ShowDirectoryNotFoundException:
raise EpisodePostProcessingFailedException(u"Unable to post-process an episode if the show dir '{0}' "
u"doesn't exist, quitting".format(ep_obj.show.raw_location))
self._log(u'Destination folder for this episode: {0}'.format(dest_path), logger.DEBUG)
# create any folders we need
helpers.make_dirs(dest_path)
# figure out the base name of the resulting episode file
if app.RENAME_EPISODES:
orig_extension = self.file_name.rpartition('.')[-1]
new_base_name = os.path.basename(proper_path)
new_file_name = new_base_name + '.' + orig_extension
else:
# if we're not renaming then there's no new base name, we'll just use the existing name
new_base_name = None
new_file_name = self.file_name
# add to anidb
if ep_obj.show.is_anime and app.ANIDB_USE_MYLIST:
self._add_to_anidb_mylist(self.file_path)
try:
# move the episode and associated files to the show dir
if self.process_method == 'copy':
if helpers.isFileLocked(self.file_path, False):
raise EpisodePostProcessingFailedException('File is locked for reading')
self._copy(self.file_path, dest_path, new_base_name, app.MOVE_ASSOCIATED_FILES,
app.USE_SUBTITLES and ep_obj.show.subtitles)
elif self.process_method == 'move':
if helpers.isFileLocked(self.file_path, True):
raise EpisodePostProcessingFailedException('File is locked for reading/writing')
self._move(self.file_path, dest_path, new_base_name, app.MOVE_ASSOCIATED_FILES,
app.USE_SUBTITLES and ep_obj.show.subtitles)
elif self.process_method == "hardlink":
self._hardlink(self.file_path, dest_path, new_base_name, app.MOVE_ASSOCIATED_FILES,
app.USE_SUBTITLES and ep_obj.show.subtitles)
elif self.process_method == "symlink":
if helpers.isFileLocked(self.file_path, True):
raise EpisodePostProcessingFailedException('File is locked for reading/writing')
self._moveAndSymlink(self.file_path, dest_path, new_base_name, app.MOVE_ASSOCIATED_FILES,
app.USE_SUBTITLES and ep_obj.show.subtitles)
else:
logger.log(u' "{0}" is an unknown file processing method. '
u'Please correct your app\'s usage of the api.'.format(self.process_method), logger.WARNING)
raise EpisodePostProcessingFailedException('Unable to move the files to their new home')
except (OSError, IOError):
raise EpisodePostProcessingFailedException('Unable to move the files to their new home')
# download subtitles
if app.USE_SUBTITLES and ep_obj.show.subtitles:
for cur_ep in [ep_obj] + ep_obj.related_episodes:
with cur_ep.lock:
cur_ep.location = os.path.join(dest_path, new_file_name)
cur_ep.refresh_subtitles()
cur_ep.download_subtitles()
# now that processing has finished, we can put the info in the DB.
# If we do it earlier, then when processing fails, it won't try again.
if sql_l:
main_db_con = db.DBConnection()
main_db_con.mass_action(sql_l)
# put the new location in the database
sql_l = []
for cur_ep in [ep_obj] + ep_obj.related_episodes:
with cur_ep.lock:
cur_ep.location = os.path.join(dest_path, new_file_name)
sql_l.append(cur_ep.get_sql())
if sql_l:
main_db_con = db.DBConnection()
main_db_con.mass_action(sql_l)
cur_ep.airdate_modify_stamp()
# generate nfo/tbn
try:
ep_obj.create_meta_files()
except Exception:
logger.log(u'Could not create/update meta files. Continuing with post-processing...')
# log it to history
history.logDownload(ep_obj, self.file_path, new_ep_quality, self.release_group, new_ep_version)
# If any notification fails, don't stop post_processor
try:
# send notifications
notifiers.notify_download(ep_obj._format_pattern('%SN - %Sx%0E - %EN - %QN'))
# do the library update for KODI
notifiers.kodi_notifier.update_library(ep_obj.show.name)
# do the library update for Plex
notifiers.plex_notifier.update_library(ep_obj)
# do the library update for EMBY
notifiers.emby_notifier.update_library(ep_obj.show)
# do the library update for NMJ
# nmj_notifier kicks off its library update when the notify_download is issued (inside notifiers)
# do the library update for Synology Indexer
notifiers.synoindex_notifier.addFile(ep_obj.location)
# do the library update for pyTivo
notifiers.pytivo_notifier.update_library(ep_obj)
# do the library update for Trakt
notifiers.trakt_notifier.update_library(ep_obj)
except Exception as e:
logger.log(u'Some notifications could not be sent. Error: {0!r}. '
u'Continuing with post-processing...'.format(e))
self._run_extra_scripts(ep_obj)
return True
|
mpdevilleres/tbpc_app | refs/heads/master | tbpc/team_mgt/migrations/0001_initial.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-03 04:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Auto-generated initial migration for the team_mgt app.

    Creates the Document, Duty and DutyHistory models, with Duty linked to
    contract_mgt.Contract and to the project's (swappable) user model.
    Do not hand-edit the operations below; generate a follow-up migration
    for schema changes instead.
    """
    initial = True
    dependencies = [
        ('contract_mgt', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('letter_reference', models.CharField(blank=True, max_length=255)),
                ('remarks', models.TextField(blank=True)),
                ('file', models.FileField(upload_to='documents/%Y/%m/%d')),
                ('is_reply', models.BooleanField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Duty',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('description', models.TextField(blank=True)),
                ('category', models.CharField(blank=True, choices=[('DISPUTE', 'Dispute'), ('DELAY PENALTY', 'Delay Penalty'), ('INVOICE', 'Invoice'), ('SUPPORT WARRANTY', 'Support Warranty'), ('OTHERS', 'Others')], max_length=255)),
                ('severity', models.CharField(blank=True, choices=[('CRITICAL', 'Critical'), ('AVERAGE', 'Average'), ('LOW', 'Low')], max_length=255)),
                ('end_user', models.CharField(blank=True, max_length=255)),
                ('remarks', models.TextField(blank=True)),
                ('impact', models.CharField(blank=True, max_length=255)),
                ('waiting_date', models.DateField(blank=True, default=django.utils.timezone.now, null=True)),
                ('issue_date', models.DateField(blank=True, default=django.utils.timezone.now, null=True)),
                ('close_date', models.DateField(blank=True, default=django.utils.timezone.now, null=True)),
                ('_latest_status', models.CharField(blank=True, max_length=255)),
                ('_latest_action', models.CharField(blank=True, max_length=255)),
                ('contract', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contract_mgt.Contract')),
                ('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Person/s in Charge')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='DutyHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('action_taken', models.TextField(blank=True)),
                ('next_action', models.TextField(blank=True)),
                ('remarks', models.TextField(blank=True)),
                ('status', models.CharField(blank=True, choices=[('OPEN', 'Open'), ('CLOSED', 'Closed'), ('ON-HOLD', 'On-Hold')], default='OPEN', max_length=255)),
                ('feedback_date', models.DateField(blank=True, default=django.utils.timezone.now, null=True)),
                ('action_date', models.DateField(blank=True, default=django.utils.timezone.now, null=True)),
                ('action_taken_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('duty', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='team_mgt.Duty')),
            ],
            options={
                'ordering': ['-pk'],
            },
        ),
        migrations.AddField(
            model_name='document',
            name='duty',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='team_mgt.Duty'),
        ),
    ]
|
andyzsf/django | refs/heads/master | django/contrib/sessions/backends/cache.py | 102 | from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import caches
from django.utils.six.moves import xrange
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
    """Session store that keeps session data in Django's cache framework."""

    def __init__(self, session_key=None):
        # Resolve the configured cache backend once per store instance.
        self._cache = caches[settings.SESSION_CACHE_ALIAS]
        super(SessionStore, self).__init__(session_key)

    @property
    def cache_key(self):
        """Cache key under which this session's data is stored."""
        return KEY_PREFIX + self._get_or_create_session_key()

    def load(self):
        """Return the session dict, creating a fresh session on any miss."""
        session_data = None
        try:
            session_data = self._cache.get(self.cache_key, None)
        except Exception:
            # Some backends (e.g. memcache) raise an exception on invalid
            # cache keys. If this happens, reset the session. See #17810.
            pass
        if session_data is None:
            self.create()
            return {}
        return session_data

    def create(self):
        """Allocate a new session key, retrying on key collisions."""
        # Because a cache can fail silently (e.g. memcache), we don't know if
        # we are failing to create a new session because of a key collision or
        # because the cache is missing. So we try for a (large) number of times
        # and then raise an exception. That's the risk you shoulder if using
        # cache backing.
        for _attempt in xrange(10000):
            self._session_key = self._get_new_session_key()
            try:
                self.save(must_create=True)
            except CreateError:
                continue
            else:
                self.modified = True
                return
        raise RuntimeError(
            "Unable to create a new session key. "
            "It is likely that the cache is unavailable.")

    def save(self, must_create=False):
        """Write the session to the cache; must_create demands a brand-new key."""
        setter = self._cache.add if must_create else self._cache.set
        stored = setter(self.cache_key,
                        self._get_session(no_load=must_create),
                        self.get_expiry_age())
        if must_create and not stored:
            raise CreateError

    def exists(self, session_key):
        """Return True if a session with the given key is present in the cache."""
        return (KEY_PREFIX + session_key) in self._cache

    def delete(self, session_key=None):
        """Remove the given session (or the current one) from the cache."""
        key = session_key if session_key is not None else self.session_key
        if key is None:
            return
        self._cache.delete(KEY_PREFIX + key)

    @classmethod
    def clear_expired(cls):
        """No-op: the cache backend evicts expired entries on its own."""
        pass
|
kenshay/ImageScripter | refs/heads/master | ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/telemetry/timeline/importer.py | 39 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class TimelineImporter(object):
  """Reads TraceData and populates timeline model with what it finds.

  Abstract base class: subclasses implement the three hook methods below and
  declare (via GetSupportedPart) which part of the trace data they consume.
  """
  def __init__(self, model, trace_data, import_order):
    # model: the timeline model to populate; trace_data: raw trace wrapper.
    self._model = model
    self._trace_data = trace_data
    # Lower import_order values presumably run first -- confirm with the
    # code that sorts importers before running them.
    self.import_order = import_order

  @staticmethod
  def GetSupportedPart():
    # Subclasses return the trace-data part this importer understands.
    raise NotImplementedError

  def ImportEvents(self):
    """Processes the event data in the wrapper and creates and adds
    new timeline events to the model"""
    raise NotImplementedError

  def FinalizeImport(self):
    """Called after all other importers for the model are run."""
    raise NotImplementedError
|
numenta/NAB | refs/heads/master | nab/detectors/htmjava/__init__.py | 12133432 | |
pixyj/feel | refs/heads/master | client/config/nginx/__init__.py | 12133432 | |
playm2mboy/edx-platform | refs/heads/master | lms/djangoapps/discussion_api/tests/__init__.py | 12133432 | |
gg7/sentry | refs/heads/master | tests/sentry/models/__init__.py | 12133432 | |
awainer/7539 | refs/heads/master | aplicaciones_informaticas/backend/migrations/__init__.py | 12133432 | |
proliming/zulip | refs/heads/master | zilencer/urls/__init__.py | 12133432 | |
aldryn/aldryn-grid-foundation | refs/heads/master | aldryn_grid_foundation/migrations/__init__.py | 12133432 | |
pali88/support-tools | refs/heads/master | googlecode-issues-exporter/__init__.py | 12133432 | |
citrix-openstack-build/nova | refs/heads/master | nova/api/openstack/compute/plugins/v3/__init__.py | 12133432 | |
coberger/DIRAC | refs/heads/integration | TransformationSystem/Agent/RequestOperations/__init__.py | 12133432 | |
threefoldfoundation/app_backend | refs/heads/master | plugins/tff_backend/models/investor.py | 1 | from google.appengine.ext import ndb
from enum import IntEnum
from framework.models.common import NdbModel
from framework.utils import now
from plugins.tff_backend.bizz.gcs import get_serving_url, encrypt_filename
from plugins.tff_backend.consts.payment import TOKEN_TFT
from plugins.tff_backend.plugin_consts import NAMESPACE
class PaymentInfo(IntEnum):
    """Integer flags describing special payment circumstances of an investment.

    Values are persisted as ints in InvestmentAgreement.payment_info.
    """
    # NOTE(review): presumably "payment originated from the UAE" -- confirm.
    UAE = 1
    # NOTE(review): presumably "token amount was multiplied per a promotion" -- confirm.
    HAS_MULTIPLIED_TOKENS = 2
class InvestmentAgreement(NdbModel):
    """Datastore model for a token investment agreement and its lifecycle.

    An agreement moves through the statuses below; timestamps record each
    transition. Token amounts are stored as scaled integers (see
    token_count / token_precision) to avoid float rounding in the datastore.
    """
    NAMESPACE = NAMESPACE
    STATUS_CANCELED = -1
    STATUS_CREATED = 0
    STATUS_SIGNED = 1
    STATUS_PAID = 2

    def _compute_token_count(self):
        # token_count is the real amount scaled by 10**token_precision;
        # convert back to a float rounded to that precision.
        return round(float(self.token_count) / pow(10, self.token_precision), self.token_precision)

    app_user = ndb.UserProperty()  # todo: remove after migration 014
    username = ndb.StringProperty()
    amount = ndb.FloatProperty(indexed=False)
    token = ndb.StringProperty(indexed=False, default=TOKEN_TFT)
    token_count_float = ndb.ComputedProperty(_compute_token_count, indexed=False)  # Real amount of tokens
    token_count = ndb.IntegerProperty(indexed=False, default=0)  # amount of tokens x 10 ^ token_precision
    token_precision = ndb.IntegerProperty(indexed=False, default=0)
    currency = ndb.StringProperty(indexed=False)
    name = ndb.StringProperty(indexed=False)
    address = ndb.StringProperty(indexed=False)
    reference = ndb.StringProperty(indexed=False)
    iyo_see_id = ndb.StringProperty(indexed=False)
    signature_payload = ndb.StringProperty(indexed=False)
    signature = ndb.StringProperty(indexed=False)
    status = ndb.IntegerProperty(default=STATUS_CREATED)
    creation_time = ndb.IntegerProperty()
    sign_time = ndb.IntegerProperty()
    paid_time = ndb.IntegerProperty()
    cancel_time = ndb.IntegerProperty()
    modification_time = ndb.IntegerProperty()
    version = ndb.StringProperty()
    # Materialize the choices list explicitly: under Python 3 map() returns an
    # iterator, which would be exhausted after a single validation pass.
    payment_info = ndb.IntegerProperty(repeated=True, choices=[int(p) for p in PaymentInfo])

    def _pre_put_hook(self):
        # Keep modification_time current on every put().
        self.modification_time = now()

    def _post_put_hook(self, future):
        # Re-index the agreement for search after each write; inside a
        # transaction this must be deferred (search API is not transactional).
        from plugins.tff_backend.dal.investment_agreements import index_investment_agreement
        if ndb.in_transaction():
            from google.appengine.ext import deferred
            deferred.defer(index_investment_agreement, self, _transactional=True)
        else:
            index_investment_agreement(self)

    @property
    def id(self):
        return self.key.id()

    @property
    def document_url(self):
        # Serving URL of the signed PDF stored in Google Cloud Storage.
        return get_serving_url(self.filename(self.id))

    @classmethod
    def filename(cls, agreement_id):
        return u'purchase-agreements/%s.pdf' % encrypt_filename(agreement_id)

    @classmethod
    def create_key(cls, agreement_id):
        return ndb.Key(cls, agreement_id, namespace=NAMESPACE)

    @classmethod
    def list(cls):
        return cls.query()

    @classmethod
    def list_by_user(cls, username):
        return cls.query() \
            .filter(cls.username == username)

    @classmethod
    def list_by_status_and_user(cls, username, statuses):
        # type: (unicode, list[int]) -> list[InvestmentAgreement]
        statuses = [statuses] if isinstance(statuses, int) else statuses
        return [investment for investment in cls.list_by_user(username) if investment.status in statuses]

    def to_dict(self, extra_properties=None, include=None, exclude=None):
        # None instead of a mutable default argument ([]) avoids the
        # shared-default pitfall; behavior for all existing callers is unchanged.
        extra_properties = list(extra_properties) if extra_properties else []
        return super(InvestmentAgreement, self).to_dict(extra_properties + ['document_url'], include, exclude)
|
PikaDm/clave-online-shop-template | refs/heads/master | clave/views.py | 1 | from django.shortcuts import render, get_object_or_404
from .models import Category, Product
from cart.forms import CartAddProductForm
from .recommender import Recommender
def product_list(request, category_slug=None):
    """Render the catalogue of available products, optionally narrowed to one category."""
    category = None
    products = Product.objects.filter(available=True)
    if category_slug:
        # Unknown slugs 404 instead of silently showing the full list.
        category = get_object_or_404(Category, slug=category_slug)
        products = products.filter(category=category)
    context = {
        'category': category,
        'categories': Category.objects.all(),
        'products': products,
    }
    return render(request, 'clave/product/list.html', context)
def product_detail(request, id, slug):
    """Render one available product, its add-to-cart form, and up to three
    recommended products."""
    product = get_object_or_404(Product, id=id, slug=slug, available=True)
    context = {
        'product': product,
        'cart_product_form': CartAddProductForm(),
        'recommended_products': Recommender().suggest_products_for([product], 3),
    }
    return render(request, 'clave/product/detail.html', context)
|
kaarl/pyload | refs/heads/stable | module/plugins/hoster/WarserverCz.py | 5 | # -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster
class WarserverCz(DeadHoster):
    # Registered as a DeadHoster: warserver.cz downloads are no longer
    # available, so the plugin only reports matching links as offline.
    __name__ = "WarserverCz"
    __type__ = "hoster"
    __version__ = "0.17"
    __status__ = "stable"
    # URL pattern this (dead) hoster used to claim.
    __pattern__ = r'http://(?:www\.)?warserver\.cz/stahnout/\d+'
    __config__ = [] #@TODO: Remove in 0.4.10
    __description__ = """Warserver.cz hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
|
atsao72/sympy | refs/heads/master | examples/beginner/basic.py | 80 | #!/usr/bin/env python
"""Basic example
Demonstrates how to create symbols and print some algebra operations.
"""
import sympy
from sympy import pprint
def main():
    """Build a small symbolic expression and pretty-print it.

    NOTE: this file uses Python 2 bare ``print`` statements.
    """
    a = sympy.Symbol('a')
    b = sympy.Symbol('b')
    c = sympy.Symbol('c')
    # a*b*b + 2*b*a*b: sympy's automatic simplification combines the like
    # terms before the whole sum is raised to the symbolic power c.
    e = ( a*b*b + 2*b*a*b )**c
    print
    pprint(e)
    print

if __name__ == "__main__":
    main()
|
aweinstock314/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/tests/functional/docroot/test_tuple_3.py | 467 | def main(request, response):
return (202, "Giraffe"), [("Content-Type", "text/html"), ("X-Test", "PASS")], "PASS"
|
gbrmachado/treeherder | refs/heads/master | tests/log_parser/test_error_parser.py | 10 | import pytest
from treeherder.log_parser.parsers import ErrorParser
# Log lines that ErrorParser must classify as errors: harness failures,
# crashes, non-zero return codes, sanitizer leaks, taskcluster errors, etc.
ERROR_TEST_CASES = (
    "23:52:39 INFO - 346 INFO TEST-UNEXPECTED-FAIL | dom/base/test/test_XHRDocURI.html | foo",
    "00:54:55 WARNING - PROCESS-CRASH | Shutdown | application crashed [@ PR_GetThreadPrivate]",
    "23:57:52 INFO - Remote Device Error: Unhandled exception in cleanupDevice",
    "23:57:52 ERROR - Return code: 1",
    "23:57:52 CRITICAL - Preparing to abort run due to failed verify check.",
    "23:57:52 FATAL - Dying due to failing verification",
    "remoteFailed: [Failure instance: Traceback (failure with no frames): Foo.",
    "08:13:37 INFO - make: *** [test-integration-test] Error 1",
    "pymake\..\..\mozmake.exe: *** [buildsymbols] Error 11",
    "00:55:13 INFO - SUMMARY: AddressSanitizer: 64 byte(s) leaked in 1 allocation(s).",
    "Automation Error: Foo bar",
    "[taskcluster] Error: Task run time exceeded 7200 seconds.",
    "foo.js: line 123, col 321, Error - ESLint bar",
    "2014-04-04 06:37:57 ERROR 403: Forbidden.",
    "[taskcluster:error] Could not upload artifact",
    "[taskcluster-vcs:error] Could not extract archive"
)

# Benign lines that superficially resemble errors (INFO-level exceptions,
# device-dump echoes, taskcluster status lines) and must NOT be matched.
NON_ERROR_TEST_CASES = (
    "TEST-PASS | foo | bar",
    "07:42:02 INFO - Exception:",
    "07:51:08 INFO - Caught Exception: Remote Device Error: unable to connect to panda-0501 after 5 attempts",
    "06:21:18 INFO - I/GeckoDump( 730): 110 INFO TEST-UNEXPECTED-FAIL | foo | bar",
    "[taskcluster:info] Starting task",
    "[taskcluster] Starting task"
)
@pytest.mark.parametrize("line", ERROR_TEST_CASES)
def test_error_lines_matched(line):
    """Every known error line must be flagged by ErrorParser."""
    assert ErrorParser().is_error_line(line)
@pytest.mark.parametrize("line", NON_ERROR_TEST_CASES)
def test_successful_lines_not_matched(line):
    """Benign log lines must not be flagged as errors."""
    assert not ErrorParser().is_error_line(line)
|
rgbconsulting/rgb-accounting | refs/heads/8.0 | account_move_extended/__init__.py | 6 | # -*- coding: utf-8 -*-
# See README file for full copyright and licensing details.
|
JakeBrand/CMPUT410-E3 | refs/heads/master | lab4/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/__init__.py | 132 | """Extensions to the 'distutils' for large or complex distributions"""
from setuptools.extension import Extension, Library
from setuptools.dist import Distribution, Feature, _get_unpatched
import distutils.core, setuptools.command
from setuptools.depends import Require
from distutils.core import Command as _Command
from distutils.util import convert_path
import os
import sys
__version__ = '0.6'
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
# This marker is used to simplify the process that checks is the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The distribute_setup script for instance, will check if this
# attribute is present to decide whether to reinstall the package
# or not.
_distribute = True
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
def find_packages(where='.', exclude=()):
    """Return a list of all Python packages found within directory 'where'

    'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
    will be converted to the appropriate local path syntax. 'exclude' is a
    sequence of package names to exclude; '*' can be used as a wildcard in the
    names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
    'foo' itself).
    """
    from fnmatch import fnmatchcase

    found = []
    # Breadth-first walk; each queue entry is (directory, dotted-name prefix).
    queue = [(convert_path(where), '')]
    while queue:
        directory, prefix = queue.pop(0)
        for entry in os.listdir(directory):
            path = os.path.join(directory, entry)
            # A package is a dot-free directory containing __init__.py.
            is_package = ('.' not in entry
                          and os.path.isdir(path)
                          and os.path.isfile(os.path.join(path, '__init__.py')))
            if is_package:
                found.append(prefix + entry)
                queue.append((path, prefix + entry + '.'))
    # Drop explicit exclusions plus the bootstrap helper modules.
    for pattern in list(exclude) + ['ez_setup', 'distribute_setup']:
        found = [name for name in found if not fnmatchcase(name, pattern)]
    return found
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
    # Re-export the distutils docstring so help output stays identical.
    __doc__ = _Command.__doc__

    command_consumes_arguments = False

    def __init__(self, dist, **kw):
        """Construct the command, additionally accepting arbitrary keyword
        arguments that are stored as attributes on the instance."""
        _Command.__init__(self, dist)
        for attr_name, attr_value in kw.items():
            setattr(self, attr_name, attr_value)

    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
        """Reinitialize a command, then overwrite it with the given keywords."""
        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
        for attr_name, attr_value in kw.items():
            setattr(cmd, attr_name, attr_value)
        return cmd
import distutils.core
distutils.core.Command = Command # we can't patch distutils.cmd, alas
def findall(dir=os.curdir):
    """Find all files under 'dir' and return the list of full filenames
    (relative to 'dir').
    """
    results = []
    for dirpath, _subdirs, filenames in os.walk(dir):
        # os.walk('.') yields paths like './sub'; strip the leading './'
        # so results are relative to 'dir' itself.
        if dirpath == os.curdir or dirpath.startswith(os.curdir + os.sep):
            dirpath = dirpath[2:]
        if dirpath:
            filenames = [os.path.join(dirpath, name) for name in filenames]
        # Keep only real files (skips dangling symlinks and the like).
        results.extend(name for name in filenames if os.path.isfile(name))
    return results
# Monkeypatch distutils so its manifest machinery uses the corrected findall
# defined above.
import distutils.filelist
distutils.filelist.findall = findall # fix findall bug in distutils.

# sys.dont_write_bytecode was introduced in Python 2.6.
# Record whether .pyc generation is disabled (via -B on 2.6+, or the
# PYTHONDONTWRITEBYTECODE environment variable on older interpreters).
if ((hasattr(sys, "dont_write_bytecode") and sys.dont_write_bytecode) or
    (not hasattr(sys, "dont_write_bytecode") and os.environ.get("PYTHONDONTWRITEBYTECODE"))):
    _dont_write_bytecode = True
else:
    _dont_write_bytecode = False
|
gauravbose/digital-menu | refs/heads/master | build/lib.linux-x86_64-2.7/django/contrib/gis/geos/libgeos.py | 103 | """
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
# No GEOS library could be found.
if lib_path is None:
raise ImportError(
'Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names)
)
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
lgeos = CDLL(lib_path)
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
    """GEOS notice handler: format the C callback's byte-string arguments
    and log them at warning level."""
    template = fmt.decode()
    argument = lst.decode()
    try:
        message = template % argument
    except TypeError:
        # The format string may contain no placeholder for the argument.
        message = template
    logger.warning('GEOS_NOTICE: %s\n' % message)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
    """GEOS error handler: format the C callback's byte-string arguments
    and log them at error level."""
    template = fmt.decode()
    argument = lst.decode()
    try:
        message = template % argument
    except TypeError:
        # The format string may contain no placeholder for the argument.
        message = template
    logger.error('GEOS_ERROR: %s\n' % message)
error_h = ERRORFUNC(error_h)
# #### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
    # Opaque C struct for a GEOS geometry; fields live on the C side only.
    pass

class GEOSPrepGeom_t(Structure):
    # Opaque C struct for a prepared geometry.
    pass

class GEOSCoordSeq_t(Structure):
    # Opaque C struct for a coordinate sequence.
    pass

class GEOSContextHandle_t(Structure):
    # Opaque C struct for a thread-safe GEOS context handle.
    pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
    "Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
    # Multiplying a ctypes pointer type by n builds an n-element array type;
    # calling it instantiates a zero-initialized array.
    return (GEOM_PTR * n)()
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = lgeos.GEOSversion
geos_version.argtypes = None
geos_version.restype = c_char_p
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
    """
    Returns a dictionary containing the various version metadata parsed from
    the GEOS version string, including the version number, whether the version
    is a release candidate (and what number release candidate), and the C API
    version.
    """
    version_string = geos_version().decode()
    match = version_regex.match(version_string)
    if match is None:
        raise GEOSException('Could not parse version info string "%s"' % version_string)
    wanted = ('version', 'release_candidate', 'capi_version',
              'major', 'minor', 'subminor')
    return {field: match.group(field) for field in wanted}
# Version numbers and whether or not prepared geometry support is available.
_verinfo = geos_version_info()
GEOS_MAJOR_VERSION = int(_verinfo['major'])
GEOS_MINOR_VERSION = int(_verinfo['minor'])
GEOS_SUBMINOR_VERSION = int(_verinfo['subminor'])
del _verinfo
GEOS_VERSION = (GEOS_MAJOR_VERSION, GEOS_MINOR_VERSION, GEOS_SUBMINOR_VERSION)
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
lgeos.initGEOS_r.restype = CONTEXT_PTR
lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
|
ychen820/microblog | refs/heads/master | y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/dispatch/saferef.py | 86 | """
"Safe weakrefs", originally from pyDispatcher.
Provides a way to safely weakref any function, including bound methods (which
aren't handled by the core weakref module).
"""
import traceback
import weakref
def safeRef(target, onDelete=None):
    """Return a *safe* weak reference to a callable target

    target -- the object to be weakly referenced, if it's a
        bound method reference, will create a BoundMethodWeakref,
        otherwise creates a simple weakref.

    onDelete -- if provided, will have a hard reference stored
        to the callable to be called after the safe reference
        goes out of scope with the reference object, (either a
        weakref or a BoundMethodWeakref) as argument.
    """
    # Bound methods (objects carrying a non-None im_self) need the special
    # BoundMethodWeakref treatment; everything else gets an ordinary weakref.
    if hasattr(target, 'im_self') and target.im_self is not None:
        assert hasattr(target, 'im_func'), """safeRef target %r has im_self, but no im_func, don't know how to create reference"""%( target,)
        return get_bound_method_weakref(target=target, onDelete=onDelete)
    if callable(onDelete):
        return weakref.ref(target, onDelete)
    return weakref.ref(target)
class BoundMethodWeakref(object):
    """'Safe' and reusable weak references to instance methods

    BoundMethodWeakref objects provide a mechanism for
    referencing a bound method without requiring that the
    method object itself (which is normally a transient
    object) is kept alive.  Instead, the BoundMethodWeakref
    object keeps weak references to both the object and the
    function which together define the instance method.

    Attributes:
        key -- the identity key for the reference, calculated
            by the class's calculateKey method applied to the
            target instance method
        deletionMethods -- sequence of callable objects taking
            single argument, a reference to this object which
            will be called when *either* the target object or
            target function is garbage collected (i.e. when
            this object becomes invalid).  These are specified
            as the onDelete parameters of safeRef calls.
        weakSelf -- weak reference to the target object
        weakFunc -- weak reference to the target function

    Class Attributes:
        _allInstances -- class attribute pointing to all live
            BoundMethodWeakref objects indexed by the class's
            calculateKey(target) method applied to the target
            objects.  This weak value dictionary is used to
            short-circuit creation so that multiple references
            to the same (object, function) pair produce the
            same BoundMethodWeakref instance.

    NOTE: this module is Python 2 code (print statements,
    `except Exception, e` syntax, im_self/im_func attributes).
    """
    # Registry of live references, keyed by (id(im_self), id(im_func)).
    _allInstances = weakref.WeakValueDictionary()
    def __new__( cls, target, onDelete=None, *arguments,**named ):
        """Create new instance or return current instance

        Basically this method of construction allows us to
        short-circuit creation of references to already-
        referenced instance methods.  The key corresponding
        to the target is calculated, and if there is already
        an existing reference, that is returned, with its
        deletionMethods attribute updated.  Otherwise the
        new instance is created and registered in the table
        of already-referenced methods.
        """
        key = cls.calculateKey(target)
        current =cls._allInstances.get(key)
        if current is not None:
            # Reuse the existing reference; just record the extra callback.
            current.deletionMethods.append( onDelete)
            return current
        else:
            base = super( BoundMethodWeakref, cls).__new__( cls )
            cls._allInstances[key] = base
            base.__init__( target, onDelete, *arguments,**named)
            return base
    def __init__(self, target, onDelete=None):
        """Return a weak-reference-like instance for a bound method

        target -- the instance-method target for the weak
            reference, must have im_self and im_func attributes
            and be reconstructable via:
                target.im_func.__get__( target.im_self )
            which is true of built-in instance methods.
        onDelete -- optional callback which will be called
            when this weak reference ceases to be valid
            (i.e. either the object or the function is garbage
            collected).  Should take a single argument,
            which will be passed a pointer to this object.
        """
        def remove(weak, self=self):
            """Set self.isDead to true when method or instance is destroyed"""
            # Copy then clear the callback list so each callback runs at
            # most once, even if both weakrefs die.
            methods = self.deletionMethods[:]
            del self.deletionMethods[:]
            try:
                del self.__class__._allInstances[ self.key ]
            except KeyError:
                pass
            for function in methods:
                try:
                    if callable( function ):
                        function( self )
                except Exception, e:
                    # Best-effort error reporting during interpreter teardown,
                    # when traceback machinery may already be gone.
                    try:
                        traceback.print_exc()
                    except AttributeError, err:
                        print '''Exception during saferef %s cleanup function %s: %s'''%(
                            self, function, e
                        )
        self.deletionMethods = [onDelete]
        self.key = self.calculateKey( target )
        # Weakly reference both halves of the bound method; either dying
        # invalidates this reference and fires the callbacks via remove().
        self.weakSelf = weakref.ref(target.im_self, remove)
        self.weakFunc = weakref.ref(target.im_func, remove)
        # Keep printable names for __str__ after the targets are gone.
        self.selfName = str(target.im_self)
        self.funcName = str(target.im_func.__name__)
    def calculateKey( cls, target ):
        """Calculate the reference key for this reference

        Currently this is a two-tuple of the id()'s of the
        target object and the target function respectively.
        """
        return (id(target.im_self),id(target.im_func))
    calculateKey = classmethod( calculateKey )
    def __str__(self):
        """Give a friendly representation of the object"""
        return """%s( %s.%s )"""%(
            self.__class__.__name__,
            self.selfName,
            self.funcName,
        )
    __repr__ = __str__
    def __nonzero__( self ):
        """Whether we are still a valid reference"""
        return self() is not None
    def __cmp__( self, other ):
        """Compare with another reference"""
        if not isinstance (other,self.__class__):
            return cmp( self.__class__, type(other) )
        return cmp( self.key, other.key)
    def __call__(self):
        """Return a strong reference to the bound method

        If the target cannot be retrieved, then will
        return None, otherwise returns a bound instance
        method for our object and function.

        Note:
            You may call this method any number of times,
            as it does not invalidate the reference.
        """
        target = self.weakSelf()
        if target is not None:
            function = self.weakFunc()
            if function is not None:
                # Re-bind the function to the instance via the descriptor
                # protocol, recreating the transient bound-method object.
                return function.__get__(target)
        return None
class BoundNonDescriptorMethodWeakref(BoundMethodWeakref):
    """A specialized BoundMethodWeakref, for platforms where instance methods
    are not descriptors.

    It assumes that the function name and the target attribute name are the
    same, instead of assuming that the function is a descriptor. This approach
    is equally fast, but not 100% reliable because functions can be stored on an
    attribute named differently than the function's name such as in:

    class A: pass
    def foo(self): return "foo"
    A.bar = foo

    But this shouldn't be a common use case. So, on platforms where methods
    aren't descriptors (such as Jython) this implementation has the advantage
    of working in the most cases.
    """
    def __init__(self, target, onDelete=None):
        """Return a weak-reference-like instance for a bound method

        target -- the instance-method target for the weak
            reference, must have im_self and im_func attributes
            and be reconstructable via:
                target.im_func.__get__( target.im_self )
            which is true of built-in instance methods.
        onDelete -- optional callback which will be called
            when this weak reference ceases to be valid
            (i.e. either the object or the function is garbage
            collected).  Should take a single argument,
            which will be passed a pointer to this object.
        """
        # Sanity check: lookup by name must yield the same bound method,
        # since __call__ below re-fetches the method by attribute name.
        assert getattr(target.im_self, target.__name__) == target, \
               ("method %s isn't available as the attribute %s of %s" %
                (target, target.__name__, target.im_self))
        super(BoundNonDescriptorMethodWeakref, self).__init__(target, onDelete)
    def __call__(self):
        """Return a strong reference to the bound method

        If the target cannot be retrieved, then will
        return None, otherwise returns a bound instance
        method for our object and function.

        Note:
            You may call this method any number of times,
            as it does not invalidate the reference.
        """
        target = self.weakSelf()
        if target is not None:
            function = self.weakFunc()
            if function is not None:
                # Using partial() would be another option, but it erases the
                # "signature" of the function. That is, after a function is
                # curried, the inspect module can't be used to determine how
                # many arguments the function expects, nor what keyword
                # arguments it supports, and pydispatcher needs this
                # information.
                return getattr(target, function.__name__)
        return None
def get_bound_method_weakref(target, onDelete):
    """Instantiates the appropriate BoundMethodWeakref, depending on the
    details of the underlying class method implementation."""
    # Methods that are descriptors (CPython) take the default implementation;
    # non-descriptor methods (e.g. Jython) need the name-based fallback.
    if hasattr(target, '__get__'):
        ref_class = BoundMethodWeakref
    else:
        ref_class = BoundNonDescriptorMethodWeakref
    return ref_class(target=target, onDelete=onDelete)
|
yurkis/whitebox | refs/heads/master | sysmanager/modules/comms/sys/info.py | 1 | import sys
import fileinput
import os
import subprocess
from ...result import *
################################################################################
def cpuload(args):
    """Sample overall CPU load via ``iostat`` and return it wrapped in a Result.

    ``args`` is accepted for the command-dispatch interface but unused.
    Assumes the FreeBSD ``iostat`` column layout (us/ni/sy/in/id) -- TODO
    confirm on other platforms.
    """
    raw = subprocess.getoutput("iostat proc")
    # Third line of output holds the numeric columns.
    fields = raw.split("\n")[2].split()
    # Named locals instead of the original ``sys = vals[4]``, which shadowed
    # the imported ``sys`` module.
    out = {"user": fields[2],
           # NOTE(review): key typo ("ucer_niced" vs "user_niced") kept as-is;
           # consumers of this payload may depend on it.
           "ucer_niced": fields[3],
           "system": fields[4],
           "interrupt": fields[5],
           "idle": fields[6]}
    return Result(out)
################################################################################
# extended device statistics
#device r/s w/s kr/s kw/s qlen svc_t %b
#ada0 0.6 20.1 15.9 252.6 0 3.6 2
def hddload(args):
    """Collect per-disk extended I/O statistics from ``iostat``.

    ``args`` is accepted for the command-dispatch interface but unused.
    Returns a Result wrapping one dict per device line.
    """
    raw = subprocess.getoutput("iostat -t da -x")
    stats = []
    # Skip the two header lines; each remaining line describes one device.
    for device_line in raw.split("\n")[2:]:
        columns = device_line.split()
        stats.append({
            "name": columns[0],
            "reads": columns[1],
            "writes": columns[2],
            "read": columns[3],
            "write": columns[4],
            "queue": columns[5],
        })
    return Result(stats)
################################################################################
def basesystem(args):
    """Report the OS release string and system uptime (``args`` is unused)."""
    info = {
        "base_system_version": subprocess.getoutput("uname -r"),
        "uptime": subprocess.getoutput("uptime"),
    }
    return Result(info)
################################################################################
def shells(args):
    """Return the list of valid login shells from /etc/shells.

    ``args`` is accepted for the command-dispatch interface but unused.
    Blank lines and '#' comment lines are skipped, mirroring how the
    system itself parses the file.
    """
    valid_shells = []
    # Context manager closes the file even on error; the original code
    # opened the file and never closed it.
    with open('/etc/shells') as f:
        for line in f:
            line = line.strip()
            if (line == '') or (line[0] == "#"):
                continue
            valid_shells.append(line)
    return Result(valid_shells)
################################################################################
def hostname(args):
return Result(subprocess.getoutput("hostname")) |
Isilon/isilon_sdk | refs/heads/master | tests/test_namespace_directories.py | 1 | """Directories with isi_sdk.NamespaceApi."""
import urllib3
import isi_sdk_8_1_1 as isi_sdk
import test_constants
urllib3.disable_warnings()
def main():
    """Exercise the Isilon NamespaceApi directory endpoints end to end.

    Integration script: creates, copies, moves, queries, and deletes
    directories on the cluster named in test_constants, printing results
    along the way. Requires a reachable cluster; every call does real I/O.
    """
    # configure username and password
    configuration = isi_sdk.Configuration()
    configuration.username = test_constants.USERNAME
    configuration.password = test_constants.PASSWORD
    configuration.verify_ssl = test_constants.VERIFY_SSL
    configuration.host = test_constants.HOST
    # configure client connection
    api_client = isi_sdk.ApiClient(configuration)
    api = isi_sdk.NamespaceApi(api_client)
    # create a directory
    api.create_directory(
        'ifs/ns_src/ns_dir', x_isi_ifs_target_type='container',
        recursive=True, overwrite=True)
    # recursively copy directory from /ifs/ns_src to /ifs/ns_dest
    api.copy_directory(
        'ifs/ns_dest', x_isi_ifs_copy_source='/namespace/ifs/ns_src',
        merge=True)
    print('Copied directory: {}'.format(
        api.get_directory_contents('ifs/ns_dest').children[0].name))
    api.delete_directory('ifs/ns_dest', recursive=True)
    # move directory from /ifs/ns_src to /ifs/ns_dest
    api.move_directory(
        'ifs/ns_src', x_isi_ifs_set_location='/namespace/ifs/ns_dest')
    print('Moved directory: {}'.format(
        api.get_directory_contents('ifs/ns_dest').children[0].name))
    api.delete_directory('ifs/ns_dest', recursive=True)
    # get directory attributes from response headers
    sdk_resp = api.get_directory_attributes_with_http_info('ifs/data')
    # the third index of the response is the response headers
    print('Directory attributes from headers: {}'.format(sdk_resp[2]))
    # get default directory detail (drop empty/None fields before printing)
    details = api.get_directory_contents(
        'ifs', detail='default').children[0].to_dict()
    details = dict((k, v) for k, v in details.items() if v)
    print('Default directory details: {}'.format(details))
    # get directory last modified time
    print('Last modified time: {}'.format(
        api.get_directory_contents(
            'ifs', detail='last_modified').children[0].last_modified))
    # use resume token to paginate requests
    resume = api.get_directory_contents('ifs', limit=3).resume
    api.get_directory_contents('ifs', resume=resume)
    # get extended attributes on a directory
    print('Directory metadata attributes: {}'.format(
        api.get_directory_metadata('ifs', metadata=True)))
    # create extended attribute
    meta = isi_sdk.NamespaceMetadata(
        action='update',
        attrs=[isi_sdk.NamespaceMetadataAttrs(
            name='test', value='42', op='update', namespace='user')])
    # set extended attribute on a directory
    api.set_directory_metadata('ifs', metadata=True, directory_metadata=meta)
    # remove extended attribute (same payload, but op='delete')
    meta = isi_sdk.NamespaceMetadata(
        action='update',
        attrs=[isi_sdk.NamespaceMetadataAttrs(
            name='test', value='42', op='delete', namespace='user')])
    api.set_directory_metadata('ifs', metadata=True, directory_metadata=meta)
    # set access control list on a directory
    test_dir = 'ifs/ns_src'
    api.create_directory(
        test_dir, x_isi_ifs_target_type='container',
        x_isi_ifs_access_control='0770')
    print('Directory ACL: {}'.format(api.get_acl(test_dir, acl=True)))
    # give everyone read permissions on the directory
    acl_body = isi_sdk.NamespaceAcl(authoritative='mode', mode='0444')
    api.set_acl(test_dir, acl=True, namespace_acl=acl_body)
    print('Set directory permissions: {}'.format(
        api.get_acl(test_dir, acl=True).mode))
    api.delete_directory(test_dir)
    # build directory query: files modified since the cutoff AND >= 1000 bytes
    query = isi_sdk.DirectoryQuery(
        result=['name', 'size', 'last_modified', 'owner'],
        scope=isi_sdk.DirectoryQueryScope(
            logic='and',
            conditions=[
                isi_sdk.DirectoryQueryScopeConditions(
                    operator='>=',
                    attr='last_modified',
                    value="Thu, 15 Dec 2011 06:41:04"
                ),
                isi_sdk.DirectoryQueryScopeConditions(
                    operator='>=',
                    attr='size',
                    value=1000
                )
            ]
        )
    )
    # exhaustive list of optional details
    details = (
        'access_time,atime_val,block_size,blocks,btime_val,'
        'change_time,create_time,ctime_val,gid,group,id,'
        'is_hidden,mode,mtime_val,nlink,stub,type,uid,'
        'container,container_path')
    # execute directory query
    query_resp = api.query_directory(
        'ifs/data', query=True, directory_query=query, detail=details,
        max_depth=2, limit=10)
    print('Query results for /ifs/data: {}'.format(query_resp))
    # request remaining results in chunks until the resume token runs out
    while query_resp.resume:
        query_resp = api.query_directory(
            'ifs/data', query=True, directory_query=query,
            resume=query_resp.resume)
        print('Resume query results: {}'.format(query_resp))
    print('Successful clean up')

if __name__ == '__main__':
    main()
|
ksrajkumar/openerp-6.1 | refs/heads/master | openerp/addons/mrp/report/bom_structure.py | 9 | ## -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from report import report_sxw
from osv import osv
import pooler
class bom_structure(report_sxw.rml_parse):
    """RML parser for the BoM structure report.

    Exposes ``get_children`` to the report template so a bill of
    materials can be rendered as a flat list of lines annotated with
    their nesting depth.
    """

    def __init__(self, cr, uid, name, context):
        super(bom_structure, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'get_children': self.get_children,
        })

    def get_children(self, object, level=0):
        """Flatten a tree of BoM line browse records for the report.

        :param object: iterable of BoM line browse records
        :param level: starting indentation level (defaults to 0)
        :return: list of dicts, one per line, each carrying the line
                 data plus its ``level`` (indentation depth)
        """
        result = []

        def _get_rec(lines, level):
            # Depth-first walk: append one dict per BoM line to the
            # shared ``result`` accumulator.
            for l in lines:
                res = {}
                res['name'] = l.name
                res['pname'] = l.product_id.name
                res['pcode'] = l.product_id.default_code
                res['pqty'] = l.product_qty
                res['uname'] = l.product_uom.name
                res['code'] = l.code
                res['level'] = level
                result.append(res)
                if l.child_complete_ids:
                    # Recurse with the child depth capped at 6 so the
                    # report indentation never grows past six steps.
                    # Passing the capped value (instead of mutating
                    # ``level`` in place, as the previous version did)
                    # fixes sibling lines being rendered at the wrong
                    # depth once the cap had been reached: the old
                    # decrement guard ``level>0 and level<6`` never
                    # undid the 5 -> 6 increment.
                    _get_rec(l.child_complete_ids, min(level + 1, 6))
            return result

        return _get_rec(object, level)
# Register the parser so the 'bom.structure' report on mrp.bom renders
# mrp/report/bom_structure.rml with the internal header.
report_sxw.report_sxw('report.bom.structure','mrp.bom','mrp/report/bom_structure.rml',parser=bom_structure,header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
marckuz/django | refs/heads/master | tests/admin_scripts/custom_templates/app_template/api.py | 581 | # your API code
|
oOPa/ReClean | refs/heads/master | properties/migrations/0002_tennant.py | 2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds the Tennant model ("Tennant" spelling kept as-is — it must
    # match references elsewhere in the app) on top of the initial
    # properties schema.
    dependencies = [
        ('properties', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Tennant',
            fields=[
                # Standard auto-increment surrogate key.
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Whether the tennant has responded; no default is set,
                # so a value must be supplied on every create.
                ('responded', models.BooleanField()),
            ],
        ),
    ]
|
nicolasnoble/grpc | refs/heads/master | src/python/grpcio_status/grpc_status/__init__.py | 90 | # Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
damienmg/bazel | refs/heads/master | third_party/protobuf/3.4.0/python/google/protobuf/internal/message_test.py | 4 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import collections
import copy
import math
import operator
import pickle
import six
import sys
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
from google.protobuf import text_format
from google.protobuf.internal import api_implementation
from google.protobuf.internal import packed_field_test_pb2
from google.protobuf.internal import test_util
from google.protobuf.internal import testing_refleaks
from google.protobuf import message
from google.protobuf.internal import _parameterized
if six.PY3:
long = int
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
  """Return True iff *val* is a NaN — the only value unequal to itself."""
  return not (val == val)
def isinf(val):
  """Return True iff *val* is positive or negative infinity.

  Works without math.isinf (this file predates relying on it): a finite
  value times zero is zero, an infinity times zero is NaN, and a NaN
  input is itself unequal to itself and therefore not an infinity.
  """
  product = val * 0
  return (val == val) and (product != product)
def IsPosInf(val):
  """Return True iff *val* is positive infinity."""
  # Equivalent to "is an infinity and > 0": only +inf compares equal here.
  return val == float('inf')
def IsNegInf(val):
  """Return True iff *val* is negative infinity."""
  # Equivalent to "is an infinity and < 0": only -inf compares equal here.
  return val == float('-inf')
BaseTestCase = testing_refleaks.BaseTestCase
@_parameterized.NamedParameters(
('_proto2', unittest_pb2),
('_proto3', unittest_proto3_arena_pb2))
class MessageTest(BaseTestCase):
def testBadUtf8String(self, message_module):
if api_implementation.Type() != 'python':
self.skipTest("Skipping testBadUtf8String, currently only the python "
"api implementation raises UnicodeDecodeError when a "
"string field contains bad utf-8.")
bad_utf8_data = test_util.GoldenFileData('bad_utf8_string')
with self.assertRaises(UnicodeDecodeError) as context:
message_module.TestAllTypes.FromString(bad_utf8_data)
self.assertIn('TestAllTypes.optional_string', str(context.exception))
def testGoldenMessage(self, message_module):
# Proto3 doesn't have the "default_foo" members or foreign enums,
# and doesn't preserve unknown fields, so for proto3 we use a golden
# message that doesn't have these fields set.
if message_module is unittest_pb2:
golden_data = test_util.GoldenFileData(
'golden_message_oneof_implemented')
else:
golden_data = test_util.GoldenFileData('golden_message_proto3')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedMessage(self, message_module):
golden_data = test_util.GoldenFileData('golden_packed_fields_message')
golden_message = message_module.TestPackedTypes()
golden_message.ParseFromString(golden_data)
all_set = message_module.TestPackedTypes()
test_util.SetAllPackedFields(all_set)
self.assertEqual(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testDeterminismParameters(self, message_module):
# This message is always deterministically serialized, even if determinism
# is disabled, so we can use it to verify that all the determinism
# parameters work correctly.
golden_data = (b'\xe2\x02\nOne string'
b'\xe2\x02\nTwo string'
b'\xe2\x02\nRed string'
b'\xe2\x02\x0bBlue string')
golden_message = message_module.TestAllTypes()
golden_message.repeated_string.extend([
'One string',
'Two string',
'Red string',
'Blue string',
])
self.assertEqual(golden_data,
golden_message.SerializeToString(deterministic=None))
self.assertEqual(golden_data,
golden_message.SerializeToString(deterministic=False))
self.assertEqual(golden_data,
golden_message.SerializeToString(deterministic=True))
class BadArgError(Exception):
pass
class BadArg(object):
def __nonzero__(self):
raise BadArgError()
def __bool__(self):
raise BadArgError()
with self.assertRaises(BadArgError):
golden_message.SerializeToString(deterministic=BadArg())
def testPickleSupport(self, message_module):
golden_data = test_util.GoldenFileData('golden_message')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEqual(unpickled_message, golden_message)
def testPositiveInfinity(self, message_module):
if message_module is unittest_pb2:
golden_data = (b'\x5D\x00\x00\x80\x7F'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
b'\xCD\x02\x00\x00\x80\x7F'
b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
else:
golden_data = (b'\x5D\x00\x00\x80\x7F'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
b'\xCA\x02\x04\x00\x00\x80\x7F'
b'\xD2\x02\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.optional_float))
self.assertTrue(IsPosInf(golden_message.optional_double))
self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinity(self, message_module):
if message_module is unittest_pb2:
golden_data = (b'\x5D\x00\x00\x80\xFF'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
b'\xCD\x02\x00\x00\x80\xFF'
b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
else:
golden_data = (b'\x5D\x00\x00\x80\xFF'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
b'\xCA\x02\x04\x00\x00\x80\xFF'
b'\xD2\x02\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.optional_float))
self.assertTrue(IsNegInf(golden_message.optional_double))
self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumber(self, message_module):
golden_data = (b'\x5D\x00\x00\xC0\x7F'
b'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
b'\xCD\x02\x00\x00\xC0\x7F'
b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.optional_float))
self.assertTrue(isnan(golden_message.optional_double))
self.assertTrue(isnan(golden_message.repeated_float[0]))
self.assertTrue(isnan(golden_message.repeated_double[0]))
# The protocol buffer may serialize to any one of multiple different
# representations of a NaN. Rather than verify a specific representation,
# verify the serialized string can be converted into a correctly
# behaving protocol buffer.
serialized = golden_message.SerializeToString()
message = message_module.TestAllTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.optional_float))
self.assertTrue(isnan(message.optional_double))
self.assertTrue(isnan(message.repeated_float[0]))
self.assertTrue(isnan(message.repeated_double[0]))
def testPositiveInfinityPacked(self, message_module):
golden_data = (b'\xA2\x06\x04\x00\x00\x80\x7F'
b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = message_module.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.packed_float[0]))
self.assertTrue(IsPosInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinityPacked(self, message_module):
golden_data = (b'\xA2\x06\x04\x00\x00\x80\xFF'
b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = message_module.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.packed_float[0]))
self.assertTrue(IsNegInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumberPacked(self, message_module):
golden_data = (b'\xA2\x06\x04\x00\x00\xC0\x7F'
b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = message_module.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.packed_float[0]))
self.assertTrue(isnan(golden_message.packed_double[0]))
serialized = golden_message.SerializeToString()
message = message_module.TestPackedTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.packed_float[0]))
self.assertTrue(isnan(message.packed_double[0]))
def testExtremeFloatValues(self, message_module):
message = message_module.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 127)
message.optional_float = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
message.optional_float = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_float = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
message.optional_float = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -127)
message.optional_float = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
message.optional_float = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_float = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
message.optional_float = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
def testExtremeDoubleValues(self, message_module):
message = message_module.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 1023)
message.optional_double = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
message.optional_double = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_double = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
message.optional_double = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -1023)
message.optional_double = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
message.optional_double = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_double = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
message.optional_double = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testFloatPrinting(self, message_module):
message = message_module.TestAllTypes()
message.optional_float = 2.0
self.assertEqual(str(message), 'optional_float: 2.0\n')
def testHighPrecisionFloatPrinting(self, message_module):
message = message_module.TestAllTypes()
message.optional_double = 0.12345678912345678
if sys.version_info >= (3,):
self.assertEqual(str(message), 'optional_double: 0.12345678912345678\n')
else:
self.assertEqual(str(message), 'optional_double: 0.123456789123\n')
def testUnknownFieldPrinting(self, message_module):
populated = message_module.TestAllTypes()
test_util.SetAllNonLazyFields(populated)
empty = message_module.TestEmptyMessage()
empty.ParseFromString(populated.SerializeToString())
self.assertEqual(str(empty), '')
def testRepeatedNestedFieldIteration(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_nested_message.add(bb=1)
msg.repeated_nested_message.add(bb=2)
msg.repeated_nested_message.add(bb=3)
msg.repeated_nested_message.add(bb=4)
self.assertEqual([1, 2, 3, 4],
[m.bb for m in msg.repeated_nested_message])
self.assertEqual([4, 3, 2, 1],
[m.bb for m in reversed(msg.repeated_nested_message)])
self.assertEqual([4, 3, 2, 1],
[m.bb for m in msg.repeated_nested_message[::-1]])
def testSortingRepeatedScalarFieldsDefaultComparator(self, message_module):
"""Check some different types with the default comparator."""
message = message_module.TestAllTypes()
# TODO(mattp): would testing more scalar types strengthen test?
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_int32.append(2)
message.repeated_int32.sort()
self.assertEqual(message.repeated_int32[0], 1)
self.assertEqual(message.repeated_int32[1], 2)
self.assertEqual(message.repeated_int32[2], 3)
self.assertEqual(str(message.repeated_int32), str([1, 2, 3]))
message.repeated_float.append(1.1)
message.repeated_float.append(1.3)
message.repeated_float.append(1.2)
message.repeated_float.sort()
self.assertAlmostEqual(message.repeated_float[0], 1.1)
self.assertAlmostEqual(message.repeated_float[1], 1.2)
self.assertAlmostEqual(message.repeated_float[2], 1.3)
message.repeated_string.append('a')
message.repeated_string.append('c')
message.repeated_string.append('b')
message.repeated_string.sort()
self.assertEqual(message.repeated_string[0], 'a')
self.assertEqual(message.repeated_string[1], 'b')
self.assertEqual(message.repeated_string[2], 'c')
self.assertEqual(str(message.repeated_string), str([u'a', u'b', u'c']))
message.repeated_bytes.append(b'a')
message.repeated_bytes.append(b'c')
message.repeated_bytes.append(b'b')
message.repeated_bytes.sort()
self.assertEqual(message.repeated_bytes[0], b'a')
self.assertEqual(message.repeated_bytes[1], b'b')
self.assertEqual(message.repeated_bytes[2], b'c')
self.assertEqual(str(message.repeated_bytes), str([b'a', b'b', b'c']))
def testSortingRepeatedScalarFieldsCustomComparator(self, message_module):
"""Check some different types with custom comparator."""
message = message_module.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(key=abs)
self.assertEqual(message.repeated_int32[0], -1)
self.assertEqual(message.repeated_int32[1], -2)
self.assertEqual(message.repeated_int32[2], -3)
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(key=len)
self.assertEqual(message.repeated_string[0], 'c')
self.assertEqual(message.repeated_string[1], 'bb')
self.assertEqual(message.repeated_string[2], 'aaa')
def testSortingRepeatedCompositeFieldsCustomComparator(self, message_module):
"""Check passing a custom comparator to sort a repeated composite field."""
message = message_module.TestAllTypes()
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(key=operator.attrgetter('bb'))
self.assertEqual(message.repeated_nested_message[0].bb, 1)
self.assertEqual(message.repeated_nested_message[1].bb, 2)
self.assertEqual(message.repeated_nested_message[2].bb, 3)
self.assertEqual(message.repeated_nested_message[3].bb, 4)
self.assertEqual(message.repeated_nested_message[4].bb, 5)
self.assertEqual(message.repeated_nested_message[5].bb, 6)
self.assertEqual(str(message.repeated_nested_message),
'[bb: 1\n, bb: 2\n, bb: 3\n, bb: 4\n, bb: 5\n, bb: 6\n]')
def testSortingRepeatedCompositeFieldsStable(self, message_module):
"""Check passing a custom comparator to sort a repeated composite field."""
message = message_module.TestAllTypes()
message.repeated_nested_message.add().bb = 21
message.repeated_nested_message.add().bb = 20
message.repeated_nested_message.add().bb = 13
message.repeated_nested_message.add().bb = 33
message.repeated_nested_message.add().bb = 11
message.repeated_nested_message.add().bb = 24
message.repeated_nested_message.add().bb = 10
message.repeated_nested_message.sort(key=lambda z: z.bb // 10)
self.assertEqual(
[13, 11, 10, 21, 20, 24, 33],
[n.bb for n in message.repeated_nested_message])
# Make sure that for the C++ implementation, the underlying fields
# are actually reordered.
pb = message.SerializeToString()
message.Clear()
message.MergeFromString(pb)
self.assertEqual(
[13, 11, 10, 21, 20, 24, 33],
[n.bb for n in message.repeated_nested_message])
def testRepeatedCompositeFieldSortArguments(self, message_module):
"""Check sorting a repeated composite field using list.sort() arguments."""
message = message_module.TestAllTypes()
get_bb = operator.attrgetter('bb')
cmp_bb = lambda a, b: cmp(a.bb, b.bb)
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(key=get_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(key=get_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
if sys.version_info >= (3,): return # No cmp sorting in PY3.
message.repeated_nested_message.sort(sort_function=cmp_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
def testRepeatedScalarFieldSortArguments(self, message_module):
"""Check sorting a scalar field using list.sort() arguments."""
message = message_module.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(key=abs)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(key=abs, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
if sys.version_info < (3,): # No cmp sorting in PY3.
abs_cmp = lambda a, b: cmp(abs(a), abs(b))
message.repeated_int32.sort(sort_function=abs_cmp)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(cmp=abs_cmp, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(key=len)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(key=len, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
if sys.version_info < (3,): # No cmp sorting in PY3.
len_cmp = lambda a, b: cmp(len(a), len(b))
message.repeated_string.sort(sort_function=len_cmp)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(cmp=len_cmp, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
def testRepeatedFieldsComparable(self, message_module):
m1 = message_module.TestAllTypes()
m2 = message_module.TestAllTypes()
m1.repeated_int32.append(0)
m1.repeated_int32.append(1)
m1.repeated_int32.append(2)
m2.repeated_int32.append(0)
m2.repeated_int32.append(1)
m2.repeated_int32.append(2)
m1.repeated_nested_message.add().bb = 1
m1.repeated_nested_message.add().bb = 2
m1.repeated_nested_message.add().bb = 3
m2.repeated_nested_message.add().bb = 1
m2.repeated_nested_message.add().bb = 2
m2.repeated_nested_message.add().bb = 3
if sys.version_info >= (3,): return # No cmp() in PY3.
# These comparisons should not raise errors.
_ = m1 < m2
_ = m1.repeated_nested_message < m2.repeated_nested_message
# Make sure cmp always works. If it wasn't defined, these would be
# id() comparisons and would all fail.
self.assertEqual(cmp(m1, m2), 0)
self.assertEqual(cmp(m1.repeated_int32, m2.repeated_int32), 0)
self.assertEqual(cmp(m1.repeated_int32, [0, 1, 2]), 0)
self.assertEqual(cmp(m1.repeated_nested_message,
m2.repeated_nested_message), 0)
with self.assertRaises(TypeError):
# Can't compare repeated composite containers to lists.
cmp(m1.repeated_nested_message, m2.repeated_nested_message[:])
# TODO(anuraag): Implement extensiondict comparison in C++ and then add test
def testRepeatedFieldsAreSequences(self, message_module):
m = message_module.TestAllTypes()
self.assertIsInstance(m.repeated_int32, collections.MutableSequence)
self.assertIsInstance(m.repeated_nested_message,
collections.MutableSequence)
def testRepeatedFieldInsideNestedMessage(self, message_module):
m = message_module.NestedTestAllTypes()
m.payload.repeated_int32.extend([])
self.assertTrue(m.HasField('payload'))
def ensureNestedMessageExists(self, msg, attribute):
"""Make sure that a nested message object exists.
As soon as a nested message attribute is accessed, it will be present in the
_fields dict, without being marked as actually being set.
"""
getattr(msg, attribute)
self.assertFalse(msg.HasField(attribute))
def testOneofGetCaseNonexistingField(self, message_module):
m = message_module.TestAllTypes()
self.assertRaises(ValueError, m.WhichOneof, 'no_such_oneof_field')
def testOneofDefaultValues(self, message_module):
m = message_module.TestAllTypes()
self.assertIs(None, m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
# Oneof is set even when setting it to a default value.
m.oneof_uint32 = 0
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_uint32'))
self.assertFalse(m.HasField('oneof_string'))
m.oneof_string = ""
self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_string'))
self.assertFalse(m.HasField('oneof_uint32'))
def testOneofSemantics(self, message_module):
m = message_module.TestAllTypes()
self.assertIs(None, m.WhichOneof('oneof_field'))
m.oneof_uint32 = 11
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_uint32'))
m.oneof_string = u'foo'
self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
self.assertTrue(m.HasField('oneof_string'))
# Read nested message accessor without accessing submessage.
m.oneof_nested_message
self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_string'))
self.assertFalse(m.HasField('oneof_nested_message'))
# Read accessor of nested message without accessing submessage.
m.oneof_nested_message.bb
self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_string'))
self.assertFalse(m.HasField('oneof_nested_message'))
m.oneof_nested_message.bb = 11
self.assertEqual('oneof_nested_message', m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_string'))
self.assertTrue(m.HasField('oneof_nested_message'))
m.oneof_bytes = b'bb'
self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_nested_message'))
self.assertTrue(m.HasField('oneof_bytes'))
def testOneofCompositeFieldReadAccess(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
self.ensureNestedMessageExists(m, 'oneof_nested_message')
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
self.assertEqual(11, m.oneof_uint32)
def testOneofWhichOneof(self, message_module):
m = message_module.TestAllTypes()
self.assertIs(None, m.WhichOneof('oneof_field'))
if message_module is unittest_pb2:
self.assertFalse(m.HasField('oneof_field'))
m.oneof_uint32 = 11
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
if message_module is unittest_pb2:
self.assertTrue(m.HasField('oneof_field'))
m.oneof_bytes = b'bb'
self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
m.ClearField('oneof_bytes')
self.assertIs(None, m.WhichOneof('oneof_field'))
if message_module is unittest_pb2:
self.assertFalse(m.HasField('oneof_field'))
def testOneofClearField(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m.ClearField('oneof_field')
if message_module is unittest_pb2:
self.assertFalse(m.HasField('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
self.assertIs(None, m.WhichOneof('oneof_field'))
def testOneofClearSetField(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m.ClearField('oneof_uint32')
if message_module is unittest_pb2:
self.assertFalse(m.HasField('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
self.assertIs(None, m.WhichOneof('oneof_field'))
def testOneofClearUnsetField(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
self.ensureNestedMessageExists(m, 'oneof_nested_message')
m.ClearField('oneof_nested_message')
self.assertEqual(11, m.oneof_uint32)
if message_module is unittest_pb2:
self.assertTrue(m.HasField('oneof_field'))
self.assertTrue(m.HasField('oneof_uint32'))
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
def testOneofDeserialize(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
m2.ParseFromString(m.SerializeToString())
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
def testOneofCopyFrom(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
m2.CopyFrom(m)
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
def testOneofNestedMergeFrom(self, message_module):
m = message_module.NestedTestAllTypes()
m.payload.oneof_uint32 = 11
m2 = message_module.NestedTestAllTypes()
m2.payload.oneof_bytes = b'bb'
m2.child.payload.oneof_bytes = b'bb'
m2.MergeFrom(m)
self.assertEqual('oneof_uint32', m2.payload.WhichOneof('oneof_field'))
self.assertEqual('oneof_bytes', m2.child.payload.WhichOneof('oneof_field'))
def testOneofMessageMergeFrom(self, message_module):
  """MergeFrom() with a message member set displaces a scalar member."""
  m = message_module.NestedTestAllTypes()
  m.payload.oneof_nested_message.bb = 11
  m.child.payload.oneof_nested_message.bb = 12
  m2 = message_module.NestedTestAllTypes()
  m2.payload.oneof_uint32 = 13
  m2.MergeFrom(m)
  # The source's message member replaces the destination's scalar member.
  self.assertEqual('oneof_nested_message',
                   m2.payload.WhichOneof('oneof_field'))
  self.assertEqual('oneof_nested_message',
                   m2.child.payload.WhichOneof('oneof_field'))
def testOneofNestedMessageInit(self, message_module):
  """Passing a oneof message member to the constructor selects that member."""
  msg = message_module.TestAllTypes(
      oneof_nested_message=message_module.TestAllTypes.NestedMessage())
  self.assertEqual('oneof_nested_message', msg.WhichOneof('oneof_field'))
def testOneofClear(self, message_module):
  """Clear() resets the oneof, after which another member can be set."""
  msg = message_module.TestAllTypes()
  msg.oneof_uint32 = 11
  msg.Clear()
  # Nothing is selected after a full clear...
  self.assertIsNone(msg.WhichOneof('oneof_field'))
  # ...and the oneof is still usable afterwards.
  msg.oneof_bytes = b'bb'
  self.assertEqual('oneof_bytes', msg.WhichOneof('oneof_field'))
def testAssignByteStringToUnicodeField(self, message_module):
  """A byte string assigned to a string field must come back as a
  Unicode (text) string."""
  msg = message_module.TestAllTypes()
  msg.optional_string = str('')
  self.assertIsInstance(msg.optional_string, six.text_type)
def testLongValuedSlice(self, message_module):
  """It should be possible to use long-valued indicies in slices

  This didn't used to work in the v2 C++ implementation.
  """
  # NOTE(review): relies on a 'long' name being available — presumably
  # aliased to 'int' for Python 3 earlier in this file; confirm.
  m = message_module.TestAllTypes()

  # Repeated scalar
  m.repeated_int32.append(1)
  sl = m.repeated_int32[long(0):long(len(m.repeated_int32))]
  self.assertEqual(len(m.repeated_int32), len(sl))

  # Repeated composite
  m.repeated_nested_message.add().bb = 3
  sl = m.repeated_nested_message[long(0):long(len(m.repeated_nested_message))]
  self.assertEqual(len(m.repeated_nested_message), len(sl))
def testExtendShouldNotSwallowExceptions(self, message_module):
  """This didn't use to work in the v2 C++ implementation."""
  m = message_module.TestAllTypes()
  # 'a' is deliberately undefined: the generator raises NameError on its
  # first item, and extend() must propagate that instead of swallowing it.
  with self.assertRaises(NameError) as _:
    m.repeated_int32.extend(a for i in range(10))  # pylint: disable=undefined-variable
  with self.assertRaises(NameError) as _:
    m.repeated_nested_enum.extend(
        a for i in range(10))  # pylint: disable=undefined-variable
# Falsy values of assorted types; extend() currently treats every one of
# them as an empty iterable (a no-op) rather than raising.  b/18413862
FALSY_VALUES = [None, False, 0, 0.0, b'', u'', bytearray(), [], {}, set()]
def testExtendInt32WithNothing(self, message_module):
  """Test no-ops extending repeated int32 fields."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_int32)

  # TODO(ptucker): Deprecate this behavior. b/18413862
  # Every falsy value — regardless of type — is accepted and ignored.
  for falsy_value in MessageTest.FALSY_VALUES:
    m.repeated_int32.extend(falsy_value)
    self.assertSequenceEqual([], m.repeated_int32)

  # A genuinely empty list is of course also a no-op.
  m.repeated_int32.extend([])
  self.assertSequenceEqual([], m.repeated_int32)
def testExtendFloatWithNothing(self, message_module):
  """Test no-ops extending repeated float fields."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_float)

  # TODO(ptucker): Deprecate this behavior. b/18413862
  # Every falsy value — regardless of type — is accepted and ignored.
  for falsy_value in MessageTest.FALSY_VALUES:
    m.repeated_float.extend(falsy_value)
    self.assertSequenceEqual([], m.repeated_float)

  # A genuinely empty list is of course also a no-op.
  m.repeated_float.extend([])
  self.assertSequenceEqual([], m.repeated_float)
def testExtendStringWithNothing(self, message_module):
  """Test no-ops extending repeated string fields."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)

  # TODO(ptucker): Deprecate this behavior. b/18413862
  # Every falsy value — regardless of type — is accepted and ignored.
  for falsy_value in MessageTest.FALSY_VALUES:
    m.repeated_string.extend(falsy_value)
    self.assertSequenceEqual([], m.repeated_string)

  # A genuinely empty list is of course also a no-op.
  m.repeated_string.extend([])
  self.assertSequenceEqual([], m.repeated_string)
def testExtendInt32WithPythonList(self, message_module):
  """Test extending repeated int32 fields with python lists."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_int32)
  # Successive extends append, preserving element order.
  m.repeated_int32.extend([0])
  self.assertSequenceEqual([0], m.repeated_int32)
  m.repeated_int32.extend([1, 2])
  self.assertSequenceEqual([0, 1, 2], m.repeated_int32)
  m.repeated_int32.extend([3, 4])
  self.assertSequenceEqual([0, 1, 2, 3, 4], m.repeated_int32)
def testExtendFloatWithPythonList(self, message_module):
  """Test extending repeated float fields with python lists."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_float)
  # Successive extends append, preserving element order.
  m.repeated_float.extend([0.0])
  self.assertSequenceEqual([0.0], m.repeated_float)
  m.repeated_float.extend([1.0, 2.0])
  self.assertSequenceEqual([0.0, 1.0, 2.0], m.repeated_float)
  m.repeated_float.extend([3.0, 4.0])
  self.assertSequenceEqual([0.0, 1.0, 2.0, 3.0, 4.0], m.repeated_float)
def testExtendStringWithPythonList(self, message_module):
  """Test extending repeated string fields with python lists."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)
  # Successive extends append, preserving element order.
  m.repeated_string.extend([''])
  self.assertSequenceEqual([''], m.repeated_string)
  m.repeated_string.extend(['11', '22'])
  self.assertSequenceEqual(['', '11', '22'], m.repeated_string)
  m.repeated_string.extend(['33', '44'])
  self.assertSequenceEqual(['', '11', '22', '33', '44'], m.repeated_string)
def testExtendStringWithString(self, message_module):
  """Extending a repeated string field with a str appends its characters."""
  msg = message_module.TestAllTypes()
  self.assertSequenceEqual([], msg.repeated_string)
  # A string is itself an iterable of one-character strings.
  msg.repeated_string.extend('abc')
  self.assertSequenceEqual(['a', 'b', 'c'], msg.repeated_string)
class TestIterable(object):
  """This iterable object mimics the behavior of numpy.array.

  Truth-testing fails (raises ValueError) for length > 1 and returns
  bool(item[0]) for length == 1, exactly like a numpy array.
  """

  def __init__(self, values=None):
    self._list = values or []

  def __nonzero__(self):  # Python 2 truth hook.
    size = len(self._list)
    if size == 0:
      return False
    if size == 1:
      return bool(self._list[0])
    raise ValueError('Truth value is ambiguous.')

  # Python 3 looks up __bool__ instead of __nonzero__ and would otherwise
  # fall back to __len__, silently making multi-element instances truthy
  # rather than raising.  Alias it so the numpy-like behavior documented
  # above holds on both Python versions.
  __bool__ = __nonzero__

  def __len__(self):
    return len(self._list)

  def __iter__(self):
    return self._list.__iter__()
def testExtendInt32WithIterable(self, message_module):
  """Test extending repeated int32 fields with iterable."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_int32)
  # TestIterable raises on truth-testing when it holds more than one
  # element, so these calls also prove extend() never bool()s its argument.
  m.repeated_int32.extend(MessageTest.TestIterable([]))
  self.assertSequenceEqual([], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([0]))
  self.assertSequenceEqual([0], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([1, 2]))
  self.assertSequenceEqual([0, 1, 2], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([3, 4]))
  self.assertSequenceEqual([0, 1, 2, 3, 4], m.repeated_int32)
def testExtendFloatWithIterable(self, message_module):
  """Test extending repeated float fields with iterable."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_float)
  # TestIterable raises on truth-testing when it holds more than one
  # element, so these calls also prove extend() never bool()s its argument.
  m.repeated_float.extend(MessageTest.TestIterable([]))
  self.assertSequenceEqual([], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([0.0]))
  self.assertSequenceEqual([0.0], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([1.0, 2.0]))
  self.assertSequenceEqual([0.0, 1.0, 2.0], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([3.0, 4.0]))
  self.assertSequenceEqual([0.0, 1.0, 2.0, 3.0, 4.0], m.repeated_float)
def testExtendStringWithIterable(self, message_module):
  """Test extending repeated string fields with iterable."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)
  # TestIterable raises on truth-testing when it holds more than one
  # element, so these calls also prove extend() never bool()s its argument.
  m.repeated_string.extend(MessageTest.TestIterable([]))
  self.assertSequenceEqual([], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable(['']))
  self.assertSequenceEqual([''], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable(['1', '2']))
  self.assertSequenceEqual(['', '1', '2'], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable(['3', '4']))
  self.assertSequenceEqual(['', '1', '2', '3', '4'], m.repeated_string)
def testPickleRepeatedScalarContainer(self, message_module):
  """Pickling a repeated scalar container raises under the C++ impl."""
  # TODO(tibell): The pure-Python implementation support pickling of
  #   scalar containers in *some* cases. For now the cpp2 version
  #   throws an exception to avoid a segfault. Investigate if we
  #   want to support pickling of these fields.
  #
  # For more information see: https://b2.corp.google.com/u/0/issues/18677897
  # NOTE(review): the guard below proceeds only when Type() == 'cpp' AND
  # Version() != 2, which seems at odds with the "cpp2" wording above —
  # confirm against api_implementation's versioning.
  if (api_implementation.Type() != 'cpp' or
      api_implementation.Version() == 2):
    return
  m = message_module.TestAllTypes()
  with self.assertRaises(pickle.PickleError) as _:
    pickle.dumps(m.repeated_int32, pickle.HIGHEST_PROTOCOL)
def testSortEmptyRepeatedCompositeContainer(self, message_module):
  """Exercise a scenario that has led to segfaults in the past."""
  empty = message_module.TestAllTypes()
  # Sorting a never-populated repeated composite field must simply succeed.
  empty.repeated_nested_message.sort()
def testHasFieldOnRepeatedField(self, message_module):
  """Using HasField on a repeated field should raise an exception."""
  msg = message_module.TestAllTypes()
  # Presence is undefined for repeated fields; HasField must reject them.
  with self.assertRaises(ValueError):
    msg.HasField('repeated_int32')
def testRepeatedScalarFieldPop(self, message_module):
  """pop() mirrors list.pop(): last element by default, or by index."""
  m = message_module.TestAllTypes()
  with self.assertRaises(IndexError) as _:
    m.repeated_int32.pop()
  m.repeated_int32.extend(range(5))          # [0, 1, 2, 3, 4]
  self.assertEqual(4, m.repeated_int32.pop())      # -> [0, 1, 2, 3]
  self.assertEqual(0, m.repeated_int32.pop(0))     # -> [1, 2, 3]
  self.assertEqual(2, m.repeated_int32.pop(1))     # -> [1, 3]
  self.assertEqual([1, 3], m.repeated_int32)
def testRepeatedCompositeFieldPop(self, message_module):
  """pop() on a repeated composite behaves like list.pop()."""
  m = message_module.TestAllTypes()
  with self.assertRaises(IndexError) as _:
    m.repeated_nested_message.pop()
  # Populate five messages with bb = 0..4.
  for i in range(5):
    n = m.repeated_nested_message.add()
    n.bb = i
  self.assertEqual(4, m.repeated_nested_message.pop().bb)    # -> bb [0,1,2,3]
  self.assertEqual(0, m.repeated_nested_message.pop(0).bb)   # -> bb [1,2,3]
  self.assertEqual(2, m.repeated_nested_message.pop(1).bb)   # -> bb [1,3]
  self.assertEqual([1, 3], [n.bb for n in m.repeated_nested_message])
# Class to test proto2-only features (required, extensions, etc.)
class Proto2Test(BaseTestCase):
  """Tests for behavior that only exists under proto2 semantics."""

  def testFieldPresence(self):
    """HasField()/ClearField() track explicit presence of optional fields."""
    message = unittest_pb2.TestAllTypes()

    self.assertFalse(message.HasField("optional_int32"))
    self.assertFalse(message.HasField("optional_bool"))
    self.assertFalse(message.HasField("optional_nested_message"))

    # HasField() is only defined for fields that exist and are singular.
    with self.assertRaises(ValueError):
      message.HasField("field_doesnt_exist")

    with self.assertRaises(ValueError):
      message.HasField("repeated_int32")
    with self.assertRaises(ValueError):
      message.HasField("repeated_nested_message")

    # Unset fields read back as their type defaults.
    self.assertEqual(0, message.optional_int32)
    self.assertEqual(False, message.optional_bool)
    self.assertEqual(0, message.optional_nested_message.bb)

    # Fields are set even when setting the values to default values.
    message.optional_int32 = 0
    message.optional_bool = False
    message.optional_nested_message.bb = 0
    self.assertTrue(message.HasField("optional_int32"))
    self.assertTrue(message.HasField("optional_bool"))
    self.assertTrue(message.HasField("optional_nested_message"))

    # Set the fields to non-default values.
    message.optional_int32 = 5
    message.optional_bool = True
    message.optional_nested_message.bb = 15

    self.assertTrue(message.HasField("optional_int32"))
    self.assertTrue(message.HasField("optional_bool"))
    self.assertTrue(message.HasField("optional_nested_message"))

    # Clearing the fields unsets them and resets their value to default.
    message.ClearField("optional_int32")
    message.ClearField("optional_bool")
    message.ClearField("optional_nested_message")

    self.assertFalse(message.HasField("optional_int32"))
    self.assertFalse(message.HasField("optional_bool"))
    self.assertFalse(message.HasField("optional_nested_message"))
    self.assertEqual(0, message.optional_int32)
    self.assertEqual(False, message.optional_bool)
    self.assertEqual(0, message.optional_nested_message.bb)

  # TODO(tibell): The C++ implementations actually allows assignment
  # of unknown enum values to *scalar* fields (but not repeated
  # fields). Once checked enum fields becomes the default in the
  # Python implementation, the C++ implementation should follow suit.
  def testAssignInvalidEnum(self):
    """It should not be possible to assign an invalid enum number to an
    enum field."""
    m = unittest_pb2.TestAllTypes()
    with self.assertRaises(ValueError) as _:
      m.optional_nested_enum = 1234567
    self.assertRaises(ValueError, m.repeated_nested_enum.append, 1234567)

  def testGoldenExtensions(self):
    """Golden wire data must parse to the full set of extensions and
    re-serialize byte-for-byte."""
    golden_data = test_util.GoldenFileData('golden_message')
    golden_message = unittest_pb2.TestAllExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestAllExtensions()
    test_util.SetAllExtensions(all_set)
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, golden_message.SerializeToString())
    # A deep copy must preserve the wire form too.
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testGoldenPackedExtensions(self):
    """Same as testGoldenExtensions, for packed extension fields."""
    golden_data = test_util.GoldenFileData('golden_packed_fields_message')
    golden_message = unittest_pb2.TestPackedExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestPackedExtensions()
    test_util.SetAllPackedExtensions(all_set)
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, all_set.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testPickleIncompleteProto(self):
    """Pickling works even when required fields are missing."""
    golden_message = unittest_pb2.TestRequired(a=1)
    pickled_message = pickle.dumps(golden_message)

    unpickled_message = pickle.loads(pickled_message)
    self.assertEqual(unpickled_message, golden_message)
    self.assertEqual(unpickled_message.a, 1)
    # This is still an incomplete proto - so serializing should fail
    self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)

  # TODO(haberman): this isn't really a proto2-specific test except that this
  # message has a required field in it.  Should probably be factored out so
  # that we can test the other parts with proto3.
  def testParsingMerge(self):
    """Check the merge behavior when a required or optional field appears
    multiple times in the input."""
    messages = [
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes() ]
    messages[0].optional_int32 = 1
    messages[1].optional_int64 = 2
    messages[2].optional_int32 = 3
    messages[2].optional_string = 'hello'

    # The expected result of merging the three messages above.
    merged_message = unittest_pb2.TestAllTypes()
    merged_message.optional_int32 = 3
    merged_message.optional_int64 = 2
    merged_message.optional_string = 'hello'

    generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
    generator.field1.extend(messages)
    generator.field2.extend(messages)
    generator.field3.extend(messages)
    generator.ext1.extend(messages)
    generator.ext2.extend(messages)
    generator.group1.add().field1.MergeFrom(messages[0])
    generator.group1.add().field1.MergeFrom(messages[1])
    generator.group1.add().field1.MergeFrom(messages[2])
    generator.group2.add().field1.MergeFrom(messages[0])
    generator.group2.add().field1.MergeFrom(messages[1])
    generator.group2.add().field1.MergeFrom(messages[2])

    data = generator.SerializeToString()
    parsing_merge = unittest_pb2.TestParsingMerge()
    parsing_merge.ParseFromString(data)

    # Required and optional fields should be merged.
    self.assertEqual(parsing_merge.required_all_types, merged_message)
    self.assertEqual(parsing_merge.optional_all_types, merged_message)
    self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
                     merged_message)
    self.assertEqual(parsing_merge.Extensions[
                     unittest_pb2.TestParsingMerge.optional_ext],
                     merged_message)

    # Repeated fields should not be merged.
    self.assertEqual(len(parsing_merge.repeated_all_types), 3)
    self.assertEqual(len(parsing_merge.repeatedgroup), 3)
    self.assertEqual(len(parsing_merge.Extensions[
        unittest_pb2.TestParsingMerge.repeated_ext]), 3)

  def testPythonicInit(self):
    """Keyword-argument construction covers scalars, groups, messages
    given as dicts, enums given by name, and oneof members."""
    message = unittest_pb2.TestAllTypes(
        optional_int32=100,
        optional_fixed32=200,
        optional_float=300.5,
        optional_bytes=b'x',
        optionalgroup={'a': 400},
        optional_nested_message={'bb': 500},
        optional_foreign_message={},
        optional_nested_enum='BAZ',
        repeatedgroup=[{'a': 600},
                       {'a': 700}],
        repeated_nested_enum=['FOO', unittest_pb2.TestAllTypes.BAR],
        default_int32=800,
        oneof_string='y')
    self.assertIsInstance(message, unittest_pb2.TestAllTypes)
    self.assertEqual(100, message.optional_int32)
    self.assertEqual(200, message.optional_fixed32)
    self.assertEqual(300.5, message.optional_float)
    self.assertEqual(b'x', message.optional_bytes)
    self.assertEqual(400, message.optionalgroup.a)
    self.assertIsInstance(message.optional_nested_message,
                          unittest_pb2.TestAllTypes.NestedMessage)
    self.assertEqual(500, message.optional_nested_message.bb)
    # An empty dict still marks the message field as present.
    self.assertTrue(message.HasField('optional_foreign_message'))
    self.assertEqual(message.optional_foreign_message,
                     unittest_pb2.ForeignMessage())
    self.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                     message.optional_nested_enum)
    self.assertEqual(2, len(message.repeatedgroup))
    self.assertEqual(600, message.repeatedgroup[0].a)
    self.assertEqual(700, message.repeatedgroup[1].a)
    self.assertEqual(2, len(message.repeated_nested_enum))
    self.assertEqual(unittest_pb2.TestAllTypes.FOO,
                     message.repeated_nested_enum[0])
    self.assertEqual(unittest_pb2.TestAllTypes.BAR,
                     message.repeated_nested_enum[1])
    self.assertEqual(800, message.default_int32)
    self.assertEqual('y', message.oneof_string)
    # Fields not named in the constructor stay unset / default.
    self.assertFalse(message.HasField('optional_int64'))
    self.assertEqual(0, len(message.repeated_float))
    self.assertEqual(42, message.default_int64)

    message = unittest_pb2.TestAllTypes(optional_nested_enum=u'BAZ')
    self.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                     message.optional_nested_enum)

    # Bad field names, value types and enum labels are rejected eagerly.
    with self.assertRaises(ValueError):
      unittest_pb2.TestAllTypes(
          optional_nested_message={'INVALID_NESTED_FIELD': 17})

    with self.assertRaises(TypeError):
      unittest_pb2.TestAllTypes(
          optional_nested_message={'bb': 'INVALID_VALUE_TYPE'})

    with self.assertRaises(ValueError):
      unittest_pb2.TestAllTypes(optional_nested_enum='INVALID_LABEL')

    with self.assertRaises(ValueError):
      unittest_pb2.TestAllTypes(repeated_nested_enum='FOO')
# Class to test proto3-only features/behavior (updated field presence & enums)
class Proto3Test(BaseTestCase):
# Utility method for comparing equality with a map.
def assertMapIterEquals(self, map_iter, dict_value):
  """Assert map_iter yields exactly dict_value's (key, value) pairs,
  in any order."""
  # Avoid mutating caller's copy.
  dict_value = dict(dict_value)

  for k, v in map_iter:
    self.assertEqual(v, dict_value[k])
    del dict_value[k]

  # Every expected pair must have been consumed exactly once.
  self.assertEqual({}, dict_value)
def testFieldPresence(self):
  """Under proto3, presence is only tracked for submessage fields."""
  message = unittest_proto3_arena_pb2.TestAllTypes()

  # We can't test presence of non-repeated, non-submessage fields.
  with self.assertRaises(ValueError):
    message.HasField('optional_int32')
  with self.assertRaises(ValueError):
    message.HasField('optional_float')
  with self.assertRaises(ValueError):
    message.HasField('optional_string')
  with self.assertRaises(ValueError):
    message.HasField('optional_bool')

  # But we can still test presence of submessage fields.
  self.assertFalse(message.HasField('optional_nested_message'))

  # As with proto2, we can't test presence of fields that don't exist, or
  # repeated fields.
  with self.assertRaises(ValueError):
    message.HasField('field_doesnt_exist')

  with self.assertRaises(ValueError):
    message.HasField('repeated_int32')
  with self.assertRaises(ValueError):
    message.HasField('repeated_nested_message')

  # Fields should default to their type-specific default.
  self.assertEqual(0, message.optional_int32)
  self.assertEqual(0, message.optional_float)
  self.assertEqual('', message.optional_string)
  self.assertEqual(False, message.optional_bool)
  self.assertEqual(0, message.optional_nested_message.bb)

  # Setting a submessage should still return proper presence information.
  message.optional_nested_message.bb = 0
  self.assertTrue(message.HasField('optional_nested_message'))

  # Set the fields to non-default values.
  message.optional_int32 = 5
  message.optional_float = 1.1
  message.optional_string = 'abc'
  message.optional_bool = True
  message.optional_nested_message.bb = 15

  # Clearing the fields unsets them and resets their value to default.
  message.ClearField('optional_int32')
  message.ClearField('optional_float')
  message.ClearField('optional_string')
  message.ClearField('optional_bool')
  message.ClearField('optional_nested_message')

  self.assertEqual(0, message.optional_int32)
  self.assertEqual(0, message.optional_float)
  self.assertEqual('', message.optional_string)
  self.assertEqual(False, message.optional_bool)
  self.assertEqual(0, message.optional_nested_message.bb)
def testAssignUnknownEnum(self):
  """Assigning an unknown enum value is allowed and preserves the value."""
  m = unittest_proto3_arena_pb2.TestAllTypes()

  m.optional_nested_enum = 1234567
  self.assertEqual(1234567, m.optional_nested_enum)

  m.repeated_nested_enum.append(22334455)
  self.assertEqual(22334455, m.repeated_nested_enum[0])
  # Assignment is a different code path than append for the C++ impl.
  m.repeated_nested_enum[0] = 7654321
  self.assertEqual(7654321, m.repeated_nested_enum[0])
  serialized = m.SerializeToString()

  # The unknown values must survive a proto3 round trip...
  m2 = unittest_proto3_arena_pb2.TestAllTypes()
  m2.ParseFromString(serialized)
  self.assertEqual(1234567, m2.optional_nested_enum)
  self.assertEqual(7654321, m2.repeated_nested_enum[0])

  # ParseFromString in Proto2 should accept unknown enums too.
  m3 = unittest_pb2.TestAllTypes()
  m3.ParseFromString(serialized)
  m2.Clear()
  # ...and also survive being re-serialized by the proto2 message.
  m2.ParseFromString(m3.SerializeToString())
  self.assertEqual(1234567, m2.optional_nested_enum)
  self.assertEqual(7654321, m2.repeated_nested_enum[0])
# Map isn't really a proto3-only feature. But there is no proto2 equivalent
# of google/protobuf/map_unittest.proto right now, so it's not easy to
# test both with the same test like we do for the other proto2/proto3 tests.
# (google/protobuf/map_protobuf_unittest.proto is very different in the set
# of messages and fields it contains).
def testScalarMapDefaults(self):
  """Reading a missing map key returns the default AND inserts the key."""
  msg = map_unittest_pb2.TestMap()

  # Scalars start out unset.
  self.assertFalse(-123 in msg.map_int32_int32)
  self.assertFalse(-2**33 in msg.map_int64_int64)
  self.assertFalse(123 in msg.map_uint32_uint32)
  self.assertFalse(2**33 in msg.map_uint64_uint64)
  self.assertFalse(123 in msg.map_int32_double)
  self.assertFalse(False in msg.map_bool_bool)
  self.assertFalse('abc' in msg.map_string_string)
  self.assertFalse(111 in msg.map_int32_bytes)
  self.assertFalse(888 in msg.map_int32_enum)

  # Accessing an unset key returns the default.
  self.assertEqual(0, msg.map_int32_int32[-123])
  self.assertEqual(0, msg.map_int64_int64[-2**33])
  self.assertEqual(0, msg.map_uint32_uint32[123])
  self.assertEqual(0, msg.map_uint64_uint64[2**33])
  self.assertEqual(0.0, msg.map_int32_double[123])
  self.assertTrue(isinstance(msg.map_int32_double[123], float))
  self.assertEqual(False, msg.map_bool_bool[False])
  self.assertTrue(isinstance(msg.map_bool_bool[False], bool))
  self.assertEqual('', msg.map_string_string['abc'])
  self.assertEqual(b'', msg.map_int32_bytes[111])
  self.assertEqual(0, msg.map_int32_enum[888])

  # It also sets the value in the map
  self.assertTrue(-123 in msg.map_int32_int32)
  self.assertTrue(-2**33 in msg.map_int64_int64)
  self.assertTrue(123 in msg.map_uint32_uint32)
  self.assertTrue(2**33 in msg.map_uint64_uint64)
  self.assertTrue(123 in msg.map_int32_double)
  self.assertTrue(False in msg.map_bool_bool)
  self.assertTrue('abc' in msg.map_string_string)
  self.assertTrue(111 in msg.map_int32_bytes)
  self.assertTrue(888 in msg.map_int32_enum)

  self.assertIsInstance(msg.map_string_string['abc'], six.text_type)

  # Accessing an unset key still throws TypeError if the type of the key
  # is incorrect.
  with self.assertRaises(TypeError):
    msg.map_string_string[123]

  with self.assertRaises(TypeError):
    123 in msg.map_string_string
def testMapGet(self):
  """get() must return the default without inserting a key, even though
  the map otherwise has defaultdict-like [] semantics."""
  m = map_unittest_pb2.TestMap()

  # Scalar-valued map: get() never creates an entry.
  self.assertIsNone(m.map_int32_int32.get(5))
  self.assertEqual(10, m.map_int32_int32.get(5, 10))
  self.assertIsNone(m.map_int32_int32.get(5))

  m.map_int32_int32[5] = 15
  self.assertEqual(15, m.map_int32_int32.get(5))

  # Message-valued map behaves the same way.
  self.assertIsNone(m.map_int32_foreign_message.get(5))
  self.assertEqual(10, m.map_int32_foreign_message.get(5, 10))

  created = m.map_int32_foreign_message[5]
  self.assertIs(created, m.map_int32_foreign_message.get(5))
def testScalarMap(self):
  """Scalar-valued maps: assignment, type checking, and round-tripping."""
  msg = map_unittest_pb2.TestMap()

  self.assertEqual(0, len(msg.map_int32_int32))
  self.assertFalse(5 in msg.map_int32_int32)

  msg.map_int32_int32[-123] = -456
  msg.map_int64_int64[-2**33] = -2**34
  msg.map_uint32_uint32[123] = 456
  msg.map_uint64_uint64[2**33] = 2**34
  msg.map_string_string['abc'] = '123'
  msg.map_int32_enum[888] = 2

  # Map fields can never cause initialization errors.
  self.assertEqual([], msg.FindInitializationErrors())

  self.assertEqual(1, len(msg.map_string_string))

  # Bad key.
  with self.assertRaises(TypeError):
    msg.map_string_string[123] = '123'

  # Verify that trying to assign a bad key doesn't actually add a member to
  # the map.
  self.assertEqual(1, len(msg.map_string_string))

  # Bad value.
  with self.assertRaises(TypeError):
    msg.map_string_string['123'] = 123

  serialized = msg.SerializeToString()
  msg2 = map_unittest_pb2.TestMap()
  msg2.ParseFromString(serialized)

  # Bad key.
  with self.assertRaises(TypeError):
    msg2.map_string_string[123] = '123'

  # Bad value.
  with self.assertRaises(TypeError):
    msg2.map_string_string['123'] = 123

  # All entries survive the round trip.
  self.assertEqual(-456, msg2.map_int32_int32[-123])
  self.assertEqual(-2**34, msg2.map_int64_int64[-2**33])
  self.assertEqual(456, msg2.map_uint32_uint32[123])
  self.assertEqual(2**34, msg2.map_uint64_uint64[2**33])
  self.assertEqual('123', msg2.map_string_string['abc'])
  self.assertEqual(2, msg2.map_int32_enum[888])
def testStringUnicodeConversionInMap(self):
  """Bytes written into a string-keyed/valued map come back as unicode."""
  m = map_unittest_pb2.TestMap()

  raw_bytes = u'\u1234'.encode('utf8')
  m.map_string_string[raw_bytes] = raw_bytes

  key, value = list(m.map_string_string.items())[0]
  self.assertEqual(u'\u1234', key)
  self.assertEqual(u'\u1234', value)
  self.assertIsInstance(key, six.text_type)
  self.assertIsInstance(value, six.text_type)
def testMessageMap(self):
  """Message-valued maps: implicit creation, type errors, round-tripping."""
  msg = map_unittest_pb2.TestMap()

  self.assertEqual(0, len(msg.map_int32_foreign_message))
  self.assertFalse(5 in msg.map_int32_foreign_message)

  # Bare [] access creates the entry.
  msg.map_int32_foreign_message[123]
  # get_or_create() is an alias for getitem.
  msg.map_int32_foreign_message.get_or_create(-456)

  self.assertEqual(2, len(msg.map_int32_foreign_message))
  self.assertIn(123, msg.map_int32_foreign_message)
  self.assertIn(-456, msg.map_int32_foreign_message)
  self.assertEqual(2, len(msg.map_int32_foreign_message))

  # Bad key.
  with self.assertRaises(TypeError):
    msg.map_int32_foreign_message['123']

  # Can't assign directly to submessage.
  with self.assertRaises(ValueError):
    msg.map_int32_foreign_message[999] = msg.map_int32_foreign_message[123]

  # Verify that trying to assign a bad key doesn't actually add a member to
  # the map.
  self.assertEqual(2, len(msg.map_int32_foreign_message))

  serialized = msg.SerializeToString()
  msg2 = map_unittest_pb2.TestMap()
  msg2.ParseFromString(serialized)

  self.assertEqual(2, len(msg2.map_int32_foreign_message))
  self.assertIn(123, msg2.map_int32_foreign_message)
  self.assertIn(-456, msg2.map_int32_foreign_message)
  self.assertEqual(2, len(msg2.map_int32_foreign_message))
def testNestedMessageMapItemDelete(self):
  """del on map entries works, including entries re-added after a delete."""
  msg = map_unittest_pb2.TestMap()
  msg.map_int32_all_types[1].optional_nested_message.bb = 1
  del msg.map_int32_all_types[1]
  msg.map_int32_all_types[2].optional_nested_message.bb = 2
  self.assertEqual(1, len(msg.map_int32_all_types))
  msg.map_int32_all_types[1].optional_nested_message.bb = 1
  self.assertEqual(2, len(msg.map_int32_all_types))

  serialized = msg.SerializeToString()
  msg2 = map_unittest_pb2.TestMap()
  msg2.ParseFromString(serialized)
  keys = [1, 2]
  # The loop triggers PyErr_Occurred() in c extension.
  for key in keys:
    del msg2.map_int32_all_types[key]
def testMapByteSize(self):
  """ByteSize() must be recomputed after map mutations."""
  msg = map_unittest_pb2.TestMap()
  msg.map_int32_int32[1] = 1
  size = msg.ByteSize()
  # 128 takes one more varint byte to encode than 1 does.
  msg.map_int32_int32[1] = 128
  self.assertEqual(msg.ByteSize(), size + 1)

  msg.map_int32_foreign_message[19].c = 1
  size = msg.ByteSize()
  msg.map_int32_foreign_message[19].c = 128
  self.assertEqual(msg.ByteSize(), size + 1)
def testMergeFrom(self):
  """MergeFrom() on maps: source entries overwrite, others are kept."""
  msg = map_unittest_pb2.TestMap()
  msg.map_int32_int32[12] = 34
  msg.map_int32_int32[56] = 78
  msg.map_int64_int64[22] = 33
  msg.map_int32_foreign_message[111].c = 5
  msg.map_int32_foreign_message[222].c = 10

  msg2 = map_unittest_pb2.TestMap()
  msg2.map_int32_int32[12] = 55
  msg2.map_int64_int64[88] = 99
  msg2.map_int32_foreign_message[222].c = 15
  msg2.map_int32_foreign_message[222].d = 20
  old_map_value = msg2.map_int32_foreign_message[222]

  msg2.MergeFrom(msg)

  self.assertEqual(34, msg2.map_int32_int32[12])
  self.assertEqual(78, msg2.map_int32_int32[56])
  self.assertEqual(33, msg2.map_int64_int64[22])
  self.assertEqual(99, msg2.map_int64_int64[88])
  self.assertEqual(5, msg2.map_int32_foreign_message[111].c)
  self.assertEqual(10, msg2.map_int32_foreign_message[222].c)
  # Message-valued entries are replaced wholesale, not merged field-wise.
  self.assertFalse(msg2.map_int32_foreign_message[222].HasField('d'))
  if api_implementation.Type() != 'cpp':
    # During the call to MergeFrom(), the C++ implementation will have
    # deallocated the underlying message, but this is very difficult to detect
    # properly. The line below is likely to cause a segmentation fault.
    # With the Python implementation, old_map_value is just 'detached' from
    # the main message. Using it will not crash of course, but since it still
    # have a reference to the parent message I'm sure we can find interesting
    # ways to cause inconsistencies.
    self.assertEqual(15, old_map_value.c)

  # Verify that there is only one entry per key, even though the MergeFrom
  # may have internally created multiple entries for a single key in the
  # list representation.
  as_dict = {}
  for key in msg2.map_int32_foreign_message:
    self.assertFalse(key in as_dict)
    as_dict[key] = msg2.map_int32_foreign_message[key].c

  self.assertEqual({111: 5, 222: 10}, as_dict)

  # Special case: test that delete of item really removes the item, even if
  # there might have physically been duplicate keys due to the previous merge.
  # This is only a special case for the C++ implementation which stores the
  # map as an array.
  del msg2.map_int32_int32[12]
  self.assertFalse(12 in msg2.map_int32_int32)

  del msg2.map_int32_foreign_message[222]
  self.assertFalse(222 in msg2.map_int32_foreign_message)
def testMergeFromBadType(self):
  """MergeFrom() rejects arguments that are not the same message class."""
  msg = map_unittest_pb2.TestMap()
  expected_pattern = (
      r'Parameter to MergeFrom\(\) must be instance of same class: expected '
      r'.*TestMap got int\.')
  with self.assertRaisesRegexp(TypeError, expected_pattern):
    msg.MergeFrom(1)
def testCopyFromBadType(self):
  """CopyFrom() rejects arguments that are not the same message class."""
  msg = map_unittest_pb2.TestMap()
  expected_pattern = (
      r'Parameter to [A-Za-z]*From\(\) must be instance of same class: '
      r'expected .*TestMap got int\.')
  with self.assertRaisesRegexp(TypeError, expected_pattern):
    msg.CopyFrom(1)
def testIntegerMapWithLongs(self):
  """Map keys and values given as Python longs round-trip correctly."""
  original = map_unittest_pb2.TestMap()
  original.map_int32_int32[long(-123)] = long(-456)
  original.map_int64_int64[long(-2**33)] = long(-2**34)
  original.map_uint32_uint32[long(123)] = long(456)
  original.map_uint64_uint64[long(2**33)] = long(2**34)

  roundtripped = map_unittest_pb2.TestMap()
  roundtripped.ParseFromString(original.SerializeToString())

  self.assertEqual(-456, roundtripped.map_int32_int32[-123])
  self.assertEqual(-2**34, roundtripped.map_int64_int64[-2**33])
  self.assertEqual(456, roundtripped.map_uint32_uint32[123])
  self.assertEqual(2**34, roundtripped.map_uint64_uint64[2**33])
def testMapAssignmentCausesPresence(self):
  """Touching a nested map marks the containing submessage as present."""
  msg = map_unittest_pb2.TestMapSubmessage()
  msg.test_map.map_int32_int32[123] = 456
  serialized = msg.SerializeToString()
  msg2 = map_unittest_pb2.TestMapSubmessage()
  msg2.ParseFromString(serialized)

  self.assertEqual(msg, msg2)

  # Now test that various mutations of the map properly invalidate the
  # cached size of the submessage.
  msg.test_map.map_int32_int32[888] = 999
  serialized = msg.SerializeToString()
  msg2.ParseFromString(serialized)
  self.assertEqual(msg, msg2)

  msg.test_map.map_int32_int32.clear()
  serialized = msg.SerializeToString()
  msg2.ParseFromString(serialized)
  self.assertEqual(msg, msg2)
def testMapAssignmentCausesPresenceForSubmessages(self):
  """Same as above, for message-valued map entries."""
  msg = map_unittest_pb2.TestMapSubmessage()
  msg.test_map.map_int32_foreign_message[123].c = 5
  serialized = msg.SerializeToString()
  msg2 = map_unittest_pb2.TestMapSubmessage()
  msg2.ParseFromString(serialized)

  self.assertEqual(msg, msg2)

  # Now test that various mutations of the map properly invalidate the
  # cached size of the submessage.
  msg.test_map.map_int32_foreign_message[888].c = 7
  serialized = msg.SerializeToString()
  msg2.ParseFromString(serialized)
  self.assertEqual(msg, msg2)

  msg.test_map.map_int32_foreign_message[888].MergeFrom(
      msg.test_map.map_int32_foreign_message[123])
  serialized = msg.SerializeToString()
  msg2.ParseFromString(serialized)
  self.assertEqual(msg, msg2)

  msg.test_map.map_int32_foreign_message.clear()
  serialized = msg.SerializeToString()
  msg2.ParseFromString(serialized)
  self.assertEqual(msg, msg2)
def testModifyMapWhileIterating(self):
msg = map_unittest_pb2.TestMap()
string_string_iter = iter(msg.map_string_string)
int32_foreign_iter = iter(msg.map_int32_foreign_message)
msg.map_string_string['abc'] = '123'
msg.map_int32_foreign_message[5].c = 5
with self.assertRaises(RuntimeError):
for key in string_string_iter:
pass
with self.assertRaises(RuntimeError):
for key in int32_foreign_iter:
pass
def testSubmessageMap(self):
msg = map_unittest_pb2.TestMap()
submsg = msg.map_int32_foreign_message[111]
self.assertIs(submsg, msg.map_int32_foreign_message[111])
self.assertIsInstance(submsg, unittest_pb2.ForeignMessage)
submsg.c = 5
serialized = msg.SerializeToString()
msg2 = map_unittest_pb2.TestMap()
msg2.ParseFromString(serialized)
self.assertEqual(5, msg2.map_int32_foreign_message[111].c)
# Doesn't allow direct submessage assignment.
with self.assertRaises(ValueError):
msg.map_int32_foreign_message[88] = unittest_pb2.ForeignMessage()
def testMapIteration(self):
msg = map_unittest_pb2.TestMap()
for k, v in msg.map_int32_int32.items():
# Should not be reached.
self.assertTrue(False)
msg.map_int32_int32[2] = 4
msg.map_int32_int32[3] = 6
msg.map_int32_int32[4] = 8
self.assertEqual(3, len(msg.map_int32_int32))
matching_dict = {2: 4, 3: 6, 4: 8}
self.assertMapIterEquals(msg.map_int32_int32.items(), matching_dict)
def testMapItems(self):
# Map items used to have strange behaviors when use c extension. Because
# [] may reorder the map and invalidate any exsting iterators.
# TODO(jieluo): Check if [] reordering the map is a bug or intended
# behavior.
msg = map_unittest_pb2.TestMap()
msg.map_string_string['local_init_op'] = ''
msg.map_string_string['trainable_variables'] = ''
msg.map_string_string['variables'] = ''
msg.map_string_string['init_op'] = ''
msg.map_string_string['summaries'] = ''
items1 = msg.map_string_string.items()
items2 = msg.map_string_string.items()
self.assertEqual(items1, items2)
def testMapDeterministicSerialization(self):
golden_data = (b'r\x0c\n\x07init_op\x12\x01d'
b'r\n\n\x05item1\x12\x01e'
b'r\n\n\x05item2\x12\x01f'
b'r\n\n\x05item3\x12\x01g'
b'r\x0b\n\x05item4\x12\x02QQ'
b'r\x12\n\rlocal_init_op\x12\x01a'
b'r\x0e\n\tsummaries\x12\x01e'
b'r\x18\n\x13trainable_variables\x12\x01b'
b'r\x0e\n\tvariables\x12\x01c')
msg = map_unittest_pb2.TestMap()
msg.map_string_string['local_init_op'] = 'a'
msg.map_string_string['trainable_variables'] = 'b'
msg.map_string_string['variables'] = 'c'
msg.map_string_string['init_op'] = 'd'
msg.map_string_string['summaries'] = 'e'
msg.map_string_string['item1'] = 'e'
msg.map_string_string['item2'] = 'f'
msg.map_string_string['item3'] = 'g'
msg.map_string_string['item4'] = 'QQ'
# If deterministic serialization is not working correctly, this will be
# "flaky" depending on the exact python dict hash seed.
#
# Fortunately, there are enough items in this map that it is extremely
# unlikely to ever hit the "right" in-order combination, so the test
# itself should fail reliably.
self.assertEqual(golden_data, msg.SerializeToString(deterministic=True))
def testMapIterationClearMessage(self):
# Iterator needs to work even if message and map are deleted.
msg = map_unittest_pb2.TestMap()
msg.map_int32_int32[2] = 4
msg.map_int32_int32[3] = 6
msg.map_int32_int32[4] = 8
it = msg.map_int32_int32.items()
del msg
matching_dict = {2: 4, 3: 6, 4: 8}
self.assertMapIterEquals(it, matching_dict)
def testMapConstruction(self):
msg = map_unittest_pb2.TestMap(map_int32_int32={1: 2, 3: 4})
self.assertEqual(2, msg.map_int32_int32[1])
self.assertEqual(4, msg.map_int32_int32[3])
msg = map_unittest_pb2.TestMap(
map_int32_foreign_message={3: unittest_pb2.ForeignMessage(c=5)})
self.assertEqual(5, msg.map_int32_foreign_message[3].c)
def testMapValidAfterFieldCleared(self):
# Map needs to work even if field is cleared.
# For the C++ implementation this tests the correctness of
# ScalarMapContainer::Release()
msg = map_unittest_pb2.TestMap()
int32_map = msg.map_int32_int32
int32_map[2] = 4
int32_map[3] = 6
int32_map[4] = 8
msg.ClearField('map_int32_int32')
self.assertEqual(b'', msg.SerializeToString())
matching_dict = {2: 4, 3: 6, 4: 8}
self.assertMapIterEquals(int32_map.items(), matching_dict)
def testMessageMapValidAfterFieldCleared(self):
# Map needs to work even if field is cleared.
# For the C++ implementation this tests the correctness of
# ScalarMapContainer::Release()
msg = map_unittest_pb2.TestMap()
int32_foreign_message = msg.map_int32_foreign_message
int32_foreign_message[2].c = 5
msg.ClearField('map_int32_foreign_message')
self.assertEqual(b'', msg.SerializeToString())
self.assertTrue(2 in int32_foreign_message.keys())
def testMapIterInvalidatedByClearField(self):
# Map iterator is invalidated when field is cleared.
# But this case does need to not crash the interpreter.
# For the C++ implementation this tests the correctness of
# ScalarMapContainer::Release()
msg = map_unittest_pb2.TestMap()
it = iter(msg.map_int32_int32)
msg.ClearField('map_int32_int32')
with self.assertRaises(RuntimeError):
for _ in it:
pass
it = iter(msg.map_int32_foreign_message)
msg.ClearField('map_int32_foreign_message')
with self.assertRaises(RuntimeError):
for _ in it:
pass
def testMapDelete(self):
msg = map_unittest_pb2.TestMap()
self.assertEqual(0, len(msg.map_int32_int32))
msg.map_int32_int32[4] = 6
self.assertEqual(1, len(msg.map_int32_int32))
with self.assertRaises(KeyError):
del msg.map_int32_int32[88]
del msg.map_int32_int32[4]
self.assertEqual(0, len(msg.map_int32_int32))
def testMapsAreMapping(self):
msg = map_unittest_pb2.TestMap()
self.assertIsInstance(msg.map_int32_int32, collections.Mapping)
self.assertIsInstance(msg.map_int32_int32, collections.MutableMapping)
self.assertIsInstance(msg.map_int32_foreign_message, collections.Mapping)
self.assertIsInstance(msg.map_int32_foreign_message,
collections.MutableMapping)
def testMapFindInitializationErrorsSmokeTest(self):
msg = map_unittest_pb2.TestMap()
msg.map_string_string['abc'] = '123'
msg.map_int32_int32[35] = 64
msg.map_string_foreign_message['foo'].c = 5
self.assertEqual(0, len(msg.FindInitializationErrors()))
class ValidTypeNamesTest(BaseTestCase):
def assertImportFromName(self, msg, base_name):
# Parse <type 'module.class_name'> to extra 'some.name' as a string.
tp_name = str(type(msg)).split("'")[1]
valid_names = ('Repeated%sContainer' % base_name,
'Repeated%sFieldContainer' % base_name)
self.assertTrue(any(tp_name.endswith(v) for v in valid_names),
'%r does end with any of %r' % (tp_name, valid_names))
parts = tp_name.split('.')
class_name = parts[-1]
module_name = '.'.join(parts[:-1])
__import__(module_name, fromlist=[class_name])
def testTypeNamesCanBeImported(self):
# If import doesn't work, pickling won't work either.
pb = unittest_pb2.TestAllTypes()
self.assertImportFromName(pb.repeated_int32, 'Scalar')
self.assertImportFromName(pb.repeated_nested_message, 'Composite')
class PackedFieldTest(BaseTestCase):
def setMessage(self, message):
message.repeated_int32.append(1)
message.repeated_int64.append(1)
message.repeated_uint32.append(1)
message.repeated_uint64.append(1)
message.repeated_sint32.append(1)
message.repeated_sint64.append(1)
message.repeated_fixed32.append(1)
message.repeated_fixed64.append(1)
message.repeated_sfixed32.append(1)
message.repeated_sfixed64.append(1)
message.repeated_float.append(1.0)
message.repeated_double.append(1.0)
message.repeated_bool.append(True)
message.repeated_nested_enum.append(1)
def testPackedFields(self):
message = packed_field_test_pb2.TestPackedTypes()
self.setMessage(message)
golden_data = (b'\x0A\x01\x01'
b'\x12\x01\x01'
b'\x1A\x01\x01'
b'\x22\x01\x01'
b'\x2A\x01\x02'
b'\x32\x01\x02'
b'\x3A\x04\x01\x00\x00\x00'
b'\x42\x08\x01\x00\x00\x00\x00\x00\x00\x00'
b'\x4A\x04\x01\x00\x00\x00'
b'\x52\x08\x01\x00\x00\x00\x00\x00\x00\x00'
b'\x5A\x04\x00\x00\x80\x3f'
b'\x62\x08\x00\x00\x00\x00\x00\x00\xf0\x3f'
b'\x6A\x01\x01'
b'\x72\x01\x01')
self.assertEqual(golden_data, message.SerializeToString())
def testUnpackedFields(self):
message = packed_field_test_pb2.TestUnpackedTypes()
self.setMessage(message)
golden_data = (b'\x08\x01'
b'\x10\x01'
b'\x18\x01'
b'\x20\x01'
b'\x28\x02'
b'\x30\x02'
b'\x3D\x01\x00\x00\x00'
b'\x41\x01\x00\x00\x00\x00\x00\x00\x00'
b'\x4D\x01\x00\x00\x00'
b'\x51\x01\x00\x00\x00\x00\x00\x00\x00'
b'\x5D\x00\x00\x80\x3f'
b'\x61\x00\x00\x00\x00\x00\x00\xf0\x3f'
b'\x68\x01'
b'\x70\x01')
self.assertEqual(golden_data, message.SerializeToString())
@unittest.skipIf(api_implementation.Type() != 'cpp',
'explicit tests of the C++ implementation')
class OversizeProtosTest(BaseTestCase):
@classmethod
def setUpClass(cls):
# At the moment, reference cycles between DescriptorPool and Message classes
# are not detected and these objects are never freed.
# To avoid errors with ReferenceLeakChecker, we create the class only once.
file_desc = """
name: "f/f.msg2"
package: "f"
message_type {
name: "msg1"
field {
name: "payload"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
}
}
message_type {
name: "msg2"
field {
name: "field"
number: 1
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: "msg1"
}
}
"""
pool = descriptor_pool.DescriptorPool()
desc = descriptor_pb2.FileDescriptorProto()
text_format.Parse(file_desc, desc)
pool.Add(desc)
cls.proto_cls = message_factory.MessageFactory(pool).GetPrototype(
pool.FindMessageTypeByName('f.msg2'))
def setUp(self):
self.p = self.proto_cls()
self.p.field.payload = 'c' * (1024 * 1024 * 64 + 1)
self.p_serialized = self.p.SerializeToString()
def testAssertOversizeProto(self):
from google.protobuf.pyext._message import SetAllowOversizeProtos
SetAllowOversizeProtos(False)
q = self.proto_cls()
try:
q.ParseFromString(self.p_serialized)
except message.DecodeError as e:
self.assertEqual(str(e), 'Error parsing message')
def testSucceedOversizeProto(self):
from google.protobuf.pyext._message import SetAllowOversizeProtos
SetAllowOversizeProtos(True)
q = self.proto_cls()
q.ParseFromString(self.p_serialized)
self.assertEqual(self.p.field.payload, q.field.payload)
if __name__ == '__main__':
unittest.main()
|
axsauze/eventsfinder | refs/heads/master | django/contrib/staticfiles/models.py | 12133432 | |
sghai/robottelo | refs/heads/master | tests/foreman/api/test_docker.py | 1 | # -*- encoding: utf-8 -*-
"""Unit tests for the Docker feature.
:Requirement: Docker
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: API
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from random import choice, randint, shuffle
from time import sleep
from fauxfactory import gen_string, gen_url
from nailgun import entities
from requests.exceptions import HTTPError
from robottelo.api.utils import promote
from robottelo.cleanup import vm_cleanup
from robottelo.config import settings
from robottelo.constants import DOCKER_REGISTRY_HUB
from robottelo.datafactory import (
generate_strings_list,
invalid_docker_upstream_names,
valid_data_list,
valid_docker_repository_names,
valid_docker_upstream_names,
)
from robottelo.decorators import (
bz_bug_is_open,
run_in_one_thread,
run_only_on,
skip_if_bug_open,
skip_if_not_set,
tier1,
tier2,
upgrade
)
from robottelo.test import APITestCase
from robottelo.vm import VirtualMachine
DOCKER_PROVIDER = 'Docker'
def _create_repository(product, name=None, upstream_name=None):
"""Creates a Docker-based repository.
:param product: A ``Product`` object.
:param str name: Name for the repository. If ``None`` then a random
value will be generated.
:param str upstream_name: A valid name of an existing upstream repository.
If ``None`` then defaults to ``busybox``.
:return: A ``Repository`` object.
"""
if name is None:
name = choice(generate_strings_list(15, ['numeric', 'html']))
if upstream_name is None:
upstream_name = u'busybox'
return entities.Repository(
content_type=u'docker',
docker_upstream_name=upstream_name,
name=name,
product=product,
url=DOCKER_REGISTRY_HUB,
).create()
class DockerRepositoryTestCase(APITestCase):
"""Tests specific to performing CRUD methods against ``Docker``
repositories.
"""
@classmethod
def setUpClass(cls):
"""Create an organization and product which can be re-used in tests."""
super(DockerRepositoryTestCase, cls).setUpClass()
cls.org = entities.Organization().create()
@tier1
@run_only_on('sat')
def test_positive_create_with_name(self):
"""Create one Docker-type repository
:id: 3360aab2-74f3-4f6e-a083-46498ceacad2
:expectedresults: A repository is created with a Docker upstream
repository.
:CaseImportance: Critical
"""
for name in valid_docker_repository_names():
with self.subTest(name):
repo = _create_repository(
entities.Product(organization=self.org).create(),
name,
)
self.assertEqual(repo.name, name)
self.assertEqual(repo.docker_upstream_name, 'busybox')
self.assertEqual(repo.content_type, 'docker')
@tier1
@run_only_on('sat')
def test_positive_create_with_upstream_name(self):
"""Create a Docker-type repository with a valid docker upstream
name
:id: 742a2118-0ab2-4e63-b978-88fe9f52c034
:expectedresults: A repository is created with the specified upstream
name.
:CaseImportance: Critical
"""
for upstream_name in valid_docker_upstream_names():
with self.subTest(upstream_name):
repo = _create_repository(
entities.Product(organization=self.org).create(),
upstream_name=upstream_name,
)
self.assertEqual(repo.docker_upstream_name, upstream_name)
self.assertEqual(repo.content_type, u'docker')
@tier1
@run_only_on('sat')
def test_negative_create_with_invalid_upstream_name(self):
"""Create a Docker-type repository with a invalid docker
upstream name.
:id: 2c5abb4a-e50b-427a-81d2-57eaf8f57a0f
:expectedresults: A repository is not created and a proper error is
raised.
:CaseImportance: Critical
"""
product = entities.Product(organization=self.org).create()
for upstream_name in invalid_docker_upstream_names():
with self.subTest(upstream_name):
with self.assertRaises(HTTPError):
_create_repository(product, upstream_name=upstream_name)
@tier2
@run_only_on('sat')
def test_positive_create_repos_using_same_product(self):
"""Create multiple Docker-type repositories
:id: 4a6929fc-5111-43ff-940c-07a754828630
:expectedresults: Multiple docker repositories are created with a
Docker usptream repository and they all belong to the same product.
:CaseLevel: Integration
"""
product = entities.Product(organization=self.org).create()
for _ in range(randint(2, 5)):
repo = _create_repository(product)
product = product.read()
self.assertIn(repo.id, [repo_.id for repo_ in product.repository])
@tier2
@run_only_on('sat')
def test_positive_create_repos_using_multiple_products(self):
"""Create multiple Docker-type repositories on multiple products
:id: 5a65d20b-d3b5-4bd7-9c8f-19c8af190558
:expectedresults: Multiple docker repositories are created with a
Docker upstream repository and they all belong to their respective
products.
:CaseLevel: Integration
"""
for _ in range(randint(2, 5)):
product = entities.Product(organization=self.org).create()
for _ in range(randint(2, 3)):
repo = _create_repository(product)
product = product.read()
self.assertIn(
repo.id,
[repo_.id for repo_ in product.repository],
)
@tier2
@run_only_on('sat')
def test_positive_sync(self):
"""Create and sync a Docker-type repository
:id: 80fbcd84-1c6f-444f-a44e-7d2738a0cba2
:expectedresults: A repository is created with a Docker repository and
it is synchronized.
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create()
)
repo.sync()
repo = repo.read()
self.assertGreaterEqual(repo.content_counts['docker_manifest'], 1)
@tier1
@run_only_on('sat')
def test_positive_update_name(self):
"""Create a Docker-type repository and update its name.
:id: 7967e6b5-c206-4ad0-bcf5-64a7ce85233b
:expectedresults: A repository is created with a Docker upstream
repository and that its name can be updated.
:CaseImportance: Critical
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
# Update the repository name to random value
for new_name in valid_docker_repository_names():
with self.subTest(new_name):
repo.name = new_name
repo = repo.update()
self.assertEqual(repo.name, new_name)
@tier1
@run_only_on('sat')
def test_positive_update_upstream_name(self):
"""Create a Docker-type repository and update its upstream name.
:id: 4e2fb78d-0b6a-4455-8869-8eaf9d4a61b0
:expectedresults: A repository is created with a Docker upstream
repository and that its upstream name can be updated.
:CaseImportance: Critical
"""
new_upstream_name = u'fedora/ssh'
repo = _create_repository(
entities.Product(organization=self.org).create())
self.assertNotEqual(repo.docker_upstream_name, new_upstream_name)
# Update the repository upstream name
repo.docker_upstream_name = new_upstream_name
repo = repo.update()
self.assertEqual(repo.docker_upstream_name, new_upstream_name)
@tier2
@run_only_on('sat')
@skip_if_bug_open('bugzilla', 1489322)
def test_positive_update_url(self):
"""Create a Docker-type repository and update its URL.
:id: 6a588e65-bf1d-4ca9-82ce-591f9070215f
:expectedresults: A repository is created with a Docker upstream
repository and that its URL can be updated.
:BZ: 1489322
:CaseLevel: Integration
"""
new_url = gen_url()
repo = _create_repository(
entities.Product(organization=self.org).create())
self.assertEqual(repo.url, DOCKER_REGISTRY_HUB)
# Update the repository URL
repo.url = new_url
repo = repo.update()
self.assertEqual(repo.url, new_url)
self.assertNotEqual(repo.url, DOCKER_REGISTRY_HUB)
@tier1
@run_only_on('sat')
def test_positive_delete(self):
"""Create and delete a Docker-type repository
:id: 92df93cb-9de2-40fa-8451-b8c1ba8f45be
:expectedresults: A repository is created with a Docker upstream
repository and then deleted.
:CaseImportance: Critical
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
# Delete it
repo.delete()
with self.assertRaises(HTTPError):
repo.read()
@tier2
@run_only_on('sat')
def test_positive_delete_random_repo(self):
"""Create Docker-type repositories on multiple products and
delete a random repository from a random product.
:id: cbc2792d-cf81-41f7-8889-001a27e4dd66
:expectedresults: Random repository can be deleted from random product
without altering the other products.
:CaseLevel: Integration
"""
repos = []
products = [
entities.Product(organization=self.org).create()
for _
in range(randint(2, 5))
]
for product in products:
repo = _create_repository(product)
self.assertEqual(repo.content_type, u'docker')
repos.append(repo)
# Delete a random repository
shuffle(repos)
repo = repos.pop()
repo.delete()
with self.assertRaises(HTTPError):
repo.read()
# Check if others repositories are not touched
for repo in repos:
repo = repo.read()
self.assertIn(repo.product.id, [prod.id for prod in products])
class DockerContentViewTestCase(APITestCase):
"""Tests specific to using ``Docker`` repositories with Content Views."""
@classmethod
def setUpClass(cls):
"""Create an organization which can be re-used in tests."""
super(DockerContentViewTestCase, cls).setUpClass()
cls.org = entities.Organization().create()
@tier2
@run_only_on('sat')
def test_positive_add_docker_repo(self):
"""Add one Docker-type repository to a non-composite content view
:id: a065822f-bb41-4fc9-bf5c-65814ca11b2d
:expectedresults: A repository is created with a Docker repository and
the product is added to a non-composite content view
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
# Create content view and associate docker repo
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertIn(repo.id, [repo_.id for repo_ in content_view.repository])
@tier2
@run_only_on('sat')
def test_positive_add_docker_repos(self):
"""Add multiple Docker-type repositories to a
non-composite content view.
:id: 08eed081-2003-4475-95ac-553a56b83997
:expectedresults: Repositories are created with Docker upstream repos
and the product is added to a non-composite content view.
:CaseLevel: Integration
"""
product = entities.Product(organization=self.org).create()
repos = [
_create_repository(product, name=gen_string('alpha'))
for _
in range(randint(2, 5))
]
self.assertEqual(len(product.read().repository), len(repos))
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = repos
content_view = content_view.update(['repository'])
self.assertEqual(len(content_view.repository), len(repos))
content_view.repository = [
repo.read() for repo in content_view.repository
]
self.assertEqual(
{repo.id for repo in repos},
{repo.id for repo in content_view.repository}
)
for repo in repos + content_view.repository:
self.assertEqual(repo.content_type, u'docker')
self.assertEqual(repo.docker_upstream_name, u'busybox')
@tier2
@run_only_on('sat')
def test_positive_add_synced_docker_repo(self):
"""Create and sync a Docker-type repository
:id: 3c7d6f17-266e-43d3-99f8-13bf0251eca6
:expectedresults: A repository is created with a Docker repository and
it is synchronized.
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
repo.sync()
repo = repo.read()
self.assertGreaterEqual(repo.content_counts['docker_manifest'], 1)
# Create content view and associate docker repo
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertIn(repo.id, [repo_.id for repo_ in content_view.repository])
@tier2
@run_only_on('sat')
def test_positive_add_docker_repo_to_ccv(self):
"""Add one Docker-type repository to a composite content view
:id: fe278275-2bb2-4d68-8624-f0cfd63ecb57
:expectedresults: A repository is created with a Docker repository and
the product is added to a content view which is then added to a
composite content view.
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
# Create content view and associate docker repo
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertIn(repo.id, [repo_.id for repo_ in content_view.repository])
# Publish it and grab its version ID (there should only be one version)
content_view.publish()
content_view = content_view.read()
self.assertEqual(len(content_view.version), 1)
# Create composite content view and associate content view to it
comp_content_view = entities.ContentView(
composite=True,
organization=self.org,
).create()
comp_content_view.component = content_view.version
comp_content_view = comp_content_view.update(['component'])
self.assertIn(
content_view.version[0].id,
[component.id for component in comp_content_view.component]
)
@tier2
@run_only_on('sat')
def test_positive_add_docker_repos_to_ccv(self):
"""Add multiple Docker-type repositories to a composite
content view.
:id: 3824ccae-fb59-4f63-a1ab-a4f2419fcadd
:expectedresults: One repository is created with a Docker upstream
repository and the product is added to a random number of content
views which are then added to a composite content view.
:CaseLevel: Integration
"""
cv_versions = []
product = entities.Product(organization=self.org).create()
for _ in range(randint(2, 5)):
# Create content view and associate docker repo
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
repo = _create_repository(product)
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertIn(
repo.id,
[repo_.id for repo_ in content_view.repository]
)
# Publish it and grab its version ID (there should be one version)
content_view.publish()
content_view = content_view.read()
cv_versions.append(content_view.version[0])
# Create composite content view and associate content view to it
comp_content_view = entities.ContentView(
composite=True,
organization=self.org,
).create()
for cv_version in cv_versions:
comp_content_view.component.append(cv_version)
comp_content_view = comp_content_view.update(['component'])
self.assertIn(
cv_version.id,
[component.id for component in comp_content_view.component]
)
@tier2
@run_only_on('sat')
def test_positive_publish_with_docker_repo(self):
"""Add Docker-type repository to content view and publish it once.
:id: 86a73e96-ead6-41fb-8095-154a0b83e344
:expectedresults: One repository is created with a Docker upstream
repository and the product is added to a content view which is then
published only once.
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertIn(repo.id, [repo_.id for repo_ in content_view.repository])
# Not published yet?
content_view = content_view.read()
self.assertIsNone(content_view.last_published)
self.assertEqual(float(content_view.next_version), 1.0)
# Publish it and check that it was indeed published.
content_view.publish()
content_view = content_view.read()
self.assertIsNotNone(content_view.last_published)
self.assertGreater(float(content_view.next_version), 1.0)
@tier2
@run_only_on('sat')
@skip_if_bug_open('bugzilla', 1217635)
def test_positive_publish_with_docker_repo_composite(self):
"""Add Docker-type repository to composite content view and
publish it once.
:id: 103ebee0-1978-4fc5-a11e-4dcdbf704185
:expectedresults: One repository is created with an upstream repository
and the product is added to a content view which is then published
only once and then added to a composite content view which is also
published only once.
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertIn(repo.id, [repo_.id for repo_ in content_view.repository])
# Not published yet?
content_view = content_view.read()
self.assertIsNone(content_view.last_published)
self.assertEqual(float(content_view.next_version), 1.0)
# Publish it and check that it was indeed published.
content_view.publish()
content_view = content_view.read()
self.assertIsNotNone(content_view.last_published)
self.assertGreater(float(content_view.next_version), 1.0)
# Create composite content view…
comp_content_view = entities.ContentView(
composite=True,
organization=self.org,
).create()
comp_content_view.component = [content_view.version[0]]
comp_content_view = comp_content_view.update(['component'])
self.assertIn(
content_view.version[0].id, # pylint:disable=no-member
[component.id for component in comp_content_view.component]
)
# … publish it…
comp_content_view.publish()
# … and check that it was indeed published
comp_content_view = comp_content_view.read()
self.assertIsNotNone(comp_content_view.last_published)
self.assertGreater(float(comp_content_view.next_version), 1.0)
@tier2
@run_only_on('sat')
def test_positive_publish_multiple_with_docker_repo(self):
"""Add Docker-type repository to content view and publish it
multiple times.
:id: e2caad64-e9f4-422d-a1ab-f64c286d82ff
:expectedresults: One repository is created with a Docker upstream
repository and the product is added to a content view which is then
published multiple times.
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertEqual(
[repo.id], [repo_.id for repo_ in content_view.repository])
self.assertIsNone(content_view.read().last_published)
publish_amount = randint(2, 5)
for _ in range(publish_amount):
content_view.publish()
content_view = content_view.read()
self.assertIsNotNone(content_view.last_published)
self.assertEqual(len(content_view.version), publish_amount)
@tier2
@run_only_on('sat')
def test_positive_publish_multiple_with_docker_repo_composite(self):
"""Add Docker-type repository to content view and publish it
multiple times.
:id: 77a5957a-7415-41c3-be68-fa706fee7c98
:expectedresults: One repository is created with a Docker upstream
repository and the product is added to a content view which is then
added to a composite content view which is then published multiple
times.
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertEqual(
[repo.id], [repo_.id for repo_ in content_view.repository])
self.assertIsNone(content_view.read().last_published)
content_view.publish()
content_view = content_view.read()
self.assertIsNotNone(content_view.last_published)
comp_content_view = entities.ContentView(
composite=True,
organization=self.org,
).create()
comp_content_view.component = [content_view.version[0]]
comp_content_view = comp_content_view.update(['component'])
self.assertEqual(
[content_view.version[0].id],
[comp.id for comp in comp_content_view.component],
)
self.assertIsNone(comp_content_view.last_published)
publish_amount = randint(2, 5)
for _ in range(publish_amount):
comp_content_view.publish()
comp_content_view = comp_content_view.read()
self.assertIsNotNone(comp_content_view.last_published)
self.assertEqual(len(comp_content_view.version), publish_amount)
@tier2
@run_only_on('sat')
def test_positive_promote_with_docker_repo(self):
"""Add Docker-type repository to content view and publish it.
Then promote it to the next available lifecycle-environment.
:id: 5ab7d7f1-fb13-4b83-b228-a6293be36195
:expectedresults: Docker-type repository is promoted to content view
found in the specific lifecycle-environment.
:CaseLevel: Integration
"""
lce = entities.LifecycleEnvironment(organization=self.org).create()
repo = _create_repository(
entities.Product(organization=self.org).create())
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertEqual(
[repo.id], [repo_.id for repo_ in content_view.repository])
content_view.publish()
content_view = content_view.read()
cvv = content_view.version[0].read()
self.assertEqual(len(cvv.environment), 1)
promote(cvv, lce.id)
self.assertEqual(len(cvv.read().environment), 2)
@tier2
@run_only_on('sat')
def test_positive_promote_multiple_with_docker_repo(self):
"""Add Docker-type repository to content view and publish it.
Then promote it to multiple available lifecycle-environments.
:id: 7b0cbc95-5f63-47f3-9048-e6917078be73
:expectedresults: Docker-type repository is promoted to content view
found in the specific lifecycle-environments.
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertEqual(
[repo.id], [repo_.id for repo_ in content_view.repository])
content_view.publish()
cvv = content_view.read().version[0]
self.assertEqual(len(cvv.read().environment), 1)
for i in range(1, randint(3, 6)):
lce = entities.LifecycleEnvironment(organization=self.org).create()
promote(cvv, lce.id)
self.assertEqual(len(cvv.read().environment), i+1)
@tier2
@run_only_on('sat')
def test_positive_promote_with_docker_repo_composite(self):
"""Add Docker-type repository to content view and publish it.
Then add that content view to composite one. Publish and promote that
composite content view to the next available lifecycle-environment.
:id: e903c7b2-7722-4a9e-bb69-99bbd3c23946
:expectedresults: Docker-type repository is promoted to content view
found in the specific lifecycle-environment.
:CaseLevel: Integration
"""
lce = entities.LifecycleEnvironment(organization=self.org).create()
repo = _create_repository(
entities.Product(organization=self.org).create())
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertEqual(
[repo.id], [repo_.id for repo_ in content_view.repository])
content_view.publish()
cvv = content_view.read().version[0].read()
comp_content_view = entities.ContentView(
composite=True,
organization=self.org,
).create()
comp_content_view.component = [cvv]
comp_content_view = comp_content_view.update(['component'])
self.assertEqual(cvv.id, comp_content_view.component[0].id)
comp_content_view.publish()
comp_cvv = comp_content_view.read().version[0]
self.assertEqual(len(comp_cvv.read().environment), 1)
promote(comp_cvv, lce.id)
self.assertEqual(len(comp_cvv.read().environment), 2)
@upgrade
@tier2
@run_only_on('sat')
def test_positive_promote_multiple_with_docker_repo_composite(self):
"""Add Docker-type repository to content view and publish it.
Then add that content view to composite one. Publish and promote that
composite content view to the multiple available lifecycle-environments
:id: 91ac0f4a-8974-47e2-a1d6-7d734aa4ad46
:expectedresults: Docker-type repository is promoted to content view
found in the specific lifecycle-environments.
:CaseLevel: Integration
"""
repo = _create_repository(
entities.Product(organization=self.org).create())
content_view = entities.ContentView(
composite=False,
organization=self.org,
).create()
content_view.repository = [repo]
content_view = content_view.update(['repository'])
self.assertEqual(
[repo.id], [repo_.id for repo_ in content_view.repository])
content_view.publish()
cvv = content_view.read().version[0].read()
comp_content_view = entities.ContentView(
composite=True,
organization=self.org,
).create()
comp_content_view.component = [cvv]
comp_content_view = comp_content_view.update(['component'])
self.assertEqual(cvv.id, comp_content_view.component[0].id)
comp_content_view.publish()
comp_cvv = comp_content_view.read().version[0]
self.assertEqual(len(comp_cvv.read().environment), 1)
for i in range(1, randint(3, 6)):
lce = entities.LifecycleEnvironment(organization=self.org).create()
promote(comp_cvv, lce.id)
self.assertEqual(len(comp_cvv.read().environment), i + 1)
class DockerActivationKeyTestCase(APITestCase):
    """Tests specific to adding ``Docker`` repositories to Activation Keys."""
    @classmethod
    def setUpClass(cls):
        """Create necessary objects which can be re-used in tests."""
        super(DockerActivationKeyTestCase, cls).setUpClass()
        # Shared fixtures: one org, one lifecycle environment, and a Docker
        # repo wrapped in a published + promoted content view.
        cls.org = entities.Organization().create()
        cls.lce = entities.LifecycleEnvironment(organization=cls.org).create()
        cls.repo = _create_repository(
            entities.Product(organization=cls.org).create())
        content_view = entities.ContentView(
            composite=False,
            organization=cls.org,
        ).create()
        content_view.repository = [cls.repo]
        cls.content_view = content_view.update(['repository'])
        cls.content_view.publish()
        # First published version, promoted into cls.lce so activation keys
        # can bind to (content view, environment) pairs.
        cls.cvv = content_view.read().version[0].read()
        promote(cls.cvv, cls.lce.id)
    @tier2
    @run_only_on('sat')
    def test_positive_add_docker_repo_cv(self):
        """Add Docker-type repository to a non-composite content view
        and publish it. Then create an activation key and associate it with the
        Docker content view.
        :id: ce4ae928-49c7-4782-a032-08885050dd83
        :expectedresults: Docker-based content view can be added to activation
            key
        :CaseLevel: Integration
        """
        ak = entities.ActivationKey(
            content_view=self.content_view,
            environment=self.lce,
            organization=self.org,
        ).create()
        # The key should reference our content view, which in turn holds
        # the Docker repository created in setUpClass.
        self.assertEqual(ak.content_view.id, self.content_view.id)
        self.assertEqual(ak.content_view.read().repository[0].id, self.repo.id)
    @tier2
    @run_only_on('sat')
    def test_positive_remove_docker_repo_cv(self):
        """Add Docker-type repository to a non-composite content view
        and publish it. Create an activation key and associate it with the
        Docker content view. Then remove this content view from the activation
        key.
        :id: 6a887a67-6700-47ac-9230-deaa0e382f22
        :expectedresults: Docker-based content view can be added and then
            removed from the activation key.
        :CaseLevel: Integration
        """
        ak = entities.ActivationKey(
            content_view=self.content_view,
            environment=self.lce,
            organization=self.org,
        ).create()
        self.assertEqual(ak.content_view.id, self.content_view.id)
        # Detach the content view and verify the server persisted the change.
        ak.content_view = None
        self.assertIsNone(ak.update(['content_view']).content_view)
    @tier2
    @run_only_on('sat')
    def test_positive_add_docker_repo_ccv(self):
        """Add Docker-type repository to a non-composite content view and
        publish it. Then add this content view to a composite content view and
        publish it. Create an activation key and associate it with the
        composite Docker content view.
        :id: 2fc8a462-9d91-48bc-8e32-7ff8f769b9e4
        :expectedresults: Docker-based content view can be added to activation
            key
        :CaseLevel: Integration
        """
        # Wrap the shared published version in a composite content view.
        comp_content_view = entities.ContentView(
            composite=True,
            organization=self.org,
        ).create()
        comp_content_view.component = [self.cvv]
        comp_content_view = comp_content_view.update(['component'])
        self.assertEqual(self.cvv.id, comp_content_view.component[0].id)
        comp_content_view.publish()
        # Promote the composite version so the activation key can use it.
        comp_cvv = comp_content_view.read().version[0].read()
        promote(comp_cvv, self.lce.id)
        ak = entities.ActivationKey(
            content_view=comp_content_view,
            environment=self.lce,
            organization=self.org,
        ).create()
        self.assertEqual(ak.content_view.id, comp_content_view.id)
    @tier2
    @run_only_on('sat')
    def test_positive_remove_docker_repo_ccv(self):
        """Add Docker-type repository to a non-composite content view
        and publish it. Then add this content view to a composite content view
        and publish it. Create an activation key and associate it with the
        composite Docker content view. Then, remove the composite content view
        from the activation key.
        :id: f3542272-13db-4a49-bc27-d1137172df41
        :expectedresults: Docker-based composite content view can be added and
            then removed from the activation key.
        :CaseLevel: Integration
        """
        comp_content_view = entities.ContentView(
            composite=True,
            organization=self.org,
        ).create()
        comp_content_view.component = [self.cvv]
        comp_content_view = comp_content_view.update(['component'])
        self.assertEqual(self.cvv.id, comp_content_view.component[0].id)
        comp_content_view.publish()
        comp_cvv = comp_content_view.read().version[0].read()
        promote(comp_cvv, self.lce.id)
        ak = entities.ActivationKey(
            content_view=comp_content_view,
            environment=self.lce,
            organization=self.org,
        ).create()
        self.assertEqual(ak.content_view.id, comp_content_view.id)
        # Detach the composite content view and confirm removal.
        ak.content_view = None
        self.assertIsNone(ak.update(['content_view']).content_view)
class DockerComputeResourceTestCase(APITestCase):
    """Tests specific to managing Docker-based Compute Resources."""
    @classmethod
    @skip_if_not_set('docker')
    def setUpClass(cls):
        """Create an organization and product which can be re-used in tests."""
        super(DockerComputeResourceTestCase, cls).setUpClass()
        cls.org = entities.Organization().create()
    @tier2
    @run_only_on('sat')
    def test_positive_create_internal(self):
        """Create a Docker-based Compute Resource in the Satellite 6
        instance.
        :id: 146dd836-83c7-4f9c-937e-791162ea106e
        :expectedresults: Compute Resource can be created and listed.
        :CaseLevel: Integration
        """
        # "Internal" here means the local Docker daemon reached over its
        # unix socket rather than an external TCP endpoint.
        for name in valid_data_list():
            with self.subTest(name):
                compute_resource = entities.DockerComputeResource(
                    name=name,
                    url=settings.docker.get_unix_socket_url(),
                ).create()
                self.assertEqual(compute_resource.name, name)
                self.assertEqual(compute_resource.provider, DOCKER_PROVIDER)
                self.assertEqual(
                    compute_resource.url,
                    settings.docker.get_unix_socket_url()
                )
    @tier2
    @run_only_on('sat')
    def test_positive_update_internal(self):
        """Create a Docker-based Compute Resource in the Satellite 6
        instance then edit its attributes.
        :id: 5590621f-063c-4e32-80cb-ebe634dbadaa
        :expectedresults: Compute Resource can be created, listed and its
            attributes can be updated.
        :CaseLevel: Integration
        """
        for url in (settings.docker.external_url,
                    settings.docker.get_unix_socket_url()):
            with self.subTest(url):
                compute_resource = entities.DockerComputeResource(
                    organization=[self.org],
                    url=url,
                ).create()
                self.assertEqual(compute_resource.url, url)
                # Change the URL locally, push it, and verify the server
                # echoes the new value back.
                compute_resource.url = gen_url()
                self.assertEqual(
                    compute_resource.url,
                    compute_resource.update(['url']).url,
                )
    @skip_if_bug_open('bugzilla', 1466240)
    @skip_if_bug_open('bugzilla', 1478966)
    @tier2
    @run_only_on('sat')
    def test_positive_list_containers(self):
        """Create a Docker-based Compute Resource in the Satellite 6
        instance then list its running containers.
        :id: 96bfba71-03e5-4d80-bd27-fc5db8e00b50
        :expectedresults: Compute Resource can be created and existing
            instances can be listed.
        :CaseLevel: Integration
        """
        # Instantiate and setup a docker host VM + compute resource
        docker_image = settings.docker.docker_image
        with VirtualMachine(
            source_image=docker_image,
            tag=u'docker'
        ) as docker_host:
            docker_host.create()
            docker_host.install_katello_ca()
            # Docker daemon on the VM listens on plain TCP port 2375.
            url = 'http://{0}:2375'.format(docker_host.ip_addr)
            compute_resource = entities.DockerComputeResource(
                organization=[self.org],
                url=url,
            ).create()
            self.assertEqual(compute_resource.url, url)
            # Fresh compute resource has no containers yet.
            self.assertEqual(len(entities.AbstractDockerContainer(
                compute_resource=compute_resource).search()), 0)
            container = entities.DockerHubContainer(
                command='top',
                compute_resource=compute_resource,
                organization=[self.org],
            ).create()
            # After creating one container, the search should find it.
            result = entities.AbstractDockerContainer(
                compute_resource=compute_resource).search()
            self.assertEqual(len(result), 1)
            self.assertEqual(result[0].name, container.name)
    @tier2
    @run_only_on('sat')
    def test_positive_create_external(self):
        """Create a Docker-based Compute Resource using an external
        Docker-enabled system.
        :id: 91ae6374-82de-424e-aa4c-e19209acd5b5
        :expectedresults: Compute Resource can be created and listed.
        :CaseLevel: Integration
        """
        for name in valid_data_list():
            with self.subTest(name):
                compute_resource = entities.DockerComputeResource(
                    name=name,
                    url=settings.docker.external_url,
                ).create()
                self.assertEqual(compute_resource.name, name)
                self.assertEqual(compute_resource.provider, DOCKER_PROVIDER)
                self.assertEqual(
                    compute_resource.url, settings.docker.external_url)
    @tier1
    @run_only_on('sat')
    def test_positive_delete(self):
        """Create a Docker-based Compute Resource then delete it.
        :id: f1f23c1e-6481-46b5-9485-787ae18d9ed5
        :expectedresults: Compute Resource can be created, listed and deleted.
        :CaseImportance: Critical
        """
        # Exercise deletion for both the external TCP and local unix-socket
        # flavours of the compute resource.
        for url in (settings.docker.external_url,
                    settings.docker.get_unix_socket_url()):
            with self.subTest(url):
                resource = entities.DockerComputeResource(url=url).create()
                self.assertEqual(resource.url, url)
                self.assertEqual(resource.provider, DOCKER_PROVIDER)
                resource.delete()
                # Reading a deleted resource must raise a 4xx error.
                with self.assertRaises(HTTPError):
                    resource.read()
class DockerContainerTestCase(APITestCase):
    """Tests specific to using ``Containers`` in an external Docker
    Compute Resource
    """
    @classmethod
    @skip_if_not_set('docker')
    @skip_if_bug_open('bugzilla', 1478966)
    def setUpClass(cls):
        """Create an organization and product which can be re-used in tests."""
        super(DockerContainerTestCase, cls).setUpClass()
        cls.org = entities.Organization().create()
    def setUp(self):
        """Instantiate and setup a docker host VM + compute resource"""
        docker_image = settings.docker.docker_image
        self.docker_host = VirtualMachine(
            source_image=docker_image,
            tag=u'docker'
        )
        # Register cleanup before create() so a half-provisioned VM is still
        # torn down if provisioning fails part-way through.
        self.addCleanup(vm_cleanup, self.docker_host)
        self.docker_host.create()
        self.docker_host.install_katello_ca()
        # Compute resource pointing at the VM's plain-TCP Docker daemon.
        self.compute_resource = entities.DockerComputeResource(
            name=gen_string('alpha'),
            organization=[self.org],
            url='http://{0}:2375'.format(self.docker_host.ip_addr),
        ).create()
    @tier2
    @run_only_on('sat')
    def test_positive_create_with_compresource(self):
        """Create containers for docker compute resources
        :id: c57c261c-39cf-4a71-93a4-e01e3ec368a7
        :expectedresults: The docker container is created
        :CaseLevel: Integration
        """
        container = entities.DockerHubContainer(
            command='top',
            compute_resource=self.compute_resource,
            organization=[self.org],
        ).create()
        self.assertEqual(
            container.compute_resource.read().name,
            self.compute_resource.name,
        )
    @upgrade
    @tier2
    @run_only_on('sat')
    @skip_if_bug_open('bugzilla', 1282431)
    @skip_if_bug_open('bugzilla', 1347658)
    def test_positive_create_using_cv(self):
        """Create docker container using custom content view, lifecycle
        environment and docker repository for docker compute resource
        :id: 69f29cc8-45e0-4b3a-b001-2842c45617e0
        :expectedresults: The docker container is created
        :CaseLevel: Integration
        """
        lce = entities.LifecycleEnvironment(organization=self.org).create()
        repo = _create_repository(
            entities.Product(organization=self.org).create(),
            upstream_name='centos',
        )
        repo.sync()
        content_view = entities.ContentView(organization=self.org).create()
        content_view.repository = [repo]
        content_view = content_view.update(['repository'])
        content_view.publish()
        content_view = content_view.read()
        self.assertEqual(len(content_view.version), 1)
        # content_view was just re-read above, so reuse it instead of issuing
        # another read for the version list.
        cvv = content_view.version[0].read()
        promote(cvv, lce.id)
        # Publishing sometimes takes a few seconds, so retry container
        # creation while BZ 1452149 is open.
        retries = 10 if bz_bug_is_open(1452149) else 1
        for attempt in range(retries):
            try:
                container = entities.DockerHubContainer(
                    command='top',
                    compute_resource=self.compute_resource,
                    organization=[self.org],
                    repository_name=repo.container_repository_name,
                    tag='latest',
                    tty='yes',
                ).create()
            except HTTPError:
                if attempt == retries - 1:
                    raise
                sleep(2)
            else:
                # Stop retrying as soon as creation succeeds. The original
                # loop lacked this break, so a successful create was repeated
                # on every remaining iteration, spawning duplicate containers.
                break
        self.assertEqual(
            container.compute_resource.read().name,
            self.compute_resource.name
        )
        self.assertEqual(
            container.repository_name,
            repo.container_repository_name
        )
        self.assertEqual(container.tag, 'latest')
    @tier2
    @run_only_on('sat')
    def test_positive_power_on_off(self):
        """Create containers for docker compute resource,
        then power them on and finally power them off
        :id: 6271afcf-698b-47e2-af80-1ce38c111742
        :expectedresults: The docker container is created and the power status
            is showing properly
        :CaseLevel: Integration
        """
        container = entities.DockerHubContainer(
            command='top',
            compute_resource=self.compute_resource,
            organization=[self.org],
        ).create()
        self.assertEqual(
            container.compute_resource.read().url,
            self.compute_resource.url,
        )
        # New containers start running; stopping must flip the status.
        self.assertTrue(container.power(
            data={u'power_action': 'status'})['running'])
        container.power(data={u'power_action': 'stop'})
        self.assertFalse(container.power(
            data={u'power_action': 'status'})['running'])
    @tier2
    @run_only_on('sat')
    @skip_if_bug_open('bugzilla', 1479291)
    def test_positive_read_container_log(self):
        """Create containers for docker compute resource and read their logs
        :id: ffeb3c57-c7dc-4cee-a087-b52daedd4485
        :expectedresults: The docker container is created and its log can be
            read
        :CaseLevel: Integration
        """
        # 'date' exits immediately after printing, guaranteeing log output.
        container = entities.DockerHubContainer(
            command='date',
            compute_resource=self.compute_resource,
            organization=[self.org],
        ).create()
        self.assertTrue(container.logs()['logs'])
    @upgrade
    @run_in_one_thread
    @run_only_on('sat')
    @tier2
    def test_positive_create_with_external_registry(self):
        """Create a container pulling an image from a custom external
        registry
        :id: 04506604-637f-473b-a764-825c61067b1b
        :expectedresults: The docker container is created and the image is
            pulled from the external registry
        :CaseLevel: Integration
        """
        repo_name = 'rhel'
        registry = entities.Registry(
            url=settings.docker.external_registry_1).create()
        try:
            container = entities.DockerRegistryContainer(
                compute_resource=self.compute_resource,
                organization=[self.org],
                registry=registry,
                repository_name=repo_name,
            ).create()
            self.assertEqual(container.registry.id, registry.id)
            self.assertEqual(container.repository_name, repo_name)
        finally:
            # Always remove the registry so other tests see a clean state.
            registry.delete()
    @tier1
    @run_only_on('sat')
    def test_positive_delete(self):
        """Delete containers using docker compute resource
        :id: 12efdf50-9494-48c3-a181-01c495b48c19
        :expectedresults: The docker containers are deleted
        :CaseImportance: Critical
        """
        container = entities.DockerHubContainer(
            command='top',
            compute_resource=self.compute_resource,
            organization=[self.org],
        ).create()
        container.delete()
        # Reading a deleted container must raise a 4xx error.
        with self.assertRaises(HTTPError):
            container.read()
@skip_if_bug_open('bugzilla', 1414821)
class DockerUnixSocketContainerTestCase(APITestCase):
    """Tests specific to using ``Containers`` in local unix-socket
    Docker Compute Resource
    """
    @classmethod
    @skip_if_not_set('docker')
    def setUpClass(cls):
        """Create an organization and product which can be re-used in tests."""
        super(DockerUnixSocketContainerTestCase, cls).setUpClass()
        cls.org = entities.Organization().create()
        # Compute resource backed by the Satellite host's own Docker daemon,
        # reached via its local unix socket instead of TCP.
        cls.compute_resource = entities.DockerComputeResource(
            name=gen_string('alpha'),
            organization=[cls.org],
            url=settings.docker.get_unix_socket_url(),
        ).create()
    @tier2
    @run_only_on('sat')
    def test_positive_create_with_compresource(self):
        """Create containers for docker compute resources
        :id: 91a8a159-0f00-44b6-8ab7-dc8b1a5f1f37
        :expectedresults: The docker container is created
        :CaseLevel: Integration
        """
        container = entities.DockerHubContainer(
            command='top',
            compute_resource=self.compute_resource,
            organization=[self.org],
        ).create()
        # Verify the container is attached to the unix-socket resource.
        self.assertEqual(
            container.compute_resource.read().name,
            self.compute_resource.name,
        )
@run_in_one_thread
class DockerRegistryTestCase(APITestCase):
    """Tests specific to performing CRUD methods against ``Registries``
    repositories.
    """
    @classmethod
    @skip_if_not_set('docker')
    def setUpClass(cls):
        """Skip the tests if docker section is not set in properties file and
        set external docker registry url which can be re-used in tests.
        """
        super(DockerRegistryTestCase, cls).setUpClass()
        cls.url = settings.docker.external_registry_1
    @tier1
    @run_only_on('sat')
    def test_positive_create_with_name(self):
        """Create an external docker registry
        :id: 8212ab15-8298-4a46-88ba-eaf71069e068
        :expectedresults: External registry is created successfully
        :CaseImportance: Critical
        """
        for name in valid_data_list():
            with self.subTest(name):
                description = gen_string('alphanumeric')
                registry = entities.Registry(
                    description=description,
                    name=name,
                    url=self.url,
                ).create()
                # Delete in finally so one failed subTest does not leak a
                # registry into the next iteration.
                try:
                    self.assertEqual(registry.name, name)
                    self.assertEqual(registry.url, self.url)
                    self.assertEqual(registry.description, description)
                finally:
                    registry.delete()
    @tier1
    @run_only_on('sat')
    def test_positive_update_name(self):
        """Create an external docker registry and update its name
        :id: fdd9c76b-43a7-4ece-8975-3b08241134c8
        :expectedresults: the external registry is updated with the new name
        :CaseImportance: Critical
        """
        registry = entities.Registry(
            name=gen_string('alpha'), url=self.url).create()
        try:
            # Reuse one registry across all rename subTests.
            for new_name in valid_data_list():
                with self.subTest(new_name):
                    registry.name = new_name
                    registry = registry.update()
                    self.assertEqual(registry.name, new_name)
        finally:
            registry.delete()
    @tier2
    @run_only_on('sat')
    def test_positive_update_url(self):
        """Create an external docker registry and update its URL
        :id: a3701f92-0846-4d1b-b691-48cdc85c1341
        :expectedresults: the external registry is updated with the new URL
        :CaseLevel: Integration
        """
        new_url = settings.docker.external_registry_2
        registry = entities.Registry(url=self.url).create()
        try:
            self.assertEqual(registry.url, self.url)
            registry.url = new_url
            registry = registry.update()
            self.assertEqual(registry.url, new_url)
        finally:
            registry.delete()
    @tier2
    @run_only_on('sat')
    def test_positive_update_description(self):
        """Create an external docker registry and update its description
        :id: 7eb08208-8b45-444f-b365-2d6f6e417533
        :expectedresults: the external registry is updated with the new
            description
        :CaseLevel: Integration
        """
        registry = entities.Registry(url=self.url).create()
        try:
            for new_desc in valid_data_list():
                with self.subTest(new_desc):
                    registry.description = new_desc
                    registry = registry.update()
                    self.assertEqual(registry.description, new_desc)
        finally:
            registry.delete()
    @tier2
    @run_only_on('sat')
    def test_positive_update_username(self):
        """Create an external docker registry and update its username
        :id: 7da17c30-4582-4e27-a080-e446e6eec176
        :expectedresults: the external registry is updated with the new
            username
        :CaseLevel: Integration
        """
        username = gen_string('alpha')
        new_username = gen_string('alpha')
        registry = entities.Registry(
            username=username,
            password=gen_string('alpha'),
            url=self.url,
        ).create()
        try:
            self.assertEqual(registry.username, username)
            registry.username = new_username
            registry = registry.update()
            self.assertEqual(registry.username, new_username)
        finally:
            registry.delete()
    @upgrade
    @tier1
    @run_only_on('sat')
    def test_positive_delete(self):
        """Create an external docker registry and then delete it
        :id: 1a215237-91b5-4fcc-8c18-a9944068ac88
        :expectedresults: The external registry is deleted successfully
        :CaseImportance: Critical
        """
        registry = entities.Registry(url=self.url).create()
        registry.delete()
        # Reading a deleted registry must raise a 4xx error.
        with self.assertRaises(HTTPError):
            registry.read()
|
valandil/complex_bessel | refs/heads/master | tests/hankelcontours.py | 2 | # Python script to generate the contour plot
# seen in Abramowitz & Stegun's book on p. 359.
# The values are imported from the file "contours.dat"
#
# The pylab module is required for this script to run
#
# Joey Dumont <joey.dumont@gmail.com>
# Denis Gagnon <gagnon88@gmail.com>
#
import numpy as np
import matplotlib.pyplot as plt

plt.rcParams['text.usetex'] = True
plt.rcParams['font.size'] = 10
plt.rcParams['legend.numpoints'] = 3

# Sampled values of H0^(1)(x+iy): columns are x, y, Re(H), Im(H).
Data = np.loadtxt('contours.dat')
x = Data[:, 0]
y = Data[:, 1]
# Modulus and phase (degrees) of the Hankel function samples. The removed
# intermediate ``Z`` used ``np.complex``, an alias deleted in NumPy 1.24,
# and was never referenced afterwards.
M = np.sqrt(Data[:, 2]**2 + Data[:, 3]**2)
phi = (180 / np.pi) * np.abs(np.arctan2(Data[:, 3], Data[:, 2]))

# The data is a square grid flattened row-by-row; recover its side length.
# np.sqrt returns a float, and modern NumPy requires integer sizes for
# linspace counts and reshape shapes, so round to int explicitly.
Dimension = int(round(np.sqrt(M.size)))
X = np.linspace(x.min(), x.max(), Dimension)
Y = np.linspace(y.min(), y.max(), Dimension)
Xg, Yg = np.meshgrid(X, Y)
M0 = np.reshape(M, [Dimension, Dimension])
phi0 = np.reshape(phi, [Dimension, Dimension])

# Contour levels for modulus and phase, matching the reference figure.
contourM = np.linspace(0.2, 3.2, 16)
contourP = np.linspace(0, 360, 15)

plt.figure(figsize=(7, 5))
plt.contour(Xg, Yg, M0, contourM)
CS = plt.contour(Xg, Yg, phi0, contourP, colors='k', linestyles='dashdot')

# Draw the branch cut along the negative real axis.
Xcut = [-4.0, 0]
Ycut = [0, 0]
plt.plot(Xcut, Ycut, lw=2.5, color='k')

plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('Contour lines of the modulus and phase of $H_0^{(1)}(x+iy)$ \n (reproduced from Abramowitz \& Stegun, p.359)')
plt.savefig('contours.png')
|
LUTAN/tensorflow | refs/heads/master | tensorflow/contrib/session_bundle/gc.py | 47 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# create the directories
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# create a simple parser that pulls the export_version from the directory
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print every_fifth(path_list) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print largest_three(all_paths) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print both(all_paths) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# delete everything not in 'both'
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
Path = collections.namedtuple('Path', 'path export_version')
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def largest_export_versions(n):
  """Creates a filter that keeps the largest n export versions.

  Args:
    n: number of versions to keep.

  Returns:
    A filter function that keeps the n largest paths.
  """
  def keep(paths):
    # Pair each usable export_version with its list index; paths whose
    # export_version is None are excluded from consideration entirely.
    candidates = [(path.export_version, idx)
                  for idx, path in enumerate(paths)
                  if path.export_version is not None]
    winners = heapq.nlargest(n, candidates)
    return sorted(paths[idx] for _, idx in winners)
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def one_of_every_n_export_versions(n):
  r"""Creates a filter that keeps one of every n export versions.

  Args:
    n: interval size.

  Returns:
    A filter function that keeps exactly one path from each interval
    [0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
    interval the largest is kept.
  """
  def keep(paths):
    # Largest path seen so far in each interval, keyed by interval index.
    best_in_interval = {}
    for path in paths:
      if path.export_version is None:
        # Skip paths with no parsed export version.
        continue
      # export_version == 0 maps to interval 0; otherwise (v - 1) / n
      # buckets versions into (0, n], (n, 2n], ...
      bucket = math.floor(
          (path.export_version - 1) / n) if path.export_version else 0
      incumbent = best_in_interval.get(bucket)
      if not incumbent or incumbent.export_version < path.export_version:
        best_in_interval[bucket] = path
    return sorted(best_in_interval.values())
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def mod_export_version(n):
  """Creates a filter that keeps every export that is a multiple of n.

  Args:
    n: step size.

  Returns:
    A filter function that keeps paths where export_version % n == 0.
  """
  def keep(paths):
    # sorted() already returns a new list, so no explicit accumulator needed.
    return sorted(path for path in paths if path.export_version % n == 0)
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def union(lf, rf):
  """Creates a filter that keeps the union of two filters.

  Args:
    lf: first filter
    rf: second filter

  Returns:
    A filter function that keeps the n largest paths.
  """
  def keep(paths):
    # Set union deduplicates paths kept by both filters.
    return sorted(set(lf(paths)) | set(rf(paths)))
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def negation(f):
  """Negate a filter.

  Args:
    f: filter function to invert

  Returns:
    A filter function that returns the negation of f.
  """
  def keep(paths):
    # Everything in the input that f would have kept is dropped.
    return sorted(set(paths) - set(f(paths)))
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def get_paths(base_dir, parser):
  """Gets a list of Paths in a given directory.

  Args:
    base_dir: directory.
    parser: a function which gets the raw Path and can augment it with
      information such as the export_version, or ignore the path by returning
      None. An example parser may extract the export version from a path
      such as "/tmp/exports/100" an another may extract from a full file
      name such as "/tmp/checkpoint-99.out".

  Returns:
    A list of Paths contained in the base directory with the parsing function
    applied.
    By default the following fields are populated,
      - Path.path
    The parsing function is responsible for populating,
      - Path.export_version
  """
  def parse_entry(entry):
    # Wrap the directory entry in a Path (export_version unknown yet) and
    # let the caller-supplied parser fill in or reject it.
    return parser(Path(os.path.join(base_dir, entry), None))

  parsed = (parse_entry(entry) for entry in gfile.ListDirectory(base_dir))
  # Drop entries the parser rejected (returned None) and sort the rest.
  return sorted(p for p in parsed if p)
|
grayjay/aenea | refs/heads/master | client/aenea/communications.py | 4 | # This file is part of Aenea
#
# Aenea is free software: you can redistribute it and/or modify it under
# the terms of version 3 of the GNU Lesser General Public License as
# published by the Free Software Foundation.
#
# Aenea is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Aenea. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (2014) Alex Roper
# Alex Roper <alex@aroper.net>
import httplib
import jsonrpclib
import socket
import time
import aenea.config
import aenea.configuration
# Watched config file recording which RPC server to talk to; seeded with the
# compiled-in default address and written immediately so the file exists on
# disk for other processes to read.
_server_config = aenea.configuration.ConfigWatcher(
    'server_state',
    {'host': aenea.config.DEFAULT_SERVER_ADDRESS[0],
     'port': aenea.config.DEFAULT_SERVER_ADDRESS[1]})
_server_config.write()
def set_server_address(address):
    '''address is (host, port).'''
    host, port = address
    # Re-read the config before mutating so concurrent edits are not lost,
    # then persist the new endpoint.
    _server_config.refresh()
    _server_config['host'] = host
    _server_config['port'] = port
    _server_config.write()
class _ImpatientTransport(jsonrpclib.jsonrpc.Transport):
    '''Transport for jsonrpclib that supports a timeout.'''
    def __init__(self, timeout=None):
        # timeout: seconds passed to httplib.HTTPConnection, or None for the
        # default blocking behavior.
        self._timeout = timeout
        jsonrpclib.jsonrpc.Transport.__init__(self)
    def make_connection(self, host):
        #return an existing connection if possible. This allows
        #HTTP/1.1 keep-alive.
        if (hasattr(self, '_connection') and
            self._connection is not None and
            host == self._connection[0]):
            return self._connection[1]
        # create a HTTP connection object from a host descriptor
        chost, self._extra_headers, x509 = self.get_host_info(host)
        #store the host argument along with the connection object
        if self._timeout is None:
            self._connection = host, httplib.HTTPConnection(chost)
        else:
            self._connection = host, httplib.HTTPConnection(
                chost,
                timeout=self._timeout
            )
        return self._connection[1]
class Proxy(object):
    '''Forwards RPC calls to the aenea server, caching the connection and
    backing off for a cooldown period after a failed connection attempt.'''
    def __init__(self):
        self._address = None
        # Whether the most recent connection attempt succeeded.
        self.last_connect_good = False
        self._last_failed_connect = 0
        self._transport = _ImpatientTransport(aenea.config.COMMAND_TIMEOUT)
    def _execute_batch(self, batch, use_multiple_actions=False):
        # batch is a list of (method_name, args, kwargs) tuples. Returns the
        # RPC result only for a single-command batch; None otherwise.
        self._refresh_server()
        if self._address is None:
            return
        # Skip the attempt entirely while inside the failure cooldown window.
        if time.time() - self._last_failed_connect > aenea.config.CONNECT_RETRY_COOLDOWN:
            try:
                if not self.last_connect_good:
                    # Probe reachability cheaply before issuing the RPC.
                    socket.create_connection(self._address, aenea.config.CONNECT_TIMEOUT)
                    self.last_connect_good = True
                if len(batch) == 1:
                    return (getattr(
                        self._server,
                        batch[0][0])(*batch[0][1], **batch[0][2])
                        )
                elif use_multiple_actions:
                    # Single round-trip: server unpacks the batch itself.
                    self._server.multiple_actions(batch)
                else:
                    # One RPC per command.
                    for (command, args, kwargs) in batch:
                        getattr(self._server, command)(*args, **kwargs)
            except socket.error:
                self._last_failed_connect = time.time()
                self.last_connect_good = False
                print 'Socket error connecting to aenea server. To avoid slowing dictation, we won\'t try again for %i seconds.' % aenea.config.CONNECT_RETRY_COOLDOWN
    def execute_batch(self, batch):
        self._execute_batch(batch, aenea.config.USE_MULTIPLE_ACTIONS)
    def __getattr__(self, meth):
        # Any unknown attribute becomes a single-command RPC call.
        def call(*a, **kw):
            return self._execute_batch([(meth, a, kw)])
        return call
    def _refresh_server(self):
        # Re-read the watched config; rebuild the jsonrpclib server object
        # whenever the configured (host, port) pair has changed.
        _server_config.refresh()
        address = _server_config.conf['host'], _server_config.conf['port']
        if self._address != address:
            self.last_connect_good = False
            self._address = address
            self._server = jsonrpclib.Server(
                'http://%s:%i' % address,
                transport=self._transport
            )
            # New endpoint: clear the cooldown so we try it immediately.
            self._last_failed_connect = 0
class BatchProxy(object):
    """Records RPC calls as ``(name, args, kwargs)`` tuples in
    ``_commands`` instead of executing them; lookups of names starting
    with an underscore are silently ignored."""

    def __init__(self):
        self._commands = []

    def __getattr__(self, key):
        def record(*args, **kwargs):
            # Skip private/dunder probes (e.g. from the copy protocol).
            if key.startswith('_'):
                return
            self._commands.append((key, args, kwargs))
        return record
server = Proxy()
|
DiogoGCosta/europe-travel | refs/heads/master | travel.py | 1 | #!/usr/bin/env python
#coding: utf-8
'''cidade de partida lisboa'''
'''o indice da cidade de partida (começando em 0) , neste caso é 7, pois é a 8ª cidade a aparecer no .csv'''
# 0-based index of the departure city in the CSV header (here: Lisbon, 8th column).
start_city = 7
'''cidade destino amsterdao'''
'''o indice da cidade destino, neste caso é 0, pois é o primeiro que aparece no .csv'''
# 0-based index of the destination city (here: Amsterdam, first column).
dest_city = 0
# cities[i] = [name,
#              adjacency list of [neighbor_index, edge_cost] sorted by cost,
#              {visited-bitmask: chosen successor city},
#              {visited-bitmask: memoized best remaining cost}]
cities = []
current_solution = []
best = 0       # best (lowest) total cost found so far; doIt() seeds it as an upper bound
size = 0       # number of cities read from the CSV header
bVisited = 0   # bitmask of cities on the current partial path
'''função recursiva'''
def func(current_cost,current_city,visited):
    """Recursive memoized search for the cheapest route to dest_city.

    current_cost -- cost accumulated along the current partial path
    current_city -- index of the city we are currently at
    visited      -- number of cities visited so far (the start counts as 1)

    Returns the best known cost from current_city onwards given the set of
    already-visited cities encoded in the global bitmask ``bVisited``.
    Results are memoized per (city, bitmask) in cities[city][3] and the
    chosen successor is recorded in cities[city][2].
    """
    global cities, current_solution,best,size,dest_city,start_city,bVisited
    tempVisited = bVisited
    # memo hit: remaining cost for this (city, visited-set) is already known
    if( bVisited in cities[current_city][3]):
        if(current_cost + cities[current_city][3][bVisited] < best):
            best = current_cost + cities[current_city][3][bVisited]
        return cities[current_city][3][bVisited]
    if(visited >= size):
        '''caminho melhor até ao momento'''
        # all cities visited: the path is complete, update the global best
        if(current_cost < best):
            best = current_cost
        return 0
    # mark the current city as visited in the bitmask
    bVisited = (1<<current_city) | bVisited
    for city in cities[current_city][1]:
        if(((bVisited >> city[0]) & 1) == 0):
            # never enter the destination before every other city was seen
            if((city[0] == dest_city and visited+1 < size)):
                continue
            temp = func(current_cost+city[1],city[0],visited+1)
            # memoize the cheapest continuation for the pre-visit bitmask
            if( (tempVisited in cities[current_city][3]) ):
                if cities[current_city][3][tempVisited] > (temp + city[1]):
                    cities[current_city][3][tempVisited] = (temp + city[1])
                    cities[current_city][2][tempVisited] = city[0]
            else:
                cities[current_city][3][tempVisited] = temp + city[1]
                cities[current_city][2][tempVisited] = city[0]
    # unmark the current city before returning to the caller
    bVisited = (~(1<<current_city)) & bVisited
    return cities[current_city][3][bVisited]
def doIt(file_name):
    """Load the pairwise cost matrix from *file_name* and run the search.

    The file is ';'-separated: the first row holds city names (from column
    1 on) and each following row i holds the cost from city i to city j-1.
    Resets the module globals, builds the adjacency structures, seeds
    ``best`` with the sum of all matrix entries as an upper bound, and
    starts the recursion from ``start_city``.
    """
    global cities, current_solution, best, size, dest_city, start_city, bVisited
    # reset module state so each of the three data sets starts clean
    current_solution = []
    cities = []
    best = 0
    size = 0
    bVisited = 0
    # `with` guarantees the handle is closed (the original opened the file
    # and never closed it)
    with open(file_name, 'r') as f:
        lines = f.readlines()
    # header row: city names start at column 1
    line = lines[0].split(';')
    for i in range(1, len(line)):
        cities.append([line[i].split('\n')[0], [], {}, {}])
    # remaining rows: distances/costs/times between city i and city j-1
    for i in range(0, len(lines) - 1):
        line = lines[i + 1].split(';')
        for j in range(1, len(line)):
            if len(line[j]) > 0:
                # store the edge on both endpoints (undirected graph)
                if j - 1 != i:
                    cities[j - 1][1].append([i, float(line[j])])
                    cities[i][1].append([j - 1, float(line[j])])
                    best += float(line[j])
                else:
                    cities[i][1].append([j - 1, float(line[j])])
    # sort each adjacency list by ascending edge cost
    for city in cities:
        city[1].sort(key=lambda x: x[1], reverse=False)
    size = len(cities)
    # start the search from start_city (index 7 = Lisbon by default)
    func(0, start_city, 1)
def printCities(out_temp):
    """Return *out_temp* extended with the computed route description.

    Walks the successor map recorded in cities[city][2], keyed by the
    bitmask of cities already traversed, from start_city to dest_city.
    """
    global cities
    pieces = [out_temp, cities[start_city][0], ' e terminando em ',
              cities[dest_city][0], ' segue o percurso: ']
    node = start_city
    mask = 0
    while node != dest_city:
        pieces.append(cities[node][0] + ', ')
        successor = cities[node][2][mask]
        mask |= (1 << node)
        node = successor
    pieces.append(cities[node][0] + ' ')
    return ''.join(pieces)
'''main'''
# --- lowest cost ---
print("A calcular o menor custo...")
doIt('cost.csv')
out_temp = printCities("A rota com menor custo começando em ")
out_temp += 'custando um total de ' + ('%.2f' % best) + ' euros.\n'
print(out_temp)
# --- shortest time ---
print("A calcular o menor tempo...")
doIt('time.csv')
out_temp = printCities('A rota que demora menos tempo começando em ')
out_temp += 'demorando um total de ' + ('%.2f' % best) + ' horas.\n'
print(out_temp)
# --- shortest distance ---
print("A calcular a menor distância...")
doIt('distance.csv')
out_temp = printCities('A rota que percorre menos distância começando em ')
out_temp += 'percorrendo um total de ' + ('%.2f' % best) + ' km.'
print(out_temp)
|
# Exercise 3-argument slicing (reverse, stride 2, reverse stride 2) on an
# even-length and an odd-length list; output must match CPython's.
for _n in (10, 9):
    x = list(range(_n))
    print(x[::-1])
    print(x[::2])
    print(x[::-2])
|
adam111316/SickGear | refs/heads/master | lib/sqlalchemy/orm/persistence.py | 75 | # orm/persistence.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby
from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
from .base import _state_mapper, state_str, _attr_as_key
from ..sql import expression
from . import loading
def save_obj(base_mapper, states, uowtransaction, single=False):
    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
    of objects.

    This is called within the context of a UOWTransaction during a
    flush operation, given a list of states to be flushed.  The
    base mapper in an inheritance hierarchy handles the inserts/
    updates for all descendant mappers.

    """

    # if batch=false, call _save_obj separately for each object
    if not single and not base_mapper.batch:
        for state in _sort_states(states):
            save_obj(base_mapper, [state], uowtransaction, single=True)
        return

    # split into insert vs. update lists and fire before_insert/update events
    states_to_insert, states_to_update = _organize_states_for_save(
        base_mapper,
        states,
        uowtransaction)

    cached_connections = _cached_connection_dict(base_mapper)

    # emit per mapped table, in inheritance order
    for table, mapper in base_mapper._sorted_tables.items():
        insert = _collect_insert_commands(base_mapper, uowtransaction,
                                          table, states_to_insert)

        update = _collect_update_commands(base_mapper, uowtransaction,
                                          table, states_to_update)

        if update:
            _emit_update_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, update)

        if insert:
            _emit_insert_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, insert)

    # expire readonly attributes, load eager defaults, fire after_* events
    _finalize_insert_update_commands(base_mapper, uowtransaction,
                                     states_to_insert, states_to_update)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
    """Issue UPDATE statements on behalf of a relationship() which
    specifies post_update.

    Only columns present in *post_update_cols* (plus the primary keys used
    to locate each row) are included in the emitted statements.
    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_update = _organize_states_for_post_update(
        base_mapper,
        states, uowtransaction)

    for table, mapper in base_mapper._sorted_tables.items():
        update = _collect_post_update_commands(base_mapper, uowtransaction,
                                               table, states_to_update,
                                               post_update_cols)

        if update:
            _emit_post_update_statements(base_mapper, uowtransaction,
                                         cached_connections,
                                         mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
    """Issue ``DELETE`` statements for a list of objects.

    This is called within the context of a UOWTransaction during a
    flush operation.

    """

    cached_connections = _cached_connection_dict(base_mapper)

    states_to_delete = _organize_states_for_delete(
        base_mapper,
        states,
        uowtransaction)

    table_to_mapper = base_mapper._sorted_tables

    # delete from child tables first (reverse of insert order) so that
    # foreign key constraints are not violated
    for table in reversed(list(table_to_mapper.keys())):
        delete = _collect_delete_commands(base_mapper, uowtransaction,
                                          table, states_to_delete)

        mapper = table_to_mapper[table]

        _emit_delete_statements(base_mapper, uowtransaction,
                                cached_connections, mapper, table, delete)

    for state, state_dict, mapper, has_identity, connection \
            in states_to_delete:
        mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for INSERT or
    UPDATE.

    This includes splitting out into distinct lists for
    each, calling before_insert/before_update, obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state,
    and the identity flag.

    """

    states_to_insert = []
    states_to_update = []

    for state, dict_, mapper, connection in _connections_for_states(
            base_mapper, uowtransaction,
            states):

        # a state with a key is already persistent -> UPDATE path
        has_identity = bool(state.key)
        instance_key = state.key or mapper._identity_key_from_state(state)

        row_switch = None

        # call before_XXX extensions
        if not has_identity:
            mapper.dispatch.before_insert(mapper, connection, state)
        else:
            mapper.dispatch.before_update(mapper, connection, state)

        if mapper._validate_polymorphic_identity:
            mapper._validate_polymorphic_identity(mapper, state, dict_)

        # detect if we have a "pending" instance (i.e. has
        # no instance_key attached to it), and another instance
        # with the same identity key already exists as persistent.
        # convert to an UPDATE if so.
        if not has_identity and \
                instance_key in uowtransaction.session.identity_map:
            instance = \
                uowtransaction.session.identity_map[instance_key]
            existing = attributes.instance_state(instance)
            if not uowtransaction.is_deleted(existing):
                raise orm_exc.FlushError(
                    "New instance %s with identity key %s conflicts "
                    "with persistent instance %s" %
                    (state_str(state), instance_key,
                     state_str(existing)))

            base_mapper._log_debug(
                "detected row switch for identity %s. "
                "will update %s, remove %s from "
                "transaction", instance_key,
                state_str(state), state_str(existing))

            # remove the "delete" flag from the existing element
            uowtransaction.remove_state_actions(existing)
            row_switch = existing

        # a row switch is flushed as an UPDATE against the existing row
        if not has_identity and not row_switch:
            states_to_insert.append(
                (state, dict_, mapper, connection,
                 has_identity, instance_key, row_switch)
            )
        else:
            states_to_update.append(
                (state, dict_, mapper, connection,
                 has_identity, instance_key, row_switch)
            )

    return states_to_insert, states_to_update
def _organize_states_for_post_update(base_mapper, states,
                                     uowtransaction):
    """Make an initial pass across a set of states for UPDATE
    corresponding to post_update.

    This includes obtaining key information for each state
    including its dictionary, mapper, the connection to use for
    the execution per state.

    """
    records = _connections_for_states(base_mapper, uowtransaction, states)
    return list(records)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for DELETE.

    This includes calling out before_delete and obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state.

    """
    states_to_delete = []

    for state, dict_, mapper, connection in _connections_for_states(
            base_mapper, uowtransaction,
            states):

        mapper.dispatch.before_delete(mapper, connection, state)

        # record shape: (state, dict, mapper, has_identity, connection)
        states_to_delete.append((state, dict_, mapper,
                                 bool(state.key), connection))
    return states_to_delete
def _collect_insert_commands(base_mapper, uowtransaction, table,
                             states_to_insert):
    """Identify sets of values to use in INSERT statements for a
    list of states.

    Returns a list of
    (state, dict, params, mapper, connection, value_params,
    has_all_pks, has_all_defaults) records consumed by
    _emit_insert_statements().
    """
    insert = []
    for state, state_dict, mapper, connection, has_identity, \
            instance_key, row_switch in states_to_insert:

        if table not in mapper._pks_by_table:
            continue

        pks = mapper._pks_by_table[table]

        params = {}
        value_params = {}

        has_all_pks = True
        has_all_defaults = True
        for col in mapper._cols_by_table[table]:
            if col is mapper.version_id_col and \
                    mapper.version_id_generator is not False:
                val = mapper.version_id_generator(None)
                params[col.key] = val
            else:
                # pull straight from the dict for
                # pending objects
                prop = mapper._columntoproperty[col]
                value = state_dict.get(prop.key, None)

                if value is None:
                    if col in pks:
                        # missing PK value: DB must generate it
                        has_all_pks = False
                    elif col.default is None and \
                            col.server_default is None:
                        params[col.key] = value
                    elif col.server_default is not None and \
                            mapper.base_mapper.eager_defaults:
                        has_all_defaults = False
                elif isinstance(value, sql.ClauseElement):
                    # SQL expressions are rendered inline, not bound
                    value_params[col] = value
                else:
                    params[col.key] = value

        insert.append((state, state_dict, params, mapper,
                       connection, value_params, has_all_pks,
                       has_all_defaults))
    return insert
def _collect_update_commands(base_mapper, uowtransaction,
                             table, states_to_update):
    """Identify sets of values to use in UPDATE statements for a
    list of states.

    This function works intricately with the history system
    to determine exactly what values should be updated
    as well as how the row should be matched within an UPDATE
    statement.  Includes some tricky scenarios where the primary
    key of an object might have been changed.

    Note: values keyed under ``col._label`` locate the row in the WHERE
    clause; values keyed under ``col.key`` become SET clause entries.
    """

    update = []
    for state, state_dict, mapper, connection, has_identity, \
            instance_key, row_switch in states_to_update:
        if table not in mapper._pks_by_table:
            continue

        pks = mapper._pks_by_table[table]

        params = {}
        value_params = {}

        hasdata = hasnull = False
        for col in mapper._cols_by_table[table]:
            if col is mapper.version_id_col:
                # match on the committed (pre-flush) version value
                params[col._label] = \
                    mapper._get_committed_state_attr_by_column(
                        row_switch or state,
                        row_switch and row_switch.dict
                        or state_dict,
                        col)

                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                    state, prop.key,
                    attributes.PASSIVE_NO_INITIALIZE
                )
                if history.added:
                    # version was set explicitly by the application
                    params[col.key] = history.added[0]
                    hasdata = True
                else:
                    if mapper.version_id_generator is not False:
                        val = mapper.version_id_generator(params[col._label])
                        params[col.key] = val

                        # HACK: check for history, in case the
                        # history is only
                        # in a different table than the one
                        # where the version_id_col is.
                        for prop in mapper._columntoproperty.values():
                            history = attributes.get_state_history(
                                state, prop.key,
                                attributes.PASSIVE_NO_INITIALIZE)
                            if history.added:
                                hasdata = True
            else:
                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                    state, prop.key,
                    attributes.PASSIVE_NO_INITIALIZE)
                if history.added:
                    if isinstance(history.added[0],
                                  sql.ClauseElement):
                        value_params[col] = history.added[0]
                    else:
                        value = history.added[0]
                        params[col.key] = value

                    if col in pks:
                        if history.deleted and \
                                not row_switch:
                            # if passive_updates and sync detected
                            # this was a pk->pk sync, use the new
                            # value to locate the row, since the
                            # DB would already have set this
                            if ("pk_cascaded", state, col) in \
                                    uowtransaction.attributes:
                                value = history.added[0]
                                params[col._label] = value
                            else:
                                # use the old value to
                                # locate the row
                                value = history.deleted[0]
                                params[col._label] = value
                            hasdata = True
                        else:
                            # row switch logic can reach us here
                            # remove the pk from the update params
                            # so the update doesn't
                            # attempt to include the pk in the
                            # update statement
                            del params[col.key]
                            value = history.added[0]
                            params[col._label] = value

                        if value is None:
                            hasnull = True
                    else:
                        hasdata = True
                elif col in pks:
                    # unchanged PK: still needed to locate the row
                    value = state.manager[prop.key].impl.get(
                        state, state_dict)
                    if value is None:
                        hasnull = True
                    params[col._label] = value

        if hasdata:
            if hasnull:
                raise orm_exc.FlushError(
                    "Can't update table "
                    "using NULL for primary "
                    "key value")
            update.append((state, state_dict, params, mapper,
                           connection, value_params))
    return update
def _collect_post_update_commands(base_mapper, uowtransaction, table,
                                  states_to_update, post_update_cols):
    """Identify sets of values to use in UPDATE statements for a
    list of states within a post_update operation.

    """

    update = []
    for state, state_dict, mapper, connection in states_to_update:
        if table not in mapper._pks_by_table:
            continue
        pks = mapper._pks_by_table[table]
        params = {}
        hasdata = False

        for col in mapper._cols_by_table[table]:
            if col in pks:
                # primary keys only locate the row (bound under col._label)
                params[col._label] = \
                    mapper._get_state_attr_by_column(
                        state,
                        state_dict, col)

            elif col in post_update_cols:
                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                    state, prop.key,
                    attributes.PASSIVE_NO_INITIALIZE)
                if history.added:
                    value = history.added[0]
                    params[col.key] = value
                    hasdata = True
        if hasdata:
            update.append((state, state_dict, params, mapper,
                           connection))
    return update
def _collect_delete_commands(base_mapper, uowtransaction, table,
                             states_to_delete):
    """Identify values to use in DELETE statements for a list of
    states to be deleted.

    Returns a dict mapping each connection to the list of bind-parameter
    dicts (committed PK values, plus the version id when applicable).
    """

    delete = util.defaultdict(list)

    for state, state_dict, mapper, has_identity, connection \
            in states_to_delete:
        # pending objects (no identity) have no row to delete
        if not has_identity or table not in mapper._pks_by_table:
            continue

        params = {}
        delete[connection].append(params)

        for col in mapper._pks_by_table[table]:
            params[col.key] = \
                value = \
                mapper._get_committed_state_attr_by_column(
                    state, state_dict, col)
            if value is None:
                raise orm_exc.FlushError(
                    "Can't delete from table "
                    "using NULL for primary "
                    "key value")

        if mapper.version_id_col is not None and \
                table.c.contains_column(mapper.version_id_col):
            params[mapper.version_id_col.key] = \
                mapper._get_committed_state_attr_by_column(
                    state, state_dict,
                    mapper.version_id_col)
    return delete
def _emit_update_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_update_commands().

    Raises StaleDataError when the dialect reports rowcounts and the
    matched rowcount differs from the number of UPDATE records.
    """

    needs_version_id = mapper.version_id_col is not None and \
        table.c.contains_column(mapper.version_id_col)

    def update_stmt():
        # WHERE clause matches on the PK (and version id) bound under
        # each column's ._label key
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                                       type_=col.type))

        if needs_version_id:
            clause.clauses.append(mapper.version_id_col ==
                                  sql.bindparam(mapper.version_id_col._label,
                                                type_=mapper.version_id_col.type))

        stmt = table.update(clause)
        if mapper.base_mapper.eager_defaults:
            stmt = stmt.return_defaults()
        elif mapper.version_id_col is not None:
            stmt = stmt.return_defaults(mapper.version_id_col)

        return stmt

    statement = base_mapper._memo(('update', table), update_stmt)

    rows = 0
    for state, state_dict, params, mapper, \
            connection, value_params in update:

        if value_params:
            # inline SQL expressions defeat the compiled-statement cache
            c = connection.execute(
                statement.values(value_params),
                params)
        else:
            c = cached_connections[connection].\
                execute(statement, params)

        _postfetch(
            mapper,
            uowtransaction,
            table,
            state,
            state_dict,
            c,
            c.context.compiled_parameters[0],
            value_params)
        rows += c.rowcount

    # NOTE(review): `connection`/`c` here refer to the last loop iteration;
    # callers only invoke this with a non-empty `update` list
    if connection.dialect.supports_sane_rowcount:
        if rows != len(update):
            raise orm_exc.StaleDataError(
                "UPDATE statement on table '%s' expected to "
                "update %d row(s); %d were matched." %
                (table.description, len(update), rows))

    elif needs_version_id:
        util.warn("Dialect %s does not support updated rowcount "
                  "- versioning cannot be verified." %
                  c.dialect.dialect_description,
                  stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, insert):
    """Emit INSERT statements corresponding to value lists collected
    by _collect_insert_commands().

    Records are grouped by (connection, param keys, has inline SQL,
    has_all_pks, has_all_defaults) so that compatible rows can be sent
    through a single executemany() call.
    """

    statement = base_mapper._memo(('insert', table), table.insert)

    for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
        records in groupby(insert,
                           lambda rec: (rec[4],
                                        list(rec[2].keys()),
                                        bool(rec[5]),
                                        rec[6], rec[7])
                           ):
        # executemany() path: all PKs present, no inline SQL expressions,
        # and no server-side defaults we'd need RETURNING for
        if \
            (
                has_all_defaults
                or not base_mapper.eager_defaults
                or not connection.dialect.implicit_returning
            ) and has_all_pks and not hasvalue:

            records = list(records)
            multiparams = [rec[2] for rec in records]

            c = cached_connections[connection].\
                execute(statement, multiparams)

            for (state, state_dict, params, mapper_rec,
                 conn, value_params, has_all_pks, has_all_defaults), \
                    last_inserted_params in \
                    zip(records, c.context.compiled_parameters):
                _postfetch(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    c,
                    last_inserted_params,
                    value_params)

        else:
            # row-at-a-time path; fetch server-generated values back
            if not has_all_defaults and base_mapper.eager_defaults:
                statement = statement.return_defaults()
            elif mapper.version_id_col is not None:
                statement = statement.return_defaults(mapper.version_id_col)

            for state, state_dict, params, mapper_rec, \
                    connection, value_params, \
                    has_all_pks, has_all_defaults in records:

                if value_params:
                    result = connection.execute(
                        statement.values(value_params),
                        params)
                else:
                    result = cached_connections[connection].\
                        execute(statement, params)

                primary_key = result.context.inserted_primary_key

                if primary_key is not None:
                    # set primary key attributes
                    for pk, col in zip(primary_key,
                                       mapper._pks_by_table[table]):
                        prop = mapper_rec._columntoproperty[col]
                        if state_dict.get(prop.key) is None:
                            # TODO: would rather say:
                            # state_dict[prop.key] = pk
                            mapper_rec._set_state_attr_by_column(
                                state,
                                state_dict,
                                col, pk)

                _postfetch(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    result,
                    result.context.compiled_parameters[0],
                    value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
                                 cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_post_update_commands()."""

    def update_stmt():
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                                       type_=col.type))

        return table.update(clause)

    statement = base_mapper._memo(('post_update', table), update_stmt)

    # execute each UPDATE in the order according to the original
    # list of states to guarantee row access order, but
    # also group them into common (connection, cols) sets
    # to support executemany().
    for key, grouper in groupby(
        update, lambda rec: (rec[4], list(rec[2].keys()))
    ):
        connection = key[0]
        multiparams = [params for state, state_dict,
                       params, mapper, conn in grouper]
        cached_connections[connection].\
            execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
                            mapper, table, delete):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands().

    When versioning is in effect and the dialect cannot report rowcounts
    for executemany(), rows are deleted one at a time so the matched
    count can be verified.
    """

    need_version_id = mapper.version_id_col is not None and \
        table.c.contains_column(mapper.version_id_col)

    def delete_stmt():
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col.key, type_=col.type))

        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col ==
                sql.bindparam(
                    mapper.version_id_col.key,
                    type_=mapper.version_id_col.type
                )
            )

        return table.delete(clause)

    for connection, del_objects in delete.items():
        statement = base_mapper._memo(('delete', table), delete_stmt)

        connection = cached_connections[connection]

        expected = len(del_objects)
        rows_matched = -1
        only_warn = False
        if connection.dialect.supports_sane_multi_rowcount:
            c = connection.execute(statement, del_objects)

            # rowcount without versioning only produces a warning below
            if not need_version_id:
                only_warn = True

            rows_matched = c.rowcount

        elif need_version_id:
            if connection.dialect.supports_sane_rowcount:
                rows_matched = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection.execute(statement, params)
                    rows_matched += c.rowcount
            else:
                util.warn(
                    "Dialect %s does not support deleted rowcount "
                    "- versioning cannot be verified." %
                    connection.dialect.dialect_description,
                    stacklevel=12)
                connection.execute(statement, del_objects)
        else:
            connection.execute(statement, del_objects)

        if base_mapper.confirm_deleted_rows and \
                rows_matched > -1 and expected != rows_matched:
            if only_warn:
                util.warn(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched.  Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning." %
                    (table.description, expected, rows_matched)
                )
            else:
                raise orm_exc.StaleDataError(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched.  Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning." %
                    (table.description, expected, rows_matched)
                )
def _finalize_insert_update_commands(base_mapper, uowtransaction,
                                     states_to_insert, states_to_update):
    """finalize state on states that have been inserted or updated,
    including calling after_insert/after_update events.

    """
    for state, state_dict, mapper, connection, has_identity, \
            instance_key, row_switch in states_to_insert + \
            states_to_update:

        if mapper._readonly_props:
            # expire readonly attributes so they refresh from the DB
            readonly = state.unmodified_intersection(
                [p.key for p in mapper._readonly_props
                 if p.expire_on_flush or p.key not in state.dict]
            )
            if readonly:
                state._expire_attributes(state.dict, readonly)

        # if eager_defaults option is enabled, load
        # all expired cols.  Else if we have a version_id_col, make sure
        # it isn't expired.
        toload_now = []

        if base_mapper.eager_defaults:
            toload_now.extend(state._unloaded_non_object)
        elif mapper.version_id_col is not None and \
                mapper.version_id_generator is False:
            prop = mapper._columntoproperty[mapper.version_id_col]
            if prop.key in state.unloaded:
                toload_now.extend([prop.key])

        if toload_now:
            state.key = base_mapper._identity_key_from_state(state)
            loading.load_on_ident(
                uowtransaction.session.query(base_mapper),
                state.key, refresh_state=state,
                only_load_props=toload_now)

        # call after_XXX extensions
        if not has_identity:
            mapper.dispatch.after_insert(mapper, connection, state)
        else:
            mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
               state, dict_, result, params, value_params):
    """Expire attributes in need of newly persisted database state,
    after an INSERT or UPDATE statement has proceeded for that
    state."""

    prefetch_cols = result.context.prefetch_cols
    postfetch_cols = result.context.postfetch_cols
    returning_cols = result.context.returning_cols

    if mapper.version_id_col is not None:
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]

    if returning_cols:
        # copy RETURNING values straight into the instance (non-PK cols)
        row = result.context.returned_defaults
        if row is not None:
            for col in returning_cols:
                if col.primary_key:
                    continue
                mapper._set_state_attr_by_column(state, dict_, col, row[col])

    # prefetch columns: values were computed client-side and are in params
    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            mapper._set_state_attr_by_column(state, dict_, c, params[c.key])

    # postfetch columns: generated server-side, so expire for reload
    if postfetch_cols:
        state._expire_attributes(state.dict,
                                 [mapper._columntoproperty[c].key
                                  for c in postfetch_cols if c in
                                  mapper._columntoproperty]
                                 )

    # synchronize newly inserted ids from one table to the next
    # TODO: this still goes a little too often.  would be nice to
    # have definitive list of "columns that changed" here
    for m, equated_pairs in mapper._table_to_equated[table]:
        sync.populate(state, m, state, m,
                      equated_pairs,
                      uowtransaction,
                      mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
    """Return an iterator of (state, state.dict, mapper, connection).

    The states are sorted according to _sort_states, then paired
    with the connection they should be using for the given
    unit of work transaction.

    """
    # if session has a connection callable,
    # organize individual states with the connection
    # to use for update
    if uowtransaction.session.connection_callable:
        connection_callable = \
            uowtransaction.session.connection_callable
    else:
        connection = None
        connection_callable = None

    for state in _sort_states(states):
        if connection_callable:
            # per-instance connection routing
            connection = connection_callable(base_mapper, state.obj())
        elif not connection:
            # lazily resolve a single shared connection on first use
            connection = uowtransaction.transaction.connection(
                base_mapper)

        mapper = _state_mapper(state)

        yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
    """Lazily map each Connection to a copy configured with this
    mapper's compiled-statement cache."""
    def _with_cache(conn):
        return conn.execution_options(
            compiled_cache=base_mapper._compiled_cache)

    return util.PopulateDict(_with_cache)
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
class BulkUD(object):
    """Handle bulk update and deletes via a :class:`.Query`."""

    def __init__(self, query):
        self.query = query.enable_eagerloads(False)

    @property
    def session(self):
        return self.query.session

    @classmethod
    def _factory(cls, lookup, synchronize_session, *arg):
        """Instantiate the subclass registered in *lookup* for the given
        synchronize_session strategy, raising ArgumentError for an
        unknown strategy name."""
        try:
            klass = lookup[synchronize_session]
        except KeyError:
            raise sa_exc.ArgumentError(
                "Valid strategies for session synchronization "
                "are %s" % (", ".join(sorted(repr(x)
                                             for x in lookup))))
        else:
            return klass(*arg)

    def exec_(self):
        # template method: subclasses fill in the _do_* hooks
        self._do_pre()
        self._do_pre_synchronize()
        self._do_exec()
        self._do_post_synchronize()
        self._do_post()

    def _do_pre(self):
        """Compile the query, determine the single target table, and
        autoflush if configured."""
        query = self.query
        self.context = context = query._compile_context()

        if len(context.statement.froms) != 1 or \
                not isinstance(context.statement.froms[0], schema.Table):

            self.primary_table = query._only_entity_zero(
                "This operation requires only one Table or "
                "entity be specified as the target."
            ).mapper.local_table
        else:
            self.primary_table = context.statement.froms[0]

        session = query.session

        if query._autoflush:
            session._autoflush()

    def _do_pre_synchronize(self):
        # hook: overridden by the 'evaluate' / 'fetch' strategies
        pass

    def _do_post_synchronize(self):
        # hook: overridden by the 'evaluate' / 'fetch' strategies
        pass
class BulkEvaluate(BulkUD):
    """BulkUD which does the 'evaluate' method of session state resolution."""

    def _additional_evaluators(self, evaluator_compiler):
        # hook: subclasses may compile extra evaluators (e.g. SET values)
        pass

    def _do_pre_synchronize(self):
        """Evaluate the query criteria in Python against objects in the
        identity map; collects ``self.matched_objects``."""
        query = self.query
        try:
            evaluator_compiler = evaluator.EvaluatorCompiler()
            if query.whereclause is not None:
                eval_condition = evaluator_compiler.process(
                    query.whereclause)
            else:
                def eval_condition(obj):
                    return True

            self._additional_evaluators(evaluator_compiler)

        except evaluator.UnevaluatableError:
            raise sa_exc.InvalidRequestError(
                "Could not evaluate current criteria in Python. "
                "Specify 'fetch' or False for the "
                "synchronize_session parameter.")
        target_cls = query._mapper_zero().class_

        # TODO: detect when the where clause is a trivial primary key match
        self.matched_objects = [
            obj for (cls, pk), obj in
            query.session.identity_map.items()
            if issubclass(cls, target_cls) and
            eval_condition(obj)]
class BulkFetch(BulkUD):
    """BulkUD which does the 'fetch' method of session state resolution."""

    def _do_pre_synchronize(self):
        """SELECT the primary keys of the affected rows before executing
        the bulk statement; collects ``self.matched_rows``."""
        query = self.query
        session = query.session
        select_stmt = self.context.statement.with_only_columns(
            self.primary_table.primary_key)
        self.matched_rows = session.execute(
            select_stmt,
            params=query._params).fetchall()
class BulkUpdate(BulkUD):
    """BulkUD which handles UPDATEs."""

    def __init__(self, query, values):
        super(BulkUpdate, self).__init__(query)
        self.query._no_select_modifiers("update")
        self.values = values

    @classmethod
    def factory(cls, query, synchronize_session, values):
        """Select the BulkUpdate variant for the given
        synchronize_session strategy ('evaluate', 'fetch', or False)."""
        return BulkUD._factory({
            "evaluate": BulkUpdateEvaluate,
            "fetch": BulkUpdateFetch,
            False: BulkUpdate
        }, synchronize_session, query, values)

    def _do_exec(self):
        update_stmt = sql.update(self.primary_table,
                                 self.context.whereclause, self.values)

        self.result = self.query.session.execute(
            update_stmt, params=self.query._params)
        self.rowcount = self.result.rowcount

    def _do_post(self):
        session = self.query.session
        session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
    """BulkUD which handles DELETEs."""

    def __init__(self, query):
        super(BulkDelete, self).__init__(query)
        self.query._no_select_modifiers("delete")

    @classmethod
    def factory(cls, query, synchronize_session):
        """Select the BulkDelete variant for the given
        synchronize_session strategy ('evaluate', 'fetch', or False)."""
        return BulkUD._factory({
            "evaluate": BulkDeleteEvaluate,
            "fetch": BulkDeleteFetch,
            False: BulkDelete
        }, synchronize_session, query)

    def _do_exec(self):
        delete_stmt = sql.delete(self.primary_table,
                                 self.context.whereclause)

        self.result = self.query.session.execute(delete_stmt,
                                                 params=self.query._params)
        self.rowcount = self.result.rowcount

    def _do_post(self):
        session = self.query.session
        session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
    """BulkUD which handles UPDATEs using the "evaluate"
    method of session resolution."""

    def _additional_evaluators(self, evaluator_compiler):
        # compile a Python evaluator for each SET value so in-session
        # objects can be updated without a reload
        self.value_evaluators = {}
        for key, value in self.values.items():
            key = _attr_as_key(key)
            self.value_evaluators[key] = evaluator_compiler.process(
                expression._literal_as_binds(value))

    def _do_post_synchronize(self):
        """Apply the evaluated SET values to matched in-session objects
        and mark their states as altered."""
        session = self.query.session
        states = set()
        evaluated_keys = list(self.value_evaluators.keys())
        for obj in self.matched_objects:
            state, dict_ = attributes.instance_state(obj),\
                attributes.instance_dict(obj)

            # only evaluate unmodified attributes
            to_evaluate = state.unmodified.intersection(
                evaluated_keys)
            for key in to_evaluate:
                dict_[key] = self.value_evaluators[key](obj)

            state._commit(dict_, list(to_evaluate))

            # expire attributes with pending changes
            # (there was no autoflush, so they are overwritten)
            state._expire_attributes(dict_,
                                     set(evaluated_keys).
                                     difference(to_evaluate))
            states.add(state)
        session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
    """Bulk DELETE using the "evaluate" strategy: purge the
    already-matched objects from the session."""

    def _do_post_synchronize(self):
        # Every object matched during _do_pre_synchronize was deleted;
        # drop its state from the owning session as well.
        states = []
        for obj in self.matched_objects:
            states.append(attributes.instance_state(obj))
        self.query.session._remove_newly_deleted(states)
class BulkUpdateFetch(BulkFetch, BulkUpdate):
    """BulkUD which handles UPDATEs using the "fetch"
    method of session resolution."""

    def _do_post_synchronize(self):
        """Expire the updated attributes on objects matched by primary key."""
        session = self.query.session
        target_mapper = self.query._mapper_zero()

        # Translate the fetched primary keys into identity-map keys, keeping
        # only those objects that are actually present in this session.
        states = set([
            attributes.instance_state(session.identity_map[identity_key])
            for identity_key in [
                target_mapper.identity_key_from_primary_key(
                    list(primary_key))
                for primary_key in self.matched_rows
            ]
            if identity_key in session.identity_map
        ])
        attrib = [_attr_as_key(k) for k in self.values]
        for state in states:
            # Expire only the SET columns so they reload on next access.
            session._expire_state(state, attrib)
        session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
    """BulkUD which handles DELETEs using the "fetch"
    method of session resolution."""

    def _do_post_synchronize(self):
        """Evict objects matching the pre-fetched primary keys from the session."""
        session = self.query.session
        target_mapper = self.query._mapper_zero()
        for primary_key in self.matched_rows:
            # TODO: inline this and call remove_newly_deleted
            # once
            identity_key = target_mapper.identity_key_from_primary_key(
                list(primary_key))
            if identity_key in session.identity_map:
                session._remove_newly_deleted(
                    [attributes.instance_state(
                        session.identity_map[identity_key]
                    )]
                )
|
saltduck/python-bitcoinlib | refs/heads/master | bitcoin/core/scripteval.py | 3 | # Copyright (C) 2012-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
"""Script evaluation
Be warned that there are highly likely to be consensus bugs in this code; it is
unlikely to match Satoshi Bitcoin exactly. Think carefully before using this
module.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
# Python 2/3 compatibility shim: on Python 2 indexing bytes yields 1-char
# strings (ord() needed to get the integer), on Python 3 it already yields
# ints; Python 3 also has no separate `long` type.
_bord = ord
if sys.version > '3':
    long = int
    _bord = lambda x: x
import hashlib
import bitcoin.core
import bitcoin.core._bignum
import bitcoin.core.key
import bitcoin.core.serialize
# Importing everything for simplicity; note that we use __all__ at the end so
# we're not exporting the whole contents of the script module.
from bitcoin.core.script import *
# Evaluation limits
MAX_NUM_SIZE = 4          # max byte length of a value interpreted as a number
MAX_STACK_ITEMS = 1000    # max combined size of stack + altstack

# Script verification flags: unique sentinel objects, tested with `in flags`.
SCRIPT_VERIFY_P2SH = object()
SCRIPT_VERIFY_STRICTENC = object()
SCRIPT_VERIFY_EVEN_S = object()
SCRIPT_VERIFY_NOCACHE = object()
class EvalScriptError(bitcoin.core.ValidationError):
    """Base class for exceptions raised when a script fails during EvalScript()

    The execution state just prior to the opcode that raised is saved on the
    exception (if available).
    """
    def __init__(self,
                 msg,
                 sop=None, sop_data=None, sop_pc=None,
                 stack=None, scriptIn=None, txTo=None, inIdx=None, flags=None,
                 altstack=None, vfExec=None, pbegincodehash=None, nOpCount=None):
        super(EvalScriptError, self).__init__('EvalScript: %s' % msg)

        # Snapshot of the interpreter state for debugging: the opcode being
        # executed, its pushdata and offset, both stacks, the conditional
        # execution stack, and the opcode-count accumulator.
        self.sop = sop
        self.sop_data = sop_data
        self.sop_pc = sop_pc
        self.stack = stack
        self.scriptIn = scriptIn
        self.txTo = txTo
        self.inIdx = inIdx
        self.flags = flags
        self.altstack = altstack
        self.vfExec = vfExec
        self.pbegincodehash = pbegincodehash
        self.nOpCount = nOpCount
class MaxOpCountError(EvalScriptError):
    """Raised when a script exceeds MAX_SCRIPT_OPCODES executed opcodes."""
    def __init__(self, **kwargs):
        super(MaxOpCountError, self).__init__('max opcode count exceeded', **kwargs)
class MissingOpArgumentsError(EvalScriptError):
    """Missing arguments"""
    def __init__(self, opcode, s, n, **kwargs):
        # s is the (alt)stack the opcode tried to read; n the required depth.
        super(MissingOpArgumentsError, self).__init__(
            'missing arguments for %s; need %d items, but only %d on stack' %
            (OPCODE_NAMES[opcode], n, len(s)),
            **kwargs)
class ArgumentsInvalidError(EvalScriptError):
    """Arguments are invalid"""
    def __init__(self, opcode, msg, **kwargs):
        super(ArgumentsInvalidError, self).__init__(
            '%s args invalid: %s' % (OPCODE_NAMES[opcode], msg),
            **kwargs)
class VerifyOpFailedError(EvalScriptError):
    """A VERIFY opcode failed"""
    def __init__(self, opcode, **kwargs):
        super(VerifyOpFailedError, self).__init__('%s failed' % OPCODE_NAMES[opcode],
                                                  **kwargs)
def _CastToBigNum(s, err_raiser):
    """Interpret byte vector *s* as a script integer and return it.

    s          - serialized number (variable-length, sign-and-magnitude)
    err_raiser - error helper called as err_raiser(cls, *args); it raises
                 and never returns.

    Raises EvalScriptError (via err_raiser) if s exceeds MAX_NUM_SIZE bytes.
    """
    # Validate the length before converting: no point decoding an oversized
    # value.  Note err_raiser() raises itself, so no `raise` prefix is used;
    # the previous `raise err_raiser(...)` form was dead code that would have
    # been a TypeError (raising None) had err_raiser ever returned.
    if len(s) > MAX_NUM_SIZE:
        err_raiser(EvalScriptError, 'CastToBigNum() : overflow')
    return bitcoin.core._bignum.vch2bn(s)
def _CastToBool(s):
    """Interpret byte vector *s* as a boolean.

    Zero (any length, all zero bytes) is False, as is "negative zero":
    all zero bytes except a final 0x80.  Everything else is True.
    """
    last_index = len(s) - 1
    for i, raw in enumerate(s):
        value = _bord(raw)
        if value == 0:
            continue
        # First non-zero byte decides: a lone sign bit (0x80) in the final
        # byte is negative zero, which still counts as False.
        return not (i == last_index and value == 0x80)
    return False
def _CheckSig(sig, pubkey, script, txTo, inIdx, err_raiser):
    """Verify signature *sig* by *pubkey* over the signature hash of
    (script, txTo, inIdx).  Returns True/False; does not raise on a bad
    signature."""
    key = bitcoin.core.key.CECKey()
    key.set_pubkey(pubkey)

    # An empty signature can never be valid.
    if len(sig) == 0:
        return False
    # The last byte of a script signature is the hashtype (SIGHASH_*).
    hashtype = _bord(sig[-1])
    sig = sig[:-1]

    # Raw signature hash due to the SIGHASH_SINGLE bug
    #
    # Note that we never raise an exception if RawSignatureHash() returns an
    # error code. However the first error code case, where inIdx >=
    # len(txTo.vin), shouldn't ever happen during EvalScript() as that would
    # imply the scriptSig being checked doesn't correspond to a valid txout -
    # that should cause other validation machinery to fail long before we ever
    # got here.
    (h, err) = RawSignatureHash(script, txTo, inIdx, hashtype)

    return key.verify(h, sig)
def _CheckMultiSig(opcode, script, stack, txTo, inIdx, err_raiser, nOpCount):
    """Execute OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY.

    Stack layout (top last): <dummy> <sig>... <sigs_count> <pubkey>... <keys_count>
    nOpCount is a 1-element list so the key count can be charged against the
    caller's opcode budget.  Consensus-critical: statement order matters.
    """
    # i tracks how many stack items the operation consumes in total.
    i = 1
    if len(stack) < i:
        err_raiser(MissingOpArgumentsError, opcode, stack, i)

    keys_count = _CastToBigNum(stack[-i], err_raiser)
    if keys_count < 0 or keys_count > 20:
        err_raiser(ArgumentsInvalidError, opcode, "keys count invalid")
    i += 1
    ikey = i
    i += keys_count
    # Each declared key counts against the per-script opcode limit.
    nOpCount[0] += keys_count
    if nOpCount[0] > MAX_SCRIPT_OPCODES:
        err_raiser(MaxOpCountError)
    if len(stack) < i:
        err_raiser(ArgumentsInvalidError, opcode, "not enough keys on stack")

    sigs_count = _CastToBigNum(stack[-i], err_raiser)
    if sigs_count < 0 or sigs_count > keys_count:
        err_raiser(ArgumentsInvalidError, opcode, "sigs count invalid")
    i += 1
    isig = i
    i += sigs_count

    # The extra item accounts for the historical off-by-one "dummy" element.
    if len(stack) < i-1:
        raise err_raiser(ArgumentsInvalidError, opcode, "not enough sigs on stack")
    elif len(stack) < i:
        raise err_raiser(ArgumentsInvalidError, opcode, "missing dummy value")

    # Drop the signature, since there's no way for a signature to sign itself
    #
    # Of course, this can only come up in very contrived cases now that
    # scriptSig and scriptPubKey are processed separately.
    for k in range(sigs_count):
        sig = stack[-isig - k]
        script = FindAndDelete(script, CScript([sig]))

    success = True

    # Greedy match: each signature must verify against some key, consuming
    # keys left-to-right; fails once fewer keys than signatures remain.
    while success and sigs_count > 0:
        sig = stack[-isig]
        pubkey = stack[-ikey]

        if _CheckSig(sig, pubkey, script, txTo, inIdx, err_raiser):
            isig += 1
            sigs_count -= 1

        ikey += 1
        keys_count -= 1

        if sigs_count > keys_count:
            success = False

            # with VERIFY bail now before we modify the stack
            if opcode == OP_CHECKMULTISIGVERIFY:
                err_raiser(VerifyOpFailedError, opcode)

    # Consume all arguments (keys, sigs, counts, and the dummy element).
    while i > 0:
        stack.pop()
        i -= 1

    if opcode == OP_CHECKMULTISIG:
        if success:
            stack.append(b"\x01")
        else:
            stack.append(b"\x00")
# Single-operand arithmetic opcodes dispatched to _UnaryOp().
# OP_2MUL and OP_2DIV are *not* included in this list as they are disabled
_ISA_UNOP = {
    OP_1ADD,
    OP_1SUB,
    OP_NEGATE,
    OP_ABS,
    OP_NOT,
    OP_0NOTEQUAL,
}
def _UnaryOp(opcode, stack, err_raiser):
    """Apply a one-operand arithmetic opcode to the top stack item,
    replacing it with the serialized result."""
    if len(stack) < 1:
        err_raiser(MissingOpArgumentsError, opcode, stack, 1)
    operand = _CastToBigNum(stack[-1], err_raiser)
    stack.pop()

    if opcode == OP_1ADD:
        result = operand + 1
    elif opcode == OP_1SUB:
        result = operand - 1
    elif opcode == OP_NEGATE:
        result = -operand
    elif opcode == OP_ABS:
        result = -operand if operand < 0 else operand
    elif opcode == OP_NOT:
        result = long(operand == 0)
    elif opcode == OP_0NOTEQUAL:
        result = long(operand != 0)
    else:
        raise AssertionError("Unknown unary opcode encountered; this should not happen")

    stack.append(bitcoin.core._bignum.bn2vch(result))
# Two-operand arithmetic/comparison opcodes dispatched to _BinOp().
# OP_LSHIFT and OP_RSHIFT are *not* included in this list as they are disabled
_ISA_BINOP = {
    OP_ADD,
    OP_SUB,
    OP_BOOLAND,
    OP_BOOLOR,
    OP_NUMEQUAL,
    OP_NUMEQUALVERIFY,
    OP_NUMNOTEQUAL,
    OP_LESSTHAN,
    OP_GREATERTHAN,
    OP_LESSTHANOREQUAL,
    OP_GREATERTHANOREQUAL,
    OP_MIN,
    OP_MAX,
}
def _BinOp(opcode, stack, err_raiser):
    """Apply a two-operand opcode to the top two stack items
    (bn1 below bn2), replacing both with the serialized result."""
    if len(stack) < 2:
        err_raiser(MissingOpArgumentsError, opcode, stack, 2)

    bn2 = _CastToBigNum(stack[-1], err_raiser)
    bn1 = _CastToBigNum(stack[-2], err_raiser)

    # We don't pop the stack yet so that OP_NUMEQUALVERIFY can raise
    # VerifyOpFailedError with a correct stack.

    if opcode == OP_ADD:
        bn = bn1 + bn2
    elif opcode == OP_SUB:
        bn = bn1 - bn2
    elif opcode == OP_BOOLAND:
        bn = long(bn1 != 0 and bn2 != 0)
    elif opcode == OP_BOOLOR:
        bn = long(bn1 != 0 or bn2 != 0)
    elif opcode == OP_NUMEQUAL:
        bn = long(bn1 == bn2)
    elif opcode == OP_NUMEQUALVERIFY:
        bn = long(bn1 == bn2)
        if not bn:
            err_raiser(VerifyOpFailedError, opcode)
        else:
            # No exception, so time to pop the stack
            stack.pop()
            stack.pop()
            return
    elif opcode == OP_NUMNOTEQUAL:
        bn = long(bn1 != bn2)
    elif opcode == OP_LESSTHAN:
        bn = long(bn1 < bn2)
    elif opcode == OP_GREATERTHAN:
        bn = long(bn1 > bn2)
    elif opcode == OP_LESSTHANOREQUAL:
        bn = long(bn1 <= bn2)
    elif opcode == OP_GREATERTHANOREQUAL:
        bn = long(bn1 >= bn2)
    elif opcode == OP_MIN:
        if bn1 < bn2:
            bn = bn1
        else:
            bn = bn2
    elif opcode == OP_MAX:
        if bn1 > bn2:
            bn = bn1
        else:
            bn = bn2
    else:
        raise AssertionError("Unknown binop opcode encountered; this should not happen")

    stack.pop()
    stack.pop()
    stack.append(bitcoin.core._bignum.bn2vch(bn))
def _CheckExec(vfExec):
for b in vfExec:
if not b:
return False
return True
def _EvalScript(stack, scriptIn, txTo, inIdx, flags=()):
    """Evaluate a script

    stack    - initial stack; modified in place
    scriptIn - CScript to execute
    txTo     - transaction the script belongs to (for signature checking)
    inIdx    - index of the txin whose scriptSig is being evaluated
    flags    - SCRIPT_VERIFY_* flags

    Consensus-critical interpreter loop; raises EvalScriptError subclasses
    on any failure.
    """
    if len(scriptIn) > MAX_SCRIPT_SIZE:
        raise EvalScriptError('script too large; got %d bytes; maximum %d bytes' %
                              (len(scriptIn), MAX_SCRIPT_SIZE),
                              stack=stack,
                              scriptIn=scriptIn,
                              txTo=txTo,
                              inIdx=inIdx,
                              flags=flags)

    altstack = []
    vfExec = []               # stack of IF/NOTIF branch-taken flags
    pbegincodehash = 0        # offset of last OP_CODESEPARATOR (sig hashing)
    nOpCount = [0]            # list so nested helpers can mutate the count
    for (sop, sop_data, sop_pc) in scriptIn.raw_iter():
        fExec = _CheckExec(vfExec)

        def err_raiser(cls, *args):
            """Helper function for raising EvalScriptError exceptions

            cls - subclass you want to raise
            *args - arguments

            Fills in the state of execution for you.
            """
            raise cls(*args,
                      sop=sop,
                      sop_data=sop_data,
                      sop_pc=sop_pc,
                      stack=stack, scriptIn=scriptIn, txTo=txTo, inIdx=inIdx, flags=flags,
                      altstack=altstack, vfExec=vfExec, pbegincodehash=pbegincodehash, nOpCount=nOpCount[0])

        if sop in DISABLED_OPCODES:
            err_raiser(EvalScriptError, 'opcode %s is disabled' % OPCODE_NAMES[sop])

        # Only opcodes above OP_16 count against the opcode budget.
        if sop > OP_16:
            nOpCount[0] += 1
            if nOpCount[0] > MAX_SCRIPT_OPCODES:
                err_raiser(MaxOpCountError)

        def check_args(n):
            # Ensure at least n items are on the stack before an opcode runs.
            if len(stack) < n:
                err_raiser(MissingOpArgumentsError, sop, stack, n)

        if sop <= OP_PUSHDATA4:
            if len(sop_data) > MAX_SCRIPT_ELEMENT_SIZE:
                err_raiser(EvalScriptError,
                           'PUSHDATA of length %d; maximum allowed is %d' %
                           (len(sop_data), MAX_SCRIPT_ELEMENT_SIZE))

            elif fExec:
                stack.append(sop_data)
                continue

        elif fExec or (OP_IF <= sop <= OP_ENDIF):
            # IF/NOTIF/ELSE/ENDIF must be processed even in non-executing
            # branches so the conditional nesting stays balanced.

            if sop == OP_1NEGATE or ((sop >= OP_1) and (sop <= OP_16)):
                v = sop - (OP_1 - 1)
                stack.append(bitcoin.core._bignum.bn2vch(v))

            elif sop in _ISA_BINOP:
                _BinOp(sop, stack, err_raiser)

            elif sop in _ISA_UNOP:
                _UnaryOp(sop, stack, err_raiser)

            elif sop == OP_2DROP:
                check_args(2)
                stack.pop()
                stack.pop()

            elif sop == OP_2DUP:
                check_args(2)
                v1 = stack[-2]
                v2 = stack[-1]
                stack.append(v1)
                stack.append(v2)

            elif sop == OP_2OVER:
                check_args(4)
                v1 = stack[-4]
                v2 = stack[-3]
                stack.append(v1)
                stack.append(v2)

            elif sop == OP_2ROT:
                check_args(6)
                v1 = stack[-6]
                v2 = stack[-5]
                del stack[-6]
                del stack[-5]
                stack.append(v1)
                stack.append(v2)

            elif sop == OP_2SWAP:
                check_args(4)
                tmp = stack[-4]
                stack[-4] = stack[-2]
                stack[-2] = tmp

                tmp = stack[-3]
                stack[-3] = stack[-1]
                stack[-1] = tmp

            elif sop == OP_3DUP:
                check_args(3)
                v1 = stack[-3]
                v2 = stack[-2]
                v3 = stack[-1]
                stack.append(v1)
                stack.append(v2)
                stack.append(v3)

            elif sop == OP_CHECKMULTISIG or sop == OP_CHECKMULTISIGVERIFY:
                # Signatures only cover the script from the last
                # OP_CODESEPARATOR onwards.
                tmpScript = CScript(scriptIn[pbegincodehash:])
                _CheckMultiSig(sop, tmpScript, stack, txTo, inIdx, err_raiser, nOpCount)

            elif sop == OP_CHECKSIG or sop == OP_CHECKSIGVERIFY:
                check_args(2)
                vchPubKey = stack[-1]
                vchSig = stack[-2]
                tmpScript = CScript(scriptIn[pbegincodehash:])

                # Drop the signature, since there's no way for a signature to sign itself
                #
                # Of course, this can only come up in very contrived cases now that
                # scriptSig and scriptPubKey are processed separately.
                tmpScript = FindAndDelete(tmpScript, CScript([vchSig]))

                ok = _CheckSig(vchSig, vchPubKey, tmpScript, txTo, inIdx,
                               err_raiser)
                if not ok and sop == OP_CHECKSIGVERIFY:
                    err_raiser(VerifyOpFailedError, sop)

                else:
                    stack.pop()
                    stack.pop()

                    if ok:
                        if sop != OP_CHECKSIGVERIFY:
                            stack.append(b"\x01")
                    else:
                        stack.append(b"\x00")

            elif sop == OP_CODESEPARATOR:
                pbegincodehash = sop_pc

            elif sop == OP_DEPTH:
                bn = len(stack)
                stack.append(bitcoin.core._bignum.bn2vch(bn))

            elif sop == OP_DROP:
                check_args(1)
                stack.pop()

            elif sop == OP_DUP:
                check_args(1)
                v = stack[-1]
                stack.append(v)

            elif sop == OP_ELSE:
                if len(vfExec) == 0:
                    err_raiser(EvalScriptError, 'ELSE found without prior IF')
                vfExec[-1] = not vfExec[-1]

            elif sop == OP_ENDIF:
                if len(vfExec) == 0:
                    err_raiser(EvalScriptError, 'ENDIF found without prior IF')
                vfExec.pop()

            elif sop == OP_EQUAL:
                check_args(2)
                v1 = stack.pop()
                v2 = stack.pop()

                if v1 == v2:
                    stack.append(b"\x01")
                else:
                    stack.append(b"\x00")

            elif sop == OP_EQUALVERIFY:
                check_args(2)
                v1 = stack[-1]
                v2 = stack[-2]

                if v1 == v2:
                    stack.pop()
                    stack.pop()
                else:
                    err_raiser(VerifyOpFailedError, sop)

            elif sop == OP_FROMALTSTACK:
                if len(altstack) < 1:
                    err_raiser(MissingOpArgumentsError, sop, altstack, 1)
                v = altstack.pop()
                stack.append(v)

            elif sop == OP_HASH160:
                check_args(1)
                stack.append(bitcoin.core.serialize.Hash160(stack.pop()))

            elif sop == OP_HASH256:
                check_args(1)
                stack.append(bitcoin.core.serialize.Hash(stack.pop()))

            elif sop == OP_IF or sop == OP_NOTIF:
                # Only pop the condition when this branch is executing;
                # a False is pushed otherwise to keep nesting balanced.
                val = False

                if fExec:
                    check_args(1)
                    vch = stack.pop()
                    val = _CastToBool(vch)
                    if sop == OP_NOTIF:
                        val = not val

                vfExec.append(val)

            elif sop == OP_IFDUP:
                check_args(1)
                vch = stack[-1]
                if _CastToBool(vch):
                    stack.append(vch)

            elif sop == OP_NIP:
                check_args(2)
                del stack[-2]

            elif sop == OP_NOP or (sop >= OP_NOP1 and sop <= OP_NOP10):
                pass

            elif sop == OP_OVER:
                check_args(2)
                vch = stack[-2]
                stack.append(vch)

            elif sop == OP_PICK or sop == OP_ROLL:
                check_args(2)
                n = _CastToBigNum(stack.pop(), err_raiser)
                if n < 0 or n >= len(stack):
                    err_raiser(EvalScriptError, "Argument for %s out of bounds" % OPCODE_NAMES[sop])
                vch = stack[-n-1]
                if sop == OP_ROLL:
                    del stack[-n-1]
                stack.append(vch)

            elif sop == OP_RETURN:
                err_raiser(EvalScriptError, "OP_RETURN called")

            elif sop == OP_RIPEMD160:
                check_args(1)

                h = hashlib.new('ripemd160')
                h.update(stack.pop())
                stack.append(h.digest())

            elif sop == OP_ROT:
                check_args(3)
                tmp = stack[-3]
                stack[-3] = stack[-2]
                stack[-2] = tmp

                tmp = stack[-2]
                stack[-2] = stack[-1]
                stack[-1] = tmp

            elif sop == OP_SIZE:
                check_args(1)
                bn = len(stack[-1])
                stack.append(bitcoin.core._bignum.bn2vch(bn))

            elif sop == OP_SHA1:
                check_args(1)
                stack.append(hashlib.sha1(stack.pop()).digest())

            elif sop == OP_SHA256:
                check_args(1)
                stack.append(hashlib.sha256(stack.pop()).digest())

            elif sop == OP_SWAP:
                check_args(2)
                tmp = stack[-2]
                stack[-2] = stack[-1]
                stack[-1] = tmp

            elif sop == OP_TOALTSTACK:
                check_args(1)
                v = stack.pop()
                altstack.append(v)

            elif sop == OP_TUCK:
                check_args(2)
                vch = stack[-1]
                stack.insert(len(stack) - 2, vch)

            elif sop == OP_VERIFY:
                check_args(1)
                v = _CastToBool(stack[-1])
                if v:
                    stack.pop()
                else:
                    # NOTE(review): the `raise` prefix is redundant —
                    # err_raiser() raises itself and never returns.
                    raise err_raiser(VerifyOpFailedError, sop)

            elif sop == OP_WITHIN:
                check_args(3)
                bn3 = _CastToBigNum(stack[-1], err_raiser)
                bn2 = _CastToBigNum(stack[-2], err_raiser)
                bn1 = _CastToBigNum(stack[-3], err_raiser)
                stack.pop()
                stack.pop()
                stack.pop()
                v = (bn2 <= bn1) and (bn1 < bn3)
                if v:
                    stack.append(b"\x01")
                else:
                    stack.append(b"\x00")

            else:
                err_raiser(EvalScriptError, 'unsupported opcode 0x%x' % sop)

        # size limits
        if len(stack) + len(altstack) > MAX_STACK_ITEMS:
            err_raiser(EvalScriptError, 'max stack items limit reached')

    # Unterminated IF/NOTIF/ELSE block
    if len(vfExec):
        raise EvalScriptError('Unterminated IF/ELSE block',
                              stack=stack,
                              scriptIn=scriptIn,
                              txTo=txTo,
                              inIdx=inIdx,
                              flags=flags)
def EvalScript(stack, scriptIn, txTo, inIdx, flags=()):
    """Evaluate a script

    stack    - Initial stack
    scriptIn - Script
    txTo     - Transaction the script is a part of
    inIdx    - txin index of the scriptSig
    flags    - SCRIPT_VERIFY_* flags to apply
    """
    try:
        _EvalScript(stack, scriptIn, txTo, inIdx, flags=flags)
    except CScriptInvalidError as err:
        # Re-wrap malformed-script errors so callers only need to handle
        # EvalScriptError (both are ValidationError subclasses).
        raise EvalScriptError(repr(err),
                              stack=stack,
                              scriptIn=scriptIn,
                              txTo=txTo,
                              inIdx=inIdx,
                              flags=flags)
class VerifyScriptError(bitcoin.core.ValidationError):
    """Raised by VerifyScript() when a scriptSig fails to satisfy a scriptPubKey."""
    pass
def VerifyScript(scriptSig, scriptPubKey, txTo, inIdx, flags=()):
    """Verify a scriptSig satisfies a scriptPubKey

    scriptSig    - Signature
    scriptPubKey - PubKey
    txTo         - Spending transaction
    inIdx        - Index of the transaction input containing scriptSig
    flags        - iterable of SCRIPT_VERIFY_* flags

    Raises a ValidationError subclass if the validation fails.
    """
    stack = []
    EvalScript(stack, scriptSig, txTo, inIdx, flags=flags)
    if SCRIPT_VERIFY_P2SH in flags:
        # Preserve the scriptSig result for the P2SH inner-script evaluation.
        stackCopy = list(stack)
    EvalScript(stack, scriptPubKey, txTo, inIdx, flags=flags)
    if len(stack) == 0:
        raise VerifyScriptError("scriptPubKey left an empty stack")
    if not _CastToBool(stack[-1]):
        raise VerifyScriptError("scriptPubKey returned false")

    # Additional validation for spend-to-script-hash transactions
    if SCRIPT_VERIFY_P2SH in flags and scriptPubKey.is_p2sh():
        if not scriptSig.is_push_only():
            raise VerifyScriptError("P2SH scriptSig not is_push_only()")

        # stackCopy cannot be empty here, because if it was the
        # P2SH HASH <> EQUAL scriptPubKey would be evaluated with
        # an empty stack and the EvalScript above would return false.
        assert len(stackCopy)

        # The top pushed item is the serialized redeem script; run it too.
        pubKey2 = CScript(stackCopy.pop())

        EvalScript(stackCopy, pubKey2, txTo, inIdx, flags=flags)

        if not len(stackCopy):
            raise VerifyScriptError("P2SH inner scriptPubKey left an empty stack")

        if not _CastToBool(stackCopy[-1]):
            raise VerifyScriptError("P2SH inner scriptPubKey returned false")
class VerifySignatureError(bitcoin.core.ValidationError):
    """Raised by VerifySignature() when a txin cannot spend the referenced txout."""
    pass
def VerifySignature(txFrom, txTo, inIdx):
    """Verify a scriptSig signature can spend a txout

    Verifies that the scriptSig in txTo.vin[inIdx] is a valid scriptSig for the
    corresponding COutPoint in transaction txFrom.

    Raises VerifySignatureError on bad indexes/prevout mismatch and lets
    VerifyScript() raise for script failures.
    """
    if inIdx < 0:
        raise VerifySignatureError("inIdx negative")
    if inIdx >= len(txTo.vin):
        raise VerifySignatureError("inIdx >= len(txTo.vin)")
    txin = txTo.vin[inIdx]

    if txin.prevout.n < 0:
        raise VerifySignatureError("txin prevout.n negative")
    if txin.prevout.n >= len(txFrom.vout):
        raise VerifySignatureError("txin prevout.n >= len(txFrom.vout)")
    txout = txFrom.vout[txin.prevout.n]

    # The input must actually reference txFrom.
    if txin.prevout.hash != txFrom.GetHash():
        raise VerifySignatureError("prevout hash does not match txFrom")

    VerifyScript(txin.scriptSig, txout.scriptPubKey, txTo, inIdx)
# Explicit public API; the _-prefixed evaluation helpers stay internal.
__all__ = (
    'MAX_STACK_ITEMS',
    'SCRIPT_VERIFY_P2SH',
    'SCRIPT_VERIFY_STRICTENC',
    'SCRIPT_VERIFY_EVEN_S',
    'SCRIPT_VERIFY_NOCACHE',
    'EvalScriptError',
    'MaxOpCountError',
    'MissingOpArgumentsError',
    'ArgumentsInvalidError',
    'VerifyOpFailedError',
    'EvalScript',
    'VerifyScriptError',
    'VerifyScript',
    'VerifySignatureError',
    'VerifySignature',
)
|
MarcosCommunity/odoo | refs/heads/marcos-8.0 | addons/account/report/account_balance.py | 198 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class account_balance(report_sxw.rml_parse, common_report_header):
    """RML parser for the trial balance report: walks the account chart and
    accumulates debit/credit/balance per account."""
    _name = 'report.account.account.balance'

    def __init__(self, cr, uid, name, context=None):
        super(account_balance, self).__init__(cr, uid, name, context=context)
        # Running totals accumulated while lines() walks the accounts.
        self.sum_debit = 0.00
        self.sum_credit = 0.00
        self.date_lst = []
        self.date_lst_string = ''
        self.result_acc = []
        # Helpers exposed to the report template.
        self.localcontext.update({
            'time': time,
            'lines': self.lines,
            'sum_debit': self._sum_debit,
            'sum_credit': self._sum_credit,
            'get_fiscalyear': self._get_fiscalyear,
            'get_filter': self._get_filter,
            'get_start_period': self.get_start_period,
            'get_end_period': self.get_end_period,
            'get_account': self._get_account,
            'get_journal': self._get_journal,
            'get_start_date': self._get_start_date,
            'get_end_date': self._get_end_date,
            'get_target_move': self._get_target_move,
        })
        self.context = context

    def set_context(self, objects, data, ids, report_type=None):
        """When launched from the menu, resolve the chart account from the
        wizard form instead of the passed ids."""
        new_ids = ids
        if (data['model'] == 'ir.ui.menu'):
            new_ids = 'chart_account_id' in data['form'] and [data['form']['chart_account_id']] or []
            objects = self.pool.get('account.account').browse(self.cr, self.uid, new_ids)
        return super(account_balance, self).set_context(objects, data, new_ids, report_type=report_type)

    def lines(self, form, ids=None, done=None):
        """Build the report lines: one dict per account, walking the account
        hierarchy depth-first from each requested parent.

        form - wizard values (fiscal year, filter, display_account mode...)
        """
        def _process_child(accounts, disp_acc, parent):
            # Recursively append `parent` and its children to self.result_acc,
            # filtered by the display mode (movement / not_zero / all).
            account_rec = [acct for acct in accounts if acct['id'] == parent][0]
            currency_obj = self.pool.get('res.currency')
            acc_id = self.pool.get('account.account').browse(self.cr, self.uid, account_rec['id'])
            # Account currency falls back to the company currency.
            currency = acc_id.currency_id and acc_id.currency_id or acc_id.company_id.currency_id
            res = {
                'id': account_rec['id'],
                'type': account_rec['type'],
                'code': account_rec['code'],
                'name': account_rec['name'],
                'level': account_rec['level'],
                'debit': account_rec['debit'],
                'credit': account_rec['credit'],
                'balance': account_rec['balance'],
                'parent_id': account_rec['parent_id'],
                'bal_type': '',
            }
            self.sum_debit += account_rec['debit']
            self.sum_credit += account_rec['credit']
            if disp_acc == 'movement':
                if not currency_obj.is_zero(self.cr, self.uid, currency, res['credit']) or not currency_obj.is_zero(self.cr, self.uid, currency, res['debit']) or not currency_obj.is_zero(self.cr, self.uid, currency, res['balance']):
                    self.result_acc.append(res)
            elif disp_acc == 'not_zero':
                if not currency_obj.is_zero(self.cr, self.uid, currency, res['balance']):
                    self.result_acc.append(res)
            else:
                self.result_acc.append(res)
            if account_rec['child_id']:
                for child in account_rec['child_id']:
                    _process_child(accounts, disp_acc, child)

        obj_account = self.pool.get('account.account')
        if not ids:
            ids = self.ids
        if not ids:
            return []
        if not done:
            done = {}

        # Pass the wizard filters to the ORM via the context so that
        # debit/credit/balance are computed over the selected scope.
        ctx = self.context.copy()
        ctx['fiscalyear'] = form['fiscalyear_id']
        if form['filter'] == 'filter_period':
            ctx['period_from'] = form['period_from']
            ctx['period_to'] = form['period_to']
        elif form['filter'] == 'filter_date':
            ctx['date_from'] = form['date_from']
            ctx['date_to'] = form['date_to']
        ctx['state'] = form['target_move']
        parents = ids
        child_ids = obj_account._get_children_and_consol(self.cr, self.uid, ids, ctx)
        if child_ids:
            ids = child_ids
        accounts = obj_account.read(self.cr, self.uid, ids, ['type', 'code', 'name', 'debit', 'credit', 'balance', 'parent_id', 'level', 'child_id'], ctx)

        for parent in parents:
            if parent in done:
                continue
            done[parent] = 1
            _process_child(accounts, form['display_account'], parent)
        return self.result_acc
class report_trialbalance(osv.AbstractModel):
    """QWeb report wrapper exposing the account_balance parser under the
    'account.report_trialbalance' template."""
    _name = 'report.account.report_trialbalance'
    _inherit = 'report.abstract_report'
    _template = 'account.report_trialbalance'
    _wrapped_report_class = account_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
montoyjh/catmap | refs/heads/master | catmap/scalers/thermodynamic_scaler.py | 2 | from scaler_base import *
import numpy as np
class ThermodynamicScaler(ScalerBase):
    """Scaler which uses temperature/pressure/potential as descriptors and
    treats energetics as a constant."""

    def __init__(self, reaction_model=None):
        ScalerBase.__init__(self, reaction_model)

    def get_electronic_energies(self, descriptors):
        """Return {species: formation energy} for all adsorbates, transition
        states, gases and sites; energetics do not depend on the descriptors."""
        if len(self.surface_names) > 1:
            raise IndexError('Thermodynamic scaler works only with a \
single surface.')
        if self.adsorbate_interaction_model not in [None, 'ideal']:
            # Lazily parameterize the interaction model on first use.
            if not getattr(self.thermodynamics.adsorbate_interactions, '_parameterized', None):
                self.thermodynamics.adsorbate_interactions.parameterize_interactions()
        energy_dict = {}
        for species in self.adsorbate_names+self.transition_state_names:
            # Adsorbed species: one energy per surface; take the single surface.
            energy_dict[species] = self.species_definitions[species]['formation_energy'][0]
        for species in self.gas_names+self.site_names:
            energy_dict[species] = self.species_definitions[species]['formation_energy']
        return energy_dict

    def get_thermodynamic_energies(self, descriptors, **kwargs):
        """Return thermodynamic (free-energy) corrections evaluated at the
        descriptor point (temperature/pressure/potential)."""
        thermo_state = {}
        # synchronize all thermodynamic varibles
        for var, val in zip(self.descriptor_names, descriptors):
            thermo_state[var] = val
            setattr(self, var, val)

        if 'pressure' in self.descriptor_names:
            P = thermo_state['pressure']
        elif 'logPressure' in self.descriptor_names:
            P = 10**thermo_state['logPressure']
        else:
            P = 1

        if 'pressure' in self.descriptor_names or 'logPressure' in self.descriptor_names:
            if self.pressure_mode == 'static':
                # static pressure doesn't make sense if
                # pressure is a descriptor
                self.pressure_mode = 'concentration'
            self.pressure = P

        thermo_dict = self.thermodynamics.get_thermodynamic_corrections(
            **kwargs)

        # Sites carry no thermodynamic correction by definition.
        for key in self.site_names:
            if key not in thermo_dict:
                thermo_dict[key] = 0
        return thermo_dict

    def get_rxn_parameters(self, descriptors, *args, **kwargs):
        """Return kinetic parameters: plain formation energies, plus packed
        interaction terms when an interaction model is active."""
        if self.adsorbate_interaction_model not in ['ideal', None]:
            params = self.get_formation_energy_interaction_parameters(descriptors)
            return params
        else:
            params = self.get_formation_energy_parameters(descriptors)
            return params

    def get_formation_energy_parameters(self, descriptors):
        """Free energies of adsorbates and transition states, in that order."""
        self.parameter_names = self.adsorbate_names + self.transition_state_names
        free_energy_dict = self.get_free_energies(descriptors)
        params = [free_energy_dict[sp] for sp in self.adsorbate_names+self.transition_state_names]
        return params

    def get_formation_energy_interaction_parameters(self, descriptors):
        """Formation energies followed by the flattened adsorbate-adsorbate
        interaction matrix."""
        E_f = self.get_formation_energy_parameters(descriptors)
        if self.interaction_cross_term_names:
            param_names = self.adsorbate_names + self.interaction_cross_term_names
        else:
            param_names = self.adsorbate_names
        if not self.interaction_parameters:
            # Pull interaction strengths from the model; every name must
            # resolve to a concrete parameter.
            info = self.thermodynamics.adsorbate_interactions.get_interaction_info()
            params = [info[pi][0] for pi in param_names]
            params_valid = []
            for p, pname in zip(params, param_names):
                if p is not None:
                    params_valid.append(p)
                else:
                    raise ValueError('No interaction parameter specified for '+pname)
            self.interaction_parameters = params_valid
        epsilon = self.thermodynamics.adsorbate_interactions.params_to_matrix(E_f+self.interaction_parameters)
        epsilon = list(epsilon.ravel())
        return E_f + epsilon
|
lmazuel/ansible | refs/heads/devel | lib/ansible/modules/cloud/ovirt/ovirt_clusters_facts.py | 45 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_clusters_facts
short_description: Retrieve facts about one or more oVirt/RHV clusters
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV clusters."
notes:
- "This module creates a new top-level C(ovirt_clusters) fact, which
contains a list of clusters."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search cluster X from datacenter Y use following pattern:
name=X and datacenter=Y"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all clusters which names start with C<production>:
- ovirt_clusters_facts:
pattern: name=production*
- debug:
var: ovirt_clusters
'''
RETURN = '''
ovirt_clusters:
description: "List of dictionaries describing the clusters. Cluster attribues are mapped to dictionary keys,
all clusters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
    """Module entry point: gather oVirt/RHV cluster facts and exit."""
    argument_spec = ovirt_facts_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Pre-bind so the finally clause is safe even when create_connection()
    # (or the auth pop) raises; previously `connection.close(...)` would hit
    # a NameError and mask the original error reported by fail_json().
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        clusters_service = connection.system_service().clusters_service()
        clusters = clusters_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_clusters=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in clusters
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually opened.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
|
John-Hart/autorest | refs/heads/master | src/generator/AutoRest.Python.Tests/AcceptanceTests/form_data_tests.py | 6 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import io
import isodate
import os
import tempfile
from os.path import dirname, pardir, join, realpath, sep, pardir
cwd = dirname(realpath(__file__))
root = realpath(join(cwd , pardir, pardir, pardir, pardir))
sys.path.append(join(root, "src" , "client" , "Python", "msrest"))
log_level = int(os.environ.get('PythonLogLevel', 30))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "BodyFormData"))
from msrest.exceptions import DeserializationError
from autorestswaggerbatformdataservice import AutoRestSwaggerBATFormDataService
class FormDataTests(unittest.TestCase):
    """Acceptance tests for multipart/form-data and raw-body file uploads.

    Requires the AutoRest test server to be listening on localhost:3000;
    every test round-trips data through the server and asserts the echoed
    content matches what was sent.
    """

    def setUp(self):
        # Create a small on-disk fixture file; delete=False so it can be
        # re-opened by the tests and removed in tearDown.
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as dummy:
            self.dummy_file = dummy.name
            dummy.write("Test file")
        return super(FormDataTests, self).setUp()

    def test_file_upload_stream(self):
        """Upload an in-memory stream as form-data and verify the echo."""
        def test_callback(data, response, progress = [0]):
            # Progress callback invoked per downloaded chunk; the mutable
            # default arg deliberately accumulates across calls.
            self.assertTrue(len(data) > 0)
            progress[0] += len(data)
            total = float(response.headers.get('Content-Length', 100))
            print("Progress... {}%".format(int(progress[0]*100/total)))
            self.assertIsNotNone(response)
        client = AutoRestSwaggerBATFormDataService(base_url="http://localhost:3000")
        # Tiny block size forces multiple chunks so the callback fires often.
        client.config.connection.data_block_size = 2
        test_string = "Upload file test case"
        test_bytes = bytearray(test_string, encoding='utf-8')
        result = io.BytesIO()
        with io.BytesIO(test_bytes) as stream_data:
            resp = client.formdata.upload_file(stream_data, "UploadFile.txt", callback=test_callback)
            for r in resp:
                result.write(r)
        self.assertEqual(result.getvalue().decode(), test_string)

    def test_file_upload_stream_raw(self):
        """Upload an in-memory stream with raw=True and read resp.output."""
        # NOTE(review): test_callback is defined but never passed to
        # upload_file below -- presumably an oversight; confirm intent.
        def test_callback(data, response, progress = [0]):
            self.assertTrue(len(data) > 0)
            progress[0] += len(data)
            total = float(response.headers['Content-Length'])
            print("Progress... {}%".format(int(progress[0]*100/total)))
            self.assertIsNotNone(response)
        client = AutoRestSwaggerBATFormDataService(base_url="http://localhost:3000")
        client.config.connection.data_block_size = 2
        test_string = "Upload file test case"
        test_bytes = bytearray(test_string, encoding='utf-8')
        result = io.BytesIO()
        with io.BytesIO(test_bytes) as stream_data:
            # raw=True wraps the stream in a response object; the payload
            # iterator is exposed as .output.
            resp = client.formdata.upload_file(stream_data, "UploadFile.txt", raw=True)
            for r in resp.output:
                result.write(r)
        self.assertEqual(result.getvalue().decode(), test_string)

    def test_file_upload_file_stream(self):
        """Upload a real file object as form-data and verify the echo."""
        def test_callback(data, response, progress = [0]):
            self.assertTrue(len(data) > 0)
            progress[0] += len(data)
            total = float(response.headers.get('Content-Length', 100))
            print("Progress... {}%".format(int(progress[0]*100/total)))
            self.assertIsNotNone(response)
        client = AutoRestSwaggerBATFormDataService(base_url="http://localhost:3000")
        client.config.connection.data_block_size = 2
        name = os.path.basename(self.dummy_file)
        result = io.BytesIO()
        with open(self.dummy_file, 'rb') as upload_data:
            resp = client.formdata.upload_file(upload_data, name, callback=test_callback)
            for r in resp:
                result.write(r)
        self.assertEqual(result.getvalue().decode(), "Test file")

    def test_file_upload_file_stream_raw(self):
        """Upload a real file object with raw=True plus a progress callback."""
        def test_callback(data, response, progress = [0]):
            self.assertTrue(len(data) > 0)
            progress[0] += len(data)
            total = float(response.headers['Content-Length'])
            print("Progress... {}%".format(int(progress[0]*100/total)))
            self.assertIsNotNone(response)
        client = AutoRestSwaggerBATFormDataService(base_url="http://localhost:3000")
        client.config.connection.data_block_size = 2
        name = os.path.basename(self.dummy_file)
        result = io.BytesIO()
        with open(self.dummy_file, 'rb') as upload_data:
            resp = client.formdata.upload_file(upload_data, name, raw=True, callback=test_callback)
            for r in resp.output:
                result.write(r)
        self.assertEqual(result.getvalue().decode(), "Test file")

    def test_file_body_upload(self):
        """Upload via the request body (not form-data) from both a stream
        and a file, verifying the echoed content each time."""
        test_string = "Upload file test case"
        test_bytes = bytearray(test_string, encoding='utf-8')
        def test_callback(data, response, progress = [0]):
            self.assertTrue(len(data) > 0)
            progress[0] += len(data)
            total = float(len(test_bytes))
            # response is None while uploading, set while downloading.
            if response:
                print("Downloading... {}%".format(int(progress[0]*100/total)))
            else:
                print("Uploading... {}%".format(int(progress[0]*100/total)))
        client = AutoRestSwaggerBATFormDataService(base_url="http://localhost:3000")
        client.config.connection.data_block_size = 2
        result = io.BytesIO()
        with io.BytesIO(test_bytes) as stream_data:
            resp = client.formdata.upload_file_via_body(stream_data, callback=test_callback)
            for r in resp:
                result.write(r)
        self.assertEqual(result.getvalue().decode(), test_string)
        result = io.BytesIO()
        with open(self.dummy_file, 'rb') as upload_data:
            resp = client.formdata.upload_file_via_body(upload_data, callback=test_callback)
            for r in resp:
                result.write(r)
        self.assertEqual(result.getvalue().decode(), "Test file")

    def test_file_body_upload_raw(self):
        """Body upload without callback; second upload uses raw=True."""
        client = AutoRestSwaggerBATFormDataService(base_url="http://localhost:3000")
        test_string = "Upload file test case"
        test_bytes = bytearray(test_string, encoding='utf-8')
        result = io.BytesIO()
        with io.BytesIO(test_bytes) as stream_data:
            resp = client.formdata.upload_file_via_body(stream_data)
            for r in resp:
                result.write(r)
        self.assertEqual(result.getvalue().decode(), test_string)
        result = io.BytesIO()
        with open(self.dummy_file, 'rb') as upload_data:
            resp = client.formdata.upload_file_via_body(upload_data, raw=True)
            for r in resp.output:
                result.write(r)
        self.assertEqual(result.getvalue().decode(), "Test file")

    def tearDown(self):
        # Remove the fixture created in setUp.
        os.remove(self.dummy_file)
        return super(FormDataTests, self).tearDown()
if __name__ == '__main__':
unittest.main()
|
alinbalutoiu/tempest | refs/heads/master | tempest/cmd/cleanup_service.py | 7 | #!/usr/bin/env python
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import clients
from tempest import config
from tempest import test
LOG = logging.getLogger(__name__)
CONF = config.CONF
CONF_FLAVORS = None
CONF_IMAGES = None
CONF_NETWORKS = []
CONF_PRIV_NETWORK_NAME = None
CONF_PUB_NETWORK = None
CONF_PUB_ROUTER = None
CONF_TENANTS = None
CONF_USERS = None
IS_CEILOMETER = None
IS_CINDER = None
IS_GLANCE = None
IS_HEAT = None
IS_NEUTRON = None
IS_NOVA = None
def init_conf():
    """Populate the module-level configuration globals from tempest CONF.

    Must be called before any cleanup service is used: the services read
    these globals (CONF_FLAVORS, CONF_NETWORKS, ...) to decide which
    resources are preserved when is_preserve is set.
    """
    global CONF_FLAVORS
    global CONF_IMAGES
    global CONF_NETWORKS
    global CONF_PRIV_NETWORK
    global CONF_PRIV_NETWORK_NAME
    global CONF_PUB_NETWORK
    global CONF_PUB_ROUTER
    global CONF_TENANTS
    global CONF_USERS
    global IS_CEILOMETER
    global IS_CINDER
    global IS_GLANCE
    global IS_HEAT
    global IS_NEUTRON
    global IS_NOVA
    IS_CEILOMETER = CONF.service_available.ceilometer
    IS_CINDER = CONF.service_available.cinder
    IS_GLANCE = CONF.service_available.glance
    IS_HEAT = CONF.service_available.heat
    IS_NEUTRON = CONF.service_available.neutron
    IS_NOVA = CONF.service_available.nova
    CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
    CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
    CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
    CONF_PUB_NETWORK = CONF.network.public_network_id
    CONF_PUB_ROUTER = CONF.network.public_router_id
    CONF_TENANTS = [CONF.identity.admin_tenant_name,
                    CONF.identity.tenant_name,
                    CONF.identity.alt_tenant_name]
    CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
                  CONF.identity.alt_username]
    if IS_NEUTRON:
        # NOTE(review): CONF_PRIV_NETWORK has no module-level initializer
        # above -- presumably deliberate, as it only exists when Neutron is
        # available; confirm.
        CONF_PRIV_NETWORK = _get_network_id(CONF.compute.fixed_network_name,
                                            CONF.identity.tenant_name)
        CONF_NETWORKS = [CONF_PUB_NETWORK, CONF_PRIV_NETWORK]
def _get_network_id(net_name, tenant_name):
    """Return the id of the network named *net_name* owned by *tenant_name*.

    Returns None when no matching network exists.
    """
    admin_mgr = clients.AdminManager()
    networks = admin_mgr.network_client.list_networks()
    tenant = admin_mgr.identity_client.get_tenant_by_name(tenant_name)
    tenant_id = tenant['id']
    # First network matching both owner and name, or None.
    return next(
        (net['id'] for net in networks['networks']
         if net['tenant_id'] == tenant_id and net['name'] == net_name),
        None)
class BaseService(object):
    """Common scaffolding shared by all cleanup services.

    The runner passes a dict of options (is_dry_run, is_save_state,
    is_preserve, data, saved_state_json, optionally tenant_id, ...) which
    is promoted to instance attributes. Subclasses implement list/delete/
    dry_run/save_state; run() dispatches to one of them based on the mode
    flags.
    """

    def __init__(self, kwargs):
        self.client = None
        # Promote every runner-supplied option to an attribute.
        for key in kwargs:
            setattr(self, key, kwargs[key])
        # Pre-built filter for tenant-scoped listing calls.
        self.tenant_filter = (
            {'tenant_id': self.tenant_id} if hasattr(self, 'tenant_id')
            else {})

    def _filter_by_tenant_id(self, item_list):
        """Keep only items owned by self.tenant_id, when that is possible.

        The list is returned unchanged if it is empty/None, no tenant_id
        was configured, or the items carry no 'tenant_id' key.
        """
        tenant_id = getattr(self, 'tenant_id', None)
        if (not item_list or tenant_id is None
                or 'tenant_id' not in item_list[0]):
            return item_list
        return [item for item in item_list
                if item['tenant_id'] == tenant_id]

    def list(self):
        pass

    def delete(self):
        pass

    def dry_run(self):
        pass

    def save_state(self):
        pass

    def run(self):
        """Dispatch to dry_run/save_state/delete depending on mode flags."""
        if self.is_dry_run:
            self.dry_run()
        elif self.is_save_state:
            self.save_state()
        else:
            self.delete()
class SnapshotService(BaseService):
    """Cleanup service for Cinder volume snapshots."""

    def __init__(self, manager, **kwargs):
        super(SnapshotService, self).__init__(kwargs)
        self.client = manager.snapshots_client

    def list(self):
        """Return all snapshots visible to the client."""
        client = self.client
        snaps = client.list_snapshots()
        LOG.debug("List count, %s Snapshots" % len(snaps))
        return snaps

    def delete(self):
        """Delete every listed snapshot, logging (not raising) failures."""
        snaps = self.list()
        client = self.client
        for snap in snaps:
            try:
                client.delete_snapshot(snap['id'])
            except Exception:
                LOG.exception("Delete Snapshot exception.")

    def dry_run(self):
        """Record snapshots that would be deleted into the report data."""
        snaps = self.list()
        self.data['snapshots'] = snaps
class ServerService(BaseService):
    """Cleanup service for Nova servers (instances)."""

    def __init__(self, manager, **kwargs):
        super(ServerService, self).__init__(kwargs)
        self.client = manager.servers_client

    def list(self):
        """Return all servers visible to the client."""
        client = self.client
        servers_body = client.list_servers()
        servers = servers_body['servers']
        LOG.debug("List count, %s Servers" % len(servers))
        return servers

    def delete(self):
        """Delete every listed server, logging (not raising) failures."""
        client = self.client
        servers = self.list()
        for server in servers:
            try:
                client.delete_server(server['id'])
            except Exception:
                LOG.exception("Delete Server exception.")

    def dry_run(self):
        """Record servers that would be deleted into the report data."""
        servers = self.list()
        self.data['servers'] = servers
class ServerGroupService(ServerService):
    """Cleanup service for Nova server groups (reuses servers_client)."""

    def list(self):
        """Return all server groups visible to the client."""
        client = self.client
        sgs = client.list_server_groups()
        LOG.debug("List count, %s Server Groups" % len(sgs))
        return sgs

    def delete(self):
        """Delete every listed server group, logging failures."""
        client = self.client
        sgs = self.list()
        for sg in sgs:
            try:
                client.delete_server_group(sg['id'])
            except Exception:
                LOG.exception("Delete Server Group exception.")

    def dry_run(self):
        """Record server groups that would be deleted."""
        sgs = self.list()
        self.data['server_groups'] = sgs
class StackService(BaseService):
    """Cleanup service for Heat stacks."""

    def __init__(self, manager, **kwargs):
        super(StackService, self).__init__(kwargs)
        self.client = manager.orchestration_client

    def list(self):
        """Return all stacks visible to the client."""
        client = self.client
        stacks = client.list_stacks()
        LOG.debug("List count, %s Stacks" % len(stacks))
        return stacks

    def delete(self):
        """Delete every listed stack, logging (not raising) failures."""
        client = self.client
        stacks = self.list()
        for stack in stacks:
            try:
                client.delete_stack(stack['id'])
            except Exception:
                LOG.exception("Delete Stack exception.")

    def dry_run(self):
        """Record stacks that would be deleted into the report data."""
        stacks = self.list()
        self.data['stacks'] = stacks
class KeyPairService(BaseService):
    """Cleanup service for Nova keypairs."""

    def __init__(self, manager, **kwargs):
        super(KeyPairService, self).__init__(kwargs)
        self.client = manager.keypairs_client

    def list(self):
        """Return all keypairs visible to the client."""
        client = self.client
        keypairs = client.list_keypairs()
        LOG.debug("List count, %s Keypairs" % len(keypairs))
        return keypairs

    def delete(self):
        """Delete every listed keypair (addressed by name), logging failures."""
        client = self.client
        keypairs = self.list()
        for k in keypairs:
            try:
                # Each list entry wraps the data under a 'keypair' key.
                name = k['keypair']['name']
                client.delete_keypair(name)
            except Exception:
                LOG.exception("Delete Keypairs exception.")

    def dry_run(self):
        """Record keypairs that would be deleted into the report data."""
        keypairs = self.list()
        self.data['keypairs'] = keypairs
class SecurityGroupService(BaseService):
    """Cleanup service for nova-network security groups.

    The 'default' group cannot be deleted, so it is always excluded.
    """

    def __init__(self, manager, **kwargs):
        super(SecurityGroupService, self).__init__(kwargs)
        self.client = manager.security_groups_client

    def list(self):
        """Return all deletable (non-default) security groups."""
        client = self.client
        secgrps = client.list_security_groups()
        secgrp_del = [grp for grp in secgrps if grp['name'] != 'default']
        LOG.debug("List count, %s Security Groups" % len(secgrp_del))
        return secgrp_del

    def delete(self):
        """Delete every listed security group, logging failures."""
        client = self.client
        secgrp_del = self.list()
        for g in secgrp_del:
            try:
                client.delete_security_group(g['id'])
            except Exception:
                LOG.exception("Delete Security Groups exception.")

    def dry_run(self):
        """Record security groups that would be deleted."""
        secgrp_del = self.list()
        self.data['security_groups'] = secgrp_del
class FloatingIpService(BaseService):
    """Cleanup service for nova-network floating IPs."""

    def __init__(self, manager, **kwargs):
        super(FloatingIpService, self).__init__(kwargs)
        self.client = manager.floating_ips_client

    def list(self):
        """Return all floating IPs visible to the client."""
        client = self.client
        floating_ips = client.list_floating_ips()
        LOG.debug("List count, %s Floating IPs" % len(floating_ips))
        return floating_ips

    def delete(self):
        """Delete every listed floating IP, logging failures."""
        client = self.client
        floating_ips = self.list()
        for f in floating_ips:
            try:
                client.delete_floating_ip(f['id'])
            except Exception:
                LOG.exception("Delete Floating IPs exception.")

    def dry_run(self):
        """Record floating IPs that would be deleted."""
        floating_ips = self.list()
        self.data['floating_ips'] = floating_ips
class VolumeService(BaseService):
    """Cleanup service for Cinder volumes."""

    def __init__(self, manager, **kwargs):
        super(VolumeService, self).__init__(kwargs)
        self.client = manager.volumes_client

    def list(self):
        """Return all volumes visible to the client."""
        client = self.client
        vols = client.list_volumes()
        LOG.debug("List count, %s Volumes" % len(vols))
        return vols

    def delete(self):
        """Delete every listed volume, logging (not raising) failures."""
        client = self.client
        vols = self.list()
        for v in vols:
            try:
                client.delete_volume(v['id'])
            except Exception:
                LOG.exception("Delete Volume exception.")

    def dry_run(self):
        """Record volumes that would be deleted into the report data."""
        vols = self.list()
        self.data['volumes'] = vols
class VolumeQuotaService(BaseService):
    """Cleanup service that resets a tenant's Cinder quota set."""

    def __init__(self, manager, **kwargs):
        super(VolumeQuotaService, self).__init__(kwargs)
        self.client = manager.volume_quotas_client

    def delete(self):
        """Reset the tenant's volume quotas to defaults, logging failures."""
        client = self.client
        try:
            client.delete_quota_set(self.tenant_id)
        except Exception:
            LOG.exception("Delete Volume Quotas exception.")

    def dry_run(self):
        """Record the tenant's current quota usage instead of resetting it."""
        quotas = self.client.show_quota_usage(self.tenant_id)
        self.data['volume_quotas'] = quotas
class NovaQuotaService(BaseService):
    """Cleanup service that resets a tenant's Nova quota set."""

    def __init__(self, manager, **kwargs):
        super(NovaQuotaService, self).__init__(kwargs)
        self.client = manager.quotas_client
        # Separate client used only for the dry-run limits report.
        self.limits_client = manager.limits_client

    def delete(self):
        """Reset the tenant's compute quotas to defaults, logging failures."""
        client = self.client
        try:
            client.delete_quota_set(self.tenant_id)
        except Exception:
            LOG.exception("Delete Quotas exception.")

    def dry_run(self):
        """Record the tenant's absolute compute limits instead of resetting."""
        client = self.limits_client
        quotas = client.show_limits()
        self.data['compute_quotas'] = quotas['absolute']
# Begin network service classes
class NetworkService(BaseService):
    """Cleanup service for Neutron networks.

    Networks declared in tempest.conf (CONF_NETWORKS) are preserved when
    the is_preserve option is set. Also serves as the base class for the
    other Neutron resource services.
    """

    def __init__(self, manager, **kwargs):
        super(NetworkService, self).__init__(kwargs)
        self.client = manager.network_client

    def _filter_by_conf_networks(self, item_list):
        """Drop items belonging to networks configured in tempest.conf.

        Only applies when every item carries a 'network_id' key; otherwise
        the list is returned unchanged.
        """
        if not item_list or not all(('network_id' in i for i in item_list)):
            return item_list
        return [item for item in item_list if item['network_id']
                not in CONF_NETWORKS]

    def list(self):
        """Return tenant networks, minus preserved ones."""
        client = self.client
        networks = client.list_networks(**self.tenant_filter)
        networks = networks['networks']
        # filter out networks declared in tempest.conf
        if self.is_preserve:
            networks = [network for network in networks
                        if network['id'] not in CONF_NETWORKS]
        # Bug fix: log the count, not the whole list of network dicts
        # (every sibling service logs len(...) here).
        LOG.debug("List count, %s Networks" % len(networks))
        return networks

    def delete(self):
        """Delete every listed network, logging (not raising) failures."""
        client = self.client
        networks = self.list()
        for n in networks:
            try:
                client.delete_network(n['id'])
            except Exception:
                LOG.exception("Delete Network exception.")

    def dry_run(self):
        """Record networks that would be deleted into the report data."""
        networks = self.list()
        self.data['networks'] = networks
class NetworkFloatingIpService(NetworkService):
    """Cleanup service for Neutron floating IPs."""

    def list(self):
        """Return the tenant's floating IPs."""
        client = self.client
        flips = client.list_floatingips(**self.tenant_filter)
        flips = flips['floatingips']
        LOG.debug("List count, %s Network Floating IPs" % len(flips))
        return flips

    def delete(self):
        """Delete every listed floating IP, logging failures."""
        client = self.client
        flips = self.list()
        for flip in flips:
            try:
                client.delete_floatingip(flip['id'])
            except Exception:
                LOG.exception("Delete Network Floating IP exception.")

    def dry_run(self):
        """Record floating IPs that would be deleted."""
        flips = self.list()
        self.data['floating_ips'] = flips
class NetworkRouterService(NetworkService):
    """Cleanup service for Neutron routers.

    The configured public router (CONF_PUB_ROUTER) is preserved when
    is_preserve is set. Router interfaces are detached before deletion.
    """

    def list(self):
        """Return the tenant's routers, minus the preserved public router."""
        client = self.client
        routers = client.list_routers(**self.tenant_filter)
        routers = routers['routers']
        if self.is_preserve:
            routers = [router for router in routers
                       if router['id'] != CONF_PUB_ROUTER]
        LOG.debug("List count, %s Routers" % len(routers))
        return routers

    def delete(self):
        """Detach each router's interfaces, then delete it; log failures."""
        client = self.client
        routers = self.list()
        for router in routers:
            try:
                rid = router['id']
                # A router cannot be deleted while interface ports remain.
                ports = [port for port
                         in client.list_router_interfaces(rid)['ports']
                         if port["device_owner"] == "network:router_interface"]
                for port in ports:
                    client.remove_router_interface_with_port_id(rid,
                                                                port['id'])
                client.delete_router(rid)
            except Exception:
                LOG.exception("Delete Router exception.")

    def dry_run(self):
        """Record routers that would be deleted."""
        routers = self.list()
        self.data['routers'] = routers
class NetworkHealthMonitorService(NetworkService):
    """Cleanup service for LBaaS health monitors."""

    def list(self):
        """Return the tenant's health monitors."""
        client = self.client
        hms = client.list_health_monitors()
        hms = hms['health_monitors']
        hms = self._filter_by_tenant_id(hms)
        LOG.debug("List count, %s Health Monitors" % len(hms))
        return hms

    def delete(self):
        """Delete every listed health monitor, logging failures."""
        client = self.client
        hms = self.list()
        for hm in hms:
            try:
                client.delete_health_monitor(hm['id'])
            except Exception:
                LOG.exception("Delete Health Monitor exception.")

    def dry_run(self):
        """Record health monitors that would be deleted."""
        hms = self.list()
        self.data['health_monitors'] = hms
class NetworkMemberService(NetworkService):
    """Cleanup service for LBaaS pool members."""

    def list(self):
        """Return the tenant's pool members."""
        client = self.client
        members = client.list_members()
        members = members['members']
        members = self._filter_by_tenant_id(members)
        LOG.debug("List count, %s Members" % len(members))
        return members

    def delete(self):
        """Delete every listed member, logging (not raising) failures."""
        client = self.client
        members = self.list()
        for member in members:
            try:
                client.delete_member(member['id'])
            except Exception:
                LOG.exception("Delete Member exception.")

    def dry_run(self):
        """Record members that would be deleted."""
        members = self.list()
        self.data['members'] = members
class NetworkVipService(NetworkService):
    """Cleanup service for LBaaS virtual IPs (VIPs)."""

    def list(self):
        """Return the tenant's VIPs."""
        client = self.client
        vips = client.list_vips()
        vips = vips['vips']
        vips = self._filter_by_tenant_id(vips)
        LOG.debug("List count, %s VIPs" % len(vips))
        return vips

    def delete(self):
        """Delete every listed VIP, logging (not raising) failures."""
        client = self.client
        vips = self.list()
        for vip in vips:
            try:
                client.delete_vip(vip['id'])
            except Exception:
                LOG.exception("Delete VIP exception.")

    def dry_run(self):
        """Record VIPs that would be deleted."""
        vips = self.list()
        self.data['vips'] = vips
class NetworkPoolService(NetworkService):
    """Cleanup service for LBaaS pools."""

    def list(self):
        """Return the tenant's pools."""
        client = self.client
        pools = client.list_pools()
        pools = pools['pools']
        pools = self._filter_by_tenant_id(pools)
        LOG.debug("List count, %s Pools" % len(pools))
        return pools

    def delete(self):
        """Delete every listed pool, logging (not raising) failures."""
        client = self.client
        pools = self.list()
        for pool in pools:
            try:
                client.delete_pool(pool['id'])
            except Exception:
                LOG.exception("Delete Pool exception.")

    def dry_run(self):
        """Record pools that would be deleted."""
        pools = self.list()
        self.data['pools'] = pools
class NetworkMeteringLabelRuleService(NetworkService):
    """Cleanup service for Neutron metering label rules."""

    def list(self):
        """Return the tenant's metering label rules."""
        client = self.client
        rules = client.list_metering_label_rules()
        rules = rules['metering_label_rules']
        rules = self._filter_by_tenant_id(rules)
        LOG.debug("List count, %s Metering Label Rules" % len(rules))
        return rules

    def delete(self):
        """Delete every listed rule, logging (not raising) failures."""
        client = self.client
        rules = self.list()
        for rule in rules:
            try:
                client.delete_metering_label_rule(rule['id'])
            except Exception:
                LOG.exception("Delete Metering Label Rule exception.")

    def dry_run(self):
        """Record rules that would be deleted."""
        rules = self.list()
        self.data['rules'] = rules
class NetworkMeteringLabelService(NetworkService):
    """Cleanup service for Neutron metering labels."""

    def list(self):
        """Return the tenant's metering labels."""
        client = self.client
        labels = client.list_metering_labels()
        labels = labels['metering_labels']
        labels = self._filter_by_tenant_id(labels)
        LOG.debug("List count, %s Metering Labels" % len(labels))
        return labels

    def delete(self):
        """Delete every listed label, logging (not raising) failures."""
        client = self.client
        labels = self.list()
        for label in labels:
            try:
                client.delete_metering_label(label['id'])
            except Exception:
                LOG.exception("Delete Metering Label exception.")

    def dry_run(self):
        """Record labels that would be deleted."""
        labels = self.list()
        self.data['labels'] = labels
class NetworkPortService(NetworkService):
    """Cleanup service for Neutron ports.

    Only unattached ports and compute-owned ports are deleted; ports owned
    by routers/DHCP agents are managed by their own services.
    """

    def list(self):
        """Return deletable ports (unowned or compute-owned)."""
        client = self.client
        ports = [port for port in
                 client.list_ports(**self.tenant_filter)['ports']
                 if port["device_owner"] == "" or
                 port["device_owner"].startswith("compute:")]
        if self.is_preserve:
            ports = self._filter_by_conf_networks(ports)
        LOG.debug("List count, %s Ports" % len(ports))
        return ports

    def delete(self):
        """Delete every listed port, logging (not raising) failures."""
        client = self.client
        ports = self.list()
        for port in ports:
            try:
                client.delete_port(port['id'])
            except Exception:
                LOG.exception("Delete Port exception.")

    def dry_run(self):
        """Record ports that would be deleted."""
        ports = self.list()
        self.data['ports'] = ports
class NetworkSecGroupService(NetworkService):
    """Cleanup service for Neutron security groups.

    The 'default' security group cannot be deleted, so it is always
    excluded from listing and deletion.
    """

    def list(self):
        """Return deletable (non-default) security groups for the tenant."""
        client = self.client
        filter = self.tenant_filter
        # cannot delete default sec group so never show it.
        secgroups = [secgroup for secgroup in
                     client.list_security_groups(**filter)['security_groups']
                     if secgroup['name'] != 'default']
        if self.is_preserve:
            secgroups = self._filter_by_conf_networks(secgroups)
        # Typo fix: "securtiy" -> "security" in the log message.
        LOG.debug("List count, %s security_groups" % len(secgroups))
        return secgroups

    def delete(self):
        """Delete every listed security group, logging failures."""
        client = self.client
        secgroups = self.list()
        for secgroup in secgroups:
            try:
                # Bug fix: the network client method is
                # delete_security_group() (cf. list_security_groups above and
                # SecurityGroupService.delete); delete_secgroup() raised an
                # AttributeError that the except below silently swallowed, so
                # no group was ever deleted.
                client.delete_security_group(secgroup['id'])
            except Exception:
                LOG.exception("Delete security_group exception.")

    def dry_run(self):
        """Record security groups that would be deleted."""
        secgroups = self.list()
        self.data['secgroups'] = secgroups
class NetworkSubnetService(NetworkService):
    """Cleanup service for Neutron subnets."""

    def list(self):
        """Return the tenant's subnets, minus those on preserved networks."""
        client = self.client
        subnets = client.list_subnets(**self.tenant_filter)
        subnets = subnets['subnets']
        if self.is_preserve:
            subnets = self._filter_by_conf_networks(subnets)
        LOG.debug("List count, %s Subnets" % len(subnets))
        return subnets

    def delete(self):
        """Delete every listed subnet, logging (not raising) failures."""
        client = self.client
        subnets = self.list()
        for subnet in subnets:
            try:
                client.delete_subnet(subnet['id'])
            except Exception:
                LOG.exception("Delete Subnet exception.")

    def dry_run(self):
        """Record subnets that would be deleted."""
        subnets = self.list()
        self.data['subnets'] = subnets
# Telemetry services
class TelemetryAlarmService(BaseService):
    """Cleanup service for Ceilometer alarms."""

    def __init__(self, manager, **kwargs):
        super(TelemetryAlarmService, self).__init__(kwargs)
        self.client = manager.telemetry_client

    def list(self):
        """Return all alarms visible to the client."""
        client = self.client
        alarms = client.list_alarms()
        LOG.debug("List count, %s Alarms" % len(alarms))
        return alarms

    def delete(self):
        """Delete every listed alarm, logging (not raising) failures."""
        client = self.client
        alarms = self.list()
        for alarm in alarms:
            try:
                client.delete_alarm(alarm['id'])
            except Exception:
                LOG.exception("Delete Alarms exception.")

    def dry_run(self):
        """Record alarms that would be deleted."""
        alarms = self.list()
        self.data['alarms'] = alarms
# begin global services
class FlavorService(BaseService):
    """Global cleanup service for Nova flavors.

    Flavors recorded in the saved state and those referenced by
    tempest.conf (CONF_FLAVORS) can be excluded from deletion.
    """

    def __init__(self, manager, **kwargs):
        super(FlavorService, self).__init__(kwargs)
        self.client = manager.flavors_client

    def list(self):
        """Return flavors, reconciled against saved state and preserve list."""
        client = self.client
        # is_public=None lists both public and private flavors.
        flavors = client.list_flavors({"is_public": None})
        if not self.is_save_state:
            # recreate list removing saved flavors
            flavors = [flavor for flavor in flavors if flavor['id']
                       not in self.saved_state_json['flavors'].keys()]
        if self.is_preserve:
            flavors = [flavor for flavor in flavors
                       if flavor['id'] not in CONF_FLAVORS]
        LOG.debug("List count, %s Flavors after reconcile" % len(flavors))
        return flavors

    def delete(self):
        """Delete every listed flavor, logging (not raising) failures."""
        client = self.client
        flavors = self.list()
        for flavor in flavors:
            try:
                client.delete_flavor(flavor['id'])
            except Exception:
                LOG.exception("Delete Flavor exception.")

    def dry_run(self):
        """Record flavors that would be deleted."""
        flavors = self.list()
        self.data['flavors'] = flavors

    def save_state(self):
        """Record current flavors (id -> name) as the saved state."""
        flavors = self.list()
        self.data['flavors'] = {}
        for flavor in flavors:
            self.data['flavors'][flavor['id']] = flavor['name']
class ImageService(BaseService):
    """Global cleanup service for Glance images.

    Images recorded in the saved state and those referenced by
    tempest.conf (CONF_IMAGES) can be excluded from deletion.
    """

    def __init__(self, manager, **kwargs):
        super(ImageService, self).__init__(kwargs)
        self.client = manager.images_client

    def list(self):
        """Return images, reconciled against saved state and preserve list."""
        client = self.client
        images = client.list_images({"all_tenants": True})
        if not self.is_save_state:
            images = [image for image in images if image['id']
                      not in self.saved_state_json['images'].keys()]
        if self.is_preserve:
            images = [image for image in images
                      if image['id'] not in CONF_IMAGES]
        LOG.debug("List count, %s Images after reconcile" % len(images))
        return images

    def delete(self):
        """Delete every listed image, logging (not raising) failures."""
        client = self.client
        images = self.list()
        for image in images:
            try:
                client.delete_image(image['id'])
            except Exception:
                LOG.exception("Delete Image exception.")

    def dry_run(self):
        """Record images that would be deleted."""
        images = self.list()
        self.data['images'] = images

    def save_state(self):
        """Record current images (id -> name) as the saved state."""
        self.data['images'] = {}
        images = self.list()
        for image in images:
            self.data['images'][image['id']] = image['name']
class IdentityService(BaseService):
    """Base for Keystone-backed cleanup services (users, roles, tenants)."""

    def __init__(self, manager, **kwargs):
        super(IdentityService, self).__init__(kwargs)
        self.client = manager.identity_client
class UserService(IdentityService):
    """Global cleanup service for Keystone users.

    The admin user is never deleted; configured users (CONF_USERS) are
    kept when is_preserve is set.
    """

    def list(self):
        """Return users, reconciled against saved state and preserve list."""
        client = self.client
        users = client.get_users()
        if not self.is_save_state:
            users = [user for user in users if user['id']
                     not in self.saved_state_json['users'].keys()]
        if self.is_preserve:
            # CONF_USERS includes the admin user, so it is kept here too.
            users = [user for user in users if user['name']
                     not in CONF_USERS]
        elif not self.is_save_state:  # Never delete admin user
            users = [user for user in users if user['name'] !=
                     CONF.identity.admin_username]
        LOG.debug("List count, %s Users after reconcile" % len(users))
        return users

    def delete(self):
        """Delete every listed user, logging (not raising) failures."""
        client = self.client
        users = self.list()
        for user in users:
            try:
                client.delete_user(user['id'])
            except Exception:
                LOG.exception("Delete User exception.")

    def dry_run(self):
        """Record users that would be deleted."""
        users = self.list()
        self.data['users'] = users

    def save_state(self):
        """Record current users (id -> name) as the saved state."""
        users = self.list()
        self.data['users'] = {}
        for user in users:
            self.data['users'][user['id']] = user['name']
class RoleService(IdentityService):
    """Global cleanup service for Keystone roles (admin role excluded)."""

    def list(self):
        """Return roles reconciled against saved state; [] on API failure."""
        client = self.client
        try:
            roles = client.list_roles()
            # reconcile roles with saved state and never list admin role
            if not self.is_save_state:
                roles = [role for role in roles if
                         (role['id'] not in
                          self.saved_state_json['roles'].keys()
                          and role['name'] != CONF.identity.admin_role)]
            LOG.debug("List count, %s Roles after reconcile" % len(roles))
            return roles
        except Exception:
            # Best-effort: a failed listing should not abort the cleanup run.
            LOG.exception("Cannot retrieve Roles.")
            return []

    def delete(self):
        """Delete every listed role, logging (not raising) failures."""
        client = self.client
        roles = self.list()
        for role in roles:
            try:
                client.delete_role(role['id'])
            except Exception:
                LOG.exception("Delete Role exception.")

    def dry_run(self):
        """Record roles that would be deleted."""
        roles = self.list()
        self.data['roles'] = roles

    def save_state(self):
        """Record current roles (id -> name) as the saved state."""
        roles = self.list()
        self.data['roles'] = {}
        for role in roles:
            self.data['roles'][role['id']] = role['name']
class TenantService(IdentityService):
    """Global cleanup service for Keystone tenants.

    The admin tenant is never deleted; configured tenants (CONF_TENANTS)
    are kept when is_preserve is set.
    """

    def list(self):
        """Return tenants, reconciled against saved state and preserve list."""
        client = self.client
        tenants = client.list_tenants()
        if not self.is_save_state:
            tenants = [tenant for tenant in tenants if (tenant['id']
                       not in self.saved_state_json['tenants'].keys()
                       and tenant['name'] != CONF.identity.admin_tenant_name)]
        if self.is_preserve:
            tenants = [tenant for tenant in tenants if tenant['name']
                       not in CONF_TENANTS]
        LOG.debug("List count, %s Tenants after reconcile" % len(tenants))
        return tenants

    def delete(self):
        """Delete every listed tenant, logging (not raising) failures."""
        client = self.client
        tenants = self.list()
        for tenant in tenants:
            try:
                client.delete_tenant(tenant['id'])
            except Exception:
                LOG.exception("Delete Tenant exception.")

    def dry_run(self):
        """Record tenants that would be deleted."""
        tenants = self.list()
        self.data['tenants'] = tenants

    def save_state(self):
        """Record current tenants (id -> name) as the saved state."""
        tenants = self.list()
        self.data['tenants'] = {}
        for tenant in tenants:
            self.data['tenants'][tenant['id']] = tenant['name']
class DomainService(BaseService):
    """Global cleanup service for Keystone v3 domains."""

    def __init__(self, manager, **kwargs):
        super(DomainService, self).__init__(kwargs)
        self.client = manager.identity_v3_client

    def list(self):
        """Return domains reconciled against saved state."""
        client = self.client
        domains = client.list_domains()
        if not self.is_save_state:
            domains = [domain for domain in domains if domain['id']
                       not in self.saved_state_json['domains'].keys()]
        LOG.debug("List count, %s Domains after reconcile" % len(domains))
        return domains

    def delete(self):
        """Disable then delete each listed domain, logging failures."""
        client = self.client
        domains = self.list()
        for domain in domains:
            try:
                # A domain must be disabled before Keystone allows deletion.
                client.update_domain(domain['id'], enabled=False)
                client.delete_domain(domain['id'])
            except Exception:
                LOG.exception("Delete Domain exception.")

    def dry_run(self):
        """Record domains that would be deleted."""
        domains = self.list()
        self.data['domains'] = domains

    def save_state(self):
        """Record current domains (id -> name) as the saved state."""
        domains = self.list()
        self.data['domains'] = {}
        for domain in domains:
            self.data['domains'][domain['id']] = domain['name']
def get_tenant_cleanup_services():
    """Return the ordered list of per-tenant cleanup service classes.

    The order matters: dependent resources (e.g. ports, subnets) are
    listed before the networks that contain them. Which services appear
    depends on the IS_* availability flags set by init_conf().
    """
    tenant_services = []
    if IS_CEILOMETER:
        tenant_services.append(TelemetryAlarmService)
    if IS_NOVA:
        tenant_services.extend([ServerService,
                                KeyPairService,
                                SecurityGroupService,
                                ServerGroupService])
        if not IS_NEUTRON:
            # nova-network floating IPs only when Neutron is absent.
            tenant_services.append(FloatingIpService)
        tenant_services.append(NovaQuotaService)
    if IS_HEAT:
        tenant_services.append(StackService)
    if IS_NEUTRON:
        tenant_services.append(NetworkFloatingIpService)
        if test.is_extension_enabled('metering', 'network'):
            tenant_services.extend([NetworkMeteringLabelRuleService,
                                    NetworkMeteringLabelService])
        tenant_services.extend([NetworkRouterService,
                                NetworkPortService,
                                NetworkSubnetService,
                                NetworkService,
                                NetworkSecGroupService])
    if IS_CINDER:
        tenant_services.extend([SnapshotService,
                                VolumeService,
                                VolumeQuotaService])
    return tenant_services
def get_global_cleanup_services():
    """Return the cleanup service classes for non-tenant (global) resources."""
    global_services = []
    if IS_NOVA:
        global_services.append(FlavorService)
    if IS_GLANCE:
        global_services.append(ImageService)
    # Identity resources are always cleaned regardless of service flags.
    global_services.extend([UserService,
                            TenantService,
                            DomainService,
                            RoleService])
    return global_services
|
bytedance/fedlearner | refs/heads/master | web_console_v2/api/test/fedlearner_webconsole/utils/mixins_test.py | 1 | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import unittest
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from fedlearner_webconsole.utils.mixins import from_dict_mixin, to_dict_mixin
Base = declarative_base()
@to_dict_mixin()
class DeclarativeClass(Base):
    # Minimal SQLAlchemy model exercising to_dict() default behavior:
    # with no explicit field list, all mapped columns are exported.
    __tablename__ = 'just_a_test'
    test = Column(Integer, primary_key=True)
@to_dict_mixin(to_dict_fields=['hhh'])
@from_dict_mixin(from_dict_fields=['hhh'], required_fields=['hhh'])
class SpecifyColumnsClass(object):
    # Plain object exercising explicit field lists: only 'hhh' is
    # (de)serialized and it is required; 'not_include' must be ignored.
    def __init__(self) -> None:
        self.hhh = None
        self.not_include = None
class MixinsTest(unittest.TestCase):
    """Unit tests for the to_dict/from_dict mixin decorators."""

    def test_to_dict_declarative_api(self):
        """Default to_dict on a SQLAlchemy model exports all columns."""
        obj = DeclarativeClass()
        res = obj.to_dict()
        self.assertEqual(len(res), 1)
        self.assertTrue('test' in res)

    def test_to_dict_specify_columns(self):
        """Explicit to_dict_fields limits the exported keys."""
        obj = SpecifyColumnsClass()
        obj.hhh = 'hhh'
        res = obj.to_dict()
        self.assertEqual(len(res), 1)
        self.assertTrue('hhh' in res)

    def test_from_dict(self):
        """from_dict reads declared fields and enforces required ones."""
        inputs_pass = {'hhh': 4, 'hhhh': 1}
        inputs_raise = {'hhhh': 1}
        obj = SpecifyColumnsClass.from_dict(inputs_pass)
        self.assertEqual(obj.hhh, 4)
        # Missing required field 'hhh' must raise.
        with self.assertRaises(ValueError):
            obj = SpecifyColumnsClass.from_dict(inputs_raise)
if __name__ == '__main__':
unittest.main()
|
singlebrook/AWS-ElasticBeanstalk-CLI | refs/heads/master | eb/linux/python3/scli/operation_queue.py | 8 | #!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import logging as _logging
from scli.operation.base import OperationBase
from scli.operation.base import OperationResult
log = _logging.getLogger('cli')
class OperationQueue(object):
    """Ordered collection of CLI operations executed sequentially.

    Operations are OperationBase instances appended via add(); run()
    executes them in insertion order, collecting an OperationResult per
    operation into the caller-supplied result list.
    """

    def __init__(self):
        self._queue = []
        # Cursor of the next operation to run; -1 means the queue is empty.
        self._index = -1

    def add(self, operation):
        """Append *operation*; it must be an OperationBase instance."""
        if not isinstance(operation, OperationBase):
            raise AttributeError(
                '{0} is not an operation instance'.format(
                    operation.__class__.__name__))
        self._queue.append(operation)
        log.debug('Add operation "{0}" to queue'.format(
            operation.__class__.__name__))
        if self._index < 0:
            # First operation added: point the cursor at it.
            self._index = 0

    @property
    def index(self):
        """Index of the next operation to run (-1 when empty)."""
        return self._index

    @property
    def size(self):
        """Number of queued operations."""
        return len(self._queue)

    @property
    def required_parameters(self):
        """Parameters the caller must supply: every operation input that
        is not produced as output by an earlier operation. None when the
        queue is empty."""
        if not self._queue:
            return None
        needed = set()
        produced = set()
        for op in self._queue:
            needed |= op.input_parameters - produced
            produced |= op.output_parameters
        return needed

    def run(self, parameter_pool, result):
        """Execute all queued operations in order, appending each
        OperationResult to *result*."""
        if self._index < 0:
            return  # nothing queued
        if self._index > 0:
            # NOTE(review): resuming mid-queue is currently a no-op --
            # presumably intended to continue from self._index; confirm.
            pass
        else:
            for position, op in enumerate(self._queue):
                try:
                    self._index = position
                    log.info('Run {0}th operation "{1}" now...'.format(
                        position + 1, op.__class__.__name__))
                    outcome = op.execute(parameter_pool)
                    if outcome is None:
                        outcome = OperationResult(op, None, None, None)
                    result.append(outcome)
                    log.info('Operation "{0}" completed'.format(
                        op.__class__.__name__))
                except AttributeError:
                    raise
                except Exception:
                    raise
|
amrdraz/brython | refs/heads/master | www/src/Lib/test/test_pep380.py | 34 | # -*- coding: utf-8 -*-
"""
Test suite for PEP 380 implementation
adapted from original tests written by Greg Ewing
see <http://www.cosc.canterbury.ac.nz/greg.ewing/python/yield-from/YieldFrom-Python3.1.2-rev5.zip>
"""
import unittest
import io
import sys
import inspect
import parser
from test.support import captured_stderr, disable_gc, gc_collect
class TestPEP380Operation(unittest.TestCase):
"""
Test semantics.
"""
def test_delegation_of_initial_next_to_subgenerator(self):
"""
Test delegation of initial next() call to subgenerator
"""
trace = []
def g1():
trace.append("Starting g1")
yield from g2()
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
yield 42
trace.append("Finishing g2")
for x in g1():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Starting g2",
"Yielded 42",
"Finishing g2",
"Finishing g1",
])
def test_raising_exception_in_initial_next_call(self):
"""
Test raising exception in initial next() call
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield from g2()
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
raise ValueError("spanish inquisition occurred")
finally:
trace.append("Finishing g2")
try:
for x in g1():
trace.append("Yielded %s" % (x,))
except ValueError as e:
self.assertEqual(e.args[0], "spanish inquisition occurred")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Starting g2",
"Finishing g2",
"Finishing g1",
])
def test_delegation_of_next_call_to_subgenerator(self):
"""
Test delegation of next() call to subgenerator
"""
trace = []
def g1():
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
trace.append("Finishing g2")
for x in g1():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"Yielded g1 eggs",
"Finishing g1",
])
def test_raising_exception_in_delegated_next_call(self):
"""
Test raising exception in delegated next() call
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
raise ValueError("hovercraft is full of eels")
yield "g2 more spam"
finally:
trace.append("Finishing g2")
try:
for x in g1():
trace.append("Yielded %s" % (x,))
except ValueError as e:
self.assertEqual(e.args[0], "hovercraft is full of eels")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1",
])
def test_delegation_of_send(self):
"""
Test delegation of send()
"""
trace = []
def g1():
trace.append("Starting g1")
x = yield "g1 ham"
trace.append("g1 received %s" % (x,))
yield from g2()
x = yield "g1 eggs"
trace.append("g1 received %s" % (x,))
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
x = yield "g2 spam"
trace.append("g2 received %s" % (x,))
x = yield "g2 more spam"
trace.append("g2 received %s" % (x,))
trace.append("Finishing g2")
g = g1()
y = next(g)
x = 1
try:
while 1:
y = g.send(x)
trace.append("Yielded %s" % (y,))
x += 1
except StopIteration:
pass
self.assertEqual(trace,[
"Starting g1",
"g1 received 1",
"Starting g2",
"Yielded g2 spam",
"g2 received 2",
"Yielded g2 more spam",
"g2 received 3",
"Finishing g2",
"Yielded g1 eggs",
"g1 received 4",
"Finishing g1",
])
def test_handling_exception_while_delegating_send(self):
"""
Test handling exception while delegating 'send'
"""
trace = []
def g1():
trace.append("Starting g1")
x = yield "g1 ham"
trace.append("g1 received %s" % (x,))
yield from g2()
x = yield "g1 eggs"
trace.append("g1 received %s" % (x,))
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
x = yield "g2 spam"
trace.append("g2 received %s" % (x,))
raise ValueError("hovercraft is full of eels")
x = yield "g2 more spam"
trace.append("g2 received %s" % (x,))
trace.append("Finishing g2")
def run():
g = g1()
y = next(g)
x = 1
try:
while 1:
y = g.send(x)
trace.append("Yielded %s" % (y,))
x += 1
except StopIteration:
trace.append("StopIteration")
self.assertRaises(ValueError,run)
self.assertEqual(trace,[
"Starting g1",
"g1 received 1",
"Starting g2",
"Yielded g2 spam",
"g2 received 2",
])
def test_delegating_close(self):
"""
Test delegating 'close'
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
finally:
trace.append("Finishing g2")
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
g.close()
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1"
])
def test_handing_exception_while_delegating_close(self):
"""
Test handling exception while delegating 'close'
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
finally:
trace.append("Finishing g2")
raise ValueError("nybbles have exploded with delight")
try:
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
g.close()
except ValueError as e:
self.assertEqual(e.args[0], "nybbles have exploded with delight")
self.assertIsInstance(e.__context__, GeneratorExit)
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1",
])
def test_delegating_throw(self):
"""
Test delegating 'throw'
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
finally:
trace.append("Finishing g2")
try:
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
e = ValueError("tomato ejected")
g.throw(e)
except ValueError as e:
self.assertEqual(e.args[0], "tomato ejected")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1",
])
def test_value_attribute_of_StopIteration_exception(self):
"""
Test 'value' attribute of StopIteration exception
"""
trace = []
def pex(e):
trace.append("%s: %s" % (e.__class__.__name__, e))
trace.append("value = %s" % (e.value,))
e = StopIteration()
pex(e)
e = StopIteration("spam")
pex(e)
e.value = "eggs"
pex(e)
self.assertEqual(trace,[
"StopIteration: ",
"value = None",
"StopIteration: spam",
"value = spam",
"StopIteration: spam",
"value = eggs",
])
def test_exception_value_crash(self):
# There used to be a refcount error when the return value
# stored in the StopIteration has a refcount of 1.
def g1():
yield from g2()
def g2():
yield "g2"
return [42]
self.assertEqual(list(g1()), ["g2"])
def test_generator_return_value(self):
"""
Test generator return value
"""
trace = []
def g1():
trace.append("Starting g1")
yield "g1 ham"
ret = yield from g2()
trace.append("g2 returned %s" % (ret,))
ret = yield from g2(42)
trace.append("g2 returned %s" % (ret,))
yield "g1 eggs"
trace.append("Finishing g1")
def g2(v = None):
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
trace.append("Finishing g2")
if v:
return v
for x in g1():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned None",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned 42",
"Yielded g1 eggs",
"Finishing g1",
])
def test_delegation_of_next_to_non_generator(self):
"""
Test delegation of next() to non-generator
"""
trace = []
def g():
yield from range(3)
for x in g():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Yielded 0",
"Yielded 1",
"Yielded 2",
])
def test_conversion_of_sendNone_to_next(self):
"""
Test conversion of send(None) to next()
"""
trace = []
def g():
yield from range(3)
gi = g()
for x in range(3):
y = gi.send(None)
trace.append("Yielded: %s" % (y,))
self.assertEqual(trace,[
"Yielded: 0",
"Yielded: 1",
"Yielded: 2",
])
def test_delegation_of_close_to_non_generator(self):
"""
Test delegation of close() to non-generator
"""
trace = []
def g():
try:
trace.append("starting g")
yield from range(3)
trace.append("g should not be here")
finally:
trace.append("finishing g")
gi = g()
next(gi)
with captured_stderr() as output:
gi.close()
self.assertEqual(output.getvalue(), '')
self.assertEqual(trace,[
"starting g",
"finishing g",
])
def test_delegating_throw_to_non_generator(self):
"""
Test delegating 'throw' to non-generator
"""
trace = []
def g():
try:
trace.append("Starting g")
yield from range(10)
finally:
trace.append("Finishing g")
try:
gi = g()
for i in range(5):
x = next(gi)
trace.append("Yielded %s" % (x,))
e = ValueError("tomato ejected")
gi.throw(e)
except ValueError as e:
self.assertEqual(e.args[0],"tomato ejected")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g",
"Yielded 0",
"Yielded 1",
"Yielded 2",
"Yielded 3",
"Yielded 4",
"Finishing g",
])
def test_attempting_to_send_to_non_generator(self):
"""
Test attempting to send to non-generator
"""
trace = []
def g():
try:
trace.append("starting g")
yield from range(3)
trace.append("g should not be here")
finally:
trace.append("finishing g")
try:
gi = g()
next(gi)
for x in range(3):
y = gi.send(42)
trace.append("Should not have yielded: %s" % (y,))
except AttributeError as e:
self.assertIn("send", e.args[0])
else:
self.fail("was able to send into non-generator")
self.assertEqual(trace,[
"starting g",
"finishing g",
])
def test_broken_getattr_handling(self):
"""
Test subiterator with a broken getattr implementation
"""
class Broken:
def __iter__(self):
return self
def __next__(self):
return 1
def __getattr__(self, attr):
1/0
def g():
yield from Broken()
with self.assertRaises(ZeroDivisionError):
gi = g()
self.assertEqual(next(gi), 1)
gi.send(1)
with self.assertRaises(ZeroDivisionError):
gi = g()
self.assertEqual(next(gi), 1)
gi.throw(AttributeError)
with captured_stderr() as output:
gi = g()
self.assertEqual(next(gi), 1)
gi.close()
self.assertIn('ZeroDivisionError', output.getvalue())
def test_exception_in_initial_next_call(self):
"""
Test exception in initial next() call
"""
trace = []
def g1():
trace.append("g1 about to yield from g2")
yield from g2()
trace.append("g1 should not be here")
def g2():
yield 1/0
def run():
gi = g1()
next(gi)
self.assertRaises(ZeroDivisionError,run)
self.assertEqual(trace,[
"g1 about to yield from g2"
])
def test_attempted_yield_from_loop(self):
"""
Test attempted yield-from loop
"""
trace = []
def g1():
trace.append("g1: starting")
yield "y1"
trace.append("g1: about to yield from g2")
yield from g2()
trace.append("g1 should not be here")
def g2():
trace.append("g2: starting")
yield "y2"
trace.append("g2: about to yield from g1")
yield from gi
trace.append("g2 should not be here")
try:
gi = g1()
for y in gi:
trace.append("Yielded: %s" % (y,))
except ValueError as e:
self.assertEqual(e.args[0],"generator already executing")
else:
self.fail("subgenerator didn't raise ValueError")
self.assertEqual(trace,[
"g1: starting",
"Yielded: y1",
"g1: about to yield from g2",
"g2: starting",
"Yielded: y2",
"g2: about to yield from g1",
])
def test_returning_value_from_delegated_throw(self):
"""
Test returning value from delegated 'throw'
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
except LunchError:
trace.append("Caught LunchError in g2")
yield "g2 lunch saved"
yield "g2 yet more spam"
class LunchError(Exception):
pass
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
e = LunchError("tomato ejected")
g.throw(e)
for x in g:
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Caught LunchError in g2",
"Yielded g2 yet more spam",
"Yielded g1 eggs",
"Finishing g1",
])
def test_next_and_return_with_value(self):
"""
Test next and return with value
"""
trace = []
def f(r):
gi = g(r)
next(gi)
try:
trace.append("f resuming g")
next(gi)
trace.append("f SHOULD NOT BE HERE")
except StopIteration as e:
trace.append("f caught %s" % (repr(e),))
def g(r):
trace.append("g starting")
yield
trace.append("g returning %s" % (r,))
return r
f(None)
f(42)
self.assertEqual(trace,[
"g starting",
"f resuming g",
"g returning None",
"f caught StopIteration()",
"g starting",
"f resuming g",
"g returning 42",
"f caught StopIteration(42,)",
])
def test_send_and_return_with_value(self):
"""
Test send and return with value
"""
trace = []
def f(r):
gi = g(r)
next(gi)
try:
trace.append("f sending spam to g")
gi.send("spam")
trace.append("f SHOULD NOT BE HERE")
except StopIteration as e:
trace.append("f caught %r" % (e,))
def g(r):
trace.append("g starting")
x = yield
trace.append("g received %s" % (x,))
trace.append("g returning %s" % (r,))
return r
f(None)
f(42)
self.assertEqual(trace,[
"g starting",
"f sending spam to g",
"g received spam",
"g returning None",
"f caught StopIteration()",
"g starting",
"f sending spam to g",
"g received spam",
"g returning 42",
"f caught StopIteration(42,)",
])
def test_catching_exception_from_subgen_and_returning(self):
"""
Test catching an exception thrown into a
subgenerator and returning a value
"""
trace = []
def inner():
try:
yield 1
except ValueError:
trace.append("inner caught ValueError")
return 2
def outer():
v = yield from inner()
trace.append("inner returned %r to outer" % v)
yield v
g = outer()
trace.append(next(g))
trace.append(g.throw(ValueError))
self.assertEqual(trace,[
1,
"inner caught ValueError",
"inner returned 2 to outer",
2,
])
def test_throwing_GeneratorExit_into_subgen_that_returns(self):
"""
Test throwing GeneratorExit into a subgenerator that
catches it and returns normally.
"""
trace = []
def f():
try:
trace.append("Enter f")
yield
trace.append("Exit f")
except GeneratorExit:
return
def g():
trace.append("Enter g")
yield from f()
trace.append("Exit g")
try:
gi = g()
next(gi)
gi.throw(GeneratorExit)
except GeneratorExit:
pass
else:
self.fail("subgenerator failed to raise GeneratorExit")
self.assertEqual(trace,[
"Enter g",
"Enter f",
])
def test_throwing_GeneratorExit_into_subgenerator_that_yields(self):
"""
Test throwing GeneratorExit into a subgenerator that
catches it and yields.
"""
trace = []
def f():
try:
trace.append("Enter f")
yield
trace.append("Exit f")
except GeneratorExit:
yield
def g():
trace.append("Enter g")
yield from f()
trace.append("Exit g")
try:
gi = g()
next(gi)
gi.throw(GeneratorExit)
except RuntimeError as e:
self.assertEqual(e.args[0], "generator ignored GeneratorExit")
else:
self.fail("subgenerator failed to raise GeneratorExit")
self.assertEqual(trace,[
"Enter g",
"Enter f",
])
def test_throwing_GeneratorExit_into_subgen_that_raises(self):
"""
Test throwing GeneratorExit into a subgenerator that
catches it and raises a different exception.
"""
trace = []
def f():
try:
trace.append("Enter f")
yield
trace.append("Exit f")
except GeneratorExit:
raise ValueError("Vorpal bunny encountered")
def g():
trace.append("Enter g")
yield from f()
trace.append("Exit g")
try:
gi = g()
next(gi)
gi.throw(GeneratorExit)
except ValueError as e:
self.assertEqual(e.args[0], "Vorpal bunny encountered")
self.assertIsInstance(e.__context__, GeneratorExit)
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Enter g",
"Enter f",
])
def test_yield_from_empty(self):
def g():
yield from ()
self.assertRaises(StopIteration, next, g())
def test_delegating_generators_claim_to_be_running(self):
# Check with basic iteration
def one():
yield 0
yield from two()
yield 3
def two():
yield 1
try:
yield from g1
except ValueError:
pass
yield 2
g1 = one()
self.assertEqual(list(g1), [0, 1, 2, 3])
# Check with send
g1 = one()
res = [next(g1)]
try:
while True:
res.append(g1.send(42))
except StopIteration:
pass
self.assertEqual(res, [0, 1, 2, 3])
# Check with throw
class MyErr(Exception):
pass
def one():
try:
yield 0
except MyErr:
pass
yield from two()
try:
yield 3
except MyErr:
pass
def two():
try:
yield 1
except MyErr:
pass
try:
yield from g1
except ValueError:
pass
try:
yield 2
except MyErr:
pass
g1 = one()
res = [next(g1)]
try:
while True:
res.append(g1.throw(MyErr))
except StopIteration:
pass
# Check with close
class MyIt(object):
def __iter__(self):
return self
def __next__(self):
return 42
def close(self_):
self.assertTrue(g1.gi_running)
self.assertRaises(ValueError, next, g1)
def one():
yield from MyIt()
g1 = one()
next(g1)
g1.close()
def test_delegator_is_visible_to_debugger(self):
def call_stack():
return [f[3] for f in inspect.stack()]
def gen():
yield call_stack()
yield call_stack()
yield call_stack()
def spam(g):
yield from g
def eggs(g):
yield from g
for stack in spam(gen()):
self.assertTrue('spam' in stack)
for stack in spam(eggs(gen())):
self.assertTrue('spam' in stack and 'eggs' in stack)
def test_custom_iterator_return(self):
# See issue #15568
class MyIter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration(42)
def gen():
nonlocal ret
ret = yield from MyIter()
ret = None
list(gen())
self.assertEqual(ret, 42)
def test_close_with_cleared_frame(self):
# See issue #17669.
#
# Create a stack of generators: outer() delegating to inner()
# delegating to innermost(). The key point is that the instance of
# inner is created first: this ensures that its frame appears before
# the instance of outer in the GC linked list.
#
# At the gc.collect call:
# - frame_clear is called on the inner_gen frame.
# - gen_dealloc is called on the outer_gen generator (the only
# reference is in the frame's locals).
# - gen_close is called on the outer_gen generator.
# - gen_close_iter is called to close the inner_gen generator, which
# in turn calls gen_close, and gen_yf.
#
# Previously, gen_yf would crash since inner_gen's frame had been
# cleared (and in particular f_stacktop was NULL).
def innermost():
yield
def inner():
outer_gen = yield
yield from innermost()
def outer():
inner_gen = yield
yield from inner_gen
with disable_gc():
inner_gen = inner()
outer_gen = outer()
outer_gen.send(None)
outer_gen.send(inner_gen)
outer_gen.send(outer_gen)
del outer_gen
del inner_gen
gc_collect()
def test_main():
    """Run the PEP 380 test class through the regrtest machinery."""
    from test import support
    support.run_unittest(TestPEP380Operation)
if __name__ == '__main__':
test_main()
|
maxiee/MyCodes | refs/heads/master | AlgorithmInPython/Sort/sort_exer_151014.py | 1 | __author__ = 'maxiee'
import Sort.utils as utils
class Base:
    """Common scaffolding for the sorting exercises.

    Fix: ``data`` and ``aux`` used to be *class* attributes, so ``aux``
    was shared between every instance; both are now per-instance state.
    """

    def __init__(self, data):
        self.data = data
        self.aux = []  # auxiliary buffer used by merge-based subclasses

    def sort(self):
        """Sort self.data in place; subclasses supply the algorithm."""
        pass

    def less(self, i, j):
        """True when data[i] < data[j]."""
        return self.data[i] < self.data[j]

    def more(self, i, j):
        """True when data[i] > data[j]."""
        return self.data[i] > self.data[j]

    def exch(self, i, j):
        """Swap data[i] and data[j]."""
        self.data[i], self.data[j] = self.data[j], self.data[i]

    def show(self):
        print(self.data)

    def is_sorted(self):
        """True when data is in non-decreasing order (vacuously for <2 items)."""
        return all(not self.less(i + 1, i) for i in range(len(self.data) - 1))
# Selection sort
class SelectSort(Base):
    """Selection sort: move the minimum of the unsorted tail to the front.

    Fix: the original was an unfilled exercise stub that returned the
    data unsorted.
    """

    def sort(self):
        n = len(self.data)
        for i in range(n):
            smallest = i
            for j in range(i + 1, n):
                if self.less(j, smallest):
                    smallest = j
            self.exch(i, smallest)
        return self.data
# Insertion sort
class InsertSort(Base):
    """Insertion sort: sink each new element into a growing sorted prefix.

    Fix: the original was an unfilled exercise stub that returned the
    data unsorted.
    """

    def sort(self):
        for i in range(1, len(self.data)):
            j = i
            while j > 0 and self.less(j, j - 1):
                self.exch(j, j - 1)
                j -= 1
        return self.data
# Shellsort
class Shell(Base):
    """Shellsort with the Knuth gap sequence (1, 4, 13, 40, ...).

    Fix: the original was an unfilled exercise stub that returned the
    data unsorted.
    """

    def sort(self):
        n = len(self.data)
        h = 1
        while h < n // 3:
            h = 3 * h + 1
        while h >= 1:
            # h-sort the array: insertion sort with stride h.
            for i in range(h, n):
                j = i
                while j >= h and self.less(j, j - h):
                    self.exch(j, j - h)
                    j -= h
            h //= 3
        return self.data
# Mergesort (top-down)
class Merge(Base):
    """Top-down (recursive) mergesort.

    Improvements over the original:
    - the auxiliary buffer is allocated once in sort() and merge() refreshes
      only the active subrange (the old code copied the *entire* list on
      every merge call, making each merge O(n) regardless of range);
    - ties take the left element, so the sort is stable;
    - the redundant ``__init__`` that only called ``super().__init__`` is gone.
    """

    def sort(self):
        """Sort self.data in place and return it."""
        self.aux = list(self.data)
        self.sort_sub(0, len(self.data) - 1)
        return self.data

    def sort_sub(self, lo, hi):
        """Recursively sort data[lo..hi] (inclusive bounds)."""
        if hi <= lo:
            return
        mid = lo + (hi - lo) // 2
        self.sort_sub(lo, mid)
        self.sort_sub(mid + 1, hi)
        self.merge(lo, mid, hi)

    def merge(self, lo, mid, hi):
        """Merge the sorted runs data[lo..mid] and data[mid+1..hi]."""
        if len(self.aux) != len(self.data):
            # Defensive: allows merge()/sort_sub() to be called without sort().
            self.aux = list(self.data)
        else:
            self.aux[lo:hi + 1] = self.data[lo:hi + 1]
        i = lo
        j = mid + 1
        for k in range(lo, hi + 1):
            if i > mid:  # left run exhausted
                self.data[k] = self.aux[j]
                j += 1
            elif j > hi:  # right run exhausted
                self.data[k] = self.aux[i]
                i += 1
            elif self.aux[j] < self.aux[i]:
                self.data[k] = self.aux[j]
                j += 1
            else:  # ties prefer the left run -> stable
                self.data[k] = self.aux[i]
                i += 1
# Quicksort
class Quick(Base):
    # Sedgewick-style two-way quicksort; sorts self.data in place.
    def sort(self):
        self.sort_sub(0, len(self.data) -1)
        return self.data
    # Recursively sort data[lo..hi] (inclusive bounds).
    def sort_sub(self, lo, hi):
        if (hi <= lo):
            return
        j = self.partition(lo, hi)
        self.sort_sub(lo, j - 1)
        self.sort_sub(j + 1, hi)
    # Partition data[lo..hi] around the pivot data[lo]; returns the
    # pivot's final resting index.
    def partition(self, lo, hi):
        v = lo           # pivot position (stays at lo until the final swap)
        i = lo           # left scan pointer
        j = hi + 1       # right scan pointer
        while True:
            i += 1
            # scan right while items are smaller than the pivot
            while self.less(i, lo):
                if i == hi:
                    break
                i += 1
            j -= 1
            # scan left while items are larger than the pivot
            while self.more(j, lo):
                if j == lo:
                    break
                j -= 1
            # pointers crossed: partitioning of this range is done
            if i >= j:
                break
            self.exch(i, j)
        # put the pivot into its final position; data[lo..j-1] <= pivot <= data[j+1..hi]
        self.exch(v, j)
        return j
def sort_and_test(name, cls, l):
    """Instantiate *cls* on a copy of *l*, sort it, and print the verdict."""
    sorter = cls(list(l))
    sorter.sort()
    print(name, sorter.is_sorted())
if __name__ == "__main__":
    # Exercise driver: build a random list of strings and check each
    # sorting algorithm reports a sorted result.
    length = 100
    l = utils.gen_string_list(length)
    # sort_and_test("选择排序", SelectSort, l)
    # sort_and_test("插入排序", InsertSort, l)
    # sort_and_test("希尔排序", Shell, l)
    sort_and_test("自顶向下归并", Merge, l)
    sort_and_test("快速排序", Quick, l)
Stanford-Online/edx-platform | refs/heads/master | common/djangoapps/entitlements/tests/factories.py | 17 | import string
from uuid import uuid4
import factory
from factory.fuzzy import FuzzyChoice, FuzzyText
from student.tests.factories import UserFactory
from course_modes.helpers import CourseMode
from entitlements.models import CourseEntitlement, CourseEntitlementPolicy
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from student.tests.factories import UserFactory
class CourseEntitlementPolicyFactory(factory.django.DjangoModelFactory):
    """
    Factory for a CourseEntitlementPolicy
    """
    class Meta(object):
        model = CourseEntitlementPolicy

    site = factory.SubFactory(SiteFactory)
class CourseEntitlementFactory(factory.django.DjangoModelFactory):
    """
    Factory for a CourseEntitlement with randomized identifiers and a
    verified/professional mode; not expired and not enrolled by default.
    """
    class Meta(object):
        model = CourseEntitlement

    uuid = factory.LazyFunction(uuid4)
    course_uuid = factory.LazyFunction(uuid4)
    expired_at = None
    mode = FuzzyChoice([CourseMode.VERIFIED, CourseMode.PROFESSIONAL])
    user = factory.SubFactory(UserFactory)
    # Synthetic order number: fixed "TEXTX" prefix followed by random digits.
    order_number = FuzzyText(prefix='TEXTX', chars=string.digits)
    enrollment_course_run = None
    policy = factory.SubFactory(CourseEntitlementPolicyFactory)
|
LouisePaulDelvaux/Til-Liam | refs/heads/master | src_liam/config.py | 1 | from __future__ import print_function
import os

# Module-level configuration flags; other modules read (and may assign)
# these values directly.

# NOTE(review): os.environ.get returns the raw *string* when DEBUG is set,
# so any non-empty value -- including "0" or "false" -- makes this truthy.
debug = os.environ.get("DEBUG", False)

input_directory = "."
output_directory = "."
skip_shows = False
assertions = "raise"
show_timings = True
autodump = None
autodump_file = None
autodiff = None
|
msebire/intellij-community | refs/heads/master | python/testData/surround/CustomFoldingRegionSeveralMethods_after.py | 22 | class C:
def m1(self):
pass
# <editor-fold desc="Description">
def m2(self):
pass
def m3(self):
pass
# </editor-fold> |
theknightorc/p2pool-candycoin | refs/heads/master | wstools/tests/test_wstools_net.py | 308 | #!/usr/bin/env python
############################################################################
# Joshua R. Boverhof, David W. Robertson, LBNL
# See LBNLCopyright for copyright notice!
###########################################################################
import unittest
import test_wsdl
def makeTestSuite():
    """Build the suite of service-by-HTTP WSDL tests."""
    return unittest.TestSuite([test_wsdl.makeTestSuite("services_by_http")])
def main():
    """Run the suite returned by makeTestSuite via unittest's CLI runner."""
    unittest.main(defaultTest="makeTestSuite")


if __name__ == "__main__":
    main()
|
peterorum/functal | refs/heads/master | titles/nltk2-tweets.py | 1 | #!/usr/bin/python
import nltk
import os
import re
# import sys
# import random
# import collections
from pprint import pprint
import pymongo
client = pymongo.MongoClient(os.getenv('mongo_functal'))
db = client['topics']
possibly_sensitive_words = set(line.strip().replace(r'^', r'\b').replace(r'$', r'\b')
for line in open(os.getenv('functal_folder') + '/words/words.txt') if len(line.strip()) > 0)
sensitive_re = '|'.join(list(possibly_sensitive_words))
def analyze_tweets():
    """Print the total tweet count and the share matching the sensitive-word regex."""
    total_tweets = db.tweets.find().count()
    print('total tweets: ' + str(total_tweets))

    # about 7% are sensitive
    sensitive_tweets = list([tweet['text'] for tweet in db.tweets.find({'text': {'$regex': sensitive_re}})])
    print('total sensitive tweets: ' + str(round(len(sensitive_tweets) / total_tweets * 100, 2)) + '%')
    pprint(sensitive_tweets[:5])
def get_corpus():
words = []
topic = 'grid'
tweets = set([tweet['text'] for tweet in db.tweets.find({'topic': topic})])
for tweet in tweets:
text = tweet.lower()
# remove links
text = re.sub(r'http[^\b]*', '', text)
# remove names
text = re.sub(r'@[^\b]*', '', text)
# remove hashtags
text = re.sub(r'#[^\b]*', '', text)
# remove special chars
text = re.sub(r'[!]', '', text)
# remove entities
text = re.sub(r'&', 'and', text)
text = re.sub(r'&.*;', '', text)
words = words + nltk.tokenize.WhitespaceTokenizer().tokenize(text)
return nltk.Text(words)
#--- main

def main():
    """Entry point: build the tweet corpus and print generated sample text."""
    # analyze_tweets()

    corpus = get_corpus()
    # pprint(corpus.concordance('grid'))

    for i in range(1, 2):
        corpus.generate(100)

#---
main()
|
joshowen/django-allauth | refs/heads/master | allauth/socialaccount/providers/oauth/provider.py | 6 | from django.utils.http import urlencode
from allauth.compat import parse_qsl, reverse
from allauth.socialaccount.providers.base import Provider
class OAuthProvider(Provider):
    """Base provider for OAuth 1.0a social-account providers."""

    def get_login_url(self, request, **kwargs):
        """Return the provider login URL, with *kwargs* as a query string."""
        url = reverse(self.id + "_login")
        if kwargs:
            url = url + '?' + urlencode(kwargs)
        return url

    def get_auth_params(self, request, action):
        """Merge configured AUTH_PARAMS with any 'auth_params' query override.

        Request-supplied params (parsed as a query string) take precedence
        over the statically configured ones.
        """
        settings = self.get_settings()
        ret = dict(settings.get('AUTH_PARAMS', {}))
        dynamic_auth_params = request.GET.get('auth_params', None)
        if dynamic_auth_params:
            ret.update(dict(parse_qsl(dynamic_auth_params)))
        return ret

    def get_auth_url(self, request, action):
        # TODO: This is ugly. Move authorization_url away from the
        # adapter into the provider. Hmpf, the line between
        # adapter/provider is a bit too thin here.
        return None

    def get_scope(self, request):
        """Return the configured SCOPE, or the provider default when unset."""
        settings = self.get_settings()
        scope = settings.get('SCOPE')
        if scope is None:
            scope = self.get_default_scope()
        return scope

    def get_default_scope(self):
        """Default scope when no SCOPE is configured; subclasses may override."""
        return []
|
plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/isosurface/hoverlabel/font/_familysrc.py | 1 | import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for isosurface.hoverlabel.font.familysrc.

    Validates the source (column) reference for the hover-label font
    family attribute; edit_type "none" means changes trigger no re-render.
    """
    def __init__(
        self,
        plotly_name="familysrc",
        parent_name="isosurface.hoverlabel.font",
        **kwargs
    ):
        super(FamilysrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
ishank08/scikit-learn | refs/heads/master | doc/sphinxext/sphinx_gallery/__init__.py | 25 | """
==============
Sphinx Gallery
==============
"""
import os
__version__ = '0.1.7'
def glr_path_static():
    """Returns path to packaged static files"""
    package_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(package_dir, '_static'))
|
serviceagility/boto | refs/heads/develop | boto/roboto/__init__.py | 9480 | #
|
joshwalawender/POCS | refs/heads/develop | pocs/tests/test_dispatch_scheduler.py | 1 | import os
import pytest
import yaml
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.time import Time
from astroplan import Observer
from pocs.scheduler.dispatch import Scheduler
from pocs.scheduler.constraint import Duration
from pocs.scheduler.constraint import MoonAvoidance
# Simple constraint to maximize duration above a certain altitude
constraints = [MoonAvoidance(), Duration(30 * u.deg)]
@pytest.fixture
def observer(config):
    """Build an astroplan Observer from the test config's location section."""
    loc = config['location']
    location = EarthLocation(lon=loc['longitude'], lat=loc['latitude'], height=loc['elevation'])
    return Observer(location=location, name="Test Observer", timezone=loc['timezone'])
@pytest.fixture()
def field_list():
    """Static list of target-field dicts, parsed from inline YAML.

    Uses yaml.safe_load: yaml.load without an explicit Loader is deprecated
    and can construct arbitrary Python objects; this plain data only needs
    the safe tag set.
    """
    return yaml.safe_load("""
    -
        name: HD 189733
        position: 20h00m43.7135s +22d42m39.0645s
        priority: 100
    -
        name: HD 209458
        position: 22h03m10.7721s +18d53m03.543s
        priority: 100
    -
        name: Tres 3
        position: 17h52m07.02s +37d32m46.2012s
        priority: 100
        exp_set_size: 15
        min_nexp: 240
    -
        name: M5
        position: 15h18m33.2201s +02d04m51.7008s
        priority: 50
    -
        name: KIC 8462852
        position: 20h06m15.4536s +44d27m24.75s
        priority: 50
        exp_time: 60
        exp_set_size: 15
        min_nexp: 45
    -
        name: Wasp 33
        position: 02h26m51.0582s +37d33m01.733s
        priority: 100
    -
        name: M42
        position: 05h35m17.2992s -05d23m27.996s
        priority: 25
        exp_time: 240
    -
        name: M44
        position: 08h40m24s +19d40m00.12s
        priority: 50
    """)
@pytest.fixture
def scheduler(field_list, observer):
    """Dispatch scheduler over the fixture fields with the module constraints."""
    return Scheduler(observer, fields_list=field_list, constraints=constraints)
def test_get_observation(scheduler):
time = Time('2016-08-13 10:00:00')
best = scheduler.get_observation(time=time)
assert best[0] == 'HD 189733'
assert type(best[1]) == float
def test_observation_seq_time(scheduler):
time = Time('2016-08-13 10:00:00')
scheduler.get_observation(time=time)
assert scheduler.current_observation.seq_time is not None
def test_no_valid_obseravtion(scheduler):
time = Time('2016-08-13 15:00:00')
scheduler.get_observation(time=time)
assert scheduler.current_observation is None
def test_continue_observation(scheduler):
time = Time('2016-08-13 11:00:00')
scheduler.get_observation(time=time)
assert scheduler.current_observation is not None
obs = scheduler.current_observation
time = Time('2016-08-13 13:00:00')
scheduler.get_observation(time=time)
assert scheduler.current_observation == obs
time = Time('2016-08-13 14:30:00')
scheduler.get_observation(time=time)
assert scheduler.current_observation is None
def test_set_observation_then_reset(scheduler):
    """Priority changes alter selection; re-selecting keeps the seq_time.

    Fix: the original cleared POCSTIME with ``try: del os.environ[...]
    except Exception: pass`` — a broad swallow of every exception type;
    ``os.environ.pop(key, None)`` expresses the same intent safely.
    """
    os.environ.pop('POCSTIME', None)

    time = Time('2016-08-13 05:00:00')
    scheduler.get_observation(time=time)
    obs1 = scheduler.current_observation
    original_seq_time = obs1.seq_time

    # Reset priority
    scheduler.observations[obs1.name].priority = 1.0

    time = Time('2016-08-13 05:30:00')
    scheduler.get_observation(time=time)
    obs2 = scheduler.current_observation
    assert obs1 != obs2

    scheduler.observations[obs1.name].priority = 500.0

    time = Time('2016-08-13 06:00:00')
    scheduler.get_observation(time=time)
    obs3 = scheduler.current_observation
    obs3_seq_time = obs3.seq_time

    assert original_seq_time != obs3_seq_time

    # Now reselect same target and test that seq_time does not change
    scheduler.get_observation(time=time)
    obs4 = scheduler.current_observation
    assert obs4.seq_time == obs3_seq_time
def test_reset_observation(scheduler):
time = Time('2016-08-13 05:00:00')
scheduler.get_observation(time=time)
# We have an observation so we have a seq_time
assert scheduler.current_observation.seq_time is not None
obs = scheduler.current_observation
# Trigger a reset
scheduler.current_observation = None
assert obs.seq_time is None
def test_new_observation_seq_time(scheduler):
time = Time('2016-09-11 07:08:00')
scheduler.get_observation(time=time)
# We have an observation so we have a seq_time
assert scheduler.current_observation.seq_time is not None
# A few hours later
time = Time('2016-09-11 10:30:00')
scheduler.get_observation(time=time)
assert scheduler.current_observation.seq_time is not None
def test_observed_list(scheduler):
assert len(scheduler.observed_list) == 0
time = Time('2016-09-11 07:08:00')
scheduler.get_observation(time=time)
assert len(scheduler.observed_list) == 1
# A few hours later should now be different
time = Time('2016-09-11 10:30:00')
scheduler.get_observation(time=time)
assert len(scheduler.observed_list) == 2
# A few hours later should be the same
time = Time('2016-09-11 14:30:00')
scheduler.get_observation(time=time)
assert len(scheduler.observed_list) == 2
scheduler.reset_observed_list()
assert len(scheduler.observed_list) == 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.