| code: stringlengths 2–1.05M | repo_name: stringlengths 5–104 | path: stringlengths 4–251 | language: stringclasses 1 value | license: stringclasses 15 values | size: int32 2–1.05M |
|---|---|---|---|---|---|
''' Copyright (c) 2014 Potential Ventures Ltd
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Potential Ventures Ltd,
SolarFlare Communications Inc nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
"""
Drivers for Advanced Microcontroller Bus Architecture
"""
import cocotb
from cocotb.triggers import RisingEdge, ReadOnly, Lock
from cocotb.drivers import BusDriver
from cocotb.result import ReturnValue
from cocotb.binary import BinaryValue
import binascii
import array
class AXIReadError(Exception):
pass
class AXI4LiteMaster(BusDriver):
"""
AXI4-Lite Master
TODO: Kill all pending transactions if reset is asserted...
"""
_signals = ["AWVALID", "AWADDR", "AWREADY", # Write address channel
"WVALID", "WREADY", "WDATA", "WSTRB", # Write data channel
"BVALID", "BREADY", "BRESP", # Write response channel
"ARVALID", "ARADDR", "ARREADY", # Read address channel
"RVALID", "RREADY", "RRESP", "RDATA"] # Read data channel
def __init__(self, entity, name, clock):
BusDriver.__init__(self, entity, name, clock)
# Drive some sensible defaults (setimmediatevalue to avoid x asserts)
self.bus.AWVALID.setimmediatevalue(0)
self.bus.WVALID.setimmediatevalue(0)
self.bus.ARVALID.setimmediatevalue(0)
self.bus.BREADY.setimmediatevalue(1)
self.bus.RREADY.setimmediatevalue(1)
# Mutex for each channel that we master to prevent contention
self.write_address_busy = Lock("%s_wabusy" % name)
self.read_address_busy = Lock("%s_rabusy" % name)
self.write_data_busy = Lock("%s_wbusy" % name)
@cocotb.coroutine
def _send_write_address(self, address, delay=0):
"""
Send the write address, with optional delay
"""
yield self.write_address_busy.acquire()
for cycle in range(delay):
yield RisingEdge(self.clock)
self.bus.AWADDR <= address
self.bus.AWVALID <= 1
while True:
yield ReadOnly()
if self.bus.AWREADY.value:
break
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
self.bus.AWVALID <= 0
self.write_address_busy.release()
@cocotb.coroutine
def _send_write_data(self, data, delay=0, byte_enable=0xF):
"""
        Send the write data, with optional delay
"""
yield self.write_data_busy.acquire()
for cycle in range(delay):
yield RisingEdge(self.clock)
self.bus.WDATA <= data
self.bus.WVALID <= 1
self.bus.WSTRB <= byte_enable
while True:
yield ReadOnly()
if self.bus.WREADY.value:
break
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
self.bus.WVALID <= 0
self.write_data_busy.release()
@cocotb.coroutine
def write(self, address, value, byte_enable=0xf, address_latency=0,
data_latency=0):
"""
Write a value to an address.
        The *_latency keyword arguments control how many clock cycles to wait before driving each channel.
"""
c_addr = cocotb.fork(self._send_write_address(address,
delay=address_latency))
c_data = cocotb.fork(self._send_write_data(value,
byte_enable=byte_enable,
delay=data_latency))
if c_addr:
yield c_addr.join()
if c_data:
yield c_data.join()
# Wait for the response
while True:
yield ReadOnly()
if self.bus.BVALID.value and self.bus.BREADY.value:
result = self.bus.BRESP.value
break
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
if int(result):
raise AXIReadError("Write to address 0x%08x failed with BRESP: %d"
% (address, int(result)))
raise ReturnValue(result)
@cocotb.coroutine
def read(self, address, sync=True):
"""
Read from an address.
"""
if sync:
yield RisingEdge(self.clock)
self.bus.ARADDR <= address
self.bus.ARVALID <= 1
while True:
yield ReadOnly()
if self.bus.ARREADY.value:
break
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
self.bus.ARVALID <= 0
while True:
yield ReadOnly()
if self.bus.RVALID.value and self.bus.RREADY.value:
data = self.bus.RDATA.value
result = self.bus.RRESP.value
break
yield RisingEdge(self.clock)
if int(result):
raise AXIReadError("Read address 0x%08x failed with RRESP: %d" %
(address, int(result)))
raise ReturnValue(data)
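# Illustrative usage sketch (not part of the original driver): a minimal
# coroutine showing how AXI4LiteMaster might be exercised from a cocotb test.
# The DUT handle "dut", its clock "dut.clk", the bus prefix "AXIML" and the
# register offset 0x0000 are assumptions for illustration only.
@cocotb.coroutine
def _example_axi4lite_usage(dut):
    axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
    # Single 32-bit write with all byte lanes enabled
    yield axim.write(0x0000, 0xDEADBEEF, byte_enable=0xF)
    # Read the same register back; read() returns the RDATA value
    value = yield axim.read(0x0000)
    raise ReturnValue(int(value))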
class AXI4Slave(BusDriver):
'''
AXI4 Slave
Monitors an internal memory and handles read and write requests.
'''
_signals = [
"ARREADY", "ARVALID", "ARADDR", # Read address channel
"ARLEN", "ARSIZE", "ARBURST", "ARPROT",
"RREADY", "RVALID", "RDATA", "RLAST", # Read response channel
"AWREADY", "AWADDR", "AWVALID", # Write address channel
"AWPROT", "AWSIZE", "AWBURST", "AWLEN",
"WREADY", "WVALID", "WDATA",
]
# Not currently supported by this driver
_optional_signals = [
"WLAST", "WSTRB",
"BVALID", "BREADY", "BRESP", "RRESP",
"RCOUNT", "WCOUNT", "RACOUNT", "WACOUNT",
"ARLOCK", "AWLOCK", "ARCACHE", "AWCACHE",
"ARQOS", "AWQOS", "ARID", "AWID",
"BID", "RID", "WID"
]
def __init__(self, entity, name, clock, memory, callback=None, event=None,
big_endian=False):
BusDriver.__init__(self, entity, name, clock)
self.clock = clock
        self.big_endian = big_endian
self.bus.ARREADY.setimmediatevalue(1)
self.bus.RVALID.setimmediatevalue(0)
self.bus.RLAST.setimmediatevalue(0)
self.bus.AWREADY.setimmediatevalue(1)
self._memory = memory
self.write_address_busy = Lock("%s_wabusy" % name)
self.read_address_busy = Lock("%s_rabusy" % name)
self.write_data_busy = Lock("%s_wbusy" % name)
cocotb.fork(self._read_data())
cocotb.fork(self._write_data())
def _size_to_bytes_in_beat(self, AxSIZE):
if AxSIZE < 7:
return 2 ** AxSIZE
return None
@cocotb.coroutine
def _write_data(self):
clock_re = RisingEdge(self.clock)
while True:
while True:
self.bus.WREADY <= 0
yield ReadOnly()
if self.bus.AWVALID.value:
self.bus.WREADY <= 1
break
yield clock_re
yield ReadOnly()
_awaddr = int(self.bus.AWADDR)
_awlen = int(self.bus.AWLEN)
_awsize = int(self.bus.AWSIZE)
_awburst = int(self.bus.AWBURST)
_awprot = int(self.bus.AWPROT)
burst_length = _awlen + 1
bytes_in_beat = self._size_to_bytes_in_beat(_awsize)
            word = BinaryValue(bits=bytes_in_beat*8, bigEndian=self.big_endian)
if __debug__:
self.log.debug(
"AWADDR %d\n" % _awaddr +
"AWLEN %d\n" % _awlen +
"AWSIZE %d\n" % _awsize +
"AWBURST %d\n" % _awburst +
"BURST_LENGTH %d\n" % burst_length +
"Bytes in beat %d\n" % bytes_in_beat)
burst_count = burst_length
yield clock_re
while True:
if self.bus.WVALID.value:
word = self.bus.WDATA.value
                    word.big_endian = self.big_endian
_burst_diff = burst_length - burst_count
_st = _awaddr + (_burst_diff * bytes_in_beat) # start
_end = _awaddr + ((_burst_diff + 1) * bytes_in_beat) # end
self._memory[_st:_end] = array.array('B', word.get_buff())
burst_count -= 1
if burst_count == 0:
break
yield clock_re
@cocotb.coroutine
def _read_data(self):
clock_re = RisingEdge(self.clock)
while True:
while True:
yield ReadOnly()
if self.bus.ARVALID.value:
break
yield clock_re
yield ReadOnly()
_araddr = int(self.bus.ARADDR)
_arlen = int(self.bus.ARLEN)
_arsize = int(self.bus.ARSIZE)
_arburst = int(self.bus.ARBURST)
_arprot = int(self.bus.ARPROT)
burst_length = _arlen + 1
bytes_in_beat = self._size_to_bytes_in_beat(_arsize)
            word = BinaryValue(bits=bytes_in_beat*8, bigEndian=self.big_endian)
if __debug__:
self.log.debug(
"ARADDR %d\n" % _araddr +
"ARLEN %d\n" % _arlen +
"ARSIZE %d\n" % _arsize +
"ARBURST %d\n" % _arburst +
"BURST_LENGTH %d\n" % burst_length +
"Bytes in beat %d\n" % bytes_in_beat)
burst_count = burst_length
yield clock_re
while True:
self.bus.RVALID <= 1
yield ReadOnly()
if self.bus.RREADY.value:
_burst_diff = burst_length - burst_count
_st = _araddr + (_burst_diff * bytes_in_beat)
_end = _araddr + ((_burst_diff + 1) * bytes_in_beat)
word.buff = self._memory[_st:_end].tostring()
self.bus.RDATA <= word
if burst_count == 1:
self.bus.RLAST <= 1
yield clock_re
burst_count -= 1
self.bus.RLAST <= 0
if burst_count == 0:
break
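# Illustrative usage sketch (not part of the original driver): backing an
# AXI4Slave with a byte array so DUT-initiated bursts read from and write to
# a simple memory model. "dut", "dut.clk" and the bus prefix "AXIS" are
# assumptions for illustration only.
def _example_axi4_slave(dut, size_bytes=4096):
    memory = array.array('B', [0] * size_bytes)
    slave = AXI4Slave(dut, "AXIS", dut.clk, memory)
    return slave, memory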
| stuarthodgson/cocotb | cocotb/drivers/amba.py | Python | bsd-3-clause | 11,722 |
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcredit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with malleable transactions
#
from test_framework import BitcreditTestFramework
from bitcreditrpc.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
from util import *
import os
import shutil
class TxnMallTest(BitcreditTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].move("", "foo", 1220)
self.nodes[0].move("", "bar", 30)
assert_equal(self.nodes[0].getbalance(""), 0)
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# First: use raw transaction API to send 1210 BTC to node1_address,
# but don't broadcast:
(total_in, inputs) = gather_inputs(self.nodes[0], 1210)
change_address = self.nodes[0].getnewaddress("foo")
outputs = {}
outputs[change_address] = 40
outputs[node1_address] = 1210
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
        # Create two transactions from node[0] to node[1]; the
# second must spend change from the first because the first
# spends all mature inputs:
txid1 = self.nodes[0].sendfrom("foo", node1_address, 1210, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus 1210, minus 20, and minus transaction fees:
expected = starting_balance
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo"), 1220+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar"), 30+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend to miner:
mutated_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].setgenerate(True, 1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].setgenerate(True, 1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -1)
assert_equal(tx2["confirmations"], -1)
# Node0's total balance should be starting balance, plus 100BTC for
# two more matured blocks, minus 1210 for the double-spend:
expected = starting_balance + 100 - 1210
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# foo account should be debited, but bar account should not:
assert_equal(self.nodes[0].getbalance("foo"), 1220-1210)
assert_equal(self.nodes[0].getbalance("bar"), 30)
        # Node1's "from0" account balance should be just the mutated send:
assert_equal(self.nodes[1].getbalance("from0"), 1210)
if __name__ == '__main__':
TxnMallTest().main()
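# Illustrative note (not part of the original test): scripts under
# qa/rpc-tests are normally launched through the project's test harness or
# directly, e.g. something like
#   python qa/rpc-tests/txn_doublespend.py --mineblock
# The exact invocation depends on the local build and is an assumption here.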
| psydrake/bicreditsnew | qa/rpc-tests/txn_doublespend.py | Python | mit | 4,976 |
import smtplib
import argparse
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def main(args):
# Allow HTML-formatted emails (very simplistic atm, should be expanded if used)
msg = MIMEMultipart("alternative")
if args["body"].startswith("<html>", 0, 10):
msg.attach(MIMEText(args["body"],"html"))
else:
msg.attach(MIMEText(args["body"],"plain"))
msg["Subject"] = args["sub"]
msg["From"] = args["from"]
msg["To"] = args["to"]
s = smtplib.SMTP(args["smtp"])
# If authentication is required:
# s.starttls()
# s.login(user, pass)
s.sendmail(args["from"], [args["to"]], msg.as_string())
s.quit()
if __name__ == "__main__":
p = argparse.ArgumentParser(description="Send an email")
p.add_argument("--to", "-t", required=True, help="To address")
p.add_argument("--from", "-f", required=True, help="From address")
p.add_argument("--sub", "-s", required=True, help="Subject")
p.add_argument("--body", "-b", required=True, help="Message body")
p.add_argument("--smtp", default="localhost", help="SMTP server")
args = p.parse_args()
main(vars(args))
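# Illustrative invocation (addresses and SMTP host are placeholders, not part
# of the original script):
#   python bravo_mailer.py --to you@example.com --from me@example.com \
#       --sub "Report" --body "<html><b>Done</b></html>" --smtp mail.example.com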
| jgruselius/standalone_scripts | bravo_mailer.py | Python | mit | 1,107 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import redis, frappe, re
import cPickle as pickle
from frappe.utils import cstr
class RedisWrapper(redis.Redis):
"""Redis client that will automatically prefix conf.db_name"""
def make_key(self, key, user=None):
if user:
if user == True:
user = frappe.session.user
key = "user:{0}:{1}".format(user, key)
return "{0}|{1}".format(frappe.conf.db_name, key).encode('utf-8')
def set_value(self, key, val, user=None, expires_in_sec=None):
"""Sets cache value.
:param key: Cache key
:param val: Value to be cached
:param user: Prepends key with User
:param expires_in_sec: Expire value of this key in X seconds
"""
key = self.make_key(key, user)
if not expires_in_sec:
frappe.local.cache[key] = val
try:
if expires_in_sec:
self.setex(key, pickle.dumps(val), expires_in_sec)
else:
self.set(key, pickle.dumps(val))
except redis.exceptions.ConnectionError:
return None
def get_value(self, key, generator=None, user=None, expires=False):
"""Returns cache value. If not found and generator function is
given, it will call the generator.
:param key: Cache key.
:param generator: Function to be called to generate a value if `None` is returned.
:param expires: If the key is supposed to be with an expiry, don't store it in frappe.local
"""
original_key = key
key = self.make_key(key, user)
if key in frappe.local.cache:
val = frappe.local.cache[key]
else:
val = None
try:
val = self.get(key)
except redis.exceptions.ConnectionError:
pass
if val is not None:
val = pickle.loads(val)
if not expires:
if val is None and generator:
val = generator()
self.set_value(original_key, val, user=user)
else:
frappe.local.cache[key] = val
return val
def get_all(self, key):
ret = {}
for k in self.get_keys(key):
            ret[k] = self.get_value(k)
return ret
def get_keys(self, key):
"""Return keys starting with `key`."""
try:
key = self.make_key(key + "*")
return self.keys(key)
except redis.exceptions.ConnectionError:
regex = re.compile(cstr(key).replace("|", "\|").replace("*", "[\w]*"))
return [k for k in frappe.local.cache.keys() if regex.match(k)]
def delete_keys(self, key):
"""Delete keys with wildcard `*`."""
try:
self.delete_value(self.get_keys(key), make_keys=False)
except redis.exceptions.ConnectionError:
pass
def delete_key(self, *args, **kwargs):
self.delete_value(*args, **kwargs)
def delete_value(self, keys, user=None, make_keys=True):
"""Delete value, list of values."""
if not isinstance(keys, (list, tuple)):
keys = (keys, )
for key in keys:
if make_keys:
key = self.make_key(key)
try:
self.delete(key)
except redis.exceptions.ConnectionError:
pass
if key in frappe.local.cache:
del frappe.local.cache[key]
def lpush(self, key, value):
super(redis.Redis, self).lpush(self.make_key(key), value)
def rpush(self, key, value):
super(redis.Redis, self).rpush(self.make_key(key), value)
def lpop(self, key):
return super(redis.Redis, self).lpop(self.make_key(key))
def llen(self, key):
return super(redis.Redis, self).llen(self.make_key(key))
def hset(self, name, key, value):
if not name in frappe.local.cache:
frappe.local.cache[name] = {}
frappe.local.cache[name][key] = value
try:
super(redis.Redis, self).hset(self.make_key(name), key, pickle.dumps(value))
except redis.exceptions.ConnectionError:
pass
def hgetall(self, name):
return {key: pickle.loads(value) for key, value in
super(redis.Redis, self).hgetall(self.make_key(name)).iteritems()}
def hget(self, name, key, generator=None):
if not name in frappe.local.cache:
frappe.local.cache[name] = {}
if key in frappe.local.cache[name]:
return frappe.local.cache[name][key]
value = None
try:
value = super(redis.Redis, self).hget(self.make_key(name), key)
except redis.exceptions.ConnectionError:
pass
if value:
value = pickle.loads(value)
frappe.local.cache[name][key] = value
elif generator:
value = generator()
try:
self.hset(name, key, value)
except redis.exceptions.ConnectionError:
pass
return value
def hdel(self, name, key):
if name in frappe.local.cache:
if key in frappe.local.cache[name]:
del frappe.local.cache[name][key]
try:
super(redis.Redis, self).hdel(self.make_key(name), key)
except redis.exceptions.ConnectionError:
pass
def hdel_keys(self, name_starts_with, key):
"""Delete hash names with wildcard `*` and key"""
for name in frappe.cache().get_keys(name_starts_with):
name = name.split("|", 1)[1]
self.hdel(name, key)
def hkeys(self, name):
try:
return super(redis.Redis, self).hkeys(self.make_key(name))
except redis.exceptions.ConnectionError:
return []
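# Illustrative usage sketch (assumes a configured frappe site and a reachable
# Redis server; the key name and 60-second expiry are arbitrary examples):
#
#   cache = frappe.cache()                 # typically returns a RedisWrapper
#   cache.set_value("greeting", "hello", expires_in_sec=60)
#   cache.get_value("greeting")            # "hello", from Redis or local cache
#   cache.delete_value("greeting")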
| aboganas/frappe | frappe/utils/redis_wrapper.py | Python | mit | 4,952 |
"""rename documentpage
Revision ID: e34d28e9a167
Revises: a5ccf5eaa73f
Create Date: 2016-03-08 10:22:16.063105
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'e34d28e9a167'
down_revision = 'a5ccf5eaa73f'
def upgrade():
op.rename_table('page', 'document_page')
def downgrade():
op.rename_table('document_page', 'page')
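# Illustrative note (not part of the original revision): with Alembic
# configured for this project, the rename is typically applied with
# "alembic upgrade head" (or "alembic upgrade e34d28e9a167") and reverted
# with "alembic downgrade a5ccf5eaa73f".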
| gazeti/aleph | aleph/migrate/versions/e34d28e9a167_rename_documentpage.py | Python | mit | 365 |
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import copy
import time
'''
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidBlockRequestTest(ComparisonTestFramework):
''' Can either run this test as 1 node with expected answers, or two and compare them.
Change the "outcome" variable from each TestInstance object to only do the comparison. '''
def __init__(self):
self.num_nodes = 1
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
self.tip = None
self.block_time = None
NetworkThread().start() # Start up network handling in another thread
test.run()
def get_tests(self):
if self.tip is None:
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.block_time = int(time.time())+1
'''
Create a new block with an anyone-can-spend coinbase
'''
height = 1
block = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Now we need that block to mature so we can spend the coinbase.
'''
test = TestInstance(sync_every_block=False)
for i in xrange(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.tip = block.sha256
self.block_time += 1
test.blocks_and_transactions.append([block, True])
height += 1
yield test
'''
Now we use merkle-root malleability to generate an invalid block with
same blockheader.
Manufacture a block with 3 transactions (coinbase, spend of prior
coinbase, spend of that spend). Duplicate the 3rd transaction to
leave merkle root and blockheader unchanged but invalidate the block.
'''
block2 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
# chr(81) is OP_TRUE
tx1 = create_transaction(self.block1.vtx[0], 0, chr(81), 50 * COIN)
tx2 = create_transaction(tx1, 0, chr(81), 50 * COIN)
block2.vtx.extend([tx1, tx2])
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.rehash()
block2.solve()
orig_hash = block2.sha256
block2_orig = copy.deepcopy(block2)
# Mutate block 2
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
assert(block2_orig.vtx != block2.vtx)
self.tip = block2.sha256
yield TestInstance([[block2, RejectResult(16,'bad-txns-duplicate')], [block2_orig, True]])
height += 1
'''
Make sure that a totally screwed up block is not valid.
'''
block3 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block3.vtx[0].vout[0].nValue = 100 * COIN # Too high!
block3.vtx[0].sha256=None
block3.vtx[0].calc_sha256()
block3.hashMerkleRoot = block3.calc_merkle_root()
block3.rehash()
block3.solve()
yield TestInstance([[block3, RejectResult(16,'bad-cb-amount')]])
if __name__ == '__main__':
InvalidBlockRequestTest().main()
| kleetus/bitcoin | qa/rpc-tests/invalidblockrequest.py | Python | mit | 4,242 |
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(), (3, 2)],
}))
class Log1pFunctionTest(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(.5, 1, self.shape).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
self.ggx = \
numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = F.log1p(x)
testing.assert_allclose(
numpy.log1p(self.x), y.data, atol=1e-7, rtol=1e-7)
def test_log1p_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_log1p_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(F.log1p, x_data, y_grad, dtype='d')
def test_log1p_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_log1p_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
gradient_check.check_double_backward(
F.log1p, x_data, y_grad, x_grad_grad, dtype=numpy.float64)
def test_log1p_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_log1p_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
def test_log1p(self):
self.assertEqual(
chainer.functions.math.logarithm_1p.Log1p().label, 'log1p')
testing.run_module(__name__, __file__)
| rezoo/chainer | tests/chainer_tests/functions_tests/math_tests/test_logarithm_1p.py | Python | mit | 1,946 |
# This file is part of 'NTLM Authorization Proxy Server' http://sourceforge.net/projects/ntlmaps/
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/> or <http://www.gnu.org/licenses/lgpl.txt>.
from U32 import U32
# --NON ASCII COMMENT ELIDED--
#typedef unsigned char des_cblock[8];
#define HDRSIZE 4
def c2l(c):
"char[4] to unsigned long"
l = U32(c[0])
l = l | (U32(c[1]) << 8)
l = l | (U32(c[2]) << 16)
l = l | (U32(c[3]) << 24)
return l
def c2ln(c,l1,l2,n):
"char[n] to two unsigned long???"
c = c + n
l1, l2 = U32(0), U32(0)
f = 0
if n == 8:
l2 = l2 | (U32(c[7]) << 24)
f = 1
if f or (n == 7):
l2 = l2 | (U32(c[6]) << 16)
f = 1
if f or (n == 6):
l2 = l2 | (U32(c[5]) << 8)
f = 1
if f or (n == 5):
l2 = l2 | U32(c[4])
f = 1
if f or (n == 4):
l1 = l1 | (U32(c[3]) << 24)
f = 1
if f or (n == 3):
l1 = l1 | (U32(c[2]) << 16)
f = 1
if f or (n == 2):
l1 = l1 | (U32(c[1]) << 8)
f = 1
if f or (n == 1):
l1 = l1 | U32(c[0])
return (l1, l2)
def l2c(l):
"unsigned long to char[4]"
c = []
c.append(int(l & U32(0xFF)))
c.append(int((l >> 8) & U32(0xFF)))
c.append(int((l >> 16) & U32(0xFF)))
c.append(int((l >> 24) & U32(0xFF)))
return c
def n2l(c, l):
"network to host long"
l = U32(c[0] << 24)
l = l | (U32(c[1]) << 16)
l = l | (U32(c[2]) << 8)
l = l | (U32(c[3]))
return l
def l2n(l, c):
"host to network long"
c = []
c.append(int((l >> 24) & U32(0xFF)))
c.append(int((l >> 16) & U32(0xFF)))
c.append(int((l >> 8) & U32(0xFF)))
c.append(int((l ) & U32(0xFF)))
return c
def l2cn(l1, l2, c, n):
""
for i in range(n): c.append(0x00)
f = 0
if f or (n == 8):
c[7] = int((l2 >> 24) & U32(0xFF))
f = 1
if f or (n == 7):
c[6] = int((l2 >> 16) & U32(0xFF))
f = 1
if f or (n == 6):
c[5] = int((l2 >> 8) & U32(0xFF))
f = 1
if f or (n == 5):
c[4] = int((l2 ) & U32(0xFF))
f = 1
if f or (n == 4):
c[3] = int((l1 >> 24) & U32(0xFF))
f = 1
if f or (n == 3):
c[2] = int((l1 >> 16) & U32(0xFF))
f = 1
if f or (n == 2):
c[1] = int((l1 >> 8) & U32(0xFF))
f = 1
if f or (n == 1):
c[0] = int((l1 ) & U32(0xFF))
f = 1
return c[:n]
# array of data
# static unsigned long des_SPtrans[8][64]={
# static unsigned long des_skb[8][64]={
from des_data import des_SPtrans, des_skb
def D_ENCRYPT(tup, u, t, s):
L, R, S = tup
#print 'LRS1', L, R, S, u, t, '-->',
u = (R ^ s[S])
t = R ^ s[S + 1]
t = ((t >> 4) + (t << 28))
L = L ^ (des_SPtrans[1][int((t ) & U32(0x3f))] | \
des_SPtrans[3][int((t >> 8) & U32(0x3f))] | \
des_SPtrans[5][int((t >> 16) & U32(0x3f))] | \
des_SPtrans[7][int((t >> 24) & U32(0x3f))] | \
des_SPtrans[0][int((u ) & U32(0x3f))] | \
des_SPtrans[2][int((u >> 8) & U32(0x3f))] | \
des_SPtrans[4][int((u >> 16) & U32(0x3f))] | \
des_SPtrans[6][int((u >> 24) & U32(0x3f))])
#print 'LRS:', L, R, S, u, t
return ((L, R, S), u, t, s)
def PERM_OP (tup, n, m):
"tup - (a, b, t)"
a, b, t = tup
t = ((a >> n) ^ b) & m
b = b ^ t
a = a ^ (t << n)
return (a, b, t)
def HPERM_OP (tup, n, m):
"tup - (a, t)"
a, t = tup
t = ((a << (16 - n)) ^ a) & m
a = a ^ t ^ (t >> (16 - n))
return (a, t)
shifts2 = [0,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0]
class DES:
KeySched = None # des_key_schedule
def __init__(self, key_str):
# key - UChar[8]
key = []
for i in key_str: key.append(ord(i))
#print 'key:', key
self.KeySched = des_set_key(key)
#print 'schedule:', self.KeySched, len(self.KeySched)
def decrypt(self, str):
# block - UChar[]
block = []
for i in str: block.append(ord(i))
#print block
block = des_ecb_encrypt(block, self.KeySched, 0)
res = ''
for i in block: res = res + (chr(i))
return res
def encrypt(self, str):
# block - UChar[]
block = []
for i in str: block.append(ord(i))
block = des_ecb_encrypt(block, self.KeySched, 1)
res = ''
for i in block: res = res + (chr(i))
return res
#------------------------
def des_encrypt(input, ks, encrypt):
# input - U32[]
# output - U32[]
# ks - des_key_shedule - U32[2][16]
# encrypt - int
# l, r, t, u - U32
# i - int
# s - U32[]
l = input[0]
r = input[1]
t = U32(0)
u = U32(0)
r, l, t = PERM_OP((r, l, t), 4, U32(0x0f0f0f0fL))
l, r, t = PERM_OP((l, r, t), 16, U32(0x0000ffffL))
r, l, t = PERM_OP((r, l, t), 2, U32(0x33333333L))
l, r, t = PERM_OP((l, r, t), 8, U32(0x00ff00ffL))
r, l, t = PERM_OP((r, l, t), 1, U32(0x55555555L))
t = (r << 1)|(r >> 31)
r = (l << 1)|(l >> 31)
l = t
s = ks # ???????????????
#print l, r
if(encrypt):
for i in range(0, 32, 4):
rtup, u, t, s = D_ENCRYPT((l, r, i + 0), u, t, s)
l = rtup[0]
r = rtup[1]
rtup, u, t, s = D_ENCRYPT((r, l, i + 2), u, t, s)
r = rtup[0]
l = rtup[1]
else:
for i in range(30, 0, -4):
rtup, u, t, s = D_ENCRYPT((l, r, i - 0), u, t, s)
l = rtup[0]
r = rtup[1]
rtup, u, t, s = D_ENCRYPT((r, l, i - 2), u, t, s)
r = rtup[0]
l = rtup[1]
#print l, r
l = (l >> 1)|(l << 31)
r = (r >> 1)|(r << 31)
r, l, t = PERM_OP((r, l, t), 1, U32(0x55555555L))
l, r, t = PERM_OP((l, r, t), 8, U32(0x00ff00ffL))
r, l, t = PERM_OP((r, l, t), 2, U32(0x33333333L))
l, r, t = PERM_OP((l, r, t), 16, U32(0x0000ffffL))
r, l, t = PERM_OP((r, l, t), 4, U32(0x0f0f0f0fL))
output = [l]
output.append(r)
l, r, t, u = U32(0), U32(0), U32(0), U32(0)
return output
def des_ecb_encrypt(input, ks, encrypt):
# input - des_cblock - UChar[8]
# output - des_cblock - UChar[8]
# ks - des_key_shedule - U32[2][16]
# encrypt - int
#print input
l0 = c2l(input[0:4])
l1 = c2l(input[4:8])
ll = [l0]
ll.append(l1)
#print ll
    ll = des_encrypt(ll, ks, encrypt)
#print ll
l0 = ll[0]
l1 = ll[1]
output = l2c(l0)
output = output + l2c(l1)
#print output
l0, l1, ll[0], ll[1] = U32(0), U32(0), U32(0), U32(0)
return output
def des_set_key(key):
# key - des_cblock - UChar[8]
# schedule - des_key_schedule
# register unsigned long c,d,t,s;
# register unsigned char *in;
# register unsigned long *k;
# register int i;
#k = schedule
# in = key
k = []
c = c2l(key[0:4])
d = c2l(key[4:8])
t = U32(0)
d, c, t = PERM_OP((d, c, t), 4, U32(0x0f0f0f0fL))
c, t = HPERM_OP((c, t), -2, U32(0xcccc0000L))
d, t = HPERM_OP((d, t), -2, U32(0xcccc0000L))
d, c, t = PERM_OP((d, c, t), 1, U32(0x55555555L))
c, d, t = PERM_OP((c, d, t), 8, U32(0x00ff00ffL))
d, c, t = PERM_OP((d, c, t), 1, U32(0x55555555L))
d = (((d & U32(0x000000ffL)) << 16)|(d & U32(0x0000ff00L))|((d & U32(0x00ff0000L)) >> 16)|((c & U32(0xf0000000L)) >> 4))
c = c & U32(0x0fffffffL)
for i in range(16):
if (shifts2[i]):
c = ((c >> 2)|(c << 26))
d = ((d >> 2)|(d << 26))
else:
c = ((c >> 1)|(c << 27))
d = ((d >> 1)|(d << 27))
c = c & U32(0x0fffffffL)
d = d & U32(0x0fffffffL)
s= des_skb[0][int((c ) & U32(0x3f))]|\
des_skb[1][int(((c>> 6) & U32(0x03))|((c>> 7) & U32(0x3c)))]|\
des_skb[2][int(((c>>13) & U32(0x0f))|((c>>14) & U32(0x30)))]|\
des_skb[3][int(((c>>20) & U32(0x01))|((c>>21) & U32(0x06)) | ((c>>22) & U32(0x38)))]
t= des_skb[4][int((d ) & U32(0x3f) )]|\
des_skb[5][int(((d>> 7) & U32(0x03))|((d>> 8) & U32(0x3c)))]|\
des_skb[6][int((d>>15) & U32(0x3f) )]|\
des_skb[7][int(((d>>21) & U32(0x0f))|((d>>22) & U32(0x30)))]
#print s, t
k.append(((t << 16)|(s & U32(0x0000ffffL))) & U32(0xffffffffL))
s = ((s >> 16)|(t & U32(0xffff0000L)))
s = (s << 4)|(s >> 28)
k.append(s & U32(0xffffffffL))
schedule = k
return schedule
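# Illustrative usage sketch (not part of the original module; the key and
# plaintext are arbitrary 8-byte example values, Python 2 byte strings):
#
#   cipher = DES('\x01\x23\x45\x67\x89\xab\xcd\xef')
#   enc = cipher.encrypt('8bytemsg')       # ECB-encrypt one 8-byte block
#   assert cipher.decrypt(enc) == '8bytemsg'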
| listyque/TACTIC-Handler | thlib/side/ntlm/des_c.py | Python | epl-1.0 | 9,208 |
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2021 IBM
# Author: Nageswara R Sastry <rnsastry@linux.vnet.ibm.com>
import os
from avocado import Test
from avocado.utils import archive, process
from avocado.utils.software_manager import SoftwareManager
class EvmCtl(Test):
"""
evmctl-testsuite
:avocado: tags=security,testsuite
"""
def setUp(self):
'''
Install the basic packages to support evmctl
'''
# Check for basic utilities
smm = SoftwareManager()
deps = ['gcc', 'make']
for package in deps:
if not smm.check_installed(package) and not smm.install(package):
self.cancel('%s is needed for the test to be run' % package)
url = "https://sourceforge.net/projects/linux-ima/files/latest/download"
tarball = self.fetch_asset(name="download.tar.gz", locations=url, expire='7d')
archive.extract(tarball, self.workdir)
self.sourcedir = os.path.join(self.workdir, os.listdir(self.workdir)[0])
self.log.info("sourcedir - %s" % self.sourcedir)
os.chdir(self.sourcedir)
process.run('./autogen.sh', ignore_status=True)
def test(self):
'''
Running tests from evmctl
'''
count = 0
output = process.system_output('./build.sh', ignore_status=True).decode()
for line in reversed(output.splitlines()):
if '# FAIL' in line:
count = int(line.split(":")[1].strip())
self.log.info(line)
break
# If the fail count is more than 0 then there are some failed tests
if count:
self.fail("%s test(s) failed, please refer to the log" % count)
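# Illustrative note (not part of the original test): avocado tests like this
# are usually executed with the avocado runner, e.g.
#   avocado run security/evmctl-tests.py
# (path as in this repository); the exact invocation is an assumption here.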
| sacsant/avocado-misc-tests | security/evmctl-tests.py | Python | gpl-2.0 | 2,182 |
"""Plugins that are not OS-specific"""
# pylint: disable=unused-import
from rekall.plugins.common import address_resolver
from rekall.plugins.common import api
from rekall.plugins.common import bovine
from rekall.plugins.common import efilter_plugins
from rekall.plugins.common import inspection
from rekall.plugins.common import memmap
from rekall.plugins.common import profile_index
from rekall.plugins.common import scanners
from rekall.plugins.common import sigscan
| dsweet04/rekall | rekall-core/rekall/plugins/common/__init__.py | Python | gpl-2.0 | 471 |
#!/usr/bin/python
#
# Server that will accept connections from a Vim channel.
# Run this server and then in Vim you can open the channel:
# :let handle = ch_open('localhost:8765')
#
# Then Vim can send requests to the server:
# :let response = ch_sendexpr(handle, 'hello!')
#
# And you can control Vim by typing a JSON message here, e.g.:
# ["ex","echo 'hi there'"]
#
# There is no prompt, just type a line and press Enter.
# To exit cleanly type "quit<Enter>".
#
# See ":help channel-demo" in Vim.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import json
import socket
import sys
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
thesocket = None
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
print("=== socket opened ===")
global thesocket
thesocket = self.request
while True:
try:
data = self.request.recv(4096).decode('utf-8')
except socket.error:
print("=== socket error ===")
break
except IOError:
print("=== socket closed ===")
break
if data == '':
print("=== socket closed ===")
break
print("received: {}".format(data))
try:
decoded = json.loads(data)
except ValueError:
print("json decoding failed")
decoded = [-1, '']
# Send a response if the sequence number is positive.
# Negative numbers are used for "eval" responses.
if decoded[0] >= 0:
if decoded[1] == 'hello!':
response = "got it"
else:
response = "what?"
encoded = json.dumps([decoded[0], response])
print("sending {}".format(encoded))
self.request.sendall(encoded.encode('utf-8'))
thesocket = None
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
if __name__ == "__main__":
HOST, PORT = "localhost", 8765
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print("Server loop running in thread: ", server_thread.name)
print("Listening on port {}".format(PORT))
while True:
typed = sys.stdin.readline()
if "quit" in typed:
print("Goodbye!")
break
if thesocket is None:
print("No socket yet")
else:
print("sending {}".format(typed))
thesocket.sendall(typed.encode('utf-8'))
server.shutdown()
server.server_close()
| kostyakudinov/Prog | usr/share/vim/vim74/tools/demoserver.py | Python | gpl-2.0 | 3,102 |
# -*- coding: utf-8 -*-
#
# test_pp_psc_delta_stdp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
# Moritz Deger, moritz.deger@epfl.ch, Aug 14, 2015
#
#
# Python script to reproduce failure of pp_psc_delta to show spike timing
# dependent plasticity (STDP), as opposed to iaf_psc_delta.
# The problem is probably related to the setting of 'archiver_length'
# (printed at the end of the script)
import nest
import nest.raster_plot
import numpy as np
import pylab
Dt = 1.
nsteps = 100
w_0 = 100.
nest.ResetKernel()
nrn_pre = nest.Create('parrot_neuron')
nrn_post1 = nest.Create('iaf_psc_delta')
nrn_post2 = nest.Create('pp_psc_delta')
nest.Connect(nrn_pre, nrn_post1 + nrn_post2,
syn_spec={'model': 'stdp_synapse', 'weight': w_0})
conn1 = nest.GetConnections(nrn_pre, nrn_post1)
conn2 = nest.GetConnections(nrn_pre, nrn_post2)
sg_pre = nest.Create('spike_generator')
nest.SetStatus(sg_pre, {'spike_times': np.arange(Dt, nsteps * Dt, 10. * Dt)})
nest.Connect(sg_pre, nrn_pre)
mm = nest.Create('multimeter')
nest.SetStatus(mm, {'record_from': ['V_m']})
nest.Connect(mm, nrn_post1 + nrn_post2)
sd = nest.Create('spike_detector')
nest.Connect(nrn_pre + nrn_post1 + nrn_post2, sd)
t = []
w1 = []
w2 = []
t.append(0.)
w1.append(nest.GetStatus(conn1, keys=['weight'])[0][0])
w2.append(nest.GetStatus(conn2, keys=['weight'])[0][0])
for i in xrange(nsteps):
nest.Simulate(Dt)
t.append(i * Dt)
w1.append(nest.GetStatus(conn1, keys=['weight'])[0][0])
w2.append(nest.GetStatus(conn2, keys=['weight'])[0][0])
pylab.figure(1)
pylab.plot(t, w1, 'g', label='iaf_psc_delta, ' + str(nrn_post1[0]))
pylab.plot(t, w2, 'r', label='pp_psc_delta, ' + str(nrn_post2[0]))
pylab.xlabel('time [ms]')
pylab.ylabel('weight [mV]')
pylab.legend(loc='best')
ylims = pylab.ylim()
pylab.ylim(ylims[0] - 5, ylims[1] + 5)
# pylab.savefig('test_pp_psc_delta_stdp_fig1.png')
nest.raster_plot.from_device(sd)
ylims = pylab.ylim()
pylab.ylim(ylims[0] - .5, ylims[1] + .5)
pylab.show()
# pylab.savefig('test_pp_psc_delta_stdp_fig2.png')
print 'Archiver lengths shall be equal:'
for nrn in [nrn_post1, nrn_post2]:
print nest.GetStatus(nrn, keys=['model', 'archiver_length'])[0]
| HBPNeurorobotics/nest-simulator | testsuite/manualtests/test_pp_psc_delta_stdp.py | Python | gpl-2.0 | 2,827 |
# Copyright (C) 2007 LibreSoft
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors :
# Carlos Garcia Campos <carlosgc@gsyc.escet.urjc.es>
import re
import time
import datetime
from Parser import Parser
from Repository import Commit, Action, Person
from utils import printout, printdbg
class GitParser(Parser):
class GitCommit:
def __init__(self, commit, parents):
self.commit = commit
self.parents = parents
self.svn_tag = None
def is_my_child(self, git_commit):
return git_commit.parents and self.commit.revision in git_commit.parents
class GitBranch:
(REMOTE, LOCAL, STASH) = range(3)
def __init__(self, type, name, tail):
self.type = type
self.name = name
self.set_tail(tail)
def is_my_parent(self, git_commit):
return git_commit.is_my_child(self.tail)
def is_stash(self):
return self.type == self.STASH
def set_tail(self, tail):
self.tail = tail
self.tail.commit.branch = self.name
patterns = {}
# commit 801c1b2511957ea99308bf0733e695cc78cd4a31 481e028bba471b788bddc53e0b925256c4585295
patterns['commit'] = re.compile("^commit[ \t]+([^ ]+)( ([^\(]+))?( \((.*)\))?$")
# Author: Santiago Duenas <sduenas@bitergia.com>
patterns['author'] = re.compile("^Author:[ \t]+(.*)[ \t]+<(.*)>$")
# AuthorDate: Wed Apr 16 18:44:59 2014 +0200
patterns['author_date'] = re.compile(
"^AuthorDate: (.* [0-9]+ [0-9]+:[0-9]+:[0-9]+ [0-9][0-9][0-9][0-9]) ([+-][0-9][0-9][0-9][0-9])$")
# Commit: Santiago Duenas <sduenas@bitergia.com>
patterns['committer'] = re.compile("^Commit:[ \t]+(.*)[ \t]+<(.*)>$")
# CommitDate: Wed Apr 16 18:44:59 2014 +0200
patterns['date'] = re.compile(
"^CommitDate: (.* [0-9]+ [0-9]+:[0-9]+:[0-9]+ [0-9][0-9][0-9][0-9]) ([+-][0-9][0-9][0-9][0-9])$")
patterns['file'] = re.compile("^([MAD]+)[ \t]+(.*)$")
patterns['file-moved'] = re.compile("^([RC])[0-9]+[ \t]+(.*)[ \t]+(.*)$")
patterns['branch'] = re.compile("refs/remotes/origin/([^,]*)")
patterns['local-branch'] = re.compile("refs/heads/([^,]*)")
patterns['tag'] = re.compile("tag: refs/tags/([^,]*)")
patterns['stash'] = re.compile("refs/stash")
patterns['ignore'] = [re.compile("^Merge: .*$")]
patterns['svn-tag'] = re.compile("^svn path=/tags/(.*)/?; revision=([0-9]+)$")
def __init__(self):
Parser.__init__(self)
self.is_gnome = None
# Parser context
self.commit = None
self.branch = None
self.branches = []
def set_repository(self, repo, uri):
Parser.set_repository(self, repo, uri)
self.is_gnome = re.search("^[a-z]+://(.*@)?git\.gnome\.org/.*$", repo.get_uri()) is not None
def flush(self):
if self.branches:
self.handler.commit(self.branch.tail.commit)
self.branch = None
self.branches = None
def _parse_line(self, line):
if line is None or line == '':
return
# Ignore
for patt in self.patterns['ignore']:
if patt.match(line):
return
# Commit
match = self.patterns['commit'].match(line)
if match:
if self.commit is not None and self.branch is not None:
if self.branch.tail.svn_tag is None: # Skip commits on svn tags
self.handler.commit(self.branch.tail.commit)
self.commit = Commit()
self.commit.revision = match.group(1)
parents = match.group(3)
if parents:
parents = parents.split()
self.commit.parents = parents
git_commit = self.GitCommit(self.commit, parents)
decorate = match.group(5)
branch = None
if decorate:
# Remote branch
m = re.search(self.patterns['branch'], decorate)
if m:
branch = self.GitBranch(self.GitBranch.REMOTE, m.group(1), git_commit)
                    printdbg("Branch '%s' head at commit %s", (branch.name, self.commit.revision))
else:
# Local Branch
m = re.search(self.patterns['local-branch'], decorate)
if m:
branch = self.GitBranch(self.GitBranch.LOCAL, m.group(1), git_commit)
printdbg("Commit %s on local branch '%s'", (self.commit.revision, branch.name))
# If local branch was merged we just ignore this decoration
if self.branch and self.branch.is_my_parent(git_commit):
printdbg("Local branch '%s' was merged", (branch.name,))
branch = None
else:
# Stash
m = re.search(self.patterns['stash'], decorate)
if m:
branch = self.GitBranch(self.GitBranch.STASH, "stash", git_commit)
printdbg("Commit %s on stash", (self.commit.revision,))
# Tag
m = re.search(self.patterns['tag'], decorate)
if m:
self.commit.tags = [m.group(1)]
printdbg("Commit %s tagged as '%s'", (self.commit.revision, self.commit.tags[0]))
if not branch and not self.branch:
branch = self.GitBranch(self.GitBranch.LOCAL, "(no-branch)", git_commit)
printdbg("Commit %s on unknown local branch '%s'", (self.commit.revision, branch.name))
            # This part of the code looks weird at first, so here is a short description of what it does:
#
# * self.branch is the branch to which the last inspected commit belonged to
# * branch is the branch of the current parsed commit
#
# This check is only to find branches which are fully merged into a already analyzed branch
#
# For more detailed information see https://github.com/MetricsGrimoire/CVSAnalY/issues/64
if branch is not None and self.branch is not None:
# Detect empty branches.
# Ideally, the head of a branch can't have children.
# When this happens is because the branch is empty, so we just ignore such branch.
if self.branch.is_my_parent(git_commit):
printout(
"Info: Branch '%s' will be ignored, because it was already merged in an active one.",
(branch.name,)
)
branch = None
if len(self.branches) >= 2:
# If current commit is the start point of a new branch
# we have to look at all the current branches since
# we haven't inserted the new branch yet.
# If not, look at all other branches excluding the current one
for i, b in enumerate(self.branches):
if i == 0 and branch is None:
continue
if b.is_my_parent(git_commit):
# We assume current branch is always the last one
# AFAIK there's no way to make sure this is right
printdbg("Start point of branch '%s' at commit %s",
(self.branches[0].name, self.commit.revision))
self.branches.pop(0)
self.branch = b
if self.branch and self.branch.tail.svn_tag is not None and self.branch.is_my_parent(git_commit):
# There's a pending tag in previous commit
pending_tag = self.branch.tail.svn_tag
printdbg("Move pending tag '%s' from previous commit %s to current %s", (pending_tag,
self.branch.tail.commit.revision,
self.commit.revision))
if self.commit.tags and pending_tag not in self.commit.tags:
self.commit.tags.append(pending_tag)
else:
self.commit.tags = [pending_tag]
self.branch.tail.svn_tag = None
if branch is not None:
self.branch = branch
# Insert master always at the end
if branch.name == 'master':
self.branches.append(self.branch)
else:
self.branches.insert(0, self.branch)
else:
if self.branch is not None:
self.branch.set_tail(git_commit)
return
# Committer
match = self.patterns['committer'].match(line)
if match:
self.commit.committer = Person()
self.commit.committer.name = match.group(1)
self.commit.committer.email = match.group(2)
self.handler.committer(self.commit.committer)
return
# Author
match = self.patterns['author'].match(line)
if match:
self.commit.author = Person()
self.commit.author.name = match.group(1)
self.commit.author.email = match.group(2)
self.handler.author(self.commit.author)
return
# Commit date
match = self.patterns['date'].match(line)
if match:
self.commit.date = datetime.datetime(
*(time.strptime(match.group(1).strip(" "), "%a %b %d %H:%M:%S %Y")[0:6]))
# datetime.datetime.strptime not supported by Python2.4
#self.commit.date = datetime.datetime.strptime (match.group (1).strip (" "), "%a %b %d %H:%M:%S %Y")
# match.group(2) represents the timezone. E.g. -0300, +0200, +0430 (Afghanistan)
# This string will be parsed to int and recalculated into seconds (60 * 60)
self.commit.date_tz = (((int(match.group(2))) * 60 * 60) / 100)
return
# Author date
match = self.patterns['author_date'].match(line)
if match:
self.commit.author_date = datetime.datetime(
*(time.strptime(match.group(1).strip(" "), "%a %b %d %H:%M:%S %Y")[0:6]))
# datetime.datetime.strptime not supported by Python2.4
#self.commit.author_date = datetime.datetime.strptime (match.group (1).strip (" "), "%a %b %d %H:%M:%S %Y")
# match.group(2) represents the timezone. E.g. -0300, +0200, +0430 (Afghanistan)
# This string will be parsed to int and recalculated into seconds (60 * 60)
self.commit.author_date_tz = (((int(match.group(2))) * 60 * 60) / 100)
return
# File
match = self.patterns['file'].match(line)
if match:
action = Action()
type = match.group(1)
if len(type) > 1:
# merge actions
if 'M' in type:
type = 'M'
else:
# ignore merge actions without 'M'
return
action.type = type
action.f1 = match.group(2)
self.commit.actions.append(action)
self.handler.file(action.f1)
return
# File moved/copied
match = self.patterns['file-moved'].match(line)
if match:
action = Action()
type = match.group(1)
if type == 'R':
action.type = 'V'
else:
action.type = type
action.f1 = match.group(3)
action.f2 = match.group(2)
action.rev = self.commit.revision
self.commit.actions.append(action)
self.handler.file(action.f1)
return
# This is a workaround for a bug in the GNOME Git migration
# There are commits on tags not correctly detected like this one:
# http://git.gnome.org/cgit/evolution/commit/?id=b8e52acac2b9fc5414a7795a73c74f7ee4eeb71f
# We want to ignore commits on tags since it doesn't make any sense in Git
if self.is_gnome:
match = self.patterns['svn-tag'].match(line.strip())
if match:
printout("Warning: detected a commit on a svn tag: %s", (match.group(0),))
tag = match.group(1)
if self.commit.tags and tag in self.commit.tags:
# The commit will be ignored, so move the tag
# to the next (previous in history) commit
self.branch.tail.svn_tag = tag
# Message
self.commit.message += line + '\n'
assert True, "Not match for line %s" % (line)
| beni55/CVSAnalY | pycvsanaly2/GitParser.py | Python | gpl-2.0 | 13,635 |
#! /usr/bin/env python
# This example illustrates how to use non-homogeneous(nonzero)
# Dirichlet boundary conditions.
#
# PDE: Poisson equation -Laplace u = CONST_F, where CONST_F is
# a constant right-hand side. It is not difficult to see that
# the function u(x,y) = (-CONST_F/4)*(x^2 + y^2) satisfies the
# above PDE. Since also the Dirichlet boundary conditions
# are chosen to match u(x,y), this function is the exact
# solution.
#
# Note that since the exact solution is a quadratic polynomial,
# Hermes will compute it exactly if all mesh elements are quadratic
# or higher (then the exact solution lies in the finite element space).
# If some elements in the mesh are linear, Hermes will only find
# an approximation.
# Import modules
from hermes2d import (Mesh, MeshView, H1Shapeset, PrecalcShapeset, H1Space,
LinSystem, Solution, ScalarView, WeakForm, DummySolver)
from hermes2d.examples.c04 import set_bc
from hermes2d.examples import get_example_mesh
from hermes2d.forms import set_forms
# Below you can play with the parameters CONST_F, P_INIT, and INIT_REF_NUM.
INIT_REF_NUM = 2 # number of initial uniform mesh refinements
P_INIT = 2 # initial polynomial degree in all elements
# Load the mesh file
mesh = Mesh()
mesh.load(get_example_mesh())
# Perform initial mesh refinements
for i in range(INIT_REF_NUM):
mesh.refine_all_elements()
# Create an H1 space with default shapeset
space = H1Space(mesh, P_INIT)
set_bc(space)
# Initialize the weak formulation
wf = WeakForm()
set_forms(wf)
# Initialize the linear system
ls = LinSystem(wf)
ls.set_spaces(space)
# Assemble and solve the matrix problem
sln = Solution()
ls.assemble()
ls.solve_system(sln)
# Visualize the solution
sln.plot()
# Visualize the mesh
mesh.plot(space=space)
| davidquantum/hermes2d | python/examples/04.py | Python | gpl-2.0 | 1,796 |
"""Editor window that can serve as an output file.
"""
import re
from tkinter import messagebox
from idlelib.editor import EditorWindow
file_line_pats = [
# order of patterns matters
r'file "([^"]*)", line (\d+)',
r'([^\s]+)\((\d+)\)',
r'^(\s*\S.*?):\s*(\d+):', # Win filename, maybe starting with spaces
r'([^\s]+):\s*(\d+):', # filename or path, ltrim
r'^\s*(\S.*?):\s*(\d+):', # Win abs path with embedded spaces, ltrim
]
file_line_progs = None
def compile_progs():
"Compile the patterns for matching to file name and line number."
global file_line_progs
file_line_progs = [re.compile(pat, re.IGNORECASE)
for pat in file_line_pats]
def file_line_helper(line):
"""Extract file name and line number from line of text.
Check if line of text contains one of the file/line patterns.
If it does and if the file and line are valid, return
a tuple of the file name and line number. If it doesn't match
or if the file or line is invalid, return None.
"""
if not file_line_progs:
compile_progs()
for prog in file_line_progs:
match = prog.search(line)
if match:
filename, lineno = match.group(1, 2)
try:
f = open(filename, "r")
f.close()
break
except OSError:
continue
else:
return None
try:
return filename, int(lineno)
except TypeError:
return None
class OutputWindow(EditorWindow):
"""An editor window that can serve as an output file.
Also the future base class for the Python shell window.
This class has no input facilities.
Adds binding to open a file at a line to the text widget.
"""
# Our own right-button menu
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Go to file/line", "<<goto-file-line>>", None),
]
allow_code_context = False
def __init__(self, *args):
EditorWindow.__init__(self, *args)
self.text.bind("<<goto-file-line>>", self.goto_file_line)
# Customize EditorWindow
def ispythonsource(self, filename):
"Python source is only part of output: do not colorize."
return False
def short_title(self):
"Customize EditorWindow title."
return "Output"
def maybesave(self):
"Customize EditorWindow to not display save file messagebox."
return 'yes' if self.get_saved() else 'no'
# Act as output file
def write(self, s, tags=(), mark="insert"):
"""Write text to text widget.
The text is inserted at the given index with the provided
tags. The text widget is then scrolled to make it visible
and updated to display it, giving the effect of seeing each
line as it is added.
Args:
s: Text to insert into text widget.
tags: Tuple of tag strings to apply on the insert.
mark: Index for the insert.
Return:
Length of text inserted.
"""
assert isinstance(s, str)
self.text.insert(mark, s, tags)
self.text.see(mark)
self.text.update()
return len(s)
def writelines(self, lines):
"Write each item in lines iterable."
for line in lines:
self.write(line)
def flush(self):
"No flushing needed as write() directly writes to widget."
pass
def showerror(self, *args, **kwargs):
messagebox.showerror(*args, **kwargs)
def goto_file_line(self, event=None):
"""Handle request to open file/line.
If the selected or previous line in the output window
contains a file name and line number, then open that file
name in a new window and position on the line number.
Otherwise, display an error messagebox.
"""
line = self.text.get("insert linestart", "insert lineend")
result = file_line_helper(line)
if not result:
# Try the previous line. This is handy e.g. in tracebacks,
# where you tend to right-click on the displayed source line
line = self.text.get("insert -1line linestart",
"insert -1line lineend")
result = file_line_helper(line)
if not result:
self.showerror(
"No special line",
"The line you point at doesn't look like "
"a valid file name followed by a line number.",
parent=self.text)
return
filename, lineno = result
self.flist.gotofileline(filename, lineno)
# These classes are currently not used but might come in handy
class OnDemandOutputWindow:
tagdefs = {
# XXX Should use IdlePrefs.ColorPrefs
"stdout": {"foreground": "blue"},
"stderr": {"foreground": "#007700"},
}
def __init__(self, flist):
self.flist = flist
self.owin = None
def write(self, s, tags, mark):
if not self.owin:
self.setup()
self.owin.write(s, tags, mark)
def setup(self):
self.owin = owin = OutputWindow(self.flist)
text = owin.text
for tag, cnf in self.tagdefs.items():
if cnf:
text.tag_configure(tag, **cnf)
text.tag_raise('sel')
self.write = self.owin.write
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_outwin', verbosity=2, exit=False)
|
bruderstein/PythonScript
|
PythonLib/full/idlelib/outwin.py
|
Python
|
gpl-2.0
| 5,709
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((14490.9, 3029.12, 3060.83), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((15116.6, 3760.52, 2692.79), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((13383.1, 4090.52, 3479.5), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((11225.1, 4429.85, 4322.38), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((10554.9, 4586.88, 4596.63), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((12326, 4639.28, 5766.95), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((11370.4, 6195.88, 6398.83), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((12294.8, 7399.36, 7205.16), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((11975.4, 8902.22, 7425.72), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((11370.5, 10459.7, 8176.74), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((11026.4, 11464.8, 6823.83), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((12450.8, 13012.5, 6529.84), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((13874.2, 14489.8, 6268.22), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((14235.6, 13007.5, 5753.75), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((13215.9, 13710.7, 6892.86), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((11980.5, 13227.6, 7763.68), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((11432, 11904.7, 8032.92), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((10650.9, 10579.8, 8390.35), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((11842.6, 9383.56, 9068.83), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((11007.5, 8508.02, 10007.1), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((9961.01, 7958.91, 11407.6), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((9762.12, 8128.95, 13085.8), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((9105.04, 8055.92, 11818.5), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((8098.96, 8945.37, 10136.6), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((7113.44, 10486.1, 9008.71), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6624.41, 11283.8, 8528.05), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((5149.07, 9466.37, 7162.96), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((3504.19, 8941.41, 6449.57), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((3197.3, 7852.11, 7001.47), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2170.99, 6197.96, 8273.91), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2865.17, 6382.4, 7807.34), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((1678.42, 6425.8, 7006.61), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((135.134, 4970.61, 6205.73), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((571.58, 4380.79, 4957.66), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((1541.14, 3448.69, 4309.93), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((1849.42, 1776.94, 3839.31), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((2383.37, 290.48, 4448.44), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((2421.7, 991.476, 5898.51), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((1032.67, 1633.14, 5847.07), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((1237.16, 3532.79, 5257.35), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((553.737, 3534.85, 4213.84), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((1601.81, 4124.33, 4851.28), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((2243.52, 3644.67, 4704.5), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((1631.44, 3925.35, 4647.59), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((1142.92, 5369.1, 5674.5), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((2626.92, 7819.11, 5319.34), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((3047.34, 9026.17, 3950.98), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((3457.58, 9145.33, 2883.25), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((2644.32, 8950.16, 1073.97), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((846.027, 9108.82, -893.839), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((-6.51386, 7800.76, -208.332), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((-40.7095, 8772.6, 2535.54), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((421.267, 8145, 2491.85), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((115.381, 6285.36, 2181.62), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((658.606, 4858.23, 1911.62), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((2406.42, 4665.42, 2429.8), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
batxes/4Cin
|
Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/mtx1_models/Six_zebra_models30110.py
|
Python
|
gpl-3.0
| 13,927
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2011 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import decimal
import re
# lame, but it matches at least up to 6 ORDER BY columns
__EXPR = re.compile(r"ORDER BY\s(\w+\.(?P<column_1>\w+)(\s+\w+)?)"
r"(,\s+\w+\.(?P<column_2>\w+)(\s+\w+)?)?"
r"(,\s+\w+\.(?P<column_3>\w+)(\s+\w+)?)?"
r"(,\s+\w+\.(?P<column_4>\w+)(\s+\w+)?)?"
r"(,\s+\w+\.(?P<column_5>\w+)(\s+\w+)?)?"
r"(,\s+\w+\.(?P<column_6>\w+)(\s+\w+)?)?")
VOLTTYPE_NULL = 1
VOLTTYPE_TINYINT = 3 # int8
VOLTTYPE_SMALLINT = 4 # int16
VOLTTYPE_INTEGER = 5 # int32
VOLTTYPE_BIGINT = 6 # int64
VOLTTYPE_FLOAT = 8 # float64
VOLTTYPE_STRING = 9
VOLTTYPE_TIMESTAMP = 11 # 8 byte long
VOLTTYPE_MONEY = 20 # 8 byte long
VOLTTYPE_DECIMAL = 22 # 9 byte long
__NULL = {VOLTTYPE_TINYINT: -128,
VOLTTYPE_SMALLINT: -32768,
VOLTTYPE_INTEGER: -2147483648,
VOLTTYPE_BIGINT: -9223372036854775808,
VOLTTYPE_FLOAT: -1.7E+308}
def normalize_value(v, type):
global __NULL
if type in __NULL and v == __NULL[type]:
return None
elif type == VOLTTYPE_FLOAT:
return round(v, 12)
elif type == VOLTTYPE_DECIMAL:
return decimal.Decimal(v)._rescale(-12, "ROUND_HALF_EVEN")
else:
return v
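# Rough behaviour sketch of normalize_value (values are illustrative only):
#   normalize_value(-2147483648, VOLTTYPE_INTEGER) -> None  (NULL sentinel)
#   normalize_value(3.5, VOLTTYPE_FLOAT)           -> 3.5   (floats rounded to 12 places)
#   normalize_value("foo", VOLTTYPE_STRING)        -> "foo" (passed through unchanged)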
def normalize_values(tuples, columns):
    # 'columns' holds voltdbclient.VoltColumn objects; 'tuples' is assumed to
    # come from a voltdbclient.VoltTable.
if hasattr(tuples, "__iter__"):
for i in xrange(len(tuples)):
if hasattr(tuples[i], "__iter__"):
normalize_values(tuples[i], columns)
else:
tuples[i] = normalize_value(tuples[i], columns[i].type)
def filter_sorted(row, sorted_cols):
"""Extract the values in the ORDER BY columns from a row.
"""
ret = []
if not sorted_cols:
return ret
for i in sorted_cols:
ret.append(row[i])
return ret
def extract_key(sorted_cols, row):
"""Extract the values in the non-ORDERBY columns from a row.
"""
k = []
for i in xrange(len(row)):
if i not in sorted_cols:
k.append(row[i])
return k
def sort(l, sorted_cols):
"""Two steps:
1. find the subset of rows which have the same values in the ORDER BY
columns.
2. sort them on the rest of the columns.
"""
begin = 0
end = 0 # exclusive
prev = None
key = lambda x: extract_key(sorted_cols, x)
for i in xrange(len(l)):
if not sorted_cols:
l[:] = sorted(l, cmp=cmp, key=key)
return
tmp = filter_sorted(l[i], sorted_cols)
if prev != tmp:
if prev is not None:
end = i
l[begin:end] = sorted(l[begin:end], cmp=cmp, key=key)
prev = tmp
begin = i
l[begin:] = sorted(l[begin:], cmp=cmp, key=key)
def parse_sql(x):
"""Finds if the SQL statement contains ORDER BY command.
"""
global __EXPR
result = __EXPR.search(x)
if result:
return filter(lambda x: x, result.groupdict().values())
else:
return None
def normalize(table, sql):
"""Normalizes the result tuples of ORDER BY statements.
"""
normalize_values(table.tuples, table.columns)
sort_cols = parse_sql(sql)
indices = []
if sort_cols:
for i in xrange(len(table.columns)):
if table.columns[i].name in sort_cols:
indices.append(i)
    # The ORDER BY columns have to appear in the result table for 'indices' to
    # be populated; if they do not, the normalizer falls back to sorting the
    # rows on all columns.
sort(table.tuples, indices)
return table
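# Assumed usage by the SQL coverage harness (statement and table are
# hypothetical): both result tables are normalized with the original query
# before being compared, e.g.
#   normalize(volt_table, "SELECT * FROM T ORDER BY T.ID")
# which NULL-normalizes the values and re-sorts rows that tie on T.ID.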
|
ifcharming/original2.0
|
tests/scripts/examples/sql_coverage/normalizer.py
|
Python
|
gpl-3.0
| 4,812
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsing API Discovery document."""
import mock_api_types
from gcutil_lib import mock_api_types
class Parser(object):
"""Discovery document parser.
  Parses discovery document types, resources and methods. The result of
  parsing is a dictionary mapping method_id -> method.
"""
__slots__ = ('_discovery_document', '_parsed_schemas', '_parsed_methods',
'_base_url', '_common_parameters')
def __init__(self, doc):
self._discovery_document = doc
self._parsed_schemas = {}
self._parsed_methods = {}
self._base_url = ''
self._common_parameters = {}
def _ParseType(self, discovery_type):
ref = discovery_type.get('$ref')
if ref:
return self._GetSchema(ref)
type_name = discovery_type['type']
if type_name == 'any':
return mock_api_types.AnyType()
elif type_name == 'array':
return mock_api_types.ArrayType(self._ParseType(discovery_type['items']))
elif type_name == 'boolean':
return mock_api_types.BooleanType()
elif type_name == 'integer':
return self._ParseIntegerType(discovery_type)
elif type_name == 'number':
return self._ParseNumberType(discovery_type)
elif type_name == 'object':
return self._ParseObjectType(discovery_type)
elif type_name == 'string':
return self._ParseStringType(discovery_type)
else:
raise ValueError('Unrecognized type {type}'.format(type=type_name))
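  # Examples of discovery fragments this method accepts (illustrative only):
  #   {'$ref': 'Instance'}                           -> schema object for 'Instance'
  #   {'type': 'array', 'items': {'type': 'string'}} -> ArrayType(StringType(None))
  #   {'type': 'integer', 'format': 'uint32'}        -> IntegerType('uint32')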
def _ParseIntegerType(self, discovery_type):
value_format = discovery_type.get('format')
if value_format in (None, 'int32', 'uint32'):
return mock_api_types.IntegerType(value_format or 'int32')
raise ValueError('Invalid integer format {value}'.format(
value=value_format))
def _ParseNumberType(self, discovery_type):
value_format = discovery_type.get('format')
if value_format in (None, 'double', 'float'):
return mock_api_types.NumberType(value_format or 'double')
raise ValueError('Invalid number format {value}'.format(
value=value_format))
def _ParseStringType(self, discovery_type):
value_format = discovery_type.get('format')
if value_format in (None, 'byte', 'date', 'date-time', 'int64', 'uint64'):
return mock_api_types.StringType(value_format)
raise ValueError('Invalid string format {value}'.format(
value=value_format))
def _ParseObjectType(self, discovery_type):
properties, additional = self._ParseProperties(discovery_type)
object_type = mock_api_types.ObjectType()
object_type.Define('', properties, additional)
return object_type
def _ParseSchema(self, discovery_schema):
properties, additional = self._ParseProperties(discovery_schema)
return self._CreateSchema(
discovery_schema.get('id'), properties, additional)
def _ParseProperties(self, discovery_object_type):
"""Parses properties of a discovery document object tyoe."""
assert discovery_object_type.get('type') == 'object'
properties = []
for property_name, property_type in (
discovery_object_type.get('properties', {}).iteritems()):
properties.append(mock_api_types.Property(
property_name, self._ParseType(property_type)))
additional = None
additional_properties = discovery_object_type.get('additionalProperties')
if additional_properties is not None:
additional = self._ParseType(additional_properties)
return properties, additional
def _ParseSchemas(self, discovery_schemas):
for _, discovery_schema in discovery_schemas.iteritems():
self._ParseSchema(discovery_schema)
def _ParseMethods(self, discovery_methods):
for method_name, discovery_method in discovery_methods.iteritems():
self._ParseMethod(method_name, discovery_method)
def _ParseParameter(self, parameter_name, parameter_type):
return mock_api_types.Parameter(
parameter_name, self._ParseType(parameter_type))
def _ParseParameters(self, discovery_method_parameters):
parameters = []
for parameter_name, parameter_type in (
discovery_method_parameters.iteritems()):
parameters.append(
self._ParseParameter(parameter_name, parameter_type))
parameters.sort(key=lambda parameter: parameter.name)
return parameters
def _ParseMethod(self, method_name, discovery_method):
parameters = self._ParseParameters(discovery_method.get('parameters', {}))
# Parse request type
discovery_method_request = discovery_method.get('request')
if discovery_method_request is None:
request_type = None
else:
request_type = self._ParseType(discovery_method_request)
# Parse response type.
discovery_method_response = discovery_method.get('response')
if discovery_method_response is None:
response_type = None
else:
response_type = self._ParseType(discovery_method_response)
return self._CreateMethod(
discovery_method.get('id'), method_name,
discovery_method.get('path', ''), parameters,
request_type, response_type)
def _ParseResources(self, discovery_resources):
for _, discovery_resource in discovery_resources.iteritems():
self._ParseResource(discovery_resource)
# Return all accumulated methods.
return self._parsed_methods
def _ParseResource(self, discovery_resource):
discovery_methods = discovery_resource.get('methods')
if discovery_methods:
self._ParseMethods(discovery_methods)
discovery_resources = discovery_resource.get('resources')
if discovery_resources:
self._ParseResources(discovery_resources)
def _ParseGlobals(self, discovery_document):
self._base_url = discovery_document.get('baseUrl')
self._common_parameters = self._ParseParameters(
discovery_document.get('parameters', {}))
def Parse(self):
self._ParseGlobals(self._discovery_document)
self._ParseSchemas(self._discovery_document.get('schemas'))
return self._ParseResources(self._discovery_document.get('resources'))
def _GetSchema(self, name):
schema = self._parsed_schemas.get(name)
if schema is None:
self._parsed_schemas[name] = schema = mock_api_types.ObjectType()
return schema
def _CreateSchema(self, name, properties, additional):
schema = self._GetSchema(name)
schema.Define(name, properties, additional)
return schema
def _CreateMethod(self, method_id, name, path, parameters, request, response):
if method_id in self._parsed_methods:
raise ValueError('Duplicate method {method}'.format(method=method_id))
all_parameters = dict((p.name, p) for p in self._common_parameters)
all_parameters.update(dict((p.name, p) for p in parameters))
path = self._base_url + path
method = mock_api_types.Method(
method_id, name, path, all_parameters, request, response)
self._parsed_methods[method_id] = method
return method
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/mock_api_parser.py
|
Python
|
gpl-3.0
| 7,467
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_iapp_service
short_description: Manages TCL iApp services on a BIG-IP.
description:
- Manages TCL iApp services on a BIG-IP.
version_added: "2.4"
options:
name:
description:
- The name of the iApp service that you want to deploy.
required: True
template:
description:
- The iApp template from which to instantiate a new service. This
template must exist on your BIG-IP before you can successfully
create a service. This parameter is required if the C(state)
parameter is C(present).
parameters:
description:
- A hash of all the required template variables for the iApp template.
If your parameters are stored in a file (the more common scenario)
it is recommended you use either the `file` or `template` lookups
to supply the expected parameters.
force:
description:
- Forces the updating of an iApp service even if the parameters to the
service have not changed. This option is of particular importance if
the iApp template that underlies the service has been updated in-place.
This option is equivalent to re-configuring the iApp if that template
has changed.
default: False
state:
description:
- When C(present), ensures that the iApp service is created and running.
When C(absent), ensures that the iApp service has been removed.
default: present
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the deepdiff Python package on the host. This is as easy as pip
    install deepdiff.
requirements:
- f5-sdk
- deepdiff
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create HTTP iApp service from iApp template
bigip_iapp_service:
name: "foo-service"
template: "f5.http"
parameters: "{{ lookup('file', 'f5.http.parameters.json') }}"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
- name: Upgrade foo-service to v1.2.0rc4 of the f5.http template
bigip_iapp_service:
name: "foo-service"
template: "f5.http.v1.2.0rc4"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
- name: Configure a service using parameters in YAML
bigip_iapp_service:
name: "tests"
template: "web_frontends"
password: "admin"
server: "{{ inventory_hostname }}"
server_port: "{{ bigip_port }}"
validate_certs: "{{ validate_certs }}"
state: "present"
user: "admin"
parameters:
variables:
- name: "var__vs_address"
value: "1.1.1.1"
- name: "pm__apache_servers_for_http"
value: "2.2.2.1:80"
- name: "pm__apache_servers_for_https"
value: "2.2.2.2:80"
delegate_to: localhost
- name: Re-configure a service whose underlying iApp was updated in place
bigip_iapp_service:
name: "tests"
template: "web_frontends"
password: "admin"
force: yes
server: "{{ inventory_hostname }}"
server_port: "{{ bigip_port }}"
validate_certs: "{{ validate_certs }}"
state: "present"
user: "admin"
parameters:
variables:
- name: "var__vs_address"
value: "1.1.1.1"
- name: "pm__apache_servers_for_http"
value: "2.2.2.1:80"
- name: "pm__apache_servers_for_https"
value: "2.2.2.2:80"
delegate_to: localhost
'''
RETURN = '''
# only common fields returned
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iteritems,
iControlUnexpectedHTTPError
)
from deepdiff import DeepDiff
class Parameters(AnsibleF5Parameters):
returnables = []
api_attributes = [
'tables', 'variables', 'template', 'lists'
]
updatables = ['tables', 'variables', 'lists']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def tables(self):
result = []
if not self._values['tables']:
return None
tables = self._values['tables']
for table in tables:
tmp = dict()
name = table.get('name', None)
if name is None:
raise F5ModuleError(
"One of the provided tables does not have a name"
)
tmp['name'] = str(name)
columns = table.get('columnNames', None)
if columns:
tmp['columnNames'] = [str(x) for x in columns]
# You cannot have rows without columns
rows = table.get('rows', None)
if rows:
tmp['rows'] = []
for row in rows:
tmp['rows'].append(dict(row=[str(x) for x in row['row']]))
result.append(tmp)
result = sorted(result, key=lambda k: k['name'])
return result
@tables.setter
def tables(self, value):
self._values['tables'] = value
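    # Shape of the normalization above (values are illustrative): an input table
    #   {'name': 'pool__members', 'columnNames': ['addr'], 'rows': [{'row': ['10.1.1.1']}]}
    # comes back with every field stringified and the list of tables sorted by name.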
@property
def variables(self):
result = []
if not self._values['variables']:
return None
variables = self._values['variables']
for variable in variables:
tmp = dict((str(k), str(v)) for k, v in iteritems(variable))
if 'encrypted' not in tmp:
                # BIG-IP will inject an 'encrypted' key if you don't provide one,
                # so default it to 'no' here.
tmp['encrypted'] = 'no'
if 'value' not in tmp:
tmp['value'] = ''
# This seems to happen only on 12.0.0
elif tmp['value'] == 'none':
tmp['value'] = ''
result.append(tmp)
result = sorted(result, key=lambda k: k['name'])
return result
@variables.setter
def variables(self, value):
self._values['variables'] = value
@property
def lists(self):
result = []
if not self._values['lists']:
return None
lists = self._values['lists']
        for item in lists:
            tmp = dict((str(k), str(v)) for k, v in iteritems(item) if k != 'value')
            if 'encrypted' not in item:
                # BIG-IP will inject an 'encrypted' key if you don't provide one,
                # so default it to 'no' here.
                tmp['encrypted'] = 'no'
            if 'value' in item:
                if len(item['value']) > 0:
                    # BIG-IP removes empty values entries, so mimic this behavior
                    # for user-supplied values.
                    tmp['value'] = [str(x) for x in item['value']]
            result.append(tmp)
result = sorted(result, key=lambda k: k['name'])
return result
@lists.setter
def lists(self, value):
self._values['lists'] = value
@property
def parameters(self):
return dict(
tables=self.tables,
variables=self.variables,
lists=self.lists
)
@parameters.setter
def parameters(self, value):
if value is None:
return
if 'tables' in value:
self.tables = value['tables']
if 'variables' in value:
self.variables = value['variables']
if 'lists' in value:
self.lists = value['lists']
@property
def template(self):
if self._values['template'] is None:
return None
if self._values['template'].startswith("/" + self.partition):
return self._values['template']
elif self._values['template'].startswith("/"):
return self._values['template']
else:
return '/{0}/{1}'.format(
self.partition, self._values['template']
)
@template.setter
def template(self, value):
self._values['template'] = value
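    # Illustrative behaviour of the template property (partition assumed to be
    # 'Common'):
    #   'f5.http'         -> '/Common/f5.http'
    #   '/Common/f5.http' -> '/Common/f5.http'  (already fully qualified)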
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = str(DeepDiff(attr1, attr2))
if changed:
self.changes = Parameters(changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def exists(self):
result = self.client.api.tm.sys.application.services.service.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update() and not self.want.force:
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
params['execute-action'] = 'definition'
resource = self.client.api.tm.sys.application.services.service.load(
name=self.want.name,
partition=self.want.partition
)
resource.update(**params)
def read_current_from_device(self):
result = self.client.api.tm.sys.application.services.service.load(
name=self.want.name,
partition=self.want.partition
).to_dict()
result.pop('_meta_data', None)
return Parameters(result)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.sys.application.services.service.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iApp service")
return True
def remove_from_device(self):
resource = self.client.api.tm.sys.application.services.service.load(
name=self.want.name,
partition=self.want.partition
)
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(required=True),
template=dict(),
parameters=dict(
type='dict'
),
state=dict(
default='present',
choices=['absent', 'present']
),
force=dict(
default=False,
type='bool'
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
rcarrillocruz/ansible
|
lib/ansible/modules/network/f5/bigip_iapp_service.py
|
Python
|
gpl-3.0
| 14,587
|
import pygame
from pygame import gfxdraw
from .rangable import Rangable
import random
class Pizza(Rangable):
"""docstring for Pizza"""
def __init__(self, context):
Rangable.__init__(self)
self.context = context
self.pizza = self.context.plain_pizza
self.trashed = False
self.perfected = False
self.trashing = False
self.trash_can = None
self.trash_pos = None
self.slices = None
self.offset = random.randint(0,4)
self.color=(0,0,0)
self.x = 100
self.y = 400 # 5=> margin between top and pizza
self.location = (self.x,self.y)
self.width = 150
self.height = 150
self.toppings = [0, 0, 0, 0]
self.requirements = []
self.potentalClues = []
self.drawing = None
self.draw()
"""
    update the pizza drawing surface.
"""
def draw(self):
surf = pygame.Surface((self.width, self.height), pygame.SRCALPHA)
pizza_img = pygame.transform.scale(self.context.plain_pizza, (self.width, self.height))
surf.blit(pizza_img, (0,0))
for i in range(0, len(self.toppings)):
if self.toppings[i] > 0:
self.drawTopping(surf, i, 0)
#gfxdraw.filled_ellipse(surf, self.width//2,self.height//2, self.width/2, self.height/2, (219,162,74))#pizza
#pygame.draw.arc(surf, (225,216,0), [0, 0, self.width, self.height], 0, 360, 2)#crust
#draw slices on here afterwards
self.drawing = surf
self.dirty = False
"""
draw on a surface
"""
def drawOn(self, screen=None):
S = 8 #speed towards trash can
A = 9.8 #acceleration towards trash can
if self.trashing:
if self.touches(self.trash_can):
self.trashed = True
self.trashing = False
else:
self.setLocation(self.trash_pos[0] + 50, self.y + ((S)*A) )
if screen:
if self.dirty:
self.draw()
screen.blit(self.drawing, self.location)
else:
print("Error: drawOn was called on Button object but no screen argument was passed")
"""
return topping drawing
"""
def drawTopping(self, surf, i, pad=0):
#needs serious refactoring
topping_img = pygame.transform.scale(self.context.game_toppings[i], (self.width/4, self.height/4))
if self.context.difficulty == "Advanced":
amount = self.context.fractions[self.toppings[i]]
else:
amount = self.toppings[i]
#center portion
surf.blit(topping_img, ( (surf.get_width()/2) - (topping_img.get_width()/2), (surf.get_height()/2) - (topping_img.get_height()/2)))
#top portion
w,h = (surf.get_width()/6) + pad, surf.get_height()/6
if amount > 0:
surf.blit( pygame.transform.rotate(topping_img, 45), ( w, h ))
if amount > 0.25:
surf.blit( pygame.transform.rotate(topping_img, 45), ( 3*w , h ))
#bottom portion
if amount > 0.5:
surf.blit( pygame.transform.rotate(topping_img, 45), ( w, 3*h ))
if amount > 0.75:
surf.blit( pygame.transform.rotate(topping_img, 45), ( 3*w , 3*h ))
return surf
"""
    move the pizza toward the trash can.
"""
def moveToTrash(self, trash_pos=None, trash_can=None):
if not(self.trashing or self.trashed):
if trash_pos and trash_can:
self.trash_pos = trash_pos
self.trash_can = pygame.Rect((trash_pos[0], trash_pos[1]+self.height), (trash_can.get_width(), trash_can.get_height()))
self.trashing = True
self.setLocation(trash_pos[0] + 50, 200)
else:
print("Error: expected a trash_pos, trash_can got {}, {}".format(trash_pos, trash_can))
"""
Add topping
"""
def addTopping(self, index):
if self.toppings[index] == 0:
self.toppings[index] = 1
else:
self.toppings[index] = 0
self.dirty = True
"""
Change Topping
"""
def changeTopping(self, index, amount):
self.toppings[index] = amount
self.dirty = True
"""
    set Customer hidden Pizza requirements
"""
def setRequirements(self, requirements):
self.requirements = requirements
"""
Checks if Pizza meets customer requirements.
Currently only support topping requirements
returns a tuple, boolean indicating whether it met the requirement
or not. (Boolean, Message)
"""
def checkRequirements(self):
if self.context.difficulty == "Easy":
message = []
metRequirement = False
notwanted = 0
missing = 0
for i in range(0, len(self.toppings)):
if self.toppings[i] > 0 and self.requirements[i] == 0:
notwanted += 1
elif self.toppings[i] == 0 and self.requirements[i] > 0:
missing += 1
if missing > 0:
message += ["There aren't enough toppings on the pizza. :(".format(notwanted)]
elif missing < 0:
message += ["There are more toppings on the pizza than I wanted. :(".format(notwanted)]
if notwanted > 0:
message += ["There {} {} {} on the pizza I don't like. :(".format(
'is' if notwanted == 1 else 'are', notwanted, 'topping' if notwanted == 1 else 'toppings'
)]
if not(notwanted) and missing == 0:
metRequirement = True
message += ["Thank you, that was the perfect pizza I was looking for! :)\n"]
return (metRequirement, message)
elif self.context.difficulty == "Advanced":
metRequirement = True
messages = []
names = ["Cheese", "Pepperoni", "Mushroom", "Pineapple"]
# calculate full pizza requirements
totalRequirements = [0 for i in range(0, len(self.toppings))]
for arr in self.requirements:
for i in range(0, len(arr)):
totalRequirements[i] += arr[i]
# check if pizza matches requirements
for i in range(0, len(self.toppings)):
topping = self.context.fractions[self.toppings[i]]
if topping > totalRequirements[i] or topping < totalRequirements[i]:
metRequirement = False
# set up person-specific messages
for personPreference in self.requirements:
message = []
notwanted = 0
missing = 0
for i in range(0, len(self.toppings)):
toppingAmount = self.context.fractions[self.toppings[i]]
if personPreference[i] == 0 and toppingAmount > totalRequirements[i]:
notwanted += 1
elif personPreference[i] > 0 and toppingAmount < totalRequirements[i]:
missing += 1
if notwanted == 1:
message += ["I want less of one topping"]
elif notwanted > 1:
message += ["I want less of {} toppings".format(notwanted)]
if missing == 1:
message += ["I want more of one topping"]
elif missing > 1:
message += ["I want more of {} toppings".format(missing)]
messages.append(message)
# Unique person messages
personSpecificMessages = []
# Wrong / correct pizza
if metRequirement:
personSpecificMessages.append(["The is the correct pizza!"])
else:
personSpecificMessages.append(["This is not the pizza I want."])
            # Gather some potential 'simple' clues
potentialCluesMuch = []
potentialCluesLittle = []
for i in range(0, len(self.toppings)):
guessAmount = self.context.fractions[self.toppings[i]]
correctAmount = totalRequirements[i]
if guessAmount > correctAmount:
potentialCluesMuch.append(["Too much {} ".format(names[i])])
elif guessAmount < correctAmount:
potentialCluesLittle.append(["Too little {} ".format(names[i])])
# Back up for the 'simple clues'
if len(potentialCluesMuch) == 0:
for i in range(0, len(self.toppings)):
guessAmount = self.context.fractions[self.toppings[i]]
correctAmount = totalRequirements[i]
if guessAmount == correctAmount:
potentialCluesMuch.append(["The {} is just right".format(names[i])])
if len(potentialCluesLittle) == 0:
for i in range(0, len(self.toppings)):
guessAmount = self.context.fractions[self.toppings[i]]
correctAmount = totalRequirements[i]
if guessAmount == correctAmount:
potentialCluesLittle.append(["The {} is just right".format(names[i])])
            # Too much of a topping
if len(potentialCluesMuch) == 0:
personSpecificMessages.append(["Looks fine to me"])
else:
msg = potentialCluesMuch[random.randint(1,len(potentialCluesMuch)) - 1]
personSpecificMessages.append(msg)
            # Too little of a topping
if len(potentialCluesLittle) == 0:
personSpecificMessages.append(["Looks fine to me"])
else:
msg = potentialCluesLittle[random.randint(1,len(potentialCluesLittle)) - 1]
personSpecificMessages.append(msg)
self.generateClues(names)
# Random clue as the final person
if len(self.potentalClues) == 0:
personSpecificMessages.append(["Looks fine to me"])
else:
personSpecificMessages.append(self.potentalClues[random.randint(1,len(self.potentalClues)) - 1])
formattedMessages = [[] for i in range(0, 4)]
for i in range(0, len(personSpecificMessages)):
for j in range(0, len(personSpecificMessages[i])):
strArray = self.formatString(personSpecificMessages[i][j], 22)
formattedMessages[i] += strArray
# return (metRequirement, messages[0], messages[1], messages[2], messages[3])
return (metRequirement, formattedMessages[0], formattedMessages[1], formattedMessages[2], formattedMessages[3])
def formatString(self, msg, lineLength):
strArray = [];
#keep adding snippets as long as there is more to add
while len(msg) > lineLength:
#get space closest to end of line
index = lineLength
while index > 0 and msg[index] != " ":
index = index - 1
if index == 0:
index = lineLength
strArray += [msg[:index]]
msg = msg[index+1:]
#add remainder of message
strArray += [msg]
return strArray
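    # Example of the wrapping above (hypothetical message, lineLength=22):
    #   formatString("I want less of one topping please", 22)
    #   -> ["I want less of one", "topping please"]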
"""
    mark the pizza as perfected.
"""
def setPerfect(self):
self.perfected = True
"""
x,y are the center points of the text.
"""
def setLocation(self, x, y):
self.x = x
self.y = y
self.location = (x, y)
"""
Logic for generating clues
"""
def generateClues(self, names):
self.potentalClues = []
# calculate full pizza requirements
totalRequirements = [0 for i in range(0, len(self.toppings))]
for arr in self.requirements:
for i in range(0, len(arr)):
totalRequirements[i] += arr[i]
for i in range(0, len(totalRequirements)):
print(totalRequirements[i])
# Same as
for i in range(0, len(self.toppings) - 1):
for j in range(i+1, len(self.toppings)):
if totalRequirements[i] == totalRequirements[j]:
self.potentalClues.append(["I want the same {} as {}".format(names[i], names[j])])
# Double
for i in range(0, len(self.toppings) - 1):
for j in range(i+1, len(self.toppings)):
if totalRequirements[i] == 2 * totalRequirements[j] and totalRequirements[j] != 0:
self.potentalClues.append(["I want twice the {} as {}".format(names[i], names[j])])
if totalRequirements[j] == 2 * totalRequirements[i] and totalRequirements[i] != 0:
self.potentalClues.append(["I want twice the {} as {}".format(names[j], names[i])])
        # Triple
for i in range(0, len(self.toppings) - 1):
for j in range(i+1, len(self.toppings)):
if totalRequirements[i] == 3 * totalRequirements[j] and totalRequirements[j] != 0:
self.potentalClues.append(["I want triple the {} as {}".format(names[i], names[j])])
if totalRequirements[j] == 3 * totalRequirements[i] and totalRequirements[i] != 0:
self.potentalClues.append(["I want triple the {} as {}".format(names[j], names[i])])
# As much as others
for i in range(0, len(self.toppings)):
total = 0.0
for j in range(0, len(self.toppings)):
if i != j:
total += self.toppings[j]
if self.toppings[i] == total:
self.potentalClues.append(["I want as much {} as everything else combined".format(names[i])])
|
FOSSRIT/PyCut
|
game/objects/pizza.py
|
Python
|
mpl-2.0
| 13,934
|
#!/usr/bin/python
import sys
file = sys.argv[1]
f = open(file)
print '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude"
py:strip="">
'''
try:
for lang in f:
lang = lang.strip()
if lang and not lang.startswith('#'):
print ' <option value="' + lang + '" py:attrs="{\'selected\': lang == \'' + lang + '\' and \'selected\' or None}">' + lang + '</option>'
finally:
f.close()
print '''</html>
'''
|
shreyankg/Dorrie
|
mckup/build/translations.py
|
Python
|
agpl-3.0
| 628
|
#!/usr/bin/env @python@
# ROOT command line tools module: cmdLineUtils
# Author: Julien Ripoche
# Mail: julien.ripoche@u-psud.fr
# Date: 20/08/15
"""Contain utils for ROOT command line tools"""
##########
# Stream redirect functions
# The original code of these functions can be found here:
# http://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
# Thanks J.F. Sebastian !!
from contextlib import contextmanager
import os
import sys
def fileno(file_or_fd):
"""
Look for 'fileno' attribute.
"""
fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
if not isinstance(fd, int):
raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
return fd
@contextmanager
def streamRedirected(source=sys.stdout, destination=os.devnull):
"""
Redirect the output from source to destination.
"""
stdout_fd = fileno(source)
# copy stdout_fd before it is overwritten
#NOTE: `copied` is inheritable on Windows when duplicating a standard stream
with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
source.flush() # flush library buffers that dup2 knows nothing about
try:
os.dup2(fileno(destination), stdout_fd) # $ exec >&destination
except ValueError: # filename
with open(destination, 'wb') as destination_file:
os.dup2(destination_file.fileno(), stdout_fd) # $ exec > destination
try:
yield source # allow code to be run with the redirected stream
finally:
# restore source to its previous value
#NOTE: dup2 makes stdout_fd inheritable unconditionally
source.flush()
os.dup2(copied.fileno(), stdout_fd) # $ exec >&copied
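# Typical use (illustrative; noisyCall is a placeholder): silence a chatty call
# while leaving the underlying file descriptors intact afterwards, e.g.
#   with streamRedirected(sys.stdout, os.devnull):
#       noisyCall()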
def stdoutRedirected():
"""
Redirect the output from sys.stdout to os.devnull.
"""
return streamRedirected(sys.stdout, os.devnull)
def stderrRedirected():
"""
Redirect the output from sys.stderr to os.devnull.
"""
return streamRedirected(sys.stderr, os.devnull)
# The end of streamRedirected functions
##########
##########
# Imports
##
# redirect output (escape characters during ROOT importation...)
# The gymnastics with sys.argv are necessary to work around ROOT-7577
argvTmp = sys.argv[:]
sys.argv = []
with stdoutRedirected():
import ROOT
ROOT.gROOT.GetVersion()
sys.argv = argvTmp
import argparse
import glob
import fnmatch
import logging
LOG_FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=LOG_FORMAT)
# The end of imports
##########
##########
# Different functions to get a parser of arguments and options
def _getParser(theHelp, theEpilog):
"""
Get a commandline parser with the defaults of the commandline utils.
"""
return argparse.ArgumentParser(description=theHelp,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog = theEpilog)
def getParserSingleFile(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils and a
source file or not.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("FILE", nargs='?', help="Input file")
return parser
def getParserFile(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils and a
list of source files.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("FILE", nargs='+', help="Input file")
return parser
def getParserSourceDest(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils,
a list of source files and a destination file.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("SOURCE", nargs='+', help="Source file")
parser.add_argument("DEST", help="Destination file")
return parser
# The end of get parser functions
##########
##########
# Several utils
@contextmanager
def _setIgnoreLevel(level):
originalLevel = ROOT.gErrorIgnoreLevel
ROOT.gErrorIgnoreLevel = level
yield
ROOT.gErrorIgnoreLevel = originalLevel
def changeDirectory(rootFile,pathSplit):
"""
    Change the current directory (ROOT.gDirectory) to the one corresponding to (rootFile,pathSplit)
"""
rootFile.cd()
for directoryName in pathSplit:
theDir = ROOT.gDirectory.Get(directoryName)
if not theDir:
logging.warning("Directory %s does not exist." %directoryName)
return 1
else:
theDir.cd()
return 0
def createDirectory(rootFile,pathSplit):
"""
Add a directory named 'pathSplit[-1]' in (rootFile,pathSplit[:-1])
"""
retcode = changeDirectory(rootFile,pathSplit[:-1])
if retcode == 0: ROOT.gDirectory.mkdir(pathSplit[-1])
return retcode
def getFromDirectory(objName):
"""
Get the object objName from the current directory
"""
return ROOT.gDirectory.Get(objName)
def isExisting(rootFile,pathSplit):
"""
    Return True if the object corresponding to (rootFile,pathSplit) exists
"""
changeDirectory(rootFile,pathSplit[:-1])
return ROOT.gDirectory.GetListOfKeys().Contains(pathSplit[-1])
def isDirectoryKey(key):
"""
Return True if the object, corresponding to the key, inherits from TDirectory
"""
classname = key.GetClassName()
cl = ROOT.gROOT.GetClass(classname)
return cl.InheritsFrom(ROOT.TDirectory.Class())
def isTreeKey(key):
"""
Return True if the object, corresponding to the key, inherits from TTree
"""
classname = key.GetClassName()
cl = ROOT.gROOT.GetClass(classname)
return cl.InheritsFrom(ROOT.TTree.Class())
def getKey(rootFile,pathSplit):
"""
Get the key of the corresponding object (rootFile,pathSplit)
"""
changeDirectory(rootFile,pathSplit[:-1])
return ROOT.gDirectory.GetKey(pathSplit[-1])
def isDirectory(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), inherits from TDirectory
"""
if pathSplit == []: return True # the object is the rootFile itself
else: return isDirectoryKey(getKey(rootFile,pathSplit))
def isTree(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), inherits from TTree
"""
if pathSplit == []: return False # the object is the rootFile itself
else: return isTreeKey(getKey(rootFile,pathSplit))
def getKeyList(rootFile,pathSplit):
"""
Get the list of keys of the directory (rootFile,pathSplit),
if (rootFile,pathSplit) is not a directory then get the key in a list
"""
if isDirectory(rootFile,pathSplit):
changeDirectory(rootFile,pathSplit)
return ROOT.gDirectory.GetListOfKeys()
else: return [getKey(rootFile,pathSplit)]
def keyListSort(keyList):
"""
Sort list of keys by their names ignoring the case
"""
keyList.sort(key=lambda x: x.GetName().lower())
def tupleListSort(tupleList):
"""
Sort list of tuples by their first elements ignoring the case
"""
tupleList.sort(key=lambda x: x[0].lower())
def dirListSort(dirList):
"""
Sort list of directories by their names ignoring the case
"""
dirList.sort(key=lambda x: [n.lower() for n in x])
def keyClassSpliter(rootFile,pathSplitList):
"""
    Return a list of directories and a list of keys corresponding
    to the other objects, for rootls and rootprint use
"""
keyList = []
dirList = []
for pathSplit in pathSplitList:
if pathSplit == []: dirList.append(pathSplit)
elif isDirectory(rootFile,pathSplit): dirList.append(pathSplit)
else: keyList.append(getKey(rootFile,pathSplit))
keyListSort(keyList)
dirListSort(dirList)
return keyList,dirList
def openROOTFile(fileName, mode="read"):
"""
Open the ROOT file corresponding to fileName in the corresponding mode,
    redirecting the output not to see missing dictionaries
"""
#with stderrRedirected():
with _setIgnoreLevel(ROOT.kError):
theFile = ROOT.TFile.Open(fileName, mode)
if not theFile:
logging.warning("File %s does not exist", fileName)
return theFile
def openROOTFileCompress(fileName, compress, recreate):
"""
Open a ROOT file (like openROOTFile) with the possibility
to change compression settings
"""
if compress != None and os.path.isfile(fileName):
logging.warning("can't change compression settings on existing file")
return None
mode = "recreate" if recreate else "update"
theFile = openROOTFile(fileName, mode)
if compress != None: theFile.SetCompressionSettings(compress)
return theFile
def joinPathSplit(pathSplit):
"""
Join the pathSplit with '/'
"""
return "/".join(pathSplit)
MANY_OCCURENCE_WARNING = "Same name objects aren't supported: '{0}' of '{1}' won't be processed"
def manyOccurenceRemove(pathSplitList,fileName):
"""
    Search for duplicate occurrences of the same pathSplit and remove them
"""
if len(pathSplitList) > 1:
for n in pathSplitList:
if pathSplitList.count(n) != 1:
logging.warning(MANY_OCCURENCE_WARNING.format(joinPathSplit(n),fileName))
while n in pathSplitList: pathSplitList.remove(n)
def patternToPathSplitList(fileName,pattern):
"""
Get the list of pathSplit of objects in the ROOT file
corresponding to fileName that match with the pattern
"""
# Open ROOT file
rootFile = openROOTFile(fileName)
if not rootFile: return []
# Split pattern avoiding multiple slash problem
patternSplit = [n for n in pattern.split("/") if n != ""]
# Main loop
pathSplitList = [[]]
for patternPiece in patternSplit:
newPathSplitList = []
for pathSplit in pathSplitList:
if isDirectory(rootFile,pathSplit):
changeDirectory(rootFile,pathSplit)
newPathSplitList.extend( \
[pathSplit + [key.GetName()] \
for key in ROOT.gDirectory.GetListOfKeys() \
if fnmatch.fnmatch(key.GetName(),patternPiece)])
pathSplitList = newPathSplitList
# No match
if pathSplitList == []:
logging.warning("can't find {0} in {1}".format(pattern,fileName))
    # Same match (remove duplicate occurrences from the list)
manyOccurenceRemove(pathSplitList,fileName)
return pathSplitList
def fileNameListMatch(filePattern,wildcards):
"""
    Get the list of file names that match filePattern
"""
if wildcards: return [os.path.expandvars(os.path.expanduser(i)) for i in glob.iglob(filePattern)]
else: return [os.path.expandvars(os.path.expanduser(filePattern))]
def pathSplitListMatch(fileName,objPattern,wildcards):
"""
Get the list of pathSplit that match with objPattern
"""
if wildcards: return patternToPathSplitList(fileName,objPattern)
else: return [[n for n in objPattern.split("/") if n != ""]]
def patternToFileNameAndPathSplitList(pattern,wildcards = True):
"""
    Get the list of tuples containing both:
    - the ROOT file name
    - the list of split paths (in the corresponding file) of objects that match
    Use unix wildcards by default
"""
rootFilePattern = "*.root"
rootObjPattern = rootFilePattern+":*"
httpRootFilePattern = "htt*://*.root"
httpRootObjPattern = httpRootFilePattern+":*"
xrootdRootFilePattern = "root://*.root"
xrootdRootObjPattern = xrootdRootFilePattern+":*"
s3RootFilePattern = "s3://*.root"
s3RootObjPattern = s3RootFilePattern+":*"
gsRootFilePattern = "gs://*.root"
gsRootObjPattern = gsRootFilePattern+":*"
rfioRootFilePattern = "rfio://*.root"
rfioRootObjPattern = rfioRootFilePattern+":*"
pcmFilePattern = "*.pcm"
pcmObjPattern = pcmFilePattern+":*"
if fnmatch.fnmatch(pattern,httpRootObjPattern) or \
fnmatch.fnmatch(pattern,xrootdRootObjPattern) or \
fnmatch.fnmatch(pattern,s3RootObjPattern) or \
fnmatch.fnmatch(pattern,gsRootObjPattern) or \
fnmatch.fnmatch(pattern,rfioRootObjPattern):
patternSplit = pattern.rsplit(":", 1)
fileName = patternSplit[0]
objPattern = patternSplit[1]
pathSplitList = pathSplitListMatch(fileName,objPattern,wildcards)
return [(fileName,pathSplitList)]
if fnmatch.fnmatch(pattern,httpRootFilePattern) or \
fnmatch.fnmatch(pattern,xrootdRootFilePattern) or \
fnmatch.fnmatch(pattern,s3RootFilePattern) or \
fnmatch.fnmatch(pattern,gsRootFilePattern) or \
fnmatch.fnmatch(pattern,rfioRootFilePattern):
fileName = pattern
pathSplitList = [[]]
return [(fileName,pathSplitList)]
if fnmatch.fnmatch(pattern,rootObjPattern) or \
fnmatch.fnmatch(pattern,pcmObjPattern):
patternSplit = pattern.split(":")
filePattern = patternSplit[0]
objPattern = patternSplit[1]
fileNameList = fileNameListMatch(filePattern,wildcards)
return [(fileName,pathSplitListMatch(fileName,objPattern,wildcards)) for fileName in fileNameList]
if fnmatch.fnmatch(pattern,rootFilePattern) or \
fnmatch.fnmatch(pattern,pcmFilePattern):
filePattern = pattern
fileNameList = fileNameListMatch(filePattern,wildcards)
pathSplitList = [[]]
return [(fileName,pathSplitList) for fileName in fileNameList]
logging.warning("{0}: No such file (or extension not supported)".format(pattern))
return []
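# Hedged usage sketch (not part of the original utilities): how a combined
# file-and-object pattern decomposes into (fileName, pathSplitList) tuples.
# The file and object names below are hypothetical and only illustrate the
# expected shape of the result.
def _examplePatternDecomposition():
    # For "histos.root:dir/h*" one would expect something like
    #   [("histos.root", [["dir", "h1"], ["dir", "h2"]])]
    # provided the file exists and contains matching keys.
    return patternToFileNameAndPathSplitList("histos.root:dir/h*")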
# End of utils
##########
##########
# Set of functions to put the arguments in shape
def getArgs(parser):
"""
Get arguments corresponding to parser.
"""
return parser.parse_args()
def getSourceListArgs(parser, wildcards = True):
"""
Create a list of tuples that contain source ROOT file names
    and lists of paths in these files as well as the original arguments
"""
args = getArgs(parser)
inputFiles = []
try:
inputFiles = args.FILE
except:
inputFiles = args.SOURCE
sourceList = \
[tup for pattern in inputFiles \
for tup in patternToFileNameAndPathSplitList(pattern,wildcards)]
return sourceList, args
def getSourceListOptDict(parser, wildcards = True):
"""
Get the list of tuples and the dictionary with options
"""
sourceList, args = getSourceListArgs(parser, wildcards)
if sourceList == []:
logging.error("Input file(s) not found!")
return sourceList, vars(args)
def getSourceDestListOptDict(parser, wildcards = True):
"""
Get the list of tuples of sources, create destination name, destination pathSplit
and the dictionary with options
"""
sourceList, args = getSourceListArgs(parser, wildcards)
destList = \
patternToFileNameAndPathSplitList( \
args.DEST,wildcards=False)
if destList != []:
destFileName,destPathSplitList = destList[0]
destPathSplit = destPathSplitList[0]
else:
destFileName = ""
destPathSplit = []
return sourceList, destFileName, destPathSplit, vars(args)
# The end of the set of functions to put the arguments in shape
##########
##########
# Several functions shared by roocp, roomv and roorm
TARGET_ERROR = "target '{0}' is not a directory"
OMITTING_FILE_ERROR = "omitting file '{0}'"
OMITTING_DIRECTORY_ERROR = "omitting directory '{0}'"
OVERWRITE_ERROR = "cannot overwrite non-directory '{0}' with directory '{1}'"
def copyRootObject(sourceFile,sourcePathSplit,destFile,destPathSplit,oneSource,recursive,replace):
"""
Initialize the recursive function 'copyRootObjectRecursive', written to be as unix-like as possible
"""
retcode = 0
isMultipleInput = not (oneSource and sourcePathSplit != [])
recursiveOption = recursive
    # Multiple inputs and nonexistent or non-directory destination
# TARGET_ERROR
if isMultipleInput and destPathSplit != [] \
and not (isExisting(destFile,destPathSplit) \
and isDirectory(destFile,destPathSplit)):
logging.warning(TARGET_ERROR.format(destPathSplit[-1]))
retcode += 1
# Entire ROOT file or directory in input omitting "-r" option
# OMITTING_FILE_ERROR or OMITTING_DIRECTORY_ERROR
if not recursiveOption:
if sourcePathSplit == []:
logging.warning(OMITTING_FILE_ERROR.format( \
sourceFile.GetName()))
retcode += 1
elif isDirectory(sourceFile,sourcePathSplit):
logging.warning(OMITTING_DIRECTORY_ERROR.format( \
sourcePathSplit[-1]))
retcode += 1
    # Run the copyRootObjectRecursive function, aiming
    # to follow the unix copy behaviour
if sourcePathSplit == []:
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit,replace)
else:
setName = ""
if not isMultipleInput and (destPathSplit != [] \
and not isExisting(destFile,destPathSplit)):
setName = destPathSplit[-1]
objectName = sourcePathSplit[-1]
if isDirectory(sourceFile,sourcePathSplit):
if setName != "":
createDirectory(destFile,destPathSplit[:-1]+[setName])
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1]+[setName],replace)
elif isDirectory(destFile,destPathSplit):
if not isExisting(destFile,destPathSplit+[objectName]):
createDirectory(destFile,destPathSplit+[objectName])
if isDirectory(destFile,destPathSplit+[objectName]):
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit+[objectName],replace)
else:
logging.warning(OVERWRITE_ERROR.format( \
objectName,objectName))
retcode += 1
else:
logging.warning(OVERWRITE_ERROR.format( \
destPathSplit[-1],objectName))
retcode += 1
else:
if setName != "":
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1],replace,setName)
elif isDirectory(destFile,destPathSplit):
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit,replace)
else:
setName = destPathSplit[-1]
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1],replace,setName)
return retcode
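# Hedged sketch (not part of the original utilities): copying a single object
# "dir/h1" from a source file into the top directory of a destination file,
# which is roughly what rootCp does for one source. The file and object names
# are hypothetical.
def _exampleCopySingleObject():
    sourceFile = openROOTFile("source.root")
    destFile = openROOTFileCompress("dest.root", compress=None, recreate=False)
    if not sourceFile or not destFile: return 1
    retcode = copyRootObject(sourceFile, ["dir", "h1"], destFile, [], \
                             oneSource=True, recursive=False, replace=False)
    sourceFile.Close()
    destFile.Close()
    return retcode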
DELETE_ERROR = "object {0} does not exist, so it cannot be deleted"
def deleteObject(rootFile,pathSplit):
"""
Delete the object 'pathSplit[-1]' from (rootFile,pathSplit[:-1])
"""
retcode = changeDirectory(rootFile,pathSplit[:-1])
if retcode == 0:
fileName = pathSplit[-1]
if isExisting(rootFile,pathSplit):
ROOT.gDirectory.Delete(fileName+";*")
else:
logging.warning(DELETE_ERROR.format(fileName))
retcode += 1
return retcode
def copyRootObjectRecursive(sourceFile,sourcePathSplit,destFile,destPathSplit,replace,setName=""):
"""
Copy objects from a file or directory (sourceFile,sourcePathSplit)
    to another file or directory (destFile,destPathSplit)
    - aims to be as unix-like as possible
    - this is a recursive function
    - Python adaptation of a ROOT input/output tutorial:
$ROOTSYS/tutorials/io/copyFiles.C
"""
retcode = 0
replaceOption = replace
for key in getKeyList(sourceFile,sourcePathSplit):
objectName = key.GetName()
if isDirectoryKey(key):
if not isExisting(destFile,destPathSplit+[objectName]):
createDirectory(destFile,destPathSplit+[objectName])
if isDirectory(destFile,destPathSplit+[objectName]):
retcode +=copyRootObjectRecursive(sourceFile, \
sourcePathSplit+[objectName], \
destFile,destPathSplit+[objectName],replace)
else:
logging.warning(OVERWRITE_ERROR.format( \
objectName,objectName))
retcode += 1
elif isTreeKey(key):
T = key.GetMotherDir().Get(objectName+";"+str(key.GetCycle()))
if replaceOption and isExisting(destFile,destPathSplit+[T.GetName()]):
retcodeTemp = deleteObject(destFile,destPathSplit+[T.GetName()])
if retcodeTemp:
retcode += retcodeTemp
continue
changeDirectory(destFile,destPathSplit)
newT = T.CloneTree(-1,"fast")
if setName != "":
newT.SetName(setName)
newT.Write()
else:
obj = key.ReadObj()
if replaceOption and isExisting(destFile,destPathSplit+[setName]):
changeDirectory(destFile,destPathSplit)
otherObj = getFromDirectory(setName)
if not otherObj == obj:
retcodeTemp = deleteObject(destFile,destPathSplit+[setName])
if retcodeTemp:
retcode += retcodeTemp
continue
else:
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
else:
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
else:
if setName != "":
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
obj.Delete()
changeDirectory(destFile,destPathSplit)
ROOT.gDirectory.SaveSelf(ROOT.kTRUE)
return retcode
FILE_REMOVE_ERROR = "cannot remove '{0}': Is a ROOT file"
DIRECTORY_REMOVE_ERROR = "cannot remove '{0}': Is a directory"
ASK_FILE_REMOVE = "remove '{0}' ? (y/n) : "
ASK_OBJECT_REMOVE = "remove '{0}' from '{1}' ? (y/n) : "
def deleteRootObject(rootFile, pathSplit, interactive, recursive):
"""
Remove the object (rootFile,pathSplit)
-interactive : prompt before every removal
-recursive : allow directory, and ROOT file, removal
"""
retcode = 0
if not recursive and isDirectory(rootFile,pathSplit):
if pathSplit == []:
logging.warning(FILE_REMOVE_ERROR.format(rootFile.GetName()))
retcode += 1
else:
logging.warning(DIRECTORY_REMOVE_ERROR.format(pathSplit[-1]))
retcode += 1
else:
if interactive:
if pathSplit != []:
answer = raw_input(ASK_OBJECT_REMOVE \
.format("/".join(pathSplit),rootFile.GetName()))
else:
answer = raw_input(ASK_FILE_REMOVE \
.format(rootFile.GetName()))
remove = answer.lower() == 'y'
else:
remove = True
if remove:
if pathSplit != []:
retcode += deleteObject(rootFile,pathSplit)
else:
rootFile.Close()
os.remove(rootFile.GetName())
return retcode
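# Hedged sketch (not part of the original utilities): removing one object the
# way rootRm drives deleteRootObject, prompting before the removal. The file
# and object names are hypothetical.
def _exampleRemoveObject():
    rootFile = openROOTFile("example.root", "update")
    if not rootFile: return 1
    retcode = deleteRootObject(rootFile, ["dir", "h1"], \
                               interactive=True, recursive=False)
    rootFile.Close()
    return retcode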
# End of functions shared by roocp, roomv and roorm
##########
##########
# Help strings for ROOT command line tools
# Arguments
SOURCE_HELP = "path of the source."
SOURCES_HELP = "path of the source(s)."
DEST_HELP = "path of the destination."
# Options
COMPRESS_HELP = \
"""change the compression settings of the
destination file (if not already existing)."""
INTERACTIVE_HELP = "prompt before every removal."
RECREATE_HELP = "recreate the destination file."
RECURSIVE_HELP = "recurse inside directories."
REPLACE_HELP = "replace object if already existing."
# End of help strings
##########
##########
# ROOTBROWSE
def _openBrowser(rootFile=None):
browser = ROOT.TBrowser()
if rootFile: rootFile.Browse(browser)
ROOT.PyROOT.TPyROOTApplication.Run(ROOT.gApplication)
def rootBrowse(fileName=None):
if fileName:
rootFile = openROOTFile(fileName)
if not rootFile: return 1
_openBrowser(rootFile)
rootFile.Close()
else:
_openBrowser()
return 0
# End of ROOTBROWSE
##########
##########
# ROOTCP
def _copyObjects(fileName, pathSplitList, destFile, destPathSplit, oneFile, \
recursive, replace):
retcode = 0
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName) \
if fileName != destFileName else \
destFile
if not rootFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(rootFile) # Fast copy necessity
for pathSplit in pathSplitList:
oneSource = oneFile and len(pathSplitList)==1
retcode += copyRootObject(rootFile, pathSplit, destFile, destPathSplit, \
oneSource, recursive, replace)
if fileName != destFileName: rootFile.Close()
return retcode
def rootCp(sourceList, destFileName, destPathSplit, \
compress=None, recreate=False, recursive=False, replace=False):
# Check arguments
if sourceList == [] or destFileName == "": return 1
if recreate and destFileName in [n[0] for n in sourceList]:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName, compress, recreate)
if not destFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(destFile) # Fast copy necessity
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _copyObjects(fileName, pathSplitList, destFile, destPathSplit, \
len(sourceList)==1, recursive, replace)
destFile.Close()
return retcode
# End of ROOTCP
##########
##########
# ROOTEVENTSELECTOR
def _copyTreeSubset(sourceFile,sourcePathSplit,destFile,destPathSplit,firstEvent,lastEvent):
"""Copy a subset of the tree from (sourceFile,sourcePathSplit)
    to (destFile,destPathSplit) according to firstEvent and lastEvent"""
retcode = changeDirectory(sourceFile,sourcePathSplit[:-1])
if retcode != 0: return retcode
bigTree = getFromDirectory(sourcePathSplit[-1])
nbrEntries = bigTree.GetEntries()
# changeDirectory for the small tree not to be memory-resident
retcode = changeDirectory(destFile,destPathSplit)
if retcode != 0: return retcode
smallTree = bigTree.CloneTree(0)
if lastEvent == -1:
lastEvent = nbrEntries-1
isNtuple = bigTree.InheritsFrom(ROOT.TNtuple.Class())
for i in range(firstEvent, lastEvent+1):
bigTree.GetEntry(i)
if isNtuple:
super(ROOT.TNtuple,smallTree).Fill()
else:
smallTree.Fill()
smallTree.Write()
return retcode
def _copyTreeSubsets(fileName, pathSplitList, destFile, destPathSplit, first, last):
retcode = 0
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName) \
if fileName != destFileName else \
destFile
if not rootFile: return 1
for pathSplit in pathSplitList:
if isTree(rootFile,pathSplit):
retcode += _copyTreeSubset(rootFile,pathSplit, \
destFile,destPathSplit,first,last)
if fileName != destFileName: rootFile.Close()
return retcode
def rootEventselector(sourceList, destFileName, destPathSplit, \
compress=None, recreate=False, first=0, last=-1):
# Check arguments
if sourceList == [] or destFileName == "": return 1
if recreate and destFileName in sourceList:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName, compress, recreate)
if not destFile: return 1
# Loop on the root file
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _copyTreeSubsets(fileName, pathSplitList, destFile, destPathSplit, \
first, last)
destFile.Close()
return retcode
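# Hedged usage sketch (not part of the original utilities): keeping only the
# first 100 events of a tree, as the rooteventselector tool would. The file
# and tree names are hypothetical.
def _exampleEventSelection():
    sourceList = patternToFileNameAndPathSplitList("data.root:events")
    return rootEventselector(sourceList, "skimmed.root", [], first=0, last=99)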
# End of ROOTEVENTSELECTOR
##########
##########
# ROOTLS
# Ansi characters
ANSI_BOLD = "\x1B[1m"
ANSI_BLUE = "\x1B[34m"
ANSI_GREEN = "\x1B[32m"
ANSI_END = "\x1B[0m"
# Needed for column width calculation
ANSI_BOLD_LENGTH = len(ANSI_BOLD+ANSI_END)
ANSI_BLUE_LENGTH = len(ANSI_BLUE+ANSI_END)
ANSI_GREEN_LENGTH = len(ANSI_GREEN+ANSI_END)
# Terminal and platform booleans
IS_TERMINAL = sys.stdout.isatty()
IS_WIN32 = sys.platform == 'win32'
def isSpecial(ansiCode,string):
"""Use ansi code on 'string' if the output is the
terminal of a not Windows platform"""
if IS_TERMINAL and not IS_WIN32: return ansiCode+string+ANSI_END
else: return string
def write(string,indent=0,end=""):
"""Use sys.stdout.write to write the string with an indentation
equal to indent and specifying the end character"""
sys.stdout.write(" "*indent+string+end)
TREE_TEMPLATE = "{0:{nameWidth}}"+"{1:{titleWidth}}{2:{memoryWidth}}"
def _recursifTreePrinter(tree,indent):
"""Print recursively tree informations"""
listOfBranches = tree.GetListOfBranches()
if len(listOfBranches) > 0: # Width informations
maxCharName = max([len(branch.GetName()) \
for branch in listOfBranches])
maxCharTitle = max([len(branch.GetTitle()) \
for branch in listOfBranches])
dic = { \
"nameWidth":maxCharName+2, \
"titleWidth":maxCharTitle+4, \
"memoryWidth":1}
for branch in listOfBranches: # Print loop
rec = \
[branch.GetName(), \
"\""+branch.GetTitle()+"\"", \
str(branch.GetTotBytes())]
write(TREE_TEMPLATE.format(*rec,**dic),indent,end="\n")
_recursifTreePrinter(branch,indent+2)
def _prepareTime(time):
"""Get time in the proper shape
ex : 174512 for 17h 45m 12s
ex : 094023 for 09h 40m 23s"""
time = str(time)
time = '000000'+time
time = time[len(time)-6:]
return time
MONTH = {1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'Jun', \
7:'Jul',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'}
LONG_TEMPLATE = \
isSpecial(ANSI_BOLD,"{0:{classWidth}}")+"{1:{timeWidth}}" + \
"{2:{nameWidth}}{3:{titleWidth}}"
def _rootLsPrintLongLs(keyList,indent,treeListing):
"""Print a list of Tkey in columns
pattern : classname, datetime, name and title"""
if len(keyList) > 0: # Width informations
maxCharClass = max([len(key.GetClassName()) for key in keyList])
maxCharTime = 12
maxCharName = max([len(key.GetName()) for key in keyList])
dic = { \
"classWidth":maxCharClass+2, \
"timeWidth":maxCharTime+2, \
"nameWidth":maxCharName+2, \
"titleWidth":1}
date = ROOT.Long(0)
for key in keyList:
datime = key.GetDatime()
time = datime.GetTime()
date = datime.GetDate()
time = _prepareTime(time)
rec = \
[key.GetClassName(), \
MONTH[int(str(date)[4:6])]+" " +str(date)[6:]+ \
" "+time[:2]+":"+time[2:4], \
key.GetName(), \
"\""+key.GetTitle()+"\""]
write(LONG_TEMPLATE.format(*rec,**dic),indent,end="\n")
if treeListing and isTreeKey(key):
tree = key.ReadObj()
_recursifTreePrinter(tree,indent+2)
##
# The code of the getTerminalSize function can be found here :
# https://gist.github.com/jtriley/1108174
# Thanks jtriley !!
import os
import shlex
import struct
import platform
import subprocess
def getTerminalSize():
""" getTerminalSize()
- get width and height of console
    - works on Linux, OS X, Windows, Cygwin (Windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
#print "default"
#_get_terminal_size_windows() or _get_terminal_size_tput don't work
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
        # check_output captures the numbers printed by tput (check_call would
        # only return the exit status)
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
# End of getTerminalSize code
##
def _rootLsPrintSimpleLs(keyList,indent,oneColumn):
"""Print list of strings in columns
- blue for directories
- green for trees"""
    # This code is adapted from the pprint_list function here :
# http://stackoverflow.com/questions/25026556/output-list-like-ls
# Thanks hawkjo !!
if len(keyList) == 0: return
(term_width, term_height) = getTerminalSize()
term_width = term_width - indent
min_chars_between = 2
min_element_width = min( len(key.GetName()) for key in keyList ) \
+ min_chars_between
max_element_width = max( len(key.GetName()) for key in keyList ) \
+ min_chars_between
if max_element_width >= term_width: ncol,col_widths = 1,[1]
else:
# Start with max possible number of columns and reduce until it fits
ncol = 1 if oneColumn else min( len(keyList), term_width / min_element_width )
while True:
col_widths = \
[ max( len(key.GetName()) + min_chars_between \
for j, key in enumerate(keyList) if j % ncol == i ) \
for i in range(ncol) ]
if sum( col_widths ) <= term_width: break
else: ncol -= 1
for i, key in enumerate(keyList):
if i%ncol == 0: write("",indent) # indentation
# Don't add spaces after the last element of the line or of the list
if (i+1)%ncol != 0 and i != len(keyList)-1:
if not IS_TERMINAL: write( \
key.GetName().ljust(col_widths[i%ncol]))
elif isDirectoryKey(keyList[i]): write( \
isSpecial(ANSI_BLUE,key.GetName()).ljust( \
col_widths[i%ncol] + ANSI_BLUE_LENGTH))
elif isTreeKey(keyList[i]): write( \
isSpecial(ANSI_GREEN,key.GetName()).ljust( \
col_widths[i%ncol] + ANSI_GREEN_LENGTH))
else: write(key.GetName().ljust(col_widths[i%ncol]))
else: # No spaces after the last element of the line or of the list
if not IS_TERMINAL: write(key.GetName())
elif isDirectoryKey(keyList[i]):
write(isSpecial(ANSI_BLUE, key.GetName()))
elif isTreeKey(keyList[i]):
write(isSpecial(ANSI_GREEN, key.GetName()))
else: write(key.GetName())
write('\n')
def _rootLsPrint(keyList, indent, oneColumn, \
longListing, treeListing):
"""Print informations given by keyList with a rootLs
style choosen with the options"""
    if longListing or treeListing:
        _rootLsPrintLongLs(keyList, indent, treeListing)
else:
_rootLsPrintSimpleLs(keyList, indent, oneColumn)
def _rootLsProcessFile(fileName, pathSplitList, manySources, indent, \
oneColumn, longListing, treeListing):
retcode = 0
rootFile = openROOTFile(fileName)
if not rootFile: return 1
keyList,dirList = keyClassSpliter(rootFile,pathSplitList)
if manySources: write("{0} :".format(fileName)+"\n")
_rootLsPrint(keyList, indent, oneColumn, longListing, treeListing)
# Loop on the directories
manyPathSplits = len(pathSplitList) > 1
indentDir = 2 if manyPathSplits else 0
for pathSplit in dirList:
keyList = getKeyList(rootFile,pathSplit)
keyListSort(keyList)
if manyPathSplits: write("{0} :".format("/".join(pathSplit)),indent,end="\n")
_rootLsPrint(keyList, indent+indentDir, oneColumn, longListing, treeListing)
rootFile.Close()
return retcode
def rootLs(sourceList, oneColumn=False, longListing=False, treeListing=False):
# Check arguments
if sourceList == []: return 1
tupleListSort(sourceList)
# Loop on the ROOT files
retcode = 0
manySources = len(sourceList) > 1
indent = 2 if manySources else 0
for fileName, pathSplitList in sourceList:
retcode += _rootLsProcessFile(fileName, pathSplitList, manySources, indent, \
oneColumn, longListing, treeListing)
return retcode
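# Hedged usage sketch (not part of the original utilities): listing the
# contents of a file with the long listing format, as the rootls tool would.
# The file name is hypothetical.
def _exampleRootLs():
    sourceList = patternToFileNameAndPathSplitList("data.root")
    return rootLs(sourceList, longListing=True)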
# End of ROOTLS
##########
##########
# ROOTMKDIR
MKDIR_ERROR = "cannot create directory '{0}'"
def _createDirectories(rootFile,pathSplit,parents):
"""Same behaviour as createDirectory but allows the possibility
    to build a whole path recursively with the option \"parents\" """
retcode = 0
lenPathSplit = len(pathSplit)
if lenPathSplit == 0:
pass
elif parents:
for i in range(lenPathSplit):
currentPathSplit = pathSplit[:i+1]
if not (isExisting(rootFile,currentPathSplit) \
and isDirectory(rootFile,currentPathSplit)):
retcode += createDirectory(rootFile,currentPathSplit)
else:
doMkdir = True
for i in range(lenPathSplit-1):
currentPathSplit = pathSplit[:i+1]
if not (isExisting(rootFile,currentPathSplit) \
and isDirectory(rootFile,currentPathSplit)):
doMkdir = False
break
if doMkdir:
retcode += createDirectory(rootFile,pathSplit)
else:
logging.warning(MKDIR_ERROR.format("/".join(pathSplit)))
retcode += 1
return retcode
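# Hedged sketch (not part of the original utilities): creating "a/b/c" with
# parents=True behaves like `mkdir -p`, creating every missing intermediate
# directory. The file name is hypothetical.
def _exampleMkdirParents():
    rootFile = openROOTFile("example.root", "update")
    if not rootFile: return 1
    retcode = _createDirectories(rootFile, ["a", "b", "c"], parents=True)
    rootFile.Close()
    return retcode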
def _rootMkdirProcessFile(fileName, pathSplitList, parents):
retcode = 0
rootFile = openROOTFile(fileName,"update")
if not rootFile: return 1
for pathSplit in pathSplitList:
retcode+=_createDirectories(rootFile,pathSplit,parents)
rootFile.Close()
return retcode
def rootMkdir(sourceList, parents=False):
# Check arguments
if sourceList == []: return 1
# Loop on the ROOT files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _rootMkdirProcessFile(fileName, pathSplitList, parents)
return retcode
# End of ROOTMKDIR
##########
##########
# ROOTMV
MOVE_ERROR = "error during copy of {0}, it is not removed from {1}"
def _moveObjects(fileName, pathSplitList, destFile, destPathSplit, \
oneFile, interactive):
retcode = 0
recursive = True
replace = True
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName,"update") \
if fileName != destFileName else \
destFile
if not rootFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(rootFile) # Fast copy necessity
for pathSplit in pathSplitList:
oneSource = oneFile and len(pathSplitList)==1
retcodeTemp = copyRootObject(rootFile,pathSplit, \
destFile,destPathSplit,oneSource,recursive,replace)
if not retcodeTemp:
retcode += deleteRootObject(rootFile, pathSplit, interactive, recursive)
else:
logging.warning(MOVE_ERROR.format("/".join(pathSplit),rootFile.GetName()))
retcode += retcodeTemp
if fileName != destFileName: rootFile.Close()
return retcode
def rootMv(sourceList, destFileName, destPathSplit, compress=None, \
interactive=False, recreate=False):
# Check arguments
if sourceList == [] or destFileName == "": return 1
if recreate and destFileName in sourceList:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName,compress,recreate)
if not destFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(destFile) # Fast copy necessity
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _moveObjects(fileName, pathSplitList, destFile, destPathSplit, \
len(sourceList)==1, interactive)
destFile.Close()
return retcode
# End of ROOTMV
##########
##########
# ROOTPRINT
def _keyListExtended(rootFile,pathSplitList):
keyList,dirList = keyClassSpliter(rootFile,pathSplitList)
for pathSplit in dirList: keyList.extend(getKeyList(rootFile,pathSplit))
keyList = [key for key in keyList if not isDirectoryKey(key)]
keyListSort(keyList)
return keyList
def rootPrint(sourceList, directoryOption = None, divideOption = None, drawOption = "", formatOption = None, \
outputOption = None, sizeOption = None, styleOption = None, verboseOption = False):
# Check arguments
if sourceList == []: return 1
tupleListSort(sourceList)
# Don't open windows
ROOT.gROOT.SetBatch()
# (Style option)
if styleOption: ROOT.gInterpreter.ProcessLine(".x {0}".format(styleOption))
# (Verbose option)
if not verboseOption: ROOT.gErrorIgnoreLevel = 9999
# Initialize the canvas (Size option)
if sizeOption:
try:
width,height = sizeOption.split("x")
width = int(width)
height = int(height)
except ValueError:
logging.warning("canvas size is on a wrong format")
return 1
canvas = ROOT.TCanvas("canvas","canvas",width,height)
else:
canvas = ROOT.TCanvas("canvas")
# Divide the canvas (Divide option)
if divideOption:
try:
x,y = divideOption.split(",")
x = int(x)
y = int(y)
except ValueError:
logging.warning("divide is on a wrong format")
return 1
canvas.Divide(x,y)
caseNumber = x*y
# Take the format of the output file (formatOutput option)
if not formatOption and outputOption:
fileName = outputOption
fileFormat = fileName.split(".")[-1]
formatOption = fileFormat
# Use pdf as default format
if not formatOption: formatOption = "pdf"
# Create the output directory (directory option)
if directoryOption:
if not os.path.isdir(os.path.join(os.getcwd(),directoryOption)):
os.mkdir(directoryOption)
# Make the output name, begin to print (output option)
if outputOption:
if formatOption in ['ps','pdf']:
outputFileName = outputOption
if directoryOption: outputFileName = \
directoryOption + "/" + outputFileName
canvas.Print(outputFileName+"[",formatOption)
else:
logging.warning("can't merge pictures, only postscript or pdf files")
return 1
# Loop on the root files
retcode = 0
objDrawnNumber = 0
openRootFiles = []
for fileName, pathSplitList in sourceList:
rootFile = openROOTFile(fileName)
if not rootFile:
retcode += 1
continue
openRootFiles.append(rootFile)
        # Fill the key list (almost the same as in rootLs)
keyList = _keyListExtended(rootFile,pathSplitList)
for key in keyList:
if isTreeKey(key):
pass
else:
if divideOption:
canvas.cd(objDrawnNumber%caseNumber + 1)
objDrawnNumber += 1
obj = key.ReadObj()
obj.Draw(drawOption)
if divideOption:
if objDrawnNumber%caseNumber == 0:
if not outputOption:
outputFileName = str(objDrawnNumber//caseNumber)+"."+formatOption
if directoryOption:
outputFileName = os.path.join( \
directoryOption,outputFileName)
canvas.Print(outputFileName,formatOption)
canvas.Clear()
canvas.Divide(x,y)
else:
if not outputOption:
outputFileName = key.GetName() + "." +formatOption
if directoryOption:
outputFileName = os.path.join( \
directoryOption,outputFileName)
if outputOption or formatOption == 'pdf':
objTitle = "Title:"+key.GetClassName()+" : "+key.GetTitle()
canvas.Print(outputFileName,objTitle)
else:
canvas.Print(outputFileName,formatOption)
# Last page (divideOption)
if divideOption:
if objDrawnNumber%caseNumber != 0:
if not outputOption:
outputFileName = str(objDrawnNumber//caseNumber + 1)+"."+formatOption
if directoryOption:
outputFileName = os.path.join(directoryOption,outputFileName)
canvas.Print(outputFileName,formatOption)
# End to print (output option)
if outputOption:
if not divideOption:
canvas.Print(outputFileName+"]",objTitle)
else:
canvas.Print(outputFileName+"]")
# Close ROOT files
    for rootFile in openRootFiles:
        rootFile.Close()
return retcode
# End of ROOTPRINT
##########
##########
# ROOTRM
def _removeObjects(fileName, pathSplitList, interactive=False, recursive=False):
retcode = 0
rootFile = openROOTFile(fileName,"update")
if not rootFile: return 1
for pathSplit in pathSplitList:
retcode += deleteRootObject(rootFile, pathSplit, interactive, recursive)
rootFile.Close()
return retcode
def rootRm(sourceList, interactive=False, recursive=False):
# Check arguments
if sourceList == []: return 1
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _removeObjects(fileName, pathSplitList, interactive, recursive)
return retcode
# End of ROOTRM
##########
|
jrtomps/root
|
main/python/cmdLineUtils.py
|
Python
|
lgpl-2.1
| 47,974
|
"""Parsing and conversion of NTP dates contained in datagrams."""
import datetime
import struct
import time
# 63 zero bits followed by a one in the least significant bit is a special
# case meaning "immediately."
IMMEDIATELY = struct.pack('>q', 1)
# From NTP lib.
_SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3])
_NTP_EPOCH = datetime.date(1900, 1, 1)
_NTP_DELTA = (_SYSTEM_EPOCH - _NTP_EPOCH).days * 24 * 3600
class NtpError(Exception):
"""Base class for ntp module errors."""
def ntp_to_system_time(date):
"""Convert a NTP time to system time.
    System time is represented by seconds since the epoch in UTC.
"""
return date - _NTP_DELTA
def system_time_to_ntp(date):
"""Convert a system time to a NTP time datagram.
    System time is represented by seconds since the epoch in UTC.
"""
try:
ntp = date + _NTP_DELTA
except TypeError as ve:
        raise NtpError('Invalid date: {}'.format(ve))
num_secs, fraction = str(ntp).split('.')
return struct.pack('>I', int(num_secs)) + struct.pack('>I', int(fraction))
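# Hedged usage sketch (not part of the original module): round-tripping a
# fractional UNIX timestamp through the helpers above. Note that
# system_time_to_ntp expects a float, since the '.'-split assumes a decimal
# point in str(date + _NTP_DELTA).
def _example_ntp_roundtrip():
    now = time.time()  # float seconds since the UNIX epoch
    datagram = system_time_to_ntp(now)  # 8 bytes: seconds then fraction digits
    ntp_seconds = struct.unpack('>I', datagram[:4])[0]
    return ntp_to_system_time(ntp_seconds)  # approximately int(now)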
|
emlyn/python-osc
|
pythonosc/parsing/ntp.py
|
Python
|
unlicense
| 1,072
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras estimator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
# Keras has undeclared dependency on tensorflow/estimator:estimator_py.
# As long as you depend //third_party/py/tensorflow:tensorflow target
# everything will work as normal.
try:
import tensorflow.python.estimator.keras as keras_lib # pylint: disable=g-import-not-at-top
model_to_estimator = tf_export('keras.estimator.model_to_estimator')(
keras_lib.model_to_estimator)
except Exception: # pylint: disable=broad-except
# pylint: disable=unused-argument
def stub_model_to_estimator(keras_model=None,
keras_model_path=None,
custom_objects=None,
model_dir=None,
config=None):
raise NotImplementedError(
'tf.keras.estimator.model_to_estimator function not available in your '
'installation.')
# pylint: enable=unused-argument
model_to_estimator = tf_export('keras.estimator.model_to_estimator')(
stub_model_to_estimator)
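# Hedged usage sketch (not part of this module): converting a compiled Keras
# model to an Estimator through the name re-exported above. Assumes a
# TensorFlow installation where the estimator import succeeded; the layer
# sizes are arbitrary.
def _example_model_to_estimator():
  import tensorflow as tf  # pylint: disable=g-import-not-at-top
  model = tf.keras.models.Sequential([
      tf.keras.layers.Dense(10, activation='relu', input_shape=(4,)),
      tf.keras.layers.Dense(1)])
  model.compile(optimizer='adam', loss='mse')
  return model_to_estimator(keras_model=model)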
|
lukeiwanski/tensorflow
|
tensorflow/python/keras/estimator/__init__.py
|
Python
|
apache-2.0
| 1,873
|
#!/usr/bin/python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import sys
import time
import syndicate.ag.curation.specfile as AG_specfile
import syndicate.ag.curation.crawl as AG_crawl
DRIVER_NAME = "disk"
# list a directory
def disk_listdir( root_dir, dirpath ):
return os.listdir( "/" + os.path.join( root_dir.strip("/"), dirpath.strip("/") ) )
# is this a directory?
def disk_isdir( root_dir, dirpath ):
return os.path.isdir( "/" + os.path.join( root_dir.strip("/"), dirpath.strip("/") ) )
# build a hierarchy, using sensible default callbacks
def build_hierarchy( root_dir, include_cb, disk_specfile_cbs, max_retries=1, num_threads=2, allow_partial_failure=False ):
disk_crawler_cbs = AG_crawl.crawler_callbacks( include_cb=include_cb,
listdir_cb=disk_listdir,
isdir_cb=disk_isdir )
hierarchy = AG_crawl.build_hierarchy( [root_dir] * num_threads, "/", DRIVER_NAME, disk_crawler_cbs, disk_specfile_cbs, allow_partial_failure=allow_partial_failure, max_retries=max_retries )
return hierarchy
|
iychoi/syndicate-core
|
python/syndicate/ag/datasets/disk.py
|
Python
|
apache-2.0
| 1,718
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DJANGO_APPS = ['filebrowser']
NICE_NAME = "File Browser"
REQUIRES_HADOOP = False
ICON = "/filebrowser/static/art/icon_filebrowser_24.png"
MENU_INDEX = 20
|
2013Commons/HUE-SHARK
|
apps/filebrowser/src/filebrowser/settings.py
|
Python
|
apache-2.0
| 946
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for relu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_relu_tests(options):
"""Make a set of tests to do relu."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
"fully_quantize": [True, False],
"input_range": [(-8, 8)]
}]
def build_graph(parameters):
input_tensor = tf.compat.v1.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.relu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
min_value, max_value = parameters["input_range"]
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value, max_value)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
arborh/tensorflow
|
tensorflow/lite/testing/op_tests/relu.py
|
Python
|
apache-2.0
| 2,072
|
#!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import string
import subprocess
import sys
import re
from cli_test_parameters import CLITestParameters
class CLITest:
def __init__(self):
pass
@staticmethod
def check_description(test_case, cli):
parameters = CLITestParameters()
test_case.assertEqual(parameters.get_value(cli.__class__.__name__, 'description'), cli.get_description())
@staticmethod
def check_curl(test_case, cli, output):
parameters = CLITestParameters()
p = re.compile(r'-u ".*?"\s')
a = p.findall(output)
output = output.replace(a[0], '')
test_case.assertEqual(parameters.get_value(cli.__class__.__name__, 'curl').encode('utf-8'), output.encode('utf-8'))
@staticmethod
def get_cli_name_from_class(i):
name = i.__class__.__name__
m = re.findall("([A-Z][a-z]+)", name)
m = [a.lower() for a in m]
cli_name = str.join('-', m)
return cli_name
@staticmethod
def check_cli_help(test_case, cli):
parameters = CLITestParameters()
name = cli.__class__.__name__
expected_output = parameters.get_cli_help(name)
m = re.findall("([A-Z][a-z]+)", name)
m = [a.lower() for a in m]
command = str.join('-', m)
try:
output = subprocess.check_output([command, '-h'])
test_case.assertEqual(expected_output, output)
except subprocess.CalledProcessError as e:
sys.stderr.write("{0}: {1}\n".format(e.output, e.returncode))
@staticmethod
def get_cli_output(cli, args):
output = None
try:
command = CLITest.get_cli_name_from_class(cli)
args.insert(0, command)
output = subprocess.check_output(args=args)
except subprocess.CalledProcessError as e:
sys.stderr.write("{0}: {1}\n".format(e.output, e.returncode))
return output
@staticmethod
def random_string(n):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(n))
@staticmethod
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
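# Hedged sketch (not part of the original helpers): get_cli_name_from_class
# maps a CamelCase CLI class name to a hyphenated command name. 'MetricGet'
# is a hypothetical class used only for illustration.
def _example_cli_name():
    class MetricGet(object):
        pass
    # Expected to yield 'metric-get'
    return CLITest.get_cli_name_from_class(MetricGet())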
|
jdgwartney/boundary-api-cli
|
tests/unit/boundary/cli_test.py
|
Python
|
apache-2.0
| 2,812
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines some base class related to managing green threads.
"""
from __future__ import absolute_import
import abc
from collections import OrderedDict
import logging
import socket
import time
import traceback
import weakref
import netaddr
import six
from ryu.lib import hub
from ryu.lib import sockopt
from ryu.lib import ip
from ryu.lib.hub import Timeout
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.lib.packet.bgp import RF_IPv4_VPN
from ryu.lib.packet.bgp import RF_IPv6_VPN
from ryu.lib.packet.bgp import RF_L2_EVPN
from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC
from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC
from ryu.lib.packet.bgp import RF_VPNv4_FLOWSPEC
from ryu.lib.packet.bgp import RF_VPNv6_FLOWSPEC
from ryu.lib.packet.bgp import RF_L2VPN_FLOWSPEC
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.services.protocols.bgp.utils.circlist import CircularListType
from ryu.services.protocols.bgp.utils.evtlet import LoopingCall
# Logger instance for this module.
LOG = logging.getLogger('bgpspeaker.base')
# Pointer to active/available OrderedDict.
OrderedDict = OrderedDict
# Currently supported address families.
SUPPORTED_GLOBAL_RF = {
RF_IPv4_UC,
RF_IPv6_UC,
RF_IPv4_VPN,
RF_RTC_UC,
RF_IPv6_VPN,
RF_L2_EVPN,
RF_IPv4_FLOWSPEC,
RF_IPv6_FLOWSPEC,
RF_VPNv4_FLOWSPEC,
RF_VPNv6_FLOWSPEC,
RF_L2VPN_FLOWSPEC,
}
# Various error codes
ACTIVITY_ERROR_CODE = 100
RUNTIME_CONF_ERROR_CODE = 200
BIN_ERROR = 300
NET_CTRL_ERROR_CODE = 400
API_ERROR_CODE = 500
PREFIX_ERROR_CODE = 600
BGP_PROCESSOR_ERROR_CODE = 700
CORE_ERROR_CODE = 800
# Registry of custom exceptions
# Key: code:sub-code
# Value: exception class
_EXCEPTION_REGISTRY = {}
class BGPSException(Exception):
"""Base exception class for all BGPS related exceptions.
"""
CODE = 1
SUB_CODE = 1
DEF_DESC = 'Unknown exception.'
def __init__(self, desc=None):
super(BGPSException, self).__init__()
if not desc:
desc = self.__class__.DEF_DESC
kls = self.__class__
self.message = '%d.%d - %s' % (kls.CODE, kls.SUB_CODE, desc)
def __repr__(self):
kls = self.__class__
return '<%s(desc=%s)>' % (kls, self.message)
def __str__(self, *args, **kwargs):
return self.message
def add_bgp_error_metadata(code, sub_code, def_desc='unknown'):
"""Decorator for all exceptions that want to set exception class meta-data.
"""
# Check registry if we already have an exception with same code/sub-code
if _EXCEPTION_REGISTRY.get((code, sub_code)) is not None:
raise ValueError('BGPSException with code %d and sub-code %d '
'already defined.' % (code, sub_code))
def decorator(subclass):
"""Sets class constants for exception code and sub-code.
        If the given class is a subclass of BGPSException, we set its class constants.
"""
if issubclass(subclass, BGPSException):
_EXCEPTION_REGISTRY[(code, sub_code)] = subclass
subclass.CODE = code
subclass.SUB_CODE = sub_code
subclass.DEF_DESC = def_desc
return subclass
return decorator
@add_bgp_error_metadata(code=ACTIVITY_ERROR_CODE,
sub_code=1,
def_desc='Unknown activity exception.')
class ActivityException(BGPSException):
"""Base class for exceptions related to Activity.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class Activity(object):
"""Base class for a thread of execution that provides some custom settings.
Activity is also a container of other activities or threads that it has
    started. Inside an Activity you should always use one of the spawn methods
    to start another activity or greenthread. Activity also holds pointers
    to sockets that it or its child activities or threads have created.
"""
def __init__(self, name=None):
self._name = name
if self._name is None:
self._name = 'UnknownActivity: ' + str(time.time())
self._child_thread_map = weakref.WeakValueDictionary()
self._child_activity_map = weakref.WeakValueDictionary()
self._asso_socket_map = weakref.WeakValueDictionary()
self._timers = weakref.WeakValueDictionary()
self._started = False
@property
def name(self):
return self._name
@property
def started(self):
return self._started
def _validate_activity(self, activity):
"""Checks the validity of the given activity before it can be started.
"""
if not self._started:
raise ActivityException(desc='Tried to spawn a child activity'
' before Activity was started.')
if activity.started:
raise ActivityException(desc='Tried to start an Activity that was '
'already started.')
def _spawn_activity(self, activity, *args, **kwargs):
"""Starts *activity* in a new thread and passes *args* and *kwargs*.
Maintains pointer to this activity and stops *activity* when this
activity is stopped.
"""
self._validate_activity(activity)
# Spawn a new greenthread for given activity
greenthread = hub.spawn(activity.start, *args, **kwargs)
self._child_thread_map[activity.name] = greenthread
self._child_activity_map[activity.name] = activity
return greenthread
def _spawn_activity_after(self, seconds, activity, *args, **kwargs):
self._validate_activity(activity)
# Schedule to spawn a new greenthread after requested delay
greenthread = hub.spawn_after(seconds, activity.start, *args,
**kwargs)
self._child_thread_map[activity.name] = greenthread
self._child_activity_map[activity.name] = activity
return greenthread
def _validate_callable(self, callable_):
if callable_ is None:
raise ActivityException(desc='Callable cannot be None')
if not hasattr(callable_, '__call__'):
raise ActivityException(desc='Currently only supports instances'
' that have __call__ as callable which'
' is missing in given arg.')
if not self._started:
raise ActivityException(desc='Tried to spawn a child thread '
'before this Activity was started.')
def _spawn(self, name, callable_, *args, **kwargs):
self._validate_callable(callable_)
greenthread = hub.spawn(callable_, *args, **kwargs)
self._child_thread_map[name] = greenthread
return greenthread
def _spawn_after(self, name, seconds, callable_, *args, **kwargs):
self._validate_callable(callable_)
greenthread = hub.spawn_after(seconds, callable_, *args, **kwargs)
self._child_thread_map[name] = greenthread
return greenthread
def _create_timer(self, name, func, *arg, **kwarg):
timer = LoopingCall(func, *arg, **kwarg)
self._timers[name] = timer
return timer
@abc.abstractmethod
def _run(self, *args, **kwargs):
"""Main activity of this class.
Can launch other activity/callables here.
Sub-classes should override this method.
"""
raise NotImplementedError()
def start(self, *args, **kwargs):
"""Starts the main activity of this class.
Calls *_run* and calls *stop* when *_run* is finished.
This method should be run in a new greenthread as it may not return
immediately.
"""
if self.started:
raise ActivityException(desc='Activity already started')
self._started = True
try:
self._run(*args, **kwargs)
except BGPSException:
LOG.error(traceback.format_exc())
finally:
if self.started: # could have been stopped somewhere else
self.stop()
def pause(self, seconds=0):
"""Relinquishes hub for given number of seconds.
        In other words it goes to sleep to give other greenthreads a chance to
        run.
"""
hub.sleep(seconds)
def _stop_child_activities(self, name=None):
"""Stop all child activities spawn by this activity.
"""
# Makes a list copy of items() to avoid dictionary size changed
# during iteration
for child_name, child in list(self._child_activity_map.items()):
if name is not None and name != child_name:
continue
LOG.debug('%s: Stopping child activity %s ', self.name, child_name)
if child.started:
child.stop()
self._child_activity_map.pop(child_name, None)
def _stop_child_threads(self, name=None):
"""Stops all threads spawn by this activity.
"""
for thread_name, thread in list(self._child_thread_map.items()):
if name is not None and thread_name is name:
LOG.debug('%s: Stopping child thread %s',
self.name, thread_name)
thread.kill()
self._child_thread_map.pop(thread_name, None)
def _close_asso_sockets(self):
"""Closes all the sockets linked to this activity.
"""
for sock_name, sock in list(self._asso_socket_map.items()):
LOG.debug('%s: Closing socket %s - %s', self.name, sock_name, sock)
sock.close()
def _stop_timers(self):
for timer_name, timer in list(self._timers.items()):
LOG.debug('%s: Stopping timer %s', self.name, timer_name)
timer.stop()
def stop(self):
"""Stops all child threads and activities and closes associated
sockets.
Re-initializes this activity to be able to start again.
Raise `ActivityException` if activity is not currently started.
"""
if not self.started:
raise ActivityException(desc='Cannot call stop when activity is '
'not started or has been stopped already.')
LOG.debug('Stopping activity %s.', self.name)
self._stop_timers()
self._stop_child_activities()
self._stop_child_threads()
self._close_asso_sockets()
# Setup activity for start again.
self._started = False
self._asso_socket_map = weakref.WeakValueDictionary()
self._child_activity_map = weakref.WeakValueDictionary()
self._child_thread_map = weakref.WeakValueDictionary()
self._timers = weakref.WeakValueDictionary()
LOG.debug('Stopping activity %s finished.', self.name)
def _canonicalize_ip(self, ip):
addr = netaddr.IPAddress(ip)
if addr.is_ipv4_mapped():
ip = str(addr.ipv4())
return ip
def get_remotename(self, sock):
addr, port = sock.getpeername()[:2]
return self._canonicalize_ip(addr), str(port)
def get_localname(self, sock):
addr, port = sock.getsockname()[:2]
return self._canonicalize_ip(addr), str(port)
def _create_listen_socket(self, family, loc_addr):
s = socket.socket(family)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(loc_addr)
s.listen(1)
return s
def _listen_socket_loop(self, s, conn_handle):
while True:
sock, client_address = s.accept()
client_address, port = self.get_remotename(sock)
LOG.debug('Connect request received from client for port'
' %s:%s', client_address, port)
client_name = self.name + '_client@' + client_address
self._asso_socket_map[client_name] = sock
self._spawn(client_name, conn_handle, sock)
def _listen_tcp(self, loc_addr, conn_handle):
"""Creates a TCP server socket which listens on `port` number.
For each connection `server_factory` starts a new protocol.
"""
info = socket.getaddrinfo(loc_addr[0], loc_addr[1], socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
listen_sockets = {}
for res in info:
af, socktype, proto, _, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
sock.bind(sa)
sock.listen(50)
listen_sockets[sa] = sock
except socket.error as e:
LOG.error('Error creating socket: %s', e)
if sock:
sock.close()
count = 0
server = None
for sa in listen_sockets:
name = self.name + '_server@' + str(sa[0])
self._asso_socket_map[name] = listen_sockets[sa]
if count == 0:
import eventlet
server = eventlet.spawn(self._listen_socket_loop,
listen_sockets[sa], conn_handle)
self._child_thread_map[name] = server
count += 1
else:
server = self._spawn(name, self._listen_socket_loop,
listen_sockets[sa], conn_handle)
return server, listen_sockets
def _connect_tcp(self, peer_addr, conn_handler, time_out=None,
bind_address=None, password=None):
"""Creates a TCP connection to given peer address.
        Tries to establish the connection for `time_out` seconds. If
        successful, the socket is handed to `conn_handler` in a new thread.
The socket is bound to `bind_address` if specified.
"""
LOG.debug('Connect TCP called for %s:%s', peer_addr[0], peer_addr[1])
if ip.valid_ipv4(peer_addr[0]):
family = socket.AF_INET
else:
family = socket.AF_INET6
with Timeout(time_out, socket.error):
sock = socket.socket(family)
if bind_address:
sock.bind(bind_address)
if password:
sockopt.set_tcp_md5sig(sock, peer_addr[0], password)
sock.connect(peer_addr)
# socket.error exception is raised in case of timeout and
# the following code is executed only when the connection
# is established.
# Connection name for pro-active connection is made up of
# local end address + remote end address
local = self.get_localname(sock)[0]
remote = self.get_remotename(sock)[0]
conn_name = ('L: ' + local + ', R: ' + remote)
self._asso_socket_map[conn_name] = sock
# If connection is established, we call connection handler
# in a new thread.
self._spawn(conn_name, conn_handler, sock)
return sock
#
# Sink
#
class Sink(object):
"""An entity to which we send out messages (eg. BGP routes)."""
#
# OutgoingMsgList
#
# A circular list type in which objects are linked to each
# other using the 'next_sink_out_route' and 'prev_sink_out_route'
# attributes.
#
OutgoingMsgList = CircularListType(next_attr_name='next_sink_out_route',
prev_attr_name='prev_sink_out_route')
# Next available index that can identify an instance uniquely.
idx = 0
@staticmethod
def next_index():
"""Increments the sink index and returns the value."""
Sink.idx += 1
return Sink.idx
def __init__(self):
# A small integer that represents this sink.
self.index = Sink.next_index()
# Create an event for signal enqueuing.
from .utils.evtlet import EventletIOFactory
self.outgoing_msg_event = EventletIOFactory.create_custom_event()
self.messages_queued = 0
# List of msgs. that are to be sent to this peer. Each item
# in the list is an instance of OutgoingRoute.
self.outgoing_msg_list = Sink.OutgoingMsgList()
def clear_outgoing_msg_list(self):
self.outgoing_msg_list = Sink.OutgoingMsgList()
def enque_outgoing_msg(self, msg):
self.outgoing_msg_list.append(msg)
self.outgoing_msg_event.set()
self.messages_queued += 1
def enque_first_outgoing_msg(self, msg):
self.outgoing_msg_list.prepend(msg)
self.outgoing_msg_event.set()
def __iter__(self):
return self
def next(self):
"""Pops and returns the first outgoing message from the list.
        If the message list currently has no messages, the calling thread will
        be put to sleep until we have at least one message in the list that
can be popped and returned.
"""
# We pick the first outgoing available and send it.
outgoing_msg = self.outgoing_msg_list.pop_first()
# If we do not have any outgoing msg., we wait.
if outgoing_msg is None:
self.outgoing_msg_event.clear()
self.outgoing_msg_event.wait()
outgoing_msg = self.outgoing_msg_list.pop_first()
return outgoing_msg
# For Python 3 compatibility
__next__ = next
#
# Source
#
class Source(object):
"""An entity that gives us BGP routes. A BGP peer, for example."""
def __init__(self, version_num):
# Number that is currently being used to stamp information
# received from this source. We will bump this number up when
# the information that is now expected from the source belongs
# to a different logical batch. This mechanism can be used to
# identify stale information.
self.version_num = version_num
class FlexinetPeer(Source, Sink):
def __init__(self):
# Initialize source and sink
Source.__init__(self, 1)
Sink.__init__(self)
# Registry of validators for configuration/settings.
_VALIDATORS = {}
def validate(**kwargs):
"""Defines a decorator to register a validator with a name for look-up.
    If name is not provided, the function name is used as the name of the validator.
"""
def decorator(func):
_VALIDATORS[kwargs.pop('name', func.__name__)] = func
return func
return decorator
def get_validator(name):
"""Returns a validator registered for given name.
"""
return _VALIDATORS.get(name)
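# Hedged sketch (not part of the original module): registering a validator
# under an explicit name and looking it up again. The setting name and the
# value range are hypothetical.
@validate(name='example_as_number')
def _validate_example_as_number(value):
    return isinstance(value, int) and 0 < value < 2 ** 32
def _example_validator_lookup():
    checker = get_validator('example_as_number')
    return checker(64512)  # True for a private AS number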
|
iwaseyusuke/ryu
|
ryu/services/protocols/bgp/base.py
|
Python
|
apache-2.0
| 19,278
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Providers sub-commands"""
import re
from airflow.cli.simple_table import AirflowConsole
from airflow.providers_manager import ProvidersManager
from airflow.utils.cli import suppress_logs_and_warning
def _remove_rst_syntax(value: str) -> str:
return re.sub("[`_<>]", "", value.strip(" \n."))
@suppress_logs_and_warning
def provider_get(args):
"""Get a provider info."""
providers = ProvidersManager().providers
if args.provider_name in providers:
provider_version = providers[args.provider_name].version
provider_info = providers[args.provider_name].provider_info
if args.full:
provider_info["description"] = _remove_rst_syntax(provider_info["description"])
AirflowConsole().print_as(
data=[provider_info],
output=args.output,
)
else:
print(f"Provider: {args.provider_name}")
print(f"Version: {provider_version}")
else:
raise SystemExit(f"No such provider installed: {args.provider_name}")
@suppress_logs_and_warning
def providers_list(args):
"""Lists all providers at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().providers.values()),
output=args.output,
mapper=lambda x: {
"package_name": x[1]["package-name"],
"description": _remove_rst_syntax(x[1]["description"]),
"version": x[0],
},
)
@suppress_logs_and_warning
def hooks_list(args):
"""Lists all hooks at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().hooks.items()),
output=args.output,
mapper=lambda x: {
"connection_type": x[0],
"class": x[1].connection_class,
"conn_id_attribute_name": x[1].connection_id_attribute_name,
'package_name': x[1].package_name,
'hook_name': x[1].hook_name,
},
)
@suppress_logs_and_warning
def connection_form_widget_list(args):
"""Lists all custom connection form fields at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().connection_form_widgets.items()),
output=args.output,
mapper=lambda x: {
"connection_parameter_name": x[0],
"class": x[1].connection_class,
'package_name': x[1].package_name,
'field_type': x[1].field.field_class.__name__,
},
)
@suppress_logs_and_warning
def connection_field_behaviours(args):
"""Lists field behaviours"""
AirflowConsole().print_as(
data=list(ProvidersManager().field_behaviours.keys()),
output=args.output,
mapper=lambda x: {
"field_behaviours": x,
},
)
@suppress_logs_and_warning
def extra_links_list(args):
"""Lists all extra links at the command line"""
AirflowConsole().print_as(
data=ProvidersManager().extra_links_class_names,
output=args.output,
mapper=lambda x: {
"extra_link_class_name": x,
},
)
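# Illustrative sketch (not part of the original module): what _remove_rst_syntax
# does to a typical provider description.  The sample text is hypothetical.
if __name__ == '__main__':
    sample = "`Amazon Web Services <https://aws.amazon.com/>`__ integration.\n"
    print(_remove_rst_syntax(sample))
    # -> Amazon Web Services https://aws.amazon.com/ integration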
|
nathanielvarona/airflow
|
airflow/cli/commands/provider_command.py
|
Python
|
apache-2.0
| 3,862
|
# Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
# Andreas Hansson
import m5.objects
import inspect
import sys
from textwrap import TextWrapper
# Dictionary mapping names of real memory controller models to their
# classes.
_mem_classes = {}
# Memory aliases. We make sure they exist before we add them to the
# final list. A target may be specified as a tuple, in which case the
# first available memory controller model in the tuple will be used.
_mem_aliases_all = [
("simple_mem", "SimpleMemory"),
("ddr3_1600_x64", "DDR3_1600_x64"),
("lpddr2_s4_1066_x32", "LPDDR2_S4_1066_x32"),
("lpddr3_1600_x32", "LPDDR3_1600_x32"),
("wio_200_x128", "WideIO_200_x128"),
("dramsim2", "DRAMSim2")
]
# Filtered list of aliases. Only aliases for existing memory
# controllers exist in this list.
_mem_aliases = {}
def is_mem_class(cls):
"""Determine if a class is a memory controller that can be instantiated"""
# We can't use the normal inspect.isclass because the ParamFactory
# and ProxyFactory classes have a tendency to confuse it.
try:
return issubclass(cls, m5.objects.AbstractMemory) and \
not cls.abstract
except TypeError:
return False
def get(name):
"""Get a memory class from a user provided class name or alias."""
real_name = _mem_aliases.get(name, name)
try:
mem_class = _mem_classes[real_name]
return mem_class
except KeyError:
print "%s is not a valid memory controller." % (name,)
sys.exit(1)
def print_mem_list():
"""Print a list of available memory classes including their aliases."""
print "Available memory classes:"
doc_wrapper = TextWrapper(initial_indent="\t\t", subsequent_indent="\t\t")
for name, cls in _mem_classes.items():
print "\t%s" % name
# Try to extract the class documentation from the class help
# string.
doc = inspect.getdoc(cls)
if doc:
for line in doc_wrapper.wrap(doc):
print line
if _mem_aliases:
print "\nMemory aliases:"
for alias, target in _mem_aliases.items():
print "\t%s => %s" % (alias, target)
def mem_names():
"""Return a list of valid memory names."""
return _mem_classes.keys() + _mem_aliases.keys()
# Add all memory controllers in the object hierarchy.
for name, cls in inspect.getmembers(m5.objects, is_mem_class):
_mem_classes[name] = cls
for alias, target in _mem_aliases_all:
if isinstance(target, tuple):
# Some aliases contain a list of memory controller models
# sorted in priority order. Use the first target that's
# available.
for t in target:
if t in _mem_classes:
_mem_aliases[alias] = t
break
elif target in _mem_classes:
# Normal alias
_mem_aliases[alias] = target
def create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits, cache_line_size):
"""
    Helper function for creating a single memory controller from the given
    options. This function is invoked multiple times in the config_mem function
to create an array of controllers.
"""
import math
# The default behaviour is to interleave on cache line granularity
cache_line_bit = int(math.log(cache_line_size, 2)) - 1
intlv_low_bit = cache_line_bit
# Create an instance so we can figure out the address
# mapping and row-buffer size
ctrl = cls()
# Only do this for DRAMs
if issubclass(cls, m5.objects.DRAMCtrl):
# Inform each controller how many channels to account
# for
ctrl.channels = nbr_mem_ctrls
        # If the channel bits appear after the column bits, we need
        # to add the appropriate number of bits for the row buffer
        # size
if ctrl.addr_mapping.value == 'RoRaBaChCo':
# This computation only really needs to happen
# once, but as we rely on having an instance we
# end up having to repeat it for each and every
# one
rowbuffer_size = ctrl.device_rowbuffer_size.value * \
ctrl.devices_per_rank.value
intlv_low_bit = int(math.log(rowbuffer_size, 2)) - 1
# We got all we need to configure the appropriate address
# range
ctrl.range = m5.objects.AddrRange(r.start, size = r.size(),
intlvHighBit = \
intlv_low_bit + intlv_bits,
intlvBits = intlv_bits,
intlvMatch = i)
return ctrl
def config_mem(options, system):
"""
Create the memory controllers based on the options and attach them.
If requested, we make a multi-channel configuration of the
selected memory controller class by creating multiple instances of
the specific class. The individual controllers have their
parameters set such that the address range is interleaved between
them.
"""
nbr_mem_ctrls = options.mem_channels
import math
from m5.util import fatal
intlv_bits = int(math.log(nbr_mem_ctrls, 2))
if 2 ** intlv_bits != nbr_mem_ctrls:
fatal("Number of memory channels must be a power of 2")
cls = get(options.mem_type)
mem_ctrls = []
# For every range (most systems will only have one), create an
# array of controllers and set their parameters to match their
# address mapping in the case of a DRAM
for r in system.mem_ranges:
for i in xrange(nbr_mem_ctrls):
mem_ctrls.append(create_mem_ctrl(cls, r, i, nbr_mem_ctrls,
intlv_bits,
system.cache_line_size.value))
system.mem_ctrls = mem_ctrls
# Connect the controllers to the membus
for i in xrange(len(system.mem_ctrls)):
system.mem_ctrls[i].port = system.membus.master
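# Illustrative sketch (not part of the original file): the interleaving
# arithmetic performed by create_mem_ctrl/config_mem, worked through for a
# hypothetical system with 64-byte cache lines and 4 memory channels.
if __name__ == '__main__':
    import math
    cache_line_size = 64
    nbr_mem_ctrls = 4
    cache_line_bit = int(math.log(cache_line_size, 2)) - 1  # bit 5
    intlv_bits = int(math.log(nbr_mem_ctrls, 2))            # 2 channel-select bits
    intlv_high_bit = cache_line_bit + intlv_bits             # bit 7
    # With the default cache-line interleaving the channel-select bits sit
    # just above the line offset, so consecutive cache lines are served by
    # consecutive controllers (intlvMatch picks which controller answers).
    print("intlv_low_bit=%d intlv_bits=%d intlv_high_bit=%d"
          % (cache_line_bit, intlv_bits, intlv_high_bit))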
|
xiaoyuanW/gem5
|
configs/common/MemConfig.py
|
Python
|
bsd-3-clause
| 8,019
|
from . import errordocument
from . import recursive
from . import static
|
ryanpetrello/pecan
|
pecan/middleware/__init__.py
|
Python
|
bsd-3-clause
| 73
|
#!/usr/bin/python
#
# Copyright 2015 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# generate_case_lists.py:
# Helper script for updating the dEQP case list files, stored in the repo.
# Generally only used when the dEQP config changes, or when we roll dEQP.
import subprocess
import sys
import os
import shutil
import gzip
# TODO(jmadill): other platforms
os_suffix = '.exe'
build_dir = os.path.join('build', 'Debug_x64')
def run_deqp(deqp_exe):
subprocess.call([deqp_exe, '--deqp-runmode=txt-caselist', '--deqp-gl-context-type=null'])
# This stuff is all hard-coded for now. If we need more versatility we can
# make some options into command line arguments with default values.
script_dir = os.path.dirname(sys.argv[0])
path_to_deqp_exe = os.path.join('..', '..', build_dir)
deqp_data_path = os.path.join('third_party', 'deqp', 'data')
os.chdir(os.path.join(script_dir, '..'))
run_deqp(os.path.join(path_to_deqp_exe, 'angle_deqp_gles2_tests' + os_suffix))
run_deqp(os.path.join(path_to_deqp_exe, 'angle_deqp_gles3_tests' + os_suffix))
run_deqp(os.path.join(path_to_deqp_exe, 'angle_deqp_egl_tests' + os_suffix))
def compress_case_list(case_file):
with open(os.path.join(deqp_data_path, case_file + '.txt')) as in_handle:
data = in_handle.read()
in_handle.close()
with gzip.open(os.path.join('deqp_support', case_file + '.txt.gz'), 'wb') as out_handle:
out_handle.write(data)
out_handle.close()
compress_case_list('dEQP-GLES2-cases')
compress_case_list('dEQP-GLES3-cases')
compress_case_list('dEQP-EGL-cases')
|
crezefire/angle
|
src/tests/deqp_support/generate_case_lists.py
|
Python
|
bsd-3-clause
| 1,684
|
"""
sentry.templatetags.sentry_activity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from django import template
from django.utils.html import escape, urlize, linebreaks
from django.utils.safestring import mark_safe
from sentry.models import Activity, User
from sentry.templatetags.sentry_helpers import timesince
from sentry.utils.avatar import get_gravatar_url
register = template.Library()
ACTIVITY_ACTION_STRINGS = {
Activity.NOTE: 'left a note',
Activity.SET_RESOLVED: 'marked this event as resolved',
Activity.SET_UNRESOLVED: 'marked this event as unresolved',
Activity.SET_MUTED: 'marked this event as muted',
Activity.SET_PUBLIC: 'made this event public',
Activity.SET_PRIVATE: 'made this event private',
Activity.SET_REGRESSION: 'marked this event as a regression',
Activity.CREATE_ISSUE: u'created an issue on {provider:s} titled <a href="{location:s}">{title:s}</a>',
Activity.FIRST_SEEN: 'first saw this event',
Activity.ASSIGNED: 'assigned this event to {user:s}',
Activity.UNASSIGNED: 'unassigned this event',
Activity.RELEASE: 'saw a new release: {version:s}',
}
@register.filter
def render_activity(item):
if not item.group:
# not implemented
return
try:
action_str = ACTIVITY_ACTION_STRINGS[item.type]
except KeyError:
logging.warning('Unknown activity type present: %s', item.type)
return
if item.type == Activity.CREATE_ISSUE:
action_str = action_str.format(**item.data)
elif item.type == Activity.ASSIGNED:
if item.data['assignee'] == item.user_id:
assignee_name = 'themselves'
else:
try:
assignee = User.objects.get(id=item.data['assignee'])
except User.DoesNotExist:
assignee_name = 'unknown'
else:
assignee_name = assignee.get_display_name()
action_str = action_str.format(user=assignee_name)
output = ''
if item.user:
user = item.user
name = user.name or user.email
output += '<span class="avatar"><img src="%s"></span> ' % (get_gravatar_url(user.email, size=20),)
output += '<strong>%s</strong> %s' % (escape(name), action_str)
else:
output += '<span class="avatar sentry"></span> '
output += 'The system %s' % (action_str,)
output += ' <span class="sep">—</span> <span class="time">%s</span>' % (timesince(item.datetime),)
if item.type == Activity.NOTE:
output += linebreaks(urlize(escape(item.data['text'])))
return mark_safe(output)
|
nicholasserra/sentry
|
src/sentry/templatetags/sentry_activity.py
|
Python
|
bsd-3-clause
| 2,769
|
# -*- coding: utf-8 -*-
from mock import Mock
from django.test import TestCase
from opbeat.events import Message
class MessageTest(TestCase):
def test_to_string(self):
unformatted_message = 'My message from %s about %s'
client = Mock()
message = Message(client)
message.logger = Mock()
data = {
'param_message': {
'message': unformatted_message,
}
}
self.assertEqual(message.to_string(data), unformatted_message)
data['param_message']['params'] = (1, 2)
self.assertEqual(message.to_string(data),
unformatted_message % (1, 2))
|
dirtycoder/opbeat_python
|
tests/events/tests.py
|
Python
|
bsd-3-clause
| 676
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DefinedGeography'
db.create_table('seak_definedgeography', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=99)),
))
db.send_create_signal('seak', ['DefinedGeography'])
# Adding M2M table for field planning_units on 'DefinedGeography'
db.create_table('seak_definedgeography_planning_units', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('definedgeography', models.ForeignKey(orm['seak.definedgeography'], null=False)),
('planningunit', models.ForeignKey(orm['seak.planningunit'], null=False))
))
db.create_unique('seak_definedgeography_planning_units', ['definedgeography_id', 'planningunit_id'])
def backwards(self, orm):
# Deleting model 'DefinedGeography'
db.delete_table('seak_definedgeography')
# Removing M2M table for field planning_units on 'DefinedGeography'
db.delete_table('seak_definedgeography_planning_units')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 19, 9, 43, 46, 965579)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 19, 9, 43, 46, 965425)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'seak.conservationfeature': {
'Meta': {'object_name': 'ConservationFeature'},
'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'level1': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'level2': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level3': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level4': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level5': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '90', 'null': 'True', 'blank': 'True'})
},
'seak.cost': {
'Meta': {'object_name': 'Cost'},
'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'seak.definedgeography': {
'Meta': {'object_name': 'DefinedGeography'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'planning_units': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['seak.PlanningUnit']", 'symmetrical': 'False'})
},
'seak.folder': {
'Meta': {'object_name': 'Folder'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seak_folder_related'", 'to': "orm['auth.User']"})
},
'seak.planningunit': {
'Meta': {'object_name': 'PlanningUnit'},
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'fid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'})
},
'seak.planningunitshapes': {
'Meta': {'object_name': 'PlanningUnitShapes'},
'bests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'fid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"}),
'stamp': ('django.db.models.fields.FloatField', [], {})
},
'seak.puvscf': {
'Meta': {'unique_together': "(('pu', 'cf'),)", 'object_name': 'PuVsCf'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cf': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.ConservationFeature']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"})
},
'seak.puvscost': {
'Meta': {'unique_together': "(('pu', 'cost'),)", 'object_name': 'PuVsCost'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.Cost']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"})
},
'seak.scenario': {
'Meta': {'object_name': 'Scenario'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'seak_scenario_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_geography': ('seak.models.JSONField', [], {}),
'input_penalties': ('seak.models.JSONField', [], {}),
'input_relativecosts': ('seak.models.JSONField', [], {}),
'input_scalefactor': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'input_targets': ('seak.models.JSONField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'output_best': ('seak.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'output_pu_count': ('seak.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'seak_scenario_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seak_scenario_related'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['seak']
|
Ecotrust/cogs-priorities
|
priorities/seak/migrations/0002_auto__add_definedgeography.py
|
Python
|
bsd-3-clause
| 12,162
|
"""Constraints definition for minimize."""
from __future__ import division, print_function, absolute_import
import numpy as np
from ._hessian_update_strategy import BFGS
from ._differentiable_functions import (
VectorFunction, LinearVectorFunction, IdentityVectorFunction)
from .optimize import OptimizeWarning
from warnings import warn
from scipy._lib._numpy_compat import suppress_warnings
from scipy.sparse import issparse
class NonlinearConstraint(object):
"""Nonlinear constraint on the variables.
The constraint has the general inequality form::
lb <= fun(x) <= ub
Here the vector of independent variables x is passed as ndarray of shape
(n,) and ``fun`` returns a vector with m components.
It is possible to use equal bounds to represent an equality constraint or
infinite bounds to represent a one-sided constraint.
Parameters
----------
fun : callable
The function defining the constraint.
The signature is ``fun(x) -> array_like, shape (m,)``.
lb, ub : array_like
Lower and upper bounds on the constraint. Each array must have the
shape (m,) or be a scalar, in the latter case a bound will be the same
for all components of the constraint. Use ``np.inf`` with an
appropriate sign to specify a one-sided constraint.
Set components of `lb` and `ub` equal to represent an equality
constraint. Note that you can mix constraints of different types:
interval, one-sided or equality, by setting different components of
`lb` and `ub` as necessary.
jac : {callable, '2-point', '3-point', 'cs'}, optional
Method of computing the Jacobian matrix (an m-by-n matrix,
where element (i, j) is the partial derivative of f[i] with
respect to x[j]). The keywords {'2-point', '3-point',
'cs'} select a finite difference scheme for the numerical estimation.
A callable must have the following signature:
``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
Default is '2-point'.
hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
Method for computing the Hessian matrix. The keywords
{'2-point', '3-point', 'cs'} select a finite difference scheme for
numerical estimation. Alternatively, objects implementing
`HessianUpdateStrategy` interface can be used to approximate the
Hessian. Currently available implementations are:
- `BFGS` (default option)
- `SR1`
A callable must return the Hessian matrix of ``dot(fun, v)`` and
must have the following signature:
``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``.
Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers.
keep_feasible : array_like of bool, optional
Whether to keep the constraint components feasible throughout
        iterations. A single value sets this property for all components.
Default is False. Has no effect for equality constraints.
finite_diff_rel_step: None or array_like, optional
Relative step size for the finite difference approximation. Default is
None, which will select a reasonable value automatically depending
on a finite difference scheme.
finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations. A zero entry means
that a corresponding element in the Jacobian is identically zero.
If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used.
Notes
-----
Finite difference schemes {'2-point', '3-point', 'cs'} may be used for
approximating either the Jacobian or the Hessian. We, however, do not allow
its use for approximating both simultaneously. Hence whenever the Jacobian
is estimated via finite-differences, we require the Hessian to be estimated
using one of the quasi-Newton strategies.
The scheme 'cs' is potentially the most accurate, but requires the function
    to correctly handle complex inputs and be analytically continuable to the
complex plane. The scheme '3-point' is more accurate than '2-point' but
requires twice as many operations.
"""
def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(),
keep_feasible=False, finite_diff_rel_step=None,
finite_diff_jac_sparsity=None):
self.fun = fun
self.lb = lb
self.ub = ub
self.finite_diff_rel_step = finite_diff_rel_step
self.finite_diff_jac_sparsity = finite_diff_jac_sparsity
self.jac = jac
self.hess = hess
self.keep_feasible = keep_feasible
class LinearConstraint(object):
"""Linear constraint on the variables.
The constraint has the general inequality form::
lb <= A.dot(x) <= ub
Here the vector of independent variables x is passed as ndarray of shape
(n,) and the matrix A has shape (m, n).
It is possible to use equal bounds to represent an equality constraint or
infinite bounds to represent a one-sided constraint.
Parameters
----------
A : {array_like, sparse matrix}, shape (m, n)
Matrix defining the constraint.
lb, ub : array_like
Lower and upper bounds on the constraint. Each array must have the
shape (m,) or be a scalar, in the latter case a bound will be the same
for all components of the constraint. Use ``np.inf`` with an
appropriate sign to specify a one-sided constraint.
Set components of `lb` and `ub` equal to represent an equality
constraint. Note that you can mix constraints of different types:
interval, one-sided or equality, by setting different components of
`lb` and `ub` as necessary.
keep_feasible : array_like of bool, optional
Whether to keep the constraint components feasible throughout
        iterations. A single value sets this property for all components.
Default is False. Has no effect for equality constraints.
"""
def __init__(self, A, lb, ub, keep_feasible=False):
self.A = A
self.lb = lb
self.ub = ub
self.keep_feasible = keep_feasible
class Bounds(object):
"""Bounds constraint on the variables.
The constraint has the general inequality form::
lb <= x <= ub
It is possible to use equal bounds to represent an equality constraint or
infinite bounds to represent a one-sided constraint.
Parameters
----------
lb, ub : array_like, optional
Lower and upper bounds on independent variables. Each array must
have the same size as x or be a scalar, in which case a bound will be
the same for all the variables. Set components of `lb` and `ub` equal
to fix a variable. Use ``np.inf`` with an appropriate sign to disable
bounds on all or some variables. Note that you can mix constraints of
different types: interval, one-sided or equality, by setting different
components of `lb` and `ub` as necessary.
keep_feasible : array_like of bool, optional
Whether to keep the constraint components feasible throughout
        iterations. A single value sets this property for all components.
Default is False. Has no effect for equality constraints.
"""
def __init__(self, lb, ub, keep_feasible=False):
self.lb = lb
self.ub = ub
self.keep_feasible = keep_feasible
def __repr__(self):
if np.any(self.keep_feasible):
return "{}({!r}, {!r}, keep_feasible={!r})".format(type(self).__name__, self.lb, self.ub, self.keep_feasible)
else:
return "{}({!r}, {!r})".format(type(self).__name__, self.lb, self.ub)
class PreparedConstraint(object):
"""Constraint prepared from a user defined constraint.
On creation it will check whether a constraint definition is valid and
the initial point is feasible. If created successfully, it will contain
the attributes listed below.
Parameters
----------
    constraint : {NonlinearConstraint, LinearConstraint, Bounds}
Constraint to check and prepare.
x0 : array_like
Initial vector of independent variables.
sparse_jacobian : bool or None, optional
If bool, then the Jacobian of the constraint will be converted
        to the corresponding format if necessary. If None (default), such
conversion is not made.
finite_diff_bounds : 2-tuple, optional
Lower and upper bounds on the independent variables for the finite
difference approximation, if applicable. Defaults to no bounds.
Attributes
----------
fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
Function defining the constraint wrapped by one of the convenience
classes.
bounds : 2-tuple
Contains lower and upper bounds for the constraints --- lb and ub.
These are converted to ndarray and have a size equal to the number of
the constraints.
keep_feasible : ndarray
Array indicating which components must be kept feasible with a size
equal to the number of the constraints.
"""
def __init__(self, constraint, x0, sparse_jacobian=None,
finite_diff_bounds=(-np.inf, np.inf)):
if isinstance(constraint, NonlinearConstraint):
fun = VectorFunction(constraint.fun, x0,
constraint.jac, constraint.hess,
constraint.finite_diff_rel_step,
constraint.finite_diff_jac_sparsity,
finite_diff_bounds, sparse_jacobian)
elif isinstance(constraint, LinearConstraint):
fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian)
elif isinstance(constraint, Bounds):
fun = IdentityVectorFunction(x0, sparse_jacobian)
else:
raise ValueError("`constraint` of an unknown type is passed.")
m = fun.m
lb = np.asarray(constraint.lb, dtype=float)
ub = np.asarray(constraint.ub, dtype=float)
if lb.ndim == 0:
lb = np.resize(lb, m)
if ub.ndim == 0:
ub = np.resize(ub, m)
keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool)
if keep_feasible.ndim == 0:
keep_feasible = np.resize(keep_feasible, m)
if keep_feasible.shape != (m,):
raise ValueError("`keep_feasible` has a wrong shape.")
mask = keep_feasible & (lb != ub)
f0 = fun.f
if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]):
raise ValueError("`x0` is infeasible with respect to some "
"inequality constraint with `keep_feasible` "
"set to True.")
self.fun = fun
self.bounds = (lb, ub)
self.keep_feasible = keep_feasible
def violation(self, x):
"""How much the constraint is exceeded by.
Parameters
----------
x : array-like
Vector of independent variables
Returns
-------
excess : array-like
How much the constraint is exceeded by, for each of the
constraints specified by `PreparedConstraint.fun`.
"""
with suppress_warnings() as sup:
sup.filter(UserWarning)
ev = self.fun.fun(np.asarray(x))
excess_lb = np.maximum(self.bounds[0] - ev, 0)
excess_ub = np.maximum(ev - self.bounds[1], 0)
return excess_lb + excess_ub
def new_bounds_to_old(lb, ub, n):
"""Convert the new bounds representation to the old one.
    The new representation is a tuple (lb, ub) and the old one is a list
    containing n tuples, the i-th of which holds the lower and upper bound
    on the i-th variable.
"""
lb = np.asarray(lb)
ub = np.asarray(ub)
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
lb = [x if x > -np.inf else None for x in lb]
ub = [x if x < np.inf else None for x in ub]
return list(zip(lb, ub))
def old_bound_to_new(bounds):
"""Convert the old bounds representation to the new one.
    The new representation is a tuple (lb, ub) and the old one is a list
    containing n tuples, the i-th of which holds the lower and upper bound
    on the i-th variable.
"""
lb, ub = zip(*bounds)
lb = np.array([x if x is not None else -np.inf for x in lb])
ub = np.array([x if x is not None else np.inf for x in ub])
return lb, ub
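# For example (illustrative, not part of the original file), with two variables:
#   new_bounds_to_old([0, -np.inf], [1, np.inf], 2)  returns [(0.0, 1.0), (None, None)]
#   old_bound_to_new([(0, 1), (None, None)])  returns arrays ([0., -inf], [1., inf])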
def strict_bounds(lb, ub, keep_feasible, n_vars):
"""Remove bounds which are not asked to be kept feasible."""
strict_lb = np.resize(lb, n_vars).astype(float)
strict_ub = np.resize(ub, n_vars).astype(float)
keep_feasible = np.resize(keep_feasible, n_vars)
strict_lb[~keep_feasible] = -np.inf
strict_ub[~keep_feasible] = np.inf
return strict_lb, strict_ub
def new_constraint_to_old(con, x0):
"""
Converts new-style constraint objects to old-style constraint dictionaries.
"""
if isinstance(con, NonlinearConstraint):
if (con.finite_diff_jac_sparsity is not None or
con.finite_diff_rel_step is not None or
not isinstance(con.hess, BFGS) or # misses user specified BFGS
con.keep_feasible):
warn("Constraint options `finite_diff_jac_sparsity`, "
"`finite_diff_rel_step`, `keep_feasible`, and `hess`"
"are ignored by this method.", OptimizeWarning)
fun = con.fun
if callable(con.jac):
jac = con.jac
else:
jac = None
else: # LinearConstraint
if con.keep_feasible:
warn("Constraint option `keep_feasible` is ignored by this "
"method.", OptimizeWarning)
A = con.A
if issparse(A):
A = A.todense()
fun = lambda x: np.dot(A, x)
jac = lambda x: A
# FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
# use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
pcon = PreparedConstraint(con, x0)
lb, ub = pcon.bounds
i_eq = lb == ub
i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
i_bound_above = np.logical_xor(ub != np.inf, i_eq)
i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)
if np.any(i_unbounded):
warn("At least one constraint is unbounded above and below. Such "
"constraints are ignored.", OptimizeWarning)
ceq = []
if np.any(i_eq):
def f_eq(x):
y = np.array(fun(x)).flatten()
return y[i_eq] - lb[i_eq]
ceq = [{"type": "eq", "fun": f_eq}]
if jac is not None:
def j_eq(x):
dy = jac(x)
if issparse(dy):
dy = dy.todense()
dy = np.atleast_2d(dy)
return dy[i_eq, :]
ceq[0]["jac"] = j_eq
cineq = []
n_bound_below = np.sum(i_bound_below)
n_bound_above = np.sum(i_bound_above)
if n_bound_below + n_bound_above:
def f_ineq(x):
y = np.zeros(n_bound_below + n_bound_above)
y_all = np.array(fun(x)).flatten()
y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
return y
cineq = [{"type": "ineq", "fun": f_ineq}]
if jac is not None:
def j_ineq(x):
dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
dy_all = jac(x)
if issparse(dy_all):
dy_all = dy_all.todense()
dy_all = np.atleast_2d(dy_all)
dy[:n_bound_below, :] = dy_all[i_bound_below]
dy[n_bound_below:, :] = -dy_all[i_bound_above]
return dy
cineq[0]["jac"] = j_ineq
old_constraints = ceq + cineq
if len(old_constraints) > 1:
warn("Equality and inequality constraints are specified in the same "
"element of the constraint list. For efficient use with this "
"method, equality and inequality constraints should be specified "
"in separate elements of the constraint list. ", OptimizeWarning)
return old_constraints
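# For example (illustrative, not part of the original file): a NonlinearConstraint
# with lb == ub == 0 is converted to a single {'type': 'eq', 'fun': ...} dict,
# a constraint with lb = 0, ub = np.inf becomes {'type': 'ineq', 'fun': ...},
# and a constraint mixing equality and inequality components yields one dict of
# each kind (triggering the efficiency warning above).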
def old_constraint_to_new(ic, con):
"""
Converts old-style constraint dictionaries to new-style constraint objects.
"""
# check type
try:
ctype = con['type'].lower()
except KeyError:
raise KeyError('Constraint %d has no type defined.' % ic)
except TypeError:
raise TypeError('Constraints must be a sequence of dictionaries.')
except AttributeError:
raise TypeError("Constraint's type must be a string.")
else:
if ctype not in ['eq', 'ineq']:
raise ValueError("Unknown constraint type '%s'." % con['type'])
if 'fun' not in con:
raise ValueError('Constraint %d has no function defined.' % ic)
lb = 0
if ctype == 'eq':
ub = 0
else:
ub = np.inf
jac = '2-point'
if 'args' in con:
args = con['args']
fun = lambda x: con['fun'](x, *args)
if 'jac' in con:
jac = lambda x: con['jac'](x, *args)
else:
fun = con['fun']
if 'jac' in con:
jac = con['jac']
return NonlinearConstraint(fun, lb, ub, jac)
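# Illustrative usage sketch (not part of the original file).  It only exercises
# the classes and helpers defined above.
if __name__ == '__main__':
    # A nonlinear inequality x0**2 + x1**2 <= 1 plus simple box bounds on x.
    circle = NonlinearConstraint(lambda x: x[0] ** 2 + x[1] ** 2, -np.inf, 1)
    box = Bounds([-2, -2], [2, 2])
    # PreparedConstraint checks the definition against an initial point and
    # exposes violation() for measuring how far a point is from feasibility.
    prepared = PreparedConstraint(circle, np.array([0.5, 0.5]))
    print(prepared.violation([2.0, 0.0]))  # fun(x) = 4 exceeds ub = 1 by 3
    # Convert the new-style (lb, ub) bounds to the old list-of-pairs form.
    print(new_bounds_to_old(box.lb, box.ub, 2))  # one (lb, ub) pair per variable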
|
gertingold/scipy
|
scipy/optimize/_constraints.py
|
Python
|
bsd-3-clause
| 17,881
|
# Generated by Django 2.2.5 on 2019-09-12 13:51
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.fields
import olympia.amo.models
import olympia.amo.validators
import olympia.users.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(default='', max_length=255, unique=True)),
('display_name', models.CharField(blank=True, default='', max_length=50, null=True, validators=[django.core.validators.MinLengthValidator(2), olympia.amo.validators.OneOrMorePrintableCharacterValidator()])),
('email', models.EmailField(max_length=75, null=True, unique=True)),
('averagerating', models.FloatField(null=True)),
('biography', models.TextField(blank=True, null=True)),
('deleted', models.BooleanField(default=False)),
('display_collections', models.BooleanField(default=False)),
('homepage', models.URLField(blank=True, default='', max_length=255)),
('location', models.CharField(blank=True, default='', max_length=255)),
('notes', models.TextField(blank=True, null=True)),
('occupation', models.CharField(blank=True, default='', max_length=255)),
('picture_type', models.CharField(blank=True, default=None, max_length=75, null=True)),
('read_dev_agreement', models.DateTimeField(blank=True, null=True)),
('last_login_ip', models.CharField(default='', editable=False, max_length=45)),
('email_changed', models.DateTimeField(editable=False, null=True)),
('banned', models.DateTimeField(editable=False, null=True)),
('is_public', models.BooleanField(db_column='public', default=False)),
('fxa_id', models.CharField(blank=True, max_length=128, null=True)),
('auth_id', models.PositiveIntegerField(default=olympia.users.models.generate_auth_id, null=True)),
('basket_token', models.CharField(blank=True, default='', max_length=128)),
('bypass_upload_restrictions', models.BooleanField(default=False)),
('reviewer_name', models.CharField(blank=True, default='', max_length=50, null=True, validators=[django.core.validators.MinLengthValidator(2)])),
],
options={
'db_table': 'users',
},
bases=(olympia.amo.models.OnChangeMixin, olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='DeniedName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(default='', max_length=255, unique=True)),
],
options={
'db_table': 'users_denied_name',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='DisposableEmailDomainRestriction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('domain', models.CharField(help_text='Enter full disposable email domain that should be blocked. Wildcards are not supported: if you need those, or need to match against the entire email and not just the domain part, use "Email user restrictions" instead.', max_length=255, unique=True)),
],
options={
'db_table': 'users_disposable_email_domain_restriction',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='EmailUserRestriction',
fields=[
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
('email_pattern', models.CharField(help_text='Either enter full domain or email that should be blocked or use glob-style wildcards to match other patterns. E.g "@*.mail.com"\n Please note that we do not include "@" in the match so you should do that in the pattern.', max_length=100, verbose_name='Email Pattern')),
],
options={
'db_table': 'users_user_email_restriction',
},
bases=(olympia.users.models.NormalizeEmailMixin, olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='IPNetworkUserRestriction',
fields=[
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
('network', olympia.amo.fields.CIDRField(blank=True, help_text='Enter a valid IPv4 or IPv6 CIDR network range, eg. 127.0.0.1/28', null=True)),
],
options={
'db_table': 'users_user_network_restriction',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='UserRestrictionHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('restriction', models.PositiveSmallIntegerField(choices=[(0, 'DeveloperAgreementRestriction'), (1, 'DisposableEmailDomainRestriction'), (2, 'EmailUserRestriction'), (3, 'IPNetworkUserRestriction'), (4, 'EmailReputationRestriction'), (5, 'IPReputationRestriction')], default=0)),
('ip_address', models.CharField(default='', max_length=45)),
('last_login_ip', models.CharField(default='', max_length=45)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='restriction_history', to=settings.AUTH_USER_MODEL)),
],
options={
'get_latest_by': 'created',
'abstract': False,
'base_manager_name': 'objects',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='UserNotification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('notification_id', models.IntegerField()),
('enabled', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notifications', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'users_notifications',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='UserHistory',
fields=[
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
('email', models.EmailField(max_length=75)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'users_history',
'ordering': ('-created',),
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.AddIndex(
model_name='userprofile',
index=models.Index(fields=['created'], name='created'),
),
migrations.AddIndex(
model_name='userprofile',
index=models.Index(fields=['fxa_id'], name='users_fxa_id_index'),
),
migrations.AddIndex(
model_name='usernotification',
index=models.Index(fields=['user'], name='user_id'),
),
migrations.AddIndex(
model_name='userhistory',
index=models.Index(fields=['email'], name='users_history_email'),
),
migrations.AddIndex(
model_name='userhistory',
index=models.Index(fields=['user'], name='users_history_user_idx'),
),
]
|
mozilla/addons-server
|
src/olympia/users/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 10,444
|
"""Generated client library for fusiontables version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from samples.fusiontables_sample.fusiontables_v1 import fusiontables_v1_messages as messages
class FusiontablesV1(base_api.BaseApiClient):
"""Generated client library for service fusiontables version v1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://www.googleapis.com/fusiontables/v1/'
_PACKAGE = u'fusiontables'
_SCOPES = [u'https://www.googleapis.com/auth/fusiontables', u'https://www.googleapis.com/auth/fusiontables.readonly']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'FusiontablesV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new fusiontables handle."""
url = url or self.BASE_URL
super(FusiontablesV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.column = self.ColumnService(self)
self.query = self.QueryService(self)
self.style = self.StyleService(self)
self.table = self.TableService(self)
self.task = self.TaskService(self)
self.template = self.TemplateService(self)
class ColumnService(base_api.BaseApiService):
"""Service class for the column resource."""
_NAME = u'column'
def __init__(self, client):
super(FusiontablesV1.ColumnService, self).__init__(client)
self._upload_configs = {
}
def Delete(self, request, global_params=None):
r"""Deletes the column.
Args:
request: (FusiontablesColumnDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(FusiontablesColumnDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'fusiontables.column.delete',
ordered_params=[u'tableId', u'columnId'],
path_params=[u'columnId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/columns/{columnId}',
request_field='',
request_type_name=u'FusiontablesColumnDeleteRequest',
response_type_name=u'FusiontablesColumnDeleteResponse',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves a specific column by its id.
Args:
request: (FusiontablesColumnGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Column) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.column.get',
ordered_params=[u'tableId', u'columnId'],
path_params=[u'columnId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/columns/{columnId}',
request_field='',
request_type_name=u'FusiontablesColumnGetRequest',
response_type_name=u'Column',
supports_download=False,
)
def Insert(self, request, global_params=None):
r"""Adds a new column to the table.
Args:
request: (FusiontablesColumnInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Column) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
Insert.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'fusiontables.column.insert',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/columns',
request_field=u'column',
request_type_name=u'FusiontablesColumnInsertRequest',
response_type_name=u'Column',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Retrieves a list of columns.
Args:
request: (FusiontablesColumnListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ColumnList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.column.list',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'tables/{tableId}/columns',
request_field='',
request_type_name=u'FusiontablesColumnListRequest',
response_type_name=u'ColumnList',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates the name or type of an existing column. This method supports patch semantics.
Args:
request: (FusiontablesColumnPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Column) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'fusiontables.column.patch',
ordered_params=[u'tableId', u'columnId'],
path_params=[u'columnId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/columns/{columnId}',
request_field=u'column',
request_type_name=u'FusiontablesColumnPatchRequest',
response_type_name=u'Column',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates the name or type of an existing column.
Args:
request: (FusiontablesColumnUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Column) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'fusiontables.column.update',
ordered_params=[u'tableId', u'columnId'],
path_params=[u'columnId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/columns/{columnId}',
request_field=u'column',
request_type_name=u'FusiontablesColumnUpdateRequest',
response_type_name=u'Column',
supports_download=False,
)
class QueryService(base_api.BaseApiService):
"""Service class for the query resource."""
_NAME = u'query'
def __init__(self, client):
super(FusiontablesV1.QueryService, self).__init__(client)
self._upload_configs = {
}
def Sql(self, request, global_params=None, download=None):
r"""Executes an SQL SELECT/INSERT/UPDATE/DELETE/SHOW/DESCRIBE/CREATE statement.
Args:
request: (FusiontablesQuerySqlRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Sqlresponse) The response message.
"""
config = self.GetMethodConfig('Sql')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
Sql.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'fusiontables.query.sql',
ordered_params=[u'sql'],
path_params=[],
query_params=[u'hdrs', u'sql', u'typed'],
relative_path=u'query',
request_field='',
request_type_name=u'FusiontablesQuerySqlRequest',
response_type_name=u'Sqlresponse',
supports_download=True,
)
def SqlGet(self, request, global_params=None, download=None):
r"""Executes an SQL SELECT/SHOW/DESCRIBE statement.
Args:
request: (FusiontablesQuerySqlGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Sqlresponse) The response message.
"""
config = self.GetMethodConfig('SqlGet')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
SqlGet.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.query.sqlGet',
ordered_params=[u'sql'],
path_params=[],
query_params=[u'hdrs', u'sql', u'typed'],
relative_path=u'query',
request_field='',
request_type_name=u'FusiontablesQuerySqlGetRequest',
response_type_name=u'Sqlresponse',
supports_download=True,
)
class StyleService(base_api.BaseApiService):
"""Service class for the style resource."""
_NAME = u'style'
def __init__(self, client):
super(FusiontablesV1.StyleService, self).__init__(client)
self._upload_configs = {
}
def Delete(self, request, global_params=None):
r"""Deletes a style.
Args:
request: (FusiontablesStyleDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(FusiontablesStyleDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'fusiontables.style.delete',
ordered_params=[u'tableId', u'styleId'],
path_params=[u'styleId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/styles/{styleId}',
request_field='',
request_type_name=u'FusiontablesStyleDeleteRequest',
response_type_name=u'FusiontablesStyleDeleteResponse',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets a specific style.
Args:
request: (FusiontablesStyleGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StyleSetting) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.style.get',
ordered_params=[u'tableId', u'styleId'],
path_params=[u'styleId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/styles/{styleId}',
request_field='',
request_type_name=u'FusiontablesStyleGetRequest',
response_type_name=u'StyleSetting',
supports_download=False,
)
def Insert(self, request, global_params=None):
r"""Adds a new style for the table.
Args:
request: (StyleSetting) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StyleSetting) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
Insert.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'fusiontables.style.insert',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/styles',
request_field='<request>',
request_type_name=u'StyleSetting',
response_type_name=u'StyleSetting',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Retrieves a list of styles.
Args:
request: (FusiontablesStyleListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StyleSettingList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.style.list',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'tables/{tableId}/styles',
request_field='',
request_type_name=u'FusiontablesStyleListRequest',
response_type_name=u'StyleSettingList',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates an existing style. This method supports patch semantics.
Args:
request: (StyleSetting) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StyleSetting) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'fusiontables.style.patch',
ordered_params=[u'tableId', u'styleId'],
path_params=[u'styleId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/styles/{styleId}',
request_field='<request>',
request_type_name=u'StyleSetting',
response_type_name=u'StyleSetting',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates an existing style.
Args:
request: (StyleSetting) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StyleSetting) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'fusiontables.style.update',
ordered_params=[u'tableId', u'styleId'],
path_params=[u'styleId', u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/styles/{styleId}',
request_field='<request>',
request_type_name=u'StyleSetting',
response_type_name=u'StyleSetting',
supports_download=False,
)
class TableService(base_api.BaseApiService):
"""Service class for the table resource."""
_NAME = u'table'
def __init__(self, client):
super(FusiontablesV1.TableService, self).__init__(client)
self._upload_configs = {
'ImportRows': base_api.ApiUploadInfo(
accept=['application/octet-stream'],
max_size=262144000,
resumable_multipart=True,
resumable_path=u'/resumable/upload/fusiontables/v1/tables/{tableId}/import',
simple_multipart=True,
simple_path=u'/upload/fusiontables/v1/tables/{tableId}/import',
),
'ImportTable': base_api.ApiUploadInfo(
accept=['application/octet-stream'],
max_size=262144000,
resumable_multipart=True,
resumable_path=u'/resumable/upload/fusiontables/v1/tables/import',
simple_multipart=True,
simple_path=u'/upload/fusiontables/v1/tables/import',
),
}
def Copy(self, request, global_params=None):
r"""Copies a table.
Args:
request: (FusiontablesTableCopyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('Copy')
return self._RunMethod(
config, request, global_params=global_params)
Copy.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'fusiontables.table.copy',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'copyPresentation'],
relative_path=u'tables/{tableId}/copy',
request_field='',
request_type_name=u'FusiontablesTableCopyRequest',
response_type_name=u'Table',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a table.
Args:
request: (FusiontablesTableDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(FusiontablesTableDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'fusiontables.table.delete',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}',
request_field='',
request_type_name=u'FusiontablesTableDeleteRequest',
response_type_name=u'FusiontablesTableDeleteResponse',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves a specific table by its id.
Args:
request: (FusiontablesTableGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.table.get',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}',
request_field='',
request_type_name=u'FusiontablesTableGetRequest',
response_type_name=u'Table',
supports_download=False,
)
def ImportRows(self, request, global_params=None, upload=None):
r"""Import more rows into a table.
Args:
request: (FusiontablesTableImportRowsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
upload: (Upload, default: None) If present, upload
this stream with the request.
Returns:
(Import) The response message.
"""
config = self.GetMethodConfig('ImportRows')
upload_config = self.GetUploadConfig('ImportRows')
return self._RunMethod(
config, request, global_params=global_params,
upload=upload, upload_config=upload_config)
ImportRows.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'fusiontables.table.importRows',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'delimiter', u'encoding', u'endLine', u'isStrict', u'startLine'],
relative_path=u'tables/{tableId}/import',
request_field='',
request_type_name=u'FusiontablesTableImportRowsRequest',
response_type_name=u'Import',
supports_download=False,
)
def ImportTable(self, request, global_params=None, upload=None):
r"""Import a new table.
Args:
request: (FusiontablesTableImportTableRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
upload: (Upload, default: None) If present, upload
this stream with the request.
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('ImportTable')
upload_config = self.GetUploadConfig('ImportTable')
return self._RunMethod(
config, request, global_params=global_params,
upload=upload, upload_config=upload_config)
ImportTable.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'fusiontables.table.importTable',
ordered_params=[u'name'],
path_params=[],
query_params=[u'delimiter', u'encoding', u'name'],
relative_path=u'tables/import',
request_field='',
request_type_name=u'FusiontablesTableImportTableRequest',
response_type_name=u'Table',
supports_download=False,
)
def Insert(self, request, global_params=None):
r"""Creates a new table.
Args:
request: (Table) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
Insert.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'fusiontables.table.insert',
ordered_params=[],
path_params=[],
query_params=[],
relative_path=u'tables',
request_field='<request>',
request_type_name=u'Table',
response_type_name=u'Table',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Retrieves a list of tables a user owns.
Args:
request: (FusiontablesTableListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TableList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.table.list',
ordered_params=[],
path_params=[],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'tables',
request_field='',
request_type_name=u'FusiontablesTableListRequest',
response_type_name=u'TableList',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates an existing table. Unless explicitly requested, only the name, description, and attribution will be updated. This method supports patch semantics.
Args:
request: (FusiontablesTablePatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'fusiontables.table.patch',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'replaceViewDefinition'],
relative_path=u'tables/{tableId}',
request_field=u'table',
request_type_name=u'FusiontablesTablePatchRequest',
response_type_name=u'Table',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates an existing table. Unless explicitly requested, only the name, description, and attribution will be updated.
Args:
request: (FusiontablesTableUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'fusiontables.table.update',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'replaceViewDefinition'],
relative_path=u'tables/{tableId}',
request_field=u'table',
request_type_name=u'FusiontablesTableUpdateRequest',
response_type_name=u'Table',
supports_download=False,
)
class TaskService(base_api.BaseApiService):
"""Service class for the task resource."""
_NAME = u'task'
def __init__(self, client):
super(FusiontablesV1.TaskService, self).__init__(client)
self._upload_configs = {
}
def Delete(self, request, global_params=None):
r"""Deletes the task, unless already started.
Args:
request: (FusiontablesTaskDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(FusiontablesTaskDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'fusiontables.task.delete',
ordered_params=[u'tableId', u'taskId'],
path_params=[u'tableId', u'taskId'],
query_params=[],
relative_path=u'tables/{tableId}/tasks/{taskId}',
request_field='',
request_type_name=u'FusiontablesTaskDeleteRequest',
response_type_name=u'FusiontablesTaskDeleteResponse',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves a specific task by its id.
Args:
request: (FusiontablesTaskGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Task) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.task.get',
ordered_params=[u'tableId', u'taskId'],
path_params=[u'tableId', u'taskId'],
query_params=[],
relative_path=u'tables/{tableId}/tasks/{taskId}',
request_field='',
request_type_name=u'FusiontablesTaskGetRequest',
response_type_name=u'Task',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Retrieves a list of tasks.
Args:
request: (FusiontablesTaskListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TaskList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.task.list',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'maxResults', u'pageToken', u'startIndex'],
relative_path=u'tables/{tableId}/tasks',
request_field='',
request_type_name=u'FusiontablesTaskListRequest',
response_type_name=u'TaskList',
supports_download=False,
)
class TemplateService(base_api.BaseApiService):
"""Service class for the template resource."""
_NAME = u'template'
def __init__(self, client):
super(FusiontablesV1.TemplateService, self).__init__(client)
self._upload_configs = {
}
def Delete(self, request, global_params=None):
r"""Deletes a template.
Args:
request: (FusiontablesTemplateDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(FusiontablesTemplateDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'fusiontables.template.delete',
ordered_params=[u'tableId', u'templateId'],
path_params=[u'tableId', u'templateId'],
query_params=[],
relative_path=u'tables/{tableId}/templates/{templateId}',
request_field='',
request_type_name=u'FusiontablesTemplateDeleteRequest',
response_type_name=u'FusiontablesTemplateDeleteResponse',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves a specific template by its id.
Args:
request: (FusiontablesTemplateGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Template) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.template.get',
ordered_params=[u'tableId', u'templateId'],
path_params=[u'tableId', u'templateId'],
query_params=[],
relative_path=u'tables/{tableId}/templates/{templateId}',
request_field='',
request_type_name=u'FusiontablesTemplateGetRequest',
response_type_name=u'Template',
supports_download=False,
)
def Insert(self, request, global_params=None):
r"""Creates a new template for the table.
Args:
request: (Template) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Template) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
Insert.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'fusiontables.template.insert',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[],
relative_path=u'tables/{tableId}/templates',
request_field='<request>',
request_type_name=u'Template',
response_type_name=u'Template',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Retrieves a list of templates.
Args:
request: (FusiontablesTemplateListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TemplateList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.template.list',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'tables/{tableId}/templates',
request_field='',
request_type_name=u'FusiontablesTemplateListRequest',
response_type_name=u'TemplateList',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates an existing template. This method supports patch semantics.
Args:
request: (Template) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Template) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'fusiontables.template.patch',
ordered_params=[u'tableId', u'templateId'],
path_params=[u'tableId', u'templateId'],
query_params=[],
relative_path=u'tables/{tableId}/templates/{templateId}',
request_field='<request>',
request_type_name=u'Template',
response_type_name=u'Template',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates an existing template.
Args:
request: (Template) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Template) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'fusiontables.template.update',
ordered_params=[u'tableId', u'templateId'],
path_params=[u'tableId', u'templateId'],
query_params=[],
relative_path=u'tables/{tableId}/templates/{templateId}',
request_field='<request>',
request_type_name=u'Template',
response_type_name=u'Template',
supports_download=False,
)
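# Illustrative usage sketch (not part of the generated client). The exact
# construction of the client and the request classes depends on the apitools
# runtime and the companion fusiontables_v1_messages module, so the names
# below are assumptions:
#
#   client = FusiontablesV1(credentials=credentials)
#   request = messages.FusiontablesTableListRequest(maxResults=10)
#   table_list = client.table.List(request)
#
# Every service method looks up its ApiMethodInfo via GetMethodConfig and
# dispatches through _RunMethod, so all calls share the same request/response
# handling in base_api.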
|
endlessm/chromium-browser
|
third_party/catapult/third_party/gsutil/third_party/apitools/samples/fusiontables_sample/fusiontables_v1/fusiontables_v1_client.py
|
Python
|
bsd-3-clause
| 34,754
|
import sys
from mock import patch
from pip import pep425tags
class TestPEP425Tags(object):
def mock_get_config_var(self, **kwd):
"""
Patch sysconfig.get_config_var for arbitrary keys.
"""
import pip.pep425tags
get_config_var = pip.pep425tags.sysconfig.get_config_var
def _mock_get_config_var(var):
if var in kwd:
return kwd[var]
return get_config_var(var)
return _mock_get_config_var
def abi_tag_unicode(self, flags, config_vars):
"""
Used to test ABI tags; verifies correct use of the `u` flag.
"""
import pip.pep425tags
config_vars.update({'SOABI': None})
base = pip.pep425tags.get_abbr_impl() + pip.pep425tags.get_impl_ver()
if sys.version_info < (3, 3):
config_vars.update({'Py_UNICODE_SIZE': 2})
mock_gcf = self.mock_get_config_var(**config_vars)
with patch('pip.pep425tags.sysconfig.get_config_var', mock_gcf):
abi_tag = pip.pep425tags.get_abi_tag()
assert abi_tag == base + flags
config_vars.update({'Py_UNICODE_SIZE': 4})
mock_gcf = self.mock_get_config_var(**config_vars)
with patch('pip.pep425tags.sysconfig.get_config_var', mock_gcf):
abi_tag = pip.pep425tags.get_abi_tag()
assert abi_tag == base + flags + 'u'
else:
# On Python >= 3.3, UCS-4 is essentially permanently enabled, and
# Py_UNICODE_SIZE is None. SOABI on these builds does not include
# the 'u' so manual SOABI detection should not do so either.
config_vars.update({'Py_UNICODE_SIZE': None})
mock_gcf = self.mock_get_config_var(**config_vars)
with patch('pip.pep425tags.sysconfig.get_config_var', mock_gcf):
abi_tag = pip.pep425tags.get_abi_tag()
assert abi_tag == base + flags
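# For reference: the assembled tag is get_abbr_impl() + get_impl_ver() plus the
# flag characters, e.g. 'cp27mu' for a pymalloc, wide-unicode CPython 2.7 build.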
def test_broken_sysconfig(self):
"""
Test that pep425tags still works when sysconfig is broken.
This can be a problem on Python 2.7 (issue #1074).
"""
import pip.pep425tags
def raises_ioerror(var):
raise IOError("I have the wrong path!")
with patch('pip.pep425tags.sysconfig.get_config_var', raises_ioerror):
assert len(pip.pep425tags.get_supported())
def test_no_hyphen_tag(self):
"""
Test that no tag contains a hyphen.
"""
import pip.pep425tags
mock_gcf = self.mock_get_config_var(SOABI='cpython-35m-darwin')
with patch('pip.pep425tags.sysconfig.get_config_var', mock_gcf):
supported = pip.pep425tags.get_supported()
for (py, abi, plat) in supported:
assert '-' not in py
assert '-' not in abi
assert '-' not in plat
def test_manual_abi_noflags(self):
"""
Test that no flags are set on a non-PyDebug, non-Pymalloc ABI tag.
"""
self.abi_tag_unicode('', {'Py_DEBUG': False, 'WITH_PYMALLOC': False})
def test_manual_abi_d_flag(self):
"""
Test that the `d` flag is set on a PyDebug, non-Pymalloc ABI tag.
"""
self.abi_tag_unicode('d', {'Py_DEBUG': True, 'WITH_PYMALLOC': False})
def test_manual_abi_m_flag(self):
"""
Test that the `m` flag is set on a non-PyDebug, Pymalloc ABI tag.
"""
self.abi_tag_unicode('m', {'Py_DEBUG': False, 'WITH_PYMALLOC': True})
def test_manual_abi_dm_flags(self):
"""
Test that the `dm` flags are set on a PyDebug, Pymalloc ABI tag.
"""
self.abi_tag_unicode('dm', {'Py_DEBUG': True, 'WITH_PYMALLOC': True})
class TestManylinux1Tags(object):
@patch('pip.pep425tags.get_platform', lambda: 'linux_x86_64')
@patch('pip.utils.glibc.have_compatible_glibc', lambda major, minor: True)
def test_manylinux1_compatible_on_linux_x86_64(self):
"""
Test that manylinux1 is enabled on linux_x86_64
"""
assert pep425tags.is_manylinux1_compatible()
@patch('pip.pep425tags.get_platform', lambda: 'linux_i686')
@patch('pip.utils.glibc.have_compatible_glibc', lambda major, minor: True)
def test_manylinux1_compatible_on_linux_i686(self):
"""
Test that manylinux1 is enabled on linux_i686
"""
assert pep425tags.is_manylinux1_compatible()
@patch('pip.pep425tags.get_platform', lambda: 'linux_x86_64')
@patch('pip.utils.glibc.have_compatible_glibc', lambda major, minor: False)
def test_manylinux1_2(self):
"""
Test that manylinux1 is disabled with incompatible glibc
"""
assert not pep425tags.is_manylinux1_compatible()
@patch('pip.pep425tags.get_platform', lambda: 'arm6vl')
@patch('pip.utils.glibc.have_compatible_glibc', lambda major, minor: True)
def test_manylinux1_3(self):
"""
Test that manylinux1 is disabled on arm6vl
"""
assert not pep425tags.is_manylinux1_compatible()
@patch('pip.pep425tags.get_platform', lambda: 'linux_x86_64')
@patch('pip.utils.glibc.have_compatible_glibc', lambda major, minor: True)
@patch('sys.platform', 'linux2')
def test_manylinux1_tag_is_first(self):
"""
Test that the more specific tag manylinux1 comes first.
"""
groups = {}
for pyimpl, abi, arch in pep425tags.get_supported():
groups.setdefault((pyimpl, abi), []).append(arch)
for arches in groups.values():
if arches == ['any']:
continue
# Expect the most specific arch first:
if len(arches) == 3:
assert arches == ['manylinux1_x86_64', 'linux_x86_64', 'any']
else:
assert arches == ['manylinux1_x86_64', 'linux_x86_64']
|
sigmavirus24/pip
|
tests/unit/test_pep425tags.py
|
Python
|
mit
| 5,894
|
import string
import numpy
import six
import cupy
from cupy import carray
from cupy import cuda
from cupy import util
six_range = six.moves.range
six_zip = six.moves.zip
def _get_simple_elementwise_kernel(
params, operation, name, preamble,
loop_prep='', after_loop='', options=()):
module_code = string.Template('''
${preamble}
extern "C" __global__ void ${name}(${params}) {
${loop_prep};
CUPY_FOR(i, _ind.size()) {
_ind.set(i);
${operation};
}
${after_loop};
}
''').substitute(
params=params,
operation=operation,
name=name,
preamble=preamble,
loop_prep=loop_prep,
after_loop=after_loop)
module = carray.compile_with_cache(module_code, options)
return module.get_function(name)
_typenames = {
numpy.dtype('float64'): 'double',
numpy.dtype('float32'): 'float',
numpy.dtype('float16'): 'float16',
numpy.dtype('int64'): 'long long',
numpy.dtype('int32'): 'int',
numpy.dtype('int16'): 'short',
numpy.dtype('int8'): 'signed char',
numpy.dtype('uint64'): 'unsigned long long',
numpy.dtype('uint32'): 'unsigned int',
numpy.dtype('uint16'): 'unsigned short',
numpy.dtype('uint8'): 'unsigned char',
numpy.dtype('bool'): 'bool',
}
_scalar_type = (int, float, bool) + tuple(t.type for t in _typenames.keys())
def _get_typename(dtype):
if dtype is None:
raise ValueError('dtype is None')
return _typenames[numpy.dtype(dtype)]
def _check_args(args):
dev = cuda.Device()
cp_array = cupy.ndarray
scalar_type = _scalar_type
for arg in args:
if isinstance(arg, cp_array):
if arg.data.device != dev:
raise ValueError('Array device must be the same as the current '
'device: array device = %d while current = %d'
% (arg.device.id, dev.id))
elif not isinstance(arg, scalar_type):
raise TypeError('Unsupported type %s' % type(arg))
def _get_args_info(args):
ret = []
carray_Indexer = carray.Indexer
ret_append = ret.append
for a in args:
t = type(a)
if t == carray_Indexer:
dtype = None
else:
dtype = a.dtype.type
ret_append((t, dtype, a.ndim))
return tuple(ret)
def _get_kernel_params(params, args_info):
ret = []
for p, a in six_zip(params, args_info):
type, dtype, ndim = a
is_array = type is cupy.ndarray
if type is carray.Indexer:
t = 'CIndexer<%d>' % ndim
else:
t = _get_typename(dtype)
if is_array:
t = 'CArray<%s, %d>' % (t, ndim)
ret.append('%s%s %s%s' % ('const ' if p.is_const else '',
t,
'_raw_' if is_array and not p.raw else '',
p.name))
return ', '.join(ret)
def _reduce_dims(args, params, shape):
ndim = len(shape)
if ndim <= 1:
return args, shape
cp_array = cupy.ndarray
is_array_flags = [not p.raw and isinstance(a, cp_array)
for p, a in six_zip(params, args)]
args_strides = [a._strides for a, f in six_zip(args, is_array_flags) if f]
src_shape = shape
shape = list(src_shape)
cnt = 0
for i in six_range(1, ndim):
j = i - 1
shape_i = shape[i]
shape_j = shape[j]
if shape_j == 1:
continue
for strides in args_strides:
if strides[i] * shape_i != strides[j]:
cnt += 1
axis = j
break
else:
shape[i] *= shape_j
shape[j] = 1
if shape[-1] != 1:
cnt += 1
axis = -1
if not cnt:
return args, src_shape
elif cnt == 1:
new_shape = shape[axis],
args = list(args)
for i, a in enumerate(args):
if is_array_flags[i]:
a = args[i] = a.view()
a._shape = new_shape
a._strides = a._strides[axis],
return args, new_shape
new_shape = tuple([dim for dim in shape if dim != 1])
args = list(args)
for i, a in enumerate(args):
if is_array_flags[i]:
a = args[i] = a.view()
a._shape = new_shape
a._strides = tuple(
[st for st, sh in six_zip(a._strides, shape) if sh != 1])
return args, new_shape
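# Worked example (illustrative): for a C-contiguous float32 array of shape
# (4, 5), the strides are (20, 4), so strides[1] * shape[1] == strides[0] and
# the two axes collapse into a single axis of length 20 with stride 4.
# Non-contiguous inputs (e.g. transposed views) break that equality, so their
# axes are kept separate.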
class ParameterInfo(object):
def __init__(self, str, is_const):
self.name = None
self.dtype = None
self.ctype = None
self.raw = False
self.is_const = is_const
s = tuple(i for i in str.split() if len(i) != 0)
if len(s) < 2:
raise Exception('Syntax error: %s' % str)
t, self.name = s[-2:]
if t == 'CIndexer':
pass
elif len(t) == 1:
self.ctype = t
else:
dtype = numpy.dtype(t)
self.dtype = dtype.type
if dtype.name != t:
raise ValueError('Wrong type %s' % t)
self.ctype = _get_typename(self.dtype)
for i in s[:-2]:
if i == 'raw':
self.raw = True
else:
raise Exception('Unknown keyword "%s"' % i)
@util.memoize()
def _get_param_info(s, is_const):
if len(s) == 0:
return ()
return tuple([ParameterInfo(i, is_const) for i in s.strip().split(',')])
@util.memoize()
def _decide_params_type(in_params, out_params, in_args_dtype, out_args_dtype):
type_dict = {}
if out_args_dtype:
assert len(out_params) == len(out_args_dtype)
for p, a in six_zip(out_params, out_args_dtype):
if a is None:
raise TypeError('Output arguments must be cupy.ndarray')
if p.dtype is not None:
if a != p.dtype:
raise TypeError(
'Type is mismatched. %s %s %s' % (p.name, a, p.dtype))
elif p.ctype in type_dict:
t = type_dict[p.ctype]
if t != a:
raise TypeError(
'Type is mismatched. %s %s %s %s' % (
p.name, a, t, p.ctype))
else:
type_dict[p.ctype] = a
assert len(in_params) == len(in_args_dtype)
unknown_ctype = []
for p, a in six_zip(in_params, in_args_dtype):
if a is None:
if p.dtype is None:
unknown_ctype.append(p.ctype)
else:
if p.dtype is not None:
if a != p.dtype:
raise TypeError(
'Type is mismatched. %s %s %s' % (p.name, a, p.dtype))
elif p.ctype in type_dict:
t = type_dict[p.ctype]
if t != a:
raise TypeError(
'Type is mismatched. %s %s %s %s' % (
p.name, a, t, p.ctype))
else:
type_dict[p.ctype] = a
in_types = tuple([type_dict[p.ctype] if p.dtype is None else p.dtype
for p in in_params])
out_types = tuple([type_dict[p.ctype] if p.dtype is None else p.dtype
for p in out_params])
return in_types, out_types, tuple(type_dict.items())
def _broadcast(args, params, use_size):
value = [a if not p.raw and isinstance(a, cupy.ndarray) else None
for p, a in six_zip(params, args)]
if use_size:
for i in value:
if i is None:
break
else:
raise ValueError("Specified 'size' can be used only "
"if all of the ndarray are 'raw'.")
else:
for i in value:
if i is not None:
break
else:
raise ValueError('Loop size is Undecided')
brod = cupy.broadcast(*value)
value = [b if a is None else a
for a, b in six_zip(brod.values, args)]
return value, brod.shape
def _get_out_args(out_args, out_types, out_shape):
if not out_args:
return [cupy.empty(out_shape, t) for t in out_types]
for a in out_args:
if not isinstance(a, cupy.ndarray):
raise TypeError(
'Output arguments type must be cupy.ndarray')
if a.shape != out_shape:
raise ValueError('Out shape is mismatched')
return out_args
def _get_out_args_with_params(out_args, out_types, out_shape, out_params):
if not out_args:
for p in out_params:
if p.raw:
raise ValueError('Output array size is Undecided')
return [cupy.empty(out_shape, t) for t in out_types]
for a, p in six_zip(out_args, out_params):
if not isinstance(a, cupy.ndarray):
raise TypeError(
'Output arguments type must be cupy.ndarray')
if a.shape != out_shape and not p.raw:
raise ValueError('Out shape is mismatched')
return out_args
@util.memoize(for_each_device=True)
def _get_elementwise_kernel(args_info, types, params, operation, name,
preamble, kwargs):
kernel_params = _get_kernel_params(params, args_info)
types_preamble = '\n'.join(
'typedef %s %s;' % (_get_typename(v), k) for k, v in types)
preamble = types_preamble + '\n' + preamble
op = []
for p, a in six_zip(params, args_info):
if not p.raw and a[0] == cupy.ndarray:
if p.is_const:
fmt = 'const {t} {n} = _raw_{n}[_ind.get()];'
else:
fmt = '{t} &{n} = _raw_{n}[_ind.get()];'
op.append(fmt.format(t=p.ctype, n=p.name))
op.append(operation)
operation = '\n'.join(op)
return _get_simple_elementwise_kernel(
kernel_params, operation, name,
preamble, **dict(kwargs))
class ElementwiseKernel(object):
"""User-defined elementwise kernel.
This class can be used to define an elementwise kernel with or without
broadcasting.
The kernel is compiled at an invocation of the
:meth:`~ElementwiseKernel.__call__` method,
which is cached for each device.
The compiled binary is also cached into a file under the
``$HOME/.cupy/kernel_cache/`` directory with a hashed file name. The cached
binary is reused by other processes.
Args:
in_params (str): Input argument list.
out_params (str): Output argument list.
operation (str): The body in the loop written in CUDA-C/C++.
name (str): Name of the kernel function. It should be set for
readability of the performance profiling.
reduce_dims (bool): If False, the shapes of array arguments are
kept within the kernel invocation. The shapes are reduced
(i.e., the arrays are reshaped without copy to the minimum
ndims) by default. This may make the kernel faster by reducing
index calculations.
options (list): Options passed to the nvcc command.
preamble (str): Fragment of the CUDA-C/C++ code that is inserted at the
top of the cu file.
loop_prep (str): Fragment of the CUDA-C/C++ code that is inserted at
the top of the kernel function definition and above the ``for``
loop.
after_loop (str): Fragment of the CUDA-C/C++ code that is inserted at
the bottom of the kernel function definition.
"""
def __init__(self, in_params, out_params, operation,
name='kernel', reduce_dims=True, preamble='', **kwargs):
self.in_params = _get_param_info(in_params, True)
self.out_params = _get_param_info(out_params, False)
self.nin = len(self.in_params)
self.nout = len(self.out_params)
self.nargs = self.nin + self.nout
param_rest = _get_param_info('CIndexer _ind', False)
self.params = self.in_params + self.out_params + param_rest
self.operation = operation
self.name = name
self.reduce_dims = reduce_dims
self.preamble = preamble
self.kwargs = frozenset(kwargs.items())
names = [p.name for p in self.in_params + self.out_params]
if 'i' in names:
raise ValueError("Can not use 'i' as a parameter name")
def __call__(self, *args, **kwargs):
"""Compiles and invokes the elementwise kernel.
The compilation runs only if the kernel is not cached. Note that
kernels with different argument dtypes or ndims are not compatible, so
a single ElementwiseKernel object may be compiled into multiple kernel
binaries.
Args:
args: Arguments of the kernel.
size (int): Range size of the indices. If specified, the variable
``n`` is set to this value. Otherwise, the result of
broadcasting is used to determine the value of ``n``.
Returns:
Arrays are returned according to the ``out_params`` argument of the
``__init__`` method.
"""
size = kwargs.pop('size', None)
if kwargs:
raise TypeError('Wrong arguments %s' % kwargs)
n_args = len(args)
if n_args != self.nin and n_args != self.nargs:
raise TypeError('Wrong number of arguments for %s' % self.name)
_check_args(args)
values, shape = _broadcast(args, self.params, size is not None)
in_args = values[:self.nin]
out_args = values[self.nin:]
cp_array = cupy.ndarray
in_ndarray_types = tuple(
[a.dtype.type if isinstance(a, cp_array) else None
for a in in_args])
out_ndarray_types = tuple(
[a.dtype.type if isinstance(a, cp_array) else None
for a in out_args])
in_types, out_types, types = _decide_params_type(
self.in_params, self.out_params,
in_ndarray_types, out_ndarray_types)
out_args = _get_out_args_with_params(
out_args, out_types, shape, self.out_params)
if self.nout == 1:
ret = out_args[0]
else:
ret = tuple(out_args)
if size is not None:
shape = size,
if 0 in shape:
return ret
inout_args = [x if isinstance(x, cp_array) else t(x)
for x, t in six_zip(in_args, in_types)]
inout_args += out_args
if self.reduce_dims:
inout_args, shape = _reduce_dims(
inout_args, self.params, shape)
indexer = carray.Indexer(shape)
inout_args.append(indexer)
args_info = _get_args_info(inout_args)
kern = _get_elementwise_kernel(
args_info, types, self.params, self.operation,
self.name, self.preamble, self.kwargs)
kern.linear_launch(indexer.size, inout_args)
return ret
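# Illustrative usage (commented out; compiling and launching requires a CUDA
# device, and the dtypes shown are only an example):
#
#   squared_diff = ElementwiseKernel(
#       'float32 x, float32 y',
#       'float32 z',
#       'z = (x - y) * (x - y)',
#       'squared_diff')
#   z = squared_diff(x, y)   # x, y: cupy.ndarray of dtype float32
#
# The first call compiles and caches the kernel for the (dtype, ndim)
# combination of the arguments; later calls with the same signature reuse it.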
@util.memoize(for_each_device=True)
def _get_ufunc_kernel(in_types, out_types, routine, args_info, out_raw_types,
params, name, preamble):
kernel_params = _get_kernel_params(params, args_info)
types = []
op = []
for i, x in enumerate(in_types):
types.append('typedef %s in%d_type;' % (_get_typename(x), i))
if args_info[i][0] is cupy.ndarray:
op.append(
'const in{0}_type in{0} = _raw_in{0}[_ind.get()];'.format(i))
for i, x in enumerate(out_types):
types.append('typedef %s out%d_type;' % (_get_typename(x), i))
op.append('{1} &out{0} = _raw_out{0}[_ind.get()];'.format(
i, _get_typename(out_raw_types[i])))
op.append(routine)
operation = '\n'.join(op)
types.append(preamble)
preamble = '\n'.join(types)
return _get_simple_elementwise_kernel(
kernel_params, operation, name, preamble)
def _guess_routine_from_in_types(ops, in_types):
for op in ops:
for dst, src in six_zip(op[0], in_types):
if not numpy.can_cast(src, dst):
break
else:
return op
return None
def _guess_routine_from_dtype(ops, dtype):
for op in ops:
for t in op[1]:
if t != dtype:
break
else:
return op
return None
def _guess_routine(name, cache, ops, in_args, dtype):
if dtype is None:
key = tuple([numpy.dtype(type(i)).type
if isinstance(i, (int, float, bool)) else i.dtype.type
for i in in_args])
else:
key = dtype
op = cache.get(key, ())
if op is ():
if dtype is None:
op = _guess_routine_from_in_types(ops, key)
else:
op = _guess_routine_from_dtype(ops, key)
cache[key] = op
if op:
return op
raise TypeError('Wrong type of arguments for %s' % name)
class ufunc(object):
"""Universal function.
Attributes:
name (str): The name of the universal function.
nin (int): Number of input arguments.
nout (int): Number of output arguments.
nargs (int): Number of all arguments.
"""
def __init__(self, name, nin, nout, ops, preamble='', doc=''):
self.name = name
self.nin = nin
self.nout = nout
self.nargs = nin + nout
self._ops = ops
self._preamble = preamble
self.__doc__ = doc
_in_params = tuple(
ParameterInfo('T in%d' % i, True)
for i in six_range(nin))
_out_params = tuple(
ParameterInfo('T out%d' % i, False)
for i in six_range(nout))
self._params = _in_params + _out_params + (
ParameterInfo('CIndexer _ind', False),)
self._routine_cache = {}
def __repr__(self):
return "<ufunc '%s'>" % self.name
@property
def types(self):
"""A list of type signatures.
Each type signature is represented by type character codes of inputs
and outputs separated by '->'.
"""
types = []
for in_types, out_types, _ in self._ops:
in_str = ''.join([numpy.dtype(t).char for t in in_types])
out_str = ''.join([numpy.dtype(t).char for t in out_types])
types.append('%s->%s' % (in_str, out_str))
return types
def __call__(self, *args, **kwargs):
"""Applies the universal function to arguments elementwise.
Args:
args: Input arguments. Each of them can be a cupy.ndarray object or
a scalar. The output arguments can be omitted or be specified
by the ``out`` argument.
out (cupy.ndarray): Output array. By default, new arrays are
created and returned.
dtype: Data type specifier.
Returns:
Output array or a tuple of output arrays.
"""
out = kwargs.pop('out', None)
dtype = kwargs.pop('dtype', None)
if dtype is not None:
dtype = numpy.dtype(dtype).type
if kwargs:
raise TypeError('Wrong arguments %s' % kwargs)
n_args = len(args)
if n_args != self.nin and n_args != self.nargs:
raise TypeError('Wrong number of arguments for %s' % self.name)
if out is None:
in_args = args[:self.nin]
out_args = args[self.nin:]
else:
if self.nout != 1:
raise ValueError("Cannot use 'out' in %s" % self.name)
if n_args != self.nin:
raise ValueError("Cannot specify 'out' as both "
"a positional and keyword argument")
in_args = args
out_args = out,
args += out_args
_check_args(args)
broad = cupy.broadcast(*args)
shape = broad.shape
in_types, out_types, routine = _guess_routine(
self.name, self._routine_cache, self._ops, in_args, dtype)
out_args = _get_out_args(out_args, out_types, shape)
if self.nout == 1:
ret = out_args[0]
else:
ret = tuple(out_args)
if 0 in shape:
return ret
inout_args = [x if isinstance(x, cupy.ndarray) else t(x)
for x, t in six_zip(broad.values, in_types)]
inout_args.extend(out_args)
inout_args, shape = _reduce_dims(inout_args, self._params, shape)
indexer = carray.Indexer(shape)
inout_args.append(indexer)
args_info = _get_args_info(inout_args)
out_raw_types = tuple([x.dtype.type for x in out_args])
kern = _get_ufunc_kernel(
in_types, out_types, routine,
args_info, out_raw_types,
self._params, self.name, self._preamble)
kern.linear_launch(indexer.size, inout_args)
return ret
def create_ufunc(name, ops, routine=None, preamble='', doc=''):
_ops = []
for t in ops:
if not isinstance(t, tuple):
typ = t
rt = routine
else:
typ, rt = t
types = typ.split('->')
if len(types) == 1:
in_types = out_types = tuple(types)
else:
in_types, out_types = map(tuple, types)
in_types = tuple([numpy.dtype(t).type for t in in_types])
out_types = tuple([numpy.dtype(t).type for t in out_types])
_ops.append((in_types, out_types, rt))
return ufunc(name, len(_ops[0][0]), len(_ops[0][1]), _ops, preamble, doc)
_id = 'out0 = in0'
copy = create_ufunc(
'cupy_copy',
('?->?', 'b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',
'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d'),
_id)
copy_where = create_ufunc(
'cupy_copy_where',
('??->?', 'b?->b', 'B?->B', 'h?->h', 'H?->H', 'i?->i', 'I?->I', 'l?->l',
'L?->L', 'q?->q', 'Q?->Q', 'e?->e', 'f?->f', 'd?->d'),
'if (in1) out0 = in0')
_divmod = create_ufunc(
'cupy_divmod',
('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
'out0_type a = _floor_divide(in0, in1); out0 = a; out1 = in0 - a * in1')
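# Illustrative usage of the ufuncs defined above (requires a CUDA device;
# x, mask and y are assumed to be cupy.ndarray instances):
#
#   y = copy(x)                 # new array with the same values as x
#   copy(x, out=y)              # writes into an existing array instead
#   copy_where(x, mask, out=y)  # copies only where mask is True
#
# The routine is selected per call by _guess_routine from the argument dtypes
# (or an explicit dtype= keyword) against each ufunc's type table.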
|
sou81821/chainer
|
cupy/elementwise.py
|
Python
|
mit
| 22,222
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains routines for printing protocol messages in text format."""
import cStringIO
import re
from collections import deque
from google.net.proto2.python.internal import type_checkers
from google.net.proto2.python.public import descriptor
__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField',
'PrintFieldValue', 'Merge' ]
_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(),
type_checkers.Int32ValueChecker(),
type_checkers.Uint64ValueChecker(),
type_checkers.Int64ValueChecker())
_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile('nanf?', re.IGNORECASE)
class ParseError(Exception):
"""Thrown in case of ASCII parsing error."""
def MessageToString(message, as_utf8=False, as_one_line=False):
out = cStringIO.StringIO()
PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result
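# Example of the produced text format (illustrative; 'foo', 'bar' and 'nested'
# are hypothetical field names):
#
#   foo: 1
#   bar: "hello"
#   nested {
#     baz: true
#   }
#
# With as_one_line=True the same message is rendered on a single line with
# fields separated by spaces.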
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False):
for field, value in message.ListFields():
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
PrintField(field, element, out, indent, as_utf8, as_one_line)
else:
PrintField(field, value, out, indent, as_utf8, as_one_line)
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False):
"""Print a single field name/value pair. For repeated fields, the value
should be a single element."""
out.write(' ' * indent)
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
out.write(': ')
PrintFieldValue(field, value, out, indent, as_utf8, as_one_line)
if as_one_line:
out.write(' ')
else:
out.write('\n')
def PrintFieldValue(field, value, out, indent=0,
as_utf8=False, as_one_line=False):
"""Print a single field value (not including name). For repeated fields,
the value should be a single element."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
if as_one_line:
out.write(' { ')
PrintMessage(value, out, indent, as_utf8, as_one_line)
out.write('}')
else:
out.write(' {\n')
PrintMessage(value, out, indent + 2, as_utf8, as_one_line)
out.write(' ' * indent + '}')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
enum_value = field.enum_type.values_by_number.get(value, None)
if enum_value is not None:
out.write(enum_value.name)
else:
out.write(str(value))
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
out.write('\"')
if type(value) is unicode:
out.write(_CEscape(value.encode('utf-8'), as_utf8))
else:
out.write(_CEscape(value, as_utf8))
out.write('\"')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
if value:
out.write("true")
else:
out.write("false")
else:
out.write(str(value))
def Merge(text, message):
"""Merges an ASCII representation of a protocol message into a message.
Args:
text: Message ASCII representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On ASCII parsing problems.
"""
tokenizer = _Tokenizer(text)
while not tokenizer.AtEnd():
_MergeField(tokenizer, message)
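# Illustrative round trip (message_pb2 and MyMessage are hypothetical):
#
#   msg = message_pb2.MyMessage()
#   Merge('foo: 1 bar: "hello"', msg)
#   assert MessageToString(msg, as_one_line=True) == 'foo: 1 bar: "hello"'
#
# Merge is additive: calling it repeatedly on the same message appends to
# repeated fields and overwrites scalar fields that are mentioned again.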
def _MergeField(tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of ASCII parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
field = message.Extensions._FindExtensionByName(name)
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered.' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' % (
name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifier()
field = message_descriptor.fields_by_name.get(name, None)
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
field = None
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
field.message_type.name != name):
field = None
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' % (
message_descriptor.full_name, name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
sub_message = message.Extensions[field].add()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
_MergeField(tokenizer, sub_message)
else:
_MergeScalarField(tokenizer, message, field)
def _MergeScalarField(tokenizer, message, field):
"""Merges a single protocol message scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of ASCII parsing problems.
RuntimeError: On runtime errors.
"""
tokenizer.Consume(':')
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = tokenizer.ConsumeInt32()
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = tokenizer.ConsumeInt64()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = tokenizer.ConsumeUint32()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = tokenizer.ConsumeUint64()
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
value = tokenizer.ConsumeEnum(field)
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
if field.is_extension:
message.Extensions[field] = value
else:
setattr(message, field.name, value)
class _Tokenizer(object):
"""Protocol buffer ASCII representation tokenizer.
This class handles the lower level string parsing by splitting it into
meaningful tokens.
It was directly ported from the Java protocol buffer API.
"""
_WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
_TOKEN = re.compile(
'[a-zA-Z_][0-9a-zA-Z_+-]*|'
'[0-9+-][0-9a-zA-Z_.+-]*|'
'\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|'
'\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)')
_IDENTIFIER = re.compile('\w+')
def __init__(self, text_message):
self._text_message = text_message
self._position = 0
self._line = -1
self._column = 0
self._token_start = None
self.token = ''
self._lines = deque(text_message.split('\n'))
self._current_line = ''
self._previous_line = 0
self._previous_column = 0
self._SkipWhitespace()
self.NextToken()
def AtEnd(self):
"""Checks the end of the text was reached.
Returns:
True iff the end was reached.
"""
return self.token == ''
def _PopLine(self):
while len(self._current_line) <= self._column:
if not self._lines:
self._current_line = ''
return
self._line += 1
self._column = 0
self._current_line = self._lines.popleft()
def _SkipWhitespace(self):
while True:
self._PopLine()
match = self._WHITESPACE.match(self._current_line, self._column)
if not match:
break
length = len(match.group(0))
self._column += length
def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
"""
if self.token == token:
self.NextToken()
return True
return False
def Consume(self, token):
"""Consumes a piece of text.
Args:
token: Text to consume.
Raises:
ParseError: If the text couldn't be consumed.
"""
if not self.TryConsume(token):
raise self._ParseError('Expected "%s".' % token)
def ConsumeIdentifier(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER.match(result):
raise self._ParseError('Expected identifier.')
self.NextToken()
return result
def ConsumeInt32(self):
"""Consumes a signed 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 32bit integer couldn't be consumed.
"""
try:
result = ParseInteger(self.token, is_signed=True, is_long=False)
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def ConsumeUint32(self):
"""Consumes an unsigned 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 32bit integer couldn't be consumed.
"""
try:
result = ParseInteger(self.token, is_signed=False, is_long=False)
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def ConsumeInt64(self):
"""Consumes a signed 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 64bit integer couldn't be consumed.
"""
try:
result = ParseInteger(self.token, is_signed=True, is_long=True)
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def ConsumeUint64(self):
"""Consumes an unsigned 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 64bit integer couldn't be consumed.
"""
try:
result = ParseInteger(self.token, is_signed=False, is_long=True)
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def ConsumeFloat(self):
"""Consumes an floating point number.
Returns:
The number parsed.
Raises:
ParseError: If a floating point number couldn't be consumed.
"""
try:
result = ParseFloat(self.token)
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def ConsumeBool(self):
"""Consumes a boolean value.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
try:
result = ParseBool(self.token)
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
bytes = self.ConsumeByteString()
try:
return unicode(bytes, 'utf-8')
except UnicodeDecodeError, e:
raise self._StringParseError(e)
def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
list = [self._ConsumeSingleByteString()]
while len(self.token) > 0 and self.token[0] in ('\'', '"'):
list.append(self._ConsumeSingleByteString())
return "".join(list)
def _ConsumeSingleByteString(self):
"""Consume one token of a string literal.
String literals (whether bytes or text) can come in multiple adjacent
tokens which are automatically concatenated, like in C or Python. This
method only consumes one token.
"""
text = self.token
if len(text) < 1 or text[0] not in ('\'', '"'):
raise self._ParseError('Expected string.')
if len(text) < 2 or text[-1] != text[0]:
raise self._ParseError('String missing ending quote.')
try:
result = _CUnescape(text[1:-1])
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
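  # For example (illustrative values): the two adjacent tokens '"foo"' and
  # "'bar'" are each consumed here and then concatenated by ConsumeByteString()
  # above into 'foobar'.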
def ConsumeEnum(self, field):
try:
result = ParseEnum(field, self.token)
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def ParseErrorPreviousToken(self, message):
"""Creates and *returns* a ParseError for the previously read token.
Args:
message: A message to set for the exception.
Returns:
A ParseError instance.
"""
return ParseError('%d:%d : %s' % (
self._previous_line + 1, self._previous_column + 1, message))
def _ParseError(self, message):
"""Creates and *returns* a ParseError for the current token."""
return ParseError('%d:%d : %s' % (
self._line + 1, self._column + 1, message))
def _StringParseError(self, e):
return self._ParseError('Couldn\'t parse string: ' + str(e))
def NextToken(self):
"""Reads the next meaningful token."""
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._lines and len(self._current_line) <= self._column:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column]
def _CEscape(text, as_utf8):
def escape(c):
o = ord(c)
if o == 10: return r"\n"
if o == 13: return r"\r"
if o == 9: return r"\t"
if o == 39: return r"\'"
if o == 34: return r'\"'
if o == 92: return r"\\"
if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o
return c
return "".join([escape(c) for c in text])
_CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])')
def _CUnescape(text):
def ReplaceHex(m):
return chr(int(m.group(0)[2:], 16))
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
return result.decode('string_escape')
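# Round-trip examples (illustrative only): _CEscape('a\tb"', as_utf8=False)
# produces the text a\tb\" with literal backslashes, and _CUnescape(r'\x41\n')
# yields 'A\n' (hex escapes are expanded before the string_escape decoding).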
def ParseInteger(text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
    ValueError: If the text is not a valid integer.
"""
try:
result = int(text, 0)
except ValueError:
raise ValueError('Couldn\'t parse integer: %s' % text)
checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result
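# The index 2*int(is_long) + int(is_signed) picks the matching range checker
# from _INTEGER_CHECKERS defined earlier in this module (the uint32 / int32 /
# uint64 / int64 ordering is assumed from this indexing scheme). For example,
# ParseInteger('0xff') returns 255 via int(text, 0), while out-of-range values
# are rejected by the selected checker.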
def ParseFloat(text):
"""Parse a floating point number.
Args:
text: Text to parse.
Returns:
The number parsed.
Raises:
ValueError: If a floating point number couldn't be parsed.
"""
try:
return float(text)
except ValueError:
if _FLOAT_INFINITY.match(text):
if text[0] == '-':
return float('-inf')
else:
return float('inf')
elif _FLOAT_NAN.match(text):
return float('nan')
else:
try:
return float(text.rstrip('f'))
except ValueError:
raise ValueError('Couldn\'t parse float: %s' % text)
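# Examples (illustrative): ParseFloat('1.5f') returns 1.5 (a trailing 'f' is
# stripped before retrying float()), and 'inf', '-inf' and 'nan' are accepted
# as the corresponding special float values.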
def ParseBool(text):
"""Parse a boolean value.
Args:
text: Text to parse.
Returns:
    The boolean value parsed.
Raises:
ValueError: If text is not a valid boolean.
"""
if text in ('true', 't', '1'):
return True
elif text in ('false', 'f', '0'):
return False
else:
raise ValueError('Expected "true" or "false".')
def ParseEnum(field, value):
"""Parse an enum value.
The value can be specified by a number (the enum value), or by
a string literal (the enum name).
Args:
field: Enum field descriptor.
value: String value.
Returns:
Enum value number.
Raises:
ValueError: If the enum value could not be parsed.
"""
enum_descriptor = field.enum_type
try:
number = int(value, 0)
except ValueError:
enum_value = enum_descriptor.values_by_name.get(value, None)
if enum_value is None:
raise ValueError(
'Enum type "%s" has no value named %s.' % (
enum_descriptor.full_name, value))
else:
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise ValueError(
'Enum type "%s" has no value with number %d.' % (
enum_descriptor.full_name, number))
return enum_value.number
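# Example (illustrative): for a field whose enum type defines FOO = 1, both
# ParseEnum(field, 'FOO') and ParseEnum(field, '1') return 1.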
|
GdZ/scriptfile
|
software/googleAppEngine/google/net/proto2/python/public/text_format.py
|
Python
|
mit
| 19,690
|
#! /usr/bin/env python
# Copyright (c) 2014 Quanta Research Cambridge, Inc
# Original author John Ankcorn
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import sys
print('preprocess_trace.py:', sys.argv)
cppind = []
bsvind = []
for filename in sys.argv[2:]:
data = open(filename).readlines()
hasdisplay = False
hasdispind = False
for line in data:
if line.find('$display') >= 0:
hasdisplay = True
if line.find('printfInd') >= 0:
hasdispind = True
if hasdisplay and hasdispind:
fname = sys.argv[1] + '/generatedbsv/' + filename
fh = open(fname, 'w')
for line in data:
ind = line.find('$display')
if ind >= 0:
param = line[ind+8:].strip()[1:][:-2].strip()
formatstr = ''
pitem = ''
level = 0
informat = True
pactual = []
for ch in param[1:]:
if informat:
if ch == '"':
if level == 0:
informat = False
else:
formatstr = formatstr + ch
elif ch == ',':
if pitem != '':
pactual.append(pitem.strip())
pitem = ''
else:
pitem = pitem + ch
pactual.append(pitem.strip())
freplace = 'printfind_'
lastch = ''
plist = []
for ch in formatstr:
if lastch == '%':
if ch == 'x':
plist.append('Bit#(32)')
else:
print('unknown format char', ch)
if ch == '-':
freplace = freplace + '__'
elif (ch >= 'A' and ch <= 'Z') or (ch >= 'a' and ch <= 'z') or (ch >= '0' and ch <= '9'):
freplace = freplace + ch
else:
freplace = freplace + '_' + '{:02x}'.format(ord(ch))
lastch = ch
line = line[:ind] + 'printfInd.' + freplace + '(' + ','.join(pactual) + ');\n'
pformal = ''
pactual = ''
pbsv = ''
pcount = 1
for item in plist:
if pcount > 1:
pformal = pformal + ', '
pactual = pactual + ', '
pbsv = pbsv + ', '
pvar = 'v%d' % pcount
pcount = pcount + 1
if item == 'Bit#(32)':
pformal = pformal + 'uint32_t ' + pvar
pactual = pactual + pvar
pbsv = pbsv + item + ' ' + pvar
cppind.append(' void ' + freplace + '(' + pformal + ') { printf("' + formatstr + '\\n", ' + pactual + '); }\n')
bsvind.append(' method Action ' + freplace + '(' + pbsv + ');\n')
fh.write(line)
fh.close()
if cppind != []:
fname = sys.argv[1] + '/jni/printfInd.h'
fh = open(fname, 'w')
fh.write('class DisplayInd : public DisplayIndWrapper\n')
fh.write('{\n')
fh.write('public:\n')
fh.write(' DisplayInd(unsigned int id, PortalPoller *poller) : DisplayIndWrapper(id, poller) {}\n')
for item in cppind:
fh.write(item)
fh.write('};\n')
fh.close()
if bsvind != []:
fname = sys.argv[1] + '/generatedbsv/DisplayInd.bsv'
fh = open(fname, 'w')
fh.write('interface DisplayInd;\n')
for item in bsvind:
fh.write(item)
fh.write('endinterface\n')
fh.close()
sys.exit(0)
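# A hedged, illustrative trace of the rewrite above (input values are made up):
# given a source line such as
#     $display("fred %x", x);
# in a file that also mentions printfInd, the line is rewritten to
#     printfInd.printfind_fred_20_25x(x);
# (the space and '%' are hex-escaped as _20 and _25), and the generated
# wrappers become roughly
#     C++:  void printfind_fred_20_25x(uint32_t v1) { printf("fred %x\n", v1); }
#     BSV:  method Action printfind_fred_20_25x(Bit#(32) v1);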
|
cambridgehackers/connectal
|
scripts/preprocess_trace.py
|
Python
|
mit
| 4,872
|
############################################################################
# Generic script applicable to any operating environment (Unix, Windows)
# ScriptName : wls_reset.py
# Properties : weblogic.properties
# Author : Kevin Yuan
############################################################################
#===========================================================================
# Connect to wls server
#===========================================================================
connect('@WL_USR@','@WL_PWD@','t3://@WL_HOST@:@WL_PORT@')
#===========================================================================
# Remove Data Sources using wlst on-line commands
#===========================================================================
edit()
startEdit()
delete('@DS_NAME@','JDBCSystemResource')
delete('@NON_JTA_DS_NAME@','JDBCSystemResource')
save()
activate()
exit()
|
RallySoftware/eclipselink.runtime
|
jpa/eclipselink.jpars.test/resource/weblogic/wls_reset.py
|
Python
|
epl-1.0
| 904
|
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, class (GUICG2)
import GemRB
from GUIDefines import *
import CommonTables
ClassWindow = 0
TextAreaControl = 0
DoneButton = 0
BackButton = 0
ClassCount = 0
HasSubClass = 0
ClassID = 0
def AdjustTextArea():
global HasSubClass, ClassID
Class = GemRB.GetVar("Class")-1
TextAreaControl.SetText(CommonTables.Classes.GetValue(Class,1) )
ClassName = CommonTables.Classes.GetRowName(Class)
ClassID = CommonTables.Classes.GetValue(ClassName, "ID")
#determining if this class has any subclasses
HasSubClass = 0
for i in range(1, ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
#determining if this is a kit or class
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed != ClassID:
continue
HasSubClass = 1
break
if HasSubClass == 0:
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
else:
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
return
def OnLoad():
global ClassWindow, TextAreaControl, DoneButton, BackButton
global ClassCount
GemRB.LoadWindowPack("GUICG", 800, 600)
#this replaces help02.2da for class restrictions
ClassCount = CommonTables.Classes.GetRowCount()+1
ClassWindow = GemRB.LoadWindow(2)
rid = CommonTables.Races.FindValue(3, GemRB.GetVar('BaseRace'))
RaceName = CommonTables.Races.GetRowName(rid)
#radiobutton groups must be set up before doing anything else to them
j = 0
for i in range(1,ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed > 0:
continue
Button = ClassWindow.GetControl(j+2)
j = j+1
Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON, OP_SET)
Button.SetState(IE_GUI_BUTTON_DISABLED)
j = 0
for i in range(1,ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
#determining if this is a kit or class
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed > 0:
continue
Allowed = CommonTables.Classes.GetValue(ClassName, RaceName)
Button = ClassWindow.GetControl(j+2)
j = j+1
t = CommonTables.Classes.GetValue(ClassName, "NAME_REF")
Button.SetText(t )
if Allowed==0:
continue
Button.SetState(IE_GUI_BUTTON_ENABLED)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, ClassPress)
Button.SetVarAssoc("Class", i)
BackButton = ClassWindow.GetControl(17)
BackButton.SetText(15416)
BackButton.SetFlags(IE_GUI_BUTTON_CANCEL,OP_OR)
DoneButton = ClassWindow.GetControl(0)
DoneButton.SetText(36789)
DoneButton.SetFlags(IE_GUI_BUTTON_DEFAULT,OP_OR)
ScrollBarControl = ClassWindow.GetControl(15)
TextAreaControl = ClassWindow.GetControl(16)
Class = GemRB.GetVar("Class")-1
if Class<0:
TextAreaControl.SetText(17242)
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
else:
AdjustTextArea()
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress)
ClassWindow.SetVisible(WINDOW_VISIBLE)
return
def ClassPress():
global HasSubClass
AdjustTextArea()
if HasSubClass == 0:
return
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
j = 0
for i in range(1,ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed > 0:
continue
Button = ClassWindow.GetControl(j+2)
j = j+1
Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON, OP_SET)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetText("")
j=0
for i in range(1, ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
#determining if this is a kit or class
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed != ClassID:
continue
Button = ClassWindow.GetControl(j+2)
j = j+1
t = CommonTables.Classes.GetValue(ClassName, "NAME_REF")
Button.SetText(t )
Button.SetState(IE_GUI_BUTTON_ENABLED)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, ClassPress2)
Button.SetVarAssoc("Class", i)
BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress2)
return
def ClassPress2():
Class = GemRB.GetVar("Class")-1
TextAreaControl.SetText(CommonTables.Classes.GetValue(Class,1) )
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
return
def BackPress2():
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
if ClassWindow:
ClassWindow.Unload()
OnLoad()
return
def BackPress():
if ClassWindow:
ClassWindow.Unload()
GemRB.SetNextScript("CharGen3")
GemRB.SetVar("Class",0) #scrapping the class value
MyChar = GemRB.GetVar("Slot")
	GemRB.SetPlayerStat (MyChar, IE_CLASS, 0)
return
def NextPress():
#classcolumn is base class
Class = GemRB.GetVar("Class")
ClassColumn = CommonTables.Classes.GetValue(Class - 1, 3)
if ClassColumn <= 0: #it was already a base class
ClassColumn = Class
GemRB.SetVar("BaseClass", ClassColumn)
if ClassWindow:
ClassWindow.Unload()
GemRB.SetNextScript("CharGen4") #alignment
return
|
tomprince/gemrb
|
gemrb/GUIScripts/iwd2/Class.py
|
Python
|
gpl-2.0
| 5,585
|
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002 Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###############################################################################
#
# $Id: test_LD_X_decr.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the LD_X_decr opcode.
"""
import base_test
from registers import Reg, SREG
class LD_X_decr_TestFail(base_test.TestFail): pass
class base_LD_X_decr(base_test.opcode_test):
"""Generic test case for testing LD_X_decr opcode.
LD_X_decr - Load Indirect from data space to Register using index X and
pre decrement X.
Operation: X <- X - 1 then Rd <- (X)
opcode is '1001 000d dddd 1110' where 0 <= d <= 31 and d != {26,27}
Only registers PC, R26, R27 and Rd should be changed.
"""
def setup(self):
# Set the register values
self.setup_regs[self.Rd] = 0
self.setup_regs[Reg.R26] = (self.X & 0xff)
self.setup_regs[Reg.R27] = ((self.X >> 8) & 0xff)
# set up the val in memory (memory is read after X is decremented,
# thus we need to write to memory _at_ X - 1)
self.mem_byte_write( self.X - 1, self.Vd )
# Return the raw opcode
return 0x900E | (self.Rd << 4)
def analyze_results(self):
self.reg_changed.extend( [self.Rd, Reg.R26, Reg.R27] )
# check that result is correct
expect = self.Vd
got = self.anal_regs[self.Rd]
if expect != got:
self.fail('LD_X_decr: expect=%02x, got=%02x' % (expect, got))
# check that X was decremented
expect = self.X - 1
got = (self.anal_regs[Reg.R26] & 0xff) | ((self.anal_regs[Reg.R27] << 8) & 0xff00)
if expect != got:
self.fail('LD_X_decr X not decr: expect=%04x, got=%04x' % (expect, got))
#
# Template code for test case.
# The fail method will raise a test specific exception.
#
template = """
class LD_X_decr_r%02d_X%04x_v%02x_TestFail(LD_X_decr_TestFail): pass
class test_LD_X_decr_r%02d_X%04x_v%02x(base_LD_X_decr):
Rd = %d
X = 0x%x
Vd = 0x%x
def fail(self,s):
raise LD_X_decr_r%02d_X%04x_v%02x_TestFail, s
"""
#
# automagically generate the test_LD_X_decr_rNN_vXX class definitions.
#
# Operation is undefined for d = 26 and d = 27.
#
code = ''
for d in range(0,26)+range(28,32):
for x in (0x10f, 0x1ff):
for v in (0xaa, 0x55):
args = (d,x,v)*4
code += template % args
exec code
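# For illustration (a sketch of what the loop above generates, not extra code):
# the first pair, for d=0, x=0x10f, v=0xaa, expands the template to roughly
#
#   class LD_X_decr_r00_X010f_vaa_TestFail(LD_X_decr_TestFail): pass
#
#   class test_LD_X_decr_r00_X010f_vaa(base_LD_X_decr):
#     Rd = 0
#     X = 0x10f
#     Vd = 0xaa
#     def fail(self,s):
#       raise LD_X_decr_r00_X010f_vaa_TestFail, s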
|
zouppen/simulavr
|
regress/test_opcodes/test_LD_X_decr.py
|
Python
|
gpl-2.0
| 3,080
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Abiquo
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to the Abiquo API.
Requires some python libraries; ensure they are installed when using this script.
This script has been tested with Abiquo 3.0 but it may also work with Abiquo 2.6.
Before using this script you may want to modify the abiquo.ini config file.
This script generates an Ansible hosts file with these host groups:
ABQ_xxx: Defines a host itself by its Abiquo VM name label
all: Contains all hosts defined in the Abiquo user's enterprise
virtualdatacenter: Creates a host group for each virtualdatacenter containing all hosts defined on it
virtualappliance: Creates a host group for each virtualappliance containing all hosts defined on it
imagetemplate: Creates a host group for each image template containing all hosts using it
'''
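# Illustrative output only (names and addresses below are made-up examples):
# for a single VM "web01" with NIC 192.0.2.10 living in virtualdatacenter
# "myVDC", virtualappliance "myVAPP" and template "ubuntu",
# generate_inv_from_api() below would return roughly
#   {
#     "all":    {"children": ["web01"], "hosts": []},
#     "myVDC":  {"children": ["web01"], "hosts": []},
#     "myVAPP": {"children": ["web01"], "hosts": []},
#     "ubuntu": {"children": ["web01"], "hosts": []},
#     "web01":  ["192.0.2.10"],
#     "_meta":  {"hostvars": {"192.0.2.10": {...}}}   # when get_metadata is enabled
#   }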
# (c) 2014, Daniel Beneyto <daniel.beneyto@abiquo.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
from ansible.module_utils.urls import open_url
def api_get(link, config):
try:
if link is None:
url = config.get('api','uri') + config.get('api','login_path')
headers = {"Accept": config.get('api','login_type')}
else:
url = link['href'] + '?limit=0'
headers = {"Accept": link['type']}
result = open_url(url, headers=headers, url_username=config.get('auth','apiuser').replace('\n', ''),
url_password=config.get('auth','apipass').replace('\n', ''))
return json.loads(result.read())
except:
return None
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache','cache_dir')
try:
cache = open('/'.join([dpath,'inventory']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError as e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache','cache_dir')
inv = {}
try:
cache = open('/'.join([dpath,'inventory']), 'r')
inv = cache.read()
cache.close()
except IOError as e:
pass # not really sure what to do here
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache','cache_dir'):
dpath = config.get('cache','cache_dir')
try:
existing = os.stat( '/'.join([dpath,'inventory']))
except:
# cache doesn't exist or isn't accessible
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
if ((int(time.time()) - int(existing.st_mtime)) <= int(maxage)):
return True
return False
def generate_inv_from_api(enterprise_entity,config):
try:
inventory['all'] = {}
inventory['all']['children'] = []
inventory['all']['hosts'] = []
inventory['_meta'] = {}
inventory['_meta']['hostvars'] = {}
enterprise = api_get(enterprise_entity,config)
vms_entity = next(link for link in (enterprise['links']) if (link['rel']=='virtualmachines'))
vms = api_get(vms_entity,config)
for vmcollection in vms['collection']:
vm_vapp = next(link for link in (vmcollection['links']) if (link['rel']=='virtualappliance'))['title'].replace('[','').replace(']','').replace(' ','_')
vm_vdc = next(link for link in (vmcollection['links']) if (link['rel']=='virtualdatacenter'))['title'].replace('[','').replace(']','').replace(' ','_')
vm_template = next(link for link in (vmcollection['links']) if (link['rel']=='virtualmachinetemplate'))['title'].replace('[','').replace(']','').replace(' ','_')
# From abiquo.ini: Only adding to inventory VMs with public IP
if (config.getboolean('defaults', 'public_ip_only')) == True:
for link in vmcollection['links']:
if (link['type']=='application/vnd.abiquo.publicip+json' and link['rel']=='ip'):
vm_nic = link['title']
break
else:
vm_nic = None
# Otherwise, assigning defined network interface IP address
else:
for link in vmcollection['links']:
if (link['rel']==config.get('defaults', 'default_net_interface')):
vm_nic = link['title']
break
else:
vm_nic = None
vm_state = True
# From abiquo.ini: Only adding to inventory VMs deployed
if ((config.getboolean('defaults', 'deployed_only') == True) and (vmcollection['state'] == 'NOT_ALLOCATED')):
vm_state = False
if vm_nic is not None and vm_state:
if vm_vapp not in inventory:
inventory[vm_vapp] = {}
inventory[vm_vapp]['children'] = []
inventory[vm_vapp]['hosts'] = []
if vm_vdc not in inventory:
inventory[vm_vdc] = {}
inventory[vm_vdc]['hosts'] = []
inventory[vm_vdc]['children'] = []
if vm_template not in inventory:
inventory[vm_template] = {}
inventory[vm_template]['children'] = []
inventory[vm_template]['hosts'] = []
if config.getboolean('defaults', 'get_metadata') == True:
meta_entity = next(link for link in (vmcollection['links']) if (link['rel']=='metadata'))
try:
metadata = api_get(meta_entity,config)
if (config.getfloat("api","version") >= 3.0):
vm_metadata = metadata['metadata']
else:
vm_metadata = metadata['metadata']['metadata']
inventory['_meta']['hostvars'][vm_nic] = vm_metadata
except Exception as e:
pass
inventory[vm_vapp]['children'].append(vmcollection['name'])
inventory[vm_vdc]['children'].append(vmcollection['name'])
inventory[vm_template]['children'].append(vmcollection['name'])
inventory['all']['children'].append(vmcollection['name'])
inventory[vmcollection['name']] = []
inventory[vmcollection['name']].append(vm_nic)
return inventory
except Exception as e:
# Return empty hosts output
return { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } }
def get_inventory(enterprise, config):
''' Reads the inventory from cache or Abiquo api '''
if cache_available(config):
inv = get_cache('inventory', config)
else:
default_group = os.path.basename(sys.argv[0]).rstrip('.py')
# MAKE ABIQUO API CALLS #
inv = generate_inv_from_api(enterprise,config)
save_cache(inv, config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
enterprise = {}
# Read config
config = ConfigParser.SafeConfigParser()
    for configfilename in [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'abiquo.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
try:
login = api_get(None,config)
enterprise = next(link for link in (login['links']) if (link['rel']=='enterprise'))
except Exception as e:
enterprise = None
if cache_available(config):
inventory = get_cache('inventory', config)
else:
inventory = get_inventory(enterprise, config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()
|
camradal/ansible
|
contrib/inventory/abiquo.py
|
Python
|
gpl-3.0
| 8,807
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
# Pavel Zuna <pzuna@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ipaclient.frontend import MethodOverride
from ipalib import errors
from ipalib import Flag
from ipalib import util
from ipalib.plugable import Registry
from ipalib import _
from ipalib import x509
register = Registry()
@register(override=True, no_fail=True)
class user_del(MethodOverride):
def get_options(self):
for option in super(user_del, self).get_options():
yield option
yield Flag(
'preserve?',
include='cli',
doc=_('Delete a user, keeping the entry available for future use'),
)
yield Flag(
'no_preserve?',
include='cli',
doc=_('Delete a user'),
)
def forward(self, *keys, **options):
if self.api.env.context == 'cli':
no_preserve = options.pop('no_preserve', False)
preserve = options.pop('preserve', False)
if no_preserve and preserve:
raise errors.MutuallyExclusiveError(
reason=_("preserve and no-preserve cannot be both set"))
elif no_preserve:
options['preserve'] = False
elif preserve:
options['preserve'] = True
return super(user_del, self).forward(*keys, **options)
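# On the command line these flags map to, e.g., `ipa user-del tuser --preserve`
# (the user name 'tuser' is only an illustrative value); the forward() override
# above collapses the mutually exclusive --preserve / --no-preserve pair into
# the single boolean 'preserve' option sent to the server.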
@register(override=True, no_fail=True)
class user_show(MethodOverride):
def forward(self, *keys, **options):
if 'out' in options:
util.check_writable_file(options['out'])
result = super(user_show, self).forward(*keys, **options)
if 'usercertificate' in result['result']:
certs = (x509.load_der_x509_certificate(c)
for c in result['result']['usercertificate'])
x509.write_certificate_list(certs, options['out'])
result['summary'] = (
_('Certificate(s) stored in file \'%(file)s\'')
% dict(file=options['out'])
)
return result
else:
raise errors.NoCertificateError(entry=keys[-1])
else:
return super(user_show, self).forward(*keys, **options)
|
encukou/freeipa
|
ipaclient/plugins/user.py
|
Python
|
gpl-3.0
| 2,966
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Adam Miller <maxamillion@fedoraproject.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
  - This module allows for addition or deletion of services and ports (either TCP or UDP) in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- Name of a service to add/remove to/from firewalld.
- The service must be listed in output of firewall-cmd --get-services.
type: str
port:
description:
- Name of a port or port range to add/remove to/from firewalld.
- Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges.
type: str
rich_rule:
description:
- Rich rule to add/remove to/from firewalld.
type: str
source:
description:
- The source/network you would like to add/remove to/from firewalld.
type: str
version_added: "2.0"
interface:
description:
- The interface you would like to add/remove to/from a zone in firewalld.
type: str
version_added: "2.1"
icmp_block:
description:
- The icmp block you would like to add/remove to/from a zone in firewalld.
type: str
version_added: "2.8"
icmp_block_inversion:
description:
- Enable/Disable inversion of icmp blocks for a zone in firewalld.
type: str
version_added: "2.8"
zone:
description:
- >
        The firewalld zone to add/remove to/from (note that the default zone can be configured per system but C(public) is the default from upstream).
      - Available choices can be extended based on per-system configs; listed here are the "out of the box" defaults.
      - Possible values include C(block), C(dmz), C(drop), C(external), C(home), C(internal), C(public), C(trusted), C(work).
type: str
default: system-default(public)
permanent:
description:
- Should this configuration be in the running firewalld configuration or persist across reboots.
- As of Ansible 2.3, permanent operations can operate on firewalld configs when it is not running (requires firewalld >= 3.0.9).
- Note that if this is C(no), immediate is assumed C(yes).
type: bool
immediate:
description:
- Should this configuration be applied immediately, if set as permanent.
type: bool
default: no
version_added: "1.9"
state:
description:
- Enable or disable a setting.
- 'For ports: Should this port accept(enabled) or reject(disabled) connections.'
- The states C(present) and C(absent) can only be used in zone level operations (i.e. when no other parameters but zone and state are set).
type: str
required: true
choices: [ absent, disabled, enabled, present ]
timeout:
description:
- The amount of time the rule should be in effect for when non-permanent.
type: int
default: 0
masquerade:
description:
- The masquerade setting you would like to enable/disable to/from zones within firewalld.
type: str
version_added: "2.1"
offline:
description:
- Whether to run this module even when firewalld is offline.
type: bool
version_added: "2.3"
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, which may not be installed by default.
- For distributions where the python2 firewalld bindings are unavailable (e.g Fedora 28 and later) you will have to set the
ansible_python_interpreter for these hosts to the python3 interpreter path and install the python3 bindings.
- Zone transactions (creating, deleting) can be performed by using only the zone and state parameters "present" or "absent".
Note that zone transactions must explicitly be permanent. This is a limitation in firewalld.
This also means that you will have to reload firewalld after adding a zone that you wish to perform immediate actions on.
The module will not take care of this for you implicitly because that would undo any previously performed immediate actions which were not
permanent. Therefore, if you require immediate access to a newly created zone it is recommended you reload firewalld immediately after the zone
creation returns with a changed state and before you perform any other immediate, non-permanent actions on that zone.
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = r'''
- firewalld:
service: https
permanent: yes
state: enabled
- firewalld:
port: 8081/tcp
permanent: yes
state: disabled
- firewalld:
port: 161-162/udp
permanent: yes
state: enabled
- firewalld:
zone: dmz
service: http
permanent: yes
state: enabled
- firewalld:
rich_rule: rule service name="ftp" audit limit value="1/m" accept
permanent: yes
state: enabled
- firewalld:
source: 192.0.2.0/24
zone: internal
state: enabled
- firewalld:
zone: trusted
interface: eth2
permanent: yes
state: enabled
- firewalld:
masquerade: yes
state: enabled
permanent: yes
zone: dmz
- firewalld:
zone: custom
state: present
permanent: yes
- firewalld:
zone: drop
state: present
permanent: yes
icmp_block_inversion: yes
- firewalld:
zone: drop
state: present
permanent: yes
icmp_block: echo-request
- name: Redirect port 443 to 8443 with Rich Rule
firewalld:
rich_rule: rule forward-port port=443 protocol=tcp to-port=8443
zone: public
permanent: yes
immediate: yes
state: enabled
'''
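# A hedged sketch (not part of the module's official EXAMPLES) of the zone
# workflow described in the notes above: create a zone permanently, reload
# firewalld, then perform immediate actions on it. Using the generic command
# module for the reload is an assumption; any equivalent reload mechanism works.
#
#   - firewalld:
#       zone: custom
#       state: present
#       permanent: yes
#   - command: firewall-cmd --reload
#   - firewalld:
#       zone: custom
#       service: https
#       permanent: yes
#       immediate: yes
#       state: enabled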
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.firewalld import FirewallTransaction, fw_offline
try:
from firewall.client import Rich_Rule
from firewall.client import FirewallClientZoneSettings
except ImportError:
# The import errors are handled via FirewallTransaction, don't need to
# duplicate that here
pass
class IcmpBlockTransaction(FirewallTransaction):
"""
IcmpBlockTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(IcmpBlockTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
def get_enabled_immediate(self, icmp_block, timeout):
return icmp_block in self.fw.getIcmpBlocks(self.zone)
def get_enabled_permanent(self, icmp_block, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
return icmp_block in fw_settings.getIcmpBlocks()
def set_enabled_immediate(self, icmp_block, timeout):
self.fw.addIcmpBlock(self.zone, icmp_block, timeout)
def set_enabled_permanent(self, icmp_block, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.addIcmpBlock(icmp_block)
self.update_fw_settings(fw_zone, fw_settings)
def set_disabled_immediate(self, icmp_block, timeout):
self.fw.removeIcmpBlock(self.zone, icmp_block)
def set_disabled_permanent(self, icmp_block, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.removeIcmpBlock(icmp_block)
self.update_fw_settings(fw_zone, fw_settings)
class IcmpBlockInversionTransaction(FirewallTransaction):
"""
IcmpBlockInversionTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(IcmpBlockInversionTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
def get_enabled_immediate(self):
if self.fw.queryIcmpBlockInversion(self.zone) is True:
return True
else:
return False
def get_enabled_permanent(self):
fw_zone, fw_settings = self.get_fw_zone_settings()
if fw_settings.getIcmpBlockInversion() is True:
return True
else:
return False
def set_enabled_immediate(self):
self.fw.addIcmpBlockInversion(self.zone)
def set_enabled_permanent(self):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.setIcmpBlockInversion(True)
self.update_fw_settings(fw_zone, fw_settings)
def set_disabled_immediate(self):
self.fw.removeIcmpBlockInversion(self.zone)
def set_disabled_permanent(self):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.setIcmpBlockInversion(False)
self.update_fw_settings(fw_zone, fw_settings)
class ServiceTransaction(FirewallTransaction):
"""
ServiceTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(ServiceTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
def get_enabled_immediate(self, service, timeout):
if service in self.fw.getServices(self.zone):
return True
else:
return False
def get_enabled_permanent(self, service, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
if service in fw_settings.getServices():
return True
else:
return False
def set_enabled_immediate(self, service, timeout):
self.fw.addService(self.zone, service, timeout)
def set_enabled_permanent(self, service, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.addService(service)
self.update_fw_settings(fw_zone, fw_settings)
def set_disabled_immediate(self, service, timeout):
self.fw.removeService(self.zone, service)
def set_disabled_permanent(self, service, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.removeService(service)
self.update_fw_settings(fw_zone, fw_settings)
class MasqueradeTransaction(FirewallTransaction):
"""
MasqueradeTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(MasqueradeTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
self.enabled_msg = "Added masquerade to zone %s" % self.zone
self.disabled_msg = "Removed masquerade from zone %s" % self.zone
def get_enabled_immediate(self):
if self.fw.queryMasquerade(self.zone) is True:
return True
else:
return False
def get_enabled_permanent(self):
fw_zone, fw_settings = self.get_fw_zone_settings()
if fw_settings.getMasquerade() is True:
return True
else:
return False
def set_enabled_immediate(self):
self.fw.addMasquerade(self.zone)
def set_enabled_permanent(self):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.setMasquerade(True)
self.update_fw_settings(fw_zone, fw_settings)
def set_disabled_immediate(self):
self.fw.removeMasquerade(self.zone)
def set_disabled_permanent(self):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.setMasquerade(False)
self.update_fw_settings(fw_zone, fw_settings)
class PortTransaction(FirewallTransaction):
"""
PortTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(PortTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
def get_enabled_immediate(self, port, protocol, timeout):
port_proto = [port, protocol]
if self.fw_offline:
fw_zone, fw_settings = self.get_fw_zone_settings()
ports_list = fw_settings.getPorts()
else:
ports_list = self.fw.getPorts(self.zone)
if port_proto in ports_list:
return True
else:
return False
def get_enabled_permanent(self, port, protocol, timeout):
port_proto = (port, protocol)
fw_zone, fw_settings = self.get_fw_zone_settings()
if port_proto in fw_settings.getPorts():
return True
else:
return False
def set_enabled_immediate(self, port, protocol, timeout):
self.fw.addPort(self.zone, port, protocol, timeout)
def set_enabled_permanent(self, port, protocol, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.addPort(port, protocol)
self.update_fw_settings(fw_zone, fw_settings)
def set_disabled_immediate(self, port, protocol, timeout):
self.fw.removePort(self.zone, port, protocol)
def set_disabled_permanent(self, port, protocol, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.removePort(port, protocol)
self.update_fw_settings(fw_zone, fw_settings)
class InterfaceTransaction(FirewallTransaction):
"""
InterfaceTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(InterfaceTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
self.enabled_msg = "Changed %s to zone %s" % \
(self.action_args[0], self.zone)
self.disabled_msg = "Removed %s from zone %s" % \
(self.action_args[0], self.zone)
def get_enabled_immediate(self, interface):
if self.fw_offline:
fw_zone, fw_settings = self.get_fw_zone_settings()
interface_list = fw_settings.getInterfaces()
else:
interface_list = self.fw.getInterfaces(self.zone)
if interface in interface_list:
return True
else:
return False
def get_enabled_permanent(self, interface):
fw_zone, fw_settings = self.get_fw_zone_settings()
if interface in fw_settings.getInterfaces():
return True
else:
return False
def set_enabled_immediate(self, interface):
self.fw.changeZoneOfInterface(self.zone, interface)
def set_enabled_permanent(self, interface):
fw_zone, fw_settings = self.get_fw_zone_settings()
if self.fw_offline:
iface_zone_objs = []
for zone in self.fw.config.get_zones():
old_zone_obj = self.fw.config.get_zone(zone)
if interface in old_zone_obj.interfaces:
iface_zone_objs.append(old_zone_obj)
if len(iface_zone_objs) > 1:
                # Even though it shouldn't happen, it's actually possible that
                # the same interface is in several zone XML files
                self.module.fail_json(
                    msg='ERROR: interface {} is in {} zone XML files, can only be in one'.format(
interface,
len(iface_zone_objs)
)
)
old_zone_obj = iface_zone_objs[0]
if old_zone_obj.name != self.zone:
old_zone_settings = FirewallClientZoneSettings(
self.fw.config.get_zone_config(old_zone_obj)
)
old_zone_settings.removeInterface(interface) # remove from old
self.fw.config.set_zone_config(
old_zone_obj,
old_zone_settings.settings
)
fw_settings.addInterface(interface) # add to new
self.fw.config.set_zone_config(fw_zone, fw_settings.settings)
else:
old_zone_name = self.fw.config().getZoneOfInterface(interface)
if old_zone_name != self.zone:
if old_zone_name:
old_zone_obj = self.fw.config().getZoneByName(old_zone_name)
old_zone_settings = old_zone_obj.getSettings()
old_zone_settings.removeInterface(interface) # remove from old
old_zone_obj.update(old_zone_settings)
fw_settings.addInterface(interface) # add to new
fw_zone.update(fw_settings)
def set_disabled_immediate(self, interface):
self.fw.removeInterface(self.zone, interface)
def set_disabled_permanent(self, interface):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.removeInterface(interface)
self.update_fw_settings(fw_zone, fw_settings)
class RichRuleTransaction(FirewallTransaction):
"""
RichRuleTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(RichRuleTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
def get_enabled_immediate(self, rule, timeout):
# Convert the rule string to standard format
# before checking whether it is present
rule = str(Rich_Rule(rule_str=rule))
if rule in self.fw.getRichRules(self.zone):
return True
else:
return False
def get_enabled_permanent(self, rule, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
# Convert the rule string to standard format
# before checking whether it is present
rule = str(Rich_Rule(rule_str=rule))
if rule in fw_settings.getRichRules():
return True
else:
return False
def set_enabled_immediate(self, rule, timeout):
self.fw.addRichRule(self.zone, rule, timeout)
def set_enabled_permanent(self, rule, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.addRichRule(rule)
self.update_fw_settings(fw_zone, fw_settings)
def set_disabled_immediate(self, rule, timeout):
self.fw.removeRichRule(self.zone, rule)
def set_disabled_permanent(self, rule, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.removeRichRule(rule)
self.update_fw_settings(fw_zone, fw_settings)
class SourceTransaction(FirewallTransaction):
"""
SourceTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(SourceTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
self.enabled_msg = "Added %s to zone %s" % \
(self.action_args[0], self.zone)
self.disabled_msg = "Removed %s from zone %s" % \
(self.action_args[0], self.zone)
def get_enabled_immediate(self, source):
if source in self.fw.getSources(self.zone):
return True
else:
return False
def get_enabled_permanent(self, source):
fw_zone, fw_settings = self.get_fw_zone_settings()
if source in fw_settings.getSources():
return True
else:
return False
def set_enabled_immediate(self, source):
self.fw.addSource(self.zone, source)
def set_enabled_permanent(self, source):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.addSource(source)
self.update_fw_settings(fw_zone, fw_settings)
def set_disabled_immediate(self, source):
self.fw.removeSource(self.zone, source)
def set_disabled_permanent(self, source):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.removeSource(source)
self.update_fw_settings(fw_zone, fw_settings)
class ZoneTransaction(FirewallTransaction):
"""
ZoneTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None,
permanent=True, immediate=False, enabled_values=None, disabled_values=None):
super(ZoneTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone,
permanent=permanent, immediate=immediate,
enabled_values=enabled_values or ["present"],
disabled_values=disabled_values or ["absent"])
self.enabled_msg = "Added zone %s" % \
(self.zone)
self.disabled_msg = "Removed zone %s" % \
(self.zone)
self.tx_not_permanent_error_msg = "Zone operations must be permanent. " \
"Make sure you didn't set the 'permanent' flag to 'false' or the 'immediate' flag to 'true'."
def get_enabled_immediate(self):
self.module.fail_json(msg=self.tx_not_permanent_error_msg)
def get_enabled_permanent(self):
zones = self.fw.config().listZones()
zone_names = [self.fw.config().getZone(z).get_property("name") for z in zones]
if self.zone in zone_names:
return True
else:
return False
def set_enabled_immediate(self):
self.module.fail_json(msg=self.tx_not_permanent_error_msg)
def set_enabled_permanent(self):
self.fw.config().addZone(self.zone, FirewallClientZoneSettings())
def set_disabled_immediate(self):
self.module.fail_json(msg=self.tx_not_permanent_error_msg)
def set_disabled_permanent(self):
zone_obj = self.fw.config().getZoneByName(self.zone)
zone_obj.remove()
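# Note on the pattern above: each *Transaction subclass only supplies the
# zone-specific hooks (get_enabled_* / set_enabled_* / set_disabled_*, in both
# immediate and permanent flavours); the shared FirewallTransaction.run() used
# in main() below is expected to pick the right hook from desired_state and the
# permanent/immediate flags and invoke it with action_args.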
def main():
module = AnsibleModule(
argument_spec=dict(
icmp_block=dict(type='str'),
icmp_block_inversion=dict(type='str'),
service=dict(type='str'),
port=dict(type='str'),
rich_rule=dict(type='str'),
zone=dict(type='str'),
immediate=dict(type='bool', default=False),
source=dict(type='str'),
permanent=dict(type='bool'),
state=dict(type='str', required=True, choices=['absent', 'disabled', 'enabled', 'present']),
timeout=dict(type='int', default=0),
interface=dict(type='str'),
masquerade=dict(type='str'),
offline=dict(type='bool'),
),
supports_check_mode=True
)
permanent = module.params['permanent']
desired_state = module.params['state']
immediate = module.params['immediate']
timeout = module.params['timeout']
interface = module.params['interface']
masquerade = module.params['masquerade']
# Sanity checks
FirewallTransaction.sanity_check(module)
    # If neither permanent nor immediate is provided, assume immediate (as
    # written in the module's docs)
if not permanent and not immediate:
immediate = True
# Verify required params are provided
if immediate and fw_offline:
module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon')
changed = False
msgs = []
icmp_block = module.params['icmp_block']
icmp_block_inversion = module.params['icmp_block_inversion']
service = module.params['service']
rich_rule = module.params['rich_rule']
source = module.params['source']
zone = module.params['zone']
if module.params['port'] is not None:
if '/' in module.params['port']:
port, protocol = module.params['port'].strip().split('/')
else:
protocol = None
if not protocol:
module.fail_json(msg='improper port format (missing protocol?)')
else:
port = None
modification_count = 0
if icmp_block is not None:
modification_count += 1
if icmp_block_inversion is not None:
modification_count += 1
if service is not None:
modification_count += 1
if port is not None:
modification_count += 1
if rich_rule is not None:
modification_count += 1
if interface is not None:
modification_count += 1
if masquerade is not None:
modification_count += 1
if modification_count > 1:
module.fail_json(
msg='can only operate on port, service, rich_rule, masquerade, icmp_block, icmp_block_inversion, or interface at once'
)
elif modification_count > 0 and desired_state in ['absent', 'present']:
module.fail_json(
msg='absent and present state can only be used in zone level operations'
)
if icmp_block is not None:
transaction = IcmpBlockTransaction(
module,
action_args=(icmp_block, timeout),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
if changed is True:
msgs.append("Changed icmp-block %s to %s" % (icmp_block, desired_state))
if icmp_block_inversion is not None:
transaction = IcmpBlockInversionTransaction(
module,
action_args=(),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
if changed is True:
msgs.append("Changed icmp-block-inversion %s to %s" % (icmp_block_inversion, desired_state))
if service is not None:
transaction = ServiceTransaction(
module,
action_args=(service, timeout),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
if changed is True:
msgs.append("Changed service %s to %s" % (service, desired_state))
if source is not None:
transaction = SourceTransaction(
module,
action_args=(source,),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
if port is not None:
transaction = PortTransaction(
module,
action_args=(port, protocol, timeout),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
if changed is True:
msgs.append(
"Changed port %s to %s" % (
"%s/%s" % (port, protocol), desired_state
)
)
if rich_rule is not None:
transaction = RichRuleTransaction(
module,
action_args=(rich_rule, timeout),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
if changed is True:
msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))
if interface is not None:
transaction = InterfaceTransaction(
module,
action_args=(interface,),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
if masquerade is not None:
transaction = MasqueradeTransaction(
module,
action_args=(),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
    # If no parameter-level modifications were requested, we are operating on the zone itself
if modification_count == 0 and desired_state in ['absent', 'present']:
transaction = ZoneTransaction(
module,
action_args=(),
zone=zone,
desired_state=desired_state,
permanent=permanent,
immediate=immediate,
)
changed, transaction_msgs = transaction.run()
msgs = msgs + transaction_msgs
if changed is True:
msgs.append("Changed zone %s to %s" % (zone, desired_state))
if fw_offline:
msgs.append("(offline operation: only on-disk configs were altered)")
module.exit_json(changed=changed, msg=', '.join(msgs))
if __name__ == '__main__':
main()
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/system/firewalld.py
|
Python
|
gpl-3.0
| 29,694
|
# -*- coding: utf-8 -*-
import datetime
import hashlib
import scrapy.exceptions  # needed for the DropItem raised below
from scrapy_proj.helpers import *
class SanatatePipelineExtraMeta(object):
def process_item(self, item, spider):
item['institution'] = spider.name
act_type = LegalHelper.get_type_from_title(item['title'])
        if act_type is None:
raise scrapy.exceptions.DropItem
item['type'] = act_type
engrol = RomanianHelper.englishize_romanian(item['title']).lower()
engrolna = TextHelper.remove_non_ascii(engrol)
identifier_text = '{0} {1}'.format(engrolna, item['date'] if 'date' in item else 'NA')
identifier_text_hashed = hashlib.md5(identifier_text.encode()).hexdigest()
item['identifier'] = '{0}-{1}-{2}'.format(item['institution'], item['type'], identifier_text_hashed)
return item
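# Illustrative only (made-up values): for a spider named 'sanatate', a detected
# type of 'hg' and a title/date hash of 'd41d8cd9...', the identifier produced
# above would look like 'sanatate-hg-d41d8cd9...'.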
|
mgax/czl-scrape
|
sanatate/scrapy_proj/pipelines/extrameta.py
|
Python
|
mpl-2.0
| 826
|
""" Tests for commerce views. """
from common.djangoapps.student.tests.factories import UserFactory
class UserMixin:
""" Mixin for tests involving users. """
def setUp(self):
super().setUp()
self.user = UserFactory()
def _login(self):
""" Log into LMS. """
self.client.login(username=self.user.username, password='test')
|
eduNEXT/edx-platform
|
lms/djangoapps/commerce/tests/test_views.py
|
Python
|
agpl-3.0
| 370
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class HrPlanWizard(models.TransientModel):
_name = 'hr.plan.wizard'
_description = 'Plan Wizard'
plan_id = fields.Many2one('hr.plan', default=lambda self: self.env['hr.plan'].search([], limit=1))
employee_id = fields.Many2one(
'hr.employee', string='Employee', required=True,
default=lambda self: self.env.context.get('active_id', None),
)
def action_launch(self):
for activity_type in self.plan_id.plan_activity_type_ids:
responsible = activity_type.get_responsible_id(self.employee_id)
if self.env['hr.employee'].with_user(responsible).check_access_rights('read', raise_exception=False):
date_deadline = self.env['mail.activity']._calculate_date_deadline(activity_type.activity_type_id)
self.employee_id.activity_schedule(
activity_type_id=activity_type.activity_type_id.id,
summary=activity_type.summary,
note=activity_type.note,
user_id=responsible.id,
date_deadline=date_deadline
)
return {
'type': 'ir.actions.act_window',
'res_model': 'hr.employee',
'res_id': self.employee_id.id,
'name': self.employee_id.display_name,
'view_mode': 'form',
'views': [(False, "form")],
}
|
rven/odoo
|
addons/hr/wizard/hr_plan_wizard.py
|
Python
|
agpl-3.0
| 1,529
|
test_records = [
[{
"doctype": "Price List",
"price_list_name": "_Test Price List",
"currency": "INR",
"valid_for_all_countries": 1
}]
]
|
gangadhar-kadam/sms-erpnext
|
setup/doctype/price_list/test_price_list.py
|
Python
|
agpl-3.0
| 146
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyNbformat(PythonPackage):
"""The Jupyter Notebook format"""
homepage = "https://github.com/jupyter/nbformat"
url = "https://github.com/jupyter/nbformat/archive/4.1.0.tar.gz"
version('4.1.0', '826b4fc4ec42553b20144f53b57b4e7b')
version('4.0.1', 'ab7172e517c9d561c0c01eef5631b4c8')
version('4.0.0', '7cf61359fa4e9cf3ef5e969e2fcb909e')
depends_on('py-ipython-genutils', type=('build', 'run'))
depends_on('py-traitlets', type=('build', 'run'))
depends_on('py-jsonschema', type=('build', 'run'))
depends_on('py-jupyter-core', type=('build', 'run'))
|
krafczyk/spack
|
var/spack/repos/builtin/packages/py-nbformat/package.py
|
Python
|
lgpl-2.1
| 1,856
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unsmuggle_url,
)
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
class SenateISVPIE(InfoExtractor):
_COMM_MAP = [
['ag', '76440', 'http://ag-f.akamaihd.net'],
['aging', '76442', 'http://aging-f.akamaihd.net'],
['approps', '76441', 'http://approps-f.akamaihd.net'],
['armed', '76445', 'http://armed-f.akamaihd.net'],
['banking', '76446', 'http://banking-f.akamaihd.net'],
['budget', '76447', 'http://budget-f.akamaihd.net'],
['cecc', '76486', 'http://srs-f.akamaihd.net'],
['commerce', '80177', 'http://commerce1-f.akamaihd.net'],
['csce', '75229', 'http://srs-f.akamaihd.net'],
['dpc', '76590', 'http://dpc-f.akamaihd.net'],
['energy', '76448', 'http://energy-f.akamaihd.net'],
['epw', '76478', 'http://epw-f.akamaihd.net'],
['ethics', '76449', 'http://ethics-f.akamaihd.net'],
['finance', '76450', 'http://finance-f.akamaihd.net'],
['foreign', '76451', 'http://foreign-f.akamaihd.net'],
['govtaff', '76453', 'http://govtaff-f.akamaihd.net'],
['help', '76452', 'http://help-f.akamaihd.net'],
['indian', '76455', 'http://indian-f.akamaihd.net'],
['intel', '76456', 'http://intel-f.akamaihd.net'],
['intlnarc', '76457', 'http://intlnarc-f.akamaihd.net'],
['jccic', '85180', 'http://jccic-f.akamaihd.net'],
['jec', '76458', 'http://jec-f.akamaihd.net'],
['judiciary', '76459', 'http://judiciary-f.akamaihd.net'],
['rpc', '76591', 'http://rpc-f.akamaihd.net'],
['rules', '76460', 'http://rules-f.akamaihd.net'],
['saa', '76489', 'http://srs-f.akamaihd.net'],
['smbiz', '76461', 'http://smbiz-f.akamaihd.net'],
['srs', '75229', 'http://srs-f.akamaihd.net'],
['uscc', '76487', 'http://srs-f.akamaihd.net'],
['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
['arch', '', 'http://ussenate-f.akamaihd.net/']
]
_IE_NAME = 'senate.gov'
_VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
_TESTS = [{
'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',
'info_dict': {
'id': 'judiciary031715',
'ext': 'mp4',
'title': 'Integrated Senate Video Player',
'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.senate.gov/isvp/?type=live&comm=commerce&filename=commerce011514.mp4&auto_play=false',
'info_dict': {
'id': 'commerce011514',
'ext': 'mp4',
'title': 'Integrated Senate Video Player'
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.senate.gov/isvp/?type=arch&comm=intel&filename=intel090613&hc_location=ufi',
# checksum differs each time
'info_dict': {
'id': 'intel090613',
'ext': 'mp4',
'title': 'Integrated Senate Video Player'
}
}, {
# From http://www.c-span.org/video/?96791-1
'url': 'http://www.senate.gov/isvp?type=live&comm=banking&filename=banking012715',
'only_matching': True,
}]
@staticmethod
def _search_iframe_url(webpage):
mobj = re.search(
r"<iframe[^>]+src=['\"](?P<url>http://www\.senate\.gov/isvp/?\?[^'\"]+)['\"]",
webpage)
if mobj:
return mobj.group('url')
def _get_info_for_comm(self, committee):
for entry in self._COMM_MAP:
if entry[0] == committee:
return entry[1:]
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
qs = compat_parse_qs(re.match(self._VALID_URL, url).group('qs'))
if not qs.get('filename') or not qs.get('type') or not qs.get('comm'):
raise ExtractorError('Invalid URL', expected=True)
        video_id = re.sub(r'\.mp4$', '', qs['filename'][0])
webpage = self._download_webpage(url, video_id)
if smuggled_data.get('force_title'):
title = smuggled_data['force_title']
else:
title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, video_id)
poster = qs.get('poster')
thumbnail = poster[0] if poster else None
video_type = qs['type'][0]
committee = video_type if video_type == 'arch' else qs['comm'][0]
stream_num, domain = self._get_info_for_comm(committee)
formats = []
if video_type == 'arch':
filename = video_id if '.' in video_id else video_id + '.mp4'
formats = [{
# All parameters in the query string are necessary to prevent a 403 error
'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=',
}]
else:
hdcore_sign = 'hdcore=3.1.0'
url_params = (domain, video_id, stream_num)
f4m_url = '%s/z/%s_1@%s/manifest.f4m?' % url_params + hdcore_sign
m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params
for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'):
                # URLs without the extra param induce a 404 error
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.append(entry)
for entry in self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', m3u8_id='m3u8'):
mobj = re.search(r'(?P<tag>(?:-p|-b)).m3u8', entry['url'])
if mobj:
entry['format_id'] += mobj.group('tag')
formats.append(entry)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
}
|
linglung/ytdl
|
youtube_dl/extractor/senateisvp.py
|
Python
|
unlicense
| 6,273
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test LRUCache by running different input batch sizes on same network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class LRUCacheTest(trt_test.TfTrtIntegrationTestBase):
def GetParams(self):
dtype = dtypes.float32
input_name = "input"
    input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
                  [[2, 10, 10, 2]]]
    expected_output_dims = [[[1, 10, 10, 1]], [[2, 10, 10, 1]],
                            [[4, 10, 10, 1]], [[2, 10, 10, 1]]]
output_name = "output"
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(
dtype=dtype, shape=[None, 10, 10, 2], name=input_name)
conv_filter = constant_op.constant(
np.random.randn(3, 3, 2, 1), dtype=dtypes.float32)
x = nn.conv2d(
input=x,
filter=conv_filter,
strides=[1, 1, 1, 1],
padding="SAME",
name="conv")
bias = constant_op.constant(
np.random.randn(1, 10, 10, 1), dtype=dtypes.float32)
x = math_ops.add(x, bias)
x = nn.relu(x)
x = array_ops.identity(x, name="output")
return trt_test.TfTrtIntegrationTestParams(
gdef=g.as_graph_def(),
input_names=[input_name],
input_dims=input_dims,
output_names=[output_name],
expected_output_dims=expected_output_dims)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
def ShouldRunTest(self, run_params):
return (run_params.dynamic_engine and
not trt_test.IsQuantizationMode(run_params.precision_mode))
if __name__ == "__main__":
test.main()
|
jbedorf/tensorflow
|
tensorflow/python/compiler/tensorrt/test/lru_cache_test.py
|
Python
|
apache-2.0
| 2,912
|
"""SynologyChat platform for notify component."""
from http import HTTPStatus
import json
import logging
import requests
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_RESOURCE, CONF_VERIFY_SSL
import homeassistant.helpers.config_validation as cv
ATTR_FILE_URL = "file_url"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
}
)
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the Synology Chat notification service."""
resource = config.get(CONF_RESOURCE)
verify_ssl = config.get(CONF_VERIFY_SSL)
return SynologyChatNotificationService(resource, verify_ssl)
class SynologyChatNotificationService(BaseNotificationService):
"""Implementation of a notification service for Synology Chat."""
def __init__(self, resource, verify_ssl):
"""Initialize the service."""
self._resource = resource
self._verify_ssl = verify_ssl
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
data = {"text": message}
extended_data = kwargs.get(ATTR_DATA)
file_url = extended_data.get(ATTR_FILE_URL) if extended_data else None
if file_url:
data["file_url"] = file_url
to_send = f"payload={json.dumps(data)}"
response = requests.post(
self._resource, data=to_send, timeout=10, verify=self._verify_ssl
)
if response.status_code not in (HTTPStatus.OK, HTTPStatus.CREATED):
            _LOGGER.error(
"Error sending message. Response %d: %s:",
response.status_code,
response.reason,
)
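# Usage sketch (hypothetical resource and file URLs; the payload shape mirrors
# the code in send_message above):
#
#   service = SynologyChatNotificationService("https://nas.local/webhook", True)
#   service.send_message("Backup finished",
#                        data={"file_url": "https://nas.local/report.pdf"})
#   # -> POST body: payload={"text": "Backup finished",
#   #                        "file_url": "https://nas.local/report.pdf"}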
|
aronsky/home-assistant
|
homeassistant/components/synology_chat/notify.py
|
Python
|
apache-2.0
| 1,900
|
# Eve W-Space
# Copyright 2014 Andrew Austin and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A registry module for registering alert methods.
"""
from django.db import models
from method_base import AlertMethodBase
class MethodRegistry(dict):
"""
Dict with methods for handling method registration.
"""
    def unregister(self, name):
        """Remove a registered method from the registry by name."""
        del self[name]
def register(self, name, module):
"""
Registers a method with its name and module.
"""
if not issubclass(module, AlertMethodBase):
raise AttributeError("Module given to MethodRegistry not valid")
if not name:
raise AttributeError("MethodRegistry not given a name for module.")
module.name = name
self[name] = module
def _autodiscover(registry):
import copy
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Import alert_methods from each app
try:
before_import_registry = copy.copy(registry)
import_module('%s.alert_methods' % app)
except:
registry = before_import_registry
if module_has_submodule(mod, 'alert_methods'):
raise
registry = MethodRegistry()
def autodiscover():
_autodiscover(registry)
def register(name, module):
"""Proxy for register method."""
return registry.register(name, module)
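# Minimal usage sketch (EmailAlert is a hypothetical example class; anything
# passed to register() must subclass AlertMethodBase, as enforced above):
#
#   from method_base import AlertMethodBase
#
#   class EmailAlert(AlertMethodBase):
#       def send_alert(self, alert):
#           ...
#
#   register('email', EmailAlert)
#   # registry['email'] is EmailAlert, and EmailAlert.name == 'email'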
|
nyrocron/eve-wspace
|
evewspace/Alerts/method_registry.py
|
Python
|
apache-2.0
| 2,115
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["setup_ranger_plugin"]
import os
from datetime import datetime
from resource_management.libraries.functions.ranger_functions import Rangeradmin
from resource_management.core.resources import File, Execute
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.core.logger import Logger
from resource_management.core.source import DownloadSource
from resource_management.libraries.resources import ModifyPropertiesFile
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
from resource_management.libraries.script.script import Script
def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
downloaded_custom_connector, driver_curl_source,
driver_curl_target, java_home,
repo_name, plugin_repo_dict,
ranger_env_properties, plugin_properties,
policy_user, policymgr_mgr_url,
plugin_enabled, component_user, component_group, api_version=None, skip_if_rangeradmin_down = True, **kwargs):
if driver_curl_source and not driver_curl_source.endswith("/None"):
if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
File(previous_jdbc_jar, action='delete')
File(downloaded_custom_connector,
content = DownloadSource(driver_curl_source),
mode = 0644
)
Execute(('cp', '--remove-destination', downloaded_custom_connector, driver_curl_target),
path=["/bin", "/usr/bin/"],
sudo=True
)
File(driver_curl_target, mode=0644)
if policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
stack_root = Script.get_stack_root()
stack_version = get_stack_version(component_select_name)
file_path = format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/install.properties')
if not os.path.isfile(file_path):
raise Fail(format('Ranger {service_name} plugin install.properties file does not exist at {file_path}'))
ModifyPropertiesFile(file_path,
properties = plugin_properties
)
custom_plugin_properties = dict()
custom_plugin_properties['CUSTOM_USER'] = component_user
custom_plugin_properties['CUSTOM_GROUP'] = component_group
ModifyPropertiesFile(file_path,properties = custom_plugin_properties)
if plugin_enabled:
cmd = (format('enable-{service_name}-plugin.sh'),)
    if api_version == 'v2':
ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url, skip_if_rangeradmin_down = skip_if_rangeradmin_down)
else:
ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url, skip_if_rangeradmin_down = skip_if_rangeradmin_down)
ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],
ranger_env_properties['admin_username'], ranger_env_properties['admin_password'],
policy_user)
else:
cmd = (format('disable-{service_name}-plugin.sh'),)
cmd_env = {'JAVA_HOME': java_home,
'PWD': format('{stack_root}/{stack_version}/ranger-{service_name}-plugin'),
'PATH': format('{stack_root}/{stack_version}/ranger-{service_name}-plugin')}
Execute(cmd,
environment=cmd_env,
logoutput=True,
sudo=True,
)
|
arenadata/ambari
|
ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
|
Python
|
apache-2.0
| 4,473
|
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from oslo.config import cfg
from neutron.common import exceptions
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version as version_module
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from neutron.tests.unit.vmware.nsxlib import base
_uuid = test_api_v2._uuid
class TestNatRules(base.NsxlibTestCase):
def _test_create_lrouter_dnat_rule(self, version):
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version(version)):
tenant_id = 'pippo'
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
'fake_router',
'192.168.0.1')
nat_rule = routerlib.create_lrouter_dnat_rule(
self.fake_cluster, lrouter['uuid'], '10.0.0.99',
match_criteria={'destination_ip_addresses':
'192.168.0.5'})
uri = nsxlib._build_uri_path(routerlib.LROUTERNAT_RESOURCE,
nat_rule['uuid'],
lrouter['uuid'])
resp_obj = nsxlib.do_request("GET", uri, cluster=self.fake_cluster)
self.assertEqual('DestinationNatRule', resp_obj['type'])
self.assertEqual('192.168.0.5',
resp_obj['match']['destination_ip_addresses'])
def test_create_lrouter_dnat_rule_v2(self):
self._test_create_lrouter_dnat_rule('2.9')
def test_create_lrouter_dnat_rule_v31(self):
self._test_create_lrouter_dnat_rule('3.1')
class TestExplicitLRouters(base.NsxlibTestCase):
def setUp(self):
self.fake_version = '3.2'
super(TestExplicitLRouters, self).setUp()
def _get_lrouter(self, tenant_id, router_name, router_id, relations=None):
schema = '/ws.v1/schema/RoutingTableRoutingConfig'
router = {'display_name': router_name,
'uuid': router_id,
'tags': utils.get_tags(os_tid=tenant_id),
'distributed': False,
'routing_config': {'type': 'RoutingTableRoutingConfig',
'_schema': schema},
'_schema': schema,
'nat_synchronization_enabled': True,
'replication_mode': 'service',
'type': 'LogicalRouterConfig',
'_href': '/ws.v1/lrouter/%s' % router_id, }
if relations:
router['_relations'] = relations
return router
def _get_single_route(self, router_id, route_id='fake_route_id_0',
prefix='0.0.0.0/0', next_hop_ip='1.1.1.1'):
return {'protocol': 'static',
'_href': '/ws.v1/lrouter/%s/rib/%s' % (router_id, route_id),
'prefix': prefix,
'_schema': '/ws.v1/schema/RoutingTableEntry',
'next_hop_ip': next_hop_ip,
'action': 'accept',
'uuid': route_id}
def test_prepare_body_with_implicit_routing_config(self):
router_name = 'fake_router_name'
tenant_id = 'fake_tenant_id'
neutron_router_id = 'pipita_higuain'
router_type = 'SingleDefaultRouteImplicitRoutingConfig'
route_config = {
'default_route_next_hop': {'gateway_ip_address': 'fake_address',
'type': 'RouterNextHop'}, }
body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
tenant_id, router_type,
**route_config)
expected = {'display_name': 'fake_router_name',
'routing_config': {
'default_route_next_hop':
{'gateway_ip_address': 'fake_address',
'type': 'RouterNextHop'},
'type': 'SingleDefaultRouteImplicitRoutingConfig'},
'tags': utils.get_tags(os_tid='fake_tenant_id',
q_router_id='pipita_higuain'),
'type': 'LogicalRouterConfig',
'replication_mode': cfg.CONF.NSX.replication_mode}
self.assertEqual(expected, body)
def test_prepare_body_without_routing_config(self):
router_name = 'fake_router_name'
tenant_id = 'fake_tenant_id'
neutron_router_id = 'marekiaro_hamsik'
router_type = 'RoutingTableRoutingConfig'
body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
tenant_id, router_type)
expected = {'display_name': 'fake_router_name',
'routing_config': {'type': 'RoutingTableRoutingConfig'},
'tags': utils.get_tags(os_tid='fake_tenant_id',
q_router_id='marekiaro_hamsik'),
'type': 'LogicalRouterConfig',
'replication_mode': cfg.CONF.NSX.replication_mode}
self.assertEqual(expected, body)
def test_get_lrouter(self):
tenant_id = 'fake_tenant_id'
router_name = 'fake_router_name'
router_id = 'fake_router_id'
relations = {
'LogicalRouterStatus':
{'_href': '/ws.v1/lrouter/%s/status' % router_id,
'lport_admin_up_count': 1,
'_schema': '/ws.v1/schema/LogicalRouterStatus',
'lport_count': 1,
'fabric_status': True,
'type': 'LogicalRouterStatus',
'lport_link_up_count': 0, }, }
with mock.patch.object(nsxlib, 'do_request',
return_value=self._get_lrouter(tenant_id,
router_name,
router_id,
relations)):
lrouter = routerlib.get_lrouter(self.fake_cluster, router_id)
self.assertTrue(
lrouter['_relations']['LogicalRouterStatus']['fabric_status'])
def test_create_lrouter(self):
tenant_id = 'fake_tenant_id'
router_name = 'fake_router_name'
router_id = 'fake_router_id'
nexthop_ip = '10.0.0.1'
with mock.patch.object(
nsxlib, 'do_request',
return_value=self._get_lrouter(tenant_id,
router_name,
router_id)):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
router_name, nexthop_ip)
self.assertEqual(lrouter['routing_config']['type'],
'RoutingTableRoutingConfig')
self.assertNotIn('default_route_next_hop',
lrouter['routing_config'])
def test_update_lrouter_with_no_routes(self):
router_id = 'fake_router_id'
new_routes = [{"nexthop": "10.0.0.2",
"destination": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id)]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
return_value='fake_uuid'):
old_routes = routerlib.update_explicit_routes_lrouter(
self.fake_cluster, router_id, new_routes)
self.assertEqual(old_routes, nsx_routes)
def test_update_lrouter_with_no_routes_raise_nsx_exception(self):
router_id = 'fake_router_id'
new_routes = [{"nexthop": "10.0.0.2",
"destination": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id)]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
side_effect=api_exc.NsxApiException):
self.assertRaises(api_exc.NsxApiException,
routerlib.update_explicit_routes_lrouter,
self.fake_cluster, router_id, new_routes)
def test_update_lrouter_with_routes(self):
router_id = 'fake_router_id'
new_routes = [{"next_hop_ip": "10.0.0.2",
"prefix": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id),
self._get_single_route(router_id, 'fake_route_id_1',
'0.0.0.1/24', '10.0.0.3'),
self._get_single_route(router_id, 'fake_route_id_2',
'0.0.0.2/24', '10.0.0.4'), ]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
return_value=None):
with mock.patch.object(routerlib,
'create_explicit_route_lrouter',
return_value='fake_uuid'):
old_routes = routerlib.update_explicit_routes_lrouter(
self.fake_cluster, router_id, new_routes)
self.assertEqual(old_routes, nsx_routes)
    def test_update_lrouter_with_routes_raises_nsx_exception(self):
router_id = 'fake_router_id'
new_routes = [{"nexthop": "10.0.0.2",
"destination": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id),
self._get_single_route(router_id, 'fake_route_id_1',
'0.0.0.1/24', '10.0.0.3'),
self._get_single_route(router_id, 'fake_route_id_2',
'0.0.0.2/24', '10.0.0.4'), ]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
side_effect=api_exc.NsxApiException):
with mock.patch.object(
routerlib, 'create_explicit_route_lrouter',
return_value='fake_uuid'):
self.assertRaises(
api_exc.NsxApiException,
routerlib.update_explicit_routes_lrouter,
self.fake_cluster, router_id, new_routes)
class RouterNegativeTestCase(base.NsxlibNegativeBaseTestCase):
def test_create_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.create_lrouter,
self.fake_cluster,
uuidutils.generate_uuid(),
'pluto',
'fake_router',
'my_hop')
def test_delete_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.delete_lrouter,
self.fake_cluster,
'fake_router')
def test_get_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.get_lrouter,
self.fake_cluster,
'fake_router')
def test_update_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.update_lrouter,
self.fake_cluster,
'fake_router',
'pluto',
'new_hop')
class TestLogicalRouters(base.NsxlibTestCase):
def _verify_lrouter(self, res_lrouter,
expected_uuid,
expected_display_name,
expected_nexthop,
expected_tenant_id,
expected_neutron_id=None,
expected_distributed=None):
self.assertEqual(res_lrouter['uuid'], expected_uuid)
nexthop = (res_lrouter['routing_config']
['default_route_next_hop']['gateway_ip_address'])
self.assertEqual(nexthop, expected_nexthop)
router_tags = self._build_tag_dict(res_lrouter['tags'])
self.assertIn('os_tid', router_tags)
self.assertEqual(res_lrouter['display_name'], expected_display_name)
self.assertEqual(expected_tenant_id, router_tags['os_tid'])
if expected_distributed is not None:
self.assertEqual(expected_distributed,
res_lrouter['distributed'])
if expected_neutron_id:
self.assertIn('q_router_id', router_tags)
self.assertEqual(expected_neutron_id, router_tags['q_router_id'])
def test_get_lrouters(self):
lrouter_uuids = [routerlib.create_lrouter(
self.fake_cluster, 'whatever', 'pippo', 'fake-lrouter-%s' % k,
'10.0.0.1')['uuid'] for k in range(3)]
routers = routerlib.get_lrouters(self.fake_cluster, 'pippo')
for router in routers:
self.assertIn(router['uuid'], lrouter_uuids)
def _create_lrouter(self, version, neutron_id=None, distributed=None):
with mock.patch.object(
self.fake_cluster.api_client, 'get_version',
return_value=version_module.Version(version)):
if not neutron_id:
neutron_id = uuidutils.generate_uuid()
lrouter = routerlib.create_lrouter(
self.fake_cluster, neutron_id, 'pippo',
'fake-lrouter', '10.0.0.1', distributed=distributed)
return routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
def test_create_and_get_lrouter_v30(self):
neutron_id = uuidutils.generate_uuid()
res_lrouter = self._create_lrouter('3.0', neutron_id=neutron_id)
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
'fake-lrouter', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id)
def test_create_and_get_lrouter_v31_centralized(self):
neutron_id = uuidutils.generate_uuid()
res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
distributed=False)
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
'fake-lrouter', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id,
expected_distributed=False)
def test_create_and_get_lrouter_v31_distributed(self):
neutron_id = uuidutils.generate_uuid()
res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
distributed=True)
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
'fake-lrouter', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id,
expected_distributed=True)
def test_create_and_get_lrouter_name_exceeds_40chars(self):
neutron_id = uuidutils.generate_uuid()
display_name = '*' * 50
lrouter = routerlib.create_lrouter(self.fake_cluster,
neutron_id,
'pippo',
display_name,
'10.0.0.1')
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
self._verify_lrouter(res_lrouter, lrouter['uuid'],
'*' * 40, '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id)
def _test_version_dependent_update_lrouter(self, version):
def foo(*args, **kwargs):
return version
foo_func_dict = {
'update_lrouter': {
2: {-1: foo},
3: {-1: foo, 2: foo}
}
}
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
return_value=version_module.Version(version)):
with mock.patch.dict(routerlib.ROUTER_FUNC_DICT,
foo_func_dict, clear=True):
return routerlib.update_lrouter(
self.fake_cluster, 'foo_router_id', 'foo_router_name',
'foo_nexthop', routes={'foo_destination': 'foo_address'})
def test_version_dependent_update_lrouter_old_versions(self):
self.assertRaises(nsx_exc.InvalidVersion,
self._test_version_dependent_update_lrouter,
"2.9")
self.assertRaises(nsx_exc.InvalidVersion,
self._test_version_dependent_update_lrouter,
"3.0")
self.assertRaises(nsx_exc.InvalidVersion,
self._test_version_dependent_update_lrouter,
"3.1")
def test_version_dependent_update_lrouter_new_versions(self):
self.assertEqual("3.2",
self._test_version_dependent_update_lrouter("3.2"))
self.assertEqual("4.0",
self._test_version_dependent_update_lrouter("4.0"))
self.assertEqual("4.1",
self._test_version_dependent_update_lrouter("4.1"))
def test_update_lrouter_no_nexthop(self):
neutron_id = uuidutils.generate_uuid()
lrouter = routerlib.create_lrouter(self.fake_cluster,
neutron_id,
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter = routerlib.update_lrouter(self.fake_cluster,
lrouter['uuid'],
'new_name',
None)
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
self._verify_lrouter(res_lrouter, lrouter['uuid'],
'new_name', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id)
def test_update_lrouter(self):
neutron_id = uuidutils.generate_uuid()
lrouter = routerlib.create_lrouter(self.fake_cluster,
neutron_id,
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter = routerlib.update_lrouter(self.fake_cluster,
lrouter['uuid'],
'new_name',
'192.168.0.1')
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
self._verify_lrouter(res_lrouter, lrouter['uuid'],
'new_name', '192.168.0.1', 'pippo',
expected_neutron_id=neutron_id)
def test_update_nonexistent_lrouter_raises(self):
self.assertRaises(exceptions.NotFound,
routerlib.update_lrouter,
self.fake_cluster,
'whatever',
'foo', '9.9.9.9')
def test_delete_lrouter(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
routerlib.delete_lrouter(self.fake_cluster, lrouter['uuid'])
self.assertRaises(exceptions.NotFound,
routerlib.get_lrouter,
self.fake_cluster,
lrouter['uuid'])
def test_query_lrouter_ports(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
router_port_uuids = [routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo',
'qp_id_%s' % k, 'port-%s' % k, True,
['192.168.0.%s' % k], '00:11:22:33:44:55')['uuid']
for k in range(3)]
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 3)
for res_port in ports:
self.assertIn(res_port['uuid'], router_port_uuids)
def test_query_lrouter_lports_nonexistent_lrouter_raises(self):
self.assertRaises(
exceptions.NotFound, routerlib.create_router_lport,
self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
def test_create_and_get_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
port_tags = self._build_tag_dict(res_port['tags'])
self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertEqual('pippo', port_tags['os_tid'])
self.assertEqual('neutron_port_id', port_tags['q_port_id'])
def test_create_lrouter_port_nonexistent_router_raises(self):
self.assertRaises(
exceptions.NotFound, routerlib.create_router_lport,
self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
def test_update_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
routerlib.update_router_lport(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
'pippo', 'another_port_id', 'name', False,
['192.168.0.1', '10.10.10.254'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
port_tags = self._build_tag_dict(res_port['tags'])
self.assertEqual(['192.168.0.1', '10.10.10.254'],
res_port['ip_addresses'])
self.assertEqual('False', res_port['admin_status_enabled'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertEqual('pippo', port_tags['os_tid'])
self.assertEqual('another_port_id', port_tags['q_port_id'])
def test_update_lrouter_port_nonexistent_router_raises(self):
self.assertRaises(
exceptions.NotFound, routerlib.update_router_lport,
self.fake_cluster, 'boo-router', 'boo-port', 'pippo',
'neutron_port_id', 'name', True, ['192.168.0.1'])
def test_update_lrouter_port_nonexistent_port_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
self.assertRaises(
exceptions.NotFound, routerlib.update_router_lport,
self.fake_cluster, lrouter['uuid'], 'boo-port', 'pippo',
'neutron_port_id', 'name', True, ['192.168.0.1'])
def test_delete_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
'00:11:22:33:44:55')
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
routerlib.delete_router_lport(self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertFalse(len(ports))
def test_delete_lrouter_port_nonexistent_router_raises(self):
self.assertRaises(exceptions.NotFound,
routerlib.delete_router_lport,
self.fake_cluster, 'xyz', 'abc')
def test_delete_lrouter_port_nonexistent_port_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
self.assertRaises(exceptions.NotFound,
routerlib.delete_router_lport,
self.fake_cluster, lrouter['uuid'], 'abc')
def test_delete_peer_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
'00:11:22:33:44:55')
def fakegetport(*args, **kwargs):
return {'_relations': {'LogicalPortAttachment':
{'peer_port_uuid': lrouter_port['uuid']}}}
# mock get_port
with mock.patch.object(switchlib, 'get_port', new=fakegetport):
routerlib.delete_peer_router_lport(self.fake_cluster,
lrouter_port['uuid'],
'whatwever', 'whatever')
def test_update_lrouter_port_ips_add_only(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
routerlib.update_lrouter_port_ips(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
['10.10.10.254'], [])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
self.assertEqual(['10.10.10.254', '192.168.0.1'],
res_port['ip_addresses'])
def test_update_lrouter_port_ips_remove_only(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1', '10.10.10.254'],
'00:11:22:33:44:55')
routerlib.update_lrouter_port_ips(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
[], ['10.10.10.254'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])
def test_update_lrouter_port_ips_add_and_remove(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
routerlib.update_lrouter_port_ips(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
['10.10.10.254'], ['192.168.0.1'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
self.assertEqual(['10.10.10.254'], res_port['ip_addresses'])
def test_update_lrouter_port_ips_nonexistent_router_raises(self):
self.assertRaises(
nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
self.fake_cluster, 'boo-router', 'boo-port', [], [])
def test_update_lrouter_port_ips_nsx_exception_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
def raise_nsx_exc(*args, **kwargs):
raise api_exc.NsxApiException()
with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc):
self.assertRaises(
nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'], [], [])
def test_plug_lrouter_port_patch_attachment(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id, 'fake-switch',
transport_zones_config)
lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'],
tenant_id, 'xyz',
'name', 'device_id', True)
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
result = routerlib.plug_router_port_attachment(
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'],
lport['uuid'], 'PatchAttachment')
self.assertEqual(lport['uuid'],
result['LogicalPortAttachment']['peer_port_uuid'])
def test_plug_lrouter_port_l3_gw_attachment(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
result = routerlib.plug_router_port_attachment(
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'],
'gw_att', 'L3GatewayAttachment')
self.assertEqual(
'gw_att',
result['LogicalPortAttachment']['l3_gateway_service_uuid'])
def test_plug_lrouter_port_l3_gw_attachment_with_vlan(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
result = routerlib.plug_router_port_attachment(
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'],
'gw_att', 'L3GatewayAttachment', 123)
self.assertEqual(
'gw_att',
result['LogicalPortAttachment']['l3_gateway_service_uuid'])
self.assertEqual(
'123',
result['LogicalPortAttachment']['vlan_id'])
def test_plug_lrouter_port_invalid_attachment_type_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
self.assertRaises(nsx_exc.InvalidAttachmentType,
routerlib.plug_router_port_attachment,
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'], 'gw_att', 'BadType')
def _test_create_router_snat_rule(self, version):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version(version)):
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=200,
match_criteria={'source_ip_addresses': '192.168.0.24'})
rules = routerlib.query_nat_rules(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 1)
def test_create_router_snat_rule_v3(self):
self._test_create_router_snat_rule('3.0')
def test_create_router_snat_rule_v2(self):
self._test_create_router_snat_rule('2.0')
def _test_create_router_dnat_rule(self, version, dest_port=None):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
return_value=version_module.Version(version)):
routerlib.create_lrouter_dnat_rule(
self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
dest_port=dest_port,
match_criteria={'destination_ip_addresses': '10.0.0.3'})
rules = routerlib.query_nat_rules(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 1)
def test_create_router_dnat_rule_v3(self):
self._test_create_router_dnat_rule('3.0')
def test_create_router_dnat_rule_v2(self):
self._test_create_router_dnat_rule('2.0')
def test_create_router_dnat_rule_v2_with_destination_port(self):
self._test_create_router_dnat_rule('2.0', 8080)
def test_create_router_dnat_rule_v3_with_destination_port(self):
self._test_create_router_dnat_rule('3.0', 8080)
def test_create_router_snat_rule_invalid_match_keys_raises(self):
# In this case the version does not make a difference
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: '2.0'):
self.assertRaises(AttributeError,
routerlib.create_lrouter_snat_rule,
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=200,
match_criteria={'foo': 'bar'})
def _test_create_router_nosnat_rule(self, version, expected=1):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version(version)):
routerlib.create_lrouter_nosnat_rule(
self.fake_cluster, lrouter['uuid'],
order=100,
match_criteria={'destination_ip_addresses': '192.168.0.0/24'})
rules = routerlib.query_nat_rules(
self.fake_cluster, lrouter['uuid'])
# NoSNAT rules do not exist in V2
self.assertEqual(len(rules), expected)
def test_create_router_nosnat_rule_v2(self):
self._test_create_router_nosnat_rule('2.0', expected=0)
def test_create_router_nosnat_rule_v3(self):
self._test_create_router_nosnat_rule('3.0')
def _prepare_nat_rules_for_delete_tests(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
# v2 or v3 makes no difference for this test
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version('2.0')):
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=220,
match_criteria={'source_ip_addresses': '192.168.0.0/24'})
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.3', '10.0.0.3', order=200,
match_criteria={'source_ip_addresses': '192.168.0.2/32'})
routerlib.create_lrouter_dnat_rule(
self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
match_criteria={'destination_ip_addresses': '10.0.0.3'})
return lrouter
def test_delete_router_nat_rules_by_match_on_destination_ip(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 1, 1,
destination_ip_addresses='10.0.0.3')
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 2)
def test_delete_router_nat_rules_by_match_on_source_ip(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'SourceNatRule', 1, 1,
source_ip_addresses='192.168.0.2/32')
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 2)
def test_delete_router_nat_rules_by_match_no_match_expected(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'SomeWeirdType', 0)
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 0,
destination_ip_addresses='99.99.99.99')
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
def test_delete_router_nat_rules_by_match_no_match_raises(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
self.assertRaises(
nsx_exc.NatRuleMismatch,
routerlib.delete_nat_rules_by_match,
self.fake_cluster, lrouter['uuid'],
'SomeWeirdType', 1, 1)
|
subramani95/neutron
|
neutron/tests/unit/vmware/nsxlib/test_router.py
|
Python
|
apache-2.0
| 45,472
|
"""For seeding individual ops based on a graph-level seed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
_DEFAULT_GRAPH_SEED = 87654321
def get_seed(op_seed):
"""Returns the local seeds an operation should use given an op-specific seed.
  Given an operation-specific seed, `op_seed`, this helper function returns two
  seeds derived from the graph-level and op-level seeds. Many random operations
  internally use the two seeds to allow the user to change the seed globally for a
  graph, or for only specific operations.
For details on how the graph-level seed interacts with op seeds, see
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed).
Args:
op_seed: integer.
Returns:
A tuple of two integers that should be used for the local seed of this
operation.
"""
graph_seed = ops.get_default_graph().seed
if graph_seed is not None:
if op_seed is not None:
return graph_seed, op_seed
else:
return graph_seed, ops.get_default_graph()._last_id
else:
if op_seed is not None:
return _DEFAULT_GRAPH_SEED, op_seed
else:
return None, None
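# Illustrative sketch of the cases handled above (the graph seed of 1234 is an
# arbitrary example value; the results follow directly from the logic in get_seed):
#
#   get_seed(None)  # -> (None, None) when no graph-level seed is set
#   get_seed(42)    # -> (_DEFAULT_GRAPH_SEED, 42) when only the op seed is set
#   # after ops.get_default_graph().seed = 1234:
#   get_seed(42)    # -> (1234, 42)
#   get_seed(None)  # -> (1234, <graph._last_id>)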
def set_random_seed(seed):
"""Sets the graph-level random seed.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
  Its interactions with operation-level seeds are as follows:
1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random_uniform([1])
b = tf.random_normal([1])
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A3'
print sess2.run(a) # generates 'A4'
print sess2.run(b) # generates 'B3'
print sess2.run(b) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random_uniform([1], seed=1)
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A1'
print sess2.run(a) # generates 'A2'
print sess2.run(b) # generates 'B3'
print sess2.run(b) # generates 'B4'
```
To make the random sequences generated by all ops be repeatable across
sessions, set a graph-level seed:
```python
tf.set_random_seed(1234)
a = tf.random_uniform([1])
b = tf.random_normal([1])
  # Repeatedly running this block with the same graph will generate the same
  # sequences of 'a' and 'b'.
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A1'
print sess2.run(a) # generates 'A2'
print sess2.run(b) # generates 'B1'
print sess2.run(b) # generates 'B2'
```
Args:
seed: integer.
"""
ops.get_default_graph().seed = seed
|
arunhotra/tensorflow
|
tensorflow/python/framework/random_seed.py
|
Python
|
apache-2.0
| 4,427
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import job_binary
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
job_binary_template = """
heat_template_version: 2015-10-15
resources:
job-binary:
type: OS::Sahara::JobBinary
properties:
name: my-jb
url: swift://container/jar-example.jar
credentials: {'user': 'admin','password': 'swordfish'}
"""
class SaharaJobBinaryTest(common.HeatTestCase):
def setUp(self):
super(SaharaJobBinaryTest, self).setUp()
t = template_format.parse(job_binary_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['job-binary']
self.client = mock.Mock()
self.patchobject(job_binary.JobBinary, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
jb = job_binary.JobBinary(name, snippet, stack)
value = mock.MagicMock(id='12345')
self.client.job_binaries.create.return_value = value
scheduler.TaskRunner(jb.create)()
return jb
def test_create(self):
jb = self._create_resource('job-binary', self.rsrc_defn, self.stack)
args = self.client.job_binaries.create.call_args[1]
expected_args = {
'name': 'my-jb',
'description': '',
'url': 'swift://container/jar-example.jar',
'extra': {
'user': 'admin',
'password': 'swordfish'
}
}
self.assertEqual(expected_args, args)
self.assertEqual('12345', jb.resource_id)
expected_state = (jb.CREATE, jb.COMPLETE)
self.assertEqual(expected_state, jb.state)
def test_resource_mapping(self):
mapping = job_binary.resource_mapping()
self.assertEqual(1, len(mapping))
self.assertEqual(job_binary.JobBinary,
mapping['OS::Sahara::JobBinary'])
def test_update(self):
jb = self._create_resource('job-binary', self.rsrc_defn,
self.stack)
self.rsrc_defn['Properties']['url'] = (
'internal-db://94b8821d-1ce7-4131-8364-a6c6d85ad57b')
scheduler.TaskRunner(jb.update, self.rsrc_defn)()
data = {
'name': 'my-jb',
'description': '',
'url': 'internal-db://94b8821d-1ce7-4131-8364-a6c6d85ad57b',
'extra': {
'user': 'admin',
'password': 'swordfish'
}
}
self.client.job_binaries.update.assert_called_once_with(
'12345', data)
self.assertEqual((jb.UPDATE, jb.COMPLETE), jb.state)
def test_delete(self):
jb = self._create_resource('job-binary', self.rsrc_defn, self.stack)
scheduler.TaskRunner(jb.delete)()
self.assertEqual((jb.DELETE, jb.COMPLETE), jb.state)
self.client.job_binaries.delete.assert_called_once_with(
jb.resource_id)
def test_delete_not_found(self):
jb = self._create_resource('job-binary', self.rsrc_defn, self.stack)
self.client.job_binaries.delete.side_effect = (
sahara.sahara_base.APIException(error_code=404))
scheduler.TaskRunner(jb.delete)()
self.assertEqual((jb.DELETE, jb.COMPLETE), jb.state)
self.client.job_binaries.delete.assert_called_once_with(
jb.resource_id)
def test_show_attribute(self):
jb = self._create_resource('job-binary', self.rsrc_defn, self.stack)
value = mock.MagicMock()
value.to_dict.return_value = {'jb': 'info'}
self.client.job_binaries.get.return_value = value
self.assertEqual({'jb': 'info'}, jb.FnGetAtt('show'))
def test_validate_invalid_url(self):
self.rsrc_defn['Properties']['url'] = 'internal-db://38273f82'
jb = job_binary.JobBinary('job-binary', self.rsrc_defn, self.stack)
ex = self.assertRaises(exception.StackValidationFailed, jb.validate)
error_msg = ('resources.job-binary.properties: internal-db://38273f82 '
'is not a valid job location.')
self.assertEqual(error_msg, six.text_type(ex))
def test_validate_password_without_user(self):
self.rsrc_defn['Properties']['credentials'].pop('user')
jb = job_binary.JobBinary('job-binary', self.rsrc_defn, self.stack)
ex = self.assertRaises(exception.StackValidationFailed, jb.validate)
error_msg = ('Property error: resources.job-binary.properties.'
'credentials: Property user not assigned')
self.assertEqual(error_msg, six.text_type(ex))
|
pratikmallya/heat
|
heat/tests/test_sahara_job_binary.py
|
Python
|
apache-2.0
| 5,419
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A template to define composite ops."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import app
from tensorflow.compiler.mlir.tfr.python.composite import Composite
from tensorflow.compiler.mlir.tfr.python.op_reg_gen import gen_register_op
from tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'output', None,
    'Path to write the generated register op file and MLIR file.')
flags.DEFINE_bool('gen_register_op', True,
'Generate register op cc file or tfr mlir file.')
flags.mark_flag_as_required('output')
@Composite('TestRandom', derived_attrs=['T: numbertype'], outputs=['o: T'])
def _composite_random_op():
pass
def main(_):
if FLAGS.gen_register_op:
assert FLAGS.output.endswith('.cc')
generated_code = gen_register_op(sys.modules[__name__], '_composite_')
else:
assert FLAGS.output.endswith('.mlir')
generated_code = tfr_gen_from_module(sys.modules[__name__], '_composite_')
dirname = os.path.dirname(FLAGS.output)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(FLAGS.output, 'w') as f:
f.write(generated_code)
if __name__ == '__main__':
app.run(main=main)
|
annarev/tensorflow
|
tensorflow/compiler/mlir/tfr/define_op_template.py
|
Python
|
apache-2.0
| 2,013
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
#options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
In most cases, the wsdl only defines (1) service in which access
by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception, 'No services defined'
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound, 'at [%d]' % name
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound, name
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception, 'No ports defined: %s' % self.__qn
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound, qn
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound, qn
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound, qn
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
        @type method: I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
@type options: dict
@ivar cookiejar: A cookie jar.
    @type cookiejar: cookielib.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs)
timer.stop()
metrics.log.debug(
"message for '%s' created: %s",
self.method.name,
timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug(
"method '%s' invoked: %s",
self.method.name,
timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
prettyxml = self.options.prettyxml
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
plugins.message.sending(envelope=soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
reply = transport.send(request)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError, e:
if e.httpcode in (202,204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
@raise WebFault: On server.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise Exception((status, reason))
else:
return (status, None)
def location(self):
p = Unskin(self.options)
return p.get('location', self.method.location)
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return kwargs.has_key(SimClient.injkey)
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)
|
daftano/interactive-tutorials
|
suds/client.py
|
Python
|
apache-2.0
| 25,972
|
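For orientation, a minimal usage sketch of the client defined above (the WSDL URL, type name, and operation name are placeholders):

from suds.client import Client

client = Client('http://example.com/service?wsdl')   # hypothetical WSDL location
print(client)                                        # lists services, ports and methods
person = client.factory.create('Person')             # build a WSDL-defined type by name
person.name = 'Ada'
result = client.service.addPerson(person)            # attribute access resolves to a Method
client.set_options(faults=False)                     # subsequent calls return (status, value) instead of raising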
# Copyright 2014 - Numergy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from solum.objects import infrastructure_stack as abstract
from solum.objects.sqlalchemy import models as sql
class InfrastructureStack(sql.Base, abstract.InfrastructureStack):
"""Represent an infrastructure_stack in sqlalchemy."""
__tablename__ = 'infrastructure_stack'
__resource__ = 'infrastructure/stacks'
__table_args__ = sql.table_args()
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
uuid = sa.Column(sa.String(36), nullable=False)
project_id = sa.Column(sa.String(36))
user_id = sa.Column(sa.String(36))
image_id = sa.Column(sa.String(36))
heat_stack_id = sa.Column(sa.String(36))
name = sa.Column(sa.String(100))
description = sa.Column(sa.String(255))
tags = sa.Column(sa.Text)
class InfrastructureStackList(abstract.InfrastructureStackList):
"""Represent a list of infrastructure_stacks in sqlalchemy."""
@classmethod
def get_all(cls, context):
return InfrastructureStackList(sql.model_query(context,
InfrastructureStack))
|
ed-/solum
|
solum/objects/sqlalchemy/infrastructure_stack.py
|
Python
|
apache-2.0
| 1,686
|
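A brief usage sketch of the model above (the request context 'ctx' is hypothetical and would be obtained from solum's API layer elsewhere):

stacks = InfrastructureStackList.get_all(ctx)
for stack in stacks:
    print('%s %s %s' % (stack.uuid, stack.name, stack.heat_stack_id))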
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import GafferUI
import GafferUITest
class ProgressBarTest( GafferUITest.TestCase ) :
def testConstructor( self ) :
b = GafferUI.ProgressBar()
self.assertEqual( b.getRange(), ( 0, 100 ) )
self.assertEqual( b.getProgress(), 0 )
self.assertEqual( b.getText(), "%p%" )
b = GafferUI.ProgressBar( 10, ( 5, 15 ), "doing something %p%" )
self.assertEqual( b.getRange(), ( 5, 15 ) )
self.assertEqual( b.getProgress(), 10 )
self.assertEqual( b.getText(), "doing something %p%" )
def testAccessors( self ) :
b = GafferUI.ProgressBar()
b.setRange( ( 0, 20 ) )
self.assertEqual( b.getRange(), ( 0, 20 ) )
b.setProgress( 10 )
self.assertEqual( b.getProgress(), 10 )
b.setText( "woteva" )
self.assertEqual( b.getText(), "woteva" )
if __name__ == "__main__":
unittest.main()
|
DoubleNegativeVisualEffects/gaffer
|
python/GafferUITest/ProgressBarTest.py
|
Python
|
bsd-3-clause
| 2,664
|
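Outside the test harness the same accessors drive a live widget; a minimal sketch (window and layout handling omitted, GafferUI assumed importable):

import GafferUI

bar = GafferUI.ProgressBar( 0, ( 0, 20 ), "copying %p%" )
bar.setProgress( 10 )
assert bar.getProgress() == 10
bar.setText( "done %p%" )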
from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import gettext_lazy
def add_level_messages(storage):
"""
Add 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super().enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, update that constant also.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super().disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests:
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS='',
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__, self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Return the storage backend, setting its loaded data to the ``data``
argument.
        This method prevents the storage ``_get`` method from being called so
        that other parts of the storage backend can be tested independently of
        the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, gettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, messages are properly stored and
retrieved across the full request/redirect/response cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels.keys():
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Messages persist properly when multiple POSTs are made before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
When the middleware is disabled, an exception is raised when one
attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
with self.assertRaises(MessageFailure):
self.client.post(add_url, data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
When the middleware is disabled, an exception is not raised
        if 'fail_silently' is True.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Return the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([
Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2', extra_tags='tag'),
])
def test_existing_read(self):
"""
Reading the existing storage doesn't cause the data to be lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', '', 'extra-tag debug', 'warning', 'error', 'success'])
def test_level_tag(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.level_tag for msg in storage]
self.assertEqual(tags, ['info', '', 'debug', 'warning', 'error', 'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
})
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
|
MoritzS/django
|
tests/messages_tests/base.py
|
Python
|
bsd-3-clause
| 13,842
|
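The storage backends exercised by BaseTests sit behind the ordinary django.contrib.messages API; for reference, a view-side usage sketch (the view and redirect target are hypothetical, message text borrowed from add_level_messages above):

from django.contrib import messages
from django.shortcuts import redirect

def save_profile(request):
    messages.add_message(request, messages.INFO, 'A generic info message')
    messages.success(request, 'This was a triumph.', extra_tags='extra-tag')
    return redirect('show_message')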
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import copy
import logging
import re
import shlex
import sys
import time
import os
from webkitpy.common.system import path
from webkitpy.common.system.profiler import ProfilerFactory
_log = logging.getLogger(__name__)
DRIVER_START_TIMEOUT_SECS = 30
class DriverInput(object):
def __init__(self, test_name, timeout, image_hash, should_run_pixel_test, args):
self.test_name = test_name
self.timeout = timeout # in ms
self.image_hash = image_hash
self.should_run_pixel_test = should_run_pixel_test
self.args = args
class DriverOutput(object):
"""Groups information about a output from driver for easy passing
and post-processing of data."""
def __init__(self, text, image, image_hash, audio, crash=False,
test_time=0, measurements=None, timeout=False, error='', crashed_process_name='??',
crashed_pid=None, crash_log=None, leak=False, leak_log=None, pid=None):
# FIXME: Args could be renamed to better clarify what they do.
self.text = text
self.image = image # May be empty-string if the test crashes.
self.image_hash = image_hash
self.image_diff = None # image_diff gets filled in after construction.
self.audio = audio # Binary format is port-dependent.
self.crash = crash
self.crashed_process_name = crashed_process_name
self.crashed_pid = crashed_pid
self.crash_log = crash_log
self.leak = leak
self.leak_log = leak_log
self.test_time = test_time
self.measurements = measurements
self.timeout = timeout
self.error = error # stderr output
self.pid = pid
def has_stderr(self):
return bool(self.error)
class DeviceFailure(Exception):
pass
class Driver(object):
"""object for running test(s) using content_shell or other driver."""
def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
"""Initialize a Driver to subsequently run tests.
Typically this routine will spawn content_shell in a config
ready for subsequent input.
port - reference back to the port object.
worker_number - identifier for a particular worker/driver instance
"""
self._port = port
self._worker_number = worker_number
self._no_timeout = no_timeout
self._driver_tempdir = None
# content_shell can report back subprocess crashes by printing
# "#CRASHED - PROCESSNAME". Since those can happen at any time
# and ServerProcess won't be aware of them (since the actual tool
# didn't crash, just a subprocess) we record the crashed subprocess name here.
self._crashed_process_name = None
self._crashed_pid = None
# content_shell can report back subprocesses that became unresponsive
# This could mean they crashed.
self._subprocess_was_unresponsive = False
# content_shell can report back subprocess DOM-object leaks by printing
# "#LEAK". This leak detection is enabled only when the flag
# --enable-leak-detection is passed to content_shell.
self._leaked = False
# stderr reading is scoped on a per-test (not per-block) basis, so we store the accumulated
# stderr output, as well as if we've seen #EOF on this driver instance.
# FIXME: We should probably remove _read_first_block and _read_optional_image_block and
# instead scope these locally in run_test.
self.error_from_test = str()
self.err_seen_eof = False
self._server_process = None
self._current_cmd_line = None
self._measurements = {}
if self._port.get_option("profile"):
profiler_name = self._port.get_option("profiler")
self._profiler = ProfilerFactory.create_profiler(self._port.host,
self._port._path_to_driver(), self._port.results_directory(), profiler_name)
else:
self._profiler = None
def __del__(self):
self.stop()
def run_test(self, driver_input, stop_when_done):
"""Run a single test and return the results.
Note that it is okay if a test times out or crashes and leaves
the driver in an indeterminate state. The upper layers of the program
are responsible for cleaning up and ensuring things are okay.
Returns a DriverOutput object.
"""
start_time = time.time()
stdin_deadline = start_time + int(driver_input.timeout) / 2000.0
self.start(driver_input.should_run_pixel_test, driver_input.args, stdin_deadline)
test_begin_time = time.time()
self.error_from_test = str()
self.err_seen_eof = False
command = self._command_from_driver_input(driver_input)
deadline = test_begin_time + int(driver_input.timeout) / 1000.0
self._server_process.write(command)
text, audio = self._read_first_block(deadline) # First block is either text or audio
image, actual_image_hash = self._read_optional_image_block(deadline) # The second (optional) block is image data.
crashed = self.has_crashed()
timed_out = self._server_process.timed_out
pid = self._server_process.pid()
leaked = self._leaked
if not crashed:
sanitizer = self._port.output_contains_sanitizer_messages(self.error_from_test)
if sanitizer:
self.error_from_test = 'OUTPUT CONTAINS "' + sanitizer + '", so we are treating this test as if it crashed, even though it did not.\n\n' + self.error_from_test
crashed = True
self._crashed_process_name = "unknown process name"
self._crashed_pid = 0
if stop_when_done or crashed or timed_out or leaked:
# We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
# In the timeout case, we kill the hung process as well.
out, err = self._server_process.stop(self._port.driver_stop_timeout() if stop_when_done else 0.0)
if out:
text += out
if err:
self.error_from_test += err
self._server_process = None
crash_log = None
if crashed:
self.error_from_test, crash_log = self._get_crash_log(text, self.error_from_test, newer_than=start_time)
# If we don't find a crash log use a placeholder error message instead.
if not crash_log:
pid_str = str(self._crashed_pid) if self._crashed_pid else "unknown pid"
crash_log = 'No crash log found for %s:%s.\n' % (self._crashed_process_name, pid_str)
# If we were unresponsive append a message informing there may not have been a crash.
if self._subprocess_was_unresponsive:
crash_log += 'Process failed to become responsive before timing out.\n'
# Print stdout and stderr to the placeholder crash log; we want as much context as possible.
if self.error_from_test:
crash_log += '\nstdout:\n%s\nstderr:\n%s\n' % (text, self.error_from_test)
return DriverOutput(text, image, actual_image_hash, audio,
crash=crashed, test_time=time.time() - test_begin_time, measurements=self._measurements,
timeout=timed_out, error=self.error_from_test,
crashed_process_name=self._crashed_process_name,
crashed_pid=self._crashed_pid, crash_log=crash_log,
leak=leaked, leak_log=self._leak_log,
pid=pid)
def _get_crash_log(self, stdout, stderr, newer_than):
return self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, stdout, stderr, newer_than)
# FIXME: Seems this could just be inlined into callers.
@classmethod
def _command_wrapper(cls, wrapper_option):
# Hook for injecting valgrind or other runtime instrumentation,
# used by e.g. tools/valgrind/valgrind_tests.py.
return shlex.split(wrapper_option) if wrapper_option else []
HTTP_DIR = "http/tests/"
HTTP_LOCAL_DIR = "http/tests/local/"
def is_http_test(self, test_name):
return test_name.startswith(self.HTTP_DIR) and not test_name.startswith(self.HTTP_LOCAL_DIR)
def test_to_uri(self, test_name):
"""Convert a test name to a URI.
Tests which have an 'https' directory in their paths (e.g.
'/http/tests/security/mixedContent/https/test1.html') or '.https.' in
their name (e.g. 'http/tests/security/mixedContent/test1.https.html') will
be loaded over HTTPS; all other tests over HTTP.
"""
if not self.is_http_test(test_name):
return path.abspath_to_uri(self._port.host.platform, self._port.abspath_for_test(test_name))
relative_path = test_name[len(self.HTTP_DIR):]
if "/https/" in test_name or ".https." in test_name:
return "https://127.0.0.1:8443/" + relative_path
return "http://127.0.0.1:8000/" + relative_path
def uri_to_test(self, uri):
"""Return the base layout test name for a given URI.
This returns the test name for a given URI, e.g., if you passed in
"file:///src/LayoutTests/fast/html/keygen.html" it would return
"fast/html/keygen.html".
"""
if uri.startswith("file:///"):
prefix = path.abspath_to_uri(self._port.host.platform, self._port.layout_tests_dir())
if not prefix.endswith('/'):
prefix += '/'
return uri[len(prefix):]
if uri.startswith("http://"):
return uri.replace('http://127.0.0.1:8000/', self.HTTP_DIR)
if uri.startswith("https://"):
return uri.replace('https://127.0.0.1:8443/', self.HTTP_DIR)
raise NotImplementedError('unknown url type: %s' % uri)
def has_crashed(self):
if self._server_process is None:
return False
if self._crashed_process_name:
return True
if self._server_process.has_crashed():
self._crashed_process_name = self._server_process.name()
self._crashed_pid = self._server_process.pid()
return True
return False
def start(self, pixel_tests, per_test_args, deadline):
new_cmd_line = self.cmd_line(pixel_tests, per_test_args)
if not self._server_process or new_cmd_line != self._current_cmd_line:
self._start(pixel_tests, per_test_args)
self._run_post_start_tasks()
def _setup_environ_for_driver(self, environment):
if self._profiler:
environment = self._profiler.adjusted_environment(environment)
return environment
def _start(self, pixel_tests, per_test_args, wait_for_ready=True):
self.stop()
self._driver_tempdir = self._port._filesystem.mkdtemp(prefix='%s-' % self._port.driver_name())
server_name = self._port.driver_name()
environment = self._port.setup_environ_for_server(server_name)
environment = self._setup_environ_for_driver(environment)
self._crashed_process_name = None
self._crashed_pid = None
self._leaked = False
self._leak_log = None
cmd_line = self.cmd_line(pixel_tests, per_test_args)
self._server_process = self._port._server_process_constructor(self._port, server_name, cmd_line, environment, logging=self._port.get_option("driver_logging"))
self._server_process.start()
self._current_cmd_line = cmd_line
if wait_for_ready:
deadline = time.time() + DRIVER_START_TIMEOUT_SECS
if not self._wait_for_server_process_output(self._server_process, deadline, '#READY'):
_log.error("content_shell took too long to startup.")
def _wait_for_server_process_output(self, server_process, deadline, text):
output = ''
line = server_process.read_stdout_line(deadline)
while not server_process.timed_out and not server_process.has_crashed() and not text in line.rstrip():
output += line
line = server_process.read_stdout_line(deadline)
if server_process.timed_out or server_process.has_crashed():
_log.error('Failed to start the %s process: \n%s' % (server_process.name(), output))
return False
return True
def _run_post_start_tasks(self):
# Remote drivers may override this to delay post-start tasks until the server has ack'd.
if self._profiler:
self._profiler.attach_to_pid(self._pid_on_target())
def _pid_on_target(self):
# Remote drivers will override this method to return the pid on the device.
return self._server_process.pid()
def stop(self, timeout_secs=0.0):
if self._server_process:
self._server_process.stop(timeout_secs)
self._server_process = None
if self._profiler:
self._profiler.profile_after_exit()
if self._driver_tempdir:
self._port._filesystem.rmtree(str(self._driver_tempdir))
self._driver_tempdir = None
self._current_cmd_line = None
def cmd_line(self, pixel_tests, per_test_args):
cmd = self._command_wrapper(self._port.get_option('wrapper'))
cmd.append(self._port._path_to_driver())
if self._no_timeout:
cmd.append('--no-timeout')
cmd.extend(self._port.get_option('additional_driver_flag', []))
cmd.extend(self._port.additional_driver_flag())
if self._port.get_option('enable_leak_detection'):
cmd.append('--enable-leak-detection')
cmd.extend(per_test_args)
cmd.append('-')
return cmd
def _check_for_driver_crash(self, error_line):
if error_line == "#CRASHED\n":
# This is used on Windows to report that the process has crashed
# See http://trac.webkit.org/changeset/65537.
self._crashed_process_name = self._server_process.name()
self._crashed_pid = self._server_process.pid()
elif (error_line.startswith("#CRASHED - ")
or error_line.startswith("#PROCESS UNRESPONSIVE - ")):
# WebKitTestRunner uses this to report that the WebProcess subprocess crashed.
match = re.match('#(?:CRASHED|PROCESS UNRESPONSIVE) - (\S+)', error_line)
self._crashed_process_name = match.group(1) if match else 'WebProcess'
match = re.search('pid (\d+)', error_line)
pid = int(match.group(1)) if match else None
self._crashed_pid = pid
# FIXME: delete this after we're sure this code is working :)
_log.debug('%s crash, pid = %s, error_line = %s' % (self._crashed_process_name, str(pid), error_line))
if error_line.startswith("#PROCESS UNRESPONSIVE - "):
self._subprocess_was_unresponsive = True
self._port.sample_process(self._crashed_process_name, self._crashed_pid)
# We want to show this since it's not a regular crash and probably we don't have a crash log.
self.error_from_test += error_line
return True
return self.has_crashed()
def _check_for_leak(self, error_line):
if error_line.startswith("#LEAK - "):
self._leaked = True
match = re.match('#LEAK - (\S+) pid (\d+) (.+)\n', error_line)
self._leak_log = match.group(3)
return self._leaked
def _command_from_driver_input(self, driver_input):
# FIXME: performance tests pass in full URLs instead of test names.
if driver_input.test_name.startswith('http://') or driver_input.test_name.startswith('https://') or driver_input.test_name == ('about:blank'):
command = driver_input.test_name
elif self.is_http_test(driver_input.test_name):
command = self.test_to_uri(driver_input.test_name)
else:
command = self._port.abspath_for_test(driver_input.test_name)
if sys.platform == 'cygwin':
command = path.cygpath(command)
assert not driver_input.image_hash or driver_input.should_run_pixel_test
# ' is the separator between arguments.
if self._port.supports_per_test_timeout():
command += "'--timeout'%s" % driver_input.timeout
if driver_input.should_run_pixel_test:
command += "'--pixel-test"
if driver_input.image_hash:
command += "'" + driver_input.image_hash
return command + "\n"
def _read_first_block(self, deadline):
# returns (text_content, audio_content)
block = self._read_block(deadline)
if block.malloc:
self._measurements['Malloc'] = float(block.malloc)
if block.js_heap:
self._measurements['JSHeap'] = float(block.js_heap)
if block.content_type == 'audio/wav':
return (None, block.decoded_content)
return (block.decoded_content, None)
def _read_optional_image_block(self, deadline):
# returns (image, actual_image_hash)
block = self._read_block(deadline, wait_for_stderr_eof=True)
if block.content and block.content_type == 'image/png':
return (block.decoded_content, block.content_hash)
return (None, block.content_hash)
def _read_header(self, block, line, header_text, header_attr, header_filter=None):
if line.startswith(header_text) and getattr(block, header_attr) is None:
value = line.split()[1]
if header_filter:
value = header_filter(value)
setattr(block, header_attr, value)
return True
return False
def _process_stdout_line(self, block, line):
if (self._read_header(block, line, 'Content-Type: ', 'content_type')
or self._read_header(block, line, 'Content-Transfer-Encoding: ', 'encoding')
or self._read_header(block, line, 'Content-Length: ', '_content_length', int)
or self._read_header(block, line, 'ActualHash: ', 'content_hash')
or self._read_header(block, line, 'DumpMalloc: ', 'malloc')
or self._read_header(block, line, 'DumpJSHeap: ', 'js_heap')
or self._read_header(block, line, 'StdinPath', 'stdin_path')):
return
# Note, we're not reading ExpectedHash: here, but we could.
# If the line wasn't a header, we just append it to the content.
block.content += line
def _strip_eof(self, line):
if line and line.endswith("#EOF\n"):
return line[:-5], True
if line and line.endswith("#EOF\r\n"):
_log.error("Got a CRLF-terminated #EOF - this is a driver bug.")
return line[:-6], True
return line, False
def _read_block(self, deadline, wait_for_stderr_eof=False):
block = ContentBlock()
out_seen_eof = False
while not self.has_crashed():
if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof):
break
if self.err_seen_eof:
out_line = self._server_process.read_stdout_line(deadline)
err_line = None
elif out_seen_eof:
out_line = None
err_line = self._server_process.read_stderr_line(deadline)
else:
out_line, err_line = self._server_process.read_either_stdout_or_stderr_line(deadline)
if self._server_process.timed_out or self.has_crashed():
break
if out_line:
assert not out_seen_eof
out_line, out_seen_eof = self._strip_eof(out_line)
if err_line:
assert not self.err_seen_eof
err_line, self.err_seen_eof = self._strip_eof(err_line)
if out_line:
if out_line[-1] != "\n":
_log.error("Last character read from DRT stdout line was not a newline! This indicates either a NRWT or DRT bug.")
content_length_before_header_check = block._content_length
self._process_stdout_line(block, out_line)
# FIXME: Unlike HTTP, DRT dumps the content right after printing a Content-Length header.
# Don't wait until we're done with headers, just read the binary blob right now.
if content_length_before_header_check != block._content_length:
if block._content_length > 0:
block.content = self._server_process.read_stdout(deadline, block._content_length)
else:
_log.error("Received content of type %s with Content-Length of 0! This indicates a bug in %s.",
block.content_type, self._server_process.name())
if err_line:
if self._check_for_driver_crash(err_line):
break
if self._check_for_leak(err_line):
break
self.error_from_test += err_line
block.decode_content()
return block
class ContentBlock(object):
def __init__(self):
self.content_type = None
self.encoding = None
self.content_hash = None
self._content_length = None
# Content is treated as binary data even though the text output is usually UTF-8.
self.content = str() # FIXME: Should be bytearray() once we require Python 2.6.
self.decoded_content = None
self.malloc = None
self.js_heap = None
self.stdin_path = None
def decode_content(self):
if self.encoding == 'base64' and self.content is not None:
self.decoded_content = base64.b64decode(self.content)
else:
self.decoded_content = self.content
|
XiaosongWei/blink-crosswalk
|
Tools/Scripts/webkitpy/layout_tests/port/driver.py
|
Python
|
bsd-3-clause
| 23,508
|
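A rough sketch of how a caller drives a single test with the classes above (the port object comes from webkitpy's Host/Port machinery and is only hinted at here):

# Hypothetical: 'port' is a webkitpy Port created by the layout test runner.
driver = Driver(port, worker_number=0, pixel_tests=True)
driver_input = DriverInput('fast/html/keygen.html', timeout=6000,
                           image_hash=None, should_run_pixel_test=True, args=[])
output = driver.run_test(driver_input, stop_when_done=True)
if output.crash:
    print(output.crash_log)
else:
    print(output.text)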
# Authors: Mainak Jas <mainak@neuro.hut.fi>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Romain Trachel <trachelr@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from .mixin import TransformerMixin
from .. import pick_types
from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
band_stop_filter)
from ..time_frequency import multitaper_psd
from ..externals import six
from ..utils import _check_type_picks, deprecated
class Scaler(TransformerMixin):
"""Standardizes data across channels
Parameters
----------
info : instance of Info
The measurement info
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
info : instance of Info
The measurement info
ch_mean_ : dict
The mean value for each channel type
std_ : dict
The standard deviation for each channel type
"""
def __init__(self, info, with_mean=True, with_std=True):
self.info = info
self.with_mean = with_mean
self.with_std = with_std
self.ch_mean_ = dict() # TODO rename attribute
self.std_ = dict() # TODO rename attribute
def fit(self, epochs_data, y):
"""Standardizes data across channels
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data to concatenate channels.
y : array, shape (n_epochs,)
The label for each epoch.
Returns
-------
self : instance of Scaler
Returns the modified instance.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
X = np.atleast_3d(epochs_data)
picks_list = dict()
picks_list['mag'] = pick_types(self.info, meg='mag', ref_meg=False,
exclude='bads')
picks_list['grad'] = pick_types(self.info, meg='grad', ref_meg=False,
exclude='bads')
        picks_list['eeg'] = pick_types(self.info, meg=False, eeg=True,
                                       ref_meg=False, exclude='bads')
self.picks_list_ = picks_list
for key, this_pick in picks_list.items():
if self.with_mean:
ch_mean = X[:, this_pick, :].mean(axis=1)[:, None, :]
self.ch_mean_[key] = ch_mean # TODO rename attribute
if self.with_std:
                ch_std = X[:, this_pick, :].std(axis=1)[:, None, :]
self.std_[key] = ch_std # TODO rename attribute
return self
def transform(self, epochs_data, y=None):
"""Standardizes data across channels
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data concatenated over channels.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
X = np.atleast_3d(epochs_data)
for key, this_pick in six.iteritems(self.picks_list_):
if self.with_mean:
X[:, this_pick, :] -= self.ch_mean_[key]
if self.with_std:
X[:, this_pick, :] /= self.std_[key]
return X
def inverse_transform(self, epochs_data, y=None):
""" Inverse standardization of data across channels
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data concatenated over channels.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
X = np.atleast_3d(epochs_data)
for key, this_pick in six.iteritems(self.picks_list_):
if self.with_mean:
X[:, this_pick, :] += self.ch_mean_[key]
if self.with_std:
X[:, this_pick, :] *= self.std_[key]
return X
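# Illustrative usage sketch, not part of the original module: how Scaler is meant to be
# driven in a fit/transform workflow. `epochs` is assumed to be an existing mne.Epochs
# object; everything else mirrors the class above.
def _example_scaler_usage(epochs):
    """Standardize epochs data per channel type, then undo the scaling."""
    scaler = Scaler(epochs.info, with_mean=True, with_std=True)
    X = epochs.get_data()          # shape (n_epochs, n_channels, n_times)
    y = epochs.events[:, -1]       # one integer label per epoch
    X_scaled = scaler.fit(X, y).transform(X.copy())        # transform modifies its input in place
    X_restored = scaler.inverse_transform(X_scaled.copy())
    return X_scaled, X_restored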
class EpochsVectorizer(TransformerMixin):
"""EpochsVectorizer transforms epoch data to fit into a scikit-learn pipeline.
Parameters
----------
info : instance of Info
The measurement info.
Attributes
----------
n_channels : int
The number of channels.
n_times : int
The number of time points.
"""
def __init__(self, info=None):
self.info = info
self.n_channels = None
self.n_times = None
def fit(self, epochs_data, y):
"""For each epoch, concatenate data from different channels into a single
feature vector.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data to concatenate channels.
y : array, shape (n_epochs,)
The label for each epoch.
Returns
-------
        self : instance of EpochsVectorizer
returns the modified instance
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
return self
def transform(self, epochs_data, y=None):
"""For each epoch, concatenate data from different channels into a single
feature vector.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
X : array, shape (n_epochs, n_channels * n_times)
The data concatenated over channels
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
n_epochs, n_channels, n_times = epochs_data.shape
X = epochs_data.reshape(n_epochs, n_channels * n_times)
# save attributes for inverse_transform
self.n_epochs = n_epochs
self.n_channels = n_channels
self.n_times = n_times
return X
def inverse_transform(self, X, y=None):
"""For each epoch, reshape a feature vector into the original data shape
Parameters
----------
X : array, shape (n_epochs, n_channels * n_times)
The feature vector concatenated over channels
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The original data
"""
if not isinstance(X, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(X))
return X.reshape(-1, self.n_channels, self.n_times)
@deprecated("Class 'ConcatenateChannels' has been renamed to "
"'EpochsVectorizer' and will be removed in release 0.11.")
class ConcatenateChannels(EpochsVectorizer):
pass
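# Illustrative sketch, not part of the original module: EpochsVectorizer flattens
# (n_epochs, n_channels, n_times) into (n_epochs, n_channels * n_times), and
# inverse_transform restores the original shape. The array sizes are made up.
def _example_vectorizer_roundtrip():
    data = np.zeros((10, 5, 100))        # 10 epochs, 5 channels, 100 time points
    labels = np.zeros(10)
    vectorizer = EpochsVectorizer()
    X = vectorizer.fit(data, labels).transform(data)      # shape (10, 500)
    restored = vectorizer.inverse_transform(X)            # shape (10, 5, 100)
    return X.shape, restored.shape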
class PSDEstimator(TransformerMixin):
"""Compute power spectrum density (PSD) using a multi-taper method
Parameters
----------
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
n_jobs : int
Number of parallel jobs to use (only used if adaptive=True).
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
adaptive=False, low_bias=True, n_jobs=1,
normalization='length', verbose=None):
self.sfreq = sfreq
self.fmin = fmin
self.fmax = fmax
self.bandwidth = bandwidth
self.adaptive = adaptive
self.low_bias = low_bias
self.n_jobs = n_jobs
self.verbose = verbose
self.normalization = normalization
def fit(self, epochs_data, y):
"""Compute power spectrum density (PSD) using a multi-taper method
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : array, shape (n_epochs,)
The label for each epoch
Returns
-------
self : instance of PSDEstimator
returns the modified instance
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
return self
def transform(self, epochs_data, y=None):
"""Compute power spectrum density (PSD) using a multi-taper method
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
psd : array, shape (n_signals, len(freqs)) or (len(freqs),)
The computed PSD.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
n_epochs, n_channels, n_times = epochs_data.shape
X = epochs_data.reshape(n_epochs * n_channels, n_times)
psd, _ = multitaper_psd(x=X, sfreq=self.sfreq, fmin=self.fmin,
fmax=self.fmax, bandwidth=self.bandwidth,
adaptive=self.adaptive, low_bias=self.low_bias,
n_jobs=self.n_jobs,
normalization=self.normalization,
verbose=self.verbose)
_, n_freqs = psd.shape
psd = psd.reshape(n_epochs, n_channels, n_freqs)
return psd
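# Illustrative sketch, not part of the original module: PSDEstimator maps epochs of
# shape (n_epochs, n_channels, n_times) to spectra of shape (n_epochs, n_channels,
# n_freqs). The 250 Hz sampling rate and the 8-30 Hz band are made-up values.
def _example_psd_usage(epochs_data):
    psd = PSDEstimator(sfreq=250., fmin=8., fmax=30.)
    return psd.fit(epochs_data, y=None).transform(epochs_data)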
class FilterEstimator(TransformerMixin):
"""Estimator to filter RtEpochs
Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
filter to the channels selected by "picks".
l_freq and h_freq are the frequencies below which and above which,
respectively, to filter out of the data. Thus the uses are:
- l_freq < h_freq: band-pass filter
- l_freq > h_freq: band-stop filter
        - l_freq is not None, h_freq is None: high-pass filter
        - l_freq is None, h_freq is not None: low-pass filter
If n_jobs > 1, more memory is required as "len(picks) * n_times"
additional time points need to be temporarily stored in memory.
Parameters
----------
info : instance of Info
Measurement info.
l_freq : float | None
Low cut-off frequency in Hz. If None the data are only low-passed.
h_freq : float | None
High cut-off frequency in Hz. If None the data are only
high-passed.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str (Default: '10s') | int | None
Length of the filter to use. If None or "len(x) < filter_length",
the filter length used is len(x). Otherwise, if int, overlap-add
filtering with a filter of the specified length in samples) is
used (faster for long signals). If str, a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
to the shortest power-of-two length at least that duration.
l_trans_bandwidth : float
Width of the transition band at the low cut-off frequency in Hz.
h_trans_bandwidth : float
Width of the transition band at the high cut-off frequency in Hz.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fft'.
method : str
'fft' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt).
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
def __init__(self, info, l_freq, h_freq, picks=None, filter_length='10s',
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1,
method='fft', iir_params=None, verbose=None):
self.info = info
self.l_freq = l_freq
self.h_freq = h_freq
self.picks = _check_type_picks(picks)
self.filter_length = filter_length
self.l_trans_bandwidth = l_trans_bandwidth
self.h_trans_bandwidth = h_trans_bandwidth
self.n_jobs = n_jobs
self.method = method
self.iir_params = iir_params
def fit(self, epochs_data, y):
"""Filters data
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : array, shape (n_epochs,)
The label for each epoch.
Returns
-------
self : instance of FilterEstimator
Returns the modified instance
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
if self.picks is None:
self.picks = pick_types(self.info, meg=True, eeg=True,
ref_meg=False, exclude=[])
if self.l_freq == 0:
self.l_freq = None
if self.h_freq is not None and self.h_freq > (self.info['sfreq'] / 2.):
self.h_freq = None
if self.l_freq is not None and not isinstance(self.l_freq, float):
self.l_freq = float(self.l_freq)
if self.h_freq is not None and not isinstance(self.h_freq, float):
self.h_freq = float(self.h_freq)
if self.info['lowpass'] is None or (self.h_freq is not None and
(self.l_freq is None or
self.l_freq < self.h_freq) and
self.h_freq <
self.info['lowpass']):
self.info['lowpass'] = self.h_freq
if self.info['highpass'] is None or (self.l_freq is not None and
(self.h_freq is None or
self.l_freq < self.h_freq) and
self.l_freq >
self.info['highpass']):
self.info['highpass'] = self.l_freq
return self
def transform(self, epochs_data, y=None):
"""Filters data
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data after filtering
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
if self.l_freq is None and self.h_freq is not None:
epochs_data = \
low_pass_filter(epochs_data, self.info['sfreq'], self.h_freq,
filter_length=self.filter_length,
trans_bandwidth=self.l_trans_bandwidth,
method=self.method, iir_params=self.iir_params,
picks=self.picks, n_jobs=self.n_jobs,
copy=False, verbose=False)
if self.l_freq is not None and self.h_freq is None:
epochs_data = \
high_pass_filter(epochs_data, self.info['sfreq'], self.l_freq,
filter_length=self.filter_length,
trans_bandwidth=self.h_trans_bandwidth,
method=self.method,
iir_params=self.iir_params,
picks=self.picks, n_jobs=self.n_jobs,
copy=False, verbose=False)
if self.l_freq is not None and self.h_freq is not None:
if self.l_freq < self.h_freq:
epochs_data = \
band_pass_filter(epochs_data, self.info['sfreq'],
self.l_freq, self.h_freq,
filter_length=self.filter_length,
l_trans_bandwidth=self.l_trans_bandwidth,
h_trans_bandwidth=self.h_trans_bandwidth,
method=self.method,
iir_params=self.iir_params,
picks=self.picks, n_jobs=self.n_jobs,
copy=False, verbose=False)
else:
epochs_data = \
band_stop_filter(epochs_data, self.info['sfreq'],
self.h_freq, self.l_freq,
filter_length=self.filter_length,
l_trans_bandwidth=self.h_trans_bandwidth,
h_trans_bandwidth=self.l_trans_bandwidth,
method=self.method,
iir_params=self.iir_params,
picks=self.picks, n_jobs=self.n_jobs,
copy=False, verbose=False)
return epochs_data
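# Illustrative sketch, not part of the original module: how the l_freq/h_freq
# combinations documented above select the filter type. `info` is assumed to come
# from an existing Raw or Epochs object; the cut-off frequencies are made up.
def _example_filter_choices(info, epochs_data, y):
    band_pass = FilterEstimator(info, l_freq=8., h_freq=30.)    # l_freq < h_freq
    band_stop = FilterEstimator(info, l_freq=60., h_freq=50.)   # l_freq > h_freq
    low_pass = FilterEstimator(info, l_freq=None, h_freq=40.)   # only a high cut-off
    high_pass = FilterEstimator(info, l_freq=1., h_freq=None)   # only a low cut-off
    filtered = band_pass.fit(epochs_data, y).transform(epochs_data)
    return filtered, (band_stop, low_pass, high_pass)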
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/mne-python-0.10/mne/decoding/transformer.py
|
Python
|
bsd-3-clause
| 19,949
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing Cloud CDN cache invalidations."""
import sys
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import arg_parsers
class ListCacheInvalidations(base_classes.BaseLister):
"""List Cloud CDN cache invalidations for a URL map."""
detailed_help = {
'DESCRIPTION': """\
List Cloud CDN cache invalidations for a URL map. A cache invalidation instructs
Cloud CDN to stop using cached content. You can list invalidations to check
which have completed.
""",
}
@staticmethod
def _Flags(parser):
parser.add_argument(
'--limit',
type=arg_parsers.BoundedInt(1, sys.maxint, unlimited=True),
help='The maximum number of invalidations to list.')
@staticmethod
def Args(parser):
parser.add_argument('urlmap', help='The name of the URL map.')
@property
def resource_type(self):
return 'invalidations'
@property
def global_service(self):
return self.compute.globalOperations
def GetUrlMapGetRequest(self, args):
return (
self.compute.urlMaps,
'Get',
self.messages.ComputeUrlMapsGetRequest(
project=self.project,
urlMap=args.urlmap))
def GetResources(self, args, errors):
get_request = self.GetUrlMapGetRequest(args)
new_errors = []
objects = list(request_helper.MakeRequests(
requests=[get_request],
http=self.http,
batch_url=self.batch_url,
errors=new_errors))
errors.extend(new_errors)
if new_errors:
utils.RaiseToolException(
errors,
error_message='Could not fetch resource:')
urlmap_id = objects[0].id
filter_expr = ('(operationType eq invalidateCache) (targetId eq '
'{urlmap_id})').format(urlmap_id=urlmap_id)
max_results = args.limit or constants.MAX_RESULTS_PER_PAGE
project = self.project
requests = [
(self.global_service, 'AggregatedList',
self.global_service.GetRequestType('AggregatedList')(
filter=filter_expr,
maxResults=max_results,
orderBy='creationTimestamp desc',
project=project))
]
return request_helper.MakeRequests(requests=requests,
http=self.http,
batch_url=self.batch_url,
errors=errors)
def Run(self, args):
args.names = []
args.regexp = None
args.uri = None
return super(ListCacheInvalidations, self).Run(args)
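# Illustrative note, not part of the original file: for a URL map whose numeric id
# is, say, 1234567890, GetResources above issues a global AggregatedList request
# with the filter expression
#   (operationType eq invalidateCache) (targetId eq 1234567890)
# ordered by creationTimestamp descending, so the newest invalidations are listed first.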
|
Sorsly/subtle
|
google-cloud-sdk/lib/surface/compute/url_maps/list_cdn_cache_invalidations.py
|
Python
|
mit
| 3,298
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import glob
from automation.utilities.const import COMMAND_MODULE_PREFIX, EXTENSIONS_MOD_PREFIX
def get_repo_root():
"""Returns the path to the source code root directory"""
current_dir = os.path.dirname(os.path.abspath(__file__))
while not os.path.exists(os.path.join(current_dir, 'CONTRIBUTING.rst')):
current_dir = os.path.dirname(current_dir)
return current_dir
def get_all_module_paths():
"""List all core and command modules"""
return list(get_core_modules_paths()) + list(get_command_modules_paths(include_prefix=True))
def get_config_dir():
""" Returns the users Azure directory. """
return os.getenv('AZURE_CONFIG_DIR', None) or os.path.expanduser(os.path.join('~', '.azure'))
def get_extension_dir():
""" Returns the extensions directory. """
custom_dir = os.environ.get('AZURE_EXTENSION_DIR')
return os.path.expanduser(custom_dir) if custom_dir else os.path.join(get_config_dir(), 'cliextensions')
def get_extensions_paths(include_prefix=False):
glob_pattern = os.path.normcase('/*/{}*'.format(EXTENSIONS_MOD_PREFIX))
for path in glob.glob(get_extension_dir() + glob_pattern):
name = os.path.basename(path)
if not include_prefix:
name = name[len(EXTENSIONS_MOD_PREFIX):]
yield name, path
def get_command_modules_paths(include_prefix=False):
glob_pattern = os.path.normcase('/src/command_modules/{}*/setup.py'.format(COMMAND_MODULE_PREFIX))
for path in glob.glob(get_repo_root() + glob_pattern):
folder = os.path.dirname(path)
name = os.path.basename(folder)
if not include_prefix:
name = name[len(COMMAND_MODULE_PREFIX):]
yield name, folder
def get_command_modules_paths_with_tests(profile):
return get_module_paths_with_tests(get_command_modules_paths(), profile)
def get_core_modules_paths_with_tests(profile):
if profile == 'latest':
for name, path in get_core_modules_paths():
for root, dirs, files in os.walk(path):
if os.path.basename(root) == 'tests':
if name == 'azure-cli-core':
name = 'core'
yield name, path, root
def get_core_modules_paths():
for path in glob.glob(get_repo_root() + os.path.normcase('/src/*/setup.py')):
yield os.path.basename(os.path.dirname(path)), os.path.dirname(path)
def get_module_paths_with_tests(modules, profile):
for name, path in modules:
name = name.replace(COMMAND_MODULE_PREFIX, '')
test_folder = os.path.join(path, 'azure', 'cli', 'command_modules', name, 'tests', profile)
if os.path.exists(test_folder):
yield name, path, test_folder
def make_dirs(path):
"""Create a directories recursively"""
import errno
try:
os.makedirs(path)
except OSError as exc: # Python <= 2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_test_results_dir(with_timestamp=None, prefix=None):
"""Returns the folder where test results should be saved to. If the folder doesn't exist,
it will be created."""
result = os.path.join(get_repo_root(), 'test_results')
if isinstance(with_timestamp, bool):
from datetime import datetime
with_timestamp = datetime.now()
if with_timestamp:
if prefix:
result = os.path.join(result, with_timestamp.strftime(prefix + '_%Y%m%d_%H%M%S'))
else:
result = os.path.join(result, with_timestamp.strftime('%Y%m%d_%H%M%S'))
if not os.path.exists(result):
make_dirs(result)
if not os.path.exists(result) or not os.path.isdir(result):
raise Exception('Failed to create test result dir {}'.format(result))
return result
def filter_blacklisted_modules(*black_list_modules):
"""Returns the paths to the modules except those in the black list."""
import itertools
existing_modules = list(itertools.chain(get_core_modules_paths(),
get_command_modules_paths()))
black_list_modules = set(black_list_modules)
return list((name, path) for name, path in existing_modules if name not in black_list_modules)
def filter_user_selected_modules(user_input_modules):
import itertools
existing_modules = list(itertools.chain(get_core_modules_paths(),
get_command_modules_paths()))
if user_input_modules:
selected_modules = set(user_input_modules)
extra = selected_modules - set([name for name, _ in existing_modules])
if any(extra):
print('ERROR: These modules do not exist: {}.'.format(', '.join(extra)))
return None
return list((name, module) for name, module in existing_modules
if name in selected_modules)
else:
return list((name, module) for name, module in existing_modules)
def filter_user_selected_modules_with_tests(user_input_modules=None, profile=None):
import itertools
existing_modules = list(itertools.chain(get_core_modules_paths_with_tests(profile),
get_command_modules_paths_with_tests(profile)))
if user_input_modules is not None:
selected_modules = set(user_input_modules)
extra = selected_modules - set([name for name, _, _ in existing_modules])
# don't count extensions as extras
extra = [x for x in extra if not x.startswith('azext_')]
if any(extra):
print('ERROR: These modules do not exist: {}.'.format(', '.join(extra)))
return None
return list((name, module, test) for name, module, test in existing_modules
if name in selected_modules)
else:
return list((name, module, test) for name, module, test in existing_modules)
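# Illustrative sketch, not part of the original module: typical use of the helpers
# above from an automation script. The module names passed in are made up.
def _example_select_modules():
    all_modules = get_all_module_paths()                         # every core + command module
    selected = filter_user_selected_modules(['vm', 'storage'])   # None if an unknown name is given
    # Timestamped folder such as <repo>/test_results/nose_20180101_120000
    results_dir = get_test_results_dir(with_timestamp=True, prefix='nose')
    return all_modules, selected, results_dir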
|
yugangw-msft/azure-cli
|
tools/automation/utilities/path.py
|
Python
|
mit
| 6,263
|
# -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.widget import View, Select
from widgetastic_manageiq import (
Accordion, BaseEntitiesView, BootstrapSelect, BreadCrumb, ItemsToolBarViewSelector,
ManageIQTree, SummaryTable, Text, TextInput)
from widgetastic_patternfly import Dropdown, Button
from cfme.base.ui import BaseLoggedInPage
from cfme.exceptions import ItemNotFound, SecurityGroupsNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import navigate_to, navigator, CFMENavigateStep
from cfme.utils.blockers import BZ
from cfme.utils.wait import wait_for
class SecurityGroupToolbar(View):
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
download = Dropdown('Download')
view_selector = View.nested(ItemsToolBarViewSelector)
class SecurityGroupDetailsToolbar(View):
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
download = Button(title='Download summary in PDF format')
class SecurityGroupDetailsAccordion(View):
@View.nested
class properties(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class relationships(Accordion): # noqa
tree = ManageIQTree()
class SecurityGroupDetailsEntities(View):
breadcrumb = BreadCrumb()
title = Text('//div[@id="main-content"]//h1')
properties = SummaryTable(title='Properties')
relationships = SummaryTable(title='Relationships')
smart_management = SummaryTable(title='Smart Management')
firewall_rules = SummaryTable(title="Firewall Rules")
class SecurityGroupAddEntities(View):
breadcrumb = BreadCrumb()
title = Text('//div[@id="main-content"]//h1')
class SecurityGroupAddForm(View):
network_manager = BootstrapSelect(id='ems_id')
name = TextInput(name='name')
description = TextInput(name='description')
cloud_tenant = Select(name='cloud_tenant_id')
add = Button('Add')
cancel = Button('Cancel')
class SecurityGroupView(BaseLoggedInPage):
"""Base view for header and nav checking, navigatable views should inherit this"""
@property
def in_security_groups(self):
return(
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Networks', 'Security Groups'])
class SecurityGroupAllView(SecurityGroupView):
@property
def is_displayed(self):
return (
self.in_security_groups and
self.entities.title.text == 'Security Groups')
toolbar = View.nested(SecurityGroupToolbar)
including_entities = View.include(BaseEntitiesView, use_parent=True)
class SecurityGroupDetailsView(SecurityGroupView):
@property
def is_displayed(self):
expected_title = '{} (Summary)'.format(self.context['object'].name)
return (
self.in_security_groups and
self.entities.title.text == expected_title and
self.entities.breadcrumb.active_location == expected_title)
toolbar = View.nested(SecurityGroupDetailsToolbar)
sidebar = View.nested(SecurityGroupDetailsAccordion)
entities = View.nested(SecurityGroupDetailsEntities)
class SecurityGroupAddView(SecurityGroupView):
@property
def is_displayed(self):
return (
self.in_security_groups and
self.entities.breadcrumb.active_location == 'Add New Security Group' and
self.entities.title.text == 'Add New Security Group')
entities = View.nested(SecurityGroupAddEntities)
form = View.nested(SecurityGroupAddForm)
@attr.s
class SecurityGroup(BaseEntity):
""" Automate Model page of SecurityGroup
Args:
provider (obj): Provider name for Network Manager
name(str): name of the Security Group
description (str): Security Group description
"""
_param_name = "SecurityGroup"
name = attr.ib()
provider = attr.ib()
description = attr.ib(default="")
def refresh(self):
self.provider.refresh_provider_relationships()
self.browser.refresh()
def delete(self, cancel=False, wait=False):
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select('Delete this Security Group',
handle_alert=(not cancel))
# cancel doesn't redirect, confirmation does
view.flush_widget_cache()
if not cancel:
view = self.create_view(SecurityGroupAllView)
            assert view.is_displayed
view.flash.assert_success_message('Delete initiated for 1 Security Group.')
if wait:
wait_for(
lambda: self.name in view.entities.all_entity_names,
message="Wait Security Group to disappear",
fail_condition=True,
num_sec=500,
timeout=1000,
delay=20,
fail_func=self.refresh
)
@property
def exists(self):
try:
navigate_to(self, 'Details')
except SecurityGroupsNotFound:
return False
else:
return True
@attr.s
class SecurityGroupCollection(BaseCollection):
""" Collection object for the :py:class: `cfme.cloud.SecurityGroup`. """
ENTITY = SecurityGroup
def create(self, name, description, provider, cancel=False, wait=False):
"""Create new Security Group.
Args:
provider (obj): Provider name for Network Manager
name (str): name of the Security Group
description (str): Security Group description
cancel (boolean): Cancel Security Group creation
wait (boolean): wait if Security Group created
"""
view = navigate_to(self, 'Add')
changed = view.form.fill({'network_manager': "{} Network Manager".format(provider.name),
'name': name,
'description': description,
'cloud_tenant': 'admin'})
if cancel and changed:
view.form.cancel.click()
flash_message = 'Add of new Security Group was cancelled by the user'
else:
view.form.add.click()
flash_message = 'Security Group "{}" created'.format(name)
# add/cancel should redirect, new view
view = self.create_view(SecurityGroupAllView)
view.flash.assert_success_message(flash_message)
view.entities.paginator.set_items_per_page(500)
sec_groups = self.instantiate(name, provider, description)
if wait:
wait_for(
lambda: sec_groups.name in view.entities.all_entity_names,
message="Wait Security Group to appear",
num_sec=400,
timeout=1000,
delay=20,
fail_func=sec_groups.refresh,
handle_exception=True
)
return sec_groups
# TODO: Delete collection as Delete option is not available on List view and update
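# Illustrative sketch, not part of the original module: how the collection and entity
# classes above are typically exercised from a test. `appliance` and `provider` are
# assumed to be the usual cfme fixtures, and the `security_groups` collection name
# is an assumption.
def _example_security_group_workflow(appliance, provider):
    collection = appliance.collections.security_groups
    sec_group = collection.create(name='test-sg', description='created by example',
                                  provider=provider, wait=True)
    assert sec_group.exists
    sec_group.delete(wait=True)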
@navigator.register(SecurityGroupCollection, 'All')
class SecurityGroupAll(CFMENavigateStep):
VIEW = SecurityGroupAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select('Networks', 'Security Groups')
@navigator.register(SecurityGroup, 'Details')
class Details(CFMENavigateStep):
VIEW = SecurityGroupDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self, *args, **kwargs):
try:
self.prerequisite_view.entities.get_entity(name=self.obj.name, surf_pages=True).click()
except ItemNotFound:
raise SecurityGroupsNotFound("Security Groups {} not found".format(
self.obj.name))
@navigator.register(SecurityGroupCollection, 'Add')
class Add(CFMENavigateStep):
VIEW = SecurityGroupAddView
prerequisite = NavigateToSibling("All")
def step(self, *args, **kwargs):
"""Raises DropdownItemDisabled from widgetastic_patternfly
if no RHOS Network manager present"""
# Todo remove when fixed 1520669
if (BZ(1520669, forced_streams='5.9').blocks and
self.prerequisite_view.flash.messages):
self.prerequisite_view.flash.dismiss()
self.prerequisite_view.toolbar.configuration.item_select('Add a new Security Group')
|
akarol/cfme_tests
|
cfme/cloud/security_groups.py
|
Python
|
gpl-2.0
| 8,515
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtGui import QApplication
from ..info_model import TableInfo, VectorTableInfo, RasterTableInfo
from ..html_elems import HtmlSection, HtmlParagraph, HtmlTable, HtmlTableHeader, HtmlTableCol
class PGTableInfo(TableInfo):
def __init__(self, table):
self.table = table
def generalInfo(self):
ret = []
# if the estimation is less than 100 rows, try to count them - it shouldn't take long time
if self.table.rowCount is None and self.table.estimatedRowCount < 100:
# row count information is not displayed yet, so just block
# table signals to avoid double refreshing (infoViewer->refreshRowCount->tableChanged->infoViewer)
self.table.blockSignals(True)
self.table.refreshRowCount()
self.table.blockSignals(False)
tbl = [
(QApplication.translate("DBManagerPlugin", "Relation type:"),
QApplication.translate("DBManagerPlugin", "View") if self.table.isView else QApplication.translate(
"DBManagerPlugin", "Table")),
(QApplication.translate("DBManagerPlugin", "Owner:"), self.table.owner)
]
if self.table.comment:
tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.table.comment))
tbl.extend([
(QApplication.translate("DBManagerPlugin", "Pages:"), self.table.pages),
(QApplication.translate("DBManagerPlugin", "Rows (estimation):"), self.table.estimatedRowCount )
])
# privileges
# has the user access to this schema?
schema_priv = self.table.database().connector.getSchemaPrivileges(
self.table.schemaName()) if self.table.schema() else None
if schema_priv is None:
pass
elif not schema_priv[1]: # no usage privileges on the schema
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"),
QApplication.translate("DBManagerPlugin",
"<warning> This user doesn't have usage privileges for this schema!") ))
else:
table_priv = self.table.database().connector.getTablePrivileges((self.table.schemaName(), self.table.name))
privileges = []
if table_priv[0]:
privileges.append("select")
if self.table.rowCount is not None or self.table.rowCount >= 0:
tbl.append((QApplication.translate("DBManagerPlugin", "Rows (counted):"),
self.table.rowCount if self.table.rowCount is not None else QApplication.translate(
"DBManagerPlugin", 'Unknown (<a href="action:rows/count">find out</a>)')))
if table_priv[1]: privileges.append("insert")
if table_priv[2]: privileges.append("update")
if table_priv[3]: privileges.append("delete")
priv_string = u", ".join(privileges) if len(privileges) > 0 else QApplication.translate("DBManagerPlugin",
'<warning> This user has no privileges!')
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"), priv_string ))
ret.append(HtmlTable(tbl))
if schema_priv is not None and schema_priv[1]:
if table_priv[0] and not table_priv[1] and not table_priv[2] and not table_priv[3]:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> This user has read-only privileges.")))
if not self.table.isView:
if self.table.rowCount is not None:
if abs(self.table.estimatedRowCount - self.table.rowCount) > 1 and \
(self.table.estimatedRowCount > 2 * self.table.rowCount or
self.table.rowCount > 2 * self.table.estimatedRowCount):
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> There's a significant difference between estimated and real row count. "
'Consider running <a href="action:vacuumanalyze/run">VACUUM ANALYZE</a>.')))
# primary key defined?
if not self.table.isView:
if len(filter(lambda fld: fld.primaryKey, self.table.fields())) <= 0:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> No primary key defined for this table!")))
return ret
def getSpatialInfo(self):
ret = []
info = self.db.connector.getSpatialInfo()
if info is None:
return
tbl = [
(QApplication.translate("DBManagerPlugin", "Library:"), info[0]),
(QApplication.translate("DBManagerPlugin", "Scripts:"), info[3]),
("GEOS:", info[1]),
("Proj:", info[2])
]
ret.append(HtmlTable(tbl))
if info[1] is not None and info[1] != info[2]:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> Version of installed scripts doesn't match version of released scripts!\n"
"This is probably a result of incorrect PostGIS upgrade.")))
if not self.db.connector.has_geometry_columns:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> geometry_columns table doesn't exist!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
elif not self.db.connector.has_geometry_columns_access:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> This user doesn't have privileges to read contents of geometry_columns table!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
return ret
def fieldsDetails(self):
tbl = []
# define the table header
header = (
"#", QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
QApplication.translate("DBManagerPlugin", "Length"), QApplication.translate("DBManagerPlugin", "Null"),
QApplication.translate("DBManagerPlugin", "Default") )
tbl.append(HtmlTableHeader(header))
# add table contents
for fld in self.table.fields():
char_max_len = fld.charMaxLen if fld.charMaxLen is not None and fld.charMaxLen != -1 else ""
is_null_txt = "N" if fld.notNull else "Y"
# make primary key field underlined
attrs = {"class": "underline"} if fld.primaryKey else None
name = HtmlTableCol(fld.name, attrs)
tbl.append((fld.num, name, fld.type2String(), char_max_len, is_null_txt, fld.default2String()))
return HtmlTable(tbl, {"class": "header"})
def triggersDetails(self):
if self.table.triggers() is None or len(self.table.triggers()) <= 0:
return None
ret = []
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Function"),
QApplication.translate("DBManagerPlugin", "Type"), QApplication.translate("DBManagerPlugin", "Enabled") )
tbl.append(HtmlTableHeader(header))
# add table contents
for trig in self.table.triggers():
name = u'%(name)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {"name": trig.name,
"action": "delete"}
(enabled, action) = (QApplication.translate("DBManagerPlugin", "Yes"), "disable") if trig.enabled else (
QApplication.translate("DBManagerPlugin", "No"), "enable")
txt_enabled = u'%(enabled)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {
"name": trig.name, "action": action, "enabled": enabled}
tbl.append((name, trig.function, trig.type2String(), txt_enabled))
ret.append(HtmlTable(tbl, {"class": "header"}))
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
'<a href="action:triggers/enable">Enable all triggers</a> / <a href="action:triggers/disable">Disable all triggers</a>')))
return ret
def rulesDetails(self):
if self.table.rules() is None or len(self.table.rules()) <= 0:
return None
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Definition") )
tbl.append(HtmlTableHeader(header))
# add table contents
for rule in self.table.rules():
name = u'%(name)s (<a href="action:rule/%(name)s/%(action)s">%(action)s</a>)' % {"name": rule.name,
"action": "delete"}
tbl.append((name, rule.definition))
return HtmlTable(tbl, {"class": "header"})
def getTableInfo(self):
ret = TableInfo.getTableInfo(self)
# rules
rules_details = self.rulesDetails()
if rules_details is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Rules'), rules_details))
return ret
class PGVectorTableInfo(PGTableInfo, VectorTableInfo):
def __init__(self, table):
VectorTableInfo.__init__(self, table)
PGTableInfo.__init__(self, table)
def spatialInfo(self):
return VectorTableInfo.spatialInfo(self)
class PGRasterTableInfo(PGTableInfo, RasterTableInfo):
def __init__(self, table):
RasterTableInfo.__init__(self, table)
PGTableInfo.__init__(self, table)
def spatialInfo(self):
return RasterTableInfo.spatialInfo(self)
|
michaelkirk/QGIS
|
python/plugins/db_manager/db_plugins/postgis/info_model.py
|
Python
|
gpl-2.0
| 11,526
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Api serving config collection service implementation.
Contains the implementation for BackendService as defined in api_backend.py.
"""
try:
import json
except ImportError:
import simplejson as json
import logging
from endpoints import api_backend
from endpoints import api_config
from endpoints import api_exceptions
from protorpc import message_types
__all__ = [
'ApiConfigRegistry',
'BackendServiceImpl',
]
class ApiConfigRegistry(object):
"""Registry of active APIs to be registered with Google API Server."""
def __init__(self):
self.__registered_classes = set()
self.__api_configs = set()
self.__api_methods = {}
def register_spi(self, config_contents):
"""Register a single SPI and its config contents.
Args:
config_contents: String containing API configuration.
"""
if config_contents is None:
return
parsed_config = json.loads(config_contents)
self.__register_class(parsed_config)
self.__api_configs.add(config_contents)
self.__register_methods(parsed_config)
def __register_class(self, parsed_config):
"""Register the class implementing this config, so we only add it once.
Args:
parsed_config: The JSON object with the API configuration being added.
Raises:
ApiConfigurationError: If the class has already been registered.
"""
methods = parsed_config.get('methods')
if not methods:
return
service_classes = set()
for method in methods.itervalues():
rosy_method = method.get('rosyMethod')
if rosy_method and '.' in rosy_method:
method_class = rosy_method.split('.', 1)[0]
service_classes.add(method_class)
for service_class in service_classes:
if service_class in self.__registered_classes:
raise api_config.ApiConfigurationError(
'SPI class %s has already been registered.' % service_class)
self.__registered_classes.add(service_class)
def __register_methods(self, parsed_config):
"""Register all methods from the given api config file.
Methods are stored in a map from method_name to rosyMethod,
the name of the ProtoRPC method to be called on the backend.
If no rosyMethod was specified the value will be None.
Args:
parsed_config: The JSON object with the API configuration being added.
"""
methods = parsed_config.get('methods')
if not methods:
return
for method_name, method in methods.iteritems():
self.__api_methods[method_name] = method.get('rosyMethod')
def lookup_api_method(self, api_method_name):
"""Looks an API method up by name to find the backend method to call.
Args:
api_method_name: Name of the method in the API that was called.
Returns:
Name of the ProtoRPC method called on the backend, or None if not found.
"""
return self.__api_methods.get(api_method_name)
def all_api_configs(self):
"""Return a list of all API configration specs as registered above."""
return list(self.__api_configs)
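# Illustrative sketch, not part of the original module: registering one API config
# with the registry above and resolving an API method name to its backend ProtoRPC
# method. The config contents and method names are made up.
def _example_registry_usage():
  registry = ApiConfigRegistry()
  registry.register_spi(json.dumps({
      'methods': {
          'guestbook.greetings.list': {'rosyMethod': 'GreetingsApi.list_greetings'},
      }}))
  backend_method = registry.lookup_api_method('guestbook.greetings.list')
  # backend_method is now 'GreetingsApi.list_greetings'; unknown names return None.
  return backend_method, registry.all_api_configs()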
class BackendServiceImpl(api_backend.BackendService):
"""Implementation of BackendService."""
def __init__(self, api_config_registry, app_revision):
"""Create a new BackendService implementation.
Args:
api_config_registry: ApiConfigRegistry to register and look up configs.
app_revision: string containing the current app revision.
"""
self.__api_config_registry = api_config_registry
self.__app_revision = app_revision
@staticmethod
def definition_name():
"""Override definition_name so that it is not BackendServiceImpl."""
return api_backend.BackendService.definition_name()
def getApiConfigs(self, request):
"""Return a list of active APIs and their configuration files.
Args:
request: A request which may contain an app revision
Returns:
ApiConfigList: A list of API config strings
"""
if request.appRevision and request.appRevision != self.__app_revision:
raise api_exceptions.BadRequestException(
message='API backend app revision %s not the same as expected %s' % (
self.__app_revision, request.appRevision))
configs = self.__api_config_registry.all_api_configs()
return api_backend.ApiConfigList(items=configs)
def logMessages(self, request):
"""Write a log message from the Swarm FE to the log.
Args:
request: A log message request.
Returns:
Void message.
"""
Level = api_backend.LogMessagesRequest.LogMessage.Level
log = logging.getLogger(__name__)
for message in request.messages:
level = message.level if message.level is not None else Level.info
record = logging.LogRecord(name=__name__, level=level.number, pathname='',
lineno='', msg=message.message, args=None,
exc_info=None)
log.handle(record)
return message_types.VoidMessage()
|
taimur97/Feeder
|
server/appengine/endpoints/api_backend_service.py
|
Python
|
gpl-2.0
| 5,564
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from pootle_app.models.suggestion import Suggestion
from pootle_app.models.directory import Directory
from pootle_app.models.permissions import PermissionSet
__all__ = ["Suggestion", "Directory", "PermissionSet"]
|
ttreeagency/PootleTypo3Org
|
pootle/apps/pootle_app/models/__init__.py
|
Python
|
gpl-2.0
| 1,034
|
# -*- coding: utf-8 -*-
import re
import urlparse
from module.plugins.captcha.ReCaptcha import ReCaptcha
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class Keep2ShareCc(SimpleHoster):
__name__ = "Keep2ShareCc"
__type__ = "hoster"
__version__ = "0.25"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?(keep2share|k2s|keep2s)\.cc/file/(?P<ID>\w+)'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Keep2Share.cc hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it"),
("Walter Purcaro", "vuolter@gmail.com")]
URL_REPLACEMENTS = [(__pattern__ + ".*", "http://keep2s.cc/file/\g<ID>")]
NAME_PATTERN = r'File: <span>(?P<N>.+?)</span>'
SIZE_PATTERN = r'Size: (?P<S>[^<]+)</div>'
OFFLINE_PATTERN = r'File not found or deleted|Sorry, this file is blocked or deleted|Error 404'
TEMP_OFFLINE_PATTERN = r'Downloading blocked due to'
LINK_FREE_PATTERN = r'"(.+?url.html\?file=.+?)"|window\.location\.href = \'(.+?)\';'
LINK_PREMIUM_PATTERN = r'window\.location\.href = \'(.+?)\';'
CAPTCHA_PATTERN = r'src="(/file/captcha\.html.+?)"'
WAIT_PATTERN = r'Please wait ([\d:]+) to download this file'
TEMP_ERROR_PATTERN = r'>\s*(Download count files exceed|Traffic limit exceed|Free account does not allow to download more than one file at the same time)'
ERROR_PATTERN = r'>\s*(Free user can\'t download large files|You no can access to this file|This download available only for premium users|This is private file)'
def check_errors(self):
m = re.search(self.TEMP_ERROR_PATTERN, self.html)
if m:
self.info['error'] = m.group(1)
self.wantReconnect = True
self.retry(wait_time=30 * 60, msg=m.group(0))
m = re.search(self.ERROR_PATTERN, self.html)
if m:
errmsg = self.info['error'] = m.group(1)
self.error(errmsg)
m = re.search(self.WAIT_PATTERN, self.html)
if m:
self.log_debug("Hoster told us to wait for %s" % m.group(1))
            #: Convert an "hh:mm:ss" string to seconds, courtesy of https://stackoverflow.com/questions/10663720
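            # e.g. m.group(1) == "01:30:00" gives 1*3600 + 30*60 + 0*1 = 5400 seconds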
ftr = [3600, 60, 1]
wait_time = sum(a * b for a, b in zip(ftr, map(int, m.group(1).split(':'))))
self.wantReconnect = True
self.retry(wait_time=wait_time, msg="Please wait to download this file")
self.info.pop('error', None)
def handle_free(self, pyfile):
self.fid = re.search(r'<input type="hidden" name="slow_id" value="(.+?)">', self.html).group(1)
self.html = self.load(pyfile.url, post={'yt0': '', 'slow_id': self.fid})
# self.log_debug(self.fid)
# self.log_debug(pyfile.url)
self.check_errors()
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m is None:
self.handle_captcha()
self.wait(31)
self.html = self.load(pyfile.url)
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m is None:
self.error(_("Free download link not found"))
self.link = m.group(1)
def handle_captcha(self):
post_data = {'free' : 1,
'freeDownloadRequest': 1,
'uniqueId' : self.fid,
'yt0' : ''}
m = re.search(r'id="(captcha\-form)"', self.html)
self.log_debug("captcha-form found %s" % m)
m = re.search(self.CAPTCHA_PATTERN, self.html)
self.log_debug("CAPTCHA_PATTERN found %s" % m)
if m:
captcha_url = urlparse.urljoin("http://keep2s.cc/", m.group(1))
post_data['CaptchaForm[code]'] = self.captcha.decrypt(captcha_url)
else:
recaptcha = ReCaptcha(self)
response, challenge = recaptcha.challenge()
post_data.update({'recaptcha_challenge_field': challenge,
'recaptcha_response_field' : response})
self.html = self.load(self.pyfile.url, post=post_data)
if 'verification code is incorrect' not in self.html:
self.captcha.correct()
else:
self.captcha.invalid()
getInfo = create_getInfo(Keep2ShareCc)
|
mationic/pyload
|
module/plugins/hoster/Keep2ShareCc.py
|
Python
|
gpl-3.0
| 4,411
|
#! /usr/bin/env python
"""
This script checks HighGUI's cvGetCaptureProperty functionality for correct return
of the frame width and height of an .avi file containing uncompressed 32bit Bitmap frames.
"""
# name of this test and its requirements
TESTNAME = "size_bmp32"
REQUIRED = []
# needed for sys.exit(int), .works file handling and check routine
import sys
import works
import size_test
# check requirements and delete old flag file, if it exists
if not works.check_files(REQUIRED,TESTNAME):
sys.exit(77)
# name of file we check here
FILENAME='bmp32.avi'
# run check routine
result=size_test.size_ok(FILENAME)
# create flag file for following tests
works.set_file(TESTNAME)
# return result of test routine
sys.exit(result)
|
shiftcontrol/UnityOpenCV
|
opencv/tests/swig_python/highgui/size_bmp32.py
|
Python
|
gpl-3.0
| 739
|
'''Test cases for QLayout handling of child widgets references'''
import unittest
from sys import getrefcount
from PySide.QtGui import QHBoxLayout, QVBoxLayout, QGridLayout, QWidget
from PySide.QtGui import QStackedLayout, QFormLayout
from PySide.QtGui import QApplication, QPushButton, QLabel
from helper import UsesQApplication
class SaveReference(UsesQApplication):
'''Test case to check if QLayout-derived classes increment the refcount
of widgets passed to addWidget()'''
# Adding here as nose can't see the qapplication attrib we inherit
qapplication = True
def setUp(self):
#Acquire resources
super(SaveReference, self).setUp()
self.widget1 = QPushButton('click me')
self.widget2 = QLabel('aaa')
def tearDown(self):
#Release resources
del self.widget2
del self.widget1
super(SaveReference, self).tearDown()
def checkLayoutReference(self, layout):
        #Checks the reference count handling of layout.addWidget
self.assertEqual(getrefcount(self.widget1), 2)
layout.addWidget(self.widget1)
self.assertEqual(getrefcount(self.widget1), 3)
self.assertEqual(getrefcount(self.widget2), 2)
layout.addWidget(self.widget2)
self.assertEqual(getrefcount(self.widget2), 3)
        # Check that it doesn't mess with the previous widget's refcount
self.assertEqual(getrefcount(self.widget1), 3)
def testMoveLayout(self):
l = QHBoxLayout()
self.assertEqual(getrefcount(self.widget1), 2)
l.addWidget(self.widget1)
self.assertEqual(getrefcount(self.widget1), 3)
w = QWidget()
w.setLayout(l)
self.assertEqual(getrefcount(self.widget1), 3)
def testHBoxReference(self):
#QHBoxLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QHBoxLayout(w))
def testVBoxReference(self):
#QVBoxLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QVBoxLayout(w))
def testGridReference(self):
#QGridLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QGridLayout(w))
def testFormReference(self):
#QFormLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QFormLayout(w))
def testStackedReference(self):
#QStackedLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QStackedLayout(w))
class MultipleAdd(UsesQApplication):
'''Test case to check if refcount is incremented only once when multiple
calls to addWidget are made with the same widget'''
qapplication = True
def setUp(self):
#Acquire resources
super(MultipleAdd, self).setUp()
self.widget = QPushButton('click me')
self.win = QWidget()
self.layout = QHBoxLayout(self.win)
def tearDown(self):
#Release resources
del self.widget
del self.layout
del self.win
super(MultipleAdd, self).tearDown()
def testRefCount(self):
#Multiple QLayout.addWidget calls on the same widget
self.assertEqual(getrefcount(self.widget), 2)
self.layout.addWidget(self.widget)
self.assertEqual(getrefcount(self.widget), 3)
self.layout.addWidget(self.widget)
self.assertEqual(getrefcount(self.widget), 3)
self.layout.addWidget(self.widget)
self.assertEqual(getrefcount(self.widget), 3)
class InternalAdd(UsesQApplication):
def testInternalRef(self):
mw = QWidget()
w = QWidget()
ow = QWidget()
topLayout = QGridLayout()
# unique reference
self.assertEqual(getrefcount(w), 2)
self.assertEqual(getrefcount(ow), 2)
topLayout.addWidget(w, 0, 0)
topLayout.addWidget(ow, 1, 0)
        # layout keeps the reference
self.assertEqual(getrefcount(w), 3)
self.assertEqual(getrefcount(ow), 3)
mainLayout = QGridLayout()
mainLayout.addLayout(topLayout, 1, 0, 1, 4)
# the same reference
self.assertEqual(getrefcount(w), 3)
self.assertEqual(getrefcount(ow), 3)
mw.setLayout(mainLayout)
        # now transfer the ownership to mw
self.assertEqual(getrefcount(w), 3)
self.assertEqual(getrefcount(ow), 3)
del mw
# remove the ref and invalidate the widget
self.assertEqual(getrefcount(w), 2)
self.assertEqual(getrefcount(ow), 2)
if __name__ == '__main__':
unittest.main()
|
M4rtinK/pyside-android
|
tests/QtGui/qlayout_ref_test.py
|
Python
|
lgpl-2.1
| 4,597
|
from __future__ import print_function, division
import random
from itertools import permutations
import numpy as np
from scipy.stats.distributions import vonmises
import pickle
import tempfile
from sklearn.pipeline import Pipeline
from msmbuilder.example_datasets import AlanineDipeptide
from msmbuilder.featurizer import DihedralFeaturizer
from msmbuilder.hmm import VonMisesHMM
def test_code_works():
# creates a 4-state HMM on the ALA2 data. Nothing fancy, just makes
# sure the code runs without erroring out
trajectories = AlanineDipeptide().get_cached().trajectories
topology = trajectories[0].topology
indices = topology.select('symbol C or symbol O or symbol N')
featurizer = DihedralFeaturizer(['phi', 'psi'], trajectories[0][0])
sequences = featurizer.transform(trajectories)
hmm = VonMisesHMM(n_states=4, n_init=1)
hmm.fit(sequences)
    assert len(hmm.timescales_) == 3
assert np.any(hmm.timescales_ > 50)
def circwrap(x):
"""Wrap an array on (-pi, pi)"""
return x - 2 * np.pi * np.floor(x / (2 * np.pi) + 0.5)
def create_timeseries(means, kappas, transmat):
"""Construct a random timeseries based on a specified Markov model."""
numStates = len(means)
state = random.randint(0, numStates - 1)
cdf = np.cumsum(transmat, 1)
numFrames = 1000
X = np.empty((numFrames, 1))
for i in range(numFrames):
rand = random.random()
state = (cdf[state] > rand).argmax()
X[i, 0] = circwrap(vonmises.rvs(kappas[state], means[state]))
return X
def validate_timeseries(means, kappas, transmat, model, meantol,
kappatol, transmattol):
"""Test our model matches the one used to create the timeseries."""
numStates = len(means)
assert len(model.means_) == numStates
assert (model.transmat_ >= 0.0).all()
assert (model.transmat_ <= 1.0).all()
totalProbability = sum(model.transmat_.T)
assert (abs(totalProbability - 1.0) < 1e-5).all()
# The states may have come out in a different order,
# so we need to test all possible permutations.
for order in permutations(range(len(means))):
match = True
for i in range(numStates):
if abs(circwrap(means[i] - model.means_[order[i]])) > meantol:
match = False
break
if abs(kappas[i] - model.kappas_[order[i]]) > kappatol:
match = False
break
for j in range(numStates):
diff = transmat[i, j] - model.transmat_[order[i], order[j]]
if abs(diff) > transmattol:
match = False
break
if match:
# It matches.
return
# No permutation matched.
assert False
def test_2_state():
transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
means = np.array([[0.0], [2.0]])
kappas = np.array([[4.0], [8.0]])
X = [create_timeseries(means, kappas, transmat) for i in range(10)]
# For each value of various options,
# create a 2 state HMM and see if it is correct.
for reversible_type in ('mle', 'transpose'):
model = VonMisesHMM(n_states=2, reversible_type=reversible_type,
thresh=1e-4, n_iter=30)
model.fit(X)
validate_timeseries(means, kappas, transmat, model, 0.1, 0.5, 0.05)
assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5
def test_3_state():
transmat = np.array([[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.8, 0.2, 0.0]])
means = np.array([[0.0], [2.0], [4.0]])
kappas = np.array([[8.0], [8.0], [6.0]])
X = [create_timeseries(means, kappas, transmat) for i in range(20)]
# For each value of various options,
# create a 3 state HMM and see if it is correct.
for reversible_type in ('mle', 'transpose'):
model = VonMisesHMM(n_states=3, reversible_type=reversible_type,
thresh=1e-4, n_iter=30)
model.fit(X)
validate_timeseries(means, kappas, transmat, model, 0.1, 0.5, 0.1)
assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5
def test_pipeline():
trajs = AlanineDipeptide().get_cached().trajectories
p = Pipeline([
('diheds', DihedralFeaturizer(['phi', 'psi'], sincos=False)),
('hmm', VonMisesHMM(n_states=4))
])
predict = p.fit_predict(trajs)
p.named_steps['hmm'].summarize()
def test_pickle():
"""Test pickling an HMM"""
trajectories = AlanineDipeptide().get_cached().trajectories
topology = trajectories[0].topology
indices = topology.select('symbol C or symbol O or symbol N')
featurizer = DihedralFeaturizer(['phi', 'psi'], trajectories[0][0])
sequences = featurizer.transform(trajectories)
hmm = VonMisesHMM(n_states=4, n_init=1)
hmm.fit(sequences)
logprob, hidden = hmm.predict(sequences)
with tempfile.TemporaryFile() as savefile:
pickle.dump(hmm, savefile)
savefile.seek(0, 0)
hmm2 = pickle.load(savefile)
logprob2, hidden2 = hmm2.predict(sequences)
assert(logprob == logprob2)
|
msultan/msmbuilder
|
msmbuilder/tests/test_vmhmm.py
|
Python
|
lgpl-2.1
| 5,110
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import unittest
from org.o3project.odenos.core.component.network.topology.port import Port
class PortTest(unittest.TestCase):
def setUp(self):
self.target = Port('Port', '1', 'PortId', 'NodeId', 'OutLink',
'InLink', {'Key': 'Val'})
def tearDown(self):
pass
def test_constructor(self):
self.assertEqual(self.target._body[Port.TYPE], 'Port')
self.assertEqual(self.target._body[Port.VERSION], '1')
self.assertEqual(self.target._body[Port.PORT_ID], 'PortId')
self.assertEqual(self.target._body[Port.NODE_ID], 'NodeId')
self.assertEqual(self.target._body[Port.OUT_LINK], 'OutLink')
self.assertEqual(self.target._body[Port.IN_LINK], 'InLink')
self.assertEqual(self.target._body[Port.ATTRIBUTES]['Key'], 'Val')
def test_type(self):
self.assertEqual(self.target.type, 'Port')
def test_version(self):
self.assertEqual(self.target.version, '1')
def test_port_id(self):
self.assertEqual(self.target.port_id, 'PortId')
def test_node_id(self):
self.assertEqual(self.target.node_id, 'NodeId')
def test_out_link(self):
self.assertEqual(self.target.out_link, 'OutLink')
def test_in_link(self):
self.assertEqual(self.target.in_link, 'InLink')
def test_attributes(self):
result = self.target.attributes
self.assertEqual(len(result), 1)
self.assertEqual(result['Key'], 'Val')
def test_create_from_packed(self):
packed = self.target.packed_object()
result = Port.create_from_packed(packed)
self.assertEqual(result.type, 'Port')
self.assertEqual(result.version, '1')
self.assertEqual(result.port_id, 'PortId')
self.assertEqual(result.node_id, 'NodeId')
self.assertEqual(result.out_link, 'OutLink')
self.assertEqual(result.in_link, 'InLink')
self.assertEqual(len(result.attributes), 1)
self.assertEqual(result.attributes['Key'], 'Val')
def test_create_from_packed_without_version(self):
packed = {'type': 'Port', 'port_id': 'PortId', 'node_id': 'NodeId',
'out_link': 'OutLink', 'in_link': 'InLink',
'attributes': {'Key': 'Val'}}
result = Port.create_from_packed(packed)
self.assertEqual(result.type, 'Port')
self.assertEqual(result.version, None)
self.assertEqual(result.port_id, 'PortId')
self.assertEqual(result.node_id, 'NodeId')
self.assertEqual(result.out_link, 'OutLink')
self.assertEqual(result.in_link, 'InLink')
self.assertEqual(len(result.attributes), 1)
self.assertEqual(result.attributes['Key'], 'Val')
def test_packed_object(self):
result = self.target.packed_object()
self.assertEqual(result, {'type': 'Port', 'version': '1',
'port_id': 'PortId', 'node_id': 'NodeId',
'out_link': 'OutLink', 'in_link': 'InLink',
'attributes': {'Key': 'Val'}})
if __name__ == "__main__":
unittest.main()
|
haizawa/odenos
|
src/test/python/org/o3project/odenos/core/component/network/topology/test_port.py
|
Python
|
apache-2.0
| 4,161
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_AutoShardDataset` transformation."""
import os
from absl.testing import parameterized
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.kernel_tests import tf_record_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def chunk(l, n):
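  """Yield successive n-sized slices of l."""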
for i in range(0, len(l), n):
yield l[i:i + n]
class AutoShardDatasetTest(tf_record_test_base.TFRecordTestBase,
parameterized.TestCase):
def setUp(self):
super(AutoShardDatasetTest, self).setUp()
self._num_files = 10
self._num_records = 10
self._filenames = self._createFiles()
def getAllDatasetElements(self, dataset):
actual = []
next_fn = self.getNext(dataset)
while True:
try:
actual.append(self.evaluate(next_fn()))
except errors.OutOfRangeError:
break
return actual
def assertDatasetProducesWithShuffle(self, dataset, expected, batch,
num_examples, shuffle):
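    # When shuffling, element order is nondeterministic: pull num_examples
    # batches, flatten them, and compare against `expected` as multisets.
    # Otherwise compare the batched output in order.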
if shuffle:
actual = []
next_fn = self.getNext(dataset)
for _ in range(num_examples):
elem = self.evaluate(next_fn())
if isinstance(elem, tuple):
actual.extend(elem)
else:
actual.extend(elem.tolist())
self.assertCountEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_fn())
else:
self.assertDatasetProduces(dataset, list(chunk(expected, batch)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testFlatMapReaderPipeline(self, shuffle):
dataset = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (3, 8)
for r in range(0, 10)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(batch_size=[1, 3, 10])))
def testDatasetOfReaderDatasetsPipeline(self, batch_size):
    # This tests a scenario where list_files may return multiple files
    # due to the glob containing wildcards.
def batch(iterator, n):
l = len(iterator)
for i in range(0, l, n):
yield iterator[i:min(i + n, l)]
datasets = []
for files in batch(self._filenames, batch_size):
datasets.append(
dataset_ops.Dataset.list_files(files, shuffle=False).map(
core_readers.TFRecordDataset))
dataset = dataset_ops.Dataset.from_tensor_slices(datasets)
dataset = dataset.flat_map(lambda x: x)
    # Simulate additional ops in between flat_map and interleave. These should be
    # no-ops for the rewrite: since ShardDataset is placed right after flat_map,
    # we will only have two datasets left at this point.
dataset = dataset.prefetch(1)
dataset = dataset.prefetch(1)
dataset = dataset.interleave(
lambda x: x, cycle_length=1, num_parallel_calls=1)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testZipReaderPipeline(self):
dataset1 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=False)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=False)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
(b"Record %d of file %d" % (r, f), b"Record %d of file %d" % (r, f)) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testConcatenateReaderPipeline(self, shuffle):
dataset1 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset1 = dataset1.batch(5)
dataset2 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset2.batch(5)
dataset = dataset1.concatenate(dataset2)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
expected += expected
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 8, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testPipelineWithMap(self, shuffle):
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(test_base.default_test_combinations())
def testDirectFilenameTFRecordReaderPipeline(self):
dataset = core_readers.TFRecordDataset(self._filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testValidPipelineWithRangeDataset(self, shuffle):
dataset = dataset_ops.Dataset.range(self._num_files)
dataset = dataset.map(lambda n: string_ops.string_join( # pylint:disable=g-long-lambda
[self.get_temp_dir(),
string_ops.string_format("/tf_record.{}.txt", [n])]))
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(params=[(1, 0, 10, 10), (2, 1, 20, 5),
(10, 1, 1, 10)])))
def testStandardReaderPipeline(self, params):
num_epochs, index, batch_size, parallel_reads = params
dataset = readers.make_tf_record_dataset(
file_pattern=self._filenames,
num_epochs=num_epochs,
batch_size=batch_size,
parser_fn=None,
num_parallel_reads=parallel_reads,
drop_final_batch=True,
shuffle=False)
dataset = distribute._AutoShardDataset(dataset, 2, index)
outputs = self.getNext(dataset)
self._verify_records(
outputs,
batch_size=batch_size,
file_index=[i for i in range(index, self._num_records, 2)],
num_epochs=num_epochs,
interleave_cycle_length=parallel_reads,
drop_final_batch=True,
use_parser_fn=None)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(outputs())
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testSampleResNetPipeline(self, shuffle):
dataset = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(sharding_policy=[
options_lib.AutoShardPolicy.DATA,
options_lib.AutoShardPolicy.AUTO
])))
def testShardByDataBeforePrefetch(self, sharding_policy):
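    # With the DATA/AUTO policies, the rewrite should place the Shard op before
    # the Prefetch op, as asserted by assert_next below.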
dataset = dataset_ops.Dataset.range(4)
dataset = dataset.apply(testing.assert_next(["Shard", "Prefetch"]))
dataset = dataset.prefetch(1)
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = sharding_policy
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
self.assertDatasetProduces(dataset, [0, 2])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.times(combinations.combine(
sharding_policy=[options_lib.AutoShardPolicy.DATA,
options_lib.AutoShardPolicy.FILE]),
combinations.combine(shuffle=[True, False]))))
def testReplicateAndShardProduceDisjointData(self, shuffle, sharding_policy):
dataset = dataset_ops.Dataset.list_files(self._filenames,
shuffle=shuffle)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
graph_def = dataset._as_serialized_graph(
strip_device_assignment=True,
external_state_policy=options_lib.ExternalStatePolicy.WARN)
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = sharding_policy
ds1 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
dataset.element_spec)
ds2 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
dataset.element_spec)
ds1 = ds1.with_options(options)
ds2 = ds2.with_options(options)
ds1 = distribute._AutoShardDataset(ds1, 2, 0)
ds2 = distribute._AutoShardDataset(ds2, 2, 1)
elems1 = set(self.getAllDatasetElements(ds1))
elems2 = set(self.getAllDatasetElements(ds2))
self.assertEmpty(elems1.intersection(elems2))
@combinations.generate(test_base.default_test_combinations())
def testWorkersGreaterThanNumFilesWithDataSharding(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.DATA)
dataset = core_readers._TFRecordDataset(self._filenames)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
    # Should return "Record (0,5) of file (0 --> 9)": since we are sharding by
    # individual elements, we should be able to get some data from all files.
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testAutoshardPolicyOff(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.OFF)
dataset = core_readers._TFRecordDataset(self._filenames)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
# Should return every record in every file since autosharding is turned off.
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testFileShardingWithoutReaderDatasetOp(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.FILE)
dataset = dataset_ops.Dataset.range(1024)
dataset = dataset.with_options(options)
    # We are specifying that we want a file sharding policy, but this pipeline
    # doesn't start with file reading, so we should error out.
with self.assertRaises(errors.NotFoundError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testWorkersGreaterThanNumFiles(self):
dataset = dataset_ops.Dataset.list_files(self._filenames)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 500, 499)
self.assertDatasetProduces(dataset, [])
@combinations.generate(test_base.default_test_combinations())
def testTFRecordReaderWithDirectFileNames(self):
# Using `_TFRecordDataset` creates a raw op rather than wrapping it around
# a flat_map automatically.
dataset = core_readers._TFRecordDataset(self._filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testTFRecordReaderWithDirectFileNamesAndShapes(self):
# Using `_TFRecordDataset` creates a raw op rather than wrapping it around
# a flat_map automatically.
dataset = core_readers._TFRecordDataset(self._filenames)
# BatchDataset contains `output_types` and `output_shapes`
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 5)
]
self.assertDatasetProduces(dataset, list(chunk(expected, 5)))
@combinations.generate(test_base.default_test_combinations())
def testShardOutOfRange(self):
dataset = dataset_ops.Dataset.range(5)
with self.assertRaises(errors.InvalidArgumentError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testShardOutOfRangeEmptyDataset(self):
dataset = dataset_ops.Dataset.range(0)
with self.assertRaises(errors.OutOfRangeError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testNoReaderPipelines(self):
dataset = dataset_ops.Dataset.range(1024)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
self.assertDatasetProduces(dataset, [i for i in range(1024) if i % 2 == 0])
@combinations.generate(test_base.default_test_combinations())
def testUnknownOpInPipelineStillShardsAtTheEnd(self):
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.apply(unique.unique())
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testInvalidWorkerIndex(self):
dataset = dataset_ops.Dataset.list_files(self._filenames)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
with self.assertRaises(errors.InvalidArgumentError):
dataset = distribute._AutoShardDataset(dataset, 2, 2)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testAssertCardinality(self):
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = dataset.apply(cardinality.assert_cardinality(42))
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, list(chunk(expected, 5)))
@combinations.generate(test_base.default_test_combinations())
def testMakeBatchedFeaturesDataset(self):
files = 2
records_per_file = 5
def make_record(file_index):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[file_index])),
}))
return example.SerializeToString()
filenames = []
for file_index in range(files):
filename = os.path.join(self.get_temp_dir(),
"tf_record.%d.txt" % file_index)
filenames.append(filename)
writer = python_io.TFRecordWriter(filename)
for _ in range(records_per_file):
writer.write(make_record(file_index))
writer.close()
dataset = readers.make_batched_features_dataset(
file_pattern=filenames,
batch_size=records_per_file,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
},
reader=core_readers.TFRecordDataset,
num_epochs=1)
# We should shard at the file level, so that all records come from file 0.
dataset = distribute._AutoShardDataset(dataset, 2, 0)
dataset = dataset.unbatch()
output = self.getDatasetOutput(dataset)
files = [elem["file"] for elem in output]
self.assertEqual(files, [0] * records_per_file)
@combinations.generate(test_base.default_test_combinations())
def testHintShardingValidPattern(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.HINT)
dataset = dataset_ops.Dataset.range(100).shard(distribute.SHARD_HINT, 0)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.assertDatasetProduces(dataset, list(range(0, 100, 10)))
@combinations.generate(test_base.default_test_combinations())
def testHintShardingInvalidPattern(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.HINT)
dataset = dataset_ops.Dataset.range(100).shard(1, 0)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.assertDatasetProduces(dataset, list(range(100)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
auto_shard_policy=list(options_lib.AutoShardPolicy))))
def testEnumerateAutoShardPolicies(self, auto_shard_policy):
"""Verifies tf.data handles every auto-shard policy with no errors."""
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = auto_shard_policy
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
self.getDatasetOutput(dataset, requires_initialization=True)
class AutoShardWithRebatchDatasetTest(tf_record_test_base.TFRecordTestBase,
parameterized.TestCase):
def _setUpFiles(self, num_files, num_records_per_file):
self._num_files = num_files
self._num_records = num_records_per_file
self._filenames = self._createFiles()
@combinations.generate(test_base.default_test_combinations())
def testFileShardingWithLegacyRebatch(self):
# Tests that RebatchDatasetV1 is a passthrough op.
self._setUpFiles(num_files=5, num_records_per_file=10)
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.apply(
testing.assert_next(["Shard", "FlatMap", "Batch", "Rebatch"]))
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = distribute._LegacyRebatchDataset(dataset, num_replicas=5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [[self._record(3, i)] for i in range(10)]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testFileShardingWithRebatch(self):
# Tests that RebatchDatasetV2 is a passthrough op.
self._setUpFiles(num_files=3, num_records_per_file=5)
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.apply(
testing.assert_next(["Shard", "FlatMap", "Batch", "Rebatch"]))
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = distribute._RebatchDataset(dataset, batch_sizes=[2, 1, 2])
dataset = distribute._AutoShardDataset(dataset, 3, 1)
expected = [[self._record(1, 0), self._record(1, 1)], [self._record(1, 2)],
[self._record(1, 3), self._record(1, 4)]]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.times(
combinations.combine(sharding_policy=[
options_lib.AutoShardPolicy.DATA,
options_lib.AutoShardPolicy.AUTO
]), combinations.combine(with_prefetch=[True, False]))))
def testUseLegacyRebatchWithDataSharding(self, sharding_policy,
with_prefetch):
# This test simulates a distributed environment with 3 workers, each with
# 1 replica.
dataset = dataset_ops.Dataset.range(8)
dataset = dataset.batch(4)
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = sharding_policy
dataset = dataset.with_options(options)
# We expect the auto-shard rewrite to rewrite RebatchDatasetV2 to
# RebatchDataset(V1) for correctness reasons. This will modify the output
# of the dataset.
worker_a_dataset = distribute._RebatchDataset(
dataset, batch_sizes=[2, 1, 1])
if with_prefetch:
worker_a_dataset = worker_a_dataset.prefetch(1)
worker_a_dataset = distribute._AutoShardDataset(
worker_a_dataset, 3, 0, num_replicas=3)
expected = [[0, 1], [4, 5]]
self.assertDatasetProduces(worker_a_dataset, expected)
worker_b_dataset = distribute._RebatchDataset(
dataset, batch_sizes=[1, 1, 2])
if with_prefetch:
worker_b_dataset = worker_b_dataset.prefetch(1)
worker_b_dataset = distribute._AutoShardDataset(
worker_b_dataset, 3, 1, num_replicas=3)
expected = [[2, 3], [6, 7]]
self.assertDatasetProduces(worker_b_dataset, expected)
worker_c_dataset = distribute._RebatchDataset(
dataset, batch_sizes=[1, 2, 1])
if with_prefetch:
worker_c_dataset = worker_c_dataset.prefetch(1)
worker_c_dataset = distribute._AutoShardDataset(
worker_c_dataset, 3, 2, num_replicas=3)
expected = [[], []]
self.assertDatasetProduces(worker_c_dataset, expected)
class AutoShardDatasetCheckpointTest(tf_record_test_base.TFRecordTestBase,
checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def setUp(self):
super(AutoShardDatasetCheckpointTest, self).setUp()
self._num_files = 10
self._num_records = 10
self._filenames = self._createFiles()
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
def build_dataset():
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = distribute._AutoShardDataset(dataset, 5, 3)
return dataset
verify_fn(self, build_dataset, num_outputs=20)
if __name__ == "__main__":
test.main()
|
tensorflow/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/auto_shard_dataset_test.py
|
Python
|
apache-2.0
| 27,708
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class EndpointsClient(rest_client.RestClient):
api_version = "v2.0"
def create_endpoint(self, service_id, region_id, **kwargs):
"""Create an endpoint for service."""
post_body = {
'service_id': service_id,
'region': region_id,
'publicurl': kwargs.get('publicurl'),
'adminurl': kwargs.get('adminurl'),
'internalurl': kwargs.get('internalurl')
}
post_body = json.dumps({'endpoint': post_body})
resp, body = self.post('/endpoints', post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_endpoints(self):
"""List Endpoints - Returns Endpoints."""
resp, body = self.get('/endpoints')
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_endpoint(self, endpoint_id):
"""Delete an endpoint."""
url = '/endpoints/%s' % endpoint_id
resp, body = self.delete(url)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
|
HybridF5/tempest_debug
|
tempest/services/identity/v2/json/endpoints_client.py
|
Python
|
apache-2.0
| 1,870
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import six
from cassandra.cqlengine.operators import EqualsOperator
from cassandra.cqlengine.statements import StatementException, WhereClause
class TestWhereClause(unittest.TestCase):
def test_operator_check(self):
""" tests that creating a where statement with a non BaseWhereOperator object fails """
with self.assertRaises(StatementException):
WhereClause('a', 'b', 'c')
def test_where_clause_rendering(self):
""" tests that where clauses are rendered properly """
wc = WhereClause('a', EqualsOperator(), 'c')
wc.set_context_id(5)
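        # The context id (5) becomes the named placeholder in the rendered clause.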
self.assertEqual('"a" = %(5)s', six.text_type(wc), six.text_type(wc))
self.assertEqual('"a" = %(5)s', str(wc), type(wc))
def test_equality_method(self):
""" tests that 2 identical where clauses evaluate as == """
wc1 = WhereClause('a', EqualsOperator(), 'c')
wc2 = WhereClause('a', EqualsOperator(), 'c')
assert wc1 == wc2
|
Richard-Mathie/cassandra_benchmark
|
vendor/github.com/datastax/python-driver/tests/integration/cqlengine/statements/test_where_clause.py
|
Python
|
apache-2.0
| 1,642
|
"""Vizio SmartCast Device support."""
from datetime import timedelta
import logging
from typing import Any, Callable, Dict, List, Optional, Union
from pyvizio import VizioAsync
from pyvizio.api.apps import find_app_name
from pyvizio.const import APP_HOME, INPUT_APPS, NO_APP_RUNNING, UNKNOWN_APP
from homeassistant.components.media_player import (
DEVICE_CLASS_SPEAKER,
DEVICE_CLASS_TV,
SUPPORT_SELECT_SOUND_MODE,
MediaPlayerEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_DEVICE_CLASS,
CONF_EXCLUDE,
CONF_HOST,
CONF_INCLUDE,
CONF_NAME,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import entity_platform
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
CONF_ADDITIONAL_CONFIGS,
CONF_APPS,
CONF_VOLUME_STEP,
DEFAULT_TIMEOUT,
DEFAULT_VOLUME_STEP,
DEVICE_ID,
DOMAIN,
ICON,
SERVICE_UPDATE_SETTING,
SUPPORTED_COMMANDS,
UPDATE_SETTING_SCHEMA,
VIZIO_AUDIO_SETTINGS,
VIZIO_DEVICE_CLASSES,
VIZIO_MUTE,
VIZIO_MUTE_ON,
VIZIO_SOUND_MODE,
VIZIO_VOLUME,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistantType,
config_entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up a Vizio media player entry."""
host = config_entry.data[CONF_HOST]
token = config_entry.data.get(CONF_ACCESS_TOKEN)
name = config_entry.data[CONF_NAME]
device_class = config_entry.data[CONF_DEVICE_CLASS]
    # If the config entry options are not set up, set them up; otherwise use the values managed in options
volume_step = config_entry.options.get(
CONF_VOLUME_STEP, config_entry.data.get(CONF_VOLUME_STEP, DEFAULT_VOLUME_STEP)
)
params = {}
if not config_entry.options:
params["options"] = {CONF_VOLUME_STEP: volume_step}
include_or_exclude_key = next(
(
key
for key in config_entry.data.get(CONF_APPS, {})
if key in [CONF_INCLUDE, CONF_EXCLUDE]
),
None,
)
if include_or_exclude_key:
params["options"][CONF_APPS] = {
include_or_exclude_key: config_entry.data[CONF_APPS][
include_or_exclude_key
].copy()
}
if not config_entry.data.get(CONF_VOLUME_STEP):
new_data = config_entry.data.copy()
new_data.update({CONF_VOLUME_STEP: volume_step})
params["data"] = new_data
if params:
hass.config_entries.async_update_entry(config_entry, **params)
device = VizioAsync(
DEVICE_ID,
host,
name,
auth_token=token,
device_type=VIZIO_DEVICE_CLASSES[device_class],
session=async_get_clientsession(hass, False),
timeout=DEFAULT_TIMEOUT,
)
if not await device.can_connect_with_auth_check():
_LOGGER.warning("Failed to connect to %s", host)
raise PlatformNotReady
apps_coordinator = hass.data[DOMAIN].get(CONF_APPS)
entity = VizioDevice(config_entry, device, name, device_class, apps_coordinator)
async_add_entities([entity], update_before_add=True)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_UPDATE_SETTING, UPDATE_SETTING_SCHEMA, "async_update_setting"
)
class VizioDevice(MediaPlayerEntity):
"""Media Player implementation which performs REST requests to device."""
def __init__(
self,
config_entry: ConfigEntry,
device: VizioAsync,
name: str,
device_class: str,
apps_coordinator: DataUpdateCoordinator,
) -> None:
"""Initialize Vizio device."""
self._config_entry = config_entry
self._apps_coordinator = apps_coordinator
self._name = name
self._state = None
self._volume_level = None
self._volume_step = config_entry.options[CONF_VOLUME_STEP]
self._is_volume_muted = None
self._current_input = None
self._current_app = None
self._current_app_config = None
self._current_sound_mode = None
self._available_sound_modes = []
self._available_inputs = []
self._available_apps = []
self._all_apps = apps_coordinator.data if apps_coordinator else None
self._conf_apps = config_entry.options.get(CONF_APPS, {})
self._additional_app_configs = config_entry.data.get(CONF_APPS, {}).get(
CONF_ADDITIONAL_CONFIGS, []
)
self._device_class = device_class
self._supported_commands = SUPPORTED_COMMANDS[device_class]
self._device = device
self._max_volume = float(self._device.get_max_volume())
self._icon = ICON[device_class]
self._available = True
self._model = None
self._sw_version = None
def _apps_list(self, apps: List[str]) -> List[str]:
"""Return process apps list based on configured filters."""
if self._conf_apps.get(CONF_INCLUDE):
return [app for app in apps if app in self._conf_apps[CONF_INCLUDE]]
if self._conf_apps.get(CONF_EXCLUDE):
return [app for app in apps if app not in self._conf_apps[CONF_EXCLUDE]]
return apps
async def async_update(self) -> None:
"""Retrieve latest state of the device."""
if not self._model:
self._model = await self._device.get_model_name()
if not self._sw_version:
self._sw_version = await self._device.get_version()
is_on = await self._device.get_power_state(log_api_exception=False)
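        # A None power state means the API call failed, so mark the device
        # unavailable (logging only on the transition).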
if is_on is None:
if self._available:
_LOGGER.warning(
"Lost connection to %s", self._config_entry.data[CONF_HOST]
)
self._available = False
return
if not self._available:
_LOGGER.info(
"Restored connection to %s", self._config_entry.data[CONF_HOST]
)
self._available = True
if not is_on:
self._state = STATE_OFF
self._volume_level = None
self._is_volume_muted = None
self._current_input = None
self._current_app = None
self._current_app_config = None
self._current_sound_mode = None
return
self._state = STATE_ON
audio_settings = await self._device.get_all_settings(
VIZIO_AUDIO_SETTINGS, log_api_exception=False
)
if audio_settings:
self._volume_level = float(audio_settings[VIZIO_VOLUME]) / self._max_volume
if VIZIO_MUTE in audio_settings:
self._is_volume_muted = (
audio_settings[VIZIO_MUTE].lower() == VIZIO_MUTE_ON
)
else:
self._is_volume_muted = None
if VIZIO_SOUND_MODE in audio_settings:
self._supported_commands |= SUPPORT_SELECT_SOUND_MODE
self._current_sound_mode = audio_settings[VIZIO_SOUND_MODE]
if not self._available_sound_modes:
self._available_sound_modes = (
await self._device.get_setting_options(
VIZIO_AUDIO_SETTINGS, VIZIO_SOUND_MODE
)
)
else:
# Explicitly remove SUPPORT_SELECT_SOUND_MODE from supported features
self._supported_commands &= ~SUPPORT_SELECT_SOUND_MODE
input_ = await self._device.get_current_input(log_api_exception=False)
if input_:
self._current_input = input_
inputs = await self._device.get_inputs_list(log_api_exception=False)
# If no inputs returned, end update
if not inputs:
return
self._available_inputs = [input_.name for input_ in inputs]
# Return before setting app variables if INPUT_APPS isn't in available inputs
if self._device_class == DEVICE_CLASS_SPEAKER or not any(
app for app in INPUT_APPS if app in self._available_inputs
):
return
# Create list of available known apps from known app list after
# filtering by CONF_INCLUDE/CONF_EXCLUDE
self._available_apps = self._apps_list([app["name"] for app in self._all_apps])
self._current_app_config = await self._device.get_current_app_config(
log_api_exception=False
)
self._current_app = find_app_name(
self._current_app_config,
[APP_HOME, *self._all_apps, *self._additional_app_configs],
)
if self._current_app == NO_APP_RUNNING:
self._current_app = None
def _get_additional_app_names(self) -> List[Dict[str, Any]]:
"""Return list of additional apps that were included in configuration.yaml."""
return [
additional_app["name"] for additional_app in self._additional_app_configs
]
@staticmethod
async def _async_send_update_options_signal(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> None:
"""Send update event when Vizio config entry is updated."""
# Move this method to component level if another entity ever gets added for a single config entry.
# See here: https://github.com/home-assistant/core/pull/30653#discussion_r366426121
async_dispatcher_send(hass, config_entry.entry_id, config_entry)
async def _async_update_options(self, config_entry: ConfigEntry) -> None:
"""Update options if the update signal comes from this entity."""
self._volume_step = config_entry.options[CONF_VOLUME_STEP]
# Update so that CONF_ADDITIONAL_CONFIGS gets retained for imports
self._conf_apps.update(config_entry.options.get(CONF_APPS, {}))
async def async_update_setting(
self, setting_type: str, setting_name: str, new_value: Union[int, str]
) -> None:
"""Update a setting when update_setting service is called."""
await self._device.set_setting(
setting_type,
setting_name,
new_value,
)
async def async_added_to_hass(self) -> None:
"""Register callbacks when entity is added."""
# Register callback for when config entry is updated.
self.async_on_remove(
self._config_entry.add_update_listener(
self._async_send_update_options_signal
)
)
# Register callback for update event
self.async_on_remove(
async_dispatcher_connect(
self.hass, self._config_entry.entry_id, self._async_update_options
)
)
# Register callback for app list updates if device is a TV
@callback
def apps_list_update():
"""Update list of all apps."""
self._all_apps = self._apps_coordinator.data
self.async_write_ha_state()
if self._device_class == DEVICE_CLASS_TV:
self.async_on_remove(
self._apps_coordinator.async_add_listener(apps_list_update)
)
@property
def available(self) -> bool:
"""Return the availabiliity of the device."""
return self._available
@property
def state(self) -> Optional[str]:
"""Return the state of the device."""
return self._state
@property
def name(self) -> str:
"""Return the name of the device."""
return self._name
@property
def icon(self) -> str:
"""Return the icon of the device."""
return self._icon
@property
def volume_level(self) -> Optional[float]:
"""Return the volume level of the device."""
return self._volume_level
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._is_volume_muted
@property
def source(self) -> Optional[str]:
"""Return current input of the device."""
if self._current_app is not None and self._current_input in INPUT_APPS:
return self._current_app
return self._current_input
@property
def source_list(self) -> List[str]:
"""Return list of available inputs of the device."""
        # If a SmartCast app input is in the input list and the app list has been
        # retrieved, show the combined list of inputs and apps; otherwise just
        # return the inputs.
if self._available_apps:
return [
*[
_input
for _input in self._available_inputs
if _input not in INPUT_APPS
],
*self._available_apps,
*[
app
for app in self._get_additional_app_names()
if app not in self._available_apps
],
]
return self._available_inputs
@property
def app_id(self) -> Optional[str]:
"""Return the ID of the current app if it is unknown by pyvizio."""
if self._current_app_config and self.app_name == UNKNOWN_APP:
return {
"APP_ID": self._current_app_config.APP_ID,
"NAME_SPACE": self._current_app_config.NAME_SPACE,
"MESSAGE": self._current_app_config.MESSAGE,
}
return None
@property
def app_name(self) -> Optional[str]:
"""Return the friendly name of the current app."""
return self._current_app
@property
def supported_features(self) -> int:
"""Flag device features that are supported."""
return self._supported_commands
@property
def unique_id(self) -> str:
"""Return the unique id of the device."""
return self._config_entry.unique_id
@property
def device_info(self) -> Dict[str, Any]:
"""Return device registry information."""
return {
"identifiers": {(DOMAIN, self._config_entry.unique_id)},
"name": self.name,
"manufacturer": "VIZIO",
"model": self._model,
"sw_version": self._sw_version,
}
@property
def device_class(self) -> str:
"""Return device class for entity."""
return self._device_class
@property
def sound_mode(self) -> Optional[str]:
"""Name of the current sound mode."""
return self._current_sound_mode
@property
def sound_mode_list(self) -> Optional[List[str]]:
"""List of available sound modes."""
return self._available_sound_modes
async def async_select_sound_mode(self, sound_mode):
"""Select sound mode."""
if sound_mode in self._available_sound_modes:
await self._device.set_setting(
VIZIO_AUDIO_SETTINGS, VIZIO_SOUND_MODE, sound_mode
)
async def async_turn_on(self) -> None:
"""Turn the device on."""
await self._device.pow_on()
async def async_turn_off(self) -> None:
"""Turn the device off."""
await self._device.pow_off()
async def async_mute_volume(self, mute: bool) -> None:
"""Mute the volume."""
if mute:
await self._device.mute_on()
self._is_volume_muted = True
else:
await self._device.mute_off()
self._is_volume_muted = False
async def async_media_previous_track(self) -> None:
"""Send previous channel command."""
await self._device.ch_down()
async def async_media_next_track(self) -> None:
"""Send next channel command."""
await self._device.ch_up()
async def async_select_source(self, source: str) -> None:
"""Select input source."""
if source in self._available_inputs:
await self._device.set_input(source)
elif source in self._get_additional_app_names():
await self._device.launch_app_config(
**next(
app["config"]
for app in self._additional_app_configs
if app["name"] == source
)
)
elif source in self._available_apps:
await self._device.launch_app(source, self._all_apps)
async def async_volume_up(self) -> None:
"""Increase volume of the device."""
await self._device.vol_up(num=self._volume_step)
if self._volume_level is not None:
self._volume_level = min(
1.0, self._volume_level + self._volume_step / self._max_volume
)
async def async_volume_down(self) -> None:
"""Decrease volume of the device."""
await self._device.vol_down(num=self._volume_step)
if self._volume_level is not None:
self._volume_level = max(
0.0, self._volume_level - self._volume_step / self._max_volume
)
async def async_set_volume_level(self, volume: float) -> None:
"""Set volume level."""
if self._volume_level is not None:
if volume > self._volume_level:
num = int(self._max_volume * (volume - self._volume_level))
await self._device.vol_up(num=num)
self._volume_level = volume
elif volume < self._volume_level:
num = int(self._max_volume * (self._volume_level - volume))
await self._device.vol_down(num=num)
self._volume_level = volume
|
tboyce021/home-assistant
|
homeassistant/components/vizio/media_player.py
|
Python
|
apache-2.0
| 18,119
|
# Echo client demo using Unix sockets
# Piet van Oostrum
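# Expects an echo server already listening on the Unix socket file below.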
from socket import *
FILE = 'unix-socket'
s = socket(AF_UNIX, SOCK_STREAM)
s.connect(FILE)
s.send('Hello, world')
data = s.recv(1024)
s.close()
print 'Received', repr(data)
|
google/google-ctf
|
third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Demo/sockets/unixclient.py
|
Python
|
apache-2.0
| 244
|
class Solution:
    # @param prices, a list of integers
    # @return an integer
def maxProfit(self, prices):
if not prices:
return 0
n = len(prices)
m1 = [0] * n
m2 = [0] * n
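        # m1[i]: best profit from a single transaction within prices[0..i]
        # m2[i]: best profit from a single transaction within prices[i..n-1]
        # Combining them at every split point allows at most two transactions.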
max_profit1 = 0
min_price1 = prices[0]
max_profit2 = 0
max_price2 = prices[-1]
for i in range(n):
max_profit1 = max(max_profit1, prices[i] - min_price1)
m1[i] = max_profit1
min_price1 = min(min_price1, prices[i])
for i in range(n):
max_profit2 = max(max_profit2, max_price2 - prices[n - 1 - i])
m2[n - 1 - i] = max_profit2
max_price2 = max(max_price2, prices[n - 1 - i])
max_profit = 0
for i in range(n):
max_profit = max(m1[i] + m2[i], max_profit)
return max_profit
|
JiaminXuan/leetcode-python
|
best_time_to_buy_and_sell_stock_iii/solution.py
|
Python
|
bsd-2-clause
| 846
|
from flask import Blueprint, render_template, jsonify
from flask_website.utils import request_wants_json
from flask_website.listings.projects import projects
mod = Blueprint('community', __name__, url_prefix='/community')
@mod.route('/')
def index():
return render_template('community/index.html')
@mod.route('/irc/')
def irc():
return render_template('community/irc.html')
@mod.route('/badges/')
def badges():
return render_template('community/badges.html')
@mod.route('/poweredby/')
def poweredby():
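    # Serve JSON when the request prefers it; otherwise render the HTML page.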
if request_wants_json():
return jsonify((k, [p.to_json() for p in v])
for k, v in projects.iteritems())
return render_template('community/poweredby.html', projects=projects)
@mod.route('/logos/')
def logos():
return render_template('community/logos.html')
|
mitsuhiko/flask-website
|
flask_website/views/community.py
|
Python
|
bsd-3-clause
| 824
|
import pingo
'''
In order to use this set of cases, it is necessary to set
the following attributes on your TestCase setUp:
self.analog_input_pin_number = 0
self.expected_analog_input = 1004
self.expected_analog_ratio = 0.98
'''
class AnalogReadBasics(object):
    '''
    Wire a 10K Ohm resistance from the AnalogPin to the GND.
    Then wire a 200 Ohm resistance from the AnalogPin to the VND.
    This setup will provide a reading of ~98%.
    '''
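    # Voltage divider: V_pin = Vcc * 10000 / (10000 + 200) ~= 0.98 * Vcc, which
    # is where the expected ~98% reading comes from.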
def test_200ohmRead(self):
pin = self.board.pins[self.analog_input_pin_number]
pin.mode = pingo.ANALOG
_input = pin.value
# print "Value Read: ", _input
assert self.expected_analog_input - 3 <= _input <= self.expected_analog_input + 3
def test_pin_ratio(self):
pin = self.board.pins[self.analog_input_pin_number]
pin.mode = pingo.ANALOG
bits_resolution = (2 ** pin.bits) - 1
_input = pin.ratio(0, bits_resolution, 0.0, 1.0)
# print "Value Read: ", _input
        # Check the measured ratio against the expected value (10e-1 tolerance)
assert abs(_input - self.expected_analog_ratio) < 10e-1
class AnalogExceptions(object):
def test_wrong_output_mode(self):
pin = self.board.pins[self.analog_input_pin_number]
with self.assertRaises(pingo.ModeNotSuported):
pin.mode = pingo.OUT
|
garoa/pingo
|
pingo/test/level1/cases.py
|
Python
|
mit
| 1,331
|
f = open("pixels.dat", "r")
pixs = f.readline()
f.close()
print len(pixs)
from PIL import Image
import numpy as np
img = Image.new('RGB', (160, 210), "black") # create a new black image
pixels = img.load() # create the pixel map
# Load the hardcoded grayscale array
from grayscale import getGrayscaleArray
colMat = getGrayscaleArray()
for i in range(len(pixs)/2):
row = i % 160
column = i/160
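    # Each pixel is two hex characters: the first selects a column and the
    # second (halved, see the palette link below) a row of the grayscale matrix.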
hex1 = int(pixs[i*2], 16)
# Division by 2 because: http://en.wikipedia.org/wiki/List_of_video_game_console_palettes
hex2 = int(pixs[i*2+1], 16)/2
temp = int(colMat[hex2, hex1])
pixels[row, column] = (temp, temp, temp)
img.show()
# Example 1: take one PIL.Image file, preprocess and get its pixel array
from preprocessing import preprocessImage
img2 = preprocessImage(img)
pixels = img2.load()
# Example 2: take a sequence that DOES contain actions and preprocess the images in-place
from preprocessing import preprocessSequenceWithActions
sequence = [img.copy(), 45, img.copy(), 'thisdoesntmatter', img.copy(), 'this neither'] #,deepcopy(img),'thisdoesntmatter',deepcopy(img),deepcopy(img)]
sequence = preprocessSequenceWithActions(sequence)
# Example 3: take a sequence that DOESN'T contain actions and preprocess the images in-place
from preprocessing import preprocessSequenceNoActions
sequence = [img.copy(), img.copy(), img.copy()]
sequence = preprocessSequenceNoActions(sequence)
|
rickyHong/dqn-repl
|
sandbox/old/imageTest.py
|
Python
|
gpl-3.0
| 1,421
|
# coding: UTF-8
"""
Tests for support views.
"""
import itertools
import json
import re
from datetime import datetime, timedelta
import ddt
import pytest
from django.contrib.auth.models import User
from django.urls import reverse
from django.db.models import signals
from nose.plugins.attrib import attr
from pytz import UTC
from common.test.utils import disable_signal
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from lms.djangoapps.verify_student.models import VerificationDeadline
from student.models import ENROLLED_TO_ENROLLED, CourseEnrollment, ManualEnrollmentAudit
from student.roles import GlobalStaff, SupportStaffRole
from student.tests.factories import TEST_PASSWORD, CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class SupportViewTestCase(ModuleStoreTestCase):
"""
Base class for support view tests.
"""
USERNAME = "support"
EMAIL = "support@example.com"
PASSWORD = "support"
def setUp(self):
"""Create a user and log in. """
super(SupportViewTestCase, self).setUp()
self.user = UserFactory(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD)
self.course = CourseFactory.create()
success = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(success, msg="Could not log in")
class SupportViewManageUserTests(SupportViewTestCase):
"""
Base class for support view tests.
"""
def setUp(self):
"""Make the user support staff"""
super(SupportViewManageUserTests, self).setUp()
SupportStaffRole().add_users(self.user)
def test_get_support_form(self):
"""
Tests Support View to return Manage User Form
"""
url = reverse('support:manage_user')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_get_form_with_user_info(self):
"""
Tests Support View to return Manage User Form
with user info
"""
url = reverse('support:manage_user_detail') + self.user.username
response = self.client.get(url)
data = json.loads(response.content)
self.assertEqual(data['username'], self.user.username)
def test_disable_user_account(self):
"""
Tests Support View to disable the user account
"""
test_user = UserFactory(
username='foobar', email='foobar@foobar.com', password='foobar'
)
url = reverse('support:manage_user_detail') + test_user.username
response = self.client.post(url, data={
'username_or_email': test_user.username
})
data = json.loads(response.content)
self.assertEqual(data['success_msg'], 'User Disabled Successfully')
test_user = User.objects.get(username=test_user.username, email=test_user.email)
self.assertEqual(test_user.has_usable_password(), False)
@attr(shard=3)
@ddt.ddt
class SupportViewAccessTests(SupportViewTestCase):
"""
Tests for access control of support views.
"""
@ddt.data(*(
(url_name, role, has_access)
for (url_name, (role, has_access))
in itertools.product((
'support:index',
'support:certificates',
'support:refund',
'support:enrollment',
'support:enrollment_list',
'support:manage_user',
'support:manage_user_detail',
), (
(GlobalStaff, True),
(SupportStaffRole, True),
(None, False)
))
))
@ddt.unpack
def test_access(self, url_name, role, has_access):
if role is not None:
role().add_users(self.user)
url = reverse(url_name)
response = self.client.get(url)
if has_access:
self.assertEqual(response.status_code, 200)
else:
self.assertEqual(response.status_code, 403)
@ddt.data(
"support:index",
"support:certificates",
"support:refund",
"support:enrollment",
"support:enrollment_list",
"support:manage_user",
"support:manage_user_detail",
)
def test_require_login(self, url_name):
url = reverse(url_name)
# Log out then try to retrieve the page
self.client.logout()
response = self.client.get(url)
# Expect a redirect to the login page
redirect_url = "{login_url}?next={original_url}".format(
login_url=reverse("signin_user"),
original_url=url,
)
self.assertRedirects(response, redirect_url)
class SupportViewIndexTests(SupportViewTestCase):
"""
Tests for the support index view.
"""
EXPECTED_URL_NAMES = [
"support:certificates",
"support:refund",
]
def setUp(self):
"""Make the user support staff. """
super(SupportViewIndexTests, self).setUp()
SupportStaffRole().add_users(self.user)
def test_index(self):
response = self.client.get(reverse("support:index"))
self.assertContains(response, "Support")
# Check that all the expected links appear on the index page.
for url_name in self.EXPECTED_URL_NAMES:
self.assertContains(response, reverse(url_name))
class SupportViewCertificatesTests(SupportViewTestCase):
"""
Tests for the certificates support view.
"""
def setUp(self):
"""Make the user support staff. """
super(SupportViewCertificatesTests, self).setUp()
SupportStaffRole().add_users(self.user)
def test_certificates_no_filter(self):
# Check that an empty initial filter is passed to the JavaScript client correctly.
response = self.client.get(reverse("support:certificates"))
self.assertContains(response, "userFilter: ''")
def test_certificates_with_user_filter(self):
# Check that an initial filter is passed to the JavaScript client.
url = reverse("support:certificates") + "?user=student@example.com"
response = self.client.get(url)
self.assertContains(response, "userFilter: 'student@example.com'")
def test_certificates_along_with_course_filter(self):
# Check that an initial filter is passed to the JavaScript client.
url = reverse("support:certificates") + "?user=student@example.com&course_id=" + unicode(self.course.id)
response = self.client.get(url)
self.assertContains(response, "userFilter: 'student@example.com'")
self.assertContains(response, "courseFilter: '" + unicode(self.course.id) + "'")
@ddt.ddt
class SupportViewEnrollmentsTests(SharedModuleStoreTestCase, SupportViewTestCase):
"""Tests for the enrollment support view."""
def setUp(self):
super(SupportViewEnrollmentsTests, self).setUp()
SupportStaffRole().add_users(self.user)
self.course = CourseFactory(display_name=u'teꜱᴛ')
self.student = UserFactory.create(username='student', email='test@example.com', password='test')
for mode in (
CourseMode.AUDIT, CourseMode.PROFESSIONAL, CourseMode.CREDIT_MODE,
CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.VERIFIED, CourseMode.HONOR
):
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id) # pylint: disable=no-member
self.verification_deadline = VerificationDeadline(
course_key=self.course.id, # pylint: disable=no-member
deadline=datetime.now(UTC) + timedelta(days=365)
)
self.verification_deadline.save()
CourseEnrollmentFactory.create(mode=CourseMode.AUDIT, user=self.student, course_id=self.course.id) # pylint: disable=no-member
self.url = reverse('support:enrollment_list', kwargs={'username_or_email': self.student.username})
def assert_enrollment(self, mode):
"""
Assert that the student's enrollment has the correct mode.
"""
enrollment = CourseEnrollment.get_enrollment(self.student, self.course.id) # pylint: disable=no-member
self.assertEqual(enrollment.mode, mode)
@ddt.data('username', 'email')
def test_get_enrollments(self, search_string_type):
url = reverse(
'support:enrollment_list',
kwargs={'username_or_email': getattr(self.student, search_string_type)}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(len(data), 1)
self.assertDictContainsSubset({
'mode': CourseMode.AUDIT,
'manual_enrollment': {},
'user': self.student.username,
'course_id': unicode(self.course.id), # pylint: disable=no-member
'is_active': True,
'verified_upgrade_deadline': None,
}, data[0])
self.assertEqual(
{CourseMode.VERIFIED, CourseMode.AUDIT, CourseMode.HONOR,
CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.PROFESSIONAL},
{mode['slug'] for mode in data[0]['course_modes']}
)
def test_get_manual_enrollment_history(self):
ManualEnrollmentAudit.create_manual_enrollment_audit(
self.user,
self.student.email,
ENROLLED_TO_ENROLLED,
'Financial Assistance',
CourseEnrollment.objects.get(course_id=self.course.id, user=self.student) # pylint: disable=no-member
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertDictContainsSubset({
'enrolled_by': self.user.email,
'reason': 'Financial Assistance',
}, json.loads(response.content)[0]['manual_enrollment'])
@disable_signal(signals, 'post_save')
@ddt.data('username', 'email')
def test_change_enrollment(self, search_string_type):
self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
url = reverse(
'support:enrollment_list',
kwargs={'username_or_email': getattr(self.student, search_string_type)}
)
response = self.client.post(url, data={
'course_id': unicode(self.course.id), # pylint: disable=no-member
'old_mode': CourseMode.AUDIT,
'new_mode': CourseMode.VERIFIED,
'reason': 'Financial Assistance'
})
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
self.assert_enrollment(CourseMode.VERIFIED)
@ddt.data(
({}, r"The field \"'\w+'\" is required."), # The double quoting goes away in Django 2.0.1
({'course_id': 'bad course key'}, 'Could not parse course key.'),
({
'course_id': 'course-v1:TestX+T101+2015',
'old_mode': CourseMode.AUDIT,
'new_mode': CourseMode.VERIFIED,
'reason': ''
}, 'Could not find enrollment for user'),
({
'course_id': None,
'old_mode': CourseMode.HONOR,
'new_mode': CourseMode.VERIFIED,
'reason': ''
}, r'User \w+ is not enrolled with mode ' + CourseMode.HONOR),
({
'course_id': 'course-v1:TestX+T101+2015',
'old_mode': CourseMode.AUDIT,
'new_mode': CourseMode.CREDIT_MODE,
'reason': 'Enrollment cannot be changed to credit mode'
}, '')
)
@ddt.unpack
def test_change_enrollment_bad_data(self, data, error_message):
# `self` isn't available from within the DDT declaration, so
# assign the course ID here
if 'course_id' in data and data['course_id'] is None:
data['course_id'] = unicode(self.course.id) # pylint: disable=no-member
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 400)
self.assertIsNotNone(re.match(error_message, response.content))
self.assert_enrollment(CourseMode.AUDIT)
self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
@disable_signal(signals, 'post_save')
@ddt.data('honor', 'audit', 'verified', 'professional', 'no-id-professional')
def test_update_enrollment_for_all_modes(self, new_mode):
""" Verify support can changed the enrollment to all available modes
except credit. """
self.assert_update_enrollment('username', new_mode)
@disable_signal(signals, 'post_save')
@ddt.data('honor', 'audit', 'verified', 'professional', 'no-id-professional')
def test_update_enrollment_for_ended_course(self, new_mode):
""" Verify support can changed the enrollment of archived course. """
self.set_course_end_date_and_expiry()
self.assert_update_enrollment('username', new_mode)
def test_update_enrollment_with_credit_mode_throws_error(self):
""" Verify that enrollment cannot be changed to credit mode. """
self.assert_update_enrollment('username', CourseMode.CREDIT_MODE)
@ddt.data('username', 'email')
def test_get_enrollments_with_expired_mode(self, search_string_type):
""" Verify that page can get the all modes with archived course. """
self.set_course_end_date_and_expiry()
url = reverse(
'support:enrollment_list',
kwargs={'username_or_email': getattr(self.student, search_string_type)}
)
response = self.client.get(url)
self._assert_generated_modes(response)
@disable_signal(signals, 'post_save')
@ddt.data('username', 'email')
def test_update_enrollments_with_expired_mode(self, search_string_type):
""" Verify that enrollment can be updated to verified mode. """
self.set_course_end_date_and_expiry()
self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
self.assert_update_enrollment(search_string_type, CourseMode.VERIFIED)
def _assert_generated_modes(self, response):
"""Dry method to generate course modes dict and test with response data."""
modes = CourseMode.modes_for_course(self.course.id, include_expired=True) # pylint: disable=no-member
modes_data = []
for mode in modes:
expiry = mode.expiration_datetime.strftime('%Y-%m-%dT%H:%M:%SZ') if mode.expiration_datetime else None
modes_data.append({
'sku': mode.sku,
'expiration_datetime': expiry,
'name': mode.name,
'currency': mode.currency,
'bulk_sku': mode.bulk_sku,
'min_price': mode.min_price,
'suggested_prices': mode.suggested_prices,
'slug': mode.slug,
'description': mode.description
})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(len(data), 1)
self.assertEqual(
modes_data,
data[0]['course_modes']
)
self.assertEqual(
{CourseMode.VERIFIED, CourseMode.AUDIT, CourseMode.NO_ID_PROFESSIONAL_MODE,
CourseMode.PROFESSIONAL, CourseMode.HONOR},
{mode['slug'] for mode in data[0]['course_modes']}
)
def assert_update_enrollment(self, search_string_type, new_mode):
""" Dry method to update the enrollment and assert response."""
self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
url = reverse(
'support:enrollment_list',
kwargs={'username_or_email': getattr(self.student, search_string_type)}
)
response = self.client.post(url, data={
'course_id': unicode(self.course.id), # pylint: disable=no-member
'old_mode': CourseMode.AUDIT,
'new_mode': new_mode,
'reason': 'Financial Assistance'
})
# Enrollment cannot be changed to credit mode.
if new_mode == CourseMode.CREDIT_MODE:
self.assertEqual(response.status_code, 400)
else:
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
self.assert_enrollment(new_mode)
def set_course_end_date_and_expiry(self):
""" Set the course-end date and expire its verified mode."""
self.course.start = datetime(year=1970, month=1, day=1, tzinfo=UTC)
self.course.end = datetime(year=1970, month=1, day=10, tzinfo=UTC)
# change verified mode expiry.
verified_mode = CourseMode.objects.get(
course_id=self.course.id, # pylint: disable=no-member
mode_slug=CourseMode.VERIFIED
)
verified_mode.expiration_datetime = datetime(year=1970, month=1, day=9, tzinfo=UTC)
verified_mode.save()
|
Stanford-Online/edx-platform
|
lms/djangoapps/support/tests/test_views.py
|
Python
|
agpl-3.0
| 17,238
|
"""
:class:`~xblock.field_data.FieldData` subclasses used by the LMS
"""
from xblock.field_data import ReadOnlyFieldData, SplitFieldData
from xblock.fields import Scope
def lms_field_data(authored_data, student_data):
"""
Returns a new :class:`~xblock.field_data.FieldData` that
reads all UserScope.ONE and UserScope.ALL fields from `student_data`
and all UserScope.NONE fields from `authored_data`. It also prevents
writing to `authored_data`.
"""
authored_data = ReadOnlyFieldData(authored_data)
return SplitFieldData({
Scope.content: authored_data,
Scope.settings: authored_data,
Scope.parent: authored_data,
Scope.children: authored_data,
Scope.user_state_summary: student_data,
Scope.user_state: student_data,
Scope.user_info: student_data,
Scope.preferences: student_data,
})
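# Illustrative sketch (hypothetical, not part of the upstream module): how a
# caller might build the split field data. DictFieldData is assumed to be
# available in xblock.field_data; the block fields and values below are
# placeholders used only for illustration.
def _lms_field_data_example():
    """Show how reads are routed by scope once the data sources are split."""
    from xblock.field_data import DictFieldData

    authored = DictFieldData({'display_name': 'Demo Block'})  # authored scopes
    student = DictFieldData({'attempts': 2})                   # user scopes
    field_data = lms_field_data(authored, student)
    # Reads of content/settings/parent/children fields hit `authored`; reads
    # of user_state/user_info/preferences/user_state_summary hit `student`;
    # any write to the authored scopes fails because of ReadOnlyFieldData.
    return field_data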
|
PepperPD/edx-pepper-platform
|
lms/xblock/field_data.py
|
Python
|
agpl-3.0
| 885
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import uuid
from sockjs.tornado import SockJSRouter
from flask import Flask, g, request, session, Blueprint
from flask.ext.login import LoginManager, current_user
from flask.ext.principal import Principal, Permission, RoleNeed, identity_loaded, UserNeed
from flask.ext.babel import Babel, gettext, ngettext
from flask.ext.assets import Environment, Bundle
from babel import Locale
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from collections import defaultdict
import os
import logging
import logging.config
import atexit
import signal
SUCCESS = {}
NO_CONTENT = ("", 204)
app = Flask("octoprint")
assets = None
babel = None
debug = False
printer = None
printerProfileManager = None
fileManager = None
slicingManager = None
analysisQueue = None
userManager = None
eventManager = None
loginManager = None
pluginManager = None
appSessionManager = None
pluginLifecycleManager = None
principals = Principal(app)
admin_permission = Permission(RoleNeed("admin"))
user_permission = Permission(RoleNeed("user"))
# only import the octoprint stuff down here, as it might depend on things defined above to be initialized already
from octoprint.printer import get_connection_options
from octoprint.printer.profile import PrinterProfileManager
from octoprint.printer.standard import Printer
from octoprint.settings import settings
import octoprint.users as users
import octoprint.events as events
import octoprint.plugin
import octoprint.timelapse
import octoprint._version
import octoprint.util
import octoprint.filemanager.storage
import octoprint.filemanager.analysis
import octoprint.slicing
from . import util
UI_API_KEY = ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes)
versions = octoprint._version.get_versions()
VERSION = versions['version']
BRANCH = versions['branch'] if 'branch' in versions else None
DISPLAY_VERSION = "%s (%s branch)" % (VERSION, BRANCH) if BRANCH else VERSION
del versions
LOCALES = []
LANGUAGES = set()
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
user = load_user(identity.id)
if user is None:
return
identity.provides.add(UserNeed(user.get_name()))
if user.is_user():
identity.provides.add(RoleNeed("user"))
if user.is_admin():
identity.provides.add(RoleNeed("admin"))
def load_user(id):
if id == "_api":
return users.ApiUser()
if session and "usersession.id" in session:
sessionid = session["usersession.id"]
else:
sessionid = None
if userManager is not None:
if sessionid:
return userManager.findUser(username=id, session=sessionid)
else:
return userManager.findUser(username=id)
return users.DummyUser()
#~~ startup code
class Server():
def __init__(self, configfile=None, basedir=None, host="0.0.0.0", port=5000, debug=False, allowRoot=False, logConf=None):
self._configfile = configfile
self._basedir = basedir
self._host = host
self._port = port
self._debug = debug
self._allowRoot = allowRoot
self._logConf = logConf
self._server = None
self._logger = None
self._lifecycle_callbacks = defaultdict(list)
self._template_searchpaths = []
def run(self):
if not self._allowRoot:
self._check_for_root()
global app
global babel
global printer
global printerProfileManager
global fileManager
global slicingManager
global analysisQueue
global userManager
global eventManager
global loginManager
global pluginManager
global appSessionManager
global pluginLifecycleManager
global debug
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
import sys
debug = self._debug
# first initialize the settings singleton and make sure it uses given configfile and basedir if available
s = settings(init=True, basedir=self._basedir, configfile=self._configfile)
# then monkey patch a bunch of stuff
util.tornado.fix_ioloop_scheduling()
util.flask.enable_additional_translations(additional_folders=[s.getBaseFolder("translations")])
# setup app
self._setup_app()
# setup i18n
self._setup_i18n(app)
# then initialize logging
self._setup_logging(self._debug, self._logConf)
self._logger = logging.getLogger(__name__)
def exception_logger(exc_type, exc_value, exc_tb):
self._logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))
sys.excepthook = exception_logger
self._logger.info("Starting OctoPrint %s" % DISPLAY_VERSION)
# then initialize the plugin manager
pluginManager = octoprint.plugin.plugin_manager(init=True)
printerProfileManager = PrinterProfileManager()
eventManager = events.eventManager()
analysisQueue = octoprint.filemanager.analysis.AnalysisQueue()
slicingManager = octoprint.slicing.SlicingManager(s.getBaseFolder("slicingProfiles"), printerProfileManager)
storage_managers = dict()
storage_managers[octoprint.filemanager.FileDestinations.LOCAL] = octoprint.filemanager.storage.LocalFileStorage(s.getBaseFolder("uploads"))
fileManager = octoprint.filemanager.FileManager(analysisQueue, slicingManager, printerProfileManager, initial_storage_managers=storage_managers)
printer = Printer(fileManager, analysisQueue, printerProfileManager)
appSessionManager = util.flask.AppSessionManager()
pluginLifecycleManager = LifecycleManager(pluginManager)
def octoprint_plugin_inject_factory(name, implementation):
if not isinstance(implementation, octoprint.plugin.OctoPrintPlugin):
return None
return dict(
plugin_manager=pluginManager,
printer_profile_manager=printerProfileManager,
event_bus=eventManager,
analysis_queue=analysisQueue,
slicing_manager=slicingManager,
file_manager=fileManager,
printer=printer,
app_session_manager=appSessionManager,
plugin_lifecycle_manager=pluginLifecycleManager,
data_folder=os.path.join(settings().getBaseFolder("data"), name)
)
def settings_plugin_inject_factory(name, implementation):
if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
return None
default_settings = implementation.get_settings_defaults()
get_preprocessors, set_preprocessors = implementation.get_settings_preprocessors()
plugin_settings = octoprint.plugin.plugin_settings(name,
defaults=default_settings,
get_preprocessors=get_preprocessors,
set_preprocessors=set_preprocessors)
return dict(settings=plugin_settings)
def settings_plugin_config_migration(name, implementation):
if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
return
settings_version = implementation.get_settings_version()
settings_migrator = implementation.on_settings_migrate
if settings_version is not None and settings_migrator is not None:
stored_version = implementation._settings.get_int(["_config_version"])
if stored_version is None or stored_version < settings_version:
settings_migrator(settings_version, stored_version)
implementation._settings.set_int(["_config_version"], settings_version)
implementation._settings.save()
implementation.on_settings_initialized()
pluginManager.implementation_inject_factories=[octoprint_plugin_inject_factory, settings_plugin_inject_factory]
pluginManager.initialize_implementations()
settingsPlugins = pluginManager.get_implementations(octoprint.plugin.SettingsPlugin)
for implementation in settingsPlugins:
try:
settings_plugin_config_migration(implementation._identifier, implementation)
except:
self._logger.exception("Error while trying to migrate settings for plugin {}, ignoring it".format(implementation._identifier))
pluginManager.implementation_post_inits=[settings_plugin_config_migration]
pluginManager.log_all_plugins()
# initialize file manager and register it for changes in the registered plugins
fileManager.initialize()
pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: fileManager.reload_plugins())
# initialize slicing manager and register it for changes in the registered plugins
slicingManager.initialize()
pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: slicingManager.reload_slicers())
# setup jinja2
self._setup_jinja2()
def template_enabled(name, plugin):
if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
return
self._register_additional_template_plugin(plugin.implementation)
def template_disabled(name, plugin):
if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
return
self._unregister_additional_template_plugin(plugin.implementation)
pluginLifecycleManager.add_callback("enabled", template_enabled)
pluginLifecycleManager.add_callback("disabled", template_disabled)
# setup assets
self._setup_assets()
# configure timelapse
octoprint.timelapse.configureTimelapse()
# setup command triggers
events.CommandTrigger(printer)
if self._debug:
events.DebugEventListener()
# setup access control
if s.getBoolean(["accessControl", "enabled"]):
userManagerName = s.get(["accessControl", "userManager"])
try:
clazz = octoprint.util.get_class(userManagerName)
userManager = clazz()
except AttributeError, e:
self._logger.exception("Could not instantiate user manager %s, will run with accessControl disabled!" % userManagerName)
app.wsgi_app = util.ReverseProxied(
app.wsgi_app,
s.get(["server", "reverseProxy", "prefixHeader"]),
s.get(["server", "reverseProxy", "schemeHeader"]),
s.get(["server", "reverseProxy", "hostHeader"]),
s.get(["server", "reverseProxy", "prefixFallback"]),
s.get(["server", "reverseProxy", "schemeFallback"]),
s.get(["server", "reverseProxy", "hostFallback"])
)
secret_key = s.get(["server", "secretKey"])
if not secret_key:
import string
from random import choice
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
secret_key = "".join(choice(chars) for _ in xrange(32))
s.set(["server", "secretKey"], secret_key)
s.save()
app.secret_key = secret_key
loginManager = LoginManager()
loginManager.session_protection = "strong"
loginManager.user_callback = load_user
if userManager is None:
loginManager.anonymous_user = users.DummyUser
principals.identity_loaders.appendleft(users.dummy_identity_loader)
loginManager.init_app(app)
if self._host is None:
self._host = s.get(["server", "host"])
if self._port is None:
self._port = s.getInt(["server", "port"])
app.debug = self._debug
# register API blueprint
self._setup_blueprints()
## Tornado initialization starts here
ioloop = IOLoop()
ioloop.install()
self._router = SockJSRouter(self._create_socket_connection, "/sockjs")
upload_suffixes = dict(name=s.get(["server", "uploads", "nameSuffix"]), path=s.get(["server", "uploads", "pathSuffix"]))
server_routes = self._router.urls + [
# various downloads
(r"/downloads/timelapse/([^/]*\.mpg)", util.tornado.LargeResponseHandler, dict(path=s.getBaseFolder("timelapse"), as_attachment=True)),
(r"/downloads/files/local/(.*)", util.tornado.LargeResponseHandler, dict(path=s.getBaseFolder("uploads"), as_attachment=True, path_validation=util.tornado.path_validation_factory(lambda path: not os.path.basename(path).startswith("."), status_code=404))),
(r"/downloads/logs/([^/]*)", util.tornado.LargeResponseHandler, dict(path=s.getBaseFolder("logs"), as_attachment=True, access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.admin_validator))),
# camera snapshot
(r"/downloads/camera/current", util.tornado.UrlForwardHandler, dict(url=s.get(["webcam", "snapshot"]), as_attachment=True, access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.user_validator))),
# generated webassets
(r"/static/webassets/(.*)", util.tornado.LargeResponseHandler, dict(path=os.path.join(s.getBaseFolder("generated"), "webassets")))
]
for name, hook in pluginManager.get_hooks("octoprint.server.http.routes").items():
try:
result = hook(list(server_routes))
except:
self._logger.exception("There was an error while retrieving additional server routes from plugin hook {name}".format(**locals()))
else:
if isinstance(result, (list, tuple)):
for entry in result:
if not isinstance(entry, tuple) or not len(entry) == 3:
continue
if not isinstance(entry[0], basestring):
continue
if not isinstance(entry[2], dict):
continue
route, handler, kwargs = entry
route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])
self._logger.debug("Adding additional route {route} handled by handler {handler} and with additional arguments {kwargs!r}".format(**locals()))
server_routes.append((route, handler, kwargs))
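# Illustrative note (hypothetical example, not from the upstream code): a
# plugin hooking "octoprint.server.http.routes" is expected to return
# 3-tuples of (route, handler class, handler kwargs), e.g.
#     [("/history", MyHistoryHandler, dict(as_attachment=True))]
# which the loop above remounts under /plugin/<plugin identifier>/history.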
server_routes.append((r".*", util.tornado.UploadStorageFallbackHandler, dict(fallback=util.tornado.WsgiInputContainer(app.wsgi_app), file_prefix="octoprint-file-upload-", file_suffix=".tmp", suffixes=upload_suffixes)))
self._tornado_app = Application(server_routes)
max_body_sizes = [
("POST", r"/api/files/([^/]*)", s.getInt(["server", "uploads", "maxSize"])),
("POST", r"/api/languages", 5 * 1024 * 1024)
]
# allow plugins to extend allowed maximum body sizes
for name, hook in pluginManager.get_hooks("octoprint.server.http.bodysize").items():
try:
result = hook(list(max_body_sizes))
except:
self._logger.exception("There was an error while retrieving additional upload sizes from plugin hook {name}".format(**locals()))
else:
if isinstance(result, (list, tuple)):
for entry in result:
if not isinstance(entry, tuple) or not len(entry) == 3:
continue
if not entry[0] in util.tornado.UploadStorageFallbackHandler.BODY_METHODS:
continue
if not isinstance(entry[2], int):
continue
method, route, size = entry
route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])
self._logger.debug("Adding maximum body size of {size}B for {method} requests to {route})".format(**locals()))
max_body_sizes.append((method, route, size))
self._server = util.tornado.CustomHTTPServer(self._tornado_app, max_body_sizes=max_body_sizes, default_max_body_size=s.getInt(["server", "maxSize"]))
self._server.listen(self._port, address=self._host)
eventManager.fire(events.Events.STARTUP)
if s.getBoolean(["serial", "autoconnect"]):
(port, baudrate) = s.get(["serial", "port"]), s.getInt(["serial", "baudrate"])
printer_profile = printerProfileManager.get_default()
connectionOptions = get_connection_options()
if port in connectionOptions["ports"]:
printer.connect(port=port, baudrate=baudrate, profile=printer_profile["id"] if "id" in printer_profile else "_default")
# start up watchdogs
if s.getBoolean(["feature", "pollWatched"]):
# use the less performant polling observer if explicitly configured
observer = PollingObserver()
else:
# use os default
observer = Observer()
observer.schedule(util.watchdog.GcodeWatchdogHandler(fileManager, printer), s.getBaseFolder("watched"))
observer.start()
# run our startup plugins
octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
"on_startup",
args=(self._host, self._port))
def call_on_startup(name, plugin):
implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
if implementation is None:
return
implementation.on_startup(self._host, self._port)
pluginLifecycleManager.add_callback("enabled", call_on_startup)
# prepare our after startup function
def on_after_startup():
self._logger.info("Listening on http://%s:%d" % (self._host, self._port))
# now this is somewhat ugly, but the issue is the following: startup plugins might want to do things for
# which they need the server to be already alive (e.g. for being able to resolve urls, such as favicons
# or service xmls or the like). While they are working, though, the ioloop would block. Therefore we'll
# create a single use thread in which to perform our after-startup-tasks, start that and hand back
# control to the ioloop
def work():
octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
"on_after_startup")
def call_on_after_startup(name, plugin):
implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
if implementation is None:
return
implementation.on_after_startup()
pluginLifecycleManager.add_callback("enabled", call_on_after_startup)
import threading
threading.Thread(target=work).start()
ioloop.add_callback(on_after_startup)
# prepare our shutdown function
def on_shutdown():
# will be called on clean system exit and shutdown the watchdog observer and call the on_shutdown methods
# on all registered ShutdownPlugins
self._logger.info("Shutting down...")
observer.stop()
observer.join()
octoprint.plugin.call_plugin(octoprint.plugin.ShutdownPlugin,
"on_shutdown")
self._logger.info("Goodbye!")
atexit.register(on_shutdown)
def sigterm_handler(*args, **kwargs):
# will stop tornado on SIGTERM, making the program exit cleanly
def shutdown_tornado():
ioloop.stop()
ioloop.add_callback_from_signal(shutdown_tornado)
signal.signal(signal.SIGTERM, sigterm_handler)
try:
# this is the main loop - as long as tornado is running, OctoPrint is running
ioloop.start()
except (KeyboardInterrupt, SystemExit):
pass
except:
self._logger.fatal("Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!")
self._logger.exception("Stacktrace follows:")
def _create_socket_connection(self, session):
global printer, fileManager, analysisQueue, userManager, eventManager
return util.sockjs.PrinterStateConnection(printer, fileManager, analysisQueue, userManager, eventManager, pluginManager, session)
def _check_for_root(self):
if "geteuid" in dir(os) and os.geteuid() == 0:
exit("You should not run OctoPrint as root!")
def _get_locale(self):
global LANGUAGES
if "l10n" in request.values:
return Locale.negotiate([request.values["l10n"]], LANGUAGES)
if hasattr(g, "identity") and g.identity and userManager is not None:
userid = g.identity.id
try:
user_language = userManager.getUserSetting(userid, ("interface", "language"))
if user_language is not None and not user_language == "_default":
return Locale.negotiate([user_language], LANGUAGES)
except octoprint.users.UnknownUser:
pass
default_language = settings().get(["appearance", "defaultLanguage"])
if default_language is not None and not default_language == "_default" and default_language in LANGUAGES:
return Locale.negotiate([default_language], LANGUAGES)
return request.accept_languages.best_match(LANGUAGES)
def _setup_logging(self, debug, logConf=None):
defaultConfig = {
"version": 1,
"formatters": {
"simple": {
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"file": {
"class": "logging.handlers.TimedRotatingFileHandler",
"level": "DEBUG",
"formatter": "simple",
"when": "D",
"backupCount": "1",
"filename": os.path.join(settings().getBaseFolder("logs"), "octoprint.log")
},
"serialFile": {
"class": "logging.handlers.RotatingFileHandler",
"level": "DEBUG",
"formatter": "simple",
"maxBytes": 2 * 1024 * 1024, # let's limit the serial log to 2MB in size
"filename": os.path.join(settings().getBaseFolder("logs"), "serial.log")
}
},
"loggers": {
"SERIAL": {
"level": "CRITICAL",
"handlers": ["serialFile"],
"propagate": False
},
"tornado.application": {
"level": "INFO"
},
"tornado.general": {
"level": "INFO"
},
"octoprint.server.util.flask": {
"level": "WARN"
}
},
"root": {
"level": "INFO",
"handlers": ["console", "file"]
}
}
if debug:
defaultConfig["root"]["level"] = "DEBUG"
if logConf is None:
logConf = os.path.join(settings().getBaseFolder("base"), "logging.yaml")
configFromFile = {}
if os.path.exists(logConf) and os.path.isfile(logConf):
import yaml
with open(logConf, "r") as f:
configFromFile = yaml.safe_load(f)
config = octoprint.util.dict_merge(defaultConfig, configFromFile)
logging.config.dictConfig(config)
logging.captureWarnings(True)
import warnings
warnings.simplefilter("always")
if settings().getBoolean(["serial", "log"]):
# enable debug logging to serial.log
logging.getLogger("SERIAL").setLevel(logging.DEBUG)
logging.getLogger("SERIAL").debug("Enabling serial logging")
def _setup_app(self):
@app.before_request
def before_request():
g.locale = self._get_locale()
@app.after_request
def after_request(response):
# send no-cache headers with all POST responses
if request.method == "POST":
response.cache_control.no_cache = True
response.headers.add("X-Clacks-Overhead", "GNU Terry Pratchett")
return response
def _setup_i18n(self, app):
global babel
global LOCALES
global LANGUAGES
babel = Babel(app)
def get_available_locale_identifiers(locales):
result = set()
# add available translations
for locale in locales:
result.add(locale.language)
if locale.territory:
# if a territory is specified, add that too
result.add("%s_%s" % (locale.language, locale.territory))
return result
LOCALES = babel.list_translations()
LANGUAGES = get_available_locale_identifiers(LOCALES)
@babel.localeselector
def get_locale():
return self._get_locale()
def _setup_jinja2(self):
app.jinja_env.add_extension("jinja2.ext.do")
# configure additional template folders for jinja2
import jinja2
filesystem_loader = jinja2.FileSystemLoader([])
filesystem_loader.searchpath = self._template_searchpaths
jinja_loader = jinja2.ChoiceLoader([
app.jinja_loader,
filesystem_loader
])
app.jinja_loader = jinja_loader
del jinja2
self._register_template_plugins()
def _register_template_plugins(self):
template_plugins = pluginManager.get_implementations(octoprint.plugin.TemplatePlugin)
for plugin in template_plugins:
try:
self._register_additional_template_plugin(plugin)
except:
self._logger.exception("Error while trying to register templates of plugin {}, ignoring it".format(plugin._identifier))
def _register_additional_template_plugin(self, plugin):
folder = plugin.get_template_folder()
if folder is not None and not folder in self._template_searchpaths:
self._template_searchpaths.append(folder)
def _unregister_additional_template_plugin(self, plugin):
folder = plugin.get_template_folder()
if folder is not None and folder in self._template_searchpaths:
self._template_searchpaths.remove(folder)
def _setup_blueprints(self):
from octoprint.server.api import api
from octoprint.server.apps import apps, clear_registered_app
import octoprint.server.views
app.register_blueprint(api, url_prefix="/api")
app.register_blueprint(apps, url_prefix="/apps")
# also register any blueprints defined in BlueprintPlugins
self._register_blueprint_plugins()
# and register a blueprint for serving the static files of asset plugins which are not blueprint plugins themselves
self._register_asset_plugins()
global pluginLifecycleManager
def clear_apps(name, plugin):
clear_registered_app()
pluginLifecycleManager.add_callback("enabled", clear_apps)
pluginLifecycleManager.add_callback("disabled", clear_apps)
def _register_blueprint_plugins(self):
blueprint_plugins = octoprint.plugin.plugin_manager().get_implementations(octoprint.plugin.BlueprintPlugin)
for plugin in blueprint_plugins:
try:
self._register_blueprint_plugin(plugin)
except:
self._logger.exception("Error while registering blueprint of plugin {}, ignoring it".format(plugin._identifier))
continue
def _register_asset_plugins(self):
asset_plugins = octoprint.plugin.plugin_manager().get_implementations(octoprint.plugin.AssetPlugin)
for plugin in asset_plugins:
if isinstance(plugin, octoprint.plugin.BlueprintPlugin):
continue
try:
self._register_asset_plugin(plugin)
except:
self._logger.exception("Error while registering assets of plugin {}, ignoring it".format(plugin._identifier))
continue
def _register_blueprint_plugin(self, plugin):
name = plugin._identifier
blueprint = plugin.get_blueprint()
if blueprint is None:
return
if plugin.is_blueprint_protected():
from octoprint.server.util import apiKeyRequestHandler, corsResponseHandler
blueprint.before_request(apiKeyRequestHandler)
blueprint.after_request(corsResponseHandler)
url_prefix = "/plugin/{name}".format(name=name)
app.register_blueprint(blueprint, url_prefix=url_prefix)
if self._logger:
self._logger.debug("Registered API of plugin {name} under URL prefix {url_prefix}".format(name=name, url_prefix=url_prefix))
def _register_asset_plugin(self, plugin):
name = plugin._identifier
url_prefix = "/plugin/{name}".format(name=name)
blueprint = Blueprint("plugin." + name, name, static_folder=plugin.get_asset_folder())
app.register_blueprint(blueprint, url_prefix=url_prefix)
if self._logger:
self._logger.debug("Registered assets of plugin {name} under URL prefix {url_prefix}".format(name=name, url_prefix=url_prefix))
def _setup_assets(self):
global app
global assets
global pluginManager
util.flask.fix_webassets_cache()
util.flask.fix_webassets_filtertool()
base_folder = settings().getBaseFolder("generated")
# clean the folder
if settings().getBoolean(["devel", "webassets", "clean_on_startup"]):
import shutil
import errno
import sys
for entry in ("webassets", ".webassets-cache"):
path = os.path.join(base_folder, entry)
# delete path if it exists
if os.path.isdir(path):
try:
self._logger.debug("Deleting {path}...".format(**locals()))
shutil.rmtree(path)
except:
self._logger.exception("Error while trying to delete {path}, leaving it alone".format(**locals()))
continue
# re-create path
self._logger.debug("Creating {path}...".format(**locals()))
error_text = "Error while trying to re-create {path}, that might cause errors with the webassets cache".format(**locals())
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EACCES:
# that might be caused by the user still having the folder open somewhere, let's try again after
# waiting a bit
import time
for n in xrange(3):
time.sleep(0.5)
self._logger.debug("Creating {path}: Retry #{retry} after {time}s".format(path=path, retry=n+1, time=(n + 1)*0.5))
try:
os.makedirs(path)
break
except:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.exception("Ignored error while creating directory {path}".format(**locals()))
pass
else:
# this will only get executed if we never did
# successfully execute makedirs above
self._logger.exception(error_text)
continue
else:
# not an access error, so something we don't understand
# went wrong -> log an error and stop
self._logger.exception(error_text)
continue
except:
# not an OSError, so something we don't understand
# went wrong -> log an error and stop
self._logger.exception(error_text)
continue
self._logger.info("Reset webasset folder {path}...".format(**locals()))
AdjustedEnvironment = type(Environment)(Environment.__name__, (Environment,), dict(
resolver_class=util.flask.PluginAssetResolver
))
class CustomDirectoryEnvironment(AdjustedEnvironment):
@property
def directory(self):
return base_folder
assets = CustomDirectoryEnvironment(app)
assets.debug = not settings().getBoolean(["devel", "webassets", "bundle"])
UpdaterType = type(util.flask.SettingsCheckUpdater)(util.flask.SettingsCheckUpdater.__name__, (util.flask.SettingsCheckUpdater,), dict(
updater=assets.updater
))
assets.updater = UpdaterType
enable_gcodeviewer = settings().getBoolean(["gcodeViewer", "enabled"])
preferred_stylesheet = settings().get(["devel", "stylesheet"])
dynamic_assets = util.flask.collect_plugin_assets(
enable_gcodeviewer=enable_gcodeviewer,
preferred_stylesheet=preferred_stylesheet
)
js_libs = [
"js/lib/jquery/jquery.min.js",
"js/lib/modernizr.custom.js",
"js/lib/lodash.min.js",
"js/lib/sprintf.min.js",
"js/lib/knockout.js",
"js/lib/knockout.mapping-latest.js",
"js/lib/babel.js",
"js/lib/avltree.js",
"js/lib/bootstrap/bootstrap.js",
"js/lib/bootstrap/bootstrap-modalmanager.js",
"js/lib/bootstrap/bootstrap-modal.js",
"js/lib/bootstrap/bootstrap-slider.js",
"js/lib/bootstrap/bootstrap-tabdrop.js",
"js/lib/jquery/jquery.ui.core.js",
"js/lib/jquery/jquery.ui.widget.js",
"js/lib/jquery/jquery.ui.mouse.js",
"js/lib/jquery/jquery.flot.js",
"js/lib/jquery/jquery.iframe-transport.js",
"js/lib/jquery/jquery.fileupload.js",
"js/lib/jquery/jquery.slimscroll.min.js",
"js/lib/jquery/jquery.qrcode.min.js",
"js/lib/moment-with-locales.min.js",
"js/lib/pusher.color.min.js",
"js/lib/detectmobilebrowser.js",
"js/lib/md5.min.js",
"js/lib/pnotify.min.js",
"js/lib/bootstrap-slider-knockout-binding.js",
"js/lib/loglevel.min.js",
"js/lib/sockjs-0.3.4.min.js"
]
js_app = dynamic_assets["js"] + [
"js/app/dataupdater.js",
"js/app/helpers.js",
"js/app/main.js",
]
css_libs = [
"css/bootstrap.min.css",
"css/bootstrap-modal.css",
"css/bootstrap-slider.css",
"css/bootstrap-tabdrop.css",
"css/font-awesome.min.css",
"css/jquery.fileupload-ui.css",
"css/pnotify.min.css"
]
css_app = list(dynamic_assets["css"])
if len(css_app) == 0:
css_app = ["empty"]
less_app = list(dynamic_assets["less"])
if len(less_app) == 0:
less_app = ["empty"]
from webassets.filter import register_filter, Filter
from webassets.filter.cssrewrite.base import PatternRewriter
import re
class LessImportRewrite(PatternRewriter):
name = "less_importrewrite"
patterns = {
"import_rewrite": re.compile("(@import(\s+\(.*\))?\s+)\"(.*)\";")
}
def import_rewrite(self, m):
import_with_options = m.group(1)
import_url = m.group(3)
if not import_url.startswith("http:") and not import_url.startswith("https:") and not import_url.startswith("/"):
import_url = "../less/" + import_url
return "{import_with_options}\"{import_url}\";".format(**locals())
class JsDelimiterBundle(Filter):
name = "js_delimiter_bundler"
options = {}
def input(self, _in, out, **kwargs):
out.write(_in.read())
out.write("\n;\n")
register_filter(LessImportRewrite)
register_filter(JsDelimiterBundle)
js_libs_bundle = Bundle(*js_libs, output="webassets/packed_libs.js", filters="js_delimiter_bundler")
if settings().getBoolean(["devel", "webassets", "minify"]):
js_app_bundle = Bundle(*js_app, output="webassets/packed_app.js", filters="rjsmin, js_delimiter_bundler")
else:
js_app_bundle = Bundle(*js_app, output="webassets/packed_app.js", filters="js_delimiter_bundler")
css_libs_bundle = Bundle(*css_libs, output="webassets/packed_libs.css")
css_app_bundle = Bundle(*css_app, output="webassets/packed_app.css", filters="cssrewrite")
all_less_bundle = Bundle(*less_app, output="webassets/packed_app.less", filters="cssrewrite, less_importrewrite")
assets.register("js_libs", js_libs_bundle)
assets.register("js_app", js_app_bundle)
assets.register("css_libs", css_libs_bundle)
assets.register("css_app", css_app_bundle)
assets.register("less_app", all_less_bundle)
class LifecycleManager(object):
def __init__(self, plugin_manager):
self._plugin_manager = plugin_manager
self._plugin_lifecycle_callbacks = defaultdict(list)
self._logger = logging.getLogger(__name__)
def on_plugin_event_factory(lifecycle_event):
def on_plugin_event(name, plugin):
self.on_plugin_event(lifecycle_event, name, plugin)
return on_plugin_event
self._plugin_manager.on_plugin_loaded = on_plugin_event_factory("loaded")
self._plugin_manager.on_plugin_unloaded = on_plugin_event_factory("unloaded")
self._plugin_manager.on_plugin_activated = on_plugin_event_factory("activated")
self._plugin_manager.on_plugin_deactivated = on_plugin_event_factory("deactivated")
self._plugin_manager.on_plugin_enabled = on_plugin_event_factory("enabled")
self._plugin_manager.on_plugin_disabled = on_plugin_event_factory("disabled")
def on_plugin_event(self, event, name, plugin):
for lifecycle_callback in self._plugin_lifecycle_callbacks[event]:
lifecycle_callback(name, plugin)
def add_callback(self, events, callback):
if isinstance(events, (str, unicode)):
events = [events]
for event in events:
self._plugin_lifecycle_callbacks[event].append(callback)
def remove_callback(self, callback, events=None):
if events is None:
for event in self._plugin_lifecycle_callbacks:
if callback in self._plugin_lifecycle_callbacks[event]:
self._plugin_lifecycle_callbacks[event].remove(callback)
else:
if isinstance(events, (str, unicode)):
events = [events]
for event in events:
if callback in self._plugin_lifecycle_callbacks[event]:
self._plugin_lifecycle_callbacks[event].remove(callback)
if __name__ == "__main__":
server = Server()
server.run()
|
dragondgold/OctoPrint
|
src/octoprint/server/__init__.py
|
Python
|
agpl-3.0
| 34,524
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Test whether the global editor and global reader roles have permission to
access workflow objects owned by Admin.
"""
# TODO: write tests for create, update, delete
from ggrc_workflows.models import Workflow
from ggrc_workflows.models import WorkflowPerson
from ggrc_workflows.models import TaskGroup
from ggrc_workflows.models import TaskGroupObject
from ggrc_workflows.models import TaskGroupTask
from ggrc_workflows.models import Cycle
from ggrc_workflows.models import CycleTaskGroup
from ggrc_workflows.models import CycleTaskGroupObjectTask
from integration.ggrc_workflows.roles import WorkflowRolesTestCase
class GlobalEditorReaderGetTest(WorkflowRolesTestCase):
""" Get workflow objects owned by another user
as global editor and global reader.
"""
def setUp(self):
# old-style class
WorkflowRolesTestCase.setUp(self)
def assert200_helper(self, response, message=None):
"""Helper that adds the info of the current user to the message.
"""
message = message or \
"Requests as user: '{}' Response returned {} instead of 200."\
.format(self.api.person_name, response.status_code)
self.assert200(response, message)
def test_get_obj_as_editor(self):
""" Get workflow object from draft workflow as a editor """
self._get_workflow_objects(self.users['editor'])
def test_get_active_obj_as_editor(self):
""" Get workflow object from active workflow as a editor """
self._get_active_workflow_objects(self.users['editor'])
def test_get_obj_as_reader(self):
""" Get workflow object from draft workflow as a reader """
self._get_workflow_objects(self.users['reader'])
def test_get_active_obj_as_reader(self):
""" Get workflow object from active workflow as a reader """
self._get_active_workflow_objects(self.users['reader'])
def _get_workflow_objects(self, user):
""" Helper method that runs tests for draft workflow
Args:
user: Person object
"""
self.api.set_user(user)
workflow_res = self.api.get(Workflow, self.workflow_obj.id)
self.assert200_helper(workflow_res)
task_group_res = self.api.get(TaskGroup, self.first_task_group.id)
self.assert200_helper(task_group_res)
task_group_object_res = self.api.get(
TaskGroupObject, self.first_task_group_object.id)
self.assert200_helper(task_group_object_res)
task_group_task_res = self.api.get(
TaskGroupTask, self.first_task_group_task.id)
self.assert200_helper(task_group_task_res)
workflow_person_res = self.api.get(
WorkflowPerson, self.first_workflow_person.id)
self.assert200_helper(workflow_person_res)
def _get_active_workflow_objects(self, user):
""" Helper method that runs tests for active workflow
Args:
user: Person object
"""
self.api.set_user(user)
self.workflow_res, self.workflow_obj = \
self.activate_workflow_with_cycle(self.workflow_obj)
self.get_first_objects()
workflow_res = self.api.get(Workflow, self.workflow_obj.id)
self.assert200_helper(workflow_res)
task_group_res = self.api.get(TaskGroup, self.first_task_group.id)
self.assert200_helper(task_group_res)
task_group_object_res = self.api.get(
TaskGroupObject, self.first_task_group_object.id)
self.assert200_helper(task_group_object_res)
task_group_task_res = self.api.get(
TaskGroupTask, self.first_task_group_task.id)
self.assert200_helper(task_group_task_res)
workflow_person_res = self.api.get(
WorkflowPerson, self.first_workflow_person.id)
self.assert200_helper(workflow_person_res)
cycle_obj = self.session.query(Cycle)\
.filter(Cycle.workflow_id == self.workflow_obj.id)\
.first()
cycle_res = self.api.get(
Cycle, cycle_obj.id)
self.assert200_helper(cycle_res)
cycle_task_group_obj = self.session.query(CycleTaskGroup)\
.filter(CycleTaskGroup.cycle_id == cycle_obj.id)\
.first()
cycle_task_group_res = self.api.get(
CycleTaskGroup, cycle_task_group_obj.id)
self.assert200_helper(cycle_task_group_res)
# cycle_object is cycle task group object task
cycle_object_obj =\
self.session.query(CycleTaskGroupObjectTask)\
.filter(
CycleTaskGroupObjectTask.cycle_task_group_id ==
cycle_task_group_obj.id)\
.first()
cycle_object_res = self.api.get(
CycleTaskGroupObjectTask, cycle_object_obj.id)
self.assert200_helper(cycle_object_res)
|
andrei-karalionak/ggrc-core
|
test/integration/ggrc_workflows/roles/test_global_editor_reader.py
|
Python
|
apache-2.0
| 4,622
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import local
from neutron.openstack.common import log as logging
from neutron.openstack.common.rpc import common as rpc_common
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_name='rabbit_durable_queues',
deprecated_group='DEFAULT',
help='Use durable queues in amqp.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
help='Auto-delete queues in amqp.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
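# Illustrative sketch (hypothetical, not part of the upstream module):
# get_connection_pool() lazily creates one shared Pool per connection class
# and memoizes it on that class. `conf` and `connection_cls` are placeholders;
# the connection class is assumed to expose a class-level `pool` attribute,
# as the kombu/qpid implementations do.
def _connection_pool_example(conf, connection_cls):
    """Show that repeated lookups return the same shared pool."""
    pool = get_connection_pool(conf, connection_cls)
    assert get_connection_pool(conf, connection_cls) is pool
    # Callers normally do not use the pool directly; they wrap it in
    # ConnectionContext (below), which handles get()/put() for them.
    return pool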
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
The class will also catch when an instance of this class is to be
deleted. With that we can return Connections to the pool on exceptions
and so forth without making the caller responsible for catching them.
If possible, it makes sure to return the connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
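# Illustrative sketch (hypothetical, not part of the upstream module): typical
# use of ConnectionContext as a context manager. `conf`, `connection_pool` and
# `proxy` are placeholders supplied by the caller.
def _connection_context_example(conf, connection_pool, proxy):
    """Borrow a pooled connection, register a consumer, return it on exit."""
    with ConnectionContext(conf, connection_pool) as conn:
        # Explicitly wrapped methods (create_consumer, consume_in_thread, ...)
        # and any other Connection attribute (via __getattr__) are available.
        conn.create_consumer('my_topic', proxy, fanout=False)
        conn.consume_in_thread()
    # On exit, _done() resets a pooled connection and puts it back in the
    # pool, or closes a non-pooled one.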
class ReplyProxy(ConnectionContext):
"""Connection class for RPC replies / callbacks."""
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
LOG.warn(_('Number of call waiters is greater than warning '
'threshold: %d. There could be a MulticallProxyWaiter '
'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
try:
msg = {'result': reply, 'failure': failure}
except TypeError:
msg = {'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),
'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
# Otherwise use the msg_id for backward compatibility.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
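# Illustrative sketch (hypothetical, not part of the upstream module): how a
# server-side handler might report success or failure through msg_reply(),
# mirroring the pattern used by ProxyCallback._process_data() below. The
# arguments `conf`, `connection_pool`, `msg_id`, `reply_q` and `do_work` are
# placeholders supplied by the caller.
def _msg_reply_example(conf, connection_pool, msg_id, reply_q, do_work):
    """Send the result (then an 'ending' marker), or a serialized failure."""
    try:
        result = do_work()
    except Exception:
        # Failures are passed as a sys.exc_info() tuple.
        msg_reply(conf, msg_id, reply_q, connection_pool,
                  failure=sys.exc_info())
    else:
        msg_reply(conf, msg_id, reply_q, connection_pool, reply=result)
        msg_reply(conf, msg_id, reply_q, connection_pool, ending=True)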
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
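# Illustrative sketch (hypothetical, not part of the upstream module): a
# worked example of what pack_context()/unpack_context() do to a message
# dict. The context keys shown are placeholders.
def _pack_context_example():
    """Context values become '_context_*' keys on the message itself."""
    msg = {'method': 'echo', 'args': {'value': 42}}
    for key, value in {'user_id': 'alice', 'project_id': 'demo'}.items():
        msg['_context_%s' % key] = value
    # msg is now:
    #   {'method': 'echo', 'args': {'value': 42},
    #    '_context_user_id': 'alice', '_context_project_id': 'demo'}
    # On the receiving side unpack_context() pops every '_context_*' key,
    # strips the prefix and rebuilds an RpcContext from the result.
    return msg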
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
# NOTE: This value could be made a configuration item, but
# it is not necessary to change it in most cases,
# so leave it static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
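# Illustrative sketch (hypothetical, not part of the upstream module): how
# _add_unique_id() and _MsgIdCache cooperate to reject a redelivered message.
def _duplicate_message_example():
    """The second check of the same message raises DuplicateMessageError."""
    msg = {'method': 'echo', 'args': {'value': 42}}
    _add_unique_id(msg)                      # stamps msg[UNIQUE_ID]
    cache = _MsgIdCache()
    cache.check_duplicate_message(msg)       # first delivery: remembered
    try:
        cache.check_duplicate_message(msg)   # redelivery: rejected
    except rpc_common.DuplicateMessageError:
        return True
    return False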
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager.
Used by the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback.
Allows it to be invoked in a green thread.
"""
def __init__(self, conf, callback, connection_pool):
"""Initiates CallbackWrapper object.
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
def __call__(self, message_data):
self.pool.spawn_n(self.callback, message_data)
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version')
namespace = message_data.get('namespace')
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method,
namespace, args)
def _process_data(self, ctxt, version, method, namespace, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, namespace,
**args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
def __init__(self, conf, msg_id, timeout, connection_pool):
self._msg_id = msg_id
self._timeout = timeout or conf.rpc_response_timeout
self._reply_proxy = connection_pool.reply_proxy
self._done = False
self._got_ending = False
self._conf = conf
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
def done(self):
if self._done:
return
self._done = True
# Remove this caller from reply proxy's call_waiters
self._reply_proxy.del_call_waiter(self._msg_id)
def _process_data(self, data):
result = None
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
result = data['result']
return result
def __iter__(self):
"""Return a result until we get a reply with an 'ending' flag."""
if self._done:
raise StopIteration
while True:
try:
data = self._dataqueue.get(timeout=self._timeout)
result = self._process_data(data)
except queue.Empty:
self.done()
raise rpc_common.Timeout()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection."""
return ConnectionContext(conf, connection_pool, pooled=not new)
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
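# Sketch of the expected call pattern (assumed caller-side code, not part of
# this module): the waiter returned by multicall() is an iterator that yields
# each reply until the 'ending' marker arrives or the timeout expires, e.g.
#   for reply in multicall(conf, ctxt, 'compute', msg, 30, connection_pool):
#       process(reply)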
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
msg = rpc_common.serialize_msg(msg)
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
return conf.control_exchange
|
ntt-sic/neutron
|
neutron/openstack/common/rpc/amqp.py
|
Python
|
apache-2.0
| 22,783
|
# Testing the line trace facility.
from test import test_support
import unittest
import sys
import difflib
# A very basic example. If this fails, we're in deep trouble.
def basic():
return 1
basic.events = [(0, 'call'),
(1, 'line'),
(1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else clause), under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away. No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
x = 1
del x
while 0:
pass
x = 1
arigo_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(5, 'line'),
(5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
x = 1
del x
x = 1
one_instr_line.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(3, 'return')]
def no_pop_tops(): # 0
x = 1 # 1
for a in range(2): # 2
if a: # 3
x = 1 # 4
else: # 5
x = 1 # 6
no_pop_tops.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(6, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(2, 'line'),
(2, 'return')]
def no_pop_blocks():
y = 1
while not y:
bla
x = 1
no_pop_blocks.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(4, 'line'),
(4, 'return')]
def called(): # line -3
x = 1
def call(): # line 0
called()
call.events = [(0, 'call'),
(1, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'return'),
(1, 'return')]
def raises():
raise Exception
def test_raise():
try:
raises()
except Exception, exc:
x = 1
test_raise.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'exception'),
(-2, 'return'),
(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
def _settrace_and_return(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
_settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
raise RuntimeError
def settrace_and_raise(tracefunc):
try:
_settrace_and_raise(tracefunc)
except RuntimeError, exc:
pass
settrace_and_raise.events = [(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generated for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None".
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
a = 5
b = 5
if a == b:
b = a+1
else:
pass
ireturn_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(6, 'line'),
(6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
items = range(0, 3)
try:
i = 0
while 1:
b = items[i]; i+=1
except IndexError:
pass
tightloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'exception'),
(6, 'line'),
(7, 'line'),
(7, 'return')]
def tighterloop_example():
items = range(1, 4)
try:
i = 0
while 1: i = items[i]
except IndexError:
pass
tighterloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'exception'),
(5, 'line'),
(6, 'line'),
(6, 'return')]
def generator_function():
try:
yield True
"continued"
finally:
"finally"
def generator_example():
# any() will leave the generator before its end
x = any(generator_function())
# the following lines were not traced
for x in range(10):
y = x
generator_example.events = ([(0, 'call'),
(2, 'line'),
(-6, 'call'),
(-5, 'line'),
(-4, 'line'),
(-4, 'return'),
(-4, 'call'),
(-4, 'exception'),
(-1, 'line'),
(-1, 'return')] +
[(5, 'line'), (6, 'line')] * 10 +
[(5, 'line'), (5, 'return')])
class Tracer:
def __init__(self):
self.events = []
def trace(self, frame, event, arg):
self.events.append((frame.f_lineno, event))
return self.trace
def traceWithGenexp(self, frame, event, arg):
(o for o in [1])
self.events.append((frame.f_lineno, event))
return self.trace
class TraceTestCase(unittest.TestCase):
def compare_events(self, line_offset, events, expected_events):
events = [(l - line_offset, e) for (l, e) in events]
if events != expected_events:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff([str(x) for x in expected_events],
[str(x) for x in events])))
def run_and_compare(self, func, events):
tracer = Tracer()
sys.settrace(tracer.trace)
func()
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, events)
def run_test(self, func):
self.run_and_compare(func, func.events)
def run_test2(self, func):
tracer = Tracer()
func(tracer.trace)
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, func.events)
def test_01_basic(self):
self.run_test(basic)
def test_02_arigo(self):
self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
def test_04_no_pop_blocks(self):
self.run_test(no_pop_blocks)
def test_05_no_pop_tops(self):
self.run_test(no_pop_tops)
def test_06_call(self):
self.run_test(call)
def test_07_raise(self):
self.run_test(test_raise)
def test_08_settrace_and_return(self):
self.run_test2(settrace_and_return)
def test_09_settrace_and_raise(self):
self.run_test2(settrace_and_raise)
def test_10_ireturn(self):
self.run_test(ireturn_example)
def test_11_tightloop(self):
self.run_test(tightloop_example)
def test_12_tighterloop(self):
self.run_test(tighterloop_example)
def test_13_genexp(self):
self.run_test(generator_example)
# issue1265: if the trace function contains a generator,
# and if the traced function contains another generator
        # that is not completely exhausted, tracing stopped.
# Worse: the 'finally' clause was not invoked.
tracer = Tracer()
sys.settrace(tracer.traceWithGenexp)
generator_example()
sys.settrace(None)
self.compare_events(generator_example.func_code.co_firstlineno,
tracer.events, generator_example.events)
def test_14_onliner_if(self):
def onliners():
if True: False
else: True
return 0
self.run_and_compare(
onliners,
[(0, 'call'),
(1, 'line'),
(3, 'line'),
(3, 'return')])
def test_15_loops(self):
# issue1750076: "while" expression is skipped by debugger
def for_example():
for x in range(2):
pass
self.run_and_compare(
for_example,
[(0, 'call'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(1, 'return')])
def while_example():
# While expression should be traced on every loop
x = 2
while x > 0:
x -= 1
self.run_and_compare(
while_example,
[(0, 'call'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(3, 'return')])
def test_16_blank_lines(self):
exec("def f():\n" + "\n" * 256 + " pass")
self.run_and_compare(
f,
[(0, 'call'),
(257, 'line'),
(257, 'return')])
class RaisingTraceFuncTestCase(unittest.TestCase):
def trace(self, frame, event, arg):
"""A trace function that raises an exception in response to a
specific trace event."""
if event == self.raiseOnEvent:
raise ValueError # just something that isn't RuntimeError
else:
return self.trace
def f(self):
"""The function to trace; raises an exception if that's the case
we're testing, so that the 'exception' trace event fires."""
if self.raiseOnEvent == 'exception':
x = 0
y = 1/x
else:
return 1
def run_test_for_event(self, event):
"""Tests that an exception raised in response to the given event is
handled OK."""
self.raiseOnEvent = event
try:
for i in xrange(sys.getrecursionlimit() + 1):
sys.settrace(self.trace)
try:
self.f()
except ValueError:
pass
else:
self.fail("exception not thrown!")
except RuntimeError:
self.fail("recursion counter not reset")
# Test the handling of exceptions raised by each kind of trace event.
def test_call(self):
self.run_test_for_event('call')
def test_line(self):
self.run_test_for_event('line')
def test_return(self):
self.run_test_for_event('return')
def test_exception(self):
self.run_test_for_event('exception')
def test_trash_stack(self):
def f():
for i in range(5):
print i # line tracing will raise an exception at this line
def g(frame, why, extra):
if (why == 'line' and
frame.f_lineno == f.func_code.co_firstlineno + 2):
raise RuntimeError, "i am crashing"
return g
sys.settrace(g)
try:
f()
except RuntimeError:
# the test is really that this doesn't segfault:
import gc
gc.collect()
else:
self.fail("exception not propagated")
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
"""Defines a trace function that jumps from one place to another,
with the source and destination lines of the jump being defined by
the 'jump' property of the function under test."""
def __init__(self, function):
self.function = function
self.jumpFrom = function.jump[0]
self.jumpTo = function.jump[1]
self.done = False
def trace(self, frame, event, arg):
if not self.done and frame.f_code == self.function.func_code:
firstLine = frame.f_code.co_firstlineno
if frame.f_lineno == firstLine + self.jumpFrom:
# Cope with non-integer self.jumpTo (because of
# no_jump_to_non_integers below).
try:
frame.f_lineno = firstLine + self.jumpTo
except TypeError:
frame.f_lineno = self.jumpTo
self.done = True
return self.trace
# The first set of 'jump' tests are for things that are allowed:
def jump_simple_forwards(output):
output.append(1)
output.append(2)
output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
def jump_simple_backwards(output):
output.append(1)
output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
def jump_out_of_block_forwards(output):
for i in 1, 2:
output.append(2)
for j in [3]: # Also tests jumping over a block
output.append(4)
output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
def jump_out_of_block_backwards(output):
output.append(1)
for i in [1]:
output.append(3)
for j in [2]: # Also tests jumping over a block
output.append(5)
output.append(6)
output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
def jump_to_codeless_line(output):
output.append(1)
# Jumping to this line should skip to the next one.
output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
def jump_to_same_line(output):
output.append(1)
output.append(2)
output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
try:
output.append(2)
finally:
output.append(4)
try:
output.append(6)
finally:
output.append(8)
output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
def no_jump_too_far_forwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
def no_jump_too_far_backwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
try:
output.append(2)
except:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
try:
output.append(2)
except ValueError:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
try:
output.append(2)
except ValueError, e:
output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
try:
output.append(2)
except (ValueError, RuntimeError), e:
output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
def no_jump_forwards_into_block(output):
try:
output.append(2)
for i in 1, 2:
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
def no_jump_backwards_into_block(output):
try:
for i in 1, 2:
output.append(3)
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
def no_jump_into_finally_block(output):
try:
try:
output.append(3)
x = 1
finally:
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
def no_jump_out_of_finally_block(output):
try:
try:
output.append(3)
finally:
output.append(5)
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
try:
output.append(2)
except ValueError, e:
output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
try:
previous_frame = sys._getframe().f_back
previous_frame.f_lineno = previous_frame.f_lineno
except ValueError, e:
# This is the exception we wanted; make sure the error message
# talks about trace functions.
if 'trace' not in str(e):
raise
else:
# Something's wrong - the expected exception wasn't raised.
raise RuntimeError, "Trace-function-less jump failed to fail"
class JumpTestCase(unittest.TestCase):
def compare_jump_output(self, expected, received):
if received != expected:
self.fail( "Outputs don't match:\n" +
"Expected: " + repr(expected) + "\n" +
"Received: " + repr(received))
def run_test(self, func):
tracer = JumpTracer(func)
sys.settrace(tracer.trace)
output = []
func(output)
sys.settrace(None)
self.compare_jump_output(func.output, output)
def test_01_jump_simple_forwards(self):
self.run_test(jump_simple_forwards)
def test_02_jump_simple_backwards(self):
self.run_test(jump_simple_backwards)
def test_03_jump_out_of_block_forwards(self):
self.run_test(jump_out_of_block_forwards)
def test_04_jump_out_of_block_backwards(self):
self.run_test(jump_out_of_block_backwards)
def test_05_jump_to_codeless_line(self):
self.run_test(jump_to_codeless_line)
def test_06_jump_to_same_line(self):
self.run_test(jump_to_same_line)
def test_07_jump_in_nested_finally(self):
self.run_test(jump_in_nested_finally)
def test_08_no_jump_too_far_forwards(self):
self.run_test(no_jump_too_far_forwards)
def test_09_no_jump_too_far_backwards(self):
self.run_test(no_jump_too_far_backwards)
def test_10_no_jump_to_except_1(self):
self.run_test(no_jump_to_except_1)
def test_11_no_jump_to_except_2(self):
self.run_test(no_jump_to_except_2)
def test_12_no_jump_to_except_3(self):
self.run_test(no_jump_to_except_3)
def test_13_no_jump_to_except_4(self):
self.run_test(no_jump_to_except_4)
def test_14_no_jump_forwards_into_block(self):
self.run_test(no_jump_forwards_into_block)
def test_15_no_jump_backwards_into_block(self):
self.run_test(no_jump_backwards_into_block)
def test_16_no_jump_into_finally_block(self):
self.run_test(no_jump_into_finally_block)
def test_17_no_jump_out_of_finally_block(self):
self.run_test(no_jump_out_of_finally_block)
def test_18_no_jump_to_non_integers(self):
self.run_test(no_jump_to_non_integers)
def test_19_no_jump_without_trace_function(self):
no_jump_without_trace_function()
def test_main():
test_support.run_unittest(
TraceTestCase,
RaisingTraceFuncTestCase,
JumpTestCase
)
if __name__ == "__main__":
test_main()
|
babble/babble
|
include/jython/Lib/test/test_trace.py
|
Python
|
apache-2.0
| 21,728
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import threading
from ggrc import db
from ggrc.app import app
from ggrc.models import create_db, drop_db
from wsgiref.simple_server import make_server
from ggrc import settings
use_migrations = True
def before_all(context):
context.base_url = 'http://localhost:9000'
create_db(use_migrations)
app.debug = False
app.testing = True
if getattr(settings, 'MEMCACHE_MECHANISM', False) is True:
from google.appengine.api import memcache
from google.appengine.ext import testbed
context.testbed = testbed.Testbed()
context.testbed.activate()
context.testbed.init_memcache_stub()
context.query_count = 0
def increment_query_count(conn, clauseelement, multiparams, params):
context.query_count += 1
from sqlalchemy import event
event.listen(db.engine, "before_execute", increment_query_count)
context.server = make_server('', 9000, app)
context.thread = threading.Thread(target=context.server.serve_forever)
context.thread.start()
def after_all(context):
context.server.shutdown()
context.thread.join()
db.session.remove()
drop_db(use_migrations)
if getattr(settings, 'MEMCACHE_MECHANISM', False) is True:
from google.appengine.api import memcache
from google.appengine.ext import testbed
context.testbed.deactivate()
|
andrei-karalionak/ggrc-core
|
src/service_specs/environment.py
|
Python
|
apache-2.0
| 1,401
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Isaku Yamahata <yamahata@valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
'ephemeral0': 'sda2',
'root': DEFAULT_ROOT_DEV_NAME,
'swap': 'sda3'}
bdm_legacy_fields = set(['device_name', 'delete_on_termination',
'virtual_name', 'snapshot_id',
'volume_id', 'volume_size', 'no_device',
'connection_info'])
bdm_new_fields = set(['source_type', 'destination_type',
'guest_format', 'device_type', 'disk_bus', 'boot_index',
'device_name', 'delete_on_termination', 'snapshot_id',
'volume_id', 'volume_size', 'image_id', 'no_device',
'connection_info'])
bdm_db_only_fields = set(['id', 'instance_uuid'])
bdm_db_inherited_fields = set(['created_at', 'updated_at',
'deleted_at', 'deleted'])
bdm_new_non_api_fields = set(['volume_id', 'snapshot_id',
'image_id', 'connection_info'])
bdm_new_api_only_fields = set(['uuid'])
bdm_new_api_fields = ((bdm_new_fields - bdm_new_non_api_fields) |
bdm_new_api_only_fields)
class BlockDeviceDict(dict):
"""Represents a Block Device Mapping in Nova."""
_fields = bdm_new_fields
_db_only_fields = (bdm_db_only_fields |
bdm_db_inherited_fields)
_required_fields = set(['source_type'])
def __init__(self, bdm_dict=None, do_not_default=None):
super(BlockDeviceDict, self).__init__()
bdm_dict = bdm_dict or {}
do_not_default = do_not_default or set()
self._validate(bdm_dict)
# NOTE (ndipanov): Never default db fields
self.update(
dict((field, None)
for field in self._fields - do_not_default))
self.update(bdm_dict)
def _validate(self, bdm_dict):
"""Basic data format validations."""
dict_fields = set(key for key, _ in bdm_dict.iteritems())
# Check that there are no bogus fields
if not (dict_fields <=
(self._fields | self._db_only_fields)):
raise exception.InvalidBDMFormat(
details="Some fields are invalid.")
if bdm_dict.get('no_device'):
return
# Check that all required fields are there
if (self._required_fields and
not ((dict_fields & self._required_fields) ==
self._required_fields)):
raise exception.InvalidBDMFormat(
details="Some required fields are missing")
if 'delete_on_termination' in bdm_dict:
bdm_dict['delete_on_termination'] = strutils.bool_from_string(
bdm_dict['delete_on_termination'])
if bdm_dict.get('device_name') is not None:
validate_device_name(bdm_dict['device_name'])
validate_and_default_volume_size(bdm_dict)
if bdm_dict.get('boot_index'):
try:
bdm_dict['boot_index'] = int(bdm_dict['boot_index'])
except ValueError:
raise exception.InvalidBDMFormat(
details="Boot index is invalid.")
@classmethod
def from_legacy(cls, legacy_bdm):
copy_over_fields = bdm_legacy_fields & bdm_new_fields
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
# NOTE (ndipanov): These fields cannot be computed
# from legacy bdm, so do not default them
# to avoid overwriting meaningful values in the db
non_computable_fields = set(['boot_index', 'disk_bus',
'guest_format', 'device_type'])
new_bdm = dict((fld, val) for fld, val in legacy_bdm.iteritems()
if fld in copy_over_fields)
virt_name = legacy_bdm.get('virtual_name')
if is_swap_or_ephemeral(virt_name):
new_bdm['source_type'] = 'blank'
new_bdm['delete_on_termination'] = True
new_bdm['destination_type'] = 'local'
if virt_name == 'swap':
new_bdm['guest_format'] = 'swap'
else:
new_bdm['guest_format'] = CONF.default_ephemeral_format
elif legacy_bdm.get('snapshot_id'):
new_bdm['source_type'] = 'snapshot'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('volume_id'):
new_bdm['source_type'] = 'volume'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('no_device'):
# NOTE (ndipanov): Just keep the BDM for now,
pass
else:
raise exception.InvalidBDMFormat(
details="Unrecognized legacy format.")
return cls(new_bdm, non_computable_fields)
@classmethod
def from_api(cls, api_dict):
"""Transform the API format of data to the internally used one.
Only validate if the source_type field makes sense.
"""
if not api_dict.get('no_device'):
source_type = api_dict.get('source_type')
device_uuid = api_dict.get('uuid')
if source_type not in ('volume', 'image', 'snapshot', 'blank'):
raise exception.InvalidBDMFormat(
details="Invalid source_type field.")
elif source_type != 'blank':
if not device_uuid:
raise exception.InvalidBDMFormat(
details="Missing device UUID.")
api_dict[source_type + '_id'] = device_uuid
api_dict.pop('uuid', None)
return cls(api_dict)
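    # Illustration with hypothetical API input: from_api() renames 'uuid' to
    # the id field matching source_type, so
    #   {'source_type': 'volume', 'uuid': 'vol-1', 'boot_index': 0}
    # becomes (roughly)
    #   {'source_type': 'volume', 'volume_id': 'vol-1', 'boot_index': 0, ...}
    # with the remaining new-format fields defaulted to None.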
def legacy(self):
copy_over_fields = bdm_legacy_fields - set(['virtual_name'])
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
legacy_block_device = dict((field, self.get(field))
for field in copy_over_fields if field in self)
source_type = self.get('source_type')
destination_type = self.get('destination_type')
no_device = self.get('no_device')
if source_type == 'blank':
if self['guest_format'] == 'swap':
legacy_block_device['virtual_name'] = 'swap'
else:
# NOTE (ndipanov): Always label as 0, it is up to
# the calling routine to re-enumerate them
legacy_block_device['virtual_name'] = 'ephemeral0'
elif source_type in ('volume', 'snapshot') or no_device:
legacy_block_device['virtual_name'] = None
elif source_type == 'image':
if destination_type != 'volume':
# NOTE(ndipanov): Image bdms with local destination
# have no meaning in the legacy format - raise
raise exception.InvalidBDMForLegacy()
legacy_block_device['virtual_name'] = None
return legacy_block_device
def is_safe_for_update(block_device_dict):
"""Determine if passed dict is a safe subset for update.
Safe subset in this case means a safe subset of both legacy
and new versions of data, that can be passed to an UPDATE query
without any transformation.
"""
fields = set(block_device_dict.keys())
return fields <= (bdm_new_fields |
bdm_db_inherited_fields |
bdm_db_only_fields)
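# Illustration with hypothetical dicts: only keys drawn from the new-format
# and db fields are considered safe, so
#   is_safe_for_update({'volume_size': 10, 'delete_on_termination': True})  # True
#   is_safe_for_update({'virtual_name': 'swap'})   # False (legacy-only key)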
def create_image_bdm(image_ref, boot_index=0):
"""Create a block device dict based on the image_ref.
    This is useful in the API layer to keep compatibility with having
    an image_ref as a field in the instance requests.
"""
return BlockDeviceDict(
{'source_type': 'image',
'image_id': image_ref,
'delete_on_termination': True,
'boot_index': boot_index,
'device_type': 'disk',
'destination_type': 'local'})
def legacy_mapping(block_device_mapping):
"""Transform a list of block devices of an instance back to the
legacy data format.
"""
legacy_block_device_mapping = []
for bdm in block_device_mapping:
try:
legacy_block_device = BlockDeviceDict(bdm).legacy()
except exception.InvalidBDMForLegacy:
continue
legacy_block_device_mapping.append(legacy_block_device)
# Re-enumerate the ephemeral devices
for i, dev in enumerate(dev for dev in legacy_block_device_mapping
if dev['virtual_name'] and
is_ephemeral(dev['virtual_name'])):
dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
return legacy_block_device_mapping
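# Illustration (hypothetical input): two blank/local BDMs both come back from
# .legacy() labelled 'ephemeral0'; the re-enumeration loop above then renames
# them 'ephemeral0' and 'ephemeral1' in list order, while swap and volume
# entries are left untouched.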
def from_legacy_mapping(legacy_block_device_mapping, image_uuid='',
root_device_name=None):
"""Transform a legacy list of block devices to the new data format."""
new_bdms = [BlockDeviceDict.from_legacy(legacy_bdm)
for legacy_bdm in legacy_block_device_mapping]
image_bdm = None
volume_backed = False
# Try to assign boot_device
if not root_device_name and not image_uuid:
# NOTE (ndipanov): If there is no root_device, pick the first non
# blank one.
non_blank = [bdm for bdm in new_bdms if bdm['source_type'] != 'blank']
if non_blank:
non_blank[0]['boot_index'] = 0
else:
for bdm in new_bdms:
if (bdm['source_type'] in ('volume', 'snapshot', 'image') and
root_device_name is not None and
(strip_dev(bdm.get('device_name')) ==
strip_dev(root_device_name))):
bdm['boot_index'] = 0
volume_backed = True
elif not bdm['no_device']:
bdm['boot_index'] = -1
else:
bdm['boot_index'] = None
if not volume_backed and image_uuid:
image_bdm = create_image_bdm(image_uuid, boot_index=0)
return ([image_bdm] if image_bdm else []) + new_bdms
def properties_root_device_name(properties):
"""get root device name from image meta data.
If it isn't specified, return None.
"""
root_device_name = None
# NOTE(yamahata): see image_service.s3.s3create()
for bdm in properties.get('mappings', []):
if bdm['virtual'] == 'root':
root_device_name = bdm['device']
# NOTE(yamahata): register_image's command line can override
# <machine>.manifest.xml
if 'root_device_name' in properties:
root_device_name = properties['root_device_name']
return root_device_name
def validate_device_name(value):
try:
# NOTE (ndipanov): Do not allow empty device names
# until assigning default values
# is supported by nova.compute
utils.check_string_length(value, 'Device name',
min_length=1, max_length=255)
except exception.InvalidInput as e:
raise exception.InvalidBDMFormat(
details="Device name empty or too long.")
if ' ' in value:
raise exception.InvalidBDMFormat(
details="Device name contains spaces.")
def validate_and_default_volume_size(bdm):
if bdm.get('volume_size'):
try:
bdm['volume_size'] = utils.validate_integer(
bdm['volume_size'], 'volume_size', min_value=0)
except exception.InvalidInput as e:
raise exception.InvalidBDMFormat(
details="Invalid volume_size.")
_ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$')
def is_ephemeral(device_name):
return _ephemeral.match(device_name) is not None
def ephemeral_num(ephemeral_name):
assert is_ephemeral(ephemeral_name)
return int(_ephemeral.sub('\\1', ephemeral_name))
def is_swap_or_ephemeral(device_name):
return (device_name and
(device_name == 'swap' or is_ephemeral(device_name)))
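# Quick examples of the helpers above (worked by hand):
#   is_ephemeral('ephemeral0')        # True
#   is_ephemeral('root')              # False
#   ephemeral_num('ephemeral3')       # 3
#   is_swap_or_ephemeral('swap')      # True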
def new_format_is_swap(bdm):
if (bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local' and
bdm.get('guest_format') == 'swap'):
return True
return False
def new_format_is_ephemeral(bdm):
if (bdm.get('source_type') == 'blank' and not
new_format_is_swap(bdm)):
return True
return False
def mappings_prepend_dev(mappings):
"""Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
for m in mappings:
virtual = m['virtual']
if (is_swap_or_ephemeral(virtual) and
(not m['device'].startswith('/'))):
m['device'] = '/dev/' + m['device']
return mappings
_dev = re.compile('^/dev/')
def strip_dev(device_name):
"""remove leading '/dev/'."""
return _dev.sub('', device_name) if device_name else device_name
def prepend_dev(device_name):
"""Make sure there is a leading '/dev/'."""
return device_name and '/dev/' + strip_dev(device_name)
_pref = re.compile('^((x?v|s)d)')
def strip_prefix(device_name):
"""remove both leading /dev/ and xvd or sd or vd."""
device_name = strip_dev(device_name)
return _pref.sub('', device_name)
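# Worked examples for the three small helpers above:
#   strip_dev('/dev/sda1')      # 'sda1'
#   prepend_dev('vdb')          # '/dev/vdb'
#   strip_prefix('/dev/xvda1')  # 'a1'  (drops '/dev/' and the 'xvd' prefix)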
def instance_block_mapping(instance, bdms):
root_device_name = instance['root_device_name']
# NOTE(clayg): remove this when xenapi is setting default_root_device
if root_device_name is None:
if driver.compute_driver_matches('xenapi.XenAPIDriver'):
root_device_name = '/dev/xvda'
else:
return _DEFAULT_MAPPINGS
mappings = {}
mappings['ami'] = strip_dev(root_device_name)
mappings['root'] = root_device_name
default_ephemeral_device = instance.get('default_ephemeral_device')
if default_ephemeral_device:
mappings['ephemeral0'] = default_ephemeral_device
default_swap_device = instance.get('default_swap_device')
if default_swap_device:
mappings['swap'] = default_swap_device
ebs_devices = []
# 'ephemeralN', 'swap' and ebs
for bdm in bdms:
if bdm['no_device']:
continue
# ebs volume case
if (bdm['volume_id'] or bdm['snapshot_id']):
ebs_devices.append(bdm['device_name'])
continue
virtual_name = bdm['virtual_name']
if not virtual_name:
continue
if is_swap_or_ephemeral(virtual_name):
mappings[virtual_name] = bdm['device_name']
# NOTE(yamahata): I'm not sure how ebs device should be numbered.
# Right now sort by device name for deterministic
# result.
if ebs_devices:
nebs = 0
ebs_devices.sort()
for ebs in ebs_devices:
mappings['ebs%d' % nebs] = ebs
nebs += 1
return mappings
def match_device(device):
"""Matches device name and returns prefix, suffix."""
match = re.match("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$", device)
if not match:
return None
return match.groups()
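# Worked examples for the (prefix, suffix) split done by match_device():
#   match_device('/dev/sda1')  # ('/dev/sd', 'a')
#   match_device('/dev/xvdb')  # ('/dev/xvd', 'b')
#   match_device('sda1')       # None (no leading '/dev/')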
def volume_in_mapping(mount_device, block_device_info):
block_device_list = [strip_dev(vol['mount_device'])
for vol in
driver.block_device_info_get_mapping(
block_device_info)]
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
block_device_list.append(strip_dev(swap['device_name']))
block_device_list += [strip_dev(ephemeral['device_name'])
for ephemeral in
driver.block_device_info_get_ephemerals(
block_device_info)]
LOG.debug(_("block_device_list %s"), block_device_list)
return strip_dev(mount_device) in block_device_list
|
TieWei/nova
|
nova/block_device.py
|
Python
|
apache-2.0
| 16,864
|
from amo.utils import chunked
from mkt.developers.tasks import generate_image_assets
from mkt.webapps.models import Webapp
def run():
for chunk in chunked(Webapp.objects.all(), 50):
for app in chunk:
try:
generate_image_assets.delay(app)
except Exception:
pass
|
aviarypl/mozilla-l10n-addons-server
|
src/olympia/migrations/465-generate-image-assets.py
|
Python
|
bsd-3-clause
| 331
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .constants import eStart, eError, eItsMe
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == eError:
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
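    # Worked numbers for the confidence formula above: with fewer than six
    # multi-byte characters the result is 1.0 - 0.99 * 0.5 ** n, e.g.
    #   n = 1  ->  1.0 - 0.495   = 0.505
    #   n = 3  ->  1.0 - 0.12375 = 0.87625
    # and with six or more it is capped at 0.99.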
|
archifix/settings
|
sublime/Packages/SublimeCodeIntel/libs/chardet/utf8prober.py
|
Python
|
mit
| 2,680
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
import os
import json
from .util import u, slugify
import codecs
def get_tags_count(journal):
"""Returns a set of tuples (count, tag) for all tags present in the journal."""
    # Astute reader: should the following line leave you as puzzled as it left
    # me the first time I came across this construction, worry not and embrace
    # the ensuing moment of enlightenment.
tags = [tag
for entry in journal.entries
for tag in set(entry.tags)]
# To be read: [for entry in journal.entries: for tag in set(entry.tags): tag]
tag_counts = set([(tags.count(tag), tag) for tag in tags])
return tag_counts
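# Illustration with made-up entries: if one entry is tagged {'@home', '@idea'}
# and another {'@home'}, the flattened tag list is ['@home', '@idea', '@home']
# and get_tags_count() returns {(2, '@home'), (1, '@idea')}.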
def to_tag_list(journal):
"""Prints a list of all tags and the number of occurrences."""
tag_counts = get_tags_count(journal)
result = ""
if not tag_counts:
return '[No tags found in journal.]'
elif min(tag_counts)[0] == 0:
tag_counts = filter(lambda x: x[0] > 1, tag_counts)
result += '[Removed tags that appear only once.]\n'
result += "\n".join("{0:20} : {1}".format(tag, n) for n, tag in sorted(tag_counts, reverse=True))
return result
def to_json(journal):
"""Returns a JSON representation of the Journal."""
tags = get_tags_count(journal)
result = {
"tags": dict((tag, count) for count, tag in tags),
"entries": [e.to_dict() for e in journal.entries]
}
return json.dumps(result, indent=2)
def to_md(journal):
"""Returns a markdown representation of the Journal"""
out = []
year, month = -1, -1
for e in journal.entries:
if not e.date.year == year:
year = e.date.year
out.append(str(year))
out.append("=" * len(str(year)) + "\n")
if not e.date.month == month:
month = e.date.month
out.append(e.date.strftime("%B"))
out.append('-' * len(e.date.strftime("%B")) + "\n")
out.append(e.to_md())
result = "\n".join(out)
return result
def to_txt(journal):
"""Returns the complete text of the Journal."""
return journal.pprint()
def export(journal, format, output=None):
"""Exports the journal to various formats.
format should be one of json, txt, text, md, markdown.
If output is None, returns a unicode representation of the output.
If output is a directory, exports entries into individual files.
Otherwise, exports to the given output file.
"""
maps = {
"json": to_json,
"txt": to_txt,
"text": to_txt,
"md": to_md,
"markdown": to_md
}
if format not in maps:
return "[ERROR: can't export to '{0}'. Valid options are 'md', 'txt', and 'json']".format(format)
if output and os.path.isdir(output): # multiple files
return write_files(journal, output, format)
else:
content = maps[format](journal)
if output:
try:
with codecs.open(output, "w", "utf-8") as f:
f.write(content)
return "[Journal exported to {0}]".format(output)
except IOError as e:
return "[ERROR: {0} {1}]".format(e.filename, e.strerror)
else:
return content
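# Sketch of the expected usage (assumed journal object, not defined here):
#   export(journal, "json")             # returns the JSON text
#   export(journal, "md", "out.md")     # writes Markdown to out.md
#   export(journal, "txt", "entries/")  # one file per entry if the directory exists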
def write_files(journal, path, format):
"""Turns your journal into separate files for each entry.
Format should be either json, md or txt."""
    make_filename = lambda entry: entry.date.strftime("%Y-%m-%d_{0}.{1}".format(slugify(u(entry.title)), format))
for e in journal.entries:
full_path = os.path.join(path, make_filename(e))
if format == 'json':
content = json.dumps(e.to_dict(), indent=2) + "\n"
elif format in ('md', 'markdown'):
content = e.to_md()
elif format in ('txt', 'text'):
content = e.__unicode__()
with codecs.open(full_path, "w", "utf-8") as f:
f.write(content)
return "[Journal exported individual files in {0}]".format(path)
|
dzeban/jrnl
|
jrnl/exporters.py
|
Python
|
mit
| 4,046
|