max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
users/models.py | sh4rpy/foodgram | 0 | 12760451 | from django.contrib.auth import get_user_model
# Create your models here.
| 1.359375 | 1 |
Modules/Menus/Menu.py | Spraynard/Card-Game-Suite | 0 | 12760452 | <filename>Modules/Menus/Menu.py
class Menu(object):
    """Base class for a game-selection menu.

    Subclasses override the hook methods below; each is a no-op
    placeholder here.
    """

    def __init__(self):
        # NOTE(review): the original assigned a local variable `gameList`
        # instead of an instance attribute, so the value was discarded.
        self.gameList = None

    def _populateGameList(self):
        """Fill self.gameList with the available games (override in subclass)."""
        pass

    def chooseGame(self):
        """Prompt for and return the selected game (override in subclass)."""
        pass

    def start(self):
        """Run the menu loop (override in subclass)."""
        pass
| 2.34375 | 2 |
basicswap/interface_btc.py | gerlofvanek/basicswap | 0 | 12760453 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2022 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import json
import time
import base64
import hashlib
import logging
import traceback
from io import BytesIO
from basicswap.contrib.test_framework import segwit_addr
from .util import (
dumpj,
toWIF,
ensure,
make_int,
b58encode,
decodeWif,
decodeAddress,
decodeScriptNum,
pubkeyToAddress,
getCompactSizeLen,
SerialiseNumCompact,
getWitnessElementLen)
from coincurve.keys import (
PrivateKey,
PublicKey)
from coincurve.dleag import (
verify_secp256k1_point)
from coincurve.ecdsaotves import (
ecdsaotves_enc_sign,
ecdsaotves_enc_verify,
ecdsaotves_dec_sig,
ecdsaotves_rec_enc_key)
from .ecc_util import (
ep,
pointToCPK, CPKToPoint,
getSecretInt,
b2h, i2b, b2i, i2h)
from .contrib.test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
FromHex)
from .contrib.test_framework.script import (
CScript, CScriptOp,
OP_IF, OP_ELSE, OP_ENDIF,
OP_0, OP_2,
OP_CHECKSIG,
OP_CHECKMULTISIG,
OP_CHECKSEQUENCEVERIFY,
OP_DROP,
SIGHASH_ALL,
SegwitV0SignatureHash,
hash160)
from .basicswap_util import (
TxLockTypes)
from .chainparams import CoinInterface, Coins
from .rpc import make_rpc_func, openrpc
# BIP-68 relative lock-time encoding constants.
SEQUENCE_LOCKTIME_GRANULARITY = 9 # 512 seconds
SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22)  # set when the lock is time-based rather than block-based
SEQUENCE_LOCKTIME_MASK = 0x0000ffff  # low 16 bits carry the lock value
def ensure_op(v, err_string='Bad opcode'):
    # Raise (via ensure) when an expected script opcode/length byte is absent.
    ensure(v, err_string)
def findOutput(tx, script_pk):
    """Return the index of the first output paying to script_pk, else None."""
    for n, txo in enumerate(tx.vout):
        if txo.scriptPubKey == script_pk:
            return n
    return None
def find_vout_for_address_from_txobj(tx_obj, addr):
    """
    Locate the vout index of the given transaction sending to the
    given address. Raises runtime error exception if not found.
    """
    for n, txo in enumerate(tx_obj["vout"]):
        if addr in txo["scriptPubKey"]["addresses"]:
            return n
    raise RuntimeError("Vout not found for address: txid={}, addr={}".format(tx_obj['txid'], addr))
class BTCInterface(CoinInterface):
@staticmethod
def coin_type():
    # Chain identifier enum value for this interface.
    return Coins.BTC
@staticmethod
def COIN():
    # Smallest units per whole coin (imported constant).
    return COIN
@staticmethod
def exp() -> int:
    # Decimal exponent of the coin (8 -> 1e8 sats per coin).
    return 8
@staticmethod
def nbk() -> int:
    # No. of bytes required to encode a private key.
    return 32
@staticmethod
def nbK() -> int:  # No. of bytes requires to encode a public key
    return 33
@staticmethod
def witnessScaleFactor() -> int:
    # Weight units per virtual byte (segwit).
    return 4
@staticmethod
def txVersion() -> int:
    # nVersion used for all transactions built by this interface.
    return 2
@staticmethod
def getTxOutputValue(tx):
    """Sum of nValue over every output of tx."""
    return sum(txo.nValue for txo in tx.vout)
@staticmethod
def compareFeeRates(a, b) -> bool:
    """True when two fee rates differ by less than 20 units."""
    delta = a - b
    return -20 < delta < 20
@staticmethod
def xmr_swap_alock_spend_tx_vsize() -> int:
    # Expected vsize of the chain-A lock spend tx, used for fee estimation.
    return 147
@staticmethod
def txoType():
    # Class used to construct transaction outputs.
    return CTxOut
@staticmethod
def getExpectedSequence(lockType, lockVal):
    """Encode a lock type + value into a BIP-68 nSequence value.

    Raises ValueError for lockVal < 1 or an unknown lockType.
    """
    # Was a bare assert: stripped under -O, so validate via ensure instead.
    ensure(lockVal >= 1, 'Bad lockVal')
    if lockType == TxLockTypes.SEQUENCE_LOCK_BLOCKS:
        return lockVal
    if lockType == TxLockTypes.SEQUENCE_LOCK_TIME:
        secondsLocked = lockVal
        # Ensure the locked time is never less than lockVal
        if secondsLocked % (1 << SEQUENCE_LOCKTIME_GRANULARITY) != 0:
            secondsLocked += (1 << SEQUENCE_LOCKTIME_GRANULARITY)
        secondsLocked >>= SEQUENCE_LOCKTIME_GRANULARITY
        return secondsLocked | SEQUENCE_LOCKTIME_TYPE_FLAG
    raise ValueError('Unknown lock type')
@staticmethod
def decodeSequence(lock_value):
    """Decode a BIP-68 nSequence back to raw blocks or seconds."""
    masked = lock_value & SEQUENCE_LOCKTIME_MASK
    if lock_value & SEQUENCE_LOCKTIME_TYPE_FLAG:
        # Time-based lock: convert 512-second units back to seconds
        return masked << SEQUENCE_LOCKTIME_GRANULARITY
    return masked
def __init__(self, coin_settings, network, swap_client=None):
    """Configure RPC access and swap parameters from coin_settings.

    Required keys: rpcport, rpcauth, blocks_confirmed, conf_target,
    use_segwit.  Optional: rpchost (default 127.0.0.1).
    """
    super().__init__(network)
    self._rpc_host = coin_settings.get('rpchost', '127.0.0.1')
    self._rpcport = coin_settings['rpcport']
    self._rpcauth = coin_settings['rpcauth']
    self.rpc_callback = make_rpc_func(self._rpcport, self._rpcauth, host=self._rpc_host)
    self.blocks_confirmed = coin_settings['blocks_confirmed']
    self.setConfTarget(coin_settings['conf_target'])
    self._use_segwit = coin_settings['use_segwit']
    self._sc = swap_client
    # Fall back to the root logging module when no swap_client logger exists
    self._log = self._sc.log if self._sc and self._sc.log else logging
def open_rpc(self, wallet=None):
    # Open a persistent RPC connection, optionally bound to a wallet.
    return openrpc(self._rpcport, self._rpcauth, wallet=wallet, host=self._rpc_host)
def json_request(self, rpc_conn, method, params):
    """Send one JSON-RPC request over an open connection and return 'result'.

    Raises ValueError on transport/decode failure (chained to the original
    exception) or when the response carries a non-null 'error' field.
    """
    try:
        v = rpc_conn.json_request(method, params)
        r = json.loads(v.decode('utf-8'))
    except Exception as ex:
        traceback.print_exc()
        # Chain the cause so the transport error isn't lost
        raise ValueError('RPC Server Error ' + str(ex)) from ex
    if 'error' in r and r['error'] is not None:
        raise ValueError('RPC error ' + str(r['error']))
    return r['result']
def close_rpc(self, rpc_conn):
    # Close a connection opened with open_rpc().
    rpc_conn.close()
def setConfTarget(self, new_conf_target):
    """Set the fee-estimation confirmation target (valid range [1, 32])."""
    # Was a bare assert: stripped under -O, so validate via ensure instead.
    ensure(new_conf_target >= 1 and new_conf_target < 33, 'Invalid conf_target value')
    self._conf_target = new_conf_target
def testDaemonRPC(self):
    # Raises if the wallet RPC interface is unreachable.
    self.rpc_callback('getwalletinfo', [])
def getDaemonVersion(self):
    # Node version number as reported by getnetworkinfo.
    return self.rpc_callback('getnetworkinfo')['version']
def getBlockchainInfo(self):
    # Raw getblockchaininfo result.
    return self.rpc_callback('getblockchaininfo')
def getChainHeight(self):
    # Current best block height.
    return self.rpc_callback('getblockcount')
def getMempoolTx(self, txid):
    # Raw tx hex for txid (bytes).  NOTE(review): despite the name this
    # queries getrawtransaction, which is not limited to the mempool.
    return self.rpc_callback('getrawtransaction', [txid.hex()])
def getBlockHeaderFromHeight(self, height):
    """Return the block header of the block at the given chain height."""
    block_hash = self.rpc_callback('getblockhash', [height])
    header = self.rpc_callback('getblockheader', [block_hash])
    return header
def getBlockHeader(self, block_hash):
    # Header for the given block hash.
    return self.rpc_callback('getblockheader', [block_hash])
def initialiseWallet(self, key_bytes):
    """Seed the daemon wallet from key_bytes via sethdseed (best effort)."""
    key_wif = self.encodeKey(key_bytes)
    try:
        self.rpc_callback('sethdseed', [True, key_wif])
    except Exception as e:
        # < 0.21: Cannot set a new HD seed while still in Initial Block Download.
        self._log.error('sethdseed failed: {}'.format(str(e)))
def getWalletInfo(self):
    # Raw getwalletinfo result.
    return self.rpc_callback('getwalletinfo')
def walletRestoreHeight(self):
    # NOTE(review): _restore_height is not assigned anywhere in this file;
    # presumably set elsewhere (or by a subclass) - verify before relying on it.
    return self._restore_height
def getWalletRestoreHeight(self):
    """Estimate the block height the wallet should rescan from.

    Walks the header chain backwards from the tip until a block older than
    the wallet's keypoololdest time is found, and returns its height.
    Raises ValueError when the chain is not fully synced.
    """
    start_time = self.rpc_callback('getwalletinfo')['keypoololdest']
    blockchaininfo = self.rpc_callback('getblockchaininfo')
    best_block = blockchaininfo['bestblockhash']
    chain_synced = round(blockchaininfo['verificationprogress'], 3)
    if chain_synced < 1.0:
        raise ValueError('{} chain isn\'t synced.'.format(self.coin_name()))
    self._log.debug('Finding block at time: {}'.format(start_time))
    rpc_conn = self.open_rpc()
    try:
        block_hash = best_block
        while True:
            block_header = self.json_request(rpc_conn, 'getblockheader', [block_hash])
            if block_header['time'] < start_time:
                return block_header['height']
            # Step to the parent block
            block_hash = block_header['previousblockhash']
    finally:
        self.close_rpc(rpc_conn)
def getWalletSeedID(self):
    # hdseedid of the active wallet.
    return self.rpc_callback('getwalletinfo')['hdseedid']
def getNewAddress(self, use_segwit, label='swap_receive'):
    """Request a new wallet address, bech32 when use_segwit is set."""
    args = [label, 'bech32'] if use_segwit else [label]
    return self.rpc_callback('getnewaddress', args)
def get_fee_rate(self, conf_target=2):
    """Return (fee_rate, source_name) for the requested confirmation target.

    Tries estimatesmartfee first, then the wallet's paytxfee, finally the
    node relay fee.
    """
    try:
        return self.rpc_callback('estimatesmartfee', [conf_target])['feerate'], 'estimatesmartfee'
    except Exception:
        try:
            # BUG FIX: previously the (rate, label) tuple was bound to
            # fee_rate before the > 0.0 check, so the comparison always
            # raised TypeError and this branch could never succeed.
            fee_rate = self.rpc_callback('getwalletinfo')['paytxfee']
            if fee_rate <= 0.0:
                raise ValueError('0 feerate')  # unset; fall back to relayfee
            return fee_rate, 'paytxfee'
        except Exception:
            return self.rpc_callback('getnetworkinfo')['relayfee'], 'relayfee'
def isSegwitAddress(self, address):
    """True when address carries this chain's bech32 prefix."""
    hrp = self.chainparams_network()['hrp']
    return address.startswith(hrp + '1')
def decodeAddress(self, address):
    """Return the raw payload bytes of a bech32 or base58 address."""
    bech32_prefix = self.chainparams_network()['hrp']
    if address.startswith(bech32_prefix + '1'):
        # Bech32: element [1] of the decode result is the witness program
        return bytes(segwit_addr.decode(bech32_prefix, address)[1])
    # Base58 path: drop the leading version byte
    return decodeAddress(address)[1:]
def pubkey_to_segwit_address(self, pk):
    """Encode pubkey pk as a v0 p2wpkh bech32 address."""
    bech32_prefix = self.chainparams_network()['hrp']
    version = 0
    pkh = hash160(pk)
    return segwit_addr.encode(bech32_prefix, version, pkh)
def pkh_to_address(self, pkh):
    """Encode a 20-byte pubkey hash as a base58check p2pkh address."""
    # pkh is hash160(pk)
    assert(len(pkh) == 20)
    prefix = self.chainparams_network()['pubkey_address']
    data = bytes((prefix,)) + pkh
    # base58check: append the first 4 bytes of double-sha256 as checksum
    checksum = hashlib.sha256(hashlib.sha256(data).digest()).digest()
    return b58encode(data + checksum[0:4])
def encode_p2wsh(self, script):
    """Encode a p2wsh scriptPubKey (OP_0 <32-byte hash>) as a bech32 address."""
    bech32_prefix = self.chainparams_network()['hrp']
    version = 0
    program = script[2:]  # strip version and length
    return segwit_addr.encode(bech32_prefix, version, program)
def encode_p2sh(self, script):
    # Base58 p2sh address for script, using the chain's script prefix.
    return pubkeyToAddress(self.chainparams_network()['script_address'], script)
def pubkey_to_address(self, pk):
    """Base58 p2pkh address for a 33-byte compressed pubkey."""
    assert(len(pk) == 33)
    return self.pkh_to_address(hash160(pk))
def getNewSecretKey(self):
    # New random secret scalar.
    return getSecretInt()
def getPubkey(self, privkey):
    # Compressed (33-byte) pubkey derived from privkey.
    return PublicKey.from_secret(privkey).format()
def getAddressHashFromKey(self, key):
    # hash160 of the compressed pubkey derived from key.
    pk = self.getPubkey(key)
    return hash160(pk)
def verifyKey(self, k):
    """True when k encodes a valid secret scalar (0 < k < group order)."""
    return 0 < b2i(k) < ep.o
def verifyPubkey(self, pubkey_bytes):
    # True when pubkey_bytes is a valid secp256k1 point.
    return verify_secp256k1_point(pubkey_bytes)
def encodeKey(self, key_bytes):
    """Encode a raw secret key as WIF with the chain's key prefix."""
    wif_prefix = self.chainparams_network()['key_prefix']
    return toWIF(wif_prefix, key_bytes)
def encodePubkey(self, pk):
    # EC point -> compressed pubkey bytes.
    return pointToCPK(pk)
def decodePubkey(self, pke):
    # Compressed pubkey bytes -> EC point.
    return CPKToPoint(pke)
def decodeKey(self, k):
    # WIF string -> raw key bytes.
    return decodeWif(k)
def sumKeys(self, ka, kb):
    # TODO: Add to coincurve
    # Scalar addition modulo the secp256k1 group order.
    return i2b((b2i(ka) + b2i(kb)) % ep.o)
def sumPubkeys(self, Ka, Kb):
    # EC point addition of two compressed public keys.
    return PublicKey.combine_keys([PublicKey(Ka), PublicKey(Kb)]).format()
def getScriptForPubkeyHash(self, pkh):
    # p2wpkh scriptPubKey: OP_0 <20-byte pubkey hash>
    return CScript([OP_0, pkh])
def extractScriptLockScriptValues(self, script_bytes):
    """Validate a 2-of-2 CHECKMULTISIG lock script and return (pk1, pk2).

    Expected layout: OP_2 <33:pk1> <33:pk2> OP_2 OP_CHECKMULTISIG.
    Raises (via ensure/ensure_op) on any mismatch.
    """
    script_len = len(script_bytes)
    ensure(script_len == 71, 'Bad script length')
    o = 0
    ensure_op(script_bytes[o] == OP_2)
    ensure_op(script_bytes[o + 1] == 33)  # push length of pk1
    o += 2
    pk1 = script_bytes[o: o + 33]
    o += 33
    ensure_op(script_bytes[o] == 33)  # push length of pk2
    o += 1
    pk2 = script_bytes[o: o + 33]
    o += 33
    ensure_op(script_bytes[o] == OP_2)
    ensure_op(script_bytes[o + 1] == OP_CHECKMULTISIG)
    return pk1, pk2
def genScriptLockTxScript(self, Kal, Kaf):
    """Build the 2-of-2 multisig lock script for pubkeys Kal and Kaf.

    Accepts either 33-byte encoded pubkeys or EC points.
    """
    Kal_enc = Kal if len(Kal) == 33 else self.encodePubkey(Kal)
    Kaf_enc = Kaf if len(Kaf) == 33 else self.encodePubkey(Kaf)
    return CScript([2, Kal_enc, Kaf_enc, 2, CScriptOp(OP_CHECKMULTISIG)])
def createScriptLockTx(self, value, Kal, Kaf, vkbv=None):
    """Create the (unfunded) lock tx paying value to the 2-of-2 p2wsh.

    Returns (serialized_tx, lock_script).  vkbv is unused for BTC.
    """
    script = self.genScriptLockTxScript(Kal, Kaf)
    tx = CTransaction()
    tx.nVersion = self.txVersion()
    tx.vout.append(self.txoType()(value, self.getScriptDest(script)))
    return tx.serialize(), script
def fundScriptLockTx(self, tx_bytes, feerate, vkbv=None):
    # vkbv is unused for BTC.
    return self.fundTx(tx_bytes, feerate)
def extractScriptLockRefundScriptValues(self, script_bytes):
    """Validate a lock-refund script and return (pk1, pk2, csv_val, pk3).

    Expected layout:
      OP_IF OP_2 <pk1> <pk2> OP_2 OP_CHECKMULTISIG
      OP_ELSE <csv_val> OP_CHECKSEQUENCEVERIFY OP_DROP <pk3> OP_CHECKSIG
      OP_ENDIF
    Raises (via ensure/ensure_op) on any mismatch.
    """
    script_len = len(script_bytes)
    ensure(script_len > 73, 'Bad script length')
    ensure_op(script_bytes[0] == OP_IF)
    ensure_op(script_bytes[1] == OP_2)
    ensure_op(script_bytes[2] == 33)
    pk1 = script_bytes[3: 3 + 33]
    ensure_op(script_bytes[36] == 33)
    pk2 = script_bytes[37: 37 + 33]
    ensure_op(script_bytes[70] == OP_2)
    ensure_op(script_bytes[71] == OP_CHECKMULTISIG)
    ensure_op(script_bytes[72] == OP_ELSE)
    o = 73
    # csv_val is a variable-length script number
    csv_val, nb = decodeScriptNum(script_bytes, o)
    o += nb
    ensure(script_len == o + 5 + 33, 'Bad script length')  # Fails if script too long
    ensure_op(script_bytes[o] == OP_CHECKSEQUENCEVERIFY)
    o += 1
    ensure_op(script_bytes[o] == OP_DROP)
    o += 1
    ensure_op(script_bytes[o] == 33)
    o += 1
    pk3 = script_bytes[o: o + 33]
    o += 33
    ensure_op(script_bytes[o] == OP_CHECKSIG)
    o += 1
    ensure_op(script_bytes[o] == OP_ENDIF)
    return pk1, pk2, csv_val, pk3
def genScriptLockRefundTxScript(self, Kal, Kaf, csv_val):
    """Build the refund script: 2-of-2 multisig, or Kaf alone after csv_val."""
    Kal_enc = Kal if len(Kal) == 33 else self.encodePubkey(Kal)
    Kaf_enc = Kaf if len(Kaf) == 33 else self.encodePubkey(Kaf)
    return CScript([
        CScriptOp(OP_IF),
        2, Kal_enc, Kaf_enc, 2, CScriptOp(OP_CHECKMULTISIG),
        CScriptOp(OP_ELSE),
        csv_val, CScriptOp(OP_CHECKSEQUENCEVERIFY), CScriptOp(OP_DROP),
        Kaf_enc, CScriptOp(OP_CHECKSIG),
        CScriptOp(OP_ENDIF)])
def createScriptLockRefundTx(self, tx_lock_bytes, script_lock, Kal, Kaf, lock1_value, csv_val, tx_fee_rate, vkbv=None):
    """Build the refund tx spending the lock output after sequence lock1_value.

    Returns (serialized_tx, refund_script, output_value).  vkbv is unused
    for BTC.
    """
    tx_lock = CTransaction()
    tx_lock = FromHex(tx_lock, tx_lock_bytes.hex())
    output_script = CScript([OP_0, hashlib.sha256(script_lock).digest()])
    locked_n = findOutput(tx_lock, output_script)
    ensure(locked_n is not None, 'Output not found in tx')
    locked_coin = tx_lock.vout[locked_n].nValue
    tx_lock.rehash()
    tx_lock_id_int = tx_lock.sha256
    refund_script = self.genScriptLockRefundTxScript(Kal, Kaf, csv_val)
    tx = CTransaction()
    tx.nVersion = self.txVersion()
    tx.vin.append(CTxIn(COutPoint(tx_lock_id_int, locked_n), nSequence=lock1_value))
    tx.vout.append(self.txoType()(locked_coin, CScript([OP_0, hashlib.sha256(refund_script).digest()])))
    # Size the fee using a dummy witness stack for the lock script
    dummy_witness_stack = self.getScriptLockTxDummyWitness(script_lock)
    witness_bytes = self.getWitnessStackSerialisedLength(dummy_witness_stack)
    vsize = self.getTxVSize(tx, add_witness_bytes=witness_bytes)
    pay_fee = int(tx_fee_rate * vsize // 1000)
    # Fee is deducted from the single output
    tx.vout[0].nValue = locked_coin - pay_fee
    tx.rehash()
    self._log.info('createScriptLockRefundTx %s:\n fee_rate, vsize, fee: %ld, %ld, %ld.',
                   i2h(tx.sha256), tx_fee_rate, vsize, pay_fee)
    return tx.serialize(), refund_script, tx.vout[0].nValue
def createScriptLockRefundSpendTx(self, tx_lock_refund_bytes, script_lock_refund, pkh_refund_to, tx_fee_rate, vkbv=None):
    """Build the tx spending the refund output back to the leader.

    Returns the serialized tx.  vkbv is unused for BTC.
    """
    # Returns the coinA locked coin to the leader
    # The follower will sign the multisig path with a signature encumbered by the leader's coinB spend pubkey
    # If the leader publishes the decrypted signature the leader's coinB spend privatekey will be revealed to the follower
    tx_lock_refund = self.loadTx(tx_lock_refund_bytes)
    output_script = CScript([OP_0, hashlib.sha256(script_lock_refund).digest()])
    locked_n = findOutput(tx_lock_refund, output_script)
    ensure(locked_n is not None, 'Output not found in tx')
    locked_coin = tx_lock_refund.vout[locked_n].nValue
    tx_lock_refund.rehash()
    tx_lock_refund_hash_int = tx_lock_refund.sha256
    tx = CTransaction()
    tx.nVersion = self.txVersion()
    tx.vin.append(CTxIn(COutPoint(tx_lock_refund_hash_int, locked_n), nSequence=0))
    tx.vout.append(self.txoType()(locked_coin, self.getScriptForPubkeyHash(pkh_refund_to)))
    # Size the fee using a dummy witness stack for the refund script
    dummy_witness_stack = self.getScriptLockRefundSpendTxDummyWitness(script_lock_refund)
    witness_bytes = self.getWitnessStackSerialisedLength(dummy_witness_stack)
    vsize = self.getTxVSize(tx, add_witness_bytes=witness_bytes)
    pay_fee = int(tx_fee_rate * vsize // 1000)
    tx.vout[0].nValue = locked_coin - pay_fee
    tx.rehash()
    self._log.info('createScriptLockRefundSpendTx %s:\n fee_rate, vsize, fee: %ld, %ld, %ld.',
                   i2h(tx.sha256), tx_fee_rate, vsize, pay_fee)
    return tx.serialize()
def createScriptLockRefundSpendToFTx(self, tx_lock_refund_bytes, script_lock_refund, pkh_dest, tx_fee_rate, vkbv=None):
    """Build the swipe tx sending the refund output to the follower.

    Spends the CSV (timeout) path, so nSequence is the script's csv value.
    Returns the serialized tx.  vkbv is unused for BTC.
    """
    # lock refund swipe tx
    # Sends the coinA locked coin to the follower
    tx_lock_refund = self.loadTx(tx_lock_refund_bytes)
    output_script = CScript([OP_0, hashlib.sha256(script_lock_refund).digest()])
    locked_n = findOutput(tx_lock_refund, output_script)
    ensure(locked_n is not None, 'Output not found in tx')
    locked_coin = tx_lock_refund.vout[locked_n].nValue
    # Only the csv value (lock2_value) is needed here; A, B, C are unused
    A, B, lock2_value, C = self.extractScriptLockRefundScriptValues(script_lock_refund)
    tx_lock_refund.rehash()
    tx_lock_refund_hash_int = tx_lock_refund.sha256
    tx = CTransaction()
    tx.nVersion = self.txVersion()
    tx.vin.append(CTxIn(COutPoint(tx_lock_refund_hash_int, locked_n), nSequence=lock2_value))
    tx.vout.append(self.txoType()(locked_coin, self.getScriptForPubkeyHash(pkh_dest)))
    # Size the fee using a dummy witness stack for the swipe path
    dummy_witness_stack = self.getScriptLockRefundSwipeTxDummyWitness(script_lock_refund)
    witness_bytes = self.getWitnessStackSerialisedLength(dummy_witness_stack)
    vsize = self.getTxVSize(tx, add_witness_bytes=witness_bytes)
    pay_fee = int(tx_fee_rate * vsize // 1000)
    tx.vout[0].nValue = locked_coin - pay_fee
    tx.rehash()
    self._log.info('createScriptLockRefundSpendToFTx %s:\n fee_rate, vsize, fee: %ld, %ld, %ld.',
                   i2h(tx.sha256), tx_fee_rate, vsize, pay_fee)
    return tx.serialize()
def createScriptLockSpendTx(self, tx_lock_bytes, script_lock, pkh_dest, tx_fee_rate, vkbv=None):
    """Build the cooperative tx spending the lock output to pkh_dest.

    Returns the serialized tx.  vkbv is unused for BTC.
    """
    tx_lock = self.loadTx(tx_lock_bytes)
    output_script = CScript([OP_0, hashlib.sha256(script_lock).digest()])
    locked_n = findOutput(tx_lock, output_script)
    ensure(locked_n is not None, 'Output not found in tx')
    locked_coin = tx_lock.vout[locked_n].nValue
    tx_lock.rehash()
    tx_lock_id_int = tx_lock.sha256
    tx = CTransaction()
    tx.nVersion = self.txVersion()
    tx.vin.append(CTxIn(COutPoint(tx_lock_id_int, locked_n)))
    tx.vout.append(self.txoType()(locked_coin, self.getScriptForPubkeyHash(pkh_dest)))
    # Size the fee using a dummy witness stack for the lock script
    dummy_witness_stack = self.getScriptLockTxDummyWitness(script_lock)
    witness_bytes = self.getWitnessStackSerialisedLength(dummy_witness_stack)
    vsize = self.getTxVSize(tx, add_witness_bytes=witness_bytes)
    pay_fee = int(tx_fee_rate * vsize // 1000)
    tx.vout[0].nValue = locked_coin - pay_fee
    tx.rehash()
    self._log.info('createScriptLockSpendTx %s:\n fee_rate, vsize, fee: %ld, %ld, %ld.',
                   i2h(tx.sha256), tx_fee_rate, vsize, pay_fee)
    return tx.serialize()
def verifyLockTx(self, tx_bytes, script_out,
                 swap_value,
                 Kal, Kaf,
                 feerate,
                 check_lock_tx_inputs, vkbv=None):
    """Validate an incoming chain-A lock tx against the agreed terms.

    Checks version, locktime, output value and script pubkeys, and
    optionally the fee rate paid by the inputs.  Returns (txid, locked_n).
    Raises (via ensure) on any hard mismatch; a bad fee rate only warns.
    """
    # Verify:
    #
    # Not necessary to check the lock txn is mineable, as protocol will wait for it to confirm
    # However by checking early we can avoid wasting time processing unmineable txns
    # Check fee is reasonable
    tx = self.loadTx(tx_bytes)
    txid = self.getTxid(tx)
    self._log.info('Verifying lock tx: {}.'.format(b2h(txid)))
    ensure(tx.nVersion == self.txVersion(), 'Bad version')
    ensure(tx.nLockTime == 0, 'Bad nLockTime')  # TODO match txns created by cores
    script_pk = CScript([OP_0, hashlib.sha256(script_out).digest()])
    locked_n = findOutput(tx, script_pk)
    ensure(locked_n is not None, 'Output not found in tx')
    locked_coin = tx.vout[locked_n].nValue
    # Check value
    ensure(locked_coin == swap_value, 'Bad locked value')
    # Check script
    A, B = self.extractScriptLockScriptValues(script_out)
    ensure(A == Kal, 'Bad script pubkey')
    ensure(B == Kaf, 'Bad script pubkey')
    if check_lock_tx_inputs:
        # TODO: Check that inputs are unspent
        # Verify fee rate
        inputs_value = 0
        add_bytes = 0
        add_witness_bytes = getCompactSizeLen(len(tx.vin))
        for pi in tx.vin:
            # Look up each prevout to total the input value
            ptx = self.rpc_callback('getrawtransaction', [i2h(pi.prevout.hash), True])
            prevout = ptx['vout'][pi.prevout.n]
            inputs_value += make_int(prevout['value'])
            prevout_type = prevout['scriptPubKey']['type']
            if prevout_type == 'witness_v0_keyhash':
                add_witness_bytes += 107  # sig 72, pk 33 and 2 size bytes
                add_witness_bytes += getCompactSizeLen(107)
            else:
                # Assume P2PKH, TODO more types
                add_bytes += 107  # OP_PUSH72 <ecdsa_signature> OP_PUSH33 <public_key>
        outputs_value = 0
        for txo in tx.vout:
            outputs_value += txo.nValue
        fee_paid = inputs_value - outputs_value
        assert(fee_paid > 0)
        vsize = self.getTxVSize(tx, add_bytes, add_witness_bytes)
        fee_rate_paid = fee_paid * 1000 // vsize
        self._log.info('tx amount, vsize, feerate: %ld, %ld, %ld', locked_coin, vsize, fee_rate_paid)
        if not self.compareFeeRates(fee_rate_paid, feerate):
            self._log.warning('feerate paid doesn\'t match expected: %ld, %ld', fee_rate_paid, feerate)
            # TODO: Display warning to user
    return txid, locked_n
def verifyLockRefundTx(self, tx_bytes, lock_tx_bytes, script_out,
                       prevout_id, prevout_n, prevout_seq, prevout_script,
                       Kal, Kaf, csv_val_expect, swap_value, feerate, vkbv=None):
    """Validate an incoming lock refund tx.

    Returns (txid, locked_coin, locked_n).  Raises on any mismatch,
    including an unexpected fee rate.
    """
    # Verify:
    # Must have only one input with correct prevout and sequence
    # Must have only one output to the p2wsh of the lock refund script
    # Output value must be locked_coin - lock tx fee
    tx = self.loadTx(tx_bytes)
    txid = self.getTxid(tx)
    self._log.info('Verifying lock refund tx: {}.'.format(b2h(txid)))
    ensure(tx.nVersion == self.txVersion(), 'Bad version')
    ensure(tx.nLockTime == 0, 'nLockTime not 0')
    ensure(len(tx.vin) == 1, 'tx doesn\'t have one input')
    ensure(tx.vin[0].nSequence == prevout_seq, 'Bad input nSequence')
    ensure(len(tx.vin[0].scriptSig) == 0, 'Input scriptsig not empty')
    ensure(tx.vin[0].prevout.hash == b2i(prevout_id) and tx.vin[0].prevout.n == prevout_n, 'Input prevout mismatch')
    ensure(len(tx.vout) == 1, 'tx doesn\'t have one output')
    script_pk = CScript([OP_0, hashlib.sha256(script_out).digest()])
    locked_n = findOutput(tx, script_pk)
    ensure(locked_n is not None, 'Output not found in tx')
    locked_coin = tx.vout[locked_n].nValue
    # Check script and values
    A, B, csv_val, C = self.extractScriptLockRefundScriptValues(script_out)
    ensure(A == Kal, 'Bad script pubkey')
    ensure(B == Kaf, 'Bad script pubkey')
    ensure(csv_val == csv_val_expect, 'Bad script csv value')
    ensure(C == Kaf, 'Bad script pubkey')
    fee_paid = swap_value - locked_coin
    assert(fee_paid > 0)
    # Estimate vsize with a dummy witness to judge the fee rate paid
    dummy_witness_stack = self.getScriptLockTxDummyWitness(prevout_script)
    witness_bytes = self.getWitnessStackSerialisedLength(dummy_witness_stack)
    vsize = self.getTxVSize(tx, add_witness_bytes=witness_bytes)
    fee_rate_paid = fee_paid * 1000 // vsize
    self._log.info('tx amount, vsize, feerate: %ld, %ld, %ld', locked_coin, vsize, fee_rate_paid)
    if not self.compareFeeRates(fee_rate_paid, feerate):
        raise ValueError('Bad fee rate, expected: {}'.format(feerate))
    return txid, locked_coin, locked_n
def verifyLockRefundSpendTx(self, tx_bytes, lock_refund_tx_bytes,
                            lock_refund_tx_id, prevout_script,
                            Kal,
                            prevout_n, prevout_value, feerate, vkbv=None):
    """Validate the tx that spends the lock refund output.

    Returns True on success; raises on any mismatch.
    """
    # Verify:
    # Must have only one input with correct prevout (n is always 0) and sequence
    # Must have only one output sending lock refund tx value - fee to leader's address, TODO: follower shouldn't need to verify destination addr
    tx = self.loadTx(tx_bytes)
    txid = self.getTxid(tx)
    self._log.info('Verifying lock refund spend tx: {}.'.format(b2h(txid)))
    ensure(tx.nVersion == self.txVersion(), 'Bad version')
    ensure(tx.nLockTime == 0, 'nLockTime not 0')
    ensure(len(tx.vin) == 1, 'tx doesn\'t have one input')
    ensure(tx.vin[0].nSequence == 0, 'Bad input nSequence')
    ensure(len(tx.vin[0].scriptSig) == 0, 'Input scriptsig not empty')
    ensure(tx.vin[0].prevout.hash == b2i(lock_refund_tx_id) and tx.vin[0].prevout.n == 0, 'Input prevout mismatch')
    ensure(len(tx.vout) == 1, 'tx doesn\'t have one output')
    # Destination doesn't matter to the follower
    '''
    p2wpkh = CScript([OP_0, hash160(Kal)])
    locked_n = findOutput(tx, p2wpkh)
    ensure(locked_n is not None, 'Output not found in lock refund spend tx')
    '''
    tx_value = tx.vout[0].nValue
    fee_paid = prevout_value - tx_value
    assert(fee_paid > 0)
    # Estimate vsize with a dummy witness to judge the fee rate paid
    dummy_witness_stack = self.getScriptLockRefundSpendTxDummyWitness(prevout_script)
    witness_bytes = self.getWitnessStackSerialisedLength(dummy_witness_stack)
    vsize = self.getTxVSize(tx, add_witness_bytes=witness_bytes)
    fee_rate_paid = fee_paid * 1000 // vsize
    self._log.info('tx amount, vsize, feerate: %ld, %ld, %ld', tx_value, vsize, fee_rate_paid)
    if not self.compareFeeRates(fee_rate_paid, feerate):
        raise ValueError('Bad fee rate, expected: {}'.format(feerate))
    return True
def verifyLockSpendTx(self, tx_bytes,
                      lock_tx_bytes, lock_tx_script,
                      a_pkhash_f, feerate, vkbv=None):
    """Validate the cooperative tx spending the lock output to a_pkhash_f.

    Returns True on success; raises on any mismatch.
    """
    # Verify:
    # Must have only one input with correct prevout (n is always 0) and sequence
    # Must have only one output with destination and amount
    tx = self.loadTx(tx_bytes)
    txid = self.getTxid(tx)
    self._log.info('Verifying lock spend tx: {}.'.format(b2h(txid)))
    ensure(tx.nVersion == self.txVersion(), 'Bad version')
    ensure(tx.nLockTime == 0, 'nLockTime not 0')
    ensure(len(tx.vin) == 1, 'tx doesn\'t have one input')
    # Locate the lock output being spent to learn its index and value
    lock_tx = self.loadTx(lock_tx_bytes)
    lock_tx_id = self.getTxid(lock_tx)
    output_script = CScript([OP_0, hashlib.sha256(lock_tx_script).digest()])
    locked_n = findOutput(lock_tx, output_script)
    ensure(locked_n is not None, 'Output not found in tx')
    locked_coin = lock_tx.vout[locked_n].nValue
    ensure(tx.vin[0].nSequence == 0, 'Bad input nSequence')
    ensure(len(tx.vin[0].scriptSig) == 0, 'Input scriptsig not empty')
    ensure(tx.vin[0].prevout.hash == b2i(lock_tx_id) and tx.vin[0].prevout.n == locked_n, 'Input prevout mismatch')
    ensure(len(tx.vout) == 1, 'tx doesn\'t have one output')
    p2wpkh = self.getScriptForPubkeyHash(a_pkhash_f)
    ensure(tx.vout[0].scriptPubKey == p2wpkh, 'Bad output destination')
    # The value of the lock tx output should already be verified, if the fee is as expected the difference will be the correct amount
    fee_paid = locked_coin - tx.vout[0].nValue
    assert(fee_paid > 0)
    dummy_witness_stack = self.getScriptLockTxDummyWitness(lock_tx_script)
    witness_bytes = self.getWitnessStackSerialisedLength(dummy_witness_stack)
    vsize = self.getTxVSize(tx, add_witness_bytes=witness_bytes)
    fee_rate_paid = fee_paid * 1000 // vsize
    self._log.info('tx amount, vsize, feerate: %ld, %ld, %ld', tx.vout[0].nValue, vsize, fee_rate_paid)
    if not self.compareFeeRates(fee_rate_paid, feerate):
        raise ValueError('Bad fee rate, expected: {}'.format(feerate))
    return True
def signTx(self, key_bytes, tx_bytes, input_n, prevout_script, prevout_value):
    """ECDSA-sign input input_n (segwit v0 sighash, SIGHASH_ALL).

    Returns the DER signature with the sighash byte appended.
    """
    tx = self.loadTx(tx_bytes)
    sig_hash = SegwitV0SignatureHash(prevout_script, tx, input_n, SIGHASH_ALL, prevout_value)
    eck = PrivateKey(key_bytes)
    # hasher=None: sig_hash is already the digest to sign
    return eck.sign(sig_hash, hasher=None) + bytes((SIGHASH_ALL,))
def signTxOtVES(self, key_sign, pubkey_encrypt, tx_bytes, input_n, prevout_script, prevout_value):
    """One-time VES (adaptor) signature of input input_n, encrypted to pubkey_encrypt."""
    tx = self.loadTx(tx_bytes)
    sig_hash = SegwitV0SignatureHash(prevout_script, tx, input_n, SIGHASH_ALL, prevout_value)
    return ecdsaotves_enc_sign(key_sign, pubkey_encrypt, sig_hash)
def verifyTxOtVES(self, tx_bytes, ct, Ks, Ke, input_n, prevout_script, prevout_value):
    """Verify encrypted signature ct over input input_n (signer Ks, encryption key Ke)."""
    tx = self.loadTx(tx_bytes)
    sig_hash = SegwitV0SignatureHash(prevout_script, tx, input_n, SIGHASH_ALL, prevout_value)
    return ecdsaotves_enc_verify(Ks, Ke, sig_hash, ct)
def decryptOtVES(self, k, esig):
    # Decrypt an adaptor signature with key k and append the sighash byte.
    return ecdsaotves_dec_sig(k, esig) + bytes((SIGHASH_ALL,))
def verifyTxSig(self, tx_bytes, sig, K, input_n, prevout_script, prevout_value):
    """Verify a tx signature (with trailing sighash byte) against pubkey K."""
    tx = self.loadTx(tx_bytes)
    sig_hash = SegwitV0SignatureHash(prevout_script, tx, input_n, SIGHASH_ALL, prevout_value)
    pubkey = PublicKey(K)
    return pubkey.verify(sig[: -1], sig_hash, hasher=None)  # Pop the hashtype byte
def verifySig(self, pubkey, signed_hash, sig):
    # Verify a plain ECDSA signature over an already-hashed message.
    pubkey = PublicKey(pubkey)
    return pubkey.verify(sig, signed_hash, hasher=None)
def fundTx(self, tx, feerate):
    """Fund tx (bytes) via fundrawtransaction, locking the selected unspents."""
    feerate_str = self.format_amount(feerate)
    # TODO: unlock unspents if bid cancelled
    options = {
        'lockUnspents': True,
        'feeRate': feerate_str,
    }
    rv = self.rpc_callback('fundrawtransaction', [tx.hex(), options])
    return bytes.fromhex(rv['hex'])
def listInputs(self, tx_bytes):
    """Describe each input of tx_bytes and whether the wallet has it locked."""
    tx = self.loadTx(tx_bytes)
    all_locked = self.rpc_callback('listlockunspent')

    def describe(txin):
        txid_hex = i2h(txin.prevout.hash)
        locked = any(txid_hex == lu['txid'] and txin.prevout.n == lu['vout'] for lu in all_locked)
        return {'txid': txid_hex, 'vout': txin.prevout.n, 'islocked': locked}

    return [describe(txin) for txin in tx.vin]
def unlockInputs(self, tx_bytes):
    """Unlock every wallet UTXO consumed by tx_bytes."""
    tx = self.loadTx(tx_bytes)
    spent = [{'txid': i2h(txin.prevout.hash), 'vout': txin.prevout.n} for txin in tx.vin]
    self.rpc_callback('lockunspent', [True, spent])
def signTxWithWallet(self, tx):
    # Sign all wallet-owned inputs of tx (bytes); returns signed tx bytes.
    rv = self.rpc_callback('signrawtransactionwithwallet', [tx.hex()])
    return bytes.fromhex(rv['hex'])
def publishTx(self, tx):
    # Broadcast tx (bytes); returns the txid hex string.
    return self.rpc_callback('sendrawtransaction', [tx.hex()])
def encodeTx(self, tx):
    # CTransaction -> bytes.
    return tx.serialize()
def loadTx(self, tx_bytes):
    # Load tx from bytes to internal representation
    tx = CTransaction()
    tx.deserialize(BytesIO(tx_bytes))
    return tx
def getTxid(self, tx):
    """Return the txid (32 bytes) of a tx given as hex str, bytes or CTransaction."""
    if isinstance(tx, str):
        tx = bytes.fromhex(tx)
    if isinstance(tx, bytes):
        tx = self.loadTx(tx)
    tx.rehash()
    return i2b(tx.sha256)
def getTxOutputPos(self, tx, script):
    """Index of the p2wsh output paying to script, or None."""
    if isinstance(tx, bytes):
        tx = self.loadTx(tx)
    script_pk = CScript([OP_0, hashlib.sha256(script).digest()])
    return findOutput(tx, script_pk)
def getPubkeyHash(self, K):
    # hash160 of the encoded point K.
    return hash160(self.encodePubkey(K))
def getScriptDest(self, script):
    # p2wsh scriptPubKey: OP_0 <sha256(script)>
    return CScript([OP_0, hashlib.sha256(script).digest()])
def getPkDest(self, K):
    # scriptPubKey paying to the hash of pubkey K.
    return self.getScriptForPubkeyHash(self.getPubkeyHash(K))
def scanTxOutset(self, dest):
    # Scan the UTXO set for outputs paying the raw scriptPubKey dest.
    return self.rpc_callback('scantxoutset', ['start', ['raw({})'.format(dest.hex())]])
def getTransaction(self, txid):
    """Fetch raw tx bytes by txid via getrawtransaction; None when unavailable."""
    try:
        tx_hex = self.rpc_callback('getrawtransaction', [txid.hex()])
        return bytes.fromhex(tx_hex)
    except Exception:
        # TODO: filter errors
        return None
def getWalletTransaction(self, txid):
    """Fetch a wallet tx by txid via gettransaction; None when unavailable."""
    try:
        wtx = self.rpc_callback('gettransaction', [txid.hex()])
        return bytes.fromhex(wtx)
    except Exception:
        # TODO: filter errors
        return None
def setTxSignature(self, tx_bytes, stack):
    """Replace the witness of input 0 with the given stack; returns tx bytes."""
    tx = self.loadTx(tx_bytes)
    tx.wit.vtxinwit.clear()
    tx.wit.vtxinwit.append(CTxInWitness())
    tx.wit.vtxinwit[0].scriptWitness.stack = stack
    return tx.serialize()
def stripTxSignature(self, tx_bytes):
    # Return tx_bytes with all witness data removed.
    tx = self.loadTx(tx_bytes)
    tx.wit.vtxinwit.clear()
    return tx.serialize()
def extractLeaderSig(self, tx_bytes):
    # Witness stack slot 1 of input 0 - presumably the leader's multisig
    # signature; verify against the witness layout set by setTxSignature callers.
    tx = self.loadTx(tx_bytes)
    return tx.wit.vtxinwit[0].scriptWitness.stack[1]
def extractFollowerSig(self, tx_bytes):
    # Witness stack slot 2 of input 0 - presumably the follower's multisig
    # signature; verify against the witness layout set by setTxSignature callers.
    tx = self.loadTx(tx_bytes)
    return tx.wit.vtxinwit[0].scriptWitness.stack[2]
def createBLockTx(self, Kbs, output_amount):
    """Build an unfunded chain-B lock tx paying output_amount to pubkey Kbs."""
    tx = CTransaction()
    tx.nVersion = self.txVersion()
    p2wpkh = self.getPkDest(Kbs)
    tx.vout.append(self.txoType()(output_amount, p2wpkh))
    return tx.serialize()
def encodeSharedAddress(self, Kbv, Kbs):
    # Kbv (view key) is unused for BTC; only the spend pubkey forms the address.
    return self.pubkey_to_segwit_address(Kbs)
def publishBLockTx(self, Kbv, Kbs, output_amount, feerate):
    """Create, fund, sign and broadcast the chain-B lock tx; returns txid hex."""
    b_lock_tx = self.createBLockTx(Kbs, output_amount)
    b_lock_tx = self.fundTx(b_lock_tx, feerate)
    # NOTE(review): b_lock_tx_id is computed but unused here
    b_lock_tx_id = self.getTxid(b_lock_tx)
    b_lock_tx = self.signTxWithWallet(b_lock_tx)
    return self.publishTx(b_lock_tx)
def recoverEncKey(self, esig, sig, K):
    # Recover the adaptor/encryption key from an encrypted+decrypted sig pair.
    return ecdsaotves_rec_enc_key(K, esig, sig[:-1])  # Strip sighash type
def getTxVSize(self, tx, add_bytes=0, add_witness_bytes=0):
    """Virtual size (vbytes) of tx, plus optional extra base/witness bytes."""
    wsf = self.witnessScaleFactor()
    size_with_witness = len(tx.serialize_with_witness()) + add_bytes + add_witness_bytes
    size_without_witness = len(tx.serialize_without_witness()) + add_bytes
    weight = size_without_witness * (wsf - 1) + size_with_witness
    # Round weight up to whole vbytes
    return (weight + wsf - 1) // wsf
def findTxB(self, kbv, Kbs, cb_swap_value, cb_block_confirmed, restore_height, bid_sender):
    """Scan the UTXO set for the chain-B lock output paying Kbs.

    Returns a dict describing the first sufficiently confirmed output of
    the expected value, else None.  kbv, restore_height and bid_sender are
    unused for BTC.
    """
    raw_dest = self.getPkDest(Kbs)
    rv = self.scanTxOutset(raw_dest)
    # Was a bare print(); route through the instance logger instead
    self._log.debug('scanTxOutset %s', dumpj(rv))
    for utxo in rv['unspents']:
        if 'height' in utxo and utxo['height'] > 0 and rv['height'] - utxo['height'] > cb_block_confirmed:
            if self.make_int(utxo['amount']) != cb_swap_value:
                self._log.warning('Found output to lock tx pubkey of incorrect value: %s', str(utxo['amount']))
            else:
                return {'txid': utxo['txid'], 'vout': utxo['vout'], 'amount': utxo['amount'], 'height': utxo['height']}
    return None
def waitForLockTxB(self, kbv, Kbs, cb_swap_value, cb_block_confirmed):
    """Poll the UTXO set (up to 20 x 1s) for a confirmed lock output paying Kbs.

    Returns True when a sufficiently confirmed output of the expected value
    appears, else False after the timeout.  kbv is unused for BTC.
    """
    raw_dest = self.getPkDest(Kbs)
    for _ in range(20):  # loop index was unused
        time.sleep(1)
        rv = self.scanTxOutset(raw_dest)
        # Was a bare print(); route through the instance logger instead
        self._log.debug('scanTxOutset %s', dumpj(rv))
        for utxo in rv['unspents']:
            if 'height' in utxo and utxo['height'] > 0 and rv['height'] - utxo['height'] > cb_block_confirmed:
                if self.make_int(utxo['amount']) != cb_swap_value:
                    self._log.warning('Found output to lock tx pubkey of incorrect value: %s', str(utxo['amount']))
                else:
                    return True
    return False
def spendBLockTx(self, chain_b_lock_txid, address_to, kbv, kbs, cb_swap_value, b_fee, restore_height):
    # Not implemented for BTC.
    raise ValueError('TODO')
def getLockTxHeight(self, txid, dest_address, bid_amount, rescan_from, find_index=False):
    """Return confirmation info for the lock tx paying `dest_address`.

    When `txid` is None the wallet unspents are searched for an output of
    exactly `bid_amount` and the discovered txid is added to the result.
    Imports the address watch-only (rescanning from `rescan_from`) if the
    node does not yet track it.  Returns a dict with 'depth' and 'height'
    (plus 'index'/'txid' when requested/discovered), or None.
    """
    # Add watchonly address and rescan if required
    addr_info = self.rpc_callback('getaddressinfo', [dest_address])
    if not addr_info['iswatchonly']:
        # Previous version stored the (always null) importaddress result
        # in an unused local.
        self.rpc_callback('importaddress', [dest_address, 'bid', False])
        self._log.info('Imported watch-only addr: {}'.format(dest_address))
        self._log.info('Rescanning chain from height: {}'.format(rescan_from))
        self.rpc_callback('rescanblockchain', [rescan_from])

    return_txid = txid is None
    if txid is None:
        txns = self.rpc_callback('listunspent', [0, 9999999, [dest_address, ]])

        for tx in txns:
            if self.make_int(tx['amount']) == bid_amount:
                txid = bytes.fromhex(tx['txid'])
                break

    if txid is None:
        return None

    try:
        tx = self.rpc_callback('gettransaction', [txid.hex()])

        block_height = 0
        if 'blockhash' in tx:
            block_header = self.rpc_callback('getblockheader', [tx['blockhash']])
            block_height = block_header['height']

        rv = {
            'depth': 0 if 'confirmations' not in tx else tx['confirmations'],
            'height': block_height}
    except Exception as e:
        # gettransaction raises for txids unknown to the wallet.
        self._log.debug('getLockTxHeight gettransaction failed: %s, %s', txid.hex(), str(e))
        return None

    if find_index:
        tx_obj = self.rpc_callback('decoderawtransaction', [tx['hex']])
        rv['index'] = find_vout_for_address_from_txobj(tx_obj, dest_address)

    if return_txid:
        rv['txid'] = txid.hex()

    return rv
def getOutput(self, txid, dest_script, expect_value, xmr_swap=None):
    """Scan the UTXO set for outputs paying `dest_script`.

    Returns (matches, chain_height) where matches is a list of dicts with
    depth/height/amount/txid/vout for outputs of exactly `expect_value`
    (optionally restricted to `txid`).
    """
    # TODO: Use getrawtransaction if txindex is active
    utxos = self.rpc_callback('scantxoutset', ['start', ['raw({})'.format(dest_script.hex())]])
    # 'height' is not returned by older (v18) node codebases.
    chain_height = utxos['height'] if 'height' in utxos else self.getChainHeight()

    matches = []
    for utxo in utxos['unspents']:
        if txid and txid.hex() != utxo['txid']:
            continue
        if self.make_int(utxo['amount']) != expect_value:
            continue
        mined_height = utxo.get('height', 0)
        matches.append({
            'depth': (chain_height - mined_height) + 1 if 'height' in utxo else 0,
            'height': mined_height,
            'amount': self.make_int(utxo['amount']),
            'txid': utxo['txid'],
            'vout': utxo['vout']})
    return matches, chain_height
def withdrawCoin(self, value, addr_to, subfee):
    """Send `value` coins to `addr_to` with the wallet's sendtoaddress RPC.

    subfee=True deducts the fee from the sent amount; the configured
    confirmation target is always applied.  Returns the txid.
    """
    return self.rpc_callback(
        'sendtoaddress',
        [addr_to, value, '', '', subfee, True, self._conf_target])
def signCompact(self, k, message):
    # Sign sha256(message) with private key bytes `k`, returning the
    # 64-byte compact signature (recovery-id byte dropped from the
    # recoverable form).
    message_hash = hashlib.sha256(bytes(message, 'utf-8')).digest()

    privkey = PrivateKey(k)
    return privkey.sign_recoverable(message_hash, hasher=None)[:64]
def verifyCompact(self, K, message, sig):
    """Verify a 64-byte compact signature over sha256(message) for pubkey K.

    Raises AssertionError when verification fails.
    """
    message_hash = hashlib.sha256(bytes(message, 'utf-8')).digest()
    pubkey = PublicKey(K)
    rv = pubkey.verify_compact(sig, message_hash, hasher=None)
    if rv is not True:
        # Explicit raise instead of `assert` so the check is not stripped
        # when Python runs with optimisations (-O).  Same exception type
        # as before for compatibility with existing callers.
        raise AssertionError('verify_compact failed')
def verifyMessage(self, address, message, signature, message_magic=None) -> bool:
    """Verify a base64 signed message (Bitcoin signmessage format).

    Recovers the public key from `signature` over the magic-framed,
    double-sha256 message hash and returns True when its hash160 matches
    the hash encoded in `address`.
    """
    if message_magic is None:
        message_magic = self.chainparams_network()['message_magic']

    # Compact-size framed: magic length + magic + message length + message.
    message_bytes = SerialiseNumCompact(len(message_magic)) + bytes(message_magic, 'utf-8') + SerialiseNumCompact(len(message)) + bytes(message, 'utf-8')
    message_hash = hashlib.sha256(hashlib.sha256(message_bytes).digest()).digest()
    signature_bytes = base64.b64decode(signature)
    # Header byte encodes 27 + recovery id (+4 when compressed).
    rec_id = (signature_bytes[0] - 27) & 3
    signature_bytes = signature_bytes[1:] + bytes((rec_id,))
    try:
        pubkey = PublicKey.from_signature_and_message(signature_bytes, message_hash, hasher=None)
    except Exception as e:
        self._log.info('verifyMessage failed: ' + str(e))
        return False

    address_hash = self.decodeAddress(address)
    pubkey_hash = hash160(pubkey.format())

    # Direct comparison replaces the redundant `True if ... else False`.
    return address_hash == pubkey_hash
def showLockTransfers(self, Kbv, Kbs):
    # Only meaningful for view-key based chains (e.g. XMR); not supported here.
    raise ValueError('Unimplemented')
def getLockTxSwapOutputValue(self, bid, xmr_swap):
    # Value of the swap output in the chain-A lock tx.
    return bid.amount

def getLockRefundTxSwapOutputValue(self, bid, xmr_swap):
    # Value of the swap output in the lock-refund tx.
    return xmr_swap.a_swap_refund_value

def getLockRefundTxSwapOutput(self, xmr_swap):
    # Only one prevout exists
    return 0
def getScriptLockTxDummyWitness(self, script):
    """Placeholder witness stack (hex strings) sized like a real spend of
    the lock script: empty element, two 72-byte signatures, the script."""
    sig_placeholder = bytes(72).hex()
    return ['', sig_placeholder, sig_placeholder, bytes(len(script)).hex()]
def getScriptLockRefundSpendTxDummyWitness(self, script):
    """Placeholder witness stack (hex strings) for the refund-spend path:
    empty element, two 72-byte signatures, a one-byte element, the script."""
    sig_placeholder = bytes(72).hex()
    return ['', sig_placeholder, sig_placeholder, bytes((1,)).hex(), bytes(len(script)).hex()]
def getScriptLockRefundSwipeTxDummyWitness(self, script):
    """Placeholder witness stack (hex strings) for the swipe path:
    one 72-byte signature, an empty element, the script."""
    return [bytes(72).hex(), '', bytes(len(script)).hex()]
def getWitnessStackSerialisedLength(self, witness_stack):
    """Estimate the serialised size contributed by `witness_stack`
    (elements given as hex strings) plus the segwit framing overhead."""
    length = getCompactSizeLen(len(witness_stack))
    length += sum(getWitnessElementLen(len(element) // 2)  # hex -> bytes
                  for element in witness_stack)

    # See core SerializeTransaction
    length += 32 + 4 + 1 + 4  # vinDummy
    length += 1  # flags
    return length
def describeTx(self, tx_hex):
    # Decode a raw transaction hex via the node; returns the decoded dict.
    return self.rpc_callback('decoderawtransaction', [tx_hex])

def getSpendableBalance(self):
    # Trusted wallet balance, converted to integer base units.
    return self.make_int(self.rpc_callback('getbalances')['mine']['trusted'])
def createUTXO(self, value_sats):
    """Create a fresh wallet UTXO of `value_sats` base units.

    Sends the amount to a newly generated address labelled 'create_utxo'.
    Returns (txid, address); raises ValueError when the spendable balance
    is insufficient.
    """
    if self.getSpendableBalance() < value_sats:
        raise ValueError('Balance too low')

    address = self.getNewAddress(self._use_segwit, 'create_utxo')
    txid = self.withdrawCoin(self.format_amount(value_sats), address, False)
    return txid, address
def testBTCInterface():
    # Placeholder manual smoke-test hook.
    print('testBTCInterface')


if __name__ == "__main__":
    testBTCInterface()
| 1.578125 | 2 |
import os
import os.path
import logging
import subprocess
from .base import (
SourceControl,
STATE_NEW, STATE_MODIFIED, STATE_COMMITTED)
try:
    import pygit2
    SUPPORTS_GIT = True
except ImportError:
    # pygit2 is optional; GitLibSourceControl refuses to construct without it.
    SUPPORTS_GIT = False


logger = logging.getLogger(__name__)
class GitBaseSourceControl(SourceControl):
    """Shared behaviour for Git-backed wiki storage.

    Holds the repository root, bootstraps the repository and `.gitignore`
    on start(), and provides a helper for running the command-line client.
    """

    def __init__(self, root):
        SourceControl.__init__(self)
        self.root = root

    def start(self, wiki):
        # Make a Git repo if there's none.
        if not os.path.isdir(os.path.join(self.root, '.git')):
            logger.info("Creating Git repository at: " + self.root)
            self._initRepo(self.root)

        # Create a `.gitignore` file if there's none.
        ignore_path = os.path.join(self.root, '.gitignore')
        if not os.path.isfile(ignore_path):
            logger.info("Creating `.gitignore` file.")
            with open(ignore_path, 'w') as f:
                f.write('.wiki')
            self._add(ignore_path)
            self._commit('Created .gitignore.', [ignore_path])

    def getSpecialFilenames(self):
        specials = ['.git', '.gitignore']
        return [os.path.join(self.root, d) for d in specials]

    def getState(self, path):
        return self._status(path)

    def _run(self, cmd, *args, **kwargs):
        """Run the git executable and return its stdout.

        NOTE(review): relies on `self.git` being set by a subclass; it is
        not defined in this class -- confirm against the CLI subclass.
        """
        exe = [self.git]
        if 'norepo' not in kwargs or not kwargs['norepo']:
            # Bug fix: args are passed straight to the process (no shell),
            # so the previous '--git-dir="%s"' form handed git a path with
            # literal quote characters embedded in it.
            exe.append('--git-dir=%s' % self.root)
        exe.append(cmd)
        exe += args

        logger.debug("Running Git: " + str(exe))
        return subprocess.check_output(exe)
class GitLibSourceControl(GitBaseSourceControl):
    """pygit2-based backend.

    NOTE(review): initRepo() delegates to GitBaseSourceControl.initRepo,
    but the base class visible here defines start(), not initRepo() --
    this looks like it would raise AttributeError; confirm the intended
    base method name.
    """
    def __init__(self, root):
        if not SUPPORTS_GIT:
            raise Exception(
                "Can't support Git because pygit2 is not available.")
        GitBaseSourceControl.__init__(self, root)

    def initRepo(self, wiki):
        GitBaseSourceControl.initRepo(self, wiki)
        self.repo = pygit2.Repository(self.root)

    def _initRepo(self, path):
        pygit2.init_repository(path, False)

    def _add(self, paths):
        # No-op stub: staging is not performed by this backend.
        pass

    def _commit(self, message, paths):
        # No-op stub: committing is not performed by this backend.
        pass

    def _status(self, path):
        # Map pygit2 status flags onto the wiki's STATE_* constants.
        flags = self.repo.status_file(self._getRepoPath(path))
        if flags == pygit2.GIT_STATUS_CURRENT:
            return STATE_COMMITTED
        if (flags & pygit2.GIT_STATUS_WT_MODIFIED or
                flags & pygit2.GIT_STATUS_INDEX_MODIFIED):
            return STATE_MODIFIED
        if (flags & pygit2.GIT_STATUS_WT_NEW or
                flags & pygit2.GIT_STATUS_INDEX_NEW):
            return STATE_NEW
        raise Exception("Unsupported status flag combination: %s" % flags)

    def _getRepoPath(self, path):
        # Repo-relative path with forward slashes, as pygit2 expects.
        return os.path.relpath(path, self.root).replace('\\', '/')
| 2.515625 | 3 |
linux/lib/python2.7/dist-packages/samba/tests/kcc/kcc_utils.py | nmercier/linux-cross-gcc | 3 | 12760455 | # Unix SMB/CIFS implementation. Tests for samba.kcc.kcc_utils.
# Copyright (C) <NAME> 2015
#
# Written by <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.kcc.kcc_utils"""
import samba
import samba.tests
from samba.kcc.kcc_utils import *
class ScheduleTests(samba.tests.TestCase):
    """Unit tests for the schedule helpers in samba.kcc.kcc_utils."""

    def test_new_connection_schedule(self):
        schedule = new_connection_schedule()
        self.assertIsInstance(schedule, drsblobs.schedule)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.  188 = blob size, 168 = 24h * 7d slots.
        self.assertEqual(schedule.size, 188)
        self.assertEqual(len(schedule.dataArray[0].slots), 168)
# OK, this is pathetic, but the rest of it looks really hard, with the
# classes all intertwingled with each other and the samdb. That is to say:
# XXX later.
| 2.375 | 2 |
src/AddressBook/api/views.py | thunderoy/AddressBook | 1 | 12760456 | <reponame>thunderoy/AddressBook<filename>src/AddressBook/api/views.py
#generic
from rest_framework import generics
from AddressBook.models import Contact
from .serializers import ContactSerializer
from rest_framework import permissions
class CreateContactApiView(generics.CreateAPIView):
    """POST endpoint creating a Contact owned by the requesting user."""

    # Configuration attributes grouped at the top (previously scattered
    # between methods); the trailing dataset-extraction residue that broke
    # the permission_classes line has been removed.
    serializer_class = ContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)

    def get_queryset(self):
        return Contact.objects.all()

    def perform_create(self, serializer):
        # Attach the authenticated user as the contact owner.
        serializer.save(user=self.request.user)
from django.db import models
from tinymce.models import HTMLField
# Create your models here.
class Agreement(models.Model):
    # Rich-HTML agreement text in both supported languages.
    english = HTMLField()
    korean = HTMLField()


class LoginError(models.Model):
    # Email address recorded for a failed login; how it is populated is
    # not shown in this module (see the SSO views).
    email = models.EmailField()
| 2.15625 | 2 |
import torch
from torch import nn
from torchviz import make_dot
from torchvision.models import AlexNet
from cnn_flower import CNNNet

# Build the model, run a dummy CIFAR-sized forward pass and render the
# autograd graph with torchviz.  To inspect the torchvision AlexNet
# reference model instead, swap in AlexNet() with a 1x3x227x227 input.
# (The previous copy-pasted, commented-out AlexNet block was removed.)
model = CNNNet()
x = torch.randn(1, 3, 32, 32).requires_grad_(True)
y = model(x)
vis_graph = make_dot(y, params=dict(list(model.named_parameters()) + [('x', x)]))
vis_graph.view()
| 2.546875 | 3 |
myplot/misc.py | wy2136/wython | 1 | 12760459 | <filename>myplot/misc.py<gh_stars>1-10
import matplotlib.pyplot as plt
import datetime, calendar
def xticks2dayofyear():
    '''adjust xticks when it represents day of year.'''
    # Tick at the first day of each month (2000 is used as an arbitrary
    # leap year), labelled with the month abbreviation.
    first_days = []
    labels = []
    for month in range(1, 13):
        first = datetime.datetime(2000, month, 1)
        first_days.append(first.timetuple().tm_yday)
        labels.append(first.strftime('%b'))
    plt.xticks(first_days, labels, ha='left')
    plt.xlim(1, 366)
def _num2lat(n):
if n > 0:
lat = '{}$^{{\circ}}$N'.format(n)
elif n <0:
lat = '{}$^{{\circ}}$S'.format(-n)
else:
lat = '{}$^{{\circ}}$'.format(n)
return lat
def _num2lon(n):
n = n % 360
if n < 180:
lon = '{}$^{{\circ}}$E'.format(n)
elif n > 180:
lon = '{}$^{{\circ}}$W'.format(360-n)
else:
lon = '{}$^{{\circ}}$'.format(n)
return lon
def _num2mon(n):
return calendar.month_name[n][0:3]
def _set_new_tick_labels(axis, ticks, ticklabels, **kw):
    """Apply ticks/labels to the chosen axis ('x' or 'y') of the current axes."""
    setters = {'x': plt.xticks, 'y': plt.yticks}
    if axis not in setters:
        raise ValueError('The axis parameter is either "x" or "y"!')
    setters[axis](ticks, ticklabels, **kw)
def change_ticklabels(axis, kind, ticks=None):
    '''Relabel ticks of one axis as geographic or calendar labels.

    axis: 'x' or 'y'.
    kind: 'lat', 'lon', 'month', or 'dayofyear'.
    ticks: positions to relabel; defaults to the current in-range ticks.'''
    # get ticks
    if ticks is None:
        if axis == 'x':
            ticks = plt.gca().get_xticks()
            xlim = plt.xlim()
            ticks = [tick for tick in ticks if xlim[0] <= tick <= xlim[1]]
        elif axis == 'y':
            ticks = plt.gca().get_yticks()
            ylim = plt.ylim()
            ticks = [tick for tick in ticks if ylim[0] <= tick <= ylim[1]]
        else:
            raise ValueError('axis is either "x" or "y".')
    # change tick labels
    if kind == 'lat':
        ticks = [tick for tick in ticks if -90 <= tick <= 90]
        ticklabels = [_num2lat(tick) for tick in ticks]
        _set_new_tick_labels(axis, ticks, ticklabels)
    elif kind == 'lon':
        ticklabels = [_num2lon(tick) for tick in ticks]
        _set_new_tick_labels(axis, ticks, ticklabels)
    elif kind == 'month':
        # Bug fix: this previously used np.arange(1, 13), but numpy is
        # never imported in this module, so kind='month' raised NameError.
        ticks = list(range(1, 13))
        ticklabels = [_num2mon(tick) for tick in ticks]
        _set_new_tick_labels(axis, ticks, ticklabels)
        if axis == 'x':
            plt.xlim(0.5, 12.5)
        else:
            plt.ylim(0.5, 12.5)
            plt.gca().invert_yaxis()
    elif kind == 'dayofyear':
        # Tick on the first day of each month of an arbitrary leap year.
        ticks = [datetime.datetime(2000, month, 1).timetuple().tm_yday
                 for month in range(1, 13)]
        ticklabels = [calendar.month_name[i][0:3] for i in range(1, 13)]
        if axis == 'x':
            _set_new_tick_labels(axis, ticks, ticklabels, ha='left')
            plt.xlim(1, 366)
        else:
            _set_new_tick_labels(axis, ticks, ticklabels, va='top')
            plt.ylim(1, 366)
            plt.gca().invert_yaxis()
| 3.328125 | 3 |
import cv2
import numpy as np

# Simulates the game window for testing a single captured frame.
# Esc closes the window; space toggles between a black screen and the frame.

# Menu key -> screenshot file for that resolution/zoom combination.
# Replaces 19 copy-pasted if-blocks keyed on upper/lowercase key codes.
FRAMES = {
    'a': '1280x720_zoom75.png',
    'b': '1280x720_zoom80.png',
    'c': '1280x720_zoom85.png',
    'd': '1280x720_zoom90.png',
    'e': '1280x720_zoom95.png',
    'f': '1280x720_zoom100.png',
    'g': '1280x720_zoom125.png',
    'h': '1752x712_zoom75.png',
    'i': '1752x712_zoom80.png',
    'j': '1752x712_zoom85.png',
    'k': '1752x712_zoom90.png',
    'l': '1752x712_zoom95.png',
    'm': '1752x712_zoom100.png',
    'n': '1752x712_zoom105.png',
    'o': '1752x712_zoom110.png',
    'p': '1752x712_zoom115.png',
    'q': '1752x712_zoom120.png',
    'r': '1752x712_zoom125.png',
    's': '1920x1080_zoom90.png',
}

print("Aperte espaço para mudar frame/tela preta")
print("Pressione uma tecla pra selecionar a resolução:")
for key in sorted(FRAMES):
    resolution, zoom = FRAMES[key][:-len('.png')].split('_zoom')
    print("%s->%s zoom = %s%%" % (key.upper(), resolution, zoom))

cv2.imshow('Stardew Valley', np.zeros([20, 20]))
key_code = cv2.waitKey()
choice = chr(key_code).lower() if 0 <= key_code < 256 else ''
if choice not in FRAMES:
    # Previously an unknown key crashed later with NameError; fail explicitly.
    print("Tecla inválida")
    cv2.destroyAllWindows()
    raise SystemExit(1)

img = cv2.imread(FRAMES[choice])
img2 = np.zeros(img.shape)  # black frame of the same shape
dsp = img

cv2.imshow('Stardew Valley', img)
while True:
    k = cv2.waitKey(0)
    if k == 32:  # space: toggle frame <-> black screen
        dsp = img2 if dsp is img else img
        cv2.imshow('Stardew Valley', dsp)
    elif k == 27:  # esc: quit
        cv2.destroyAllWindows()
        break
| 2.953125 | 3 |
# -*- coding: utf-8 -*-
import sys
import MySQLdb
from BeautifulSoup import BeautifulStoneSoup
import feedparser
from datetime import datetime, timedelta
import urllib2
screen = 'standings'
# CLI usage: standings.py <sport> <league-short-code> <link-filter> [season]
sport = sys.argv[1]
league = sys.argv[2]
direct = sys.argv[3]
news = False  # optional extra link filter; disabled by default
class Season:
    """Plain data holder for one wp_bp_seasons row.

    Class attributes serve as per-instance defaults (all immutable, so
    sharing them at class level is safe).
    """
    id = 0
    feed_id = ''
    name = ''
    # Fixed typo: was `stardate`; the rest of the module reads and writes
    # `startdate` (see insert_season and the feed-parsing loop), so the
    # misspelled default was dead.
    startdate = ''
    enddate = ''
    league_id = 0
class Standings:
    """Plain data holder for one standings row; class attributes act as
    per-instance defaults.  Fields mirror the columns written by
    insert_standing()."""
    id = 0
    team_id = 0
    conference_feed_id = ''
    division_feed_id = ''
    team_feed_id = ''
    won = 0
    lost = 0
    draw = 0
    pts = 0
    gls = 0   # games lost in shootout (NHL)
    glo = 0   # games lost in overtime (NHL)
    otl = 0   # NHL: OT+SO losses; other leagues: win percentage (see prepare_data)
    lrank = 0  # conference ranking
    drank = 0  # division ranking
    gb = 0     # games behind (MLB/NBA)
    '''home'''
    hwon = 0
    hlost = 0
    hshot = 0
    hover = 0
    hdraw = 0
    home = ''  # rendered "W - L[ - X]" string
    '''road'''
    rwon = 0
    rlost = 0
    rshot = 0
    rover = 0
    rdraw = 0
    road = ''
    '''division'''
    dwon = 0
    dlost = 0
    div = ''
    '''conference'''
    cwon = 0
    clost = 0
    conf = ''
    '''l10'''
    l10won = 0
    l10lost = 0
    l10 = ''
    '''pa and pf'''
    pf = 0
    rf = 0
    pa = 0
    ra = 0
    gf = 0
    ga = 0
    diff = 0
    slost = 0
    swon = 0
    streak = ''  # rendered "Won N" / "Lost N"
def get_team_by_feedid(c, feedid):
    """Look up a team row id by its feed id; returns a 1-tuple or None."""
    # Parameterised query: the driver escapes `feedid`, removing the SQL
    # injection risk of the previous string interpolation.
    c.execute("SELECT id FROM wp_bp_teams WHERE feed_id = %s", (feedid,))
    return c.fetchone()
def get_stand(c, season_feed_id, team_id):
    """Fetch an existing standings row id for (season, team); 1-tuple or None."""
    # Parameterised query instead of string interpolation.
    c.execute(
        "SELECT id FROM standings WHERE season_feed_id = %s AND team_id = %s",
        (season_feed_id, team_id))
    return c.fetchone()
def insert_standing(c, data):
    """Upsert standings rows.

    `data` is a pre-rendered, comma-joined string of SQL VALUES tuples
    produced by prepare_data().  NOTE(review): because the values arrive
    already embedded in SQL text, they bypass driver-side escaping --
    safe only while prepare_data emits numeric/str() fields it controls.
    """
    if (data != ''):
        sql = """INSERT INTO standings(
            `id`,
            `won`,
            `lost`,
            `conf`,
            `div`,
            `home`,
            `road`,
            `l10`,
            `streak`,
            `otl`,
            `gb`,
            `pf`,
            `pa`,
            `rf`,
            `ra`,
            `gf`,
            `ga`,
            `diff`,
            `team_id`,
            `team_feed_id`,
            `conference_ranking`,
            `division_ranking`,
            `conference_feed_id`,
            `division_feed_id`,
            `season_feed_id`,
            `pts`,
            `draw`
            ) VALUES %s
            ON DUPLICATE KEY UPDATE
            `won` = VALUES(`won`),
            `lost` = VALUES(`lost`),
            `conf` = VALUES(`conf`),
            `div` = VALUES(`div`),
            `home` = VALUES(`home`),
            `road` = VALUES(`road`),
            `l10` = VALUES(`l10`),
            `streak` = VALUES(`streak`),
            `otl` = VALUES(`otl`),
            `gb` = VALUES(`gb`),
            `pf` = VALUES(`pf`),
            `pa` = VALUES(`pa`),
            `rf` = VALUES(`rf`),
            `ra` = VALUES(`ra`),
            `gf` = VALUES(`gf`),
            `ga` = VALUES(`ga`),
            `diff` = VALUES(`diff`),
            `conference_ranking` = VALUES(`conference_ranking`),
            `division_ranking` = VALUES(`division_ranking`),
            `pts` = VALUES(`pts`),
            `draw` = VALUES(`draw`)
            ;""" % data
        # print sql
        c.execute(sql)
def get_season(c, feedid):
    """Look up a season row id by its feed id; returns a 1-tuple or None."""
    # Parameterised query instead of string interpolation.
    c.execute("SELECT id FROM wp_bp_seasons WHERE feed_id = %s", (feedid,))
    return c.fetchone()
def insert_season(c, data):
    """Insert one Season object as a wp_bp_seasons row."""
    # Driver-side parameter binding replaces the manual string formatting
    # plus conn.escape_string call (which also removed a hidden dependency
    # on the module-global `conn`).
    sql = ("INSERT INTO wp_bp_seasons(feed_id,name,startdate,enddate,league_id) "
           "VALUES (%s,%s,%s,%s,%s);")
    c.execute(sql, (data.feed_id, data.name, data.startdate, data.enddate, data.league_id))
def get_league(c, league):
    """Look up a league row id by its short code; returns a 1-tuple or None."""
    # Parameterised query instead of string interpolation.
    c.execute("SELECT id FROM wp_bp_leagues WHERE short = %s", (league,))
    return c.fetchone()
def prepare_data(division, con=False):
    """Derive display fields on each Standings object and render each as a
    parenthesised SQL VALUES tuple string.

    division: list of Standings objects (callers pass a one-element list).
    con: for MLB/NBA, rank by conference (True) instead of division.
    Relies on the module-global `league`.
    NOTE(review): if no entry has rank 1 for MLB/NBA, `division[index]`
    below raises IndexError -- assumes the feed always contains a rank-1
    team; confirm.
    """
    insertdata = []
    index = 0
    if league == 'MLB' or league == 'NBA':
        # Find the rank-1 team; games-behind is computed relative to it.
        for stand in division:
            if con:
                if stand.lrank == 1:
                    break
            else:
                if stand.drank == 1:
                    break
            index += 1
        first = division[index]
    stand = Standings()  # NOTE(review): dead assignment, overwritten by the loop
    for stand in division:
        wl = float(stand.won + stand.lost)
        if wl == 0.0:
            wl = 1.0  # avoid division by zero before any games are played
        '''PTS'''
        if league == 'MLS':
            stand.pts = (stand.won * 3) + stand.draw
        '''OTL'''
        if league == 'NHL':
            stand.otl = stand.glo + stand.gls
        else:
            # NOTE(review): for non-NHL leagues `otl` holds win percentage.
            stand.otl = stand.won / wl
        '''GB'''
        if league == 'MLB' or league == 'NBA':
            stand.gb = ((first.won - stand.won) + (stand.lost - first.lost)) / 2.0
        '''HOME'''
        if stand.hover or stand.hshot:
            stand.home = str(stand.hwon) + ' - ' + str(stand.hlost) + ' - ' + str(stand.hover + stand.hshot)
        else:
            stand.home = str(stand.hwon) + ' - ' + str(stand.hlost)
        if league == 'MLS':
            stand.home = str(stand.hwon) + ' - ' + str(stand.hlost) + ' - ' + str(stand.hdraw)
        '''ROAD'''
        if stand.rover or stand.rshot:
            stand.road = str(stand.rwon) + ' - ' + str(stand.rlost) + ' - ' + str(stand.rover + stand.rshot)
        else:
            stand.road = str(stand.rwon) + ' - ' + str(stand.rlost)
        if league == 'MLS':
            stand.road = str(stand.rwon) + ' - ' + str(stand.rlost) + ' - ' + str(stand.rdraw)
        if league != 'MLS':
            '''DIV'''
            stand.div = str(stand.dwon) + ' - ' + str(stand.dlost)
            '''CONF'''
            stand.conf = str(stand.cwon) + ' - ' + str(stand.clost)
            '''L10'''
            stand.l10 = str(stand.l10won) + ' - ' + str(stand.l10lost)
        if league == 'NCAA BB' or league == 'NBA':
            # Points for/against become per-game averages for basketball.
            stand.pf = stand.pf / wl
            stand.pa = stand.pa / wl
            stand.diff = stand.pf - stand.pa
        if league == 'MLB':
            stand.diff = stand.rf - stand.ra
        if league == 'MLS' or league == 'NHL':
            stand.diff = stand.gf - stand.ga
        if league == 'NCAA FB' or league == 'NFL':
            stand.diff = stand.pf - stand.pa
        if stand.slost > stand.swon:
            stand.streak = 'Lost ' + str(stand.slost)
        else:
            stand.streak = 'Won ' + str(stand.swon)
        # Render one "(...)" VALUES tuple; consumed by insert_standing().
        insertdata.append("("
                          + str(stand.id) +
                          ","
                          + str(stand.won) +
                          ","
                          + str(stand.lost) +
                          ",'"
                          + str(stand.conf) +
                          "','"
                          + str(stand.div) +
                          "','"
                          + str(stand.home) +
                          "','"
                          + str(stand.road) +
                          "','"
                          + str(stand.l10) +
                          "','"
                          + str(stand.streak) +
                          "',"
                          + str(stand.otl) +
                          ","
                          + str(stand.gb) +
                          ","
                          + str(stand.pf) +
                          ","
                          + str(stand.pa) +
                          ","
                          + str(stand.rf) +
                          ","
                          + str(stand.ra) +
                          ","
                          + str(stand.gf) +
                          ","
                          + str(stand.ga) +
                          ","
                          + str(stand.diff) +
                          ","
                          + str(stand.team_id) +
                          ",'"
                          + str(stand.team_feed_id) +
                          "',"
                          + str(stand.lrank) +
                          ","
                          + str(stand.drank) +
                          ",'"
                          + str(stand.conference_feed_id) +
                          "','"
                          + str(stand.division_feed_id) +
                          "','"
                          + str(stand.season_feed_id) +
                          "',"
                          + str(stand.pts) +
                          ","
                          + str(stand.draw) +
                          ")")
    return insertdata
def prepare_division(xdivisioncontent, season_feed_id, conference_feed_id, division_feed_id):
    """Parse one division/conference XML node into a Standings object.

    Uses the module-global cursor `c` for team/standing lookups.
    NOTE(review): only the object for the LAST <team-content> is returned
    (callers wrap it in a one-element list), and an unknown team feed id
    aborts the loop early via break -- confirm both are intended.
    """
    for xteamcontent in xdivisioncontent.findAll('team-content'):
        team_feed_id = xteamcontent.contents[0].contents[0].string
        tid = get_team_by_feedid(c, team_feed_id)
        if tid is None:
            break
        team_id = int(tid[0] or 0)
        data = Standings()
        # Re-use an existing row id so insert_standing() upserts.
        sid = get_stand(c, season_feed_id, team_id)
        if sid is not None:
            data.id = int(sid[0] or 0)
        for xstatgroup in xteamcontent.findAll('stat-group'):
            for xstat in xstatgroup.findAll('stat'):
                group = xstatgroup.contents[0].string
                type = xstat.get('type')
                num = float(xstat.get('num') or 0)
                # Map each (stat-group, stat-type) pair onto a Standings field.
                if (group == 'league-standings' and type == 'games_won'):
                    data.won = int(num or 0)
                if (group == 'league-standings' and type == 'games_lost'):
                    data.lost = int(num or 0)
                if (group == 'league-standings' and type == 'games_tied' and league == 'MLS'):
                    data.draw = int(num or 0)
                if (group == 'league-standings' and type == 'games_lost_shootout' and league == 'NHL'):
                    data.gls = int(num or 0)
                if (group == 'league-standings' and type == 'games_lost_overtime' and league == 'NHL'):
                    data.glo = int(num or 0)
                if (group == 'division-standings' and type == 'division_ranking'):
                    data.drank = int(num or 0)
                if (group == 'conference-standings' and type == 'conference_ranking'):
                    data.lrank = int(num or 0)
                if (group == 'home-league-standings' and type == 'games_won'):
                    data.hwon = int(num or 0)
                if (group == 'home-league-standings' and type == 'games_lost'):
                    data.hlost = int(num or 0)
                if (group == 'home-league-standings' and type == 'games_lost_shootout'):
                    data.hshot = int(num or 0)
                if (group == 'home-league-standings' and type == 'games_lost_overtime'):
                    data.hover = int(num or 0)
                if (group == 'home-league-standings' and type == 'games_tied' and league == 'MLS'):
                    data.hdraw = int(num or 0)
                if (group == 'away-league-standings' and type == 'games_won'):
                    data.rwon = int(num or 0)
                if (group == 'away-league-standings' and type == 'games_lost'):
                    data.rlost = int(num or 0)
                if (group == 'away-league-standings' and type == 'games_lost_shootout'):
                    data.rshot = int(num or 0)
                if (group == 'away-league-standings' and type == 'games_lost_overtime'):
                    data.rover = int(num or 0)
                if (group == 'away-league-standings' and type == 'games_tied' and league == 'MLS'):
                    data.rdraw = int(num or 0)
                if (group == 'conference-standings' and type == 'games_won' and league != 'MLS'):
                    data.cwon = int(num or 0)
                if (group == 'conference-standings' and type == 'games_lost' and league != 'MLS'):
                    data.clost = int(num or 0)
                if (group == 'division-standings' and type == 'games_won' and league != 'MLS'):
                    data.dwon = int(num or 0)
                if (group == 'division-standings' and type == 'games_lost' and league != 'MLS'):
                    data.dlost = int(num or 0)
                if (group == 'last-10-league-standings' and type == 'games_won' and league != 'MLS'):
                    data.l10won = int(num or 0)
                if (group == 'last-10-league-standings' and type == 'games_lost' and league != 'MLS'):
                    data.l10lost = int(num or 0)
                if (group == 'league-standings' and type == 'points_for' and (
                        league == 'NCAA BB' or league == 'NBA' or league == 'NCAA FB' or league == 'NFL')):
                    data.pf = float(num or 0)
                if (group == 'league-standings' and type == 'points_against' and (
                        league == 'NCAA BB' or league == 'NBA' or league == 'NCAA FB' or league == 'NFL')):
                    data.pa = float(num or 0)
                if (group == 'league-standings' and type == 'runs_for' and league == 'MLB'):
                    data.rf = int(num or 0)
                if (group == 'league-standings' and type == 'runs_against' and league == 'MLB'):
                    data.ra = int(num or 0)
                if (group == 'league-standings' and type == 'goals_for' and league == 'NHL'):
                    data.gf = int(num or 0)
                if (group == 'league-standings' and type == 'goals_against' and league == 'NHL'):
                    data.ga = int(num or 0)
                if (group == 'league-standings' and type == 'goals' and league == 'MLS'):
                    data.gf = int(num or 0)
                if (group == 'league-standings' and type == 'goals_against' and league == 'MLS'):
                    data.ga = int(num or 0)
                if (group == 'league-standings' and type == 'loss_streak'):
                    data.slost = int(num or 0)
                if (group == 'league-standings' and type == 'win_streak'):
                    data.swon = int(num or 0)
        data.team_id = team_id
        data.season_feed_id = season_feed_id
        data.conference_feed_id = conference_feed_id
        data.division_feed_id = division_feed_id
        data.team_feed_id = team_feed_id
    return data
# Calculating the current datetime minus 1 day
d = datetime.today() - timedelta(days=1)

try:
    conn = MySQLdb.connect("localhost", "", "", "")
    c = conn.cursor()
    # With an explicit season argument fetch that season's feed; otherwise
    # fetch everything newer than one day ago.
    if len(sys.argv) == 5:
        season = sys.argv[4]
        d = feedparser.parse("http://xml.sportsdirectinc.com/Atom?feed=/" + sport + "/" + screen + "/" + season)
    else:
        d = feedparser.parse(
            "http://xml.sportsdirectinc.com/Atom?feed=/" + sport + "/" + screen + "&newerThan=" + d.strftime(
                "%Y-%m-%dT%H:%M:%S"))
    items = []
    if len(d['entries']):
        for item in d.entries:
            if (item.link.find('/' + direct + '/') > 0):
                # NOTE(review): both branches append the same link, so the
                # `news` filter currently has no effect -- confirm intent.
                if (item.link.find('/' + str(news) + '/') > 0 and news != False):
                    items.append(str(item.link))
                else:
                    items.append(str(item.link))
    items.reverse()  # process oldest entries first
    for item in items:
        page = urllib2.urlopen(item)
        soup = BeautifulStoneSoup(page)
        # Upsert the season record(s) referenced by this feed item.
        for xseason in soup.findAll('season'):
            season = Season()
            season.feed_id = xseason.contents[0].string
            season.name = xseason.contents[1].string
            season.startdate = xseason.contents[2].contents[0].string
            season.enddate = xseason.contents[2].contents[1].string
            lid = get_league(c, league)
            season.league_id = lid[0]
            sid = get_season(c, season.feed_id)
            if sid is None:
                insert_season(c, season)  # insert season
                season_id = conn.insert_id()
            else:
                season_id = sid[0]
            season_feed_id = season.feed_id
        # Walk conferences; leagues without divisions rank by conference.
        for xconferencecontent in soup.findAll('conference-content'):
            conference_feed_id = xconferencecontent.contents[0].contents[0].string
            division_feed_id = ''
            xdiv = xconferencecontent.findAll('division-content')
            if len(xdiv):
                for xdivisioncontent in xdiv:
                    division_feed_id = xdivisioncontent.contents[0].contents[0].string
                    division = []
                    division.append(
                        prepare_division(xdivisioncontent, season_feed_id, conference_feed_id, division_feed_id))
                    data = str.join(',', prepare_data(division))
                    insert_standing(c, data)
            else:
                division = []
                division.append(
                    prepare_division(xconferencecontent, season_feed_id, conference_feed_id, division_feed_id))
                data = str.join(',', prepare_data(division, True))
                insert_standing(c, data)
        page.close()
    conn.commit()
    c.close()
    conn.close()
except MySQLdb.Error, e:
    print "Guru Meditation #%d: %s" % (e.args[0], e.args[1])
    sys.exit(1)
| 2.78125 | 3 |
"""Class representations of the `ChannelSection` resource."""
from dataclasses import dataclass, field

from .utils import ResponseResourceBase
@dataclass
class Snippet:
    # Section metadata as returned by the API; every field is optional
    # and defaults to None.
    type: str = None
    channel_id: str = None
    title: str = None
    position: int = None
@dataclass
class ContentDetails:
    # Ids of the playlists/channels a section displays; optional.
    playlists: list[str] = None
    channels: list[str] = None
@dataclass
class ChannelSectionResource(ResponseResourceBase):
    """A single channelSection API resource."""
    id: str = None
    # default_factory gives each instance its own sub-object.  The previous
    # `= ContentDetails()` / `= Snippet()` defaults were single shared
    # instances, so mutating one resource's sub-object leaked into every
    # other resource constructed without explicit values.
    content_details: ContentDetails = field(default_factory=ContentDetails)
    snippet: Snippet = field(default_factory=Snippet)
@dataclass
class ChannelSectionListResponse(ResponseResourceBase):
    """List response wrapping zero or more ChannelSectionResource items."""
    # Trailing dataset-extraction residue that made this line a syntax
    # error in the source dump has been removed.
    items: list[ChannelSectionResource] = None
from datetime import datetime, timedelta
from django_elasticsearch_dsl.registries import registry
from celery.decorators import periodic_task
from celery.task.schedules import crontab
from django.apps import apps
from django.http.request import HttpRequest
from django.core.cache import cache
from rest_framework.request import Request
from researchhub_document.related_models.constants.document_type import (
DISCUSSION,
ELN,
PAPER,
POSTS,
ALL,
)
from paper.utils import get_cache_key
from researchhub.celery import app
from researchhub.settings import (
APP_ENV,
STAGING,
PRODUCTION,
)
@app.task
def preload_trending_documents(
    document_type,
    hub_id,
    ordering,
    time_difference
):
    """Celery task: render and cache page 1 of a trending-documents feed.

    Builds the unified-document feed for (document_type, hub_id, ordering)
    over a rolling window of `time_difference` days, then stores the
    paginated response under a 'hub' cache key.  Returns the cached data.
    """
    # Imported lazily to avoid circular imports at worker start-up.
    from researchhub_document.views import ResearchhubUnifiedDocumentViewSet
    from researchhub_document.serializers import (
        DynamicUnifiedDocumentSerializer
    )

    # 07:00 today anchors all rolling windows.
    initial_date = datetime.now().replace(
        hour=7,
        minute=0,
        second=0,
        microsecond=0
    )
    end_date = datetime.now()
    if time_difference > 365:
        cache_pk = f'{document_type}_{hub_id}_{ordering}_all_time'
        start_date = datetime(
            year=2018,
            month=12,
            day=31,
            hour=7
        )
    elif time_difference == 365:
        cache_pk = f'{document_type}_{hub_id}_{ordering}_year'
        start_date = initial_date - timedelta(days=365)
    elif time_difference == 30 or time_difference == 31:
        cache_pk = f'{document_type}_{hub_id}_{ordering}_month'
        start_date = initial_date - timedelta(days=30)
    elif time_difference == 7:
        cache_pk = f'{document_type}_{hub_id}_{ordering}_week'
        start_date = initial_date - timedelta(days=7)
    else:
        # NOTE(review): duplicates the initial_date computation above.
        start_date = datetime.now().replace(
            hour=7,
            minute=0,
            second=0,
            microsecond=0
        )
        cache_pk = f'{document_type}_{hub_id}_{ordering}_today'

    # Map the model ordering onto the feed's query-string vocabulary.
    query_string_ordering = 'top_rated'
    if ordering == 'removed':
        query_string_ordering = 'removed'
    elif ordering == '-score':
        query_string_ordering = 'top_rated'
    elif ordering == '-discussed':
        query_string_ordering = 'most_discussed'
    elif ordering == '-created_date':
        query_string_ordering = 'newest'
    elif ordering == '-hot_score':
        query_string_ordering = 'hot'

    request_path = '/api/paper/get_hub_papers/'
    if STAGING:
        http_host = 'staging-backend.researchhub.com'
        protocol = 'https'
    elif PRODUCTION:
        http_host = 'backend.researchhub.com'
        protocol = 'https'
    else:
        http_host = 'localhost:8000'
        protocol = 'http'

    start_date_timestamp = int(start_date.timestamp())
    end_date_timestamp = int(end_date.timestamp())
    query_string = 'page=1&start_date__gte={}&end_date__lte={}&ordering={}&hub_id={}&'.format(
        start_date_timestamp,
        end_date_timestamp,
        query_string_ordering,
        hub_id
    )
    http_meta = {
        'QUERY_STRING': query_string,
        'HTTP_HOST': http_host,
        'HTTP_X_FORWARDED_PROTO': protocol,
    }

    # Fabricate a DRF request so the viewset renders exactly as it would
    # for a real client on this URL (pagination links included).
    document_view = ResearchhubUnifiedDocumentViewSet()
    http_req = HttpRequest()
    http_req.META = http_meta
    http_req.path = request_path
    req = Request(http_req)
    document_view.request = req

    documents = document_view.get_filtered_queryset(
        document_type,
        ordering,
        hub_id,
        start_date,
        end_date
    )
    page = document_view.paginate_queryset(documents)
    context = document_view._get_serializer_context()
    serializer = DynamicUnifiedDocumentSerializer(
        page,
        _include_fields=[
            'created_by',
            'documents',
            'document_type',
            'hot_score',
            'score'
        ],
        many=True,
        context=context,
    )
    serializer_data = serializer.data

    paginated_response = document_view.get_paginated_response(
        serializer_data
    )

    # Cache indefinitely; refreshed by the next run of this task.
    cache_key_hub = get_cache_key('hub', cache_pk)
    cache.set(
        cache_key_hub,
        paginated_response.data,
        timeout=None
    )

    return paginated_response.data
# Executes every 5 minutes
@periodic_task(
    run_every=crontab(minute='*/5'),
    priority=1,
    options={'queue': f'{APP_ENV}_core_queue'}
)
def preload_hub_documents(
    document_type=ALL.lower(),
    hub_ids=None
):
    """Cache the 15 hottest non-removed documents for each hub.

    document_type selects which concrete document types to include;
    hub_ids optionally restricts the hubs processed.  Returns the list
    of serialised payloads, one per hub.
    """
    # Imported lazily to avoid circular imports at worker start-up.
    from researchhub_document.views import ResearchhubUnifiedDocumentViewSet
    from researchhub_document.serializers import (
        DynamicUnifiedDocumentSerializer
    )

    Hub = apps.get_model('hub.Hub')
    hubs = Hub.objects.all()
    document_view = ResearchhubUnifiedDocumentViewSet()

    # Translate the aggregate type name into concrete document types.
    if document_type == ALL.lower():
        document_types = [PAPER, ELN, DISCUSSION]
    elif document_type == POSTS.lower():
        document_types = [ELN, DISCUSSION]
    else:
        document_types = [PAPER]

    if hub_ids:
        hubs = hubs.filter(id__in=hub_ids)

    data = []
    for hub in hubs.iterator():
        hub_name = hub.slug
        cache_pk = f'{document_type}_{hub_name}'
        documents = hub.related_documents.get_queryset().filter(
            document_type__in=document_types,
            is_removed=False
        ).order_by(
            '-hot_score'
        )[:15]
        cache_key = get_cache_key('documents', cache_pk)
        context = document_view._get_serializer_context()
        serializer = DynamicUnifiedDocumentSerializer(
            documents,
            _include_fields=[
                'created_by',
                'documents',
                'document_type',
                'hot_score',
                'score'
            ],
            many=True,
            context=context
        )
        serializer_data = serializer.data
        data.append(serializer_data)
        # Cache indefinitely; refreshed on the next 5-minute run.
        cache.set(
            cache_key,
            serializer_data,
            timeout=None
        )
    return data
@app.task
def update_elastic_registry(post):
    """Celery task: push *post* into the search index registry.

    NOTE(review): `registry` is presumably the django-elasticsearch-dsl
    document registry; `update` re-indexes the instance — confirm against
    the module imports.
    """
    registry.update(post)
| 1.96875 | 2 |
luminoth/tools/dataset/cli.py | PiterPentester/luminoth | 2 | 12760464 | <filename>luminoth/tools/dataset/cli.py<gh_stars>1-10
import click
from .transform import transform
@click.group(help='Groups of commands to manage datasets')
def dataset():
    # Container group only; concrete subcommands are registered below.
    pass


# Register the `transform` subcommand under the `dataset` group.
dataset.add_command(transform)
| 1.3125 | 1 |
core/storage/question/gae_models.py | bching/oppia | 1 | 12760465 | # Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for storing the question data models."""
from core.platform import models
import utils
from google.appengine.ext import ndb
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
class QuestionSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
    """Storage model for the metadata for a question snapshot."""
    # No extra fields; all behaviour comes from the base snapshot model.
    pass


class QuestionSnapshotContentModel(base_models.BaseSnapshotContentModel):
    """Storage model for the content of a question snapshot."""
    # No extra fields; all behaviour comes from the base snapshot model.
    pass
class QuestionModel(base_models.VersionedModel):
    """Versioned storage model for a single question.

    Instance IDs have the form
    {{collection_id}}.{{random_hash_of_16_chars}}
    """

    SNAPSHOT_METADATA_CLASS = QuestionSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = QuestionSnapshotContentModel
    ALLOW_REVERT = True

    # The title of the question.
    title = ndb.StringProperty(required=True, indexed=True)
    # A dict representing the question data.
    question_data = ndb.JsonProperty(indexed=False)
    # The schema version for the data.
    question_data_schema_version = (
        ndb.IntegerProperty(required=True, indexed=True))
    # The ID of collection containing the question.
    collection_id = ndb.StringProperty(required=True, indexed=True)
    # The ISO 639-1 code for the language this question is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)

    @classmethod
    def _get_new_id(cls, collection_id):
        """Generate a unique ID of the form
        {{collection_id}}.{{random_hash_of_16_chars}}.

        Args:
            collection_id: str. The ID of the collection containing the
                question.

        Returns:
            str. A question ID that is not yet taken.

        Raises:
            Exception: No free ID was found within the retry budget.
        """
        for _attempt in range(base_models.MAX_RETRIES):
            random_hash = utils.convert_to_hash(
                str(utils.get_random_int(base_models.RAND_RANGE)),
                base_models.ID_LENGTH)
            candidate_id = '%s.%s' % (collection_id, random_hash)
            # Only accept the candidate if no entity already uses it.
            if not cls.get_by_id(candidate_id):
                return candidate_id
        raise Exception(
            'The id generator for QuestionModel is producing too many '
            'collisions.')

    @classmethod
    def create(
            cls, title, question_data, question_data_schema_version,
            collection_id, language_code):
        """Build (but do not persist) a new QuestionModel entry.

        Args:
            title: str. The title of the question.
            question_data: dict. A dict representing the question data.
            question_data_schema_version: int. The schema version for the
                data.
            collection_id: str. The ID of the collection containing the
                question.
            language_code: str. The ISO 639-1 code for the language this
                question is written in.

        Returns:
            QuestionModel. The newly constructed instance.

        Raises:
            Exception: A collision-free ID could not be generated.
        """
        question_id = cls._get_new_id(collection_id)
        return cls(
            id=question_id,
            title=title,
            question_data=question_data,
            question_data_schema_version=question_data_schema_version,
            collection_id=collection_id,
            language_code=language_code)
| 2.0625 | 2 |
cogs/carguai.py | appuchias/policia_nacional_bot | 0 | 12760466 | <gh_stars>0
from typing import Union
import discord
from discord.channel import TextChannel
from discord.ext import commands
import asyncio
from discord.ext.commands.core import command
class Carguai(commands.Cog):
    """Admin-only commands that relay messages/embeds to other channels."""

    def __init__(self, client) -> None:
        self.client = client

    @commands.command()
    @commands.has_permissions(administrator=True)
    async def di(
        self,
        ctx,
        message_channel: Union[discord.TextChannel, int] = 571380940664995842,
        *message: str,
    ) -> None:
        """Echo *message*, wait for a 👍 confirmation, then relay it.

        `message_channel` may be a channel mention or a raw channel ID.
        """
        # BUGFIX: Client.fetch_channel is a coroutine and was previously
        # assigned without `await`, so the raw-ID branch never produced a
        # usable channel object.
        if isinstance(message_channel, discord.TextChannel):
            channel = message_channel
        else:
            channel = await self.client.fetch_channel(message_channel)
        message = '"*' + " ".join(message) + '*"'
        await ctx.send(message)
        msg = await ctx.send("Correcto? (Si no es correcto espera un minuto)")
        await msg.add_reaction("👍")

        def check(reaction, user):
            # Only a 👍 from the invoking author on the prompt message counts.
            return (
                reaction.message.id == msg.id
                and user.id == ctx.message.author.id
                and str(reaction.emoji) == "👍"
            )

        try:
            reaction, user = await self.client.wait_for("reaction_add", timeout=60.0, check=check)
        except asyncio.TimeoutError:
            # No confirmation within a minute: abort and clean up.
            await ctx.send("Tiempo agotado")
            await msg.clear_reactions()
        else:
            await channel.send(message)

    @commands.command()
    @commands.has_permissions(administrator=True)
    async def die(
        self, ctx, message_channel: Union[discord.TextChannel, int], *message: str,
    ) -> None:
        """Relay *message* to the given channel as a blurple code-block embed."""
        # BUGFIX: same missing `await` on fetch_channel as in `di`.
        if isinstance(message_channel, discord.TextChannel):
            channel = message_channel
        else:
            channel = await self.client.fetch_channel(message_channel)
        msg = [word.replace("\n", "llllll") for word in message]
        embed = discord.Embed(
            description=f'```{" ".join(msg)}```',
            color=discord.Color.blurple(),
        )
        await channel.send(embed=embed)
await channel.send(embed=embed)
def setup(client):
    # discord.py extension entry point: attach the Carguai cog to the bot.
    client.add_cog(Carguai(client))
| 2.5625 | 3 |
data/external/repositories/238397/ucla-cs145-kaggle-master/labelpropagationclassifier.py | Keesiu/meta-kaggle | 0 | 12760467 | """
labelpropagationclassifier.py
Builds a label propagation classifier
Takes a very long time, unknown accuracy/execution time
"""
from classifier import Classifier
from matrixdatabase import MatrixDatabase
from sklearn.semi_supervised import LabelPropagation
class LabelPropagationClassifier(Classifier):
    """Semi-supervised label-propagation classifier over the recipe matrix.

    The underlying sklearn model is fitted lazily on the first call to
    `classify`, using the full training matrix from the matrix database.
    """

    def __init__(self, matrixdatabase):
        self._matrix_database = matrixdatabase
        self._has_fit = False
        self._lbl = LabelPropagation()

    def learn(self, ingredients, cuisine):
        # Training data is consumed in bulk at classify-time; nothing to do.
        return

    def classify(self, ingredients):
        """Predict the cuisine for a recipe given its ingredients."""
        if not self._has_fit:
            # Lazy fit on the whole (densified) training matrix.
            matrix, classes = self._matrix_database.make_train_matrix()
            matrix = matrix.toarray()
            self._lbl = self._lbl.fit(matrix, classes)
            # Parenthesized call works on both Python 2 and 3 (was a
            # Python-2-only `print` statement).
            print('Fitting complete...')
            self._has_fit = True
        output = self._lbl.predict(self._matrix_database.make_row_from_recipe(ingredients).toarray())
        # Removed extraction junk ("| 2.828125 | 3 |") that was fused onto
        # this line and made the file a syntax error.
        return output[0]
LeetCode/python/031-060/039-combination-sum-ii/solution.py | shootsoft/practice | 0 | 12760468 | class Solution:
# @param {integer[]} candidates
# @param {integer} target
# @return {integer[][]}
def combinationSum2(self, candidates, target):
self.results = []
candidates.sort()
self.combination(candidates, target, 0, [])
return self.results
def combination(self,candidates, target, start, result):
if target == 0 :
self.results.append(result[:])
elif target > 0:
for i in range(start, len(candidates)):
if i > start and candidates[i] == candidates[i-1]:
continue
result.append(candidates[i])
self.combination(candidates, target-candidates[i], i + 1, result)
result.pop() | 3.109375 | 3 |
13.range.py | shaunakganorkar/PythonMeetup-2014 | 0 | 12760469 | for i in range(0,100):
print i,
| 2.296875 | 2 |
DataConnectors/Trend Micro/AzureFunctionTrendMicroXDR/queue_trigger_oat_poison/__init__.py | ChonoN/Azure-Sentinel | 1 | 12760470 | <reponame>ChonoN/Azure-Sentinel<gh_stars>1-10
import azure.functions as func
import os
from azure.storage.queue import QueueClient, TextBase64EncodePolicy
def main(msg: func.QueueMessage) -> None:
    """Re-enqueue the incoming queue message onto `oat-queue`, delayed 1 h."""
    client = QueueClient.from_connection_string(
        os.environ['AzureWebJobsStorage'],
        'oat-queue',
        message_encode_policy=TextBase64EncodePolicy(),
    )
    payload = msg.get_body().decode()
    # visibility_timeout hides the message for 3600 s before it becomes
    # available to consumers.
    client.send_message(payload, visibility_timeout=3600)
| 1.78125 | 2 |
flask_debug/__init__.py | mbr/Flask-Debug | 11 | 12760471 | from .dbg import dbg
from .security import requires_debug
class Debug(object):
    """Flask extension that registers the flask-debug blueprint on an app."""

    def __init__(self, app=None):
        # Imported for its side effects only: makes external plugins
        # importable before dbg loads them.
        import flask_debug_plugins
        dbg._debug_load_plugins()
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Attach the debug blueprint to *app* (app-factory pattern)."""
        app.config.setdefault('FLASK_DEBUG_DISABLE_STRICT', False)
        app.register_blueprint(dbg)
| 1.804688 | 2 |
model_load.py | m-bizhani/Digital-rock-image-processing | 0 | 12760472 | from tensorflow.keras.models import load_model
import tensorflow as tf
def PSNR(y_true, y_pred):
    """Peak signal-to-noise ratio metric; pixel values assumed in [0, 1]."""
    max_pixel = 1.0
    return tf.image.psnr(y_true, y_pred, max_val =max_pixel)

def ssim(y_true, y_pred):
    """Structural similarity (SSIM) with an 11x11 Gaussian window."""
    max_val = 1.0
    return tf.image.ssim(y_true, y_pred, max_val = max_val, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)

def mssim(y_true, y_pred):
    """Multi-scale SSIM with a 7x7 window."""
    max_val = 1.0
    return tf.image.ssim_multiscale(
        y_true, y_pred, max_val = max_val, filter_size=7,
        filter_sigma=1.5, k1=0.01, k2=0.03)

def SSIM_loss():
    """Factory returning a combined (1 - mean SSIM) + 0.1 * MAE loss."""
    def SSIMLoss(y_true, y_pred):
        # l1 = tf.keras.losses.mean_absolute_error(y_true, y_pred)
        ss = 1 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, max_val = 1.0))
        # NOTE(review): named `l2` but mean_absolute_error is the L1 term.
        l2 = tf.keras.losses.mean_absolute_error(y_true, y_pred)
        return ss + 0.1*l2
    return SSIMLoss
def get_combined_models(path = './weights/', load_individual_models = False):
    """Load the three pretrained stages (DIDN -> MIMO -> DFCAN) and stack them.

    Layer and model names are suffixed ("_1", "_2", "_3") so the merged
    Sequential model contains no duplicate layer names.

    :param path: directory containing the pretrained .h5 weight files
    :param load_individual_models: if True, also return the three sub-models
    :return: the stacked Sequential model, or (didn, mimo, dfcan, model)
        when load_individual_models is True
    """
    didn = load_model(path + 'DIDN_l1.h5',
                    custom_objects={'PSNR': PSNR, 'ssim':ssim, 'mssim': mssim, 'tf': tf} ,
                    compile=False)

    # Rename layers to avoid name clashes in the combined model.
    for layer in didn.layers:
        layer._name = layer._name + str("_1")
    didn._name = 'didn'

    mimo = load_model(path + 'MIMO_l1.h5',
                    custom_objects={'PSNR': PSNR, 'ssim':ssim, 'mssim': mssim, 'tf': tf} ,
                    compile=False)
    mimo._name = 'mimo'
    for layer in mimo.layers:
        layer._name = layer._name + str("_2")

    dfcan = load_model(path + 'DFCAN-ssim-l2.h5',
                    custom_objects={'PSNR': PSNR, 'ssim':ssim, 'mssim': mssim, 'tf': tf, 'SSIM_loss': SSIM_loss} ,
                    compile=False)
    dfcan._name = 'dfcan'
    for layer in dfcan.layers:
        layer._name = layer._name + str("_3")

    # Chain the three stages end to end.
    model = tf.keras.Sequential([didn, mimo, dfcan])
    # model.trainable = False

    if load_individual_models:
        return didn, mimo, dfcan, model
    else:
        return model
if __name__ == '__main__':
    # Smoke test: build the stacked model and dump its architecture.
    model = get_combined_models()
    # NOTE(review): summary() prints to stdout and returns None, so this
    # additionally prints "None".
    print(model.summary())
| 2.359375 | 2 |
geeksw/fitting/convolution.py | guitargeek/geeksw | 2 | 12760473 | import numpy as np
def conv(f, g):
    """Return a callable evaluating the numerical convolution (f * g).

    The returned function expects an equidistant grid and approximates the
    convolution integral by a Riemann sum on a grid extended by one period
    on each side, so support of f/g slightly outside the grid is captured.
    """
    def convolved(x):
        """Input x has to be equidistant!"""
        n = len(x)
        step = x[1] - x[0]
        # Extend the grid by n points on both sides.
        grid = np.concatenate([x[-n:] - n * step, x, x[:n] + n * step])
        m = len(grid)
        tiled = np.tile(grid, (m, 1))
        # distances[i, j] = grid[j] - grid[i]
        distances = tiled - tiled.T
        values = np.sum(g(-distances) * np.tile(f(grid), (m, 1)), axis=1) * step
        # Drop the padding and return values on the original grid.
        return values[n:-n]
    return convolved
from scipy.signal import fftconvolve
def fconv(f, g):
    """Return a callable evaluating the FFT-based convolution (f * g)."""
    def convolved(x):
        """Input x has to be equidistant!"""
        n = len(x)
        step = x[1] - x[0]
        # Pad the grid by half a period on each side.
        grid = np.concatenate([x[-n // 2:] - n * step, x, x[:n // 2] + n * step])
        full = fftconvolve(f(grid), g(grid), mode="full") * (grid[1] - grid[0])
        # The full convolution lives on the doubled grid; take every other
        # sample and interpolate back onto the requested x-values.
        return np.interp(x, grid * 2, full[::2])
    return convolved
| 2.84375 | 3 |
tutoriales_basicos/scripts/laser.py | lfzarazuaa/LiderSeguidorA | 1 | 12760474 | #!/usr/bin/env python
import numpy as np
import rospy
import time
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseWithCovarianceStamped, PointStamped
from tutoriales_basicos.msg import Histogram
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from std_msgs.msg import Int16
from std_msgs.msg import Float32
BURGER_MAX_LIN_VEL = 0.22*.7
BURGER_MAX_ANG_VEL = 2.84
#import matplotlib.pyplot as plt
def min(c1, c2):
    """Smallest wrapped difference between sector indices c1 and c2.

    NOTE(review): this intentionally shadows the builtin `min`; the name is
    kept for compatibility with existing callers in this module.
    """
    wrap = 4
    delta = c1 - c2
    return np.amin([delta, delta - wrap, delta + wrap])
class LaserSub:
    """Subscribes to tb3_0's laser scan and publishes a 90-sector polar
    obstacle histogram (VFH-style) on /tb3_0/Histogram."""

    def __init__(self):
        self.sub_l_0 = rospy.Subscriber("/tb3_0/scan", LaserScan, self.scan_callback, queue_size=1)
        self.pub_H = rospy.Publisher("/tb3_0/Histogram", Histogram, queue_size=10)
        self.r = 0.3       # robot radius -- presumably metres, TODO confirm
        self.s = 0.3       # safety clearance -- presumably metres, TODO confirm
        self.alfa = 4      # sector size: 4 degrees (360/4 = 90 sectors)
        self.a = 5         # histogram magnitude constants: a - b*d^2
        self.b = 1
        self.H = np.zeros(90)   # polar obstacle histogram, one cell per sector
        self.Hp = list()
    #def steeringCallback(self,data):
    #    if self.Hp[int(data.steering/4)] < 1:
    #        twist = Twist()
    #        twist.linear.x = BURGER_MAX_LIN_VEL; twist.linear.y = 0.0; twist.linear.z = 0.0
    #        #print(twist.linear.x)
    #        twist.angular.x = 0.0; twist.angular.y = 0.0; twist.angular.z = data.steering*Kp
    #        self.pub.publish(twist)
    #    else:
    #        for k in range(90):
    #            if self.Hp[k] < 1:
    #                if k == 0:
    #                    gmin = 5*min(k,int(data.steering/4)) + 2*min(k,int(data.yaw/4))
    #                    gpast = gmin
    #                    orientation = k
    #                else:
    #                    gmin = 5*min(k,int(data.steering/4)) + 2*min(k,int(data.yaw/4))
    #                    if gmin < gpast:
    #                        gpast = gmin
    #                        orientation = k

    def scan_callback(self,data):
        """Rebuild the polar histogram from one LaserScan and publish it."""
        # Save readings into the histogram.
        #print(data.ranges)
        self.H = np.zeros(90)  # create the 90-sector vector
        size = np.size(data.ranges)  # number of beams (360)
        for beta in range(size):  # loop up to 360
            #print(data.ranges[beta])
            if data.ranges[beta] > 2:  # if the distance is greater than 2
                d = 0  # treat as free space
                #print(beta, d)
            else:
                d = data.ranges[beta]  # otherwise keep the measured distance
                #print(beta, d)
            k = int((beta)/self.alfa)  # k is the sector currently being updated
            # Only the frontal field of view contributes to the histogram.
            if beta<120 or (beta>240 and beta<360):
                #if beta>(beta - np.arcsin((self.r + self.s)/d)) and beta<(beta + np.arcsin((self.r + self.s)/d)):
                previus = self.H[k]
                # Accumulate obstacle magnitude: closer obstacles weigh more.
                self.H[k]=(previus + (15*(self.a-self.b*d*d)))
        msg_to_send = Histogram()
        msg_to_send.Histogram = self.H
        self.pub_H.publish(msg_to_send)
def main():
    """Initialise the ROS node and process callbacks until shutdown."""
    try:
        rospy.init_node('LaseSub')
        LaserSub()  # constructor creates publishers / subscribers
        rospy.spin()
    except rospy.ROSInterruptException:
        # Normal shutdown path (Ctrl-C / node kill): exit quietly.
        pass


if __name__=="__main__":
    # Removed extraction junk ("| 2.328125 | 2 |") that was fused onto the
    # main() call and made the file a syntax error.
    main()
migrations/versions/164efab664ba_add_abbreviations.py | edbeard/csrweb | 2 | 12760475 | """Add abbreviations
Revision ID: 164efab664ba
Revises: <KEY>
Create Date: 2016-03-14 01:27:28.956379
"""
# revision identifiers, used by Alembic.
revision = '164efab664ba'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Schema upgrade: add a nullable JSONB `abbreviations` column to `cde_job`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('cde_job', sa.Column('abbreviations', postgresql.JSONB(), nullable=True))
    ### end Alembic commands ###


def downgrade():
    """Schema downgrade: drop the `abbreviations` column from `cde_job`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('cde_job', 'abbreviations')
    ### end Alembic commands ###
| 1.296875 | 1 |
basic/logger.py | realwrtoff/fastapi-demo | 1 | 12760476 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
from types import FrameType
from typing import cast
from loguru import logger
from datetime import timedelta
class InterceptHandler(logging.Handler):
    """Bridge that forwards stdlib `logging` records into loguru."""

    def emit(self, record: logging.LogRecord) -> None:  # pragma: no cover
        # Get corresponding Loguru level if it exists
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            # Unknown custom level name: fall back to the numeric level.
            level = str(record.levelno)

        # Find caller from where originated the logged message: walk up the
        # stack past logging's own frames so loguru reports the real caller.
        frame, depth = logging.currentframe(), 2
        while frame.f_code.co_filename == logging.__file__:  # noqa: WPS609
            frame = cast(FrameType, frame.f_back)
            depth += 1

        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage(),
        )
# logging configuration
LOGGING_LEVEL = logging.INFO
LOGGERS = ("uvicorn.asgi", "uvicorn.access")
logging.getLogger().handlers = [InterceptHandler()]
for logger_name in LOGGERS:
logging_logger = logging.getLogger(logger_name)
logging_logger.handlers = [InterceptHandler(level=LOGGING_LEVEL)]
# sink可自主配置
logger.configure(
handlers=[
{"sink": sys.stdout, "level": LOGGING_LEVEL},
{"sink": './log/runtime.log', "level": LOGGING_LEVEL, "rotation": timedelta(hours=1)}]
)
| 2.40625 | 2 |
03/crypto.py | Neroec/crypto | 0 | 12760477 | from Cryptodome.Random import get_random_bytes
from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHA256
from Cryptodome.Signature import PKCS1_v1_5
import rsa
FILES_DIR = 'files/'
STANDARD_FILE_PATH = f'{FILES_DIR}standard_file.txt'
ENCRYPTED_FILE_PATH = f'{FILES_DIR}encrypted_file.txt'
DECRYPTED_FILE_PATH = f'{FILES_DIR}decrypted_file.txt'
ENCRYPTED_SIGNATURE_PATH = f'{FILES_DIR}signature.txt'
SIGNATURES_DIR = 'signatures/'
def generate_session_key(size=16):
    """Generate a random session key for the symmetric cipher.

    :param size: key length in bytes (default 16, i.e. AES-128)
    :return: `size` cryptographically secure random bytes
    """
    # stdlib `secrets` is a CSPRNG, equivalent to
    # Cryptodome.Random.get_random_bytes but dependency-free.
    return secrets.token_bytes(size)
def encrypt_file(session_key, in_path=STANDARD_FILE_PATH, out_path=ENCRYPTED_FILE_PATH):
    """Encrypt a file with the symmetric AES algorithm (EAX mode) and save it.

    The 16-byte EAX nonce is written first, followed by the ciphertext.

    :param session_key: session key for the symmetric cipher
    :param in_path: path of the file to encrypt
    :param out_path: path of the encrypted output file
    :return: the encrypted file contents (ciphertext only, without the nonce)
    """
    with open(in_path, 'rb') as input_file:
        data = input_file.read()

    with open(out_path, 'wb') as output_file:
        cipher_aes = AES.new(session_key, AES.MODE_EAX)
        # Persist the nonce up front; it is needed for decryption.
        output_file.write(cipher_aes.nonce)
        encrypted_data = cipher_aes.encrypt(data)
        output_file.write(encrypted_data)

    return encrypted_data
def decrypt_file(session_key, in_path=ENCRYPTED_FILE_PATH, out_path=DECRYPTED_FILE_PATH):
    """Decrypt an AES (EAX mode) encrypted file and save the plaintext.

    NOTE(review): only `decrypt` is called (not `decrypt_and_verify`), so
    the EAX authentication tag is never checked here.

    :param session_key: session key for the symmetric cipher
    :param in_path: path of the encrypted input file
    :param out_path: path of the decrypted output file
    :return: the decrypted file contents
    """
    with open(in_path, 'rb') as input_file:
        # First 16 bytes are the nonce; the remainder is the ciphertext.
        nonce, data = [input_file.read(size) for size in (16, -1)]

    with open(out_path, 'wb') as output_file:
        cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)
        decrypted_data = cipher_aes.decrypt(data)
        output_file.write(decrypted_data)

    return decrypted_data
def sign_file_and_encrypt(private_key, public_key, in_path=STANDARD_FILE_PATH, out_path=ENCRYPTED_SIGNATURE_PATH):
    """Sign a file (SHA-256 / PKCS#1 v1.5) and save the encrypted signature.

    :param private_key: signer's private key
    :param public_key: verifier's public key, used to encrypt the signature
    :param in_path: path of the file being signed
    :param out_path: path where the encrypted signature is written
    :return: the raw (unencrypted) signature bytes
    """
    signature = PKCS1_v1_5.new(private_key)

    with open(in_path, 'rb') as input_file:
        file_hash = SHA256.new(input_file.read())

    signature = signature.sign(file_hash)
    # NOTE(review): `rsa` looks like a project-local module that encrypts
    # `signature` with `public_key` and writes it to `out_path` -- confirm.
    rsa.encrypt(signature, public_key, out_path)
    return signature
def sign_file(private_key, in_path=STANDARD_FILE_PATH, out_path=ENCRYPTED_SIGNATURE_PATH):
    """Sign a file (SHA-256 / PKCS#1 v1.5) and save the signature to a file.

    Unlike `sign_file_and_encrypt`, the signature is stored unencrypted.

    :param private_key: signer's private key
    :param in_path: path of the file being signed
    :param out_path: path where the signature is written
    :return: the signature bytes
    """
    signature = PKCS1_v1_5.new(private_key)

    with open(in_path, 'rb') as input_file:
        file_hash = SHA256.new(input_file.read())

    signature = signature.sign(file_hash)
    with open(out_path, 'wb') as output_file:
        output_file.write(signature)
    return signature
def verify_sign_and_decrypt(public_key, private_key, file_path=DECRYPTED_FILE_PATH, signature_path=ENCRYPTED_SIGNATURE_PATH):
    """Decrypt a stored signature and verify it against a file.

    :param public_key: signer's public key
    :param private_key: verifier's private key, used to decrypt the signature
    :param file_path: path of the (decrypted) file being checked
    :param signature_path: path of the encrypted signature
    :return: True if the signature is valid, False otherwise
    """
    with open(file_path, 'rb') as input_file:
        file_hash = SHA256.new(input_file.read())

    # NOTE(review): `rsa` looks like a project-local module -- confirm its
    # decrypt(private_key, path) contract.
    decrypted_signature = rsa.decrypt(private_key, signature_path)
    signature = PKCS1_v1_5.new(public_key)
    return signature.verify(file_hash, decrypted_signature)
def verify_sign(public_key, file_path=DECRYPTED_FILE_PATH, signature_path=ENCRYPTED_SIGNATURE_PATH):
    """Verify a stored (unencrypted) signature against a file.

    :param public_key: signer's public key
    :param file_path: path of the (decrypted) file being checked
    :param signature_path: path of the signature file
    :return: True if the signature is valid, False otherwise
    """
    with open(file_path, 'rb') as input_file:
        file_hash = SHA256.new(input_file.read())

    signature = PKCS1_v1_5.new(public_key)
    with open(signature_path, 'rb') as input_file:
        sign = input_file.read()
    return signature.verify(file_hash, sign)
| 2.890625 | 3 |
PythonExercicio/ex094.py | fotavio16/PycharmProjects | 0 | 12760478 | cadastro = list()
while True:
pessoa = dict()
pessoa['nome'] = str(input('Nome: '))
pessoa['sexo'] = str(input('Sexo: '))
pessoa['idade'] = int(input('Idade: '))
cadastro.append(pessoa.copy())
resp = str(input('Quer continuar? (S/N) '))
if resp in "Nn":
break
print("-="*30)
total = len(cadastro)
print(f'- O grupo tem {total} pessoas.')
mulheres = list()
soma = 0
for p in cadastro:
soma = soma + p['idade']
if p['sexo'] in "Ff":
mulheres.append(p['nome'])
media = soma / total
print(f'- A média de idade é de {media} anos.')
print(f'- As mulheres cadastradas foram: {mulheres}.')
print("- Lista das pessoas que estão acima da média:")
for p in cadastro:
if p['idade'] > media:
for k, v in p.items():
print(f'{k} = {v}; ', end='')
print()
print("<< ENCERRADO >>")
| 3.484375 | 3 |
metal_python/api/machine_api.py | metal-stack/metal-python | 7 | 12760479 | # coding: utf-8
"""
metal-api
API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501
OpenAPI spec version: v0.15.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from metal_python.api_client import ApiClient
class MachineApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def abort_reinstall_machine(self, id, body, **kwargs):  # noqa: E501
        """abort reinstall this machine  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.abort_reinstall_machine(id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the machine (required)
        :param V1MachineAbortReinstallRequest body: (required)
        :return: V1BootInfo
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by swagger-codegen; edit with care.
        # Unwrap the response body for the synchronous caller.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.abort_reinstall_machine_with_http_info(id, body, **kwargs)  # noqa: E501
        else:
            (data) = self.abort_reinstall_machine_with_http_info(id, body, **kwargs)  # noqa: E501
            return data
    def abort_reinstall_machine_with_http_info(self, id, body, **kwargs):  # noqa: E501
        """abort reinstall this machine  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.abort_reinstall_machine_with_http_info(id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the machine (required)
        :param V1MachineAbortReinstallRequest body: (required)
        :return: V1BootInfo
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by swagger-codegen; edit with care.
        all_params = ['id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject keyword arguments the generated API does not know about.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method abort_reinstall_machine" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `abort_reinstall_machine`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `abort_reinstall_machine`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/machine/{id}/abort-reinstall', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1BootInfo',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def add_provisioning_event(self, id, body, **kwargs):  # noqa: E501
        """adds a machine provisioning event  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_provisioning_event(id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the machine (required)
        :param V1MachineProvisioningEvent body: (required)
        :return: V1MachineRecentProvisioningEvents
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by swagger-codegen; edit with care.
        # Unwrap the response body for the synchronous caller.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.add_provisioning_event_with_http_info(id, body, **kwargs)  # noqa: E501
        else:
            (data) = self.add_provisioning_event_with_http_info(id, body, **kwargs)  # noqa: E501
            return data
    def add_provisioning_event_with_http_info(self, id, body, **kwargs):  # noqa: E501
        """adds a machine provisioning event  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_provisioning_event_with_http_info(id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the machine (required)
        :param V1MachineProvisioningEvent body: (required)
        :return: V1MachineRecentProvisioningEvents
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by swagger-codegen; edit with care.
        all_params = ['id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject keyword arguments the generated API does not know about.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_provisioning_event" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `add_provisioning_event`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `add_provisioning_event`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/machine/{id}/event', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1MachineRecentProvisioningEvents',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def allocate_machine(self, body, **kwargs):  # noqa: E501
        """allocate a machine  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.allocate_machine(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param V1MachineAllocateRequest body: (required)
        :return: V1MachineResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by swagger-codegen; edit with care.
        # Unwrap the response body for the synchronous caller.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.allocate_machine_with_http_info(body, **kwargs)  # noqa: E501
        else:
            (data) = self.allocate_machine_with_http_info(body, **kwargs)  # noqa: E501
            return data
    def allocate_machine_with_http_info(self, body, **kwargs):  # noqa: E501
        """allocate a machine  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.allocate_machine_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param V1MachineAllocateRequest body: (required)
        :return: V1MachineResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by swagger-codegen; edit with care.
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject keyword arguments the generated API does not know about.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method allocate_machine" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `allocate_machine`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/machine/allocate', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1MachineResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def chassis_identify_led_off(self, id, body, **kwargs):  # noqa: E501
    """sends a power-off to the chassis identify LED  # noqa: E501

    Thin convenience wrapper around
    chassis_identify_led_off_with_http_info that returns only the
    deserialized response data.  This method makes a synchronous HTTP
    request by default.  To make an asynchronous HTTP request, please
    pass async_req=True

    >>> thread = api.chassis_identify_led_off(id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :param V1EmptyBody body: (required)
    :param str description: reason why the chassis identify LED has been turned off
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.chassis_identify_led_off_with_http_info(id, body, **kwargs)  # noqa: E501
def chassis_identify_led_off_with_http_info(self, id, body, **kwargs):  # noqa: E501
    """sends a power-off to the chassis identify LED  # noqa: E501

    Builds the HTTP request for ``chassis_identify_led_off`` and hands
    it to the shared API client.  This method makes a synchronous HTTP
    request by default.  To make an asynchronous HTTP request, please
    pass async_req=True

    >>> thread = api.chassis_identify_led_off_with_http_info(id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :param V1EmptyBody body: (required)
    :param str description: reason why the chassis identify LED has been turned off
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['id', 'body', 'description']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method chassis_identify_led_off" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `chassis_identify_led_off`")  # noqa: E501
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `chassis_identify_led_off`")  # noqa: E501

    collection_formats = {}

    # `id` is interpolated into the URL path template below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    # Optional free-text reason, transmitted as a query parameter.
    query_params = []
    if 'description' in params:
        query_params.append(('description', params['description']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/{id}/power/chassis-identify-led-off', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1MachineResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def chassis_identify_led_on(self, id, body, **kwargs):  # noqa: E501
    """sends a power-on to the chassis identify LED  # noqa: E501

    Thin convenience wrapper around
    chassis_identify_led_on_with_http_info that returns only the
    deserialized response data.  This method makes a synchronous HTTP
    request by default.  To make an asynchronous HTTP request, please
    pass async_req=True

    >>> thread = api.chassis_identify_led_on(id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :param V1EmptyBody body: (required)
    :param str description: reason why the chassis identify LED has been turned on
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.chassis_identify_led_on_with_http_info(id, body, **kwargs)  # noqa: E501
def chassis_identify_led_on_with_http_info(self, id, body, **kwargs):  # noqa: E501
    """sends a power-on to the chassis identify LED  # noqa: E501

    Builds the HTTP request for ``chassis_identify_led_on`` and hands
    it to the shared API client.  This method makes a synchronous HTTP
    request by default.  To make an asynchronous HTTP request, please
    pass async_req=True

    >>> thread = api.chassis_identify_led_on_with_http_info(id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :param V1EmptyBody body: (required)
    :param str description: reason why the chassis identify LED has been turned on
        (the generated spec text said "identifier of the machine", which
        looks like a copy-paste error -- the twin led-off endpoint
        documents it as a reason string)
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['id', 'body', 'description']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method chassis_identify_led_on" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `chassis_identify_led_on`")  # noqa: E501
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `chassis_identify_led_on`")  # noqa: E501

    collection_formats = {}

    # `id` is interpolated into the URL path template below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    # Optional free-text reason, transmitted as a query parameter.
    query_params = []
    if 'description' in params:
        query_params.append(('description', params['description']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/{id}/power/chassis-identify-led-on', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1MachineResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_machine(self, id, **kwargs):  # noqa: E501
    """deletes a machine from the database  # noqa: E501

    Thin convenience wrapper around delete_machine_with_http_info that
    returns only the deserialized response data.  This method makes a
    synchronous HTTP request by default.  To make an asynchronous HTTP
    request, please pass async_req=True

    >>> thread = api.delete_machine(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.delete_machine_with_http_info(id, **kwargs)  # noqa: E501
def delete_machine_with_http_info(self, id, **kwargs):  # noqa: E501
    """deletes a machine from the database  # noqa: E501

    Builds the HTTP request for ``delete_machine`` and hands it to the
    shared API client.  This method makes a synchronous HTTP request by
    default.  To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.delete_machine_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_machine" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `delete_machine`")  # noqa: E501

    collection_formats = {}

    # `id` is interpolated into the URL path template below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # This endpoint sends no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1MachineResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def finalize_allocation(self, id, body, **kwargs):  # noqa: E501
    """finalize the allocation of the machine by reconfiguring the switch, sent on successful image installation  # noqa: E501

    Thin convenience wrapper around finalize_allocation_with_http_info
    that returns only the deserialized response data.  This method makes
    a synchronous HTTP request by default.  To make an asynchronous HTTP
    request, please pass async_req=True

    >>> thread = api.finalize_allocation(id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :param V1MachineFinalizeAllocationRequest body: (required)
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.finalize_allocation_with_http_info(id, body, **kwargs)  # noqa: E501
def finalize_allocation_with_http_info(self, id, body, **kwargs):  # noqa: E501
    """finalize the allocation of the machine by reconfiguring the switch, sent on successful image installation  # noqa: E501

    Builds the HTTP request for ``finalize_allocation`` and hands it to
    the shared API client.  This method makes a synchronous HTTP request
    by default.  To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.finalize_allocation_with_http_info(id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :param V1MachineFinalizeAllocationRequest body: (required)
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['id', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method finalize_allocation" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `finalize_allocation`")  # noqa: E501
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `finalize_allocation`")  # noqa: E501

    collection_formats = {}

    # `id` is interpolated into the URL path template below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/{id}/finalize-allocation', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1MachineResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def find_ipmi_machine(self, id, **kwargs):  # noqa: E501
    """returns a machine including the ipmi connection data  # noqa: E501

    Thin convenience wrapper around find_ipmi_machine_with_http_info
    that returns only the deserialized response data.  This method makes
    a synchronous HTTP request by default.  To make an asynchronous HTTP
    request, please pass async_req=True

    >>> thread = api.find_ipmi_machine(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :return: V1MachineIPMIResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.find_ipmi_machine_with_http_info(id, **kwargs)  # noqa: E501
def find_ipmi_machine_with_http_info(self, id, **kwargs):  # noqa: E501
    """returns a machine including the ipmi connection data  # noqa: E501

    Builds the HTTP request for ``find_ipmi_machine`` and hands it to
    the shared API client.  This method makes a synchronous HTTP request
    by default.  To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.find_ipmi_machine_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :return: V1MachineIPMIResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method find_ipmi_machine" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `find_ipmi_machine`")  # noqa: E501

    collection_formats = {}

    # `id` is interpolated into the URL path template below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # This endpoint sends no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/{id}/ipmi', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1MachineIPMIResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def find_ipmi_machines(self, body, **kwargs):  # noqa: E501
    """returns machines including the ipmi connection data  # noqa: E501

    Thin convenience wrapper around find_ipmi_machines_with_http_info
    that returns only the deserialized response data.  This method makes
    a synchronous HTTP request by default.  To make an asynchronous HTTP
    request, please pass async_req=True

    >>> thread = api.find_ipmi_machines(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1MachineFindRequest body: (required)
    :return: list[V1MachineIPMIResponse]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.find_ipmi_machines_with_http_info(body, **kwargs)  # noqa: E501
def find_ipmi_machines_with_http_info(self, body, **kwargs):  # noqa: E501
    """returns machines including the ipmi connection data  # noqa: E501

    Builds the HTTP request for ``find_ipmi_machines`` and hands it to
    the shared API client.  This method makes a synchronous HTTP request
    by default.  To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.find_ipmi_machines_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1MachineFindRequest body: (required)
    :return: list[V1MachineIPMIResponse]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method find_ipmi_machines" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `find_ipmi_machines`")  # noqa: E501

    collection_formats = {}

    # No path or query parameters: the search criteria travel in the body.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/ipmi/find', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[V1MachineIPMIResponse]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def find_machine(self, id, **kwargs):  # noqa: E501
    """get machine by id  # noqa: E501

    Thin convenience wrapper around find_machine_with_http_info that
    returns only the deserialized response data.  This method makes a
    synchronous HTTP request by default.  To make an asynchronous HTTP
    request, please pass async_req=True

    >>> thread = api.find_machine(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.find_machine_with_http_info(id, **kwargs)  # noqa: E501
def find_machine_with_http_info(self, id, **kwargs):  # noqa: E501
    """get machine by id  # noqa: E501

    Builds the HTTP request for ``find_machine`` and hands it to the
    shared API client.  This method makes a synchronous HTTP request by
    default.  To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.find_machine_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method find_machine" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `find_machine`")  # noqa: E501

    collection_formats = {}

    # `id` is interpolated into the URL path template below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # This endpoint sends no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1MachineResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def find_machines(self, body, **kwargs):  # noqa: E501
    """find machines by multiple criteria  # noqa: E501

    Thin convenience wrapper around find_machines_with_http_info that
    returns only the deserialized response data.  This method makes a
    synchronous HTTP request by default.  To make an asynchronous HTTP
    request, please pass async_req=True

    >>> thread = api.find_machines(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1MachineFindRequest body: (required)
    :return: list[V1MachineResponse]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.find_machines_with_http_info(body, **kwargs)  # noqa: E501
def find_machines_with_http_info(self, body, **kwargs):  # noqa: E501
    """find machines by multiple criteria  # noqa: E501

    Builds the HTTP request for ``find_machines`` and hands it to the
    shared API client.  This method makes a synchronous HTTP request by
    default.  To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.find_machines_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1MachineFindRequest body: (required)
    :return: list[V1MachineResponse]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method find_machines" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `find_machines`")  # noqa: E501

    collection_formats = {}

    # No path or query parameters: the search criteria travel in the body.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/find', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[V1MachineResponse]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def free_machine(self, id, **kwargs):  # noqa: E501
    """free a machine  # noqa: E501

    Thin convenience wrapper around free_machine_with_http_info that
    returns only the deserialized response data.  This method makes a
    synchronous HTTP request by default.  To make an asynchronous HTTP
    request, please pass async_req=True

    >>> thread = api.free_machine(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.free_machine_with_http_info(id, **kwargs)  # noqa: E501
def free_machine_with_http_info(self, id, **kwargs):  # noqa: E501
    """free a machine  # noqa: E501

    Builds the HTTP request for ``free_machine`` and hands it to the
    shared API client.  This method makes a synchronous HTTP request by
    default.  To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.free_machine_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :return: V1MachineResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method free_machine" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `free_machine`")  # noqa: E501

    collection_formats = {}

    # `id` is interpolated into the URL path template below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # This endpoint sends no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/{id}/free', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1MachineResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_machine_console_password(self, body, **kwargs):  # noqa: E501
    """get consolepassword for machine by id  # noqa: E501

    Thin convenience wrapper around
    get_machine_console_password_with_http_info that returns only the
    deserialized response data.  This method makes a synchronous HTTP
    request by default.  To make an asynchronous HTTP request, please
    pass async_req=True

    >>> thread = api.get_machine_console_password(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1MachineConsolePasswordRequest body: (required)
    :return: V1MachineConsolePasswordResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.get_machine_console_password_with_http_info(body, **kwargs)  # noqa: E501
def get_machine_console_password_with_http_info(self, body, **kwargs):  # noqa: E501
    """get consolepassword for machine by id  # noqa: E501

    Builds the HTTP request for ``get_machine_console_password`` and
    hands it to the shared API client.  This method makes a synchronous
    HTTP request by default.  To make an asynchronous HTTP request,
    please pass async_req=True

    >>> thread = api.get_machine_console_password_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1MachineConsolePasswordRequest body: (required)
    :return: V1MachineConsolePasswordResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the endpoint's own parameters plus the
    # generic request-control options shared by every generated method.
    all_params = ['body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE(review): locals() also captures `self` and `all_params`; only
    # the names listed in all_params are read back out of `params` below.
    params = locals()
    # Reject unknown keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_machine_console_password" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `get_machine_console_password`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # NOTE(review): this endpoint sends a request body with the GET
    # verb, which is unusual but generated as-is from the API spec.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/machine/consolepassword', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1MachineConsolePasswordResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_provisioning_event_container(self, id, **kwargs):  # noqa: E501
    """get the current machine provisioning event container  # noqa: E501

    Thin convenience wrapper around
    get_provisioning_event_container_with_http_info that returns only
    the deserialized response data.  This method makes a synchronous
    HTTP request by default.  To make an asynchronous HTTP request,
    please pass async_req=True

    >>> thread = api.get_provisioning_event_container(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: identifier of the machine (required)
    :return: V1MachineRecentProvisioningEvents
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper only ever want the response model, never
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper already yields the data (sync) or the request thread
    # (async), so its result can be handed back unconditionally.
    return self.get_provisioning_event_container_with_http_info(id, **kwargs)  # noqa: E501
def get_provisioning_event_container_with_http_info(self, id, **kwargs): # noqa: E501
"""get the current machine provisioning event container # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_provisioning_event_container_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:return: V1MachineRecentProvisioningEvents
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_provisioning_event_container" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_provisioning_event_container`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/{id}/event', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineRecentProvisioningEvents', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def ipmi_report(self, body, **kwargs): # noqa: E501
"""reports IPMI ip addresses leased by a management server for machines # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.ipmi_report(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1MachineIpmiReports body: (required)
:return: V1MachineIpmiReportResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.ipmi_report_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.ipmi_report_with_http_info(body, **kwargs) # noqa: E501
return data
def ipmi_report_with_http_info(self, body, **kwargs): # noqa: E501
"""reports IPMI ip addresses leased by a management server for machines # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.ipmi_report_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1MachineIpmiReports body: (required)
:return: V1MachineIpmiReportResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method ipmi_report" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `ipmi_report`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/ipmi', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineIpmiReportResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_machines(self, **kwargs): # noqa: E501
"""get all known machines # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_machines(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[V1MachineResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_machines_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_machines_with_http_info(**kwargs) # noqa: E501
return data
def list_machines_with_http_info(self, **kwargs): # noqa: E501
"""get all known machines # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_machines_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[V1MachineResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_machines" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[V1MachineResponse]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def machine_bios(self, id, body, **kwargs): # noqa: E501
"""boots machine into BIOS # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_bios(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.machine_bios_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.machine_bios_with_http_info(id, body, **kwargs) # noqa: E501
return data
def machine_bios_with_http_info(self, id, body, **kwargs): # noqa: E501
"""boots machine into BIOS # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_bios_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method machine_bios" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `machine_bios`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `machine_bios`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/{id}/power/bios', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def machine_cycle(self, id, body, **kwargs): # noqa: E501
"""sends a power cycle to the machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_cycle(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.machine_cycle_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.machine_cycle_with_http_info(id, body, **kwargs) # noqa: E501
return data
def machine_cycle_with_http_info(self, id, body, **kwargs): # noqa: E501
"""sends a power cycle to the machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_cycle_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method machine_cycle" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `machine_cycle`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `machine_cycle`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/{id}/power/cycle', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def machine_disk(self, id, body, **kwargs): # noqa: E501
"""boots machine from disk # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_disk(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.machine_disk_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.machine_disk_with_http_info(id, body, **kwargs) # noqa: E501
return data
def machine_disk_with_http_info(self, id, body, **kwargs): # noqa: E501
"""boots machine from disk # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_disk_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method machine_disk" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `machine_disk`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `machine_disk`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/{id}/power/disk', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def machine_off(self, id, body, **kwargs): # noqa: E501
"""sends a power-off to the machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_off(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.machine_off_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.machine_off_with_http_info(id, body, **kwargs) # noqa: E501
return data
def machine_off_with_http_info(self, id, body, **kwargs): # noqa: E501
"""sends a power-off to the machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_off_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method machine_off" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `machine_off`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `machine_off`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/{id}/power/off', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def machine_on(self, id, body, **kwargs): # noqa: E501
"""sends a power-on to the machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_on(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.machine_on_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.machine_on_with_http_info(id, body, **kwargs) # noqa: E501
return data
def machine_on_with_http_info(self, id, body, **kwargs): # noqa: E501
"""sends a power-on to the machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_on_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method machine_on" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `machine_on`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `machine_on`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/{id}/power/on', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def machine_pxe(self, id, body, **kwargs): # noqa: E501
"""boots machine from PXE # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_pxe(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.machine_pxe_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.machine_pxe_with_http_info(id, body, **kwargs) # noqa: E501
return data
def machine_pxe_with_http_info(self, id, body, **kwargs): # noqa: E501
"""boots machine from PXE # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_pxe_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method machine_pxe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `machine_pxe`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `machine_pxe`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/{id}/power/pxe', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def machine_reset(self, id, body, **kwargs): # noqa: E501
"""sends a reset to the machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_reset(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.machine_reset_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.machine_reset_with_http_info(id, body, **kwargs) # noqa: E501
return data
def machine_reset_with_http_info(self, id, body, **kwargs): # noqa: E501
"""sends a reset to the machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.machine_reset_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1EmptyBody body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method machine_reset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `machine_reset`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `machine_reset`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/{id}/power/reset', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def register_machine(self, body, **kwargs): # noqa: E501
"""register a machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.register_machine(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1MachineRegisterRequest body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.register_machine_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.register_machine_with_http_info(body, **kwargs) # noqa: E501
return data
def register_machine_with_http_info(self, body, **kwargs): # noqa: E501
"""register a machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.register_machine_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1MachineRegisterRequest body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method register_machine" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `register_machine`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['HMAC', 'jwt'] # noqa: E501
return self.api_client.call_api(
'/v1/machine/register', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1MachineResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def reinstall_machine(self, id, body, **kwargs): # noqa: E501
"""reinstall this machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reinstall_machine(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1MachineReinstallRequest body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.reinstall_machine_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.reinstall_machine_with_http_info(id, body, **kwargs) # noqa: E501
return data
    def reinstall_machine_with_http_info(self, id, body, **kwargs):  # noqa: E501
        """reinstall this machine  # noqa: E501

        Performs POST /v1/machine/{id}/reinstall and returns the full HTTP
        response information. Synchronous by default; pass async_req=True
        to get a thread handle instead.
        >>> thread = api.reinstall_machine_with_http_info(id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the machine (required)
        :param V1MachineReinstallRequest body: (required)
        :return: V1MachineResponse
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: on an unrecognised keyword argument.
        :raises ValueError: if `id` or `body` is missing or None.
        """
        # Whitelist of accepted keyword arguments: the API parameters plus the
        # generic transport-control options understood by ApiClient.call_api.
        all_params = ['id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots self/id/body/kwargs; validated kwargs are merged
        # in so everything below is looked up uniformly through `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method reinstall_machine" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `reinstall_machine`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `reinstall_machine`")  # noqa: E501

        collection_formats = {}

        # `id` is substituted into the {id} placeholder of the URL path.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/machine/{id}/reinstall', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1MachineResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def set_chassis_identify_led_state(self, id, body, **kwargs): # noqa: E501
"""set the state of a chassis identify LED # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_chassis_identify_led_state(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1ChassisIdentifyLEDState body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_chassis_identify_led_state_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.set_chassis_identify_led_state_with_http_info(id, body, **kwargs) # noqa: E501
return data
    def set_chassis_identify_led_state_with_http_info(self, id, body, **kwargs):  # noqa: E501
        """set the state of a chassis identify LED  # noqa: E501

        Performs POST /v1/machine/{id}/chassis-identify-led-state and returns
        the full HTTP response information. Synchronous by default; pass
        async_req=True to get a thread handle instead.
        >>> thread = api.set_chassis_identify_led_state_with_http_info(id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the machine (required)
        :param V1ChassisIdentifyLEDState body: (required)
        :return: V1MachineResponse
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: on an unrecognised keyword argument.
        :raises ValueError: if `id` or `body` is missing or None.
        """
        # Whitelist of accepted keyword arguments: the API parameters plus the
        # generic transport-control options understood by ApiClient.call_api.
        all_params = ['id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots self/id/body/kwargs; validated kwargs are merged
        # in so everything below is looked up uniformly through `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method set_chassis_identify_led_state" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `set_chassis_identify_led_state`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `set_chassis_identify_led_state`")  # noqa: E501

        collection_formats = {}

        # `id` is substituted into the {id} placeholder of the URL path.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/machine/{id}/chassis-identify-led-state', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1MachineResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def set_machine_state(self, id, body, **kwargs): # noqa: E501
"""set the state of a machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_machine_state(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1MachineState body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_machine_state_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.set_machine_state_with_http_info(id, body, **kwargs) # noqa: E501
return data
    def set_machine_state_with_http_info(self, id, body, **kwargs):  # noqa: E501
        """set the state of a machine  # noqa: E501

        Performs POST /v1/machine/{id}/state and returns the full HTTP
        response information. Synchronous by default; pass async_req=True
        to get a thread handle instead.
        >>> thread = api.set_machine_state_with_http_info(id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the machine (required)
        :param V1MachineState body: (required)
        :return: V1MachineResponse
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: on an unrecognised keyword argument.
        :raises ValueError: if `id` or `body` is missing or None.
        """
        # Whitelist of accepted keyword arguments: the API parameters plus the
        # generic transport-control options understood by ApiClient.call_api.
        all_params = ['id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots self/id/body/kwargs; validated kwargs are merged
        # in so everything below is looked up uniformly through `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method set_machine_state" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `set_machine_state`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `set_machine_state`")  # noqa: E501

        collection_formats = {}

        # `id` is substituted into the {id} placeholder of the URL path.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/machine/{id}/state', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1MachineResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def update_firmware(self, id, body, **kwargs): # noqa: E501
"""sends a firmware command to the machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_firmware(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the machine (required)
:param V1MachineUpdateFirmwareRequest body: (required)
:return: V1MachineResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_firmware_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.update_firmware_with_http_info(id, body, **kwargs) # noqa: E501
return data
    def update_firmware_with_http_info(self, id, body, **kwargs):  # noqa: E501
        """sends a firmware command to the machine  # noqa: E501

        Performs POST /v1/machine/update-firmware/{id} and returns the full
        HTTP response information. Synchronous by default; pass
        async_req=True to get a thread handle instead.
        >>> thread = api.update_firmware_with_http_info(id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the machine (required)
        :param V1MachineUpdateFirmwareRequest body: (required)
        :return: V1MachineResponse
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: on an unrecognised keyword argument.
        :raises ValueError: if `id` or `body` is missing or None.
        """
        # Whitelist of accepted keyword arguments: the API parameters plus the
        # generic transport-control options understood by ApiClient.call_api.
        all_params = ['id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots self/id/body/kwargs; validated kwargs are merged
        # in so everything below is looked up uniformly through `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_firmware" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `update_firmware`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `update_firmware`")  # noqa: E501

        collection_formats = {}

        # `id` is substituted into the {id} placeholder of the URL path.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/machine/update-firmware/{id}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1MachineResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 1.96875 | 2 |
tensorflow/lite/micro/examples/magic_wand/train/data_prepare.py | katherinekowalski/tensorflow | 0 | 12760480 | # Lint as: python3
# coding=utf-8
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prepare data for further process.
Read the collected recordings from the per-gesture folders listed in
``folders`` and save them in "./data/complete_data" in python dict format.
It will generate a new file with the following structure:
├── data
│ └── complete_data
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import json
import os
import random
LABEL_NAME = "gesture"
DATA_NAME = "accel_ms2_xyz"
folders = ["A","B","N","O","backspace","space","done"]
# folders = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z",
# "apostrophe","backspace","comma","done","exclamation_point", "period","question_mark","quotes","slash","space"]
names = [
"lauren","katherine","annie", "chris","hallie"
]
def prepare_original_data(folder, name, data, file_to_read):  # pylint: disable=redefined-outer-name
  """Read collected data from files.

  Parses a CSV capture file and appends one dict per recording to *data*.
  Each dict has keys `gesture` (the label), `accel_ms2_xyz` (list of
  [x, y, z] samples) and `name` (the person who recorded it).

  Args:
    folder: gesture label; "negative" selects the unlabeled-data format.
    name: identifier of the person the file belongs to.
    data: list that parsed recordings are appended to (mutated in place).
    file_to_read: path of the CSV file to parse.
  """
  if folder != "negative":
    # Labeled format: rows of x,y,z values; a row whose third field is "-"
    # separates consecutive recordings.
    with open(file_to_read, "r") as f:
      lines = csv.reader(f)
      data_new = {}
      data_new[LABEL_NAME] = folder
      data_new[DATA_NAME] = []
      data_new["name"] = name
      for idx, line in enumerate(lines):  # pylint: disable=unused-variable,redefined-outer-name
        if len(line) == 3:
          if line[2] == "-":
            # Separator row: flush the current recording and start a new one.
            data.append(data_new)
            data_new = {}
            data_new[LABEL_NAME] = folder
            data_new[DATA_NAME] = []  # this should store (300,3) data recording
            data_new["name"] = name
          elif line[2] != "-":
            # z axis is offset by -.98 — presumably a gravity correction in g
            # units; TODO confirm against the capture firmware.
            data_new[DATA_NAME].append([float(line[i]) if i < 2 else (float(line[i]) -.98) for i in range(3)])
      # Flush the final (possibly partial) recording.
      data.append(data_new)
  else:
    # Negative format: no separators; recordings are fixed 300-sample chunks.
    with open(file_to_read, "r") as f:
      lines = csv.reader(f)
      data_new = {}
      data_new[LABEL_NAME] = folder
      data_new[DATA_NAME] = []
      data_new["name"] = name
      for idx, line in enumerate(lines):
        if len(line) == 3 and line[2] != "-":
          if len(data_new[DATA_NAME]) == 300:
            # NOTE(review): the row that triggers this flush is dropped rather
            # than carried into the next chunk — confirm this is intended.
            data.append(data_new)
            data_new = {}
            data_new[LABEL_NAME] = folder
            data_new[DATA_NAME] = []
            data_new["name"] = name
          else:
            data_new[DATA_NAME].append([float(i) for i in line[0:3]])
      data.append(data_new)
def generate_negative_data(data):  # pylint: disable=redefined-outer-name
  """Generate negative data labeled as 'negative6~8'.

  Appends 300 synthetic non-gesture recordings (100 per category) to *data*:
  noisy straight-line motion, pure random motion, and near-stationary jitter.
  Each recording is 300 samples; "name" is negative6/7/8 (60/20/20 split),
  which downstream code can use to partition into train/valid/test.
  """
  # Big movement -> around straight line
  samp = 300
  for i in range(100):
    # Indices 81-99 -> negative8, 61-80 -> negative7, 0-60 -> negative6.
    if i > 80:
      dic = {DATA_NAME: [], LABEL_NAME: "negative", "name": "negative8"}
    elif i > 60:
      dic = {DATA_NAME: [], LABEL_NAME: "negative", "name": "negative7"}
    else:
      dic = {DATA_NAME: [], LABEL_NAME: "negative", "name": "negative6"}
    # Random start point and per-sample drift, plus small jitter per sample.
    start_x = (random.random() - 0.5) * 2000
    start_y = (random.random() - 0.5) * 2000
    start_z = (random.random() - 0.5) * 2000
    x_increase = (random.random() - 0.5) * 10
    y_increase = (random.random() - 0.5) * 10
    z_increase = (random.random() - 0.5) * 10
    for j in range(samp):
      dic[DATA_NAME].append([
          start_x + j * x_increase + (random.random() - 0.5) * 6,
          start_y + j * y_increase + (random.random() - 0.5) * 6,
          start_z + j * z_increase + (random.random() - 0.5) * 6
      ])
    data.append(dic)
  # Random
  for i in range(100):
    if i > 80:
      dic = {DATA_NAME: [], LABEL_NAME: "negative", "name": "negative8"}
    elif i > 60:
      dic = {DATA_NAME: [], LABEL_NAME: "negative", "name": "negative7"}
    else:
      dic = {DATA_NAME: [], LABEL_NAME: "negative", "name": "negative6"}
    for j in range(samp):
      dic[DATA_NAME].append([(random.random() - 0.5) * 1000,
                             (random.random() - 0.5) * 1000,
                             (random.random() - 0.5) * 1000])
    data.append(dic)
  # Stay still
  for i in range(100):
    if i > 80:
      dic = {DATA_NAME: [], LABEL_NAME: "negative", "name": "negative8"}
    elif i > 60:
      dic = {DATA_NAME: [], LABEL_NAME: "negative", "name": "negative7"}
    else:
      dic = {DATA_NAME: [], LABEL_NAME: "negative", "name": "negative6"}
    start_x = (random.random() - 0.5) * 2000
    start_y = (random.random() - 0.5) * 2000
    start_z = (random.random() - 0.5) * 2000
    for j in range(samp):
      dic[DATA_NAME].append([
          start_x + (random.random() - 0.5) * 40,
          start_y + (random.random() - 0.5) * 40,
          start_z + (random.random() - 0.5) * 40
      ])
    data.append(dic)
def write_data(data_to_write, path):
  """Write recordings to *path* as JSON Lines (one JSON object per line).

  Args:
    data_to_write: iterable of recording dicts to serialize.
    path: destination file path; an existing file is overwritten.
  """
  with open(path, "w") as f:
    # Iterate directly — the previous enumerate() index was never used.
    for item in data_to_write:
      # ensure_ascii=False keeps any non-ASCII label text human readable.
      f.write(json.dumps(item, ensure_ascii=False))
      f.write("\n")
if __name__ == "__main__":
  data = []  # pylint: disable=redefined-outer-name
  # Parse every (gesture, person) capture file under ./data/<gesture>/.
  for idx1, folder in enumerate(folders):
    for idx2, name in enumerate(names):
      path = "./data/"+folder+"/output_"+folder+"_"+ name + ".txt"
      print(path)
      prepare_original_data(folder, name, data, path)
  # Negative-data ingestion/generation is currently disabled:
  # for idx in range(3): ##############THIS IS HOW MANY NEG FILES WE HAVE##############################
  #   prepare_original_data("negative", "negative_"+name , data, #% (idx + 1)
  #                        "./data/negative/output_negative_"+name+".txt")# % (idx + 1)) #% (idx + 1)
  # # generate_negative_data(data)
  print("data_length: " + str(len(data)))
  # Write the merged dataset as JSON Lines for the next pipeline stage.
  if not os.path.exists("./data"):
    os.makedirs("./data")
  write_data(data, "./data/complete_data")
| 2.21875 | 2 |
PT2019/TransitAssignment/lp.py | hkujy/PT42185 | 2 | 12760481 | <reponame>hkujy/PT42185
"""
This is for creating the Lp Problem
*LP is prerequsit of the course*
LP Model
min CX
Subject
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x<=ub
Accordingly, it contains four functions
a. find matrix C
b. find matrix A_ub, A_eq, vector b_ub, b_eq
c. find vector lb, ub for
d. solve the model
Remarks:
1. if you are using other package or method to solve LP, i.e., CPLEX, there are different ways (probably simple) to add constraints and build the model.
Nevertheless, as long as you know how to create matrix A, B, C, you should be able to solve general LP problem.
2. One important step is to ensure the coefficient is corresponding to the decsion variables
3. Dimension of C = Dimension of x = number of decision variables = number of links + number of nodes
4. Dimension of b_eq = number of nodes: flow conservation constraints at each node
5. Dimension of b_ub = number of links : flow distribution constraint
6. Dimension of A_ub = number of links * [number of links + number of nodes]
7. In our problem, there are 10 links, so variable indices [0] - [9] represent the 10 link decision variables.
8. There are 6 nodes, so variable indices [10] - [15] denote the waiting time variables at each node
"""
from graph import graph_class
from scipy.optimize import linprog
def get_vector_C(g:graph_class):
    """Return the objective-coefficient vector C for min C @ x.

    The first len(g.links) entries are the link travel costs (for the link
    flow variables v_a); the remaining len(g.nodes) entries are all 1
    (for the node waiting-time variables w_i).
    """
    link_costs = [link.cost for link in g.links]
    waiting_coeffs = [1 for _ in g.nodes]
    return link_costs + waiting_coeffs
def get_A_ub_and_B_ub(g:graph_class):
    """Build the inequality system A_ub @ x <= B_ub.

    For each link a with tail node i and line frequency f the first
    len(g.links) rows encode the frequency-split constraint
    v_a - f * w_i <= 0, and the next len(g.links) rows encode
    non-negativity, -v_a <= 0.  Link variables occupy column indices
    [0, n_links); node waiting-time variables follow at [n_links, ...).
    """
    n_links = len(g.links)
    n_vars = n_links + len(g.nodes)
    B_ub = [0] * (2 * n_links)
    A_ub = [[0] * n_vars for _ in range(2 * n_links)]
    # Rows 0 .. n_links-1: v_a - f * w_i <= 0.
    for row, link in enumerate(g.links):
        A_ub[row][link.id] = 1
        A_ub[row][n_links + link.tail_node[0].id] = -1 * link.lines[0].fre
    # Rows n_links .. 2*n_links-1: -v_a <= 0.
    for row, link in enumerate(g.links, start=n_links):
        A_ub[row][link.id] = -1
    return A_ub, B_ub
def get_A_eq_and_B_eq(g:graph_class):
    """Build the flow-conservation equalities A_eq @ x == B_eq.

    One row per node: sum(outgoing link flow) - sum(incoming link flow)
    equals the demand generated at that node — +1 at origin "A", -1 at
    destination "B" (the unit demand is absorbed there), 0 elsewhere.
    """
    n_vars = len(g.links) + len(g.nodes)
    A_eq = [[0] * n_vars for _ in g.nodes]
    B_eq = [0] * len(g.nodes)
    row = 0
    for n in g.nodes:
        for l in n.in_links:
            A_eq[row][l.id] = -1
        for l in n.out_links:
            A_eq[row][l.id] = 1
        if n.name == "A":
            B_eq[row] = 1
        elif n.name == "B":
            # The destination absorbs the demand — note the negative sign.
            B_eq[row] = -1
        # All other nodes keep the initialised value 0.
        row = row + 1
    return A_eq, B_eq
    # (Removed an unreachable `pass` that followed the return statement.)
def get_lb_and_ub(g:graph_class):
    """Return variable bounds for the LP.

    Produces lower/upper bound lists plus the (lb, ub) pair list expected
    by scipy's ``linprog``.  0 and 100 are deliberately loose bounds; they
    are sufficient for this small unit-demand network but could be
    tightened.
    """
    n_vars = len(g.links) + len(g.nodes)
    lb = [0] * n_vars
    ub = [100] * n_vars
    # Pair the bounds element-wise instead of an index loop; also removed
    # an unreachable `pass` that followed the return statement.
    bounds = list(zip(lb, ub))
    return lb, ub, bounds
def model(network:graph_class):
    """
    Assemble the C, A_ub/B_ub, A_eq/B_eq matrices and the variable bounds,
    solve the LP with scipy's linprog, and write the optimal objective,
    link flows (v_a) and node waiting times (w_i) to "lp_results.txt".
    """
    C = get_vector_C(network)
    (A_ub, B_ub) = get_A_ub_and_B_ub(network)
    (A_eq, B_eq) = get_A_eq_and_B_eq(network)
    (lb, ub, bounds) = get_lb_and_ub(network)
    # solve lp using linprog function
    res = linprog(C,A_ub=A_ub,b_ub=B_ub,A_eq=A_eq,b_eq=B_eq, bounds=bounds)
    # print output in the files
    with open("lp_results.txt",'w+') as f:
        """
        ouput lp results in the file
        """
        print("Objective = {0:4.2f}".format(res.fun), file = f)
        # Link variables sit at x[0:n_links], node variables follow.
        for l in network.links:
            print("Link {0}, Flow v_a = {1:4.2f}".format(l.name, res.x[l.id]), file=f)
        for n in network.nodes:
            print("Node {0}, Wait w_i = {1:4.2f}".format(n.name, res.x[len(network.links)+n.id]),file=f)
    # print(res)
| 3.1875 | 3 |
layer23/python2/testutils/TestUtilsL23.py | xenadevel/xenascriptlibs | 7 | 12760482 | import os, sys, time, threading, inspect
from SocketDrivers import SimpleSocket
# Environment variable that, when set, names a file to which every sent CLI
# command is dumped when a XenaScriptTools instance is destroyed.
LOGFILE = "XENALOG"

# Xena CLI sub-command suffixes; each is appended to a "<module>/<port>" prefix.
RESET = " p_reset"
RESERVATION = " p_reservation ?"
RESERVE = " p_reservation reserve"
RELINQUISH = " p_reservation relinquish"
RELEASE = " p_reservation release"
TRAFFIC_ON = " p_traffic on"
TRAFFIC_OFF = " p_traffic off"

# Script lines starting with this character are skipped by load_script().
COMMENT_START = ';'
## Print *msg* and terminate the process with exit status 1.
#  Module-level helper, usable before any XenaScriptTools instance exists.
#  (This file is Python 2 — note the print statements.)
def errexit(msg):
    print "Error: " + msg + ", exiting..."
    sys.exit(1)
## Keepalive thread to ensure tcp connection is kept open
# do not edit this
class KeepAliveThread(threading.Thread):
    # Payload sent on every keepalive ping (an empty command).
    message = ''

    ## Create a daemon thread that pings *connection* every *interval* seconds.
    def __init__(self, connection, interval = 10):
        threading.Thread.__init__(self)
        self.connection = connection
        self.interval = interval
        self.finished = threading.Event()
        # Daemonize so the keepalive never blocks interpreter shutdown.
        self.setDaemon(True)
        print '[KeepAliveThread] Thread initiated, interval %d seconds' % (self.interval)

    ## Signal the loop to stop and wait for the thread to terminate.
    def stop (self):
        self.finished.set()
        self.join()

    ## Loop: wait *interval* seconds (or until stopped), then ping once.
    def run (self):
        while not self.finished.isSet():
            self.finished.wait(self.interval)
            self.connection.Ask(self.message)
## Low level driver for TCPIP based queried
# do not edit this
class XenaSocketDriver(SimpleSocket):
    ## Open a TCP connection to *hostname* with keepalives enabled; a
    #  semaphore serializes concurrent access (e.g. the keepalive thread).
    def __init__(self, hostname, port = 22611):
        SimpleSocket.__init__(self, hostname = hostname, port = port)
        SimpleSocket.set_keepalives(self)
        self.access_semaphor = threading.Semaphore(1)

    ## Send *cmd* without reading a reply (single-threaded on the socket).
    def SendCommand(self, cmd):
        self.access_semaphor.acquire()
        print "Sending command: " + cmd
        SimpleSocket.SendCommand(self, cmd)
        self.access_semaphor.release()

    ## Send *cmd* and return the reply with the trailing newline stripped.
    def Ask(self, cmd):
        self.access_semaphor.acquire()
        reply = SimpleSocket.Ask(self, cmd).strip('\n')
        self.access_semaphor.release()
        return reply
## Xena supplied class example for Scripting via Python
# feel free to add functions below
#
class XenaScriptTools:
    """High-level helper for driving a Xena chassis via its CLI scripting API.

    Wraps a XenaSocketDriver with send/expect primitives, optional debug
    output, optional command logging (enabled by the XENALOG env var), and
    convenience commands for logon, chassis/port reservation, traffic
    control and script loading.  (Python 2 code.)
    """

    def __init__(self, ip):
        self.ip = ip
        self.debug = 0
        self.halt = 0
        self.log = 0
        self.cmds = []
        # Logging is on iff the XENALOG env var names a destination file.
        self.logf = os.environ.get(LOGFILE)
        if self.logf != None:
            self.log = 1
        self.driver= XenaSocketDriver(ip)

    ## On destruction, dump all logged commands to the XENALOG file.
    def __del__(self):
        if self.log:
            lf = open(self.logf, 'w')
            for cmd in self.cmds:
                lf.write(cmd + "\n")
            lf.close()
        return

    ## Enable debug - prints commands and errors
    def debugOn(self):
        self.debug = 1
        return

    ## Disable debug (default) - no printed output
    def debugOff(self):
        self.debug = 0
        return

    ## Print *msg* only when debug mode is enabled.
    def debugMsg(self, msg):
        if self.debug == 1:
            print msg

    ## Record *cmd* for the XENALOG dump when logging is enabled.
    def logCmd(self, cmd):
        if self.log == 1:
            self.cmds.append(cmd)

    ## Enable halt on error - calls sys.exit(1) upon error
    def haltOn(self):
        self.halt = 1
        return

    ## Disable halt on error (default)
    def haltOff(self):
        self.halt = 0
        return

    ## Print diagnostics msg and halt (only when halt-on-error is enabled)
    def errexit(self, msg):
        if self.halt == 1:
            print
            print "Error: " + msg + ", exiting..."
            print
            sys.exit(1)

    ###############################################
    ## Send and Expect primitives
    ###############################################

    ## Send command and return response
    def Send(self, cmd):
        res = self.driver.Ask(cmd)
        self.debugMsg("Send() : " + cmd)
        self.debugMsg("Send() received: " + res)
        self.logCmd(cmd)
        return res

    ## Send multiple commands and return all responses
    def SendMulti(self, cmdlist):
        cmd = ''
        num = len(cmdlist)
        for i in range (0,num):
            cmd = cmd + cmdlist[i] + '\n'
        self.debugMsg("Send() : " + cmd)
        res = self.driver.AskMulti(cmd, num)
        self.debugMsg("Send() received: " + str(res))
        self.logCmd(cmd)
        return res

    ## Send command and expect response (typically <OK>).
    #  Returns True on match; on mismatch returns False (after optionally
    #  halting via errexit when halt-on-error is enabled).
    def SendExpect(self, cmd, resp):
        self.debugMsg("SendExpect("+resp+"): " + cmd)
        self.logCmd(cmd)
        res = self.driver.Ask(cmd)
        if res.rstrip('\n') == resp:
            return True;
        else:
            self.debugMsg("SendExpect() failed")
            self.debugMsg(" Expected: " + resp)
            self.debugMsg(" Received: " + res)
            self.errexit("Halting in line %d" % (inspect.currentframe().f_back.f_lineno))
            return False

    ## Send commands and expect <OK>
    def SendExpectOK(self, cmd):
        return self.SendExpect(cmd, "<OK>")

    ## Send command and match response with specified string
    #  (NOTE: the parameter shadows the builtin `str` inside this method.)
    def SendAndMatch(self, cmd, str):
        self.debugMsg("SendAndMatch() : " + cmd)
        self.logCmd(cmd)
        res = self.driver.Ask(cmd)
        if res.find(str) != -1:
            return True
        else:
            self.debugMsg("SendAndMatch() failed")
            self.debugMsg(" Expected: " + str)
            self.debugMsg(" Got : " + res)
            self.errexit("Halting in line %d" % (inspect.currentframe().f_back.f_lineno))
            return False

    ###############################################
    ## Xena Scripting API specific commands
    ###############################################

    ##############################
    # Chassis and Logon Commands
    ##############################

    ## Logon
    def Logon(self, pwd):
        self.SendExpectOK("c_logon \"" + pwd + "\"")

    ## Logon and set owner
    def LogonSetOwner(self, pwd, owner):
        self.Logon(pwd)
        self.SendExpectOK("c_owner \"" + owner + "\"")

    ## Logon to chassis, set user name and password, then reserve ports
    def LogonAndReserve(self, ports, pwd, owner):
        # Accept a single port string as well as a list of ports.
        if type(ports) == type(str()):
            ports = [ports]
        self.LogonSetOwner(pwd, owner)
        self.PortReserve(ports)

    # Reserve chassis, release or relinquish first if necessary
    def ChassisReserve(self):
        self.ChassisRelease()
        self.SendExpectOK("C_RESERVATION reserve")

    # Release the chassis reservation (relinquish if another user holds it)
    def ChassisRelease(self):
        res = self.Send("C_RESERVATION ?").split()[1]
        if res.find("RESERVED_BY_YOU") != -1:
            self.debugMsg("Chassis is reserved by me - release")
            self.SendExpectOK("C_RESERVATION release")
        elif res.find("RESERVED_BY_OTHER") != -1:
            self.debugMsg("Chassis is reserved by other - relinquish")
            self.SendExpectOK("C_RESERVATION relinquish")
        elif res.find("RELEASED") != -1:
            self.debugMsg("Chassis is released - do nothing")
        else:
            self.errexit("Halting in line %d" % (inspect.currentframe().f_back.f_lineno))

    ##############################
    # Misc Commands
    ##############################

    ## Emit *text* as a framed comment in the command stream/log.
    def Comment(self, text):
        self.Send("; ######################################")
        self.Send("; " + text)
        self.Send("; ######################################")

    ##############################
    # Port Commands
    ##############################

    ## Reserve a port - if port is reserved, release or relinquish, then reserve
    def PortReserve(self, ports):
        if type(ports) == type(str()):
            ports = [ports]
        for port in ports:
            res = self.Send(port + RESERVATION)
            if res.find("RESERVED_BY_OTHER") != -1:
                self.debugMsg("Port " + port + " is reserved by other - relinquish")
                self.SendExpectOK(port + RELINQUISH)
                self.SendExpectOK(port + RESERVE)
            elif res.find("RESERVED_BY_YOU") != -1:
                self.debugMsg("Port " + port + " is reserved by me - do nothing")
            else:
                self.SendExpectOK(port + RESERVE)

    ## Release each port that is currently reserved by us.
    def PortRelease(self, ports):
        if type(ports) == type(str()):
            ports = [ports]
        for port in ports:
            res = self.Send(port + RESERVATION)
            if res.find("RESERVED_BY_YOU") != -1:
                self.SendExpectOK(port + RELEASE)

    ## Relinquish each port that is currently reserved by another user.
    def PortRelinquish(self, ports):
        if type(ports) == type(str()):
            ports = [ports]
        for port in ports:
            res = self.Send(port + RESERVATION)
            if res.find("RESERVED_BY_OTHER") != -1:
                self.SendExpectOK(port + RELINQUISH)

    ## Start traffic on ports
    def PortTrafficStart(self, ports):
        if type(ports) == type(str()):
            ports = [ports]
        for port in ports:
            res = self.SendExpectOK(port + TRAFFIC_ON)

    ## Stop traffic on ports
    def PortTrafficStop(self, ports):
        if type(ports) == type(str()):
            ports = [ports]
        for port in ports:
            res = self.SendExpectOK(port + TRAFFIC_OFF)

    ## Format a "<module>/<port>" command prefix.
    def get_module_port_prefix(self, moduleIndex, portIndex):
        return "%d/%d" % (moduleIndex, portIndex)

    ## Load a CLI script file and send it line by line to the given port.
    #  Lines starting with COMMENT_START are skipped.  Unless *israwlines*
    #  is set, the module/port prefix is sent first so subsequent relative
    #  commands apply to that port.  Stops on the first failing command.
    def load_script(self, filename, moduleIndex, portIndex, israwlines=False):
        module_port_prefix = self.get_module_port_prefix(moduleIndex, portIndex)
        self.PortReserve(module_port_prefix)
        if not israwlines:
            self.driver.SendCommand(module_port_prefix)
        line_number = 0;
        send_count = 0;
        for line in open(filename, 'r'):
            command = line.strip('\n')
            line_number += 1
            if command.startswith(COMMENT_START):
                continue
            success = self.SendExpectOK(command.strip('\n'))
            if not success:
                print '[XenaManager] Error in script at line: %d, [%s]' % (line_number, command)
                print '[XenaManager] Load interrupted!'
                return
            send_count += 1
            # Lightweight progress indicator for long scripts.
            if send_count % 100 == 0:
                print "\r[XenaManager] (Sent %d commands ...)" % send_count,
        print "\r[XenaManager] Script '%s' (%d commands) loaded succesfully." % (filename, send_count)
| 2.5625 | 3 |
launcher/api/python/notifier/drivers/__init__.py | davidvoler/ate_meteor | 0 | 12760483 | __author__ = 'davidl'
| 1.070313 | 1 |
simulation/p10033.py | sajjadt/competitive-programming | 10 | 12760484 |
def interpret(mem):
    """Execute the three-digit 'decimal machine' program stored in *mem*.

    Each cell of *mem* is a 3-character string "oab": opcode o with single
    digit operands a and b.  The machine has ten registers (reg 0-9) and a
    program counter that wraps modulo 1000; all arithmetic is modulo 1000.
    *mem* is mutated in place by the store instruction (opcode 9).

    Returns the number of instructions executed, including the halt.
    """
    registers = [0] * 10
    counter = 0        # program counter, wraps mod 1000
    executed = 0       # instructions committed so far
    while True:
        word = mem[counter]
        code, a, b = word[0], int(word[1]), int(word[2])
        executed += 1
        counter = (counter + 1) % 1000
        if code == "1":            # halt
            break
        if code == "2":            # load immediate: reg[a] = b
            registers[a] = b
        elif code == "3":          # add immediate (mod 1000)
            registers[a] = (registers[a] + b) % 1000
        elif code == "4":          # multiply immediate (mod 1000)
            registers[a] = (registers[a] * b) % 1000
        elif code == "5":          # move register
            registers[a] = registers[b]
        elif code == "6":          # add register (mod 1000)
            registers[a] = (registers[a] + registers[b]) % 1000
        elif code == "7":          # multiply register (mod 1000)
            registers[a] = (registers[a] * registers[b]) % 1000
        elif code == "8":          # load: reg[a] = mem[reg[b]]
            registers[a] = int(mem[registers[b]])
        elif code == "9":          # store: mem[reg[b]] = reg[a], zero-padded
            mem[registers[b]] = str(registers[a]).zfill(3)
        elif code == "0":          # jump to reg[a] if reg[b] != 0
            if registers[b] != 0:
                counter = registers[a]
        else:
            raise ValueError("unexpected instrution")
    return executed
# Driver: first line is the number of test cases, followed by a blank line;
# each case is one instruction per line and ends at a blank line / EOF.
num_cases = int(input())
input()  # consume the blank line after the case count
from sys import stdin, stdout
for c in range(num_cases):
    # Fresh memory per case, all words initialised to "000".
    # (Sized 1001, one slot of slack beyond the 1000 addressable words.)
    mem = ["000"] * 1001
    line = stdin.readline().strip()
    i = 0
    while line != "":
        mem[i] = line
        i += 1
        line = stdin.readline().strip()
    stdout.write("{}\n".format(interpret(mem)))
    # Print a blank line between outputs, but not after the last case.
    if c < num_cases - 1:
stdout.write("\n") | 3.078125 | 3 |
stDrosophila/__init__.py | chen-zhan/stDrosophila-release-1 | 0 | 12760485 | <filename>stDrosophila/__init__.py
"""
Toolkit for analyzing Drosophila spatial transcriptome data.
"""
from . import envs
from . import io
from . import od
from . import pl
from . import pp
from . import tl
| 0.75 | 1 |
address.py | asumit499/Python-BootCamp | 4 | 12760486 | print("Address-\nMuzaffarpur\nBihar\nIndia")
| 1.335938 | 1 |
arcana/repository/interfaces.py | apoz00003/arcana | 0 | 12760487 | <reponame>apoz00003/arcana
from arcana.utils import ExitStack
from nipype.interfaces.base import (
traits, DynamicTraitedSpec, Undefined, File, Directory,
BaseInterface, isdefined)
from itertools import chain
from copy import copy
from arcana.utils import PATH_SUFFIX, FIELD_SUFFIX, CHECKSUM_SUFFIX
from arcana.pipeline.provenance import Record
from arcana.exceptions import ArcanaError, ArcanaDesignError
import logging
logger = logging.getLogger('arcana')
# Trait accepted for path-valued (fileset) connections: an existing file
# or an existing directory.
PATH_TRAIT = traits.Either(File(exists=True), Directory(exists=True))

# Trait accepted for field values: scalar int/float/str or a uniform list
# of one of those types.
FIELD_TRAIT = traits.Either(traits.Int, traits.Float, traits.Str,
                            traits.List(traits.Int), traits.List(traits.Float),
                            traits.List(traits.Str))

# str -> str mapping; presumably relative file path -> checksum string for
# a fileset -- TODO confirm against Fileset.checksums.
CHECKSUM_TRAIT = traits.Dict(traits.Str(), traits.Str())
# Trait for checksums that may be joined over iterators
JOINED_CHECKSUM_TRAIT = traits.Either(
    CHECKSUM_TRAIT, traits.List(CHECKSUM_TRAIT),
    traits.List(traits.List(CHECKSUM_TRAIT)))
class RepositoryInterface(BaseInterface):
    """
    Common base for nipype interfaces that read from or write to Arcana
    repositories (sources and sinks).

    Parameters
    ----------
    collections : iterable of FilesetCollection | FieldCollection
        The collections of filesets and fields this interface will
        source/sink.  All collections must share the same frequency
        (an ArcanaError is raised otherwise).

    NOTE(review): the previous docstring described ``infields``/``outfields``
    parameters (copied from nipype's IOBase); this class takes neither.
    """

    def __init__(self, collections):
        super(RepositoryInterface, self).__init__()
        # Protect against iterators
        collections = list(collections)
        # Check for consistent frequencies in collections
        frequencies = set(c.frequency for c in collections)
        if len(frequencies) > 1:
            raise ArcanaError(
                "Attempting to sink multiple frequencies across collections {}"
                .format(', '.join(str(c) for c in collections)))
        elif frequencies:
            # NB: Exclude very rare case where pipeline doesn't have inputs,
            # would only really happen in unittests
            self._frequency = next(iter(frequencies))
        # Extract set of repositories used to source/sink from/to
        self.repositories = set(chain(*(
            (i.repository for i in c if i.repository is not None)
            for c in collections)))
        # Segregate into fileset and field collections
        self.fileset_collections = [c for c in collections if c.is_fileset]
        self.field_collections = [c for c in collections if c.is_field]

    # NOTE(review): __eq__ is defined without __hash__, which makes
    # instances unhashable on Python 3 -- confirm this is intended.
    def __eq__(self, other):
        # Interfaces are equal iff they handle the same collections.
        try:
            return (
                self.fileset_collections == other.fileset_collections and
                self.field_collections == other.field_collections)
        except AttributeError:
            return False

    def __repr__(self):
        return "{}(filesets={}, fields={})".format(
            type(self).__name__, self.fileset_collections,
            self.field_collections)

    def __ne__(self, other):
        return not self == other

    def _run_interface(self, runtime, *args, **kwargs):
        # No work happens at run time; the sourcing/sinking is done in the
        # subclasses' _list_outputs.
        return runtime

    @property
    def collections(self):
        """All fileset and field collections handled by this interface."""
        return chain(self.fileset_collections, self.field_collections)

    @property
    def frequency(self):
        """Common frequency of the collections (set in __init__)."""
        return self._frequency

    @classmethod
    def _add_trait(cls, spec, name, trait_type):
        """Dynamically add an (initially Undefined) trait to *spec*."""
        spec.add_trait(name, trait_type)
        spec.trait_set(trait_change_notify=False, **{name: Undefined})
        # Access the trait (not sure why but this is done in add_traits
        # so I have also done it here
        getattr(spec, name)

    @classmethod
    def field_trait(cls, field):
        """Return the trait type matching *field* (a list trait when the
        field is an array)."""
        if field.array:
            trait = traits.List(field.dtype)
        else:
            trait = field.dtype
        return trait
class RepositorySpec(DynamicTraitedSpec):
    """
    Base class for input and output specifications for repository source
    and sink interfaces
    """
    # Iterator IDs selecting which node of the study tree is processed.
    subject_id = traits.Str(desc="The subject ID")
    visit_id = traits.Str(desc="The visit ID")
class RepositorySourceInputSpec(RepositorySpec):
    """
    Input specification for repository source interfaces.
    """
    # Dummy input: never read by the source itself, it only creates
    # workflow edges that force prerequisite pipelines to run first.
    prereqs = traits.List(
        desc=("A list of lists of iterator IDs used in prerequisite pipelines."
              " Only passed here to ensure that prerequisites are processed "
              "before this source is run (so that their outputs exist in the "
              "repository)"))
class RepositorySource(RepositoryInterface):
    """
    Retrieves filesets and fields from a repository and exposes them as
    dynamically-created output traits.

    Parameters
    ----------
    collections : iterable of FilesetCollection | FieldCollection
        All filesets and fields to be extracted from the repository
        (passed through to RepositoryInterface.__init__).
    """
    input_spec = RepositorySourceInputSpec
    output_spec = RepositorySpec
    # Always rerun so the source picks up any changes in the repository.
    _always_run = True

    def _outputs(self):
        """Add an output trait per fileset (path + checksums) and per
        field (value) to be sourced."""
        outputs = super(RepositorySource, self)._outputs()
        # Add traits for filesets to source and their checksums
        for fileset_collection in self.fileset_collections:
            self._add_trait(outputs,
                            fileset_collection.name + PATH_SUFFIX, PATH_TRAIT)
            self._add_trait(outputs,
                            fileset_collection.name + CHECKSUM_SUFFIX,
                            CHECKSUM_TRAIT)
        # Add traits for fields to source
        for field_collection in self.field_collections:
            self._add_trait(outputs,
                            field_collection.name + FIELD_SUFFIX,
                            self.field_trait(field_collection))
        return outputs

    def _list_outputs(self):
        outputs = self.output_spec().get()
        # Undefined iterator IDs are passed through as None so the
        # collections can select the matching item.
        subject_id = (self.inputs.subject_id
                      if isdefined(self.inputs.subject_id) else None)
        visit_id = (self.inputs.visit_id
                    if isdefined(self.inputs.visit_id) else None)
        outputs['subject_id'] = self.inputs.subject_id
        outputs['visit_id'] = self.inputs.visit_id
        # Source filesets
        with ExitStack() as stack:
            # Connect to set of repositories that the collections come from
            for repository in self.repositories:
                stack.enter_context(repository)
            for fileset_collection in self.fileset_collections:
                fileset = fileset_collection.item(subject_id, visit_id)
                fileset.get()
                outputs[fileset_collection.name + PATH_SUFFIX] = fileset.path
                outputs[fileset_collection.name +
                        CHECKSUM_SUFFIX] = fileset.checksums
            for field_collection in self.field_collections:
                field = field_collection.item(subject_id, visit_id)
                field.get()
                outputs[field_collection.name + FIELD_SUFFIX] = field.value
        return outputs
class RepositorySinkOutputSpec(DynamicTraitedSpec):
    # Single aggregate output whose only purpose (per desc) is to give
    # downstream "deiterator"/"final" nodes something to connect to.
    checksums = traits.Either(
        traits.Dict, FIELD_TRAIT,
        desc=("Provenance information sinked with files and fields. Note that"
              "at this stage it is only used as something to connect to the "
              "\"deiterators\" and eventually the \"final\" node after the "
              "pipeline outputs have been sunk"))
class RepositorySink(RepositoryInterface):
    """
    Interface used to sink derivatives into the output repository

    Parameters
    ----------
    collections : *Collection
        The collections of Field and Fileset objects to insert into the
        outputs repositor(y|ies)
    pipeline : arcana.pipeline.Pipeline
        The pipeline that has produced the outputs to sink
    required : list[str]
        Names of derivatives that are required by downstream nodes. Any
        required derivatives that arrive undefined will raise an
        ArcanaDesignError.
    """
    input_spec = RepositorySpec
    output_spec = RepositorySinkOutputSpec

    def __init__(self, collections, pipeline, required=()):
        super(RepositorySink, self).__init__(collections)
        # Add traits for filesets to sink
        for fileset_collection in self.fileset_collections:
            self._add_trait(self.inputs,
                            fileset_collection.name + PATH_SUFFIX,
                            PATH_TRAIT)
        # Add traits for fields to sink
        for field_collection in self.field_collections:
            self._add_trait(self.inputs,
                            field_collection.name + FIELD_SUFFIX,
                            self.field_trait(field_collection))
        # Add traits for checksums/values of pipeline inputs
        self._pipeline_input_filesets = []
        self._pipeline_input_fields = []
        for inpt in pipeline.inputs:
            if inpt.is_fileset:
                # JOINED_CHECKSUM_TRAIT already allows the (nested) list
                # variants produced by join nodes.
                trait_t = JOINED_CHECKSUM_TRAIT
            else:
                trait_t = self.field_trait(inpt)
                # Allow singly/doubly joined values for fields too.
                trait_t = traits.Either(trait_t, traits.List(trait_t),
                                        traits.List(traits.List(trait_t)))
            self._add_trait(self.inputs, inpt.checksum_suffixed_name, trait_t)
            if inpt.is_fileset:
                self._pipeline_input_filesets.append(inpt.name)
            elif inpt.is_field:
                self._pipeline_input_fields.append(inpt.name)
            else:
                assert False
        # Provenance details copied from the pipeline for the sunk record.
        self._prov = pipeline.prov
        self._pipeline_name = pipeline.name
        self._from_study = pipeline.study.name
        self._required = required

    def _list_outputs(self):
        outputs = self.output_spec().get()
        # Connect iterables (i.e. subject_id and visit_id)
        subject_id = (self.inputs.subject_id
                      if isdefined(self.inputs.subject_id) else None)
        visit_id = (self.inputs.visit_id
                    if isdefined(self.inputs.visit_id) else None)
        missing_inputs = []
        # Collate input checksums into a dictionary
        input_checksums = {n: getattr(self.inputs, n + CHECKSUM_SUFFIX)
                           for n in self._pipeline_input_filesets}
        input_checksums.update({n: getattr(self.inputs, n + FIELD_SUFFIX)
                                for n in self._pipeline_input_fields})
        output_checksums = {}
        with ExitStack() as stack:
            # Connect to set of repositories that the collections come from
            for repository in self.repositories:
                stack.enter_context(repository)
            for fileset_collection in self.fileset_collections:
                fileset = fileset_collection.item(
                    subject_id,
                    visit_id)
                path = getattr(self.inputs,
                               fileset_collection.name + PATH_SUFFIX)
                if not isdefined(path):
                    # Undefined inputs are only an error if downstream
                    # nodes declared them required.
                    if fileset.name in self._required:
                        missing_inputs.append(fileset.name)
                    continue  # skip the upload for this fileset
                fileset.path = path  # Push to repository
                output_checksums[fileset.name] = fileset.checksums
            for field_collection in self.field_collections:
                field = field_collection.item(
                    subject_id,
                    visit_id)
                value = getattr(self.inputs,
                                field_collection.name + FIELD_SUFFIX)
                if not isdefined(value):
                    if field.name in self._required:
                        missing_inputs.append(field.name)
                    continue  # skip the upload for this field
                field.value = value  # Push to repository
                output_checksums[field.name] = field.value
            # Add input and output checksums to provenance record and sink to
            # all repositories that have received data (typically only one)
            prov = copy(self._prov)
            prov['inputs'] = input_checksums
            prov['outputs'] = output_checksums
            record = Record(self._pipeline_name, self.frequency, subject_id,
                            visit_id, self._from_study, prov)
            for repository in self.repositories:
                repository.put_record(record)
        if missing_inputs:
            raise ArcanaDesignError(
                "Required derivatives '{}' to were not created by upstream "
                "nodes connected to sink {}".format(
                    "', '".join(missing_inputs), self))
        # Return cache file paths
        outputs['checksums'] = output_checksums
        return outputs
| 2.171875 | 2 |
packages/syft/tests/syft/lib/torch/tensor/grad_test.py | jackbandy/PySyft | 0 | 12760488 | <reponame>jackbandy/PySyft<filename>packages/syft/tests/syft/lib/torch/tensor/grad_test.py
# third party
# third party
import torch
# syft absolute
import syft as sy
def torch_grad_test(client: sy.VirtualMachineClient) -> None:
    """Check autograd through tensors created on a syft VM client.

    With x = 1 everywhere: v = x + 2 = 3, y = v**2 = 9, gt = 16 - 0.5 = 15.5.
    MSELoss averages over the 4 elements, so dL/dy = 2*(9 - 15.5)/4 = -3.25
    and dy/dx = 2*v = 6, giving dL/dx = -19.5 per element.
    """
    x = client.torch.Tensor([[1, 1], [1, 1]])
    x.requires_grad = True
    # Ground truth: 15.5 in every cell.
    gt = client.torch.Tensor([[1, 1], [1, 1]]) * 16 - 0.5
    loss_fn = client.torch.nn.MSELoss()
    v = x + 2
    y = v**2
    loss = loss_fn(y, gt)
    loss.backward()
    # .get() retrieves the client-side tensors for comparison with local torch.
    assert x.grad.get().equal(torch.Tensor([[-19.5, -19.5], [-19.5, -19.5]]))
    # backward() must not have altered the data itself.
    assert x.data.get().equal(torch.Tensor([[1, 1], [1, 1]]))
| 1.992188 | 2 |
cw/__main__.py | aarondewindt/cw | 1 | 12760489 | <filename>cw/__main__.py
import argparse
from cw.generate_cython_pyi import Pyx2PyiCLI
from cw.mp.main import MPCLI
def main():
    """Entry point for the ``cw`` command line tools."""
    arg_parser = argparse.ArgumentParser(
        prog="cw",
        description="CW command line tools.",
    )
    sub_parsers = arg_parser.add_subparsers()
    # Each sub-command registers itself on the sub-parser collection.
    for cli in (Pyx2PyiCLI, MPCLI):
        cli.configure_arg_parser(sub_parsers)
    parsed = arg_parser.parse_args()
    if hasattr(parsed, "func"):
        parsed.func(parsed)
    else:
        # No sub-command selected: show the usage overview instead.
        arg_parser.print_help()


if __name__ == '__main__':
    main()
| 2.421875 | 2 |
base/file_utils.py | siddiquims/bioinformatics-learning | 0 | 12760490 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
file_utils module to hold simple bioinformatics course text file parsing class
"""
INPUT_STRING = 'Input'
OUTPUT_STRING = 'Output'


class FileUtil(object):
    """
    Holds I/O values parsed from course text files for example problems.

    Initialized with a text file; parses the 'Input' and 'Output' markers
    and stores the whitespace-separated tokens found after each marker.

    Args:
        filename (str): path of the supplied text file

    Attributes:
        inputs (list): input tokens from the supplied text file
        outputs (list): expected output tokens (empty when no 'Output'
            marker is present)
    """

    def __init__(self, filename):
        self.parse_file(filename)

    def parse_file(self, filename):
        """
        Parse a course text file and populate ``self.inputs`` and
        ``self.outputs`` (returns None; the previous docstring wrongly
        claimed the lists were returned).

        Tokens after the 'Input' marker and before the 'Output' marker go
        to ``inputs``; tokens after 'Output' go to ``outputs``.  If the
        'Input' marker is absent, everything before 'Output' is input; if
        neither marker is found, every token is input.

        Args:
            filename (str): path of the supplied text file
        """
        with open(filename, 'r') as f:
            raw_text = f.read()
        # str.split() with no argument splits on any whitespace run, so
        # tabs, repeated spaces and blank lines no longer produce empty
        # tokens (the original split(' ') did).
        raw_args = raw_text.split()
        try:
            input_index = raw_args.index(INPUT_STRING)
        except ValueError:
            # No 'Input' marker: -1 + 1 == 0, i.e. start from the front.
            input_index = -1
        try:
            output_index = raw_args.index(OUTPUT_STRING)
        except ValueError:
            output_index = len(raw_args)
        self.inputs = raw_args[input_index + 1:output_index]
        self.outputs = raw_args[output_index + 1:]
| 3.515625 | 4 |
scripts/make_users.py | vsoch/CogatPheno | 0 | 12760491 | from django.contrib.auth.models import User
from userroles.models import set_user_role
from userroles import roles
# Read in some file with usernames, emails, etc.
# NOTE(review): the values below are placeholders; in practice they should
# come from a file or environment, never be hard-coded in the script.
username = "tmp"
email = "<EMAIL>"
password = "<PASSWORD>"
# create_user() saves the new User row immediately (see comment below).
user = User.objects.create_user(username, email, password)
# At this point, user is a User object that has already been saved
# to the database. You can continue to change its attributes
# if you want to change other fields.
# Now set user role (other available roles listed for reference):
set_user_role(user, roles.question_editor)  # roles.question_editor
# roles.assessment_editor
# roles.behavior_editor
| 2.46875 | 2 |
wflow/openda_bmi/openda/bmi/thrift/BMIService.py | quanpands/wflow | 0 | 12760492 | <gh_stars>0
#
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface(object):
    """Thrift-generated abstract interface for the BMI (Basic Model
    Interface) service.

    NOTE: this class is emitted by the Thrift compiler (0.9.0) -- do not
    hand-edit the stubs; regenerate from the .thrift definition instead.
    Concrete handlers override these methods; the `Parameters` docstrings
    below are produced by the generator.
    """

    def initialize(self, file):
        """
        Parameters:
        - file
        """

    def update(self,):
        pass

    def update_until(self, time):
        """
        Parameters:
        - time
        """

    def update_frac(self, frac):
        """
        Parameters:
        - frac
        """

    def finalize_model(self,):
        pass

    def get_component_name(self,):
        pass

    def get_input_var_names(self,):
        pass

    def get_output_var_names(self,):
        pass

    def get_var_type(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_var_units(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_var_rank(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_var_size(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_var_nbytes(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_start_time(self,):
        pass

    def get_current_time(self,):
        pass

    def get_end_time(self,):
        pass

    def get_time_step(self,):
        pass

    def get_time_units(self,):
        pass

    def get_value(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_value_at_indices(self, long_var_name, inds):
        """
        Parameters:
        - long_var_name
        - inds
        """

    def set_value(self, long_var_name, src):
        """
        Parameters:
        - long_var_name
        - src
        """

    def set_value_at_indices(self, long_var_name, inds, src):
        """
        Parameters:
        - long_var_name
        - inds
        - src
        """

    def get_grid_type(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_grid_shape(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_grid_spacing(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_grid_origin(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_grid_x(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_grid_y(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_grid_z(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_grid_connectivity(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def get_grid_offset(self, long_var_name):
        """
        Parameters:
        - long_var_name
        """

    def initialize_config(self, file):
        """
        Parameters:
        - file
        """

    def initialize_model(self,):
        pass

    def set_start_time(self, start_time):
        """
        Parameters:
        - start_time
        """

    def set_end_time(self, end_time):
        """
        Parameters:
        - end_time
        """

    def get_attribute_names(self,):
        pass

    def get_attribute_value(self, attribute_name):
        """
        Parameters:
        - attribute_name
        """

    def set_attribute_value(self, attribute_name, attribute_value):
        """
        Parameters:
        - attribute_name
        - attribute_value
        """

    def save_state(self, destination_directory):
        """
        Parameters:
        - destination_directory
        """

    def load_state(self, source_directory):
        """
        Parameters:
        - source_directory
        """
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def initialize(self, file):
"""
Parameters:
- file
"""
self.send_initialize(file)
self.recv_initialize()
def send_initialize(self, file):
self._oprot.writeMessageBegin("initialize", TMessageType.CALL, self._seqid)
args = initialize_args()
args.file = file
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_initialize(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = initialize_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.error is not None:
raise result.error
return
def update(self,):
self.send_update()
self.recv_update()
def send_update(self,):
self._oprot.writeMessageBegin("update", TMessageType.CALL, self._seqid)
args = update_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_update(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = update_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.error is not None:
raise result.error
return
def update_until(self, time):
"""
Parameters:
- time
"""
self.send_update_until(time)
self.recv_update_until()
def send_update_until(self, time):
self._oprot.writeMessageBegin("update_until", TMessageType.CALL, self._seqid)
args = update_until_args()
args.time = time
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_update_until(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = update_until_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.error is not None:
raise result.error
return
def update_frac(self, frac):
"""
Parameters:
- frac
"""
self.send_update_frac(frac)
self.recv_update_frac()
def send_update_frac(self, frac):
self._oprot.writeMessageBegin("update_frac", TMessageType.CALL, self._seqid)
args = update_frac_args()
args.frac = frac
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_update_frac(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = update_frac_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.error is not None:
raise result.error
return
def finalize_model(self,):
self.send_finalize_model()
self.recv_finalize_model()
def send_finalize_model(self,):
self._oprot.writeMessageBegin("finalize_model", TMessageType.CALL, self._seqid)
args = finalize_model_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_finalize_model(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = finalize_model_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.error is not None:
raise result.error
return
def get_component_name(self,):
self.send_get_component_name()
return self.recv_get_component_name()
def send_get_component_name(self,):
self._oprot.writeMessageBegin(
"get_component_name", TMessageType.CALL, self._seqid
)
args = get_component_name_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_component_name(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_component_name_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.error is not None:
raise result.error
raise TApplicationException(
TApplicationException.MISSING_RESULT,
"get_component_name failed: unknown result",
)
def get_input_var_names(self,):
self.send_get_input_var_names()
return self.recv_get_input_var_names()
def send_get_input_var_names(self,):
self._oprot.writeMessageBegin(
"get_input_var_names", TMessageType.CALL, self._seqid
)
args = get_input_var_names_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_input_var_names(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_input_var_names_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT,
"get_input_var_names failed: unknown result",
)
def get_output_var_names(self,):
self.send_get_output_var_names()
return self.recv_get_output_var_names()
def send_get_output_var_names(self,):
self._oprot.writeMessageBegin(
"get_output_var_names", TMessageType.CALL, self._seqid
)
args = get_output_var_names_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_output_var_names(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_output_var_names_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT,
"get_output_var_names failed: unknown result",
)
def get_var_type(self, long_var_name):
"""
Parameters:
- long_var_name
"""
self.send_get_var_type(long_var_name)
return self.recv_get_var_type()
def send_get_var_type(self, long_var_name):
self._oprot.writeMessageBegin("get_var_type", TMessageType.CALL, self._seqid)
args = get_var_type_args()
args.long_var_name = long_var_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_var_type(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_var_type_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT, "get_var_type failed: unknown result"
)
def get_var_units(self, long_var_name):
"""
Parameters:
- long_var_name
"""
self.send_get_var_units(long_var_name)
return self.recv_get_var_units()
def send_get_var_units(self, long_var_name):
self._oprot.writeMessageBegin("get_var_units", TMessageType.CALL, self._seqid)
args = get_var_units_args()
args.long_var_name = long_var_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_var_units(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_var_units_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT, "get_var_units failed: unknown result"
)
def get_var_rank(self, long_var_name):
"""
Parameters:
- long_var_name
"""
self.send_get_var_rank(long_var_name)
return self.recv_get_var_rank()
def send_get_var_rank(self, long_var_name):
self._oprot.writeMessageBegin("get_var_rank", TMessageType.CALL, self._seqid)
args = get_var_rank_args()
args.long_var_name = long_var_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_var_rank(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_var_rank_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT, "get_var_rank failed: unknown result"
)
def get_var_size(self, long_var_name):
"""
Parameters:
- long_var_name
"""
self.send_get_var_size(long_var_name)
return self.recv_get_var_size()
def send_get_var_size(self, long_var_name):
self._oprot.writeMessageBegin("get_var_size", TMessageType.CALL, self._seqid)
args = get_var_size_args()
args.long_var_name = long_var_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_var_size(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_var_size_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT, "get_var_size failed: unknown result"
)
def get_var_nbytes(self, long_var_name):
"""
Parameters:
- long_var_name
"""
self.send_get_var_nbytes(long_var_name)
return self.recv_get_var_nbytes()
def send_get_var_nbytes(self, long_var_name):
self._oprot.writeMessageBegin("get_var_nbytes", TMessageType.CALL, self._seqid)
args = get_var_nbytes_args()
args.long_var_name = long_var_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_var_nbytes(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_var_nbytes_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT,
"get_var_nbytes failed: unknown result",
)
def get_start_time(self,):
self.send_get_start_time()
return self.recv_get_start_time()
def send_get_start_time(self,):
self._oprot.writeMessageBegin("get_start_time", TMessageType.CALL, self._seqid)
args = get_start_time_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_start_time(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_start_time_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT,
"get_start_time failed: unknown result",
)
def get_current_time(self,):
self.send_get_current_time()
return self.recv_get_current_time()
def send_get_current_time(self,):
self._oprot.writeMessageBegin(
"get_current_time", TMessageType.CALL, self._seqid
)
args = get_current_time_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_current_time(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_current_time_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT,
"get_current_time failed: unknown result",
)
def get_end_time(self,):
self.send_get_end_time()
return self.recv_get_end_time()
def send_get_end_time(self,):
self._oprot.writeMessageBegin("get_end_time", TMessageType.CALL, self._seqid)
args = get_end_time_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_end_time(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_end_time_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT, "get_end_time failed: unknown result"
)
def get_time_step(self,):
self.send_get_time_step()
return self.recv_get_time_step()
def send_get_time_step(self,):
self._oprot.writeMessageBegin("get_time_step", TMessageType.CALL, self._seqid)
args = get_time_step_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_time_step(self,):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_time_step_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(
TApplicationException.MISSING_RESULT, "get_time_step failed: unknown result"
)
    def get_time_units(self,):
        """Blocking RPC: return the unit of time used by the model clock."""
        self.send_get_time_units()
        return self.recv_get_time_units()
    def send_get_time_units(self,):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin("get_time_units", TMessageType.CALL, self._seqid)
        args = get_time_units_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_time_units(self,):
        # Read the reply; server-side faults arrive as a TApplicationException frame.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_time_units_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        # Reply carried neither a success value nor a declared exception.
        raise TApplicationException(
            TApplicationException.MISSING_RESULT,
            "get_time_units failed: unknown result",
        )
    def get_value(self, long_var_name):
        """Blocking RPC: fetch the current value of the named model variable.

        Parameters:
        - long_var_name
        """
        self.send_get_value(long_var_name)
        return self.recv_get_value()
    def send_get_value(self, long_var_name):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin("get_value", TMessageType.CALL, self._seqid)
        args = get_value_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_value(self,):
        # Read the reply; re-raise any declared fault the server put in result.error.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_value_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.error is not None:
            raise result.error
        raise TApplicationException(
            TApplicationException.MISSING_RESULT, "get_value failed: unknown result"
        )
    def get_value_at_indices(self, long_var_name, inds):
        """Blocking RPC: fetch the variable's values at the given indices.

        Parameters:
        - long_var_name
        - inds
        """
        self.send_get_value_at_indices(long_var_name, inds)
        return self.recv_get_value_at_indices()
    def send_get_value_at_indices(self, long_var_name, inds):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin(
            "get_value_at_indices", TMessageType.CALL, self._seqid
        )
        args = get_value_at_indices_args()
        args.long_var_name = long_var_name
        args.inds = inds
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_value_at_indices(self,):
        # Read the reply; re-raise any declared fault the server put in result.error.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_value_at_indices_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.error is not None:
            raise result.error
        raise TApplicationException(
            TApplicationException.MISSING_RESULT,
            "get_value_at_indices failed: unknown result",
        )
    def set_value(self, long_var_name, src):
        """Blocking RPC: overwrite the named model variable with *src*.

        Parameters:
        - long_var_name
        - src
        """
        self.send_set_value(long_var_name, src)
        self.recv_set_value()
    def send_set_value(self, long_var_name, src):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin("set_value", TMessageType.CALL, self._seqid)
        args = set_value_args()
        args.long_var_name = long_var_name
        args.src = src
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_set_value(self,):
        # Void call: drains the reply and surfaces faults; returns nothing on success.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = set_value_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.error is not None:
            raise result.error
        return
    def set_value_at_indices(self, long_var_name, inds, src):
        """Blocking RPC: write *src* into the variable at the given indices.

        Parameters:
        - long_var_name
        - inds
        - src
        """
        self.send_set_value_at_indices(long_var_name, inds, src)
        self.recv_set_value_at_indices()
    def send_set_value_at_indices(self, long_var_name, inds, src):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin(
            "set_value_at_indices", TMessageType.CALL, self._seqid
        )
        args = set_value_at_indices_args()
        args.long_var_name = long_var_name
        args.inds = inds
        args.src = src
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_set_value_at_indices(self,):
        # Void call: drains the reply and surfaces faults; returns nothing on success.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = set_value_at_indices_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.error is not None:
            raise result.error
        return
    def get_grid_type(self, long_var_name):
        """Blocking RPC: return the grid type for the named variable.

        Parameters:
        - long_var_name
        """
        self.send_get_grid_type(long_var_name)
        return self.recv_get_grid_type()
    def send_get_grid_type(self, long_var_name):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin("get_grid_type", TMessageType.CALL, self._seqid)
        args = get_grid_type_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_grid_type(self,):
        # This call declares a fault: re-raise result.error if the server set it.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_grid_type_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.error is not None:
            raise result.error
        raise TApplicationException(
            TApplicationException.MISSING_RESULT, "get_grid_type failed: unknown result"
        )
    def get_grid_shape(self, long_var_name):
        """Blocking RPC: return the grid shape for the named variable.

        Parameters:
        - long_var_name
        """
        self.send_get_grid_shape(long_var_name)
        return self.recv_get_grid_shape()
    def send_get_grid_shape(self, long_var_name):
        self._oprot.writeMessageBegin("get_grid_shape", TMessageType.CALL, self._seqid)
        args = get_grid_shape_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_grid_shape(self,):
        # No declared fault for this call: only success or MISSING_RESULT.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_grid_shape_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(
            TApplicationException.MISSING_RESULT,
            "get_grid_shape failed: unknown result",
        )
    def get_grid_spacing(self, long_var_name):
        """Blocking RPC: return the grid spacing for the named variable.

        Parameters:
        - long_var_name
        """
        self.send_get_grid_spacing(long_var_name)
        return self.recv_get_grid_spacing()
    def send_get_grid_spacing(self, long_var_name):
        self._oprot.writeMessageBegin(
            "get_grid_spacing", TMessageType.CALL, self._seqid
        )
        args = get_grid_spacing_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_grid_spacing(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_grid_spacing_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(
            TApplicationException.MISSING_RESULT,
            "get_grid_spacing failed: unknown result",
        )
    def get_grid_origin(self, long_var_name):
        """Blocking RPC: return the grid origin for the named variable.

        Parameters:
        - long_var_name
        """
        self.send_get_grid_origin(long_var_name)
        return self.recv_get_grid_origin()
    def send_get_grid_origin(self, long_var_name):
        self._oprot.writeMessageBegin("get_grid_origin", TMessageType.CALL, self._seqid)
        args = get_grid_origin_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_grid_origin(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_grid_origin_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(
            TApplicationException.MISSING_RESULT,
            "get_grid_origin failed: unknown result",
        )
    def get_grid_x(self, long_var_name):
        """Blocking RPC: return the grid x-coordinates for the named variable.

        Parameters:
        - long_var_name
        """
        self.send_get_grid_x(long_var_name)
        return self.recv_get_grid_x()
    def send_get_grid_x(self, long_var_name):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin("get_grid_x", TMessageType.CALL, self._seqid)
        args = get_grid_x_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_grid_x(self,):
        # No declared fault for this call: only success or MISSING_RESULT.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_grid_x_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(
            TApplicationException.MISSING_RESULT, "get_grid_x failed: unknown result"
        )
    def get_grid_y(self, long_var_name):
        """Blocking RPC: return the grid y-coordinates for the named variable.

        Parameters:
        - long_var_name
        """
        self.send_get_grid_y(long_var_name)
        return self.recv_get_grid_y()
    def send_get_grid_y(self, long_var_name):
        self._oprot.writeMessageBegin("get_grid_y", TMessageType.CALL, self._seqid)
        args = get_grid_y_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_grid_y(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_grid_y_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(
            TApplicationException.MISSING_RESULT, "get_grid_y failed: unknown result"
        )
    def get_grid_z(self, long_var_name):
        """Blocking RPC: return the grid z-coordinates for the named variable.

        Parameters:
        - long_var_name
        """
        self.send_get_grid_z(long_var_name)
        return self.recv_get_grid_z()
    def send_get_grid_z(self, long_var_name):
        self._oprot.writeMessageBegin("get_grid_z", TMessageType.CALL, self._seqid)
        args = get_grid_z_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_grid_z(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_grid_z_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(
            TApplicationException.MISSING_RESULT, "get_grid_z failed: unknown result"
        )
    def get_grid_connectivity(self, long_var_name):
        """Blocking RPC: return the grid connectivity for the named variable.

        Parameters:
        - long_var_name
        """
        self.send_get_grid_connectivity(long_var_name)
        return self.recv_get_grid_connectivity()
    def send_get_grid_connectivity(self, long_var_name):
        self._oprot.writeMessageBegin(
            "get_grid_connectivity", TMessageType.CALL, self._seqid
        )
        args = get_grid_connectivity_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_grid_connectivity(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_grid_connectivity_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(
            TApplicationException.MISSING_RESULT,
            "get_grid_connectivity failed: unknown result",
        )
    def get_grid_offset(self, long_var_name):
        """Blocking RPC: return the grid offset for the named variable.

        Parameters:
        - long_var_name
        """
        self.send_get_grid_offset(long_var_name)
        return self.recv_get_grid_offset()
    def send_get_grid_offset(self, long_var_name):
        self._oprot.writeMessageBegin("get_grid_offset", TMessageType.CALL, self._seqid)
        args = get_grid_offset_args()
        args.long_var_name = long_var_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_grid_offset(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_grid_offset_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(
            TApplicationException.MISSING_RESULT,
            "get_grid_offset failed: unknown result",
        )
    def initialize_config(self, file):
        """Blocking RPC: have the remote model read its configuration from *file*.

        Parameters:
        - file
        """
        self.send_initialize_config(file)
        self.recv_initialize_config()
    def send_initialize_config(self, file):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin(
            "initialize_config", TMessageType.CALL, self._seqid
        )
        args = initialize_config_args()
        args.file = file
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_initialize_config(self,):
        # Void call: drains the reply and surfaces faults; returns nothing on success.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = initialize_config_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.error is not None:
            raise result.error
        return
    def initialize_model(self,):
        """Blocking RPC: initialize the remote model (no arguments)."""
        self.send_initialize_model()
        self.recv_initialize_model()
    def send_initialize_model(self,):
        self._oprot.writeMessageBegin(
            "initialize_model", TMessageType.CALL, self._seqid
        )
        args = initialize_model_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_initialize_model(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = initialize_model_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.error is not None:
            raise result.error
        return
    def set_start_time(self, start_time):
        """Blocking RPC: set the model's start time.

        Parameters:
        - start_time
        """
        self.send_set_start_time(start_time)
        self.recv_set_start_time()
    def send_set_start_time(self, start_time):
        self._oprot.writeMessageBegin("set_start_time", TMessageType.CALL, self._seqid)
        args = set_start_time_args()
        args.start_time = start_time
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_set_start_time(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = set_start_time_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.error is not None:
            raise result.error
        return
    def set_end_time(self, end_time):
        """Blocking RPC: set the model's end time.

        Parameters:
        - end_time
        """
        self.send_set_end_time(end_time)
        self.recv_set_end_time()
    def send_set_end_time(self, end_time):
        self._oprot.writeMessageBegin("set_end_time", TMessageType.CALL, self._seqid)
        args = set_end_time_args()
        args.end_time = end_time
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_set_end_time(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = set_end_time_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.error is not None:
            raise result.error
        return
    def get_attribute_names(self,):
        """Blocking RPC: return the model's attribute names."""
        self.send_get_attribute_names()
        return self.recv_get_attribute_names()
    def send_get_attribute_names(self,):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin(
            "get_attribute_names", TMessageType.CALL, self._seqid
        )
        args = get_attribute_names_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_attribute_names(self,):
        # No declared fault for this call: only success or MISSING_RESULT.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_attribute_names_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(
            TApplicationException.MISSING_RESULT,
            "get_attribute_names failed: unknown result",
        )
    def get_attribute_value(self, attribute_name):
        """Blocking RPC: return the value of the named model attribute.

        Parameters:
        - attribute_name
        """
        self.send_get_attribute_value(attribute_name)
        return self.recv_get_attribute_value()
    def send_get_attribute_value(self, attribute_name):
        self._oprot.writeMessageBegin(
            "get_attribute_value", TMessageType.CALL, self._seqid
        )
        args = get_attribute_value_args()
        args.attribute_name = attribute_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_attribute_value(self,):
        # This call declares a fault: re-raise result.error if the server set it.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_attribute_value_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.error is not None:
            raise result.error
        raise TApplicationException(
            TApplicationException.MISSING_RESULT,
            "get_attribute_value failed: unknown result",
        )
    def set_attribute_value(self, attribute_name, attribute_value):
        """Blocking RPC: set the named model attribute to the given value.

        Parameters:
        - attribute_name
        - attribute_value
        """
        self.send_set_attribute_value(attribute_name, attribute_value)
        self.recv_set_attribute_value()
    def send_set_attribute_value(self, attribute_name, attribute_value):
        self._oprot.writeMessageBegin(
            "set_attribute_value", TMessageType.CALL, self._seqid
        )
        args = set_attribute_value_args()
        args.attribute_name = attribute_name
        args.attribute_value = attribute_value
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_set_attribute_value(self,):
        # Void call: drains the reply and surfaces faults; returns nothing on success.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = set_attribute_value_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.error is not None:
            raise result.error
        return
    def save_state(self, destination_directory):
        """Blocking RPC: ask the model to persist its state under *destination_directory*.

        Parameters:
        - destination_directory
        """
        self.send_save_state(destination_directory)
        self.recv_save_state()
    def send_save_state(self, destination_directory):
        # Serialise one CALL frame and flush it over the transport.
        self._oprot.writeMessageBegin("save_state", TMessageType.CALL, self._seqid)
        args = save_state_args()
        args.destination_directory = destination_directory
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_save_state(self,):
        # Void call: drains the reply and surfaces faults; returns nothing on success.
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = save_state_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.error is not None:
            raise result.error
        return
    def load_state(self, source_directory):
        """Blocking RPC: ask the model to restore its state from *source_directory*.

        Parameters:
        - source_directory
        """
        self.send_load_state(source_directory)
        self.recv_load_state()
    def send_load_state(self, source_directory):
        self._oprot.writeMessageBegin("load_state", TMessageType.CALL, self._seqid)
        args = load_state_args()
        args.source_directory = source_directory
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_load_state(self,):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = load_state_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.error is not None:
            raise result.error
        return
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["initialize"] = Processor.process_initialize
self._processMap["update"] = Processor.process_update
self._processMap["update_until"] = Processor.process_update_until
self._processMap["update_frac"] = Processor.process_update_frac
self._processMap["finalize_model"] = Processor.process_finalize_model
self._processMap["get_component_name"] = Processor.process_get_component_name
self._processMap["get_input_var_names"] = Processor.process_get_input_var_names
self._processMap[
"get_output_var_names"
] = Processor.process_get_output_var_names
self._processMap["get_var_type"] = Processor.process_get_var_type
self._processMap["get_var_units"] = Processor.process_get_var_units
self._processMap["get_var_rank"] = Processor.process_get_var_rank
self._processMap["get_var_size"] = Processor.process_get_var_size
self._processMap["get_var_nbytes"] = Processor.process_get_var_nbytes
self._processMap["get_start_time"] = Processor.process_get_start_time
self._processMap["get_current_time"] = Processor.process_get_current_time
self._processMap["get_end_time"] = Processor.process_get_end_time
self._processMap["get_time_step"] = Processor.process_get_time_step
self._processMap["get_time_units"] = Processor.process_get_time_units
self._processMap["get_value"] = Processor.process_get_value
self._processMap[
"get_value_at_indices"
] = Processor.process_get_value_at_indices
self._processMap["set_value"] = Processor.process_set_value
self._processMap[
"set_value_at_indices"
] = Processor.process_set_value_at_indices
self._processMap["get_grid_type"] = Processor.process_get_grid_type
self._processMap["get_grid_shape"] = Processor.process_get_grid_shape
self._processMap["get_grid_spacing"] = Processor.process_get_grid_spacing
self._processMap["get_grid_origin"] = Processor.process_get_grid_origin
self._processMap["get_grid_x"] = Processor.process_get_grid_x
self._processMap["get_grid_y"] = Processor.process_get_grid_y
self._processMap["get_grid_z"] = Processor.process_get_grid_z
self._processMap[
"get_grid_connectivity"
] = Processor.process_get_grid_connectivity
self._processMap["get_grid_offset"] = Processor.process_get_grid_offset
self._processMap["initialize_config"] = Processor.process_initialize_config
self._processMap["initialize_model"] = Processor.process_initialize_model
self._processMap["set_start_time"] = Processor.process_set_start_time
self._processMap["set_end_time"] = Processor.process_set_end_time
self._processMap["get_attribute_names"] = Processor.process_get_attribute_names
self._processMap["get_attribute_value"] = Processor.process_get_attribute_value
self._processMap["set_attribute_value"] = Processor.process_set_attribute_value
self._processMap["save_state"] = Processor.process_save_state
self._processMap["load_state"] = Processor.process_load_state
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(
TApplicationException.UNKNOWN_METHOD, "Unknown function %s" % (name)
)
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
    def process_initialize(self, seqid, iprot, oprot):
        # Server side of initialize(): decode args, invoke the handler, reply.
        args = initialize_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = initialize_result()
        try:
            self._handler.initialize(args.file)
        except ModelException as error:
            # Declared fault: shipped back inside the result struct, not as EXCEPTION.
            result.error = error
        oprot.writeMessageBegin("initialize", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_update(self, seqid, iprot, oprot):
        # Server side of update(): void call, declared fault captured in result.error.
        args = update_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = update_result()
        try:
            self._handler.update()
        except ModelException as error:
            result.error = error
        oprot.writeMessageBegin("update", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_update_until(self, seqid, iprot, oprot):
        # Server side of update_until(time).
        args = update_until_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = update_until_result()
        try:
            self._handler.update_until(args.time)
        except ModelException as error:
            result.error = error
        oprot.writeMessageBegin("update_until", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_update_frac(self, seqid, iprot, oprot):
        # Server side of update_frac(frac).
        args = update_frac_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = update_frac_result()
        try:
            self._handler.update_frac(args.frac)
        except ModelException as error:
            result.error = error
        oprot.writeMessageBegin("update_frac", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_finalize_model(self, seqid, iprot, oprot):
        # Server side of finalize_model().
        args = finalize_model_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = finalize_model_result()
        try:
            self._handler.finalize_model()
        except ModelException as error:
            result.error = error
        oprot.writeMessageBegin("finalize_model", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_component_name(self, seqid, iprot, oprot):
        # Server side of get_component_name(): result.success carries the return value.
        args = get_component_name_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_component_name_result()
        try:
            result.success = self._handler.get_component_name()
        except ModelException as error:
            result.error = error
        oprot.writeMessageBegin("get_component_name", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_input_var_names(self, seqid, iprot, oprot):
        # No declared faults: any handler exception propagates to the server loop.
        args = get_input_var_names_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_input_var_names_result()
        result.success = self._handler.get_input_var_names()
        oprot.writeMessageBegin("get_input_var_names", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_output_var_names(self, seqid, iprot, oprot):
        # No declared faults: any handler exception propagates to the server loop.
        args = get_output_var_names_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_output_var_names_result()
        result.success = self._handler.get_output_var_names()
        oprot.writeMessageBegin("get_output_var_names", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_var_type(self, seqid, iprot, oprot):
        # Server side of get_var_type(long_var_name); no declared faults,
        # so any handler exception propagates to the server loop.
        args = get_var_type_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_var_type_result()
        result.success = self._handler.get_var_type(args.long_var_name)
        oprot.writeMessageBegin("get_var_type", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_var_units(self, seqid, iprot, oprot):
        # Server side of get_var_units(long_var_name).
        args = get_var_units_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_var_units_result()
        result.success = self._handler.get_var_units(args.long_var_name)
        oprot.writeMessageBegin("get_var_units", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_var_rank(self, seqid, iprot, oprot):
        # Server side of get_var_rank(long_var_name).
        args = get_var_rank_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_var_rank_result()
        result.success = self._handler.get_var_rank(args.long_var_name)
        oprot.writeMessageBegin("get_var_rank", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_var_size(self, seqid, iprot, oprot):
        # Server side of get_var_size(long_var_name).
        args = get_var_size_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_var_size_result()
        result.success = self._handler.get_var_size(args.long_var_name)
        oprot.writeMessageBegin("get_var_size", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_var_nbytes(self, seqid, iprot, oprot):
        # Server side of get_var_nbytes(long_var_name).
        args = get_var_nbytes_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_var_nbytes_result()
        result.success = self._handler.get_var_nbytes(args.long_var_name)
        oprot.writeMessageBegin("get_var_nbytes", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_start_time(self, seqid, iprot, oprot):
        # Server side of get_start_time().
        args = get_start_time_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_start_time_result()
        result.success = self._handler.get_start_time()
        oprot.writeMessageBegin("get_start_time", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_current_time(self, seqid, iprot, oprot):
        # Server side of get_current_time().
        args = get_current_time_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_current_time_result()
        result.success = self._handler.get_current_time()
        oprot.writeMessageBegin("get_current_time", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_end_time(self, seqid, iprot, oprot):
        # Server side of get_end_time().
        args = get_end_time_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_end_time_result()
        result.success = self._handler.get_end_time()
        oprot.writeMessageBegin("get_end_time", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_time_step(self, seqid, iprot, oprot):
        # Server side of get_time_step().
        args = get_time_step_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_time_step_result()
        result.success = self._handler.get_time_step()
        oprot.writeMessageBegin("get_time_step", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_get_time_units(self, seqid, iprot, oprot):
        # Server side of get_time_units().
        args = get_time_units_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_time_units_result()
        result.success = self._handler.get_time_units()
        oprot.writeMessageBegin("get_time_units", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
def process_get_value(self, seqid, iprot, oprot):
args = get_value_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_value_result()
try:
result.success = self._handler.get_value(args.long_var_name)
except ModelException as error:
result.error = error
oprot.writeMessageBegin("get_value", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_value_at_indices(self, seqid, iprot, oprot):
args = get_value_at_indices_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_value_at_indices_result()
try:
result.success = self._handler.get_value_at_indices(
args.long_var_name, args.inds
)
except ModelException as error:
result.error = error
oprot.writeMessageBegin("get_value_at_indices", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_set_value(self, seqid, iprot, oprot):
args = set_value_args()
args.read(iprot)
iprot.readMessageEnd()
result = set_value_result()
try:
self._handler.set_value(args.long_var_name, args.src)
except ModelException as error:
result.error = error
oprot.writeMessageBegin("set_value", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_set_value_at_indices(self, seqid, iprot, oprot):
args = set_value_at_indices_args()
args.read(iprot)
iprot.readMessageEnd()
result = set_value_at_indices_result()
try:
self._handler.set_value_at_indices(args.long_var_name, args.inds, args.src)
except ModelException as error:
result.error = error
oprot.writeMessageBegin("set_value_at_indices", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_grid_type(self, seqid, iprot, oprot):
args = get_grid_type_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_grid_type_result()
try:
result.success = self._handler.get_grid_type(args.long_var_name)
except ModelException as error:
result.error = error
oprot.writeMessageBegin("get_grid_type", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_grid_shape(self, seqid, iprot, oprot):
args = get_grid_shape_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_grid_shape_result()
result.success = self._handler.get_grid_shape(args.long_var_name)
oprot.writeMessageBegin("get_grid_shape", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_grid_spacing(self, seqid, iprot, oprot):
args = get_grid_spacing_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_grid_spacing_result()
result.success = self._handler.get_grid_spacing(args.long_var_name)
oprot.writeMessageBegin("get_grid_spacing", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_grid_origin(self, seqid, iprot, oprot):
args = get_grid_origin_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_grid_origin_result()
result.success = self._handler.get_grid_origin(args.long_var_name)
oprot.writeMessageBegin("get_grid_origin", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_grid_x(self, seqid, iprot, oprot):
args = get_grid_x_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_grid_x_result()
result.success = self._handler.get_grid_x(args.long_var_name)
oprot.writeMessageBegin("get_grid_x", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_grid_y(self, seqid, iprot, oprot):
args = get_grid_y_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_grid_y_result()
result.success = self._handler.get_grid_y(args.long_var_name)
oprot.writeMessageBegin("get_grid_y", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_grid_z(self, seqid, iprot, oprot):
args = get_grid_z_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_grid_z_result()
result.success = self._handler.get_grid_z(args.long_var_name)
oprot.writeMessageBegin("get_grid_z", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_grid_connectivity(self, seqid, iprot, oprot):
args = get_grid_connectivity_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_grid_connectivity_result()
result.success = self._handler.get_grid_connectivity(args.long_var_name)
oprot.writeMessageBegin("get_grid_connectivity", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_grid_offset(self, seqid, iprot, oprot):
    """Service one get_grid_offset RPC: read args, call the handler, write the reply."""
    args = get_grid_offset_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_grid_offset_result()
    result.success = self._handler.get_grid_offset(args.long_var_name)
    oprot.writeMessageBegin("get_grid_offset", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_initialize_config(self, seqid, iprot, oprot):
    """Service one initialize_config RPC; a ModelException from the handler is serialized into the reply rather than propagating."""
    args = initialize_config_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = initialize_config_result()
    try:
        self._handler.initialize_config(args.file)
    except ModelException as error:
        result.error = error
    oprot.writeMessageBegin("initialize_config", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_initialize_model(self, seqid, iprot, oprot):
    """Service one initialize_model RPC; a ModelException from the handler is serialized into the reply rather than propagating."""
    args = initialize_model_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = initialize_model_result()
    try:
        self._handler.initialize_model()
    except ModelException as error:
        result.error = error
    oprot.writeMessageBegin("initialize_model", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_set_start_time(self, seqid, iprot, oprot):
    """Service one set_start_time RPC; a ModelException from the handler is serialized into the reply rather than propagating."""
    args = set_start_time_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = set_start_time_result()
    try:
        self._handler.set_start_time(args.start_time)
    except ModelException as error:
        result.error = error
    oprot.writeMessageBegin("set_start_time", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_set_end_time(self, seqid, iprot, oprot):
    """Service one set_end_time RPC; a ModelException from the handler is serialized into the reply rather than propagating."""
    args = set_end_time_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = set_end_time_result()
    try:
        self._handler.set_end_time(args.end_time)
    except ModelException as error:
        result.error = error
    oprot.writeMessageBegin("set_end_time", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_attribute_names(self, seqid, iprot, oprot):
    """Service one get_attribute_names RPC: read args, call the handler, write the reply."""
    args = get_attribute_names_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_attribute_names_result()
    result.success = self._handler.get_attribute_names()
    oprot.writeMessageBegin("get_attribute_names", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_get_attribute_value(self, seqid, iprot, oprot):
    """Service one get_attribute_value RPC; a ModelException from the handler is serialized into the reply rather than propagating."""
    args = get_attribute_value_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_attribute_value_result()
    try:
        result.success = self._handler.get_attribute_value(args.attribute_name)
    except ModelException as error:
        result.error = error
    oprot.writeMessageBegin("get_attribute_value", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_set_attribute_value(self, seqid, iprot, oprot):
    """Service one set_attribute_value RPC; a ModelException from the handler is serialized into the reply rather than propagating."""
    args = set_attribute_value_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = set_attribute_value_result()
    try:
        self._handler.set_attribute_value(args.attribute_name, args.attribute_value)
    except ModelException as error:
        result.error = error
    oprot.writeMessageBegin("set_attribute_value", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_save_state(self, seqid, iprot, oprot):
    """Service one save_state RPC; a ModelException from the handler is serialized into the reply rather than propagating."""
    args = save_state_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = save_state_result()
    try:
        self._handler.save_state(args.destination_directory)
    except ModelException as error:
        result.error = error
    oprot.writeMessageBegin("save_state", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_load_state(self, seqid, iprot, oprot):
    """Service one load_state RPC; a ModelException from the handler is serialized into the reply rather than propagating."""
    args = load_state_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = load_state_result()
    try:
        self._handler.load_state(args.source_directory)
    except ModelException as error:
        result.error = error
    oprot.writeMessageBegin("load_state", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class initialize_args(object):
    """Thrift-generated argument struct for the initialize RPC (do not edit by hand).

    Attributes:
    - file
    """

    thrift_spec = (None, (1, TType.STRING, "file", None, None))  # 0  # 1

    def __init__(self, file=None):
        self.file = file

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when the accelerated binary protocol,
        # a C-readable transport, and the fastbinary extension are all present.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.file = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("initialize_args")
        if self.file is not None:
            oprot.writeFieldBegin("file", TType.STRING, 1)
            oprot.writeString(self.file)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class initialize_result(object):
    """Thrift-generated result struct for the initialize RPC (do not edit by hand).

    Attributes:
    - error
    """

    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )

    def __init__(self, error=None):
        self.error = error

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: C-accelerated decode when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("initialize_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class update_args(object):
    """Thrift-generated argument struct for the update RPC; it has no fields (do not edit by hand)."""

    thrift_spec = ()

    def read(self, iprot):
        """Deserialize this (empty) struct from *iprot*, skipping unknown fields."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this (empty) struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("update_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class update_result(object):
    """Thrift-generated result struct for the update RPC (do not edit by hand).

    Attributes:
    - error
    """

    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )

    def __init__(self, error=None):
        self.error = error

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("update_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class update_until_args(object):
    """Thrift-generated argument struct for the update_until RPC (do not edit by hand).

    Attributes:
    - time
    """

    thrift_spec = (None, (1, TType.DOUBLE, "time", None, None))  # 0  # 1

    def __init__(self, time=None):
        self.time = time

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.DOUBLE:
                    self.time = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("update_until_args")
        if self.time is not None:
            oprot.writeFieldBegin("time", TType.DOUBLE, 1)
            oprot.writeDouble(self.time)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class update_until_result(object):
    """Thrift-generated result struct for the update_until RPC (do not edit by hand).

    Attributes:
    - error
    """

    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )

    def __init__(self, error=None):
        self.error = error

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("update_until_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class update_frac_args(object):
    """Thrift-generated argument struct for the update_frac RPC (do not edit by hand).

    Attributes:
    - frac
    """

    thrift_spec = (None, (1, TType.DOUBLE, "frac", None, None))  # 0  # 1

    def __init__(self, frac=None):
        self.frac = frac

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.DOUBLE:
                    self.frac = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("update_frac_args")
        if self.frac is not None:
            oprot.writeFieldBegin("frac", TType.DOUBLE, 1)
            oprot.writeDouble(self.frac)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class update_frac_result(object):
    """Thrift-generated result struct for the update_frac RPC (do not edit by hand).

    Attributes:
    - error
    """

    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )

    def __init__(self, error=None):
        self.error = error

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("update_frac_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class finalize_model_args(object):
    """Thrift-generated argument struct for the finalize_model RPC; it has no fields (do not edit by hand)."""

    thrift_spec = ()

    def read(self, iprot):
        """Deserialize this (empty) struct from *iprot*, skipping unknown fields."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this (empty) struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("finalize_model_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class finalize_model_result(object):
    """Thrift-generated result struct for the finalize_model RPC (do not edit by hand).

    Attributes:
    - error
    """

    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )

    def __init__(self, error=None):
        self.error = error

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("finalize_model_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_component_name_args(object):
    """Thrift-generated argument struct for the get_component_name RPC; it has no fields (do not edit by hand)."""

    thrift_spec = ()

    def read(self, iprot):
        """Deserialize this (empty) struct from *iprot*, skipping unknown fields."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this (empty) struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_component_name_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_component_name_result(object):
    """Thrift-generated result struct for the get_component_name RPC (do not edit by hand).

    Attributes:
    - success
    - error
    """

    thrift_spec = (
        (0, TType.STRING, "success", None, None),  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )

    def __init__(self, success=None, error=None):
        self.success = success
        self.error = error

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_component_name_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.STRING, 0)
            oprot.writeString(self.success)
            oprot.writeFieldEnd()
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_input_var_names_args(object):
    """Thrift-generated argument struct for the get_input_var_names RPC; it has no fields (do not edit by hand)."""

    thrift_spec = ()

    def read(self, iprot):
        """Deserialize this (empty) struct from *iprot*, skipping unknown fields."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this (empty) struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_input_var_names_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_input_var_names_result(object):
    """Thrift-generated result struct for the get_input_var_names RPC (do not edit by hand).

    Attributes:
    - success
    """

    thrift_spec = ((0, TType.LIST, "success", (TType.STRING, None), None),)  # 0

    def __init__(self, success=None):
        self.success = success

    def read(self, iprot):
        """Deserialize this struct (a list of strings) from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in range(_size0):
                        _elem5 = iprot.readString()
                        self.success.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_input_var_names_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.STRING, len(self.success))
            for iter6 in self.success:
                oprot.writeString(iter6)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_output_var_names_args(object):
    """Thrift-generated argument struct for the get_output_var_names RPC; it has no fields (do not edit by hand)."""

    thrift_spec = ()

    def read(self, iprot):
        """Deserialize this (empty) struct from *iprot*, skipping unknown fields."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this (empty) struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_output_var_names_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_output_var_names_result(object):
    """Thrift-generated result struct for the get_output_var_names RPC (do not edit by hand).

    Attributes:
    - success
    """

    thrift_spec = ((0, TType.LIST, "success", (TType.STRING, None), None),)  # 0

    def __init__(self, success=None):
        self.success = success

    def read(self, iprot):
        """Deserialize this struct (a list of strings) from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype10, _size7) = iprot.readListBegin()
                    for _i11 in range(_size7):
                        _elem12 = iprot.readString()
                        self.success.append(_elem12)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_output_var_names_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.STRING, len(self.success))
            for iter13 in self.success:
                oprot.writeString(iter13)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_var_type_args(object):
    """Thrift-generated argument struct for the get_var_type RPC (do not edit by hand).

    Attributes:
    - long_var_name
    """

    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0  # 1

    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_type_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_var_type_result(object):
    """Thrift-generated result struct for the get_var_type RPC (do not edit by hand).

    Attributes:
    - success
    """

    thrift_spec = ((0, TType.STRING, "success", None, None),)  # 0

    def __init__(self, success=None):
        self.success = success

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_type_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.STRING, 0)
            oprot.writeString(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_var_units_args(object):
    """Thrift-generated argument struct for the get_var_units RPC (do not edit by hand).

    Attributes:
    - long_var_name
    """

    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0  # 1

    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*."""
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_units_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_var_units_result(object):
    """
    Response struct for the get_var_units RPC.

    Attributes:
     - success: string return value of the call
    """
    thrift_spec = ((0, TType.STRING, "success", None, None),) # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_units_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.STRING, 0)
            oprot.writeString(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_var_rank_args(object):
    """
    Request struct for the get_var_rank RPC.

    Attributes:
     - long_var_name: long/standard name of the model variable to query
    """
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None)) # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_rank_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_var_rank_result(object):
    """
    Response struct for the get_var_rank RPC.

    Attributes:
     - success: i32 return value of the call
    """
    thrift_spec = ((0, TType.I32, "success", None, None),) # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I32:
                    self.success = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_rank_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.I32, 0)
            oprot.writeI32(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_var_size_args(object):
    """
    Request struct for the get_var_size RPC.

    Attributes:
     - long_var_name: long/standard name of the model variable to query
    """
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None)) # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_size_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_var_size_result(object):
    """
    Response struct for the get_var_size RPC.

    Attributes:
     - success: i32 return value of the call
    """
    thrift_spec = ((0, TType.I32, "success", None, None),) # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I32:
                    self.success = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_size_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.I32, 0)
            oprot.writeI32(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_var_nbytes_args(object):
    """
    Request struct for the get_var_nbytes RPC.

    Attributes:
     - long_var_name: long/standard name of the model variable to query
    """
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None)) # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_nbytes_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_var_nbytes_result(object):
    """
    Response struct for the get_var_nbytes RPC.

    Attributes:
     - success: i32 return value of the call
    """
    thrift_spec = ((0, TType.I32, "success", None, None),) # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I32:
                    self.success = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_var_nbytes_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.I32, 0)
            oprot.writeI32(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_start_time_args(object):
    """Request struct for the get_start_time RPC (takes no arguments)."""
    thrift_spec = ()
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_start_time_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_start_time_result(object):
    """
    Response struct for the get_start_time RPC.

    Attributes:
     - success: double return value of the call
    """
    thrift_spec = ((0, TType.DOUBLE, "success", None, None),) # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.DOUBLE:
                    self.success = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_start_time_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.DOUBLE, 0)
            oprot.writeDouble(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_current_time_args(object):
    """Request struct for the get_current_time RPC (takes no arguments)."""
    thrift_spec = ()
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_current_time_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_current_time_result(object):
    """
    Response struct for the get_current_time RPC.

    Attributes:
     - success: double return value of the call
    """
    thrift_spec = ((0, TType.DOUBLE, "success", None, None),) # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.DOUBLE:
                    self.success = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_current_time_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.DOUBLE, 0)
            oprot.writeDouble(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_end_time_args(object):
    """Request struct for the get_end_time RPC (takes no arguments)."""
    thrift_spec = ()
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_end_time_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_end_time_result(object):
    """
    Response struct for the get_end_time RPC.

    Attributes:
     - success: double return value of the call
    """
    thrift_spec = ((0, TType.DOUBLE, "success", None, None),) # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.DOUBLE:
                    self.success = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_end_time_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.DOUBLE, 0)
            oprot.writeDouble(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_time_step_args(object):
    """Request struct for the get_time_step RPC (takes no arguments)."""
    thrift_spec = ()
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_time_step_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_time_step_result(object):
    """
    Response struct for the get_time_step RPC.

    Attributes:
     - success: double return value of the call
    """
    thrift_spec = ((0, TType.DOUBLE, "success", None, None),) # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.DOUBLE:
                    self.success = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_time_step_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.DOUBLE, 0)
            oprot.writeDouble(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_time_units_args(object):
    """Request struct for the get_time_units RPC (takes no arguments)."""
    thrift_spec = ()
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_time_units_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_time_units_result(object):
    """
    Response struct for the get_time_units RPC.

    Attributes:
     - success: string return value of the call
    """
    thrift_spec = ((0, TType.STRING, "success", None, None),) # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_time_units_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.STRING, 0)
            oprot.writeString(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_value_args(object):
    """
    Request struct for the get_value RPC.

    Attributes:
     - long_var_name: long/standard name of the model variable to query
    """
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None)) # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_value_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_value_result(object):
    """
    Response struct for the get_value RPC.

    Attributes:
     - success: string return value (opaque payload; exact encoding is
       defined by the service's .thrift IDL -- confirm there)
     - error: ModelException raised by the service implementation, if any
    """
    thrift_spec = (
        (0, TType.STRING, "success", None, None), # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ), # 1
    )
    def __init__(self, success=None, error=None):
        self.success = success
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    # Nested struct: delegate decoding to ModelException.read.
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_value_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.STRING, 0)
            oprot.writeString(self.success)
            oprot.writeFieldEnd()
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_value_at_indices_args(object):
    """
    Request struct for the get_value_at_indices RPC.

    Attributes:
     - long_var_name: long/standard name of the model variable to query
     - inds: list of i32 indices at which values are requested
    """
    thrift_spec = (
        None, # 0
        (1, TType.STRING, "long_var_name", None, None), # 1
        (2, TType.LIST, "inds", (TType.I32, None), None), # 2
    )
    def __init__(self, long_var_name=None, inds=None):
        self.long_var_name = long_var_name
        self.inds = inds
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    # Decode the i32 list element by element.
                    self.inds = []
                    (_etype17, _size14) = iprot.readListBegin()
                    for _i18 in range(_size14):
                        _elem19 = iprot.readI32()
                        self.inds.append(_elem19)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_value_at_indices_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        if self.inds is not None:
            oprot.writeFieldBegin("inds", TType.LIST, 2)
            oprot.writeListBegin(TType.I32, len(self.inds))
            for iter20 in self.inds:
                oprot.writeI32(iter20)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_value_at_indices_result(object):
    """
    Response struct for the get_value_at_indices RPC.

    Attributes:
     - success: string return value (opaque payload; exact encoding is
       defined by the service's .thrift IDL -- confirm there)
     - error: ModelException raised by the service implementation, if any
    """
    thrift_spec = (
        (0, TType.STRING, "success", None, None), # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ), # 1
    )
    def __init__(self, success=None, error=None):
        self.success = success
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    # Nested struct: delegate decoding to ModelException.read.
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_value_at_indices_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.STRING, 0)
            oprot.writeString(self.success)
            oprot.writeFieldEnd()
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_value_args(object):
    """Auto-generated Thrift argument wrapper for the ``set_value`` RPC.

    Holds the parameters sent from client to server. Generated code — do
    not edit by hand.

    Attributes:
     - long_var_name: name of the exposed model variable to set
     - src: serialized value to assign (opaque byte string on the wire)
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 is unused for args).
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, "long_var_name", None, None),  # 1
        (2, TType.STRING, "src", None, None),  # 2
    )
    def __init__(self, long_var_name=None, src=None):
        self.long_var_name = long_var_name
        self.src = src
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.src = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_value_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        if self.src is not None:
            oprot.writeFieldBegin("src", TType.STRING, 2)
            oprot.writeString(self.src)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_value_result(object):
    """Auto-generated Thrift result wrapper for the ``set_value`` RPC.

    ``set_value`` returns void, so only a possible server-side
    ``ModelException`` is carried back. Generated code — do not edit by hand.

    Attributes:
     - error: ModelException raised by the server, if any
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 unused: void return).
    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )
    def __init__(self, error=None):
        self.error = error
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_value_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_value_at_indices_args(object):
    """Auto-generated Thrift argument wrapper for the ``set_value_at_indices`` RPC.

    Holds the parameters sent from client to server. Generated code — do
    not edit by hand.

    Attributes:
     - long_var_name: name of the exposed model variable to set
     - inds: flat list of i32 indices to write at
     - src: serialized values to assign (opaque byte string on the wire)
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 is unused for args).
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, "long_var_name", None, None),  # 1
        (2, TType.LIST, "inds", (TType.I32, None), None),  # 2
        (3, TType.STRING, "src", None, None),  # 3
    )
    def __init__(self, long_var_name=None, inds=None, src=None):
        self.long_var_name = long_var_name
        self.inds = inds
        self.src = src
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.inds = []
                    # Length-prefixed list of i32 elements on the wire.
                    (_etype24, _size21) = iprot.readListBegin()
                    for _i25 in range(_size21):
                        _elem26 = iprot.readI32()
                        self.inds.append(_elem26)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.src = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_value_at_indices_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        if self.inds is not None:
            oprot.writeFieldBegin("inds", TType.LIST, 2)
            oprot.writeListBegin(TType.I32, len(self.inds))
            for iter27 in self.inds:
                oprot.writeI32(iter27)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.src is not None:
            oprot.writeFieldBegin("src", TType.STRING, 3)
            oprot.writeString(self.src)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_value_at_indices_result(object):
    """Auto-generated Thrift result wrapper for the ``set_value_at_indices`` RPC.

    The call returns void, so only a possible server-side ``ModelException``
    is carried back. Generated code — do not edit by hand.

    Attributes:
     - error: ModelException raised by the server, if any
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 unused: void return).
    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )
    def __init__(self, error=None):
        self.error = error
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_value_at_indices_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_type_args(object):
    """Auto-generated Thrift argument wrapper for the ``get_grid_type`` RPC.

    Generated code — do not edit by hand.

    Attributes:
     - long_var_name: name of the model variable whose grid is queried
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 is unused for args).
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_type_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_type_result(object):
    """Auto-generated Thrift result wrapper for the ``get_grid_type`` RPC.

    Carries either the return value (``success``, an i32 — presumably an
    enum value defined elsewhere in the generated module; confirm against
    the .thrift IDL) or a ``ModelException``. Generated code — do not edit
    by hand.

    Attributes:
     - success: i32 return value of the call
     - error: ModelException raised by the server, if any
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (result value is always field 0).
    thrift_spec = (
        (0, TType.I32, "success", None, None),  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )
    def __init__(self, success=None, error=None):
        self.success = success
        self.error = error
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I32:
                    self.success = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_type_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.I32, 0)
            oprot.writeI32(self.success)
            oprot.writeFieldEnd()
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_shape_args(object):
    """Auto-generated Thrift argument wrapper for the ``get_grid_shape`` RPC.

    Generated code — do not edit by hand.

    Attributes:
     - long_var_name: name of the model variable whose grid shape is queried
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 is unused for args).
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_shape_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_shape_result(object):
    """Auto-generated Thrift result wrapper for the ``get_grid_shape`` RPC.

    NOTE: unlike the other grid getters, this result declares no ``error``
    field, so server exceptions cannot be carried back through it. Generated
    code — do not edit by hand.

    Attributes:
     - success: list of i32 grid dimensions
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; the
    # result value is always field 0.
    thrift_spec = ((0, TType.LIST, "success", (TType.I32, None), None),)  # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    # Length-prefixed list of i32 elements on the wire.
                    (_etype31, _size28) = iprot.readListBegin()
                    for _i32 in range(_size28):
                        _elem33 = iprot.readI32()
                        self.success.append(_elem33)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_shape_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.I32, len(self.success))
            for iter34 in self.success:
                oprot.writeI32(iter34)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_spacing_args(object):
    """Auto-generated Thrift argument wrapper for the ``get_grid_spacing`` RPC.

    Generated code — do not edit by hand.

    Attributes:
     - long_var_name: name of the model variable whose grid spacing is queried
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 is unused for args).
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_spacing_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_spacing_result(object):
    """Auto-generated Thrift result wrapper for the ``get_grid_spacing`` RPC.

    NOTE: declares no ``error`` field, so server exceptions cannot be
    carried back through it. Generated code — do not edit by hand.

    Attributes:
     - success: list of double grid spacings, one per dimension
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; the
    # result value is always field 0.
    thrift_spec = ((0, TType.LIST, "success", (TType.DOUBLE, None), None),)  # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    # Length-prefixed list of double elements on the wire.
                    (_etype38, _size35) = iprot.readListBegin()
                    for _i39 in range(_size35):
                        _elem40 = iprot.readDouble()
                        self.success.append(_elem40)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_spacing_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.DOUBLE, len(self.success))
            for iter41 in self.success:
                oprot.writeDouble(iter41)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_origin_args(object):
    """Auto-generated Thrift argument wrapper for the ``get_grid_origin`` RPC.

    Generated code — do not edit by hand.

    Attributes:
     - long_var_name: name of the model variable whose grid origin is queried
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 is unused for args).
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_origin_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_origin_result(object):
    """Auto-generated Thrift result wrapper for the ``get_grid_origin`` RPC.

    NOTE: declares no ``error`` field, so server exceptions cannot be
    carried back through it. Generated code — do not edit by hand.

    Attributes:
     - success: list of double origin coordinates, one per dimension
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; the
    # result value is always field 0.
    thrift_spec = ((0, TType.LIST, "success", (TType.DOUBLE, None), None),)  # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    # Length-prefixed list of double elements on the wire.
                    (_etype45, _size42) = iprot.readListBegin()
                    for _i46 in range(_size42):
                        _elem47 = iprot.readDouble()
                        self.success.append(_elem47)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_origin_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.DOUBLE, len(self.success))
            for iter48 in self.success:
                oprot.writeDouble(iter48)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_x_args(object):
    """Auto-generated Thrift argument wrapper for the ``get_grid_x`` RPC.

    Generated code — do not edit by hand.

    Attributes:
     - long_var_name: name of the model variable whose grid x-coordinates are queried
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 is unused for args).
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_x_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_x_result(object):
    """Auto-generated Thrift result wrapper for the ``get_grid_x`` RPC.

    NOTE: declares no ``error`` field, so server exceptions cannot be
    carried back through it. Generated code — do not edit by hand.

    Attributes:
     - success: list of double x-coordinates
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; the
    # result value is always field 0.
    thrift_spec = ((0, TType.LIST, "success", (TType.DOUBLE, None), None),)  # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    # Length-prefixed list of double elements on the wire.
                    (_etype52, _size49) = iprot.readListBegin()
                    for _i53 in range(_size49):
                        _elem54 = iprot.readDouble()
                        self.success.append(_elem54)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_x_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.DOUBLE, len(self.success))
            for iter55 in self.success:
                oprot.writeDouble(iter55)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_y_args(object):
    """Auto-generated Thrift argument wrapper for the ``get_grid_y`` RPC.

    Generated code — do not edit by hand.

    Attributes:
     - long_var_name: name of the model variable whose grid y-coordinates are queried
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 is unused for args).
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_y_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_y_result(object):
    """Auto-generated Thrift result wrapper for the ``get_grid_y`` RPC.

    NOTE: declares no ``error`` field, so server exceptions cannot be
    carried back through it. Generated code — do not edit by hand.

    Attributes:
     - success: list of double y-coordinates
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; the
    # result value is always field 0.
    thrift_spec = ((0, TType.LIST, "success", (TType.DOUBLE, None), None),)  # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    # Length-prefixed list of double elements on the wire.
                    (_etype59, _size56) = iprot.readListBegin()
                    for _i60 in range(_size56):
                        _elem61 = iprot.readDouble()
                        self.success.append(_elem61)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_y_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.DOUBLE, len(self.success))
            for iter62 in self.success:
                oprot.writeDouble(iter62)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_z_args(object):
    """Auto-generated Thrift argument wrapper for the ``get_grid_z`` RPC.

    Generated code — do not edit by hand.

    Attributes:
     - long_var_name: name of the model variable whose grid z-coordinates are queried
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; tuple
    # index matches the Thrift field id (slot 0 is unused for args).
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_z_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_z_result(object):
    """Auto-generated Thrift result wrapper for the ``get_grid_z`` RPC.

    NOTE: declares no ``error`` field, so server exceptions cannot be
    carried back through it. Generated code — do not edit by hand.

    Attributes:
     - success: list of double z-coordinates
    """
    # Field spec consumed by the C-accelerated (fastbinary) codec; the
    # result value is always field 0.
    thrift_spec = ((0, TType.LIST, "success", (TType.DOUBLE, None), None),)  # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Deserialize from *iprot*; takes the C-accelerated fastbinary fast
        # path when protocol/transport support it, else the generic field loop.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    # Length-prefixed list of double elements on the wire.
                    (_etype66, _size63) = iprot.readListBegin()
                    for _i67 in range(_size63):
                        _elem68 = iprot.readDouble()
                        self.success.append(_elem68)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; only fields that are not None are emitted.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_z_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.DOUBLE, len(self.success))
            for iter69 in self.success:
                oprot.writeDouble(iter69)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared; nothing to check.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_connectivity_args(object):
    """Thrift-generated argument struct for the ``get_grid_connectivity`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - long_var_name (string)
    """
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_connectivity_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_connectivity_result(object):
    """Thrift-generated result struct for the ``get_grid_connectivity`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - success (list of i32): the call's return payload
    """
    thrift_spec = ((0, TType.LIST, "success", (TType.I32, None), None),)  # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype73, _size70) = iprot.readListBegin()
                    for _i74 in range(_size70):
                        _elem75 = iprot.readI32()
                        self.success.append(_elem75)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_connectivity_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.I32, len(self.success))
            for iter76 in self.success:
                oprot.writeI32(iter76)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_offset_args(object):
    """Thrift-generated argument struct for the ``get_grid_offset`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - long_var_name (string)
    """
    thrift_spec = (None, (1, TType.STRING, "long_var_name", None, None))  # 0 # 1
    def __init__(self, long_var_name=None):
        self.long_var_name = long_var_name
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.long_var_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_offset_args")
        if self.long_var_name is not None:
            oprot.writeFieldBegin("long_var_name", TType.STRING, 1)
            oprot.writeString(self.long_var_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_grid_offset_result(object):
    """Thrift-generated result struct for the ``get_grid_offset`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - success (list of i32): the call's return payload
    """
    thrift_spec = ((0, TType.LIST, "success", (TType.I32, None), None),)  # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype80, _size77) = iprot.readListBegin()
                    for _i81 in range(_size77):
                        _elem82 = iprot.readI32()
                        self.success.append(_elem82)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_grid_offset_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.I32, len(self.success))
            for iter83 in self.success:
                oprot.writeI32(iter83)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class initialize_config_args(object):
    """Thrift-generated argument struct for the ``initialize_config`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - file (string)
    """
    thrift_spec = (None, (1, TType.STRING, "file", None, None))  # 0 # 1
    def __init__(self, file=None):
        self.file = file
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.file = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("initialize_config_args")
        if self.file is not None:
            oprot.writeFieldBegin("file", TType.STRING, 1)
            oprot.writeString(self.file)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class initialize_config_result(object):
    """Thrift-generated result struct for the ``initialize_config`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - error (ModelException): failure reported by the call, if any
    """
    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )
    def __init__(self, error=None):
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("initialize_config_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class initialize_model_args(object):
    """Thrift-generated argument struct for the ``initialize_model`` RPC.

    The call takes no parameters, so this struct carries no fields.
    Auto-generated serialization code; avoid hand-editing.
    """
    thrift_spec = ()
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: skip every field until STOP (no fields expected).
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("initialize_model_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class initialize_model_result(object):
    """Thrift-generated result struct for the ``initialize_model`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - error (ModelException): failure reported by the call, if any
    """
    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )
    def __init__(self, error=None):
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("initialize_model_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_start_time_args(object):
    """Thrift-generated argument struct for the ``set_start_time`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - start_time (double)
    """
    thrift_spec = (None, (1, TType.DOUBLE, "start_time", None, None))  # 0 # 1
    def __init__(self, start_time=None):
        self.start_time = start_time
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.DOUBLE:
                    self.start_time = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_start_time_args")
        if self.start_time is not None:
            oprot.writeFieldBegin("start_time", TType.DOUBLE, 1)
            oprot.writeDouble(self.start_time)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_start_time_result(object):
    """Thrift-generated result struct for the ``set_start_time`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - error (ModelException): failure reported by the call, if any
    """
    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )
    def __init__(self, error=None):
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_start_time_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_end_time_args(object):
    """Thrift-generated argument struct for the ``set_end_time`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - end_time (double)
    """
    thrift_spec = (None, (1, TType.DOUBLE, "end_time", None, None))  # 0 # 1
    def __init__(self, end_time=None):
        self.end_time = end_time
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.DOUBLE:
                    self.end_time = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_end_time_args")
        if self.end_time is not None:
            oprot.writeFieldBegin("end_time", TType.DOUBLE, 1)
            oprot.writeDouble(self.end_time)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_end_time_result(object):
    """Thrift-generated result struct for the ``set_end_time`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - error (ModelException): failure reported by the call, if any
    """
    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )
    def __init__(self, error=None):
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_end_time_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_attribute_names_args(object):
    """Thrift-generated argument struct for the ``get_attribute_names`` RPC.

    The call takes no parameters, so this struct carries no fields.
    Auto-generated serialization code; avoid hand-editing.
    """
    thrift_spec = ()
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: skip every field until STOP (no fields expected).
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_attribute_names_args")
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_attribute_names_result(object):
    """Thrift-generated result struct for the ``get_attribute_names`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - success (list of string): the call's return payload
    """
    thrift_spec = ((0, TType.LIST, "success", (TType.STRING, None), None),)  # 0
    def __init__(self, success=None):
        self.success = success
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype87, _size84) = iprot.readListBegin()
                    for _i88 in range(_size84):
                        _elem89 = iprot.readString()
                        self.success.append(_elem89)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_attribute_names_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.LIST, 0)
            oprot.writeListBegin(TType.STRING, len(self.success))
            for iter90 in self.success:
                oprot.writeString(iter90)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_attribute_value_args(object):
    """Thrift-generated argument struct for the ``get_attribute_value`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - attribute_name (string)
    """
    thrift_spec = (None, (1, TType.STRING, "attribute_name", None, None))  # 0 # 1
    def __init__(self, attribute_name=None):
        self.attribute_name = attribute_name
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.attribute_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_attribute_value_args")
        if self.attribute_name is not None:
            oprot.writeFieldBegin("attribute_name", TType.STRING, 1)
            oprot.writeString(self.attribute_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class get_attribute_value_result(object):
    """Thrift-generated result struct for the ``get_attribute_value`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - success (string): the call's return payload
     - error (ModelException): failure reported by the call, if any
    """
    thrift_spec = (
        (0, TType.STRING, "success", None, None),  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )
    def __init__(self, success=None, error=None):
        self.success = success
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("get_attribute_value_result")
        if self.success is not None:
            oprot.writeFieldBegin("success", TType.STRING, 0)
            oprot.writeString(self.success)
            oprot.writeFieldEnd()
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_attribute_value_args(object):
    """Thrift-generated argument struct for the ``set_attribute_value`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - attribute_name (string)
     - attribute_value (string)
    """
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, "attribute_name", None, None),  # 1
        (2, TType.STRING, "attribute_value", None, None),  # 2
    )
    def __init__(self, attribute_name=None, attribute_value=None):
        self.attribute_name = attribute_name
        self.attribute_value = attribute_value
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.attribute_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.attribute_value = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_attribute_value_args")
        if self.attribute_name is not None:
            oprot.writeFieldBegin("attribute_name", TType.STRING, 1)
            oprot.writeString(self.attribute_name)
            oprot.writeFieldEnd()
        if self.attribute_value is not None:
            oprot.writeFieldBegin("attribute_value", TType.STRING, 2)
            oprot.writeString(self.attribute_value)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class set_attribute_value_result(object):
    """Thrift-generated result struct for the ``set_attribute_value`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - error (ModelException): failure reported by the call, if any
    """
    thrift_spec = (
        None,  # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ),  # 1
    )
    def __init__(self, error=None):
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("set_attribute_value_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class save_state_args(object):
    """Thrift-generated argument struct for the ``save_state`` RPC.

    Auto-generated serialization code; avoid hand-editing.

    Attributes:
     - destination_directory (string)
    """
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, "destination_directory", None, None),  # 1
    )
    def __init__(self, destination_directory=None):
        self.destination_directory = destination_directory
    def read(self, iprot):
        # Fast path: C-accelerated binary decoding when available.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: field-by-field decoding in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.destination_directory = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated binary encoding when available.
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("save_state_args")
        if self.destination_directory is not None:
            oprot.writeFieldBegin("destination_directory", TType.STRING, 1)
            oprot.writeString(self.destination_directory)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class save_state_result(object):
    """
    Attributes:
     - error
    """
    # Thrift-generated result struct for the save_state RPC; do not edit by hand.
    # A single optional STRUCT field carries a ModelException raised by the server.
    thrift_spec = (
        None, # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ), # 1
    )
    def __init__(self, error=None):
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated decode when protocol/transport/extension allow it.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: generic field-by-field deserialization.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path mirror of read().
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("save_state_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields to validate.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class load_state_args(object):
    """
    Attributes:
     - source_directory
    """
    # Thrift-generated argument struct for the load_state RPC; do not edit by hand.
    thrift_spec = (None, (1, TType.STRING, "source_directory", None, None)) # 0 # 1
    def __init__(self, source_directory=None):
        self.source_directory = source_directory
    def read(self, iprot):
        # Fast path: C-accelerated decode when protocol/transport/extension allow it.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: generic field-by-field deserialization.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.source_directory = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path mirror of read().
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("load_state_args")
        if self.source_directory is not None:
            oprot.writeFieldBegin("source_directory", TType.STRING, 1)
            oprot.writeString(self.source_directory)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields to validate.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class load_state_result(object):
    """
    Attributes:
     - error
    """
    # Thrift-generated result struct for the load_state RPC; do not edit by hand.
    # A single optional STRUCT field carries a ModelException raised by the server.
    thrift_spec = (
        None, # 0
        (
            1,
            TType.STRUCT,
            "error",
            (ModelException, ModelException.thrift_spec),
            None,
        ), # 1
    )
    def __init__(self, error=None):
        self.error = error
    def read(self, iprot):
        # Fast path: C-accelerated decode when protocol/transport/extension allow it.
        if (
            iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and isinstance(iprot.trans, TTransport.CReadableTransport)
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            fastbinary.decode_binary(
                self, iprot.trans, (self.__class__, self.thrift_spec)
            )
            return
        # Slow path: generic field-by-field deserialization.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.error = ModelException()
                    self.error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path mirror of read().
        if (
            oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
            and self.thrift_spec is not None
            and fastbinary is not None
        ):
            oprot.trans.write(
                fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))
            )
            return
        oprot.writeStructBegin("load_state_result")
        if self.error is not None:
            oprot.writeFieldBegin("error", TType.STRUCT, 1)
            self.error.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields to validate.
        return
    def __repr__(self):
        L = ["%s=%r" % (key, value) for key, value in self.__dict__.items()]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
| 2.078125 | 2 |
visualization/site/djwebsite/djwebsite/view.py | christnp/e6895-project | 0 | 12760493 | from django.http import HttpResponse
from django.shortcuts import render
import pandas_gbq
from google.oauth2 import service_account
import os
# Make sure you have installed pandas-gbq at first;
# You can use the other way to query BigQuery.
# please have a look at
# https://cloud.google.com/bigquery/docs/reference/libraries#client-libraries-install-nodejs
# To get your credential
app_dir = os.path.dirname(__file__)
service_path = os.path.join(app_dir,'static/eecs-e6895-edu-591e4a34fcda.json')
project_id = "eecs-e6895-edu"
credentials = service_account.Credentials \
.from_service_account_file(service_path)
def dashboard(request):
    """Render the US heatmap dashboard from BigQuery data.

    Queries ``usheatmap.training_data`` ordered by date, derives the county
    GEOID (state FIPS + county FIPS) for each row, and passes one record per
    county for the (currently hard-coded) date 2018-01-08 to ``map.html``.

    Improvements over the original: removed the unused ``import json`` and
    the dead ``tmp = {}`` initialization, and deleted commented-out
    experimentation code.
    """
    pandas_gbq.context.credentials = credentials
    pandas_gbq.context.project = project_id
    dataset = 'usheatmap'  # the name of dataset in BigQuery
    table = 'training_data'  # the name of table in BigQuery
    table_id = '{0}.{1}'.format(dataset, table)
    SQL = "SELECT * FROM `{}` ORDER BY date ASC".format(table_id)
    df = pandas_gbq.read_gbq(SQL)
    df.fillna(-1, inplace=True)  # fill NaN/Na with -1

    def getGeoid(row):
        # GEOID = SSCCC (state FIPS + county FIPS); BigQuery stores the
        # columns as "SS_Name" / "CCC_Name", so take the prefix before "_".
        statefp = row['state'].split("_")[0]
        countyfp = row['county'].split("_")[0]
        return "{}{}".format(statefp, countyfp)

    # FIXME: the geoid should be computed in the original processing pipeline.
    df['geoid'] = df.apply(lambda row: getGeoid(row), axis=1)

    data = {'data': []}
    for _, row in df.iterrows():
        dt_date = row['date'].to_pydatetime().strftime('%Y-%m-%d')
        # TODO(review): the view only renders a single hard-coded day.
        if dt_date != "2018-01-08":
            continue
        data['data'].append({
            'date': dt_date,
            'geoid': row['geoid'],
            'value': {
                'vci': row['vci'],
                'tci': row['tci'],
                'vhi': row['vhi'],
                'tasmin': row['tasmin'],
                'tasmax': row['tasmax'],
                'pr': row['pr'],
            },
        })
    return render(request, 'map.html', data)
# Notes:
# desired output, to organize by date
# 'date1': [
# {
# 'geoid': geoid1,
# 'values': {
# 'vci': vci,
# 'tci': tci,
# ...
# }
# },
# {
# 'geoid': geoid2,
# 'values': {
# 'vci': vci,
# 'tci': tci,
# ...
# }
# }, ...
# ]
#
# # 'date2': [
# {
# 'geoid': geoid1,
# 'values': {
# 'vci': vci,
# 'tci': tci,
# ...
# }
# },
# {
# 'geoid': geoid2,
# 'values': {
# 'vci': vci,
# 'tci': tci,
# ...
# }
# }, ...
# ]
# hello world page
def hello(request):
    """Render the static hello-world page."""
    return render(request, 'helloworld.html', {'content1': 'Hello World!'})
# deprecated
# def dashboard(request):
# pandas_gbq.context.credentials = credentials
# pandas_gbq.context.project = project_id
# dataset = 'usheatmap' # the name of dataset in BigQuery
# table = 'final' # the name of table in BigQuery
# table_id = '{0}.{1}'.format(dataset,table)
# # cols = ['ai', 'data','good','movie','spark'] # column names
# data = {}
# data['data'] = []
# # query the table, return as pandas df.
# SQL = "SELECT * FROM `{}` ORDER BY date ASC LIMIT 8".format(table_id)
# df = pandas_gbq.read_gbq(SQL)
# # iterate each row of the dataframe
# tmp = {}
# for index, row in df.iterrows():
# dt_date = row['date'].to_pydatetime().strftime('%Y-%m-%d')
# tmp = { 'date' : dt_date, \
# 'count' : { 'vci' : row['vci'], \
# 'tci':row['tci'], \
# 'vhi':row['vhi'], \
# 'tasmin':row['tasmin'], \
# 'tasmax':row['tasmax'], \
# 'pr':row['pr'] \
# } \
# }
# data['data'].append(tmp)
# return render(request, 'dashboard.html', data)
# def connection(request):
# pandas_gbq.context.credentials = credentials
# pandas_gbq.context.project = "Your-Project"
# SQL1 = ''
# df1 = pandas_gbq.read_gbq(SQL1)
# SQL2 = ''
# df2 = pandas_gbq.read_gbq(SQL2)
# data = {}
# '''
# TODO: Finish the SQL to query the data, it should be limited to 8 rows.
# Then process them to format below:
# Format of data:
# {'n': [xxx, xxx, xxx, xxx],
# 'e': [{'source': xxx, 'target': xxx},
# {'source': xxx, 'target': xxx},
# ...
# ]
# }
# '''
# return render(request, 'connection.html', data)
| 2.6875 | 3 |
src/python/grid_test.py | YFFY/SeleniumGridExample | 0 | 12760494 | <gh_stars>0
import sys
import unittest
from selenium import webdriver
import HTMLTestRunner
class Grid2(unittest.TestCase):
    # Selenium Grid smoke tests: each test opens a page on a remote node and
    # asserts on the page title.
    capabilities = None
    def setUp(self):
        # NOTE(review): browser/platform/port/version are module globals set in
        # the __main__ block from sys.argv before the suite runs — confirm they
        # are defined if this module is ever imported by a different runner.
        self.driver = webdriver.Remote(desired_capabilities={
            "browserName": browser,
            "platform":platform,
            "node":port,
            "version":version
        })
    def test_ibm(self):
        self.driver.get("http://www.ibm.com/us-en/")
        self.assertEqual(self.driver.title, "IBM - United States")
    def test_python(self):
        self.driver.get("https://www.python.org/")
        self.assertEqual("Welcome to Python.org", self.driver.title)
    def test_yahoo(self):
        # NOTE(review): despite the name, this test visits the Clang site.
        self.driver.get("http://clang.llvm.org/get_started.html")
        self.assertEqual("Clang - Getting Started", self.driver.title)
    def test_stackoverflow(self):
        self.driver.get("http://stackoverflow.com/")
        self.assertEqual("Stack Overflow", self.driver.title)
    def tearDown(self):
        # Always release the remote browser session.
        self.driver.quit()
if __name__ == "__main__":
    # Expected CLI: grid_test.py <port> <platform> <browser> <version>
    args = sys.argv
    port, platform, browser, version = args[1:]
    suite = unittest.makeSuite(Grid2)
    report_file = '../report/{0}_{1}_{2}.html'.format(platform, browser, version)
    # Bug fix: `file()` is a Python-2-only builtin (removed in Python 3);
    # use open() — which also works on Python 2 — and close the handle
    # deterministically with a context manager (it was never closed before).
    with open(report_file, "wb") as fp:
        runner = HTMLTestRunner.HTMLTestRunner(
            stream=fp,
            title="test result",
            description="test report"
        )
        runner.run(suite)
| 2.8125 | 3 |
video.py | andygarcia86/python-split-concat-videos | 0 | 12760495 | <gh_stars>0
import ffmpeg #https://pypi.org/project/ffmpeg-python/
import subprocess
import random
import numpy as np, numpy.random
import datetime
ffmpeg_path = "C:/Program Files/ffmpeg/bin/ffmpeg"
def get_video_length(filename):
    """Return the duration of *filename* in seconds, as reported by ffprobe."""
    probe_cmd = [
        "C:/Program Files/ffmpeg/bin/ffprobe",
        "-v", "error",
        "-show_entries", "format=duration",
        "-of", "default=noprint_wrappers=1:nokey=1",
        filename,
    ]
    probe = subprocess.run(probe_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return float(probe.stdout)
def cut_video(source_filename, out_filename, start_time, end_time):
    """Extract the [start_time, end_time] segment of a video with ffmpeg."""
    ffmpeg_args = [
        ffmpeg_path,
        '-i', source_filename,
        '-ss', start_time,
        '-to', end_time,
        out_filename,
    ]
    subprocess.call(ffmpeg_args)
def split_video_random(filename):
    """Split *filename* into 3-5 consecutive parts of random length.

    Part lengths are drawn from a symmetric Dirichlet distribution so they
    sum to the full video duration; each part is written as ``out-<i>.mp4``.
    """
    duration_total = get_video_length(filename)
    num_partitions = random.randint(3, 5)
    # One row of Dirichlet weights summing to 1.0 (shape (1, num_partitions)).
    weights = np.random.dirichlet(np.ones(num_partitions), size=1)
    start_seconds = 0
    end_seconds = 0
    idx = 0
    for (x, y), _ in numpy.ndenumerate(weights):
        seconds = weights[x][y] * duration_total
        idx = idx + 1
        # Bug fix: the original `if (x != 0 | y != 0)` parsed as the chained
        # comparison `x != (0 | y) != 0` because `|` binds tighter than `!=`.
        # Intended: for every partition after the first, the new start is the
        # previous end.
        if x != 0 or y != 0:
            start_seconds = end_seconds
        end_seconds = end_seconds + seconds
        # Truncate both timestamps to millisecond precision for ffmpeg.
        start = str(start_seconds - start_seconds % 0.001)
        end = str(end_seconds - end_seconds % 0.001)
        cut_video(filename, 'out-' + str(idx) + '.mp4', start, end)
def concatenate():
    """Concatenate the clips listed in ``inputs.txt`` into ``mux1.mp4``.

    Uses ffmpeg's concat demuxer with stream copy (no re-encode).

    Bug fix: the original argument list was missing commas after
    ``'inputs.txt'`` and after the first ``'copy'``, so Python's implicit
    string-literal concatenation produced the bogus arguments
    ``'inputs.txt-vcodec'`` and ``'copy-acodec'``, breaking the command.
    The dead triple-quoted shell-script blob has been removed.
    """
    subprocess.call([ffmpeg_path,
                     '-f', 'concat',
                     '-i', 'inputs.txt',
                     '-vcodec', 'copy',
                     '-acodec', 'copy',
                     'mux1.mp4'])
concatenate()
#split_video_random('video.mp4')
| 2.703125 | 3 |
workshop_material/029_find_paper2.py | nrupatunga/pyimageconf2018 | 106 | 12760496 | <gh_stars>100-1000
from dlib import *
import numpy as np
import sys
sys.path = ['./superfast/build'] + sys.path
import superfast
# NEW!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def discard_all_but_largest_blob(img):
labels, num_blobs = label_connected_blobs(img, connected_if_both_not_zero=True)
h = get_histogram(labels, num_blobs)
# ignore background blobs
h[0] = 0
largest_blob = np.argmax(h)
superfast.zero_pixels_not_labeled_with_val(img, labels, largest_blob)
return img
#img = load_grayscale_image(sys.argv[1])
# discarding all but largest blob fixes this image
img = load_grayscale_image('./images/find_page/paper22.jpg')
# What about this image? Need to do something to fix it
#img = load_grayscale_image('./images/find_page/tissue_04.jpg')
# Resize the input to the Hough transform's working resolution (300x300).
ht = hough_transform(300)
img = resize_image(img, ht.size, ht.size)
win1 = image_window(img)
# Edge pipeline: gradients -> non-maximum suppression -> hysteresis threshold.
ig = image_gradients(10)
x = ig.gradient_x(img)
y = ig.gradient_y(img)
edges = suppress_non_maximum_edges(x,y)
win3 = image_window(edges)
edges = discard_all_but_largest_blob(hysteresis_threshold(edges)) # NEW!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
win4 = image_window(edges)
# Hough transform; keep the 4 strongest non-overlapping lines, which are
# assumed to bound the sheet of paper.
himg = ht(edges)
hits = ht.find_strong_hough_points(himg, hough_count_thresh=ht.size/5, angle_nms_thresh=15, radius_nms_thresh=10)
lines = [ht.get_line(p) for p in hits[0:4]]
win1.add_overlay(lines)
# Warp the quadrilateral enclosed by the 4 lines into a 200x200 image.
page = extract_image_4points(img, lines, 200,200)
win_page = image_window(page)
input("hit enter to exit")
| 2.15625 | 2 |
runway/cfngin/actions/init.py | onicagroup/runway | 134 | 12760497 | """CFNgin init action."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Optional, Union, cast
from ...compat import cached_property
from ...config.models.cfngin import CfnginStackDefinitionModel
from ...core.providers.aws.s3 import Bucket
from ..exceptions import CfnginBucketAccessDenied
from . import deploy
from .base import BaseAction
if TYPE_CHECKING:
import threading
from ..._logging import RunwayLogger
from ...context import CfnginContext
from ..providers.aws.default import ProviderBuilder
LOGGER = cast("RunwayLogger", logging.getLogger(__name__))
class Action(BaseAction):
    """Initialize environment."""
    NAME = "init"
    DESCRIPTION = "Initialize environment"
    def __init__(
        self,
        context: CfnginContext,
        provider_builder: Optional[ProviderBuilder] = None,
        cancel: Optional[threading.Event] = None,
    ):
        """Instantiate class.
        This class creates a copy of the context object prior to initialization
        as some of it can perform destructive actions on the context object.
        Args:
            context: The context for the current run.
            provider_builder: An object that will build a provider that will be
                interacted with in order to perform the necessary actions.
            cancel: Cancel handler.
        """
        super().__init__(
            context=context.copy(), provider_builder=provider_builder, cancel=cancel
        )
    @property
    def _stack_action(self) -> Any:
        """Run against a step.
        Always ``None``: init delegates stack work to the deploy action in
        :meth:`run`.
        """
        return None
    @cached_property
    def cfngin_bucket(self) -> Optional[Bucket]:
        """CFNgin bucket, or ``None`` when ``cfngin_bucket`` is not defined."""
        if not self.context.bucket_name:
            return None
        return Bucket(
            self.context,
            name=self.context.bucket_name,
            region=self.context.bucket_region,
        )
    @cached_property
    def default_cfngin_bucket_stack(self) -> CfnginStackDefinitionModel:
        """Fallback stack definition used to create the CFNgin bucket."""
        return CfnginStackDefinitionModel(
            class_path="runway.cfngin.blueprints.cfngin_bucket.CfnginBucket",
            in_progress_behavior="wait",
            name="cfngin-bucket",
            termination_protection=True,
            variables={"BucketName": self.context.bucket_name},
        )
    def run(
        self,
        *,
        concurrency: int = 0,
        dump: Union[bool, str] = False,  # pylint: disable=unused-argument
        force: bool = False,  # pylint: disable=unused-argument
        outline: bool = False,  # pylint: disable=unused-argument
        tail: bool = False,
        upload_disabled: bool = True,  # pylint: disable=unused-argument
        **_kwargs: Any,
    ) -> None:
        """Run the action.
        Args:
            concurrency: The maximum number of concurrent deployments.
            dump: Not used by this action
            force: Not used by this action.
            outline: Not used by this action.
            tail: Tail the stack's events.
            upload_disabled: Not used by this action.
        Raises:
            CfnginBucketAccessDenied: Could not head cfngin_bucket.
        """
        if not self.cfngin_bucket:
            LOGGER.info("skipped; cfngin_bucket not defined")
            return
        if self.cfngin_bucket.forbidden:
            raise CfnginBucketAccessDenied(bucket_name=self.cfngin_bucket.name)
        if self.cfngin_bucket.exists:
            # Nothing to do if the bucket is already provisioned.
            LOGGER.info("cfngin_bucket %s already exists", self.cfngin_bucket.name)
            return
        if self.context.get_stack("cfngin-bucket"):
            # Prefer a user-defined "cfngin-bucket" stack when present.
            LOGGER.verbose(
                "found stack for creating cfngin_bucket: cfngin-bucket",
            )
            self.context.stack_names = ["cfngin-bucket"]
        else:
            LOGGER.notice("using default blueprint to create cfngin_bucket...")
            self.context.config.stacks = [self.default_cfngin_bucket_stack]
            # clear cached values that were populated by checking the previous condition
            self.context._del_cached_property(  # pylint: disable=protected-access
                "stacks", "stacks_dict"
            )
        if self.provider_builder:
            # The bucket stack must deploy in the bucket's own region.
            self.provider_builder.region = self.context.bucket_region
        # Delegate the actual stack deployment to the deploy action; uploads
        # are disabled because the bucket does not exist yet.
        deploy.Action(
            context=self.context,
            provider_builder=self.provider_builder,
            cancel=self.cancel,
        ).run(
            concurrency=concurrency,
            tail=tail,
            upload_disabled=True,
        )
        return
    def pre_run(
        self,
        *,
        dump: Union[bool, str] = False,  # pylint: disable=unused-argument
        outline: bool = False,  # pylint: disable=unused-argument
        **__kwargs: Any,
    ) -> None:
        """Do nothing."""
    def post_run(
        self,
        *,
        dump: Union[bool, str] = False,  # pylint: disable=unused-argument
        outline: bool = False,  # pylint: disable=unused-argument
        **__kwargs: Any,
    ) -> None:
        """Do nothing."""
| 1.9375 | 2 |
ckanext/canada/tests/test_prop.py | thriuin/ckanext-canada | 31 | 12760498 | <reponame>thriuin/ckanext-canada
# -*- coding: UTF-8 -*-
from ckan.tests.helpers import FunctionalTestBase, call_action
from ckan.tests import factories
import ckan.lib.search as search
from ckanapi import LocalCKAN, ValidationError
import json
from nose.tools import assert_raises, assert_equal
SIMPLE_SUGGESTION = {
'type': 'prop',
'title_translated': {
'en': u'Simple Suggestion',
'fr': u'Suggestion simple'
},
'notes_translated': {
'en': u'Notes',
'fr': u'Notes',
},
'keywords': {
'en': [u'key'],
'fr': [u'clé'],
},
'reason': 'personal_interest',
'subject': ['persons'],
'date_submitted': '2021-01-01',
'date_forwarded': '2021-02-01',
'status': [],
}
COMPLETE_SUGGESTION = dict(SIMPLE_SUGGESTION,
status=[
{
'date': '2021-03-01',
'reason': 'under_review',
'comments': {
'en': 'good idea',
'fr': 'bon idée',
},
},
]
)
UPDATED_SUGGESTION = dict(SIMPLE_SUGGESTION,
status=[
{
'date': '2021-04-01',
'reason': 'released',
'comments': {
'en': 'here',
'fr': 'ici',
},
},
{
'date': '2021-03-01',
'reason': 'under_review',
'comments': {
'en': 'good idea',
'fr': 'bon idée',
},
},
]
)
class TestSuggestedDataset(FunctionalTestBase):
    # Functional tests for the "prop" (suggested dataset) package type.
    def test_simple_suggestion(self):
        # A sysadmin session (LocalCKAN with no username) can create a bare
        # suggestion; the empty 'status' list is dropped from the result.
        lc = LocalCKAN()
        org = factories.Organization()
        resp = lc.action.package_create(
            owner_org=org['name'],
            **SIMPLE_SUGGESTION)
        assert 'status' not in resp
    def test_normal_user_cant_create(self):
        # A regular editor may NOT create suggestions — only sysadmins can.
        user = factories.User()
        lc = LocalCKAN(username=user['name'])
        org = factories.Organization(users=[
            {
                'name': user['name'],
                'capacity': 'editor',
            }
        ]
        )
        assert_raises(ValidationError,
            lc.action.package_create,
            owner_org=org['name'],
            **SIMPLE_SUGGESTION)
    def test_normal_user_can_update(self):
        # An editor MAY update a suggestion created by a sysadmin (e.g. to
        # record a status/response entry).
        user = factories.User()
        slc = LocalCKAN()
        ulc = LocalCKAN(username=user['name'])
        org = factories.Organization(users=[
            {
                'name': user['name'],
                'capacity': 'editor',
            }
        ]
        )
        resp = slc.action.package_create(
            owner_org=org['name'],
            **SIMPLE_SUGGESTION)
        resp = ulc.action.package_update(
            owner_org=org['name'],
            id=resp['id'],
            **COMPLETE_SUGGESTION)
        assert resp['status'][0]['reason'] == 'under_review'
    def test_responses_ordered(self):
        # Status entries are re-ordered by their 'date' field on create.
        lc = LocalCKAN()
        org = factories.Organization()
        resp = lc.action.package_create(
            owner_org=org['name'],
            **UPDATED_SUGGESTION)
        # first update will be moved to end based on date field
        assert resp['status'][1]['reason'] == 'released'
| 1.828125 | 2 |
baselines/common/atari_wrappers.py | georgepsh/expert | 11 | 12760499 | import numpy as np
from collections import deque
import gym
import os
import gym.spaces as spaces
import cv2
import datalib.trajectories as trajectories
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # When set, forces an exact no-op count instead of sampling one.
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                # Episode ended during the no-ops: start over.
                obs = self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        # Pass-through; only reset behavior is modified by this wrapper.
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        # Requires the game to expose FIRE as action 1 (e.g. Breakout).
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        # Press FIRE (1), then action 2, re-resetting if either ends the episode.
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        # Pass-through; only reset behavior is modified by this wrapper.
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        # True when the underlying env reported done (real game over).
        self.was_real_done = True
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condtion for a few frames
            # so its important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame, max-pooled over the last two raw frames."""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
    def reset(self, **kwargs):
        # Bug fix: reset() was defined twice in this class; the first
        # definition (which dropped **kwargs) was silently shadowed by the
        # second. Keep the single kwargs-forwarding version.
        return self.env.reset(**kwargs)
    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # Only the last two raw frames are needed for the max-pool.
            if i == self._skip - 2: self._obs_buffer[0] = obs
            if i == self._skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info
class ClipRewardEnv(gym.RewardWrapper):
    """Reward wrapper that bins rewards to {-1, 0, +1} by their sign."""
    def __init__(self, env):
        super(ClipRewardEnv, self).__init__(env)
    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)
class SavedClipRewardEnv(gym.RewardWrapper):
    """Sign-clips rewards while remembering the last raw (unclipped) reward."""
    def __init__(self, env):
        super(SavedClipRewardEnv, self).__init__(env)
        self._flat_reward = 0
    def reward(self, reward):
        # Stash the raw reward so callers can inspect it after clipping.
        self._flat_reward = reward
        return np.sign(reward)
class SavedPositiveClippedRewardEnv(gym.RewardWrapper):
    """Clips rewards to {0, +1}; negative rewards become 0. Saves the raw value."""
    def __init__(self, env):
        super(SavedPositiveClippedRewardEnv, self).__init__(env)
        self._flat_reward = 0
    def reward(self, reward):
        # Remember the raw reward, then keep only the non-negative sign.
        self._flat_reward = reward
        return max(np.sign(reward), 0)
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, is_monte, is_pong):
        """Warp frames to 84x84 as done in the Nature paper and later work."""
        gym.ObservationWrapper.__init__(self, env)
        self.width = 84
        self.height = 84
        self.is_monte = is_monte
        self.is_pong = is_pong
        self.observation_space = spaces.Box(low=0, high=255,
            shape=(self.height, self.width, 1), dtype=np.uint8)
    def observation(self, frame):
        # The top 23 rows are overwritten per-game — presumably to mask the
        # score/lives display (black for Montezuma, the background RGB color
        # for Pong); confirm against the raw frames.
        if self.is_monte:
            frame[0:23, ...] = 0
        if self.is_pong:
            frame[0:23, :] = [144, 72, 17]
        # Grayscale, downscale to 84x84, and add a trailing channel axis.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        return frame[:, :, None]
class PenalizeDying(gym.Wrapper):
    """
    {'ale.lives': 6}
    """
    def __init__(self, env, penalty):
        # Subtract `penalty` from the reward whenever a life is lost.
        # NOTE(review): the initial life count is hard-coded to 6 — this
        # assumes a 6-life game (e.g. Montezuma's Revenge); confirm before
        # using with other games.
        gym.Wrapper.__init__(self, env)
        self.lives = 6
        self.penalty = penalty
    def reset(self):
        ob = self.env.reset()
        self.lives = 6
        return ob
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        new_lives = info['ale.lives']
        if new_lives < self.lives:
            # A life was lost this step: apply the penalty.
            self.lives = new_lives
            reward -= self.penalty
            # done = True
        return ob, reward, done, info
class StepPenalty(gym.Wrapper):
    """Subtract a constant penalty from the reward at every step."""
    def __init__(self, env, step_penalty):
        gym.Wrapper.__init__(self, env)
        self.env = env
        self.step_penalty = step_penalty
    def reset(self, **kwargs):
        # Bug fix: the observation returned by the wrapped env's reset was
        # discarded (the method implicitly returned None), breaking the
        # gym reset contract for any caller of this wrapper.
        return self.env.reset(**kwargs)
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        reward = reward - self.step_penalty
        return ob, reward, done, info
class LimitLength(gym.Wrapper):
    # Ends the episode after k steps, subtracting timeout_penalty on timeout.
    def __init__(self, env, k, timeout_penalty):
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.timeout_penalty = timeout_penalty
    def reset(self):
        # This assumes that reset() will really reset the env.
        # If the underlying env tries to be smart about reset
        # (e.g. end-of-life), the assumption doesn't hold.
        ob = self.env.reset()
        self.cnt = 0
        return ob
    def step(self, action):
        ob, r, done, info = self.env.step(action)
        self.cnt += 1
        if self.cnt == self.k:
            # Step budget exhausted: terminate and penalize the timeout.
            done = True
            r -= self.timeout_penalty
        return ob, r, done, info
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # Channels of the k stacked frames are concatenated along the last axis.
        self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8)
    def reset(self):
        ob = self.env.reset()
        # Fill the whole window with the first frame so the stack is full.
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info
    def _get_ob(self):
        assert len(self.frames) == self.k
        # LazyFrames defers the concatenation until the array is actually used.
        return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
    """Rescale uint8 observations to float32 values in [0, 1]."""
    def __init__(self, env):
        super(ScaledFloatFrame, self).__init__(env)
    def observation(self, observation):
        # careful! This undoes the memory optimization, use
        # with smaller replay buffers only.
        scaled = np.array(observation).astype(np.float32)
        return scaled / 255.0
class LazyFrames(object):
    """Memory-saving wrapper around a list of frames.

    Keeps references to the individual frames and only concatenates them
    (along the channel axis) the first time array access is required, so
    frames shared between adjacent stacked observations are stored once.
    Convert to a numpy array before passing to a model.
    """
    def __init__(self, frames):
        self._frames = frames
        self._out = None
    def _force(self):
        # Concatenate lazily on first access, then drop the frame references.
        if self._out is None:
            stacked = np.concatenate(self._frames, axis=2)
            self._out = stacked
            self._frames = None
        return self._out
    def __array__(self, dtype=None):
        arr = self._force()
        return arr if dtype is None else arr.astype(dtype)
    def __len__(self):
        return len(self._force())
    def __getitem__(self, i):
        return self._force()[i]
class RandomStartingWrapper(gym.Wrapper):
    # On every reset, restores the emulator to a state sampled uniformly from
    # pre-recorded expert trajectories (loaded from config['traj_dir']).
    def __init__(self, env, config):
        super(RandomStartingWrapper, self).__init__(env)
        self.config = config
        self.df = trajectories.load_trajectories_by_score(
            trajectory_dir=config['traj_dir'],
            max_score_cutoff=config['max_score_cutoff'],
            min_score_cutoff=config['min_score_cutoff'],
            project_level_gamma=config['gamma'],
            clip_rewards=config['clip_rewards'],
            frameskip=config['frameskip'],
            process_lost_lifes=config['process_lost_lifes'],
        )
        # Set via seed(); sampling without an explicit seed is an error here.
        self.random_state = None
    def seed(self, seed=None):
        self.env.seed(seed)
        if seed is None:
            raise ValueError("Seed cannot be None in case of random starting env wrapper")
        self.random_state = np.random.RandomState(seed)
    def reset(self, **kwargs):
        super(RandomStartingWrapper, self).reset(**kwargs)
        self.env.env
        wrapped_env = self.env.env
        if self.random_state is None:
            raise ValueError("Uninitialized random state")
        idx = self.random_state.randint(1, len(self.df))
        # We have to kick out the first frame, because we don't have observation before it
        while self.df.iloc[idx].frame == 0:
            idx = self.random_state.randint(1, len(self.df))
        # print("Will restore state no = {}".format(idx))
        traj = self.df.iloc[idx].trajectory
        state_idx = self.df.iloc[idx].frame
        # Emulator snapshot for the sampled frame...
        state_fname = os.path.join(self.config['stat_dir'], "{}/{:07d}.npy".format(traj, state_idx))
        state = np.load(state_fname)
        # ...and the screen image of the frame *before* it (index - 1).
        img_fname = os.path.join(self.config['img_dir'], "{}/{:07d}.png".format(traj, state_idx - 1))
        img = cv2.imread(img_fname, cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        wrapped_env.restore_full_state(state)
        # wrapped_env._get_obs() returns observation before state change, so we have to fix it ourselves
        # https://github.com/openai/gym/issues/715
        return img
class DoomMyWayHomeActionWrapper(gym.ActionWrapper):
    """
    Doom my way home env (see doom.env.doom_my_way_home) has action space:
    actions = [0] * 43
    actions[13] = 0 # MOVE_FORWARD
    actions[14] = 1 # TURN_RIGHT
    actions[15] = 0 # TURN_LEFT
    We need to change that to match the scheme implemented while gathering data
    (and to match the stochastic-policy reinforcement learning formulation).
    We want to map the discrete action's three bits onto those buttons, e.g.:
    7 -> forward + right + left all pressed
    5 -> forward + left
    (the relevant slots of the 43-element array start at index 13)
    """
    def __init__(self, env):
        super(DoomMyWayHomeActionWrapper, self).__init__(env)
        # 3 independent binary buttons -> 2**3 = 8 discrete actions.
        self.action_space = gym.spaces.Discrete(8)
    def action(self, action):
        """Decode a discrete action in range(8) into the 43-button Doom list."""
        bits = action
        move_fwd = bits % 2
        bits = bits // 2
        turn_right = bits % 2
        bits = bits // 2
        turn_left = bits % 2
        bits = bits // 2
        assert bits == 0
        out = [0] * 43
        # BUGFIX: the movement buttons live at indices 13-15 of the Doom
        # action array (see the class docstring); the original wrote the
        # bits to indices 0-2, which address unrelated buttons.
        out[13] = move_fwd    # MOVE_FORWARD
        out[14] = turn_right  # TURN_RIGHT
        out[15] = turn_left   # TURN_LEFT
        return out
def make_state_restoring_atari(env_id, config):
    """Create a NoFrameskip Atari env that resets into random saved states."""
    env = gym.make(env_id)
    assert 'NoFrameskip' in env.spec.id
    env = RandomStartingWrapper(env, config)
    return MaxAndSkipEnv(env, skip=4)
def make_atari(env_id):
    """Create a NoFrameskip Atari env with noop resets and a frameskip of 4."""
    env = gym.make(env_id)
    assert 'NoFrameskip' in env.spec.id
    env = NoopResetEnv(env, noop_max=30)
    return MaxAndSkipEnv(env, skip=4)
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False, is_monte=False, is_pong=False, save_original_reward=False, only_positive_rewards=False):
    """Configure environment for DeepMind-style Atari.

    Wrapping order: episodic life -> fire reset -> frame warp -> float
    scaling -> reward shaping -> frame stacking.
    """
    if episode_life:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env, is_monte, is_pong)
    if scale:
        env = ScaledFloatFrame(env)
    # Reward shaping: positive-only clipping takes precedence over plain
    # clipping; "saved" variants keep the unclipped reward around.
    if only_positive_rewards:
        env = SavedPositiveClippedRewardEnv(env)
    elif clip_rewards:
        reward_wrapper = SavedClipRewardEnv if save_original_reward else ClipRewardEnv
        env = reward_wrapper(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
def wrap_doom_deepmind_like(env, clip_rewards=True, frame_stack=False, scale=False, save_original_reward=False):
    """Apply the DeepMind-style Atari preprocessing stack to a Doom env."""
    env = WarpFrame(env, is_monte=False, is_pong=False)
    env = DoomMyWayHomeActionWrapper(env)
    if scale:
        env = ScaledFloatFrame(env)
    if clip_rewards:
        reward_wrapper = SavedClipRewardEnv if save_original_reward else ClipRewardEnv
        env = reward_wrapper(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
| 2.703125 | 3 |
worms_v4/gnparse_python.py | diatomsRcool/dynamic_working_hierarchy | 0 | 12760500 | <filename>worms_v4/gnparse_python.py
import json
"""
#This code block prepares a list of names for gnparser from a dwc taxon file.
in_file = open('taxon.tab', 'r')
out_file = open('taxon_1.tab', 'w')
for line in in_file:
row = line.split('\t')
name = row[5]
out_file.write(name + '\n')
"""
#Use this line in bash to actually use parser: gnparse file --input taxon_1.tab --output taxon_2.tab
#This line can be run regardless of what directory you are in.
# This code block takes the json output from gnparser and reinserts the
# canonical names back into the DwC-A taxon file: column 5 (the name) is
# replaced with the parsed canonical form whenever gnparser succeeded;
# unparsed rows are copied through unchanged.
# Fixes vs original: the output file is now closed via a context manager,
# the parsed-flag test uses truthiness instead of `== False`, and the
# duplicated write call is collapsed into one.
with open('taxon.tab') as textfile1, open('taxon_2.tab') as textfile2, \
        open('taxon_3.tab', 'w') as out_file:
    for x, y in zip(textfile1, textfile2):
        row = x.split('\t')
        print(row[5])  # progress: show the original name
        p_names = json.loads(y)
        if p_names['parsed']:
            # Replace the verbatim name with gnparser's canonical form.
            row[5] = p_names['canonical_name']['value']
        out_file.write('\t'.join(row))
| 3 | 3 |
mapping_career_causeways/scripts/upskilling_aws_scripts/skill_perturbation_All_to_essential_C.py | bb20417/mapping-career-causeways | 24 | 12760501 | <reponame>bb20417/mapping-career-causeways
import pandas as pd
import numpy as np
from time import time
from itertools import combinations_with_replacement
import pickle
import mapping_career_causeways.compare_nodes_utils as compare_nodes_utils
import os
import boto3
from ast import literal_eval
import sys
## SETUP
# Get the index (into the sorted core-skills list) of the skill to perturb.
# BUGFIX: the original used a bare `raise` outside any `except` block,
# which fails with "RuntimeError: No active exception to re-raise" instead
# of a clean usage error; exit explicitly with a message instead.
if len(sys.argv) < 2:
    sys.exit('Core skill integer missing!')
else:
    j_skill = int(sys.argv[1])
# Print out free RAM stats
# Toggle to True to enable the (currently no-op) memory monitoring hook.
debug_mem = False
def check_mem():
    """
    If running into memory problems, can add a monitoring step here (e.g. using psutils)
    """
    pass
# Exercise the hook once up front so a broken implementation fails fast.
if debug_mem: check_mem()
# Set up AWS params
# todo: move the location of the access keys to a config file!
df_keys = pd.read_csv('../../private/karlisKanders_accessKeys.csv')
os.environ["AWS_ACCESS_KEY_ID"] = df_keys['Access key ID'].iloc[0]
os.environ["AWS_SECRET_ACCESS_KEY"] = df_keys['Secret access key'].iloc[0]
# S3 locations for the input data and the result matrices.
bucket_name = 'ojd-temp-storage'
s3_output_folder = 'outputs_All_to_Essential/'
s3_client = boto3.client('s3')
s3_resource = boto3.resource('s3')
my_bucket = s3_resource.Bucket(name=bucket_name)
# Set up folder for temporary data (makedirs is idempotent and, unlike the
# original os.mkdir, also creates missing parent directories).
data_folder = '../../data/temp_files/'
os.makedirs(data_folder, exist_ok=True)
# Download (unless already cached locally) the skill embeddings and the
# lists of skills of each occupation.
files_to_download = [
    'embeddings_skills_description_SBERT.npy',
    'topOccupation_to_all_skills.pickle',
    'topOccupation_to_essential_skills.pickle',
    'sorted_core_skills_id.pickle']
for file in files_to_download:
    if not os.path.exists(data_folder + file):
        s3_resource.Object(bucket_name=bucket_name, key=file).download_file(data_folder + file)
embeddings = np.load(data_folder + files_to_download[0])
# Use context managers so the pickle file handles are closed promptly
# (the original left them open via pickle.load(open(...))).
with open(data_folder + files_to_download[2], 'rb') as f:
    node_to_essential_items_Top = pickle.load(f)
with open(data_folder + files_to_download[1], 'rb') as f:
    node_to_all_items_Top = pickle.load(f)
with open(data_folder + files_to_download[3], 'rb') as f:
    sorted_core_skills = pickle.load(f)
n_occupations = len(node_to_essential_items_Top)
## ANALYSIS
## Select skill to add to origin occupation's skill-set
skill_id = sorted_core_skills[j_skill]
## Set up "origin" sector and "destination" sectors
# Origin nodes: here, ESSENTIAL + OPTIONAL items
from_node_to_items = node_to_all_items_Top.copy()
from_node_to_items.sector = 'origin'
# Add the extra skill to job_i skillset
t = time()
skill_added_to = []
new_items_list = []
for job_i, row in from_node_to_items.iterrows():
    # Original skillset of the origin occupation
    origin_skillset = row.items_list.copy()
    # Check if skills is not already in the skillset
    if skill_id not in origin_skillset:
        list_of_skills = sorted([skill_id] + origin_skillset)
        new_items_list.append(str(list_of_skills))
        skill_added_to.append(row.original_id)
    else:
        new_items_list.append(str(origin_skillset))
# Re-evaluate all items lists so that they are treated as lists
# (the lists were stringified above so every row stores the same type).
from_node_to_items.items_list = new_items_list
from_node_to_items.items_list = from_node_to_items.items_list.apply(lambda x: literal_eval(x))
t_elapsed = time()-t
print(f"Added skill #{skill_id} to {len(skill_added_to)} occupations in {t_elapsed:.2f} seconds")
# Destination nodes: only ESSENTIAL items
to_node_to_items = node_to_essential_items_Top.copy()
to_node_to_items.sector = 'destination'
# Offset destination ids so origin and destination nodes don't collide.
to_node_to_items.id = to_node_to_items.id + n_occupations
# Combine all into one dataframe
node_to_items = pd.concat([from_node_to_items, to_node_to_items]).reset_index(drop=True)
# Set up the combination of sectors to check
combos = [('origin','destination')]
# Perform the comparison!
comp_all_to_essential = compare_nodes_utils.CompareSectors(
    node_to_items,
    embeddings,
    combos,
    metric='cosine',
    symmetric=False)
t = time()
print('Running comparisons...')
if debug_mem: check_mem()
comp_all_to_essential.run_comparisons(dump=False)
print('Collecting comparisons...')
# Free the (large) embeddings array before collecting results to keep
# peak memory down.
del embeddings
if debug_mem: check_mem()
comp_all_to_essential.collect_comparisons()
t_elapsed = time()-t
print(f"Total time elapsed: {t_elapsed:.0f} seconds")
if debug_mem: check_mem()
print('----')
# Select only the edges from origin to destination occupations
print('Organising the similarity matrix...')
W_all_to_essential = comp_all_to_essential.D
i_edges = [edge[0] for edge in comp_all_to_essential.real_edge_list]
# Keep edges whose source index is an origin node (index < n_occupations).
from_edges = np.array(comp_all_to_essential.real_edge_list)[np.where(np.array(i_edges)<n_occupations)[0]]
W_perturbed = np.zeros((n_occupations,n_occupations))
for edge in from_edges:
    W_perturbed[edge[0], edge[1]-n_occupations] = W_all_to_essential[edge[0],edge[1]]
# Take care of nulls
W_perturbed[np.isinf(W_perturbed)] = 0
# Save
output_file_name = f"W_perturbed_All_to_essential_{j_skill}_Skill_{skill_id}.npy"
np.save(data_folder + output_file_name, W_perturbed)
# Upload to S3
s3_resource.Object(bucket_name, s3_output_folder + output_file_name).upload_file(Filename=data_folder + output_file_name)
print(f'{j_skill}/{len(sorted_core_skills)} done!')
if debug_mem: check_mem()
print('==========')
| 2.515625 | 3 |
.venv/Lib/site-packages/dexpy/simplex_lattice.py | AI-Assistant/FEMAG-Python | 0 | 12760502 | <reponame>AI-Assistant/FEMAG-Python
"""Functions for building a simplex lattice design."""
import dexpy.design as design
import pandas as pd
import numpy as np
import itertools
from dexpy.model import ModelOrder
from dexpy.eval import count_n_choose_k as count_nk
def build_simplex_lattice(factor_count, model_order = ModelOrder.quadratic):
    """Builds a Simplex Lattice mixture design.

    This design can be used for 2 to 30 components. The points include the
    pure components plus enough intermediate blends to estimate a model of
    the requested order: 1/2-1/2 binary blends for quadratic, and
    2/3-1/3, 1/3-2/3 and 1/3-1/3-1/3 blends for cubic.

    :param factor_count: The number of mixture components to build for.
    :type factor_count: int
    :param model_order: The order to build for. ModelOrder.linear chooses
                        vertices only (pure blends), ModelOrder.quadratic
                        adds binary blends, and ModelOrder.cubic adds
                        blends of three components.
    :type model_order: dexpy.model.ModelOrder
    """
    factor_names = design.get_factor_names(factor_count)
    rows = []

    def add_run(assignment):
        # Each run is all-zero except for the blended components.
        run = dict.fromkeys(factor_names, 0)
        run.update(assignment)
        rows.append(run)

    # Pure blends: always present, one vertex per component.
    for name in factor_names:
        add_run({name: 1.0})
    if model_order == ModelOrder.quadratic:
        # 1/2 1/2 binary blends.
        for a, b in itertools.combinations(factor_names, 2):
            add_run({a: 0.5, b: 0.5})
    elif model_order == ModelOrder.cubic:
        # 2/3 1/3 blends (and vice versa).
        for a, b in itertools.combinations(factor_names, 2):
            add_run({a: 2/3, b: 1/3})
            add_run({a: 1/3, b: 2/3})
        # 1/3 1/3 1/3 triple blends.
        if factor_count > 2:
            for a, b, c in itertools.combinations(factor_names, 3):
                add_run({a: 1/3, b: 1/3, c: 1/3})
    return pd.DataFrame(rows, columns=factor_names,
                        index=np.arange(0, len(rows)))
| 3.1875 | 3 |
kong-python-pdk/kong_pdk/pdk/kong/service/__init__.py | srAtKong/kong-custom-plugin-py-tcp | 18 | 12760503 | <filename>kong-python-pdk/kong_pdk/pdk/kong/service/__init__.py
# AUTO GENERATED BASED ON Kong 2.4.x, DO NOT EDIT
# Original source path: kong/pdk/service.lua
from typing import TypeVar, Any, Union, List, Mapping, Tuple, Optional
# Type aliases mirroring the Lua PDK's loosely-typed signatures.
# NOTE(review): TypeVar with value constraints is a misuse here (these
# names are used as plain annotations, not generic parameters); Union
# types would express the intent -- left untouched because this file is
# auto generated.
number = TypeVar('number', int, float)
table = TypeVar('table', List[Any], Mapping[str, Any])
# XXX
cdata = Any
err = str
from .request import request as cls_request
from .response import response as cls_response
class service():
    # Auto-generated stub bindings: method bodies are implemented by the
    # Kong PDK bridge at runtime; the docstrings (kept verbatim from the
    # generator, hence the Lua examples) describe the upstream API.
    request = cls_request
    response = cls_response
    @staticmethod
    def set_target(host: str, port: number) -> None:
        """
        kong.service.set_target("service.local", 443)
        kong.service.set_target("192.168.130.1", 80)
        :param host:
        :param port:
        """
        pass
    @staticmethod
    def set_tls_verify(on: bool) -> Tuple[bool, str]:
        """
        local ok, err = kong.service.set_tls_verify(true)
        if not ok then
            -- do something with error
        end
        :param on: Whether to enable TLS certificate verification for the current request
        :returns `true` if the operation succeeded, `nil` if an error occurred
        returns An error message describing the error if there was one
        """
        pass
    @staticmethod
    def set_tls_verify_depth(depth: number) -> Tuple[bool, str]:
        """
        local ok, err = kong.service.set_tls_verify_depth(3)
        if not ok then
            -- do something with error
        end
        :param depth: Depth to use when validating. Must be non-negative
        :returns `true` if the operation succeeded, `nil` if an error occurred
        returns An error message describing the error if there was one
        """
        pass
    @staticmethod
    def set_upstream(host: str) -> Tuple[bool, str]:
        """
        local ok, err = kong.service.set_upstream("service.prod")
        if not ok then
            kong.log.err(err)
            return
        end
        :param host:
        :returns `true` on success, or `nil` if no upstream entities
        where found
        returns An error message describing the error if there was
        one.
        """
        pass
pass | 2.40625 | 2 |
gen_templates.py | bentotten/gamma-spectra_denoising | 0 | 12760504 | import os
import sys
import time
import json
import h5py
import argparse
import numpy as np
from tqdm import tqdm
from spectra_utils import load_radionuclide_nndc, generate_spectrum, plot_spectrum
def load_nndc_tables(nndc_dir, radionuclides):
    """Load the NNDC gamma-line table (keV / intensity) for each radionuclide."""
    tables = {}
    for rn in radionuclides:
        keV, intensity = load_radionuclide_nndc(nndc_dir, rn)
        tables[rn] = {"keV": keV, "intensity": intensity}
    return tables
def generate_templates(config, nndc_tables, outdir, savefigs):
    """Generate a detector-specific spectrum template per radionuclide.

    Optionally saves a plot of each template into ``outdir``.
    """
    templates = {}
    for name, lines in tqdm(nndc_tables.items()):
        keV, intensity, _, _ = generate_spectrum(lines, config)
        templates[name] = {"keV": keV, "intensity": intensity}
        if savefigs:
            plot_spectrum(keV, intensity, name, outdir)
    return templates
def save_templates(dettype, templates, outfile):
    """Write (or overwrite) the templates into an HDF5 file.

    Layout: /<dettype>/<radionuclide>/{keV,intensity}.

    The original used bare ``except:`` blocks to tolerate pre-existing
    groups/datasets, which also silenced every other error (I/O failures,
    shape mismatches, typos). ``require_group`` and an explicit membership
    test express the same intent without masking real problems.
    """
    with h5py.File(outfile, 'a') as h5f:
        det_group = h5f.require_group(dettype)
        for rn_name, spectrum in templates.items():
            rn_group = det_group.require_group(rn_name)
            for key, values in spectrum.items():
                if key in rn_group:
                    # Overwrite in place (shapes must match, as before).
                    rn_group[key][...] = values
                else:
                    rn_group.create_dataset(key, data=values)
def main():
    """Generate and store spectrum templates for each requested detector type.

    Returns 0 on success (used as the process exit code).
    """
    start = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument("-rl", "--rnlistfile", help="file containing list of radionuclides to use", default="ANSI_N42.34.json")
    parser.add_argument("-cf", "--configfile", help="configuration file for generating data", default="config_data.json")
    parser.add_argument("-out", "--outfile", help="output file for data", default="data/templates.h5")
    parser.add_argument("-det", "--dettype", help="detector type", default="HPGe,NaI,CZT")
    #parser.add_argument("-det", "--dettype", help="detector type", default="HPGe")
    parser.add_argument("-nndc", "--nndctables", help="location of NNDC tables data", default="nuclides-nndc")
    parser.add_argument("-sf", "--savefigs", help="saves plots of templates", action="store_true")
    #parser.add_argument("-n", "--normalize", help="normalize templates by RMS", action="store_true")
    arg = parser.parse_args()
    outdir = os.path.dirname(arg.outfile)
    outfile = arg.outfile
    # load configuration parameters
    with open(arg.configfile, 'r') as cfile:
        config = json.load(cfile)
    # make output dir if it does not exist
    os.makedirs(outdir, exist_ok=True)
    # load NNDC tables for radionuclides
    nndc_tables = load_nndc_tables(arg.nndctables, config["RADIONUCLIDES"])
    # One pass per detector type; templates for every detector accumulate
    # in the same HDF5 output file under separate groups.
    for dettype in arg.dettype.split(','):
        dettype = dettype.upper()
        print(f'Generating templates for detector {dettype}')
        if arg.savefigs:
            os.makedirs(os.path.join(outdir, dettype), exist_ok=True)
        templates = generate_templates(config["DETECTORS"][dettype], nndc_tables, os.path.join(outdir, dettype), arg.savefigs)
        save_templates(dettype, templates, outfile)
    print(f'Script completed in {time.time()-start:.2f} secs')
    return 0
if __name__ == '__main__':
    sys.exit(main())
| 2.453125 | 2 |
tools/util.py | hasahmed/shape_game_cpp | 0 | 12760505 | <reponame>hasahmed/shape_game_cpp<filename>tools/util.py
import os
from os import listdir
from os.path import isdir
DIST_PATH = 'dist-libs'
INCLUDE = 'include'
DEPS = 'deps'
def has_include():
    """Return True if the current working directory contains the project's
    ``include`` directory.
    """
    # Direct check instead of scanning every entry of the cwd; isdir()
    # already returns False when the path is missing or is a plain file.
    return isdir(INCLUDE)
def cd_root():
    """Change the working directory to the project root, identified by the
    presence of the ``include`` directory; looks at most one level up and
    aborts the process if the root cannot be found.
    """
    if has_include():
        return
    os.chdir('..')
    if not has_include():
        print('project root directory was expected in parent directory at highest. Run from tools/ or project root')
        exit()
| 2.359375 | 2 |
openstack_dashboard/dashboards/project/stacks/sro.py | ameoba/horizon | 2 | 12760506 | from django.template.defaultfilters import title # noqa
from django.template.loader import render_to_string # noqa
from horizon.utils import filters
def stack_info(stack, stack_image):
    """Render the stack-info HTML snippet for a Heat stack.

    Mutates ``stack`` in place to add a human-readable status description.
    """
    stack.stack_status_desc = title(
        filters.replace_underscores(stack.stack_status))
    if stack.stack_status_reason:
        stack.stack_status_reason = title(
            filters.replace_underscores(stack.stack_status_reason)
        )
    context = {'stack': stack, 'stack_image': stack_image}
    return render_to_string('project/stacks/_stack_info.html', context)
def resource_info(resource):
    """Render the resource-info HTML snippet for a Heat stack resource.

    Mutates ``resource`` in place to add a human-readable status description.
    """
    resource.resource_status_desc = title(
        filters.replace_underscores(resource.resource_status)
    )
    if resource.resource_status_reason:
        resource.resource_status_reason = title(
            filters.replace_underscores(resource.resource_status_reason)
        )
    context = {'resource': resource}
    return render_to_string('project/stacks/_resource_info.html', context)
| 2.015625 | 2 |
tools/process_malloc_trace.py | kevinxucs/pyston | 1 | 12760507 | <filename>tools/process_malloc_trace.py
# Scan a malloc/free trace for double frees: report (once) every pointer
# that is freed again without an intervening malloc at the same address.
# Fixes vs original: the log file handle is closed via a context manager,
# startswith() takes a tuple instead of two calls, and print uses call
# syntax so the script runs under both Python 2 and 3.
with open("out.log") as log:
    lines = [l for l in log if l.startswith(("malloc ", "free "))]

freed = set()
err = set()
for l in lines:
    if l.startswith("malloc"):
        p = l[7:]  # text after "malloc "
        # A fresh allocation at this address makes it live again.
        if p in freed:
            freed.remove(p)
    else:
        assert l.startswith("free")
        p = l[5:]  # text after "free "
        if p.startswith("(nil)"):
            continue  # free(NULL) is a no-op
        if p in freed:
            # Second free of the same address: report each pointer once.
            if p not in err:
                err.add(p)
                print(p.strip())
        freed.add(p)
| 2.421875 | 2 |
sendsecure/json_client.py | xmedius/sendsecure-python | 0 | 12760508 | import os
import io
from .utils import *
from .exceptions import *
class JsonClient:
"""
JsonClient object constructor.
@param api_token:
The API Token to be used for authentication with the SendSecure service
@param user_id:
The user id of the current user
@param enterprise_account:
The SendSecure enterprise account
@param endpoint:
The URL to the SendSecure service ("https://portal.xmedius.com" will be used by default if empty)
@param locale:
The locale in which the server errors will be returned ("en" will be used by default if empty)
"""
def __init__(self, options):
self.locale = options.get('locale', 'en')
self.enterprise_account = options.get('enterprise_account')
self.endpoint = options.get('endpoint', 'https://portal.xmedius.com')
self.sendsecure_endpoint = None
self.token = str(options.get('token'))
self.user_id = options.get('user_id')
"""
Pre-creates a SafeBox on the SendSecure system and initializes the Safebox object accordingly.
@param user_email:
The email address of a SendSecure user of the current enterprise account
@return: The json containing the guid, public encryption key and upload url of the initialize SafeBox
"""
def new_safebox(self, user_email):
params = {'user_email': user_email}
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes/new.json'], params)
return self._do_get(url, 'application/json')
"""
Pre-creates a document on the SendSecure system and initializes the Safebox object accordingly.
@param safebox_guid:
The guid of the existing safebox
@param file_params:
The full json expected by the server
@return: The json containing the temporary document GUID and the upload URL
"""
def new_file(self, safebox_guid, file_params):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes/', safebox_guid, 'uploads.json'])
return self._do_post(url, 'application/json', file_params, 'application/json')
"""
Uploads the specified file as an Attachment of the specified SafeBox.
@param upload_url:
The url returned by the initializeSafeBox. Can be used multiple time
@param source:
The path of the file to upload or the stream
@param content_type:
The MIME content type of the uploaded file
@param filename:
The file name
@param filesize:
The filesize
@return: The json containing the guid of the uploaded file
"""
def upload_file(self, upload_url, source, content_type='application/octet-stream', filename=None, filesize=None):
status_code = None
status_line = None
response_body = None
if type(source) == str:
(status_code, status_line, response_body) = http_upload_filepath(str(upload_url), source, content_type, filename)
elif self._is_file(source):
upload_filename = filename or source.name.split('/')[-1]
upload_filesize = filesize or (os.path.getsize(source.name) - source.tell())
(status_code, status_line, response_body) = http_upload_raw_stream(str(upload_url), source, content_type, upload_filename, upload_filesize)
else:
(status_code, status_line, response_body) = http_upload_raw_stream(str(upload_url), source, content_type, filename, filesize)
if status_code >= 400:
raise SendSecureException(status_code, status_line, response_body)
return response_body
"""
Finalizes the creation (commit) of the SafeBox on the SendSecure system. This actually "Sends" the SafeBox with
all content and contact info previously specified.
@param safebox_json:
The full json expected by the server
@return: The json containing the guid, preview url and encryption key of the created SafeBox
"""
def commit_safebox(self, safebox_json):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes.json'])
return self._do_post(url, 'application/json', safebox_json, 'application/json')
"""
Retrieves all available security profiles of the enterprise account for a specific user.
@param user_email:
The email address of a SendSecure user of the current enterprise account
@return: The json containing a list of Security Profiles
"""
def get_security_profiles(self, user_email):
params = {'user_email': user_email}
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/enterprises', self.enterprise_account, 'security_profiles.json'], params)
return self._do_get(url, 'application/json')
"""
Get the Enterprise Settings of the current enterprise account.
@return: The json containing the enterprise settings
"""
def get_enterprise_settings(self):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/enterprises', self.enterprise_account, 'settings.json'])
return self._do_get(url, 'application/json')
"""
Get the User Settings of the current user account
@return: The json containing the user settings
"""
def get_user_settings(self):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/enterprises', self.enterprise_account, 'users', self.user_id, 'settings.json'])
return self._do_get(url, 'application/json')
"""
Retrieves all favorites for the current user account.
@return: The json containing a list of Favorite
"""
def get_favorites(self):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/enterprises', self.enterprise_account, 'users', self.user_id, 'favorites.json'])
return self._do_get(url, 'application/json')
"""
Create a new favorite for the current user account.
@param favorite_json:
The full json expected by the server
@return: The json containing all the informations of the created Favorite
"""
def create_favorite(self, favorite_json):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/enterprises', self.enterprise_account, 'users', self.user_id, 'favorites.json'])
return self._do_post(url, 'application/json', favorite_json, 'application/json')
"""
Update an existing favorite for the current user account.
@param favorite_id
The id of the favorite to be updated
@param favorite_params
The full json expected by the server
@return: The json containing all the informations of the updated Favorite
"""
def update_favorite(self, favorite_id, favorite_json):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/enterprises', self.enterprise_account, 'users', self.user_id, 'favorites', str(favorite_id) + '.json'])
return self._do_patch(url, 'application/json', favorite_json, 'application/json')
"""
Delete an existing favorite for the current user account.
@param favorite_id:
The id of the Favorite to be deleted
@return: Nothing
"""
def delete_favorite(self, favorite_id):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/enterprises', self.enterprise_account, 'users', self.user_id, 'favorites', str(favorite_id) + '.json'])
return self._do_delete(url, 'application/json')
"""
Create a new participant for a specific open safebox of the current user account.
@param safebox_guid:
The guid of the safebox to be updated
@param participant_json:
The full json expected by the server
@return: The json containing all the informations of the created Participant
"""
def create_participant(self, safebox_guid, participant_json):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'participants.json'])
return self._do_post(url, 'application/json', participant_json, 'application/json')
"""
Update an existing participant for a specific open safebox of the current user account.
@param safebox_guid:
The guid of the safebox to be updated
@param participant_id:
The id of the participant to be updated
@param participant_json
The full json expected by the server
@return: The json containing all the informations of the updated Participant
"""
def update_participant(self, safebox_guid, participant_id, participant_json):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'participants', participant_id + '.json'])
return self._do_patch(url, 'application/json', participant_json, 'application/json')
"""
Search the recipients for a safebox
@param term:
A Search term
@return: The json containing the search result
"""
def search_recipient(self, term):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/recipients/autocomplete?term=' + term])
return self._do_get(url, 'application/json')
"""
Reply to a specific safebox associated to the current user's account.
@param safebox_guid:
The guid of the safebox to be updated
@param reply_params:
The full json expected by the server
@return: The json containing the request result
"""
def reply(self, safebox_guid, reply_params):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, '/messages.json'])
return self._do_post(url, 'application/json', reply_params, 'application/json')
"""
Manually add time to expiration date for a specific open safebox of the current user account.
@param safebox_guid:
The guid of the safebox to be updated
@param add_time_json:
The full json expected by the server
@return: The json containing the new expiration date
"""
def add_time(self, safebox_guid, add_time_json):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'add_time.json'])
return self._do_patch(url, 'application/json', add_time_json, 'application/json')
"""
Manually close an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox to be closed
@return: The json containing the request result
"""
def close_safebox(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'close.json'])
return self._do_patch(url, 'application/json', '', 'application/json')
"""
Manually delete the content of a closed safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@return: The json containing the request result
"""
def delete_safebox_content(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'delete_content.json'])
return self._do_patch(url, 'application/json', '', 'application/json')
"""
Manually mark as read an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@return: The json containing the request result
"""
def mark_as_read(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'mark_as_read.json'])
return self._do_patch(url, 'application/json', '', 'application/json')
"""
Manually mark as unread an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@return: The json containing the request result
"""
def mark_as_unread(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'mark_as_unread.json'])
return self._do_patch(url, 'application/json', '', 'application/json')
"""
Manually mark as read an existing message.
@param safebox_guid:
The guid of the safebox
@param message_id:
The id of the message to be marked as read
@return: The json containing the request result
"""
def mark_as_read_message(self, safebox_guid, message_id):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'messages', str(message_id), 'read'])
return self._do_patch(url, 'application/json', '', 'application/json')
"""
Manually mark as unread an existing message.
@param safebox_guid:
The guid of the safebox
@param message_id:
The id of the message to be marked as unread
@return: The json containing the request result
"""
def mark_as_unread_message(self, safebox_guid, message_id):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'messages', str(message_id), 'unread'])
return self._do_patch(url, 'application/json', '', 'application/json')
"""
Retrieve a specific file url of an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@param document_guid:
The guid of the file
@param user_email:
The current user email
@return: The json containing the file url on the fileserver
"""
def get_file_url(self, safebox_guid, document_guid, user_email):
params = {'user_email': user_email}
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'documents', document_guid, 'url.json'], params)
return self._do_get(url, 'application/json')
"""
Retrieve the url of the audit record of an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@return: The json containing the url
"""
def get_audit_record_url(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'audit_record_pdf.json'])
return self._do_get(url, 'application/json')
"""
Retrieve the audit record of an existing safebox for the current user account.
@param url:
The url of the safebox audit record
@return: The pdf stream
"""
def get_audit_record_pdf(self, url):
return self._get(url, 'application/pdf')
"""
Retrieve a filtered list of safeboxes for the current user account.
@param url:
The complete search url
@param search_params:
The optional filtering parameters
@return: The json containing the count, previous page url, the next page url and a list of Safebox
"""
def get_safeboxes(self, url, search_params):
if url is None:
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes.json'], search_params)
return self._do_get(url, 'application/json')
"""
Retrieve all info of an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox to be updated
@param sections:
The string containing the list of sections to be retrieved
@return: The json containing all the informations on the specified sections.
If no sections are specified, it will return all safebox infos.
"""
def get_safebox_info(self, safebox_guid, sections):
params = ''
if sections:
params = {'sections': sections}
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid + '.json'], params)
return self._do_get(url, 'application/json')
"""
Retrieve all participants info of an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@return: The json containing the list of participants
"""
def get_safebox_participants(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'participants.json'])
return self._do_get(url, 'application/json')
"""
Retrieve all messages info of an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@return: The json containing the list of messages
"""
def get_safebox_messages(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'messages.json'])
return self._do_get(url, 'application/json')
"""
Retrieve all security options info of an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@return: The json containing the Security Options
"""
def get_safebox_security_options(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'security_options.json'])
return self._do_get(url, 'application/json')
"""
Retrieve all download activity info of an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@return: The json containing the Download Activity
"""
def get_safebox_download_activity(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'download_activity.json'])
return self._do_get(url, 'application/json')
"""
Retrieve all event_history info of an existing safebox for the current user account.
@param safebox_guid:
The guid of the safebox
@return: The json containing a list of EventHistory
"""
def get_safebox_event_history(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, 'event_history.json'])
return self._do_get(url, 'application/json')
"""
Archive a specific safebox
@param safebox_guid:
The guid of the safebox
@param user_email:
The current user email
@return: The json containing the request result
"""
def archive_safebox(self, safebox_guid, user_email):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, '/tag/archive'])
return self._do_post(url, 'application/json', user_email, 'application/json')
"""
Remove the tag "archive" from the safebox
@param safebox_guid:
The guid of the safebox
@param user_email:
The current user email
@return: The json containing the request result
"""
def unarchive_safebox(self, safebox_guid, user_email):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, '/untag/archive'])
return self._do_post(url, 'application/json', user_email, 'application/json')
"""
Call to unfollow the SafeBox. By default, all new Safeboxes are "followed"
@param safebox:
A Safebox object
@return: An object containing the request result
"""
def unfollow(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, '/unfollow'])
return self._do_patch(url, 'application/json', '', 'application/json')
"""
Call to follow the SafeBox (opposite of the unfollow call).
@param safebox:
A Safebox object
@return: An object containing the request result
"""
def follow(self, safebox_guid):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/safeboxes', safebox_guid, '/follow'])
return self._do_patch(url, 'application/json', '', 'application/json')
"""
Call to get the list of all the localized messages of a consent group.
@param consent_group_id:
The id of the consent group
@return: The json containing the list of all the localized messages
"""
def get_consent_group_messages(self, consent_group_id):
url = urljoin([self._get_sendsecure_endpoint(), 'api/v2/enterprises', self.enterprise_account, '/consent_message_groups', str(consent_group_id)])
return self._do_get(url, 'application/json')
def _get_sendsecure_endpoint(self):
if not self.sendsecure_endpoint:
url = urljoin([self.endpoint, 'services', self.enterprise_account, 'sendsecure/server/url'])
new_endpoint = self._get(url, 'text/plain')
self.sendsecure_endpoint = new_endpoint
return self.sendsecure_endpoint
def _do_get(self, url, accept):
    """GET *url* with the client locale appended as a query parameter."""
    params = {'locale': self.locale}
    new_url = urljoin([url], params)
    return self._get(new_url, accept)

def _get(self, url, accept):
    """Low-level GET; raises SendSecureException on any HTTP error (>= 400)."""
    return self._unwrap_response(http_get(url, accept, self.token))

def _do_post(self, url, content_type, body, accept):
    """POST *body* to *url* (locale appended); raises SendSecureException on error."""
    params = {'locale': self.locale}
    return self._unwrap_response(
        http_post(urljoin([url], params), content_type, body, accept, self.token))

def _do_patch(self, url, content_type, body, accept):
    """PATCH *body* to *url* (locale appended); raises SendSecureException on error."""
    params = {'locale': self.locale}
    return self._unwrap_response(
        http_patch(urljoin([url], params), content_type, body, accept, self.token))

def _do_delete(self, url, accept):
    """DELETE *url* (locale appended); raises SendSecureException on error."""
    params = {'locale': self.locale}
    return self._unwrap_response(http_delete(urljoin([url], params), accept, self.token))

def _unwrap_response(self, result):
    """Shared error handling for every HTTP helper.

    Each helper returns ``(status_code, status_line, response_body)``; raise
    SendSecureException for status >= 400, otherwise return the body.  This
    replaces the identical check previously copy-pasted into each method.
    """
    status_code, status_line, response_body = result
    if status_code >= 400:
        raise SendSecureException(status_code, status_line, response_body)
    return response_body
def _is_file(self, obj):
    """Return True when *obj* is a file-like stream.

    io.TextIOBase, io.BufferedIOBase and io.RawIOBase are all subclasses of
    io.IOBase, so testing the single base class is equivalent to the former
    four-way tuple check.
    """
    return isinstance(obj, io.IOBase)
mainpage/urls.py | eunjungleecub/jessica42 | 1 | 12760509 | <reponame>eunjungleecub/jessica42
from django.urls import path
from .views import MainView, AboutView
# Routes for the main pages; named so templates and views can reverse them
# with {% url 'main' %} / reverse('about').
urlpatterns = [
    path('', MainView.as_view(), name='main'),
    path('about/', AboutView.as_view(), name='about'),
]
eclients/http_client.py | tinybees/eclients | 1 | 12760510 | #!/usr/bin/env python3
# coding=utf-8
"""
@author: guoyanfeng
@software: PyCharm
@time: 18-7-1 上午10:08
"""
import atexit
from typing import Dict
import requests as sync_requests
from requests.exceptions import ConnectTimeout, ConnectionError, HTTPError, RequestException, Timeout
from eclients.err_msg import http_msg
from eclients.utils import verify_message
from .decorators import Singleton
from .exceptions import ClientConnectionError, ClientError, ClientResponseError
__all__ = ("HttpClient", "Response")
class Response(object):
    """Container for a completed HTTP exchange.

    Carries the status line, headers and cookies of the underlying
    ``requests`` response, plus either a decoded JSON body (``resp_body``)
    or the raw payload (``content``).
    """

    __slots__ = ["status_code", "reason", "headers", "cookies", "resp_body", "content"]

    def __init__(self, status_code: int, reason: str, headers: Dict, cookies: Dict, *,
                 resp_body: Dict, content: bytes):
        """Store the response fields; *resp_body* and *content* are keyword-only."""
        values = (status_code, reason, headers, cookies, resp_body, content)
        for slot, value in zip(self.__slots__, values):
            setattr(self, slot, value)

    def json(self):
        """Return the decoded body, mirroring ``requests.Response.json``."""
        return self.resp_body
class HttpClient(Singleton):
    """Synchronous HTTP client built on ``requests``.

    Wraps a shared ``requests.Session`` and normalises failures into the
    eclients exception hierarchy (ClientError / ClientConnectionError /
    ClientResponseError).  Configuration may come from a Flask-style
    ``app.config`` (init_app) or explicit keyword arguments (init_session).
    """

    #: HTTP verbs accepted by :meth:`_request`.
    _ALLOWED_METHODS = frozenset(("GET", "POST", "PUT", "PATCH", "DELETE"))

    def __init__(self, app=None, *, timeout: int = 5 * 60, verify_ssl: bool = True,
                 message: Dict = None, use_zh: bool = True):
        """
        Args:
            app: optional Flask-style application; when given, init_app runs.
            timeout: request timeout in seconds.
            verify_ssl: verify TLS certificates.
            message: custom error-message overrides.
            use_zh: use Chinese (True) or English (False) error messages.
        """
        self.session = None
        self.timeout = timeout
        self.verify_ssl = verify_ssl
        self.message = message or {}
        self.use_zh = use_zh
        self.msg_zh = None
        if app is not None:
            self.init_app(app, timeout=self.timeout, verify_ssl=self.verify_ssl,
                          message=self.message, use_zh=self.use_zh)

    def init_app(self, app, *, timeout: int = None, verify_ssl: bool = None,
                 message: Dict = None, use_zh: bool = None):
        """Configure from a Flask-style ``app.config``; explicit kwargs win.

        Recognised config keys: ECLIENTS_HTTP_TIMEOUT, ECLIENTS_HTTP_VERIFYSSL,
        ECLIENTS_HTTP_MESSAGE, ECLIENTS_HTTP_MSGZH.
        """
        self.timeout = timeout or app.config.get("ECLIENTS_HTTP_TIMEOUT", None) or self.timeout
        self.verify_ssl = verify_ssl or app.config.get(
            "ECLIENTS_HTTP_VERIFYSSL", None) or self.verify_ssl
        message = message or app.config.get("ECLIENTS_HTTP_MESSAGE", None) or self.message
        use_zh = use_zh or app.config.get("ECLIENTS_HTTP_MSGZH", None) or self.use_zh
        self._setup(message, use_zh)

    def init_session(self, *, timeout: int = None, verify_ssl: bool = None,
                     message: Dict = None, use_zh: bool = None):
        """Configure without an app object; unset kwargs keep current values."""
        self.timeout = timeout or self.timeout
        self.verify_ssl = verify_ssl or self.verify_ssl
        self._setup(message or self.message, use_zh or self.use_zh)

    def _setup(self, message, use_zh):
        """Shared tail of init_app/init_session: messages, session, atexit hook.

        Replaces the duplicated nested ``close_connection`` closures the two
        initialisers previously registered with atexit.
        """
        self.message = verify_message(http_msg, message)
        self.msg_zh = "msg_zh" if use_zh else "msg_en"
        self.session = sync_requests.Session()
        atexit.register(self.close)

    def _request(self, method: str, url: str, *, params: Dict = None, data: Dict = None,
                 json: Dict = None, headers: Dict = None, verify_ssl: bool = None,
                 timeout: int = None, **kwargs) -> Response:
        """Perform *method* on *url* and wrap the outcome in :class:`Response`.

        Raises:
            ClientError: unknown HTTP method or any other requests failure.
            ClientConnectionError: connect errors / timeouts without a response.
            ClientResponseError: the server answered with an HTTP error status.
        """
        verb = method.upper()
        if verb not in self._ALLOWED_METHODS:
            # Mirrors the former KeyError branch of the closure dispatch table.
            raise ClientError(url=url, message="error method {0}".format(method))
        try:
            if verb == "GET":
                # GET carries no body, matching the previous per-verb closure.
                resp = self.session.get(url, params=params, verify=verify_ssl,
                                        headers=headers, timeout=timeout, **kwargs)
            else:
                resp = self.session.request(verb, url, params=params, data=data, json=json,
                                            headers=headers, verify=verify_ssl,
                                            timeout=timeout, **kwargs)
            resp.raise_for_status()
        except (ConnectionError, ConnectTimeout) as e:
            raise ClientConnectionError(url=url, message=str(e))
        except (Timeout, HTTPError) as e:
            resp = e.response
            if resp is None:
                # A read timeout carries no response object; the old code
                # crashed here with AttributeError instead of reporting it.
                raise ClientConnectionError(url=url, message=str(e))
            try:
                resp_data = resp.json()
            except (ValueError, TypeError):
                resp_data = resp.text
            raise ClientResponseError(url=url, status_code=resp.status_code,
                                      message=resp.reason, headers=resp.headers,
                                      body=resp_data)
        except RequestException as e:
            raise ClientError(url=url, message="ClientError: {}".format(vars(e)))
        with resp:
            try:
                resp_json = resp.json()
            except (ValueError, TypeError):
                # Not JSON: hand back text or raw bytes depending on Content-Type.
                if "text" in resp.headers.get("Content-Type", ""):
                    return Response(resp.status_code, resp.reason, resp.headers, resp.cookies,
                                    resp_body=resp.text, content=b"")
                return Response(resp.status_code, resp.reason, resp.headers, resp.cookies,
                                resp_body="", content=resp.content)
            else:
                return Response(resp.status_code, resp.reason, resp.headers, resp.cookies,
                                resp_body=resp_json, content=b"")

    def _defaults(self, verify_ssl, timeout):
        """Resolve per-call overrides against the instance defaults."""
        return (self.verify_ssl if verify_ssl is None else verify_ssl,
                self.timeout if timeout is None else timeout)

    def request(self, method: str, url: str, *, params: Dict = None, data: Dict = None,
                json: Dict = None, headers: Dict = None, verify_ssl: bool = None,
                timeout: int = None, **kwargs) -> Response:
        """Generic entry point; None verify_ssl/timeout fall back to defaults."""
        verify_ssl, timeout = self._defaults(verify_ssl, timeout)
        return self._request(method, url, params=params, data=data, json=json, headers=headers,
                             verify_ssl=verify_ssl, timeout=timeout, **kwargs)

    def get(self, url: str, *, params: Dict = None, headers: Dict = None,
            verify_ssl: bool = None, timeout: int = None, **kwargs) -> Response:
        """GET *url*."""
        verify_ssl, timeout = self._defaults(verify_ssl, timeout)
        return self._request("GET", url, params=params, verify_ssl=verify_ssl,
                             headers=headers, timeout=timeout, **kwargs)

    def post(self, url: str, *, params: Dict = None, data: Dict = None, json: Dict = None,
             headers: Dict = None, verify_ssl: bool = None, timeout: int = None,
             **kwargs) -> Response:
        """POST to *url*."""
        verify_ssl, timeout = self._defaults(verify_ssl, timeout)
        return self._request("POST", url, params=params, data=data, json=json, headers=headers,
                             verify_ssl=verify_ssl, timeout=timeout, **kwargs)

    def put(self, url: str, *, params: Dict = None, data: Dict = None, json: Dict = None,
            headers: Dict = None, verify_ssl: bool = None, timeout: int = None,
            **kwargs) -> Response:
        """PUT to *url*."""
        verify_ssl, timeout = self._defaults(verify_ssl, timeout)
        return self._request("PUT", url, params=params, data=data, json=json, headers=headers,
                             verify_ssl=verify_ssl, timeout=timeout, **kwargs)

    def patch(self, url: str, *, params: Dict = None, data: Dict = None, json: Dict = None,
              headers: Dict = None, verify_ssl: bool = None, timeout: int = None,
              **kwargs) -> Response:
        """PATCH to *url*."""
        verify_ssl, timeout = self._defaults(verify_ssl, timeout)
        return self._request("PATCH", url, params=params, data=data, json=json, headers=headers,
                             verify_ssl=verify_ssl, timeout=timeout, **kwargs)

    def delete(self, url: str, *, params: Dict = None, headers: Dict = None,
               verify_ssl: bool = None, timeout: int = None, **kwargs) -> Response:
        """DELETE *url*."""
        verify_ssl, timeout = self._defaults(verify_ssl, timeout)
        return self._request("DELETE", url, params=params, verify_ssl=verify_ssl,
                             headers=headers, timeout=timeout, **kwargs)

    def close(self):
        """Release the session connection pool (safe before initialisation).

        Guarded so the atexit hook cannot crash when no session was created.
        """
        if self.session is not None:
            self.session.close()
| 2.421875 | 2 |
claims_to_quality/analyzer/submission/api_submitter.py | CMSgov/qpp-claims-to-quality-public | 13 | 12760511 | <reponame>CMSgov/qpp-claims-to-quality-public
"""Methods to submit a MeasurementSet object to Nava's API."""
import urllib.parse
from claims_to_quality.config import config
from claims_to_quality.lib.qpp_logging import logging_config
import newrelic.agent
import requests
from retrying import retry
logger = logging_config.get_logger(__name__)
# Retry policy shared by the @retry-decorated API calls below.
STOP_MAX_ATTEMPT_NUMBER = 2
WAIT_FIXED_MILLISECONDS = 15 * 60 * 1000  # This must be >10 minutes due to rate limits.
# HTTP statuses treated as transient by _retry_on_fixable_request_errors;
# the values are human-readable names only and are never used programmatically.
STATUS_CODES_TO_RETRY_ON = {
    403: 'Forbidden',  # This is in fact the response for rate-limiting.
    408: 'Request Timeout',
    429: 'Too Many Requests',
    500: 'Internal Server Error',
    502: 'Bad Gateway',
    503: 'Service Unavailable',
    504: 'Gateway Timeout',
}


# These two exceptions drive the POST-vs-PUT/PATCH decision in
# _submit_to_measurement_sets_api.
class NoMatchingMeasurementSetsException(Exception):
    """Indicates that no C2Q measurement sets can be found within a QPP submission."""


class NoMatchingSubmissionsException(Exception):
    """Indicates that no matching QPP submissions exist in the submission API."""
def _retry_on_fixable_request_errors(exception):
    """
    Return True for exceptions that could be fixed by retrying.

    Used by the retrying module to attempt to re-submit for certain errors only.
    """
    if isinstance(exception, requests.exceptions.ConnectTimeout):
        return True
    return (
        isinstance(exception, requests.exceptions.HTTPError)
        and exception.response.status_code in STATUS_CODES_TO_RETRY_ON
    )
def _handle_http_error(response, message):
    """Raise a descriptive HTTPError when *response* is an error response.

    @param response: a requests.Response object
    @param message: short description of the operation, used in the log and
        the raised error.

    The previous implementation set a flag inside the except block and
    formatted the identical error string twice; format once and raise from
    within the handler instead.
    """
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        error_message = 'HTTP error {status_code} during {msg}.'.format(
            status_code=response.status_code,
            msg=message
        )
        logger.warning(error_message)
        # `from None` keeps the traceback identical to the old flag-based
        # flow, which raised outside the except block (no chained context).
        raise requests.exceptions.HTTPError(error_message, response=response) from None
@newrelic.agent.function_trace(name='submit-to-measurement-sets-api', group='Task')
@retry(stop_max_attempt_number=STOP_MAX_ATTEMPT_NUMBER, wait_fixed=WAIT_FIXED_MILLISECONDS)
def submit_to_measurement_sets_api(measurement_set, patch_update):
    """
    Send the submission object to the appropriate API endpoint.

    Retries up to STOP_MAX_ATTEMPT_NUMBER times with a fixed delay of
    WAIT_FIXED_MILLISECONDS between attempts (``wait_fixed`` — a fixed wait,
    not exponential backoff as previously documented).
    """
    return _submit_to_measurement_sets_api(measurement_set, patch_update=patch_update)
def _submit_to_measurement_sets_api(measurement_set, patch_update):
    """Send the submission object to the appropriate API endpoint."""
    # TODO: Add a separate method to validate submission without sending it.
    # Attempt to find an existing measurement set, if any.
    try:
        matching_submission = get_existing_submissions(measurement_set)
        existing_id = get_measurement_set_id_from_submission(matching_submission)
    except (NoMatchingSubmissionsException, NoMatchingMeasurementSetsException):
        # No measurement set exists yet, so a plain POST is safe.
        response = _post_to_measurement_sets_api(measurement_set)
    else:
        # A measurement set already exists: update it in place via its id.
        update = _patch_to_measurement_sets_api if patch_update else _put_to_measurement_sets_api
        response = update(measurement_set, existing_id)
    _handle_http_error(response, message='submit_to_measurement_sets_api')
    return response
@newrelic.agent.function_trace(name='get-verify-submissions', group='Task')
@retry(stop_max_attempt_number=STOP_MAX_ATTEMPT_NUMBER, wait_fixed=WAIT_FIXED_MILLISECONDS)
def get_submissions(npi=None, tin=None, performance_year=None, start_index=0):
    """
    Simple GET request to check if submissions have been made.

    If NPI is provided, return submissions for this NPI.
    If start_index is provided, return submissions after the start_index.
    Else, simply return the first 10 submissions starting at start_index=0.

    FIXME: Move this into a different file, this is not part of the api submitter.
    """
    logger.debug('Making a simple GET request to verify submissions.')
    endpoint_url = urllib.parse.urljoin(
        config.get('submission.endpoint'),
        'submissions'
    )
    params = {'startIndex': start_index}
    if npi:
        params['nationalProviderIdentifier'] = npi
    if performance_year:
        params['performanceYear'] = str(performance_year)
    headers = get_headers()
    if tin:
        headers['qpp-taxpayer-identification-number'] = tin
    response = requests.get(endpoint_url, params=params, headers=headers)
    # If the request failed, raise an error.
    _handle_http_error(response, 'get_submissions')
    return response.json()
@newrelic.agent.function_trace(name='get-existing-submissions', group='Task')
def get_existing_submissions(measurement_set):
    """
    Check to see if a submission already exists for the given identifiers.

    Raises NoMatchingSubmissionsException when none exists; otherwise returns
    the first matching submission.
    """
    logger.debug('Making GET request to the submissions API.')
    endpoint_url = urllib.parse.urljoin(
        config.get('submission.endpoint'),
        'submissions'
    )
    submission_data = measurement_set.data['submission']
    # Restrict attention to measurement sets with the same NPI, TIN, and performance year.
    params = {
        'itemsPerPage': 99999,
        'nationalProviderIdentifier': submission_data['nationalProviderIdentifier'],
        'performanceYear': str(submission_data['performanceYear'])
    }
    headers = get_headers()
    headers['qpp-taxpayer-identification-number'] = submission_data['taxpayerIdentificationNumber']
    # Look for matching submissions.
    response = requests.get(endpoint_url, params=params, headers=headers)
    # If the request failed, raise an error.
    _handle_http_error(response, 'get_existing_submissions')
    existing_submissions = response.json()['data']['submissions']
    if not existing_submissions:
        raise NoMatchingSubmissionsException
    # At least one submission exists for the performance year: first match wins.
    return existing_submissions[0]
def get_measurement_set_id_from_submission(submission):
    """Return the C2Q measurement_set ID from a given JSON submission."""
    claims_quality_sets = (
        candidate for candidate in submission['measurementSets']
        if candidate['category'] == 'quality' and candidate['submissionMethod'] == 'claims'
    )
    # Only the first claims/quality measurement set matters.
    for measurement_set in claims_quality_sets:
        return measurement_set['id']
    raise NoMatchingMeasurementSetsException
def _patch_to_measurement_sets_api(measurement_set, existing_measurement_set_id):
    """PATCH an existing measurement set (partial update)."""
    logger.debug('Making PATCH request to the measurement-sets API.')
    base_url = urllib.parse.urljoin(config.get('submission.endpoint'), 'measurement-sets/')
    return requests.patch(
        url=urllib.parse.urljoin(base_url, str(existing_measurement_set_id)),
        data=measurement_set.to_json(),
        headers=get_headers(),
    )


def _put_to_measurement_sets_api(measurement_set, existing_measurement_set_id):
    """PUT (replace) an existing measurement set."""
    logger.debug('Making PUT request to the measurement-sets API.')
    base_url = urllib.parse.urljoin(config.get('submission.endpoint'), 'measurement-sets/')
    return requests.put(
        url=urllib.parse.urljoin(base_url, str(existing_measurement_set_id)),
        data=measurement_set.to_json(),
        headers=get_headers(),
    )


def delete_measurement_set_api(measurement_set_id):
    """Delete a measurement set by id."""
    logger.warning('DELETING measurement {} set using the measurement-sets API.'.format(
        measurement_set_id)
    )
    base_url = urllib.parse.urljoin(
        config.get('submission.endpoint'),
        'measurement-sets/'
    )
    response = requests.delete(
        url=urllib.parse.urljoin(base_url, str(measurement_set_id)),
        headers=get_headers()
    )
    _handle_http_error(response, 'delete_measurement_set')


def _post_to_measurement_sets_api(measurement_set):
    """POST a brand-new measurement set."""
    logger.debug('Making POST request to the measurement-sets API.')
    return requests.post(
        url=urllib.parse.urljoin(config.get('submission.endpoint'), 'measurement-sets/'),
        data=measurement_set.to_json(),
        headers=get_headers()
    )
def get_headers():
    """Return base headers for Nava's APIs."""
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'Authorization': 'Bearer {api_token}'.format(
            api_token=config.get('submission.api_token')),
    }
    cookie = config.get('submission.cookie')
    if cookie:
        headers['Cookie'] = cookie
    return headers


def get_scoring_preview(measurement_set):
    """Send the submission object to the score-preview endpoint and return the preview."""
    logger.debug('Sending measurement set to the score-preview endpoint.')
    preview_url = urllib.parse.urljoin(
        config.get('submission.endpoint'),
        'submissions/score-preview'
    )
    response = requests.post(
        url=preview_url,
        data=measurement_set.prepare_for_scoring(),
        headers=get_headers()
    )
    _handle_http_error(response, 'scoring_preview')
    return response.json()
| 2.1875 | 2 |
example/blog/types_.py | njncalub/apistar-mongoengine | 0 | 12760512 | from apistar import validators
from apistar_mongoengine.types import Type
class PostType(Type):
    """API type describing a blog post.

    # message: body text of the post; validated as a required string.
    """
    message = validators.String()
SubgraphCountingMatching/utils/scheduler.py | HKUST-KnowComp/DualMessagePassing | 12 | 12760513 | import math
from torch.optim.lr_scheduler import LambdaLR
PI = 3.141592653589793
# Default hyper-parameters shared by the schedulers below.
INIT_STEPS = 600        # warmup steps
SCHEDULE_STEPS = 10000  # total scheduled steps
NUM_CYCLES = 2          # number of decay/restart cycles
MIN_PERCENT = 1e-3      # floor for the LR multiplier


# NOTE: every scheduler in this module deliberately skips LambdaLR.__init__
# in its own __init__ and defers it to set_optimizer(), so instances can be
# configured before an optimizer exists.

class ConstantScheduler(LambdaLR):
    """Keep the learning rate fixed at its initial value."""

    def __init__(self):
        pass

    def set_optimizer(self, optimizer):
        """Bind *optimizer* and finish base-class initialisation."""
        super(ConstantScheduler, self).__init__(optimizer, self.lr_lambda)

    def lr_lambda(self, current_step):
        """Multiplier is always 1."""
        return 1.0


class ConstantWarmupScheduler(LambdaLR):
    """Linear warmup to 1.0, then constant."""

    def __init__(
        self,
        num_warmup_steps=INIT_STEPS
    ):
        self.num_warmup_steps = num_warmup_steps

    def set_optimizer(self, optimizer):
        """Bind *optimizer* and finish base-class initialisation."""
        super(ConstantWarmupScheduler, self).__init__(optimizer, self.lr_lambda)

    def lr_lambda(self, current_step):
        """Ramp linearly over the warmup window, then hold at 1."""
        if current_step < self.num_warmup_steps:
            return float(current_step) / max(1.0, float(self.num_warmup_steps))
        return 1.0


class LinearScheduler(LambdaLR):
    """Linear decay from 1.0 down to ``min_percent``."""

    def __init__(
        self,
        num_schedule_steps=SCHEDULE_STEPS,
        min_percent=MIN_PERCENT
    ):
        self.num_schedule_steps = num_schedule_steps
        self.min_percent = min_percent

    def set_optimizer(self, optimizer):
        """Bind *optimizer* and finish base-class initialisation."""
        super(LinearScheduler, self).__init__(optimizer, self.lr_lambda)

    def lr_lambda(self, current_step):
        """Linearly decay; never below min_percent."""
        return max(
            self.min_percent,
            float(self.num_schedule_steps - current_step) / \
            float(max(1, self.num_schedule_steps))
        )


class LinearWarmupScheduler(LambdaLR):
    """Linear warmup followed by linear decay to ``min_percent``."""

    def __init__(
        self,
        num_warmup_steps=INIT_STEPS,
        num_schedule_steps=SCHEDULE_STEPS,
        min_percent=MIN_PERCENT
    ):
        self.num_warmup_steps = num_warmup_steps
        self.num_schedule_steps = num_schedule_steps
        self.min_percent = min_percent

    def set_optimizer(self, optimizer):
        """Bind *optimizer* and finish base-class initialisation."""
        super(LinearWarmupScheduler, self).__init__(optimizer, self.lr_lambda)

    def lr_lambda(self, current_step):
        """Warmup ramp, then linear decay over the remaining schedule."""
        if current_step < self.num_warmup_steps:
            return float(current_step) / float(max(1, self.num_warmup_steps))
        return max(
            self.min_percent,
            float(self.num_schedule_steps - current_step) / \
            float(max(1, self.num_schedule_steps - self.num_warmup_steps))
        )


class LinearWarmupRestartScheduler(LambdaLR):
    """Linear warmup, then ``num_cycles`` sawtooth linear decays (restarts)."""

    def __init__(
        self,
        num_warmup_steps=INIT_STEPS,
        num_schedule_steps=SCHEDULE_STEPS,
        num_cycles=NUM_CYCLES,
        min_percent=MIN_PERCENT
    ):
        self.num_warmup_steps = num_warmup_steps
        self.num_schedule_steps = num_schedule_steps
        self.num_cycles = num_cycles
        self.min_percent = min_percent

    def set_optimizer(self, optimizer):
        """Bind *optimizer* and finish base-class initialisation."""
        super(LinearWarmupRestartScheduler, self).__init__(optimizer, self.lr_lambda)

    def lr_lambda(self, current_step):
        """Warmup, then restartable linear decay; min_percent past the schedule."""
        if current_step < self.num_warmup_steps:
            return float(current_step) / float(max(1, self.num_warmup_steps))
        progress = float(current_step - self.num_warmup_steps) / \
            float(max(1, self.num_schedule_steps - self.num_warmup_steps))
        if progress >= 1.0:
            return self.min_percent
        return max(self.min_percent, 1 - (float(self.num_cycles) * progress) % 1.0)


class CosineScheduler(LambdaLR):
    """Cosine-shaped multiplier with ``num_cycles`` full oscillations."""

    def __init__(
        self,
        num_schedule_steps=SCHEDULE_STEPS,
        num_cycles=NUM_CYCLES,
        min_percent=MIN_PERCENT
    ):
        self.num_schedule_steps = num_schedule_steps
        self.num_cycles = num_cycles
        self.min_percent = min_percent

    def set_optimizer(self, optimizer):
        """Bind *optimizer* and finish base-class initialisation."""
        super(CosineScheduler, self).__init__(optimizer, self.lr_lambda)

    def lr_lambda(self, current_step):
        """Cosine curve clipped below at min_percent."""
        progress = float(current_step) / float(max(1, self.num_schedule_steps))
        return max(self.min_percent, 0.5 * (1.0 + math.cos(PI * float(self.num_cycles) * 2.0 * progress)))


class CosineWarmupScheduler(LambdaLR):
    """Linear warmup followed by a cosine-shaped multiplier."""

    def __init__(
        self,
        num_warmup_steps=INIT_STEPS,
        num_schedule_steps=SCHEDULE_STEPS,
        num_cycles=NUM_CYCLES,
        min_percent=MIN_PERCENT
    ):
        self.num_warmup_steps = num_warmup_steps
        self.num_schedule_steps = num_schedule_steps
        self.num_cycles = num_cycles
        self.min_percent = min_percent

    def set_optimizer(self, optimizer):
        """Bind *optimizer* and finish base-class initialisation."""
        super(CosineWarmupScheduler, self).__init__(optimizer, self.lr_lambda)

    def lr_lambda(self, current_step):
        """Warmup ramp, then cosine curve clipped below at min_percent."""
        if current_step < self.num_warmup_steps:
            return float(current_step) / float(max(1, self.num_warmup_steps))
        progress = float(current_step - self.num_warmup_steps) / \
            float(max(1, self.num_schedule_steps - self.num_warmup_steps))
        return max(self.min_percent, 0.5 * (1.0 + math.cos(PI * float(self.num_cycles) * 2.0 * progress)))


class CosineWarmupRestartScheduler(LambdaLR):
    """Linear warmup, then ``num_cycles`` hard cosine restarts."""

    def __init__(
        self,
        num_warmup_steps=INIT_STEPS,
        num_schedule_steps=SCHEDULE_STEPS,
        num_cycles=NUM_CYCLES,
        min_percent=MIN_PERCENT
    ):
        self.num_warmup_steps = num_warmup_steps
        self.num_schedule_steps = num_schedule_steps
        self.num_cycles = num_cycles
        self.min_percent = min_percent

    def set_optimizer(self, optimizer):
        """Bind *optimizer* and finish base-class initialisation."""
        super(CosineWarmupRestartScheduler, self).__init__(optimizer, self.lr_lambda)

    def lr_lambda(self, current_step):
        """Warmup, then a restarted half-cosine per cycle; min_percent past the schedule."""
        if current_step < self.num_warmup_steps:
            return float(current_step) / float(max(1, self.num_warmup_steps))
        progress = float(current_step - self.num_warmup_steps) / \
            float(max(1, self.num_schedule_steps - self.num_warmup_steps))
        if progress >= 1.0:
            return self.min_percent
        return max(self.min_percent, 0.5 * (1.0 + math.cos(PI * ((float(self.num_cycles) * progress) % 1.0))))


# Template instances keyed by name; map_scheduler_str_to_scheduler copies
# them, so these defaults are never mutated.
supported_schedulers = {
    "constant": ConstantScheduler(),
    "constant_with_warmup": ConstantWarmupScheduler(),
    "linear": LinearScheduler(),
    "linear_with_warmup": LinearWarmupScheduler(),
    "linear_with_warmup_and_restart": LinearWarmupRestartScheduler(),
    "cosine": CosineScheduler(),
    "cosine_with_warmup": CosineWarmupScheduler(),
    "cosine_with_warmup_and_restart": CosineWarmupRestartScheduler(),
}


def map_scheduler_str_to_scheduler(scheduler, **kw):
    """Return a scheduler instance for *scheduler*, configured from **kw.

    Fixes two defects of the previous version: it mutated the shared
    module-level singletons (so configuration leaked between callers), and a
    bare ``except: pass`` silently swallowed any setattr failure.  A fresh
    instance is created instead (every scheduler class is default-
    constructible), and only attributes the scheduler actually defines are
    overridden.
    """
    if scheduler not in supported_schedulers:
        raise NotImplementedError(scheduler)
    sdlr = type(supported_schedulers[scheduler])()
    for k, v in kw.items():
        if hasattr(sdlr, k):
            setattr(sdlr, k, v)
    return sdlr
| 2.5 | 2 |
server.py | llongquoc/voxceleb_trainer | 0 | 12760514 | #!/usr/bin/python
#-*- coding: utf-8 -*-
from SpeakerNet import *
from utils import *
from DatasetLoader import loadWAV
import sys, time, os, argparse, socket
import yaml
import numpy
import pdb
import torch
import glob
import zipfile
import datetime
import os
import random
import subprocess
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import torch.nn.functional as F
from werkzeug.utils import secure_filename
from flask import Flask, request, jsonify
# ## ===== ===== ===== ===== ===== ===== ===== =====
# ## Parse arguments
# ## ===== ===== ===== ===== ===== ===== ===== =====
# Command-line configuration for the speaker-identification server.
# NOTE(review): the description 'Prepare Data' looks copy-pasted from another
# script in this repo -- confirm before relying on --help output.
parser = argparse.ArgumentParser(description = 'Prepare Data');

## Data loader
parser.add_argument('--max_frames', type=int, default=200, help='Input length to the network for training');
parser.add_argument('--eval_frames', type=int, default=400, help='Input length to the network for testing; 0 uses the whole files');

## Training details
parser.add_argument('--trainfunc', type=str, default='softmaxproto', help='Loss function');

## Optimizer
parser.add_argument('--optimizer', type=str, default='adam', help='sgd or adam');

## Loss functions
parser.add_argument('--hard_prob', type=float, default=0.5, help='Hard negative mining probability, otherwise random, only for some loss functions');
parser.add_argument('--hard_rank', type=int, default=10, help='Hard negative mining rank in the batch, only for some loss functions');
parser.add_argument('--margin', type=float, default=0.1, help='Loss margin, only for some loss functions');
parser.add_argument('--scale', type=float, default=30, help='Loss scale, only for some loss functions');
parser.add_argument('--nPerSpeaker', type=int, default=2, help='Number of utterances per speaker per batch, only for metric learning based losses');
parser.add_argument('--nClasses', type=int, default=400, help='Number of speakers in the softmax layer, only for softmax-based losses');

## Load
parser.add_argument('--model_path', type=str, default='model000000500.model', help='Path for model and logs');

## Model definition
parser.add_argument('--n_mels', type=int, default=64, help='Number of mel filterbanks');
# NOTE(review): type=bool does not parse "False" as falsy (bool('False') is
# True in argparse); presumably only the default is ever relied on -- confirm.
parser.add_argument('--log_input', type=bool, default=True, help='Log input features')
parser.add_argument('--model', type=str, default='ResNetSE34V2', help='Name of model definition');
parser.add_argument('--encoder_type', type=str, default='ASP', help='Type of encoder');
parser.add_argument('--nOut', type=int, default=512, help='Embedding size in the last FC layer');

## Server parameters
parser.add_argument('--gpu', dest='gpu', action='store_true', help='Use GPU');
# Decision threshold on the negated mean pairwise distance (see main_worker).
parser.add_argument('--threshold', type=float, default=-1.0831763744354248, help='Threshold');
parser.add_argument('--feats_path', type=str, default='feats.npy', help='Path for feats file');

args = parser.parse_args();
## Load model (GPU or CPU variant) and wrap it for inference
if args.gpu:
    s = SpeakerNet(**vars(args))
    s = WrappedModel(s).cuda(0)
else:
    s = SpeakerNetCPU(**vars(args))
    s = WrappedModel(s).cpu()

## Load model weights
try:
    loadParameters(args.model_path, s, args.gpu)
except Exception as exc:
    # Chain the original error so the real cause (bad path vs. incompatible
    # weights) is visible; the previous bare `except:` swallowed it and even
    # caught KeyboardInterrupt.
    raise Exception('Model path is wrong!') from exc

print('Model %s loaded from previous state!' % args.model_path)

# Pre-computed speaker embeddings, keyed by utterance path.
feats = np.load(args.feats_path, allow_pickle=True)[()]
def main_worker(file_path):
    """Embed the audio at *file_path* and return the best-matching speaker.

    Scores every enrolled embedding in ``feats`` against the utterance and
    returns the speaker label (second-to-last path component of the feats
    key) with the highest score, or '' when nothing beats ``args.threshold``.
    """
    data = create_data(file_path, args.eval_frames)
    feature_vector = s(data).detach().cpu()
    # L2-normalise so pairwise distance behaves like cosine-style scoring.
    normalized_vector = F.normalize(feature_vector, p=2, dim=1)
    max_score = args.threshold
    speaker = ''
    for key, value in feats.items():
        # assumes `value` holds one or more enrolled embedding vectors;
        # verify the expected tensor shape against the enrollment code.
        dist = F.pairwise_distance(normalized_vector.unsqueeze(-1), value.unsqueeze(-1).transpose(0,2)).detach().cpu().numpy();
        # Higher score == smaller mean distance.
        score = -1 * np.mean(dist);
        if score >= max_score:
            max_score = score
            speaker = key.split('/')[-2]
    return speaker
app = Flask(__name__)

@app.route('/predict', methods=['POST'])
def predict():
    """Accept an uploaded audio file and return the recognized speaker.

    The upload is saved under a random name, converted to 16 kHz mono WAV
    with ffmpeg, scored by ``main_worker``, and both temp files are always
    removed (the original leaked them when ffmpeg rejected the input).
    """
    audio_file = request.files['file']
    file_name_1 = str(random.randint(0, 100000)) + '.' + secure_filename(audio_file.filename).split('.')[-1]
    audio_file.save(file_name_1)
    file_name_2 = str(random.randint(0, 100000)) + '.wav'
    try:
        # NOTE(security): shell=True with interpolated names; both names are
        # locally generated (random int + secure_filename extension), but a
        # list-argument subprocess.run would be safer still.
        out = subprocess.call(
            'ffmpeg -y -i %s -ac 1 -vn -acodec pcm_s16le -ar 16000 %s >/dev/null 2>/dev/null'
            % (file_name_1, file_name_2), shell=True)
        if out != 0:
            return 'Invalid format!'
        speaker = main_worker(file_name_2)
    finally:
        # Clean up temp files on every path, including the error return.
        for tmp in (file_name_1, file_name_2):
            if os.path.exists(tmp):
                os.remove(tmp)
    result = {'speaker': speaker}
    return jsonify(result)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port='8080', debug=False)
attic/tests/flatten/test_XTP-1106.py | ska-telescope/skampi | 0 | 12760515 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_calc
----------------------------------
Acceptance tests for MVP.
"""
import sys, os
import pytest
import logging
from time import sleep
from assertpy import assert_that
from pytest_bdd import scenario, given, when, then
from concurrent import futures
#SUT
from ska.scripting.domain import Telescope, SubArray
#SUT infrastructure
from tango import DeviceProxy, DevState # type: ignore
## local imports
from resources.test_support.helpers import resource
from resources.test_support.sync_decorators import sync_assign_resources, sync_restart, sync_abort, sync_scan_oet
from resources.test_support.persistance_helping import update_resource_config_file
from resources.test_support.controls import set_telescope_to_standby,set_telescope_to_running,telescope_is_in_standby,take_subarray,restart_subarray, tmc_is_on
# Dev-only tests are enabled solely when DISABLE_DEV_TESTS is the exact
# string "False"; any other value (or an unset variable) disables them.
DEV_TEST_TOGGLE = os.environ.get('DISABLE_DEV_TESTS')
DISABLE_TESTS_UNDER_DEVELOPMENT = DEV_TEST_TOGGLE != "False"
LOGGER = logging.getLogger(__name__)

# Tango devices whose state is captured for diagnostics during the scenario.
devices_to_log = [
    'ska_mid/tm_subarray_node/1',
    'mid_csp/elt/subarray_01',
    'mid_csp_cbf/sub_elt/subarray_01',
    'mid_sdp/elt/subarray_1',
    'mid_d0001/elt/master',
    'mid_d0002/elt/master',
    'mid_d0003/elt/master',
    'mid_d0004/elt/master']
# Dish masters expose pointingState instead of the default obsState attribute.
non_default_states_to_check = {
    'mid_d0001/elt/master' : 'pointingState',
    'mid_d0002/elt/master' : 'pointingState',
    'mid_d0003/elt/master' : 'pointingState',
    'mid_d0004/elt/master' : 'pointingState'}
@pytest.fixture
def result():
    """Mutable dict shared between the BDD steps of one scenario."""
    return {}
@pytest.mark.select
@pytest.mark.skamid
@pytest.mark.quarantine
# @pytest.mark.skipif(DISABLE_TESTS_UNDER_DEVELOPMENT, reason="disabaled by local env")
# @pytest.mark.skip(reason="bug as repoted by SKB-20")
@scenario("XTP-1106.feature", "BDD test case for Restart functionality")
def test_subarray_restart():
    """RESTART Subarray.

    Entry point for the pytest-bdd scenario; the actual steps are the
    @given/@when/@then functions defined below.
    """
def assign():
    """Start the telescope from standby and compose subarray 1 of 2 dishes.

    Returns the SDP block handle needed by the later configure step.
    Asserts as preconditions that TMC is ON and the telescope is in standby.
    """
    LOGGER.info("Before starting the telescope checking if the TMC is in ON state")
    assert(tmc_is_on())
    LOGGER.info("Before starting the telescope checking if the telescope is in StandBy.")
    assert(telescope_is_in_standby())
    LOGGER.info("Telescope is in StandBy.")
    LOGGER.info("Invoking Startup Telescope command on the telescope.")
    set_telescope_to_running()
    LOGGER.info("Telescope is started successfully.")
    pilot, sdp_block = take_subarray(1).to_be_composed_out_of(2)
    LOGGER.info("Resources are assigned successfully on Subarray.")
    return sdp_block
def configure_ready(sdp_block):
    """Configure subarray 1 for a scan, leaving it in obsState READY."""
    LOGGER.info("Invoking configure command on the Subarray.")
    take_subarray(1).and_configure_scan_by_file(sdp_block)
    LOGGER.info("Configure command is invoked on Subarray.")
    LOGGER.info("Subarray is moved to READY, Configure command is successful on Subarray.")
def scanning(fixture):
    """Kick off a scan on subarray 1 in a background thread.

    Stores the scan request under fixture['scans'] and the submitted
    future under fixture['future'] so later steps can interact with the
    in-flight scan. Returns the same fixture dict.
    """
    fixture['scans'] = '{"id":1}'

    @sync_scan_oet
    def scan():
        LOGGER.info("Invoking scan command on Subarray.")

        # NOTE(review): the `duration` argument is never used by send_scan;
        # SubArray(1).scan() is called without parameters.
        def send_scan(duration):
            SubArray(1).scan()
            LOGGER.info("Scan is invoked on Subarray 1")

        # Single worker thread: the scan blocks, so it runs off the main thread.
        executor = futures.ThreadPoolExecutor(max_workers=1)
        LOGGER.info("Getting into executor block")
        return executor.submit(send_scan, fixture['scans'])

    fixture['future'] = scan()
    LOGGER.info("obsState = Scanning of TMC-Subarray")
    return fixture
@given("operator has a running telescope with a subarray in state <subarray_obsstate> and Subarray has transitioned into obsState ABORTED")
def set_up_telescope(subarray_obsstate : str):
    """Drive subarray 1 into the requested obsState, then abort it.

    Supported values of subarray_obsstate: IDLE, READY, SCANNING.
    Any other value raises ValueError before the abort is attempted.
    """
    if subarray_obsstate == 'IDLE':
        assign()
        LOGGER.info("Abort command can be invoked on Subarray with Subarray obsState as 'IDLE'")
    elif subarray_obsstate == 'READY':
        sdp_block = assign()
        LOGGER.info("Resources are assigned successfully and configuring the subarray now")
        configure_ready(sdp_block)
        LOGGER.info("Abort command can be invoked on Subarray with Subarray obsState as 'READY'")
    elif subarray_obsstate == 'SCANNING':
        sdp_block = assign()
        LOGGER.info("Resources are assigned successfully and configuring the subarray now")
        configure_ready(sdp_block)
        LOGGER.info("Subarray is configured and executing a scan on subarray")
        scanning(sdp_block)
        LOGGER.info("Abort command can be invoked on Subarray with Subarray obsState as 'SCANNING'")
    else:
        msg = 'obsState {} is not settable with command methods'
        raise ValueError(msg.format(subarray_obsstate))

    def abort_subarray():
        # sync_abort waits (up to 200s) for the abort transition to settle.
        @sync_abort(200)
        def abort():
            LOGGER.info("Invoking ABORT command.")
            SubArray(1).abort()
            LOGGER.info("Abort command is invoked on subarray")
        abort()
        LOGGER.info("Abort is completed on Subarray")

    abort_subarray()
@when("I invoke Restart command")
def restart():
    """Invoke Restart on subarray 1, waiting (up to 200s) for completion."""
    @sync_restart(200)
    def command_restart():
        LOGGER.info("Invoking Restart command on the Subarray.")
        SubArray(1).restart()
        LOGGER.info("Restart command is invoked on subarray")
    command_restart()
    LOGGER.info("Subarray is restarted successfully.")
@then("subarray changes its obsState to EMPTY")
def check_empty_state():
    """All three subsystems (SDP, CSP, TMC) must report obsState EMPTY."""
    assert_that(resource('mid_sdp/elt/subarray_1').get('obsState')).is_equal_to('EMPTY')
    assert_that(resource('mid_csp/elt/subarray_01').get('obsState')).is_equal_to('EMPTY')
    assert_that(resource('ska_mid/tm_subarray_node/1').get('obsState')).is_equal_to('EMPTY')
def teardown_function(function):
    """Tear down whatever state the scenario left behind.

    Drives the subarray back towards EMPTY and puts the telescope in
    standby. obsState is deliberately re-read before each branch: earlier
    branches (e.g. releasing resources) change the state that later
    branches observe.
    """
    if (resource('ska_mid/tm_subarray_node/1').get('State') == "ON"):
        if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "IDLE"):
            LOGGER.info("tearing down composed subarray (IDLE)")
            take_subarray(1).and_release_all_resources()
        if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "CONFIGURING"):
            # Fixed message typo: previously read "CONFIFURING".
            LOGGER.warn("Subarray is still in CONFIGURING! Please restart MVP manually to complete tear down")
            restart_subarray(1)
            raise Exception("Unable to tear down test setup")
        if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "READY"):
            LOGGER.info("tearing down configured subarray (READY)")
            take_subarray(1).and_end_sb_when_ready().and_release_all_resources()
        if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "ABORTING"):
            LOGGER.warn("Subarray is still in ABORTING! Please restart MVP manually to complete tear down")
            restart_subarray(1)
            raise Exception("Unable to tear down test setup")
        if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "SCANNING"):
            LOGGER.warn("Subarray is still in SCANNING! Please restart MVP manually to complete tear down")
            restart_subarray(1)
            raise Exception("Unable to tear down test setup")
        if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "RESTARTING"):
            LOGGER.warn("Subarray is still in RESTARTING! Please restart MVP manually to complete tear down")
            restart_subarray(1)
            raise Exception("Unable to tear down test setup")
        if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "EMPTY"):
            LOGGER.info("Subarray is in EMPTY state.")
        LOGGER.info("Put Telescope back to standby")
        set_telescope_to_standby()
        LOGGER.info("Telescope is in standby")
| 1.859375 | 2 |
src/TowerDefence/Dispatcher/dispatcher_graphics.py | sevashasla/TowerDefence | 0 | 12760516 | <gh_stars>0
from .dispatcher import Dispatcher
import pygame
import sys
from ..Game.coordinates import Coordinates
from ..Game.interface import Interface
from ..Command.levels_menu import LevelsMenuCommand
from ..Command.rules_menu import RulesMenuCommand
from ..Command.forced_exit import ForcedExitCommand
from ..Command.place_tower import PlaceTowerCommand
from ..Command.choose_level import ChooseLevelCommand
from ..Command.quit_page import QuitPageCommand
class DispatcherGraphics(Dispatcher):
    """Translates pygame input events into game Command objects.

    A mouse click on a UI button produces the corresponding menu/level
    command; a click elsewhere (after a tower type was selected) produces
    a PlaceTowerCommand at the clicked coordinates.
    """

    def __init__(self, interface, game_path):
        super().__init__()
        self.interface = interface
        # Remembers the last tower button pressed so the next free click
        # places a tower of that type.
        self.last_chosen_type_of_tower = None
        self.game_path = game_path

    def start(self):
        pass

    def finish(self):
        pygame.quit()

    def get_events(self) -> list:
        """Poll pygame and return the list of resulting Command objects."""
        events = []
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                events.append(ForcedExitCommand())
            elif event.type == pygame.MOUSEBUTTONUP:
                pos = pygame.mouse.get_pos()
                # (removed leftover debug print of the click position)
                coordinates_of_click = Coordinates(coordinates=pos)
                clicked = False
                for button in self.interface.buttons:
                    if button.clicked(coordinates_of_click):
                        if button.task == "Exit":
                            events.append(QuitPageCommand())
                        elif button.task == "LevelsMenu":
                            events.append(LevelsMenuCommand())
                        elif button.task == "RulesMenu":
                            events.append(RulesMenuCommand())
                        elif button.task.startswith("level"):
                            events.append(ChooseLevelCommand(button.task))
                        elif button.task.endswith("Tower"):
                            self.last_chosen_type_of_tower = button.task
                        clicked = True
                        break
                if not clicked and self.last_chosen_type_of_tower is not None:
                    events.append(PlaceTowerCommand(self.last_chosen_type_of_tower,
                                                    coordinates_of_click))
        return events
prompt412/round-111/c.py | honux77/algorithm | 2 | 12760517 | t = int(input())
def solve(read=input):
    """Count candy patterns in one grid test case and print the count.

    Reads (via *read*, default: stdin) a case label line, a "rows cols"
    line, then the grid rows, and counts horizontal ">o<" and vertical
    "v/o/^" triples. Also returns the count (callers may ignore it; the
    original printed only). The *read* parameter is new and defaults to
    ``input``, so existing callers are unaffected.
    """
    candy = 0
    read()  # case label line, unused
    r, c = map(int, read().split())
    grid = [read() for _ in range(r)]
    # Horizontal ">o<" triples.
    for i in range(r):
        for j in range(c - 2):
            if grid[i][j] == '>' and grid[i][j + 1] == 'o' and grid[i][j + 2] == '<':
                candy += 1
    # Vertical "v", "o", "^" triples (top to bottom).
    for i in range(r - 2):
        for j in range(c):
            if grid[i][j] == 'v' and grid[i + 1][j] == 'o' and grid[i + 2][j] == '^':
                candy += 1
    print(candy)
    return candy
# Run each of the t test cases read at module start.
for _ in range(t):
    solve()
pozytywnie_facebook/facebook_project/settings.py | riklaunim/django-examples | 17 | 12760518 | <reponame>riklaunim/django-examples<filename>pozytywnie_facebook/facebook_project/settings.py
"""
Django settings for facebook_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'djcelery',
    'kombu.transport.django',
    'facebook_auth',
    'facebook_datastore',
    'facebook_javascript_sdk',
    'facebook_javascript_authentication',
    'facebook_signed_request',
    'javascript_settings',
    'social_metadata',
    'sslserver',
    'blog',
    'canvas_example',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'facebook_javascript_authentication.middlewares.P3PMiddleware',
    'facebook_signed_request.middleware.SignedRequestMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'facebook_signed_request.middleware.FacebookLoginMiddleware',
)

# Facebook backends supplement Django's default model backend.
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'facebook_auth.backends.FacebookBackend',
    'facebook_auth.backends.FacebookJavascriptBackend',
)

ROOT_URLCONF = 'facebook_project.urls'

WSGI_APPLICATION = 'facebook_project.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

BASE_URL = 'http://localhost:8000'
STATIC_URL = '/static/'
STATIC_ROOT = '/tmp/facebook_example/'
MEDIA_URL = 'http://localhost:8000/media/'
MEDIA_ROOT = 'media/'

# Facebook application credentials (placeholders; set per deployment).
FACEBOOK_APP_SECRET = 'SECRET_HERE'
FACEBOOK_APP_ID = 'APP_ID_HERE'

# Celery uses the Django ORM as its broker (kombu django transport).
BROKER_URL = 'django://'
CELERY_ACCEPT_CONTENT = ['json']

import djcelery
djcelery.setup_loader()
| 2.125 | 2 |
gs/group/messages/add/base/__init__.py | groupserver/gs.group.messages.add.base | 0 | 12760519 | <reponame>groupserver/gs.group.messages.add.base<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import
#lint:disable
from .addapost import add_a_post
from .base import ListInfoForm
#lint:enable
| 1.195313 | 1 |
tests/func/test_api.py | n3hrox/dvc | 0 | 12760520 | <reponame>n3hrox/dvc<gh_stars>0
import os
import shutil
import pytest
from .test_data_cloud import _should_test_aws
from .test_data_cloud import _should_test_azure
from .test_data_cloud import _should_test_gcp
from .test_data_cloud import _should_test_hdfs
from .test_data_cloud import _should_test_oss
from .test_data_cloud import _should_test_ssh
from .test_data_cloud import get_aws_url
from .test_data_cloud import get_azure_url
from .test_data_cloud import get_gcp_url
from .test_data_cloud import get_hdfs_url
from .test_data_cloud import get_local_url
from .test_data_cloud import get_oss_url
from .test_data_cloud import get_ssh_url
from dvc import api
from dvc.exceptions import FileMissingError
from dvc.main import main
from dvc.path_info import URLInfo
from dvc.remote.config import RemoteConfig
# NOTE: staticmethod is only needed in Python 2
# NOTE: staticmethod is only needed in Python 2
class Local:
    """Local-filesystem remote descriptor."""
    should_test = staticmethod(lambda: True)
    get_url = staticmethod(get_local_url)


class S3:
    """Amazon S3 remote descriptor."""
    should_test = staticmethod(_should_test_aws)
    get_url = staticmethod(get_aws_url)


class GCP:
    """Google Cloud Storage remote descriptor."""
    should_test = staticmethod(_should_test_gcp)
    get_url = staticmethod(get_gcp_url)


class Azure:
    """Azure Blob Storage remote descriptor."""
    should_test = staticmethod(_should_test_azure)
    get_url = staticmethod(get_azure_url)


class OSS:
    """Alibaba OSS remote descriptor."""
    should_test = staticmethod(_should_test_oss)
    get_url = staticmethod(get_oss_url)


class SSH:
    """SSH remote descriptor."""
    should_test = staticmethod(_should_test_ssh)
    get_url = staticmethod(get_ssh_url)


class HDFS:
    """HDFS remote descriptor."""
    should_test = staticmethod(_should_test_hdfs)
    get_url = staticmethod(get_hdfs_url)


# Cloud-only remotes, and the same list with the local backend prepended.
remote_params = [S3, GCP, Azure, OSS, SSH, HDFS]
all_remote_params = [Local] + remote_params
@pytest.fixture
def remote_url(request):
    """Parametrized fixture: yield a URL for the requested remote, or skip.

    request.param is one of the remote descriptor classes above; the test
    is skipped when that backend's credentials/environment are missing.
    """
    if not request.param.should_test():
        raise pytest.skip()
    return request.param.get_url()
def run_dvc(*argv):
    """Invoke the dvc CLI entry point and require a zero exit code."""
    exit_code = main(argv)
    assert exit_code == 0
@pytest.mark.parametrize("remote_url", remote_params, indirect=True)
def test_get_url(repo_dir, dvc_repo, remote_url):
    """api.get_url returns the remote cache path for a tracked file."""
    run_dvc("remote", "add", "-d", "upstream", remote_url)
    dvc_repo.add(repo_dir.FOO)

    # Content hash of FOO, split into a 2-char directory and the remainder.
    expected_url = URLInfo(remote_url) / "ac/bd18db4cc2f85cedef654fccc4a4d8"
    assert api.get_url(repo_dir.FOO) == expected_url
@pytest.mark.parametrize("remote_url", remote_params, indirect=True)
def test_get_url_external(repo_dir, dvc_repo, erepo, remote_url):
    """api.get_url also works against an external repo given by URL."""
    _set_remote_url_and_commit(erepo.dvc, remote_url)

    # Using file url to force clone to tmp repo
    repo_url = "file://" + erepo.dvc.root_dir
    expected_url = URLInfo(remote_url) / "ac/bd18db4cc2f85cedef654fccc4a4d8"
    assert api.get_url(repo_dir.FOO, repo=repo_url) == expected_url
@pytest.mark.parametrize("remote_url", all_remote_params, indirect=True)
def test_open(repo_dir, dvc_repo, remote_url):
    """api.open streams tracked content back from the remote after the
    local cache has been wiped."""
    run_dvc("remote", "add", "-d", "upstream", remote_url)
    dvc_repo.add(repo_dir.FOO)
    run_dvc("push")

    # Remove cache to force download
    shutil.rmtree(dvc_repo.cache.local.cache_dir)

    with api.open(repo_dir.FOO) as fd:
        assert fd.read() == repo_dir.FOO_CONTENTS
@pytest.mark.parametrize("remote_url", all_remote_params, indirect=True)
def test_open_external(repo_dir, dvc_repo, erepo, remote_url):
    """api.open/read against an external repo, honoring the rev argument."""
    # Push content for both branches of the external repo to the remote.
    erepo.dvc.scm.checkout("branch")
    _set_remote_url_and_commit(erepo.dvc, remote_url)
    erepo.dvc.scm.checkout("master")
    _set_remote_url_and_commit(erepo.dvc, remote_url)

    erepo.dvc.push(all_branches=True)

    # Remove cache to force download
    shutil.rmtree(erepo.dvc.cache.local.cache_dir)

    # Using file url to force clone to tmp repo
    repo_url = "file://" + erepo.dvc.root_dir
    with api.open("version", repo=repo_url) as fd:
        assert fd.read() == "master"

    assert api.read("version", repo=repo_url, rev="branch") == "branch"
@pytest.mark.parametrize("remote_url", all_remote_params, indirect=True)
def test_missing(repo_dir, dvc_repo, remote_url):
    """api.read raises FileMissingError when neither cache nor remote has
    the tracked content (nothing was pushed before the cache was wiped)."""
    run_dvc("add", repo_dir.FOO)
    run_dvc("remote", "add", "-d", "upstream", remote_url)

    # Remove cache to make foo missing
    shutil.rmtree(dvc_repo.cache.local.cache_dir)

    with pytest.raises(FileMissingError):
        api.read(repo_dir.FOO)
def _set_remote_url_and_commit(repo, remote_url):
    """Point *repo*'s 'upstream' remote at *remote_url* and commit the
    modified config so clones see the change."""
    rconfig = RemoteConfig(repo.config)
    rconfig.modify("upstream", "url", remote_url)
    repo.scm.add([repo.config.config_file])
    repo.scm.commit("modify remote")
def test_open_scm_controlled(dvc_repo, repo_dir):
    """api.open on a .dvc stage file returns its literal on-disk content."""
    stage, = dvc_repo.add(repo_dir.FOO)

    # Read via a context manager; the original leaked the file handle.
    with open(stage.path, "r") as fobj:
        stage_content = fobj.read()

    with api.open(stage.path) as fd:
        assert fd.read() == stage_content
def test_open_not_cached(dvc_repo):
    """Files produced with metrics_no_cache can be read from the workspace,
    and raise FileMissingError once deleted (no cache copy to restore)."""
    metric_file = "metric.txt"
    metric_content = "0.6"
    # Small python one-liner run through `dvc run` to create the metric.
    metric_code = "open('{}', 'w').write('{}')".format(
        metric_file, metric_content
    )
    dvc_repo.run(
        metrics_no_cache=[metric_file],
        cmd=('python -c "{}"'.format(metric_code)),
    )

    with api.open(metric_file) as fd:
        assert fd.read() == metric_content

    os.remove(metric_file)
    with pytest.raises(FileMissingError):
        api.read(metric_file)
| 1.9375 | 2 |
arucoDetection/src/svgfig/svgfig/_viewer.py | LavaHawk0123/Artmis-Drone | 0 | 12760521 | #!/usr/bin/env python
import cairo
import rsvg
import gtk
class View:
    """Minimal GTK window that renders an SVG string via librsvg/cairo."""

    def __init__(self):
        # Start with an empty 800x600 canvas.
        self.string = """<svg width="800" height="600"></svg>"""
        self.svg = rsvg.Handle(data=self.string)
        self.win = gtk.Window()
        self.da = gtk.DrawingArea()
        self.win.add(self.da)
        self.da.set_size_request(800, 600)
        self.da.connect("expose-event", self.expose_cairo)
        self.win.connect("destroy", self.destroy)
        self.win.show_all()
        self.win.present()

    def expose_cairo(self, win, event):
        # Re-parse the current SVG string and paint it on every expose event.
        self.svg = rsvg.Handle(data=self.string)
        cr = self.da.window.cairo_create()
        self.svg.render_cairo(cr)

    def destroy(self, widget, data=None):
        gtk.main_quit()

    def renderSVG(self, text):
        """Replace the displayed SVG source and queue a full redraw."""
        x, y, w, h = self.win.allocation
        self.da.window.invalidate_rect((0,0,w,h), False)
        self.string = text
| 2.453125 | 2 |
main-alert.py | xshellinc/smart-sense | 1 | 12760522 | <reponame>xshellinc/smart-sense<gh_stars>1-10
from sense_hat import SenseHat as Sensor
#from sense_emu import SenseHat as Sensor
#from dht22 import DHT22 as Sensor
from ifttt import Webhook
from wbgt import *
from flu import *
import time
from datetime import datetime
IFTTT_KEY = ''  # IFTTT webhook key; fill in before deploying
TEMP_DELTA = 0  # calibration offset subtracted from the raw temperature
CHECK_SPAN = 60 * 10  # seconds between sensor polls

sense = Sensor()
# The event name is changed per-call later, so the initial value is arbitrary
webhook = Webhook('demo', IFTTT_KEY)
def alert(message):
    """Send *message* through the IFTTT 'send_message' webhook event."""
    payload = {
        "value1": message
    }
    res = webhook.post(event='send_message', payload=payload)
    if not res.ok:
        print('Request failed with status code', res.status_code)
def main():
    """Poll the sensor every CHECK_SPAN seconds, upload readings, and fire
    one-shot alerts when heatstroke / flu risk levels cross into 3 or 4."""
    wbgt_level = 0
    vh_level = 0
    while True:
        now = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
        temp = sense.get_temperature() - TEMP_DELTA
        hum = sense.get_humidity()
        payload = {
            "value1": now,
            "value2": temp,
            "value3": hum,
        }
        res = webhook.post(event='send_data', payload=payload)
        if not res.ok:
            print('Request failed with status code', res.status_code)
        '''
        熱中症警戒度が3以上に変化したらアラートを出す
        '''
        # Alert when the heatstroke (WBGT) level first reaches 3, or rises to 4.
        wbgt = get_wbgt(temp, hum)
        level, msg = get_wbgt_level(wbgt)
        if (wbgt_level < 3 and level >= 3) or (wbgt_level < 4 and level >= 4):
            alert(f'【{msg}】熱中症にご注意ください!現在の暑さ指数は{round(wbgt)}です。')
        wbgt_level = level
        '''
        インフルエンザ警戒度が3以上に変化したらアラートを出す
        '''
        # Alert when the flu (absolute humidity) level first reaches 3, or rises to 4.
        vh = get_vh(temp, hum)
        level, msg = get_flu_level(vh)
        if (vh_level < 3 and level >= 3) or (vh_level < 4 and level >= 4):
            alert(f'【{msg}】インフルエンザにご注意ください。現在の絶対湿度は{round(vh)}g/m3です。加湿しましょう。')
        vh_level = level
        time.sleep(CHECK_SPAN)

if __name__ == '__main__':
    main()
| 2.71875 | 3 |
SciFiReaders/__version__.py | sumner-harris/SciFiReaders | 0 | 12760523 | <gh_stars>0
# Package version metadata.
version = '0.0.1'
time = '2021-02-07 10:00:25'  # build/release timestamp
| 1.085938 | 1 |
Test/sense.py | Brumawen/power | 0 | 12760524 | from gpiozero import LightSensor
from time import sleep
# Running count of dark-to-light transitions seen by the LDR.
pulseCount = 0

def lightPulse():
    """Callback fired by gpiozero whenever the LDR reading turns light."""
    global pulseCount
    pulseCount = pulseCount + 1
    print("Pulse ", pulseCount)

# LDR on GPIO 19; queue_len=1 reacts to single readings without smoothing.
ldr = LightSensor(19,queue_len=1)
ldr.when_light = lightPulse
ldr.threshold = 0.1

# Keep the process alive; the callback does the work.
while True:
    #print(ldr.value)
    sleep(1)
| 3.15625 | 3 |
crash_course/ch07/exec/rental_car.py | dantin/python-by-example | 0 | 12760525 | <reponame>dantin/python-by-example<gh_stars>0
# Ask for a car make and echo it back with normalized capitalization.
car = input('What kind of rental car you would like? ')
print('Let me see if I can find you a ' + car.lower().title() + '.')
| 3.78125 | 4 |
app/models.py | Nexus357ZA/upgraded-couscous | 0 | 12760526 | from flask import current_app, url_for
import json
#from app import db
class Article():
    """Placeholder model for a news article.

    The original class body contained only comments, which is a syntax
    error in Python (a class needs at least one statement); this docstring
    makes the module importable. The intended SQLAlchemy columns are kept
    below as commented-out scaffolding.
    """
    # #TODO - This is just a skeleton
    # id = db.Column(db.Integer, primary_key=True)
    # source = db.Column(db.String(160))
    # author = db.Column(db.String(160))
    # title = db.Column(db.String(160))
    # description = db.Column(db.String(160))
    # url = db.Column(db.String())
    # urlToImage = db.Column(db.String())
    # publishedAt = db.Column(db.String(160))
    # content = db.Column(db.String())
    #
    # def __repr__(self):
    #     return '<Article {}>'.format(self.title)
buybacks/views/stats.py | buahaha/aa-buybacks | 0 | 12760527 | from django.contrib.auth.decorators import login_required, permission_required
from django.shortcuts import render
from ..models import Contract
@login_required
@permission_required("buybacks.basic_access")
def my_stats(request):
    """Render the stats page restricted to the current user's contracts."""
    contracts = Contract.objects.filter(
        character__user=request.user,
    )

    context = {
        "contracts": contracts,
        "mine": True,  # template shows the personal variant
    }

    return render(request, "buybacks/stats.html", context)
@login_required
@permission_required("buybacks.basic_access")
def program_stats(request, program_pk):
    """Render the stats page for every contract of one buyback program."""
    contracts = Contract.objects.filter(
        program__pk=program_pk,
    )

    context = {
        "contracts": contracts,
        "mine": False,  # template shows the program-wide variant
    }

    return render(request, "buybacks/stats.html", context)
| 2.046875 | 2 |
tests/resources/test_resources.py | Glignos/invenio-records-resources | 0 | 12760528 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Service tests."""
import json
import pytest
from invenio_search import current_search, current_search_client
from mock_module.api import Record
@pytest.fixture()
def input_data():
    """Input data (as coming from the view layer).

    Minimal valid payload for the mock record REST endpoints.
    """
    return {
        'metadata': {
            'title': 'Test'
        },
    }
def test_simple_flow(app, client, input_data, headers):
    """Test a simple REST API flow: create, read, search, update, delete.

    (Removed the unused local ``idx``; the index is refreshed via the
    Record API instead.)
    """
    h = headers

    # Create a record
    res = client.post('/mocks', headers=h, data=json.dumps(input_data))
    assert res.status_code == 201
    id_ = res.json['id']
    assert res.json['metadata'] == input_data['metadata']

    # Read the record
    res = client.get(f'/mocks/{id_}', headers=h)
    assert res.status_code == 200
    assert res.json['metadata'] == input_data['metadata']

    # TODO: Should this be part of the service? we don't know the index easily
    Record.index.refresh()

    # Search it
    res = client.get('/mocks', query_string={'q': f'id:{id_}'}, headers=h)
    assert res.status_code == 200
    assert res.json['hits']['total'] == 1
    assert res.json['hits']['hits'][0]['metadata'] == input_data['metadata']
    data = res.json['hits']['hits'][0]
    data['metadata']['title'] = 'New title'

    # Update it
    res = client.put(f'/mocks/{id_}', headers=h, data=json.dumps(data))
    assert res.status_code == 200
    assert res.json['metadata']['title'] == 'New title'

    # Delete it
    res = client.delete(f'/mocks/{id_}')
    assert res.status_code == 204
    assert res.get_data(as_text=True) == ''

    Record.index.refresh()

    # Try to get it again
    res = client.get(f'/mocks/{id_}', headers=h)
    assert res.status_code == 410

    # Try to get search it again
    res = client.get('/mocks', query_string={'q': f'id:{id_}'}, headers=h)
    assert res.status_code == 200
    assert res.json['hits']['total'] == 0
def test_search_empty_query_string(client, input_data, headers):
    """Searching with no query (or an empty one) returns all records.

    (Removed the unused local ``idx``.)
    """
    # Create a record
    res = client.post('/mocks', headers=headers, data=json.dumps(input_data))
    assert res.status_code == 201

    # TODO: Should this be part of the service? we don't know the index easily
    Record.index.refresh()

    # Search without a query string at all
    res = client.get('/mocks', headers=headers)
    assert res.status_code == 200
    assert res.json['hits']['total'] == 1
    assert res.json['hits']['hits'][0]['metadata'] == input_data['metadata']

    # Search with an explicitly empty query string
    res = client.get('/mocks', query_string={'q': ''}, headers=headers)
    assert res.status_code == 200
    assert res.json['hits']['total'] == 1
    assert res.json['hits']['hits'][0]['metadata'] == input_data['metadata']
| 2.265625 | 2 |
performers/export.py | Jiyuan-Yang/paper_lookup | 1 | 12760529 | from configs.meta_params import get_db_list
from utils.parser.bib_parser import bib_gen
def export_exec(paper_id):
    """Print the BibTeX entry of the first stored paper whose id matches.

    Prints nothing when no paper with *paper_id* exists.
    """
    match = next(
        (entry for entry in get_db_list() if entry['id'] == paper_id),
        None,
    )
    if match is not None:
        print(bib_gen(match))
| 2.0625 | 2 |
rframe/interfaces/json.py | jmosbacher/rframe | 0 | 12760530 | from functools import singledispatch
import json
import fsspec
from toolz import groupby
from loguru import logger
from typing import Any, List, Union
from pydantic.datetime_parse import datetime_re
from pydantic.validators import parse_datetime
import numpy as np
from ..types import Interval
from ..indexes import Index, InterpolatingIndex, IntervalIndex, MultiIndex
from ..utils import jsonable, singledispatchmethod, hashable_doc, unhashable_doc
from .base import BaseDataQuery, DatasourceInterface
class JsonBaseQuery(BaseDataQuery):
    """Base class for queries over an in-memory list of JSON documents.

    Subclasses implement :meth:`filter` to decide whether a single record
    matches the stored (field, label) pair.
    """

    def __init__(self, index, data, field: str, label: Any) -> None:
        self.index = index
        self.data = data
        self.field = field
        self.label = label

    @property
    def labels(self):
        return {self.field: self.label}

    def filter(self, record: dict):
        """Return truthy when *record* matches; provided by subclasses."""
        raise NotImplementedError

    def apply_selection(self, records):
        """Keep only the records accepted by :meth:`filter`."""
        return list(filter(self.filter, records))

    def execute(self, limit: int = None, skip: int = None, sort=None):
        """Run the query with optional sorting/pagination and return
        de-JSON-ified documents."""
        logger.debug("Applying pandas dataframe selection")
        if not self.data:
            return []
        if sort is None:
            data = self.data
        else:
            if isinstance(sort, str):
                sort = [sort]
            # hashable_doc freezes nested containers so documents can be
            # ordered by a tuple of sort-key values.
            data = [hashable_doc(d) for d in self.data]
            data = sorted(data, key=lambda d: tuple(d[s] for s in sort))
            data = [unhashable_doc(d) for d in data]
        docs = self.apply_selection(data)
        if limit is not None:
            # Each index label may expand to several stored documents.
            start = skip * self.index.DOCS_PER_LABEL if skip is not None else 0
            limit = start + limit * self.index.DOCS_PER_LABEL
            docs = docs[start:limit]
        docs = self.index.reduce(docs, self.labels)
        docs = from_json(docs)
        logger.debug(f"Done. Found {len(docs)} documents.")
        return docs

    def _aggregate(self, fields: Union[str, List[str]], agg):
        """Shared driver for min/max/unique: run the selection, apply *agg*
        per field, convert back from JSON, and unwrap single-field results.

        (Replaces three near-identical copies of this logic in the
        original min/max/unique implementations.)
        """
        if isinstance(fields, str):
            fields = [fields]
        docs = self.apply_selection(self.data)
        results = {field: agg([d[field] for d in docs]) for field in fields}
        results = from_json(results)
        if len(fields) == 1:
            return results[fields[0]]
        return results

    def min(self, fields: Union[str, List[str]]):
        return self._aggregate(fields, min)

    def max(self, fields: Union[str, List[str]]):
        return self._aggregate(fields, max)

    def unique(self, fields: Union[str, List[str]]):
        def distinct(values):
            # Values may themselves be unhashable documents; freeze them
            # for deduplication, then thaw back.
            hashed = set(hashable_doc(v) for v in values)
            return [unhashable_doc(v) for v in hashed]
        return self._aggregate(fields, distinct)

    def count(self):
        """Number of records matching the query."""
        return len(self.apply_selection(self.data))
class JsonSimpleQuery(JsonBaseQuery):
    """Exact-match, membership, or range query on a plain field."""

    def filter(self, record: dict):
        """Return True when *record* matches the stored label.

        A ``None`` label matches everything; a step-less slice is a
        half-open range ``[start, stop)``; a stepped slice and a list are
        membership tests; anything else is an equality test. Raises
        KeyError when the queried field is absent from the record.
        """
        if self.label is None:
            return True
        if self.field not in record:
            raise KeyError(self.field)
        value = record[self.field]
        label = self.label
        if isinstance(label, slice):
            if label.step is None:
                # Half-open range [start, stop).
                return label.start <= value < label.stop
            # A stepped slice enumerates explicit candidates.
            label = list(range(label.start, label.stop, label.step))
        if isinstance(label, list):
            return value in label
        return value == label
class JsonIntervalQuery(JsonBaseQuery):
    """Overlap query against fields stored as ``{"left", "right"}`` intervals."""

    def filter(self, record: dict):
        """Return truthy when the record's interval overlaps the label.

        Accepts several label spellings: a (left, right) tuple, a dict
        with "left"/"right" keys, a slice, any object exposing .left and
        .right, or a scalar (treated as a degenerate point interval).
        """
        if self.label is None:
            # NOTE(review): returns the record itself (truthy) rather than
            # True; filter() consumers only rely on truthiness.
            return record
        if self.field not in record:
            raise KeyError(self.field)
        interval = self.label
        if isinstance(interval, tuple):
            left, right = interval
        elif isinstance(interval, dict):
            left, right = interval["left"], interval["right"]
        elif isinstance(interval, slice):
            left, right = interval.start, interval.stop
        elif hasattr(interval, "left") and hasattr(interval, "right"):
            left, right = interval.left, interval.right
        else:
            left = right = interval
        # Compare in JSON space, since stored bounds are JSON-encoded.
        left, right = to_json(left), to_json(right)
        # Strict overlap: touching endpoints do not count as a match.
        return (record[self.field]["left"] < right) and (
            record[self.field]["right"] > left
        )
class JsonInterpolationQuery(JsonBaseQuery):
    """Query returning the nearest records on each side of a target label,
    for interpolation between stored samples."""

    def apply_selection(self, records, limit=1):
        """Return up to *limit* records at or below the label and up to
        *limit* records above it, each set ordered nearest-first.

        Bug fix vs the original: distances were computed as
        ``np.abs(values) - label`` instead of ``np.abs(values - label)``,
        which on the below side sorted the FARTHEST record first.
        """
        if self.label is None:
            return records
        if not all(self.field in record for record in records):
            raise KeyError(self.field)
        field_values = np.array([record[self.field] for record in records])
        before_mask = field_values <= self.label
        after_mask = field_values > self.label
        # Absolute distance from each stored value to the query label.
        distances = np.abs(field_values - self.label)

        before_records = [records[i] for i in np.flatnonzero(before_mask)]
        before_idxs = np.argsort(distances[before_mask])[:limit]
        before_nearest = [before_records[i] for i in before_idxs]

        after_records = [records[i] for i in np.flatnonzero(after_mask)]
        after_idxs = np.argsort(distances[after_mask])[:limit]
        after_nearest = [after_records[i] for i in after_idxs]

        return before_nearest + after_nearest
class JsonMultiQuery(JsonBaseQuery):
    """Compound query ANDing together one sub-query per indexed field."""

    def __init__(self, index, data, queries: List[JsonBaseQuery]) -> None:
        self.index = index
        self.data = data
        self.queries = queries

    @property
    def labels(self):
        return {query.field: query.label for query in self.queries}

    def apply_selection(self, records):
        """Apply every sub-query in turn.

        Interpolation queries are applied per group of the remaining index
        fields so each group keeps its own nearest neighbours.

        Bug fixes vs the original: iterate the toolz ``groupby`` result's
        ``.items()`` (the original iterated its keys and mis-unpacked
        them), and drop the pandas-style ``.reset_index()`` call that does
        not exist on plain lists and raised AttributeError.
        """
        if len(self.queries) == 1:
            return self.queries[0].apply_selection(records)

        for query in self.queries:
            if isinstance(query, JsonInterpolationQuery):
                others = [q.field for q in self.queries if q is not query]
                if not others:
                    records = query.apply_selection(records)
                    continue
                selections = []
                # toolz.groupby with a list of field names groups records by
                # the tuple of those field values.
                for _, docs in groupby(others, records).items():
                    selections.extend(query.apply_selection(docs))
                records = selections
            else:
                records = query.apply_selection(records)
        return records
@DatasourceInterface.register_interface(list)
class JsonInterface(DatasourceInterface):
    """Datasource interface backed by a plain Python list of JSON documents."""

    @classmethod
    def from_url(cls, url: str, jsonpath="", **kwargs):
        """Load a .json file from any fsspec-supported URL, optionally
        drilling into a dotted *jsonpath* before validating the payload
        is a list of documents."""
        if url.endswith(".json"):
            with fsspec.open(url, **kwargs) as f:
                data = json.load(f)
                for p in jsonpath.split("."):
                    data = data[p] if p else data
                if not isinstance(data, list):
                    raise ValueError("JSON file must contain a list of documents")
            return cls(data)

        raise NotImplementedError

    @singledispatchmethod
    def compile_query(self, index, label):
        """Build the query object for *index*; dispatched on index type."""
        raise NotImplementedError(
            f"{self.__class__.__name__} does not support {type(index)} indexes."
        )

    @compile_query.register(Index)
    @compile_query.register(str)
    def simple_query(self, index, label):
        if isinstance(index, str):
            # A bare field name is wrapped in a plain Index on the fly.
            index, name = Index(), index
            index.name = name
        label = to_json(label)
        return JsonSimpleQuery(index, self.source, index.name, label)

    @compile_query.register(IntervalIndex)
    def interval_query(self, index, label):
        label = to_json(label)
        return JsonIntervalQuery(index, self.source, index.name, label)

    @compile_query.register(InterpolatingIndex)
    def interpolating_query(self, index, label):
        label = to_json(label)
        return JsonInterpolationQuery(index, self.source, index.name, label)

    @compile_query.register(list)
    @compile_query.register(tuple)
    @compile_query.register(MultiIndex)
    def multi_query(self, index, labels):
        """Compile one sub-query per component index and AND them."""
        if not isinstance(index, MultiIndex):
            index = MultiIndex(*index)

        queries = [self.compile_query(idx, labels[idx.name]) for idx in index.indexes]

        return JsonMultiQuery(index, self.source, queries)

    def _find(self, doc):
        """Return the position of the stored document sharing *doc*'s
        index labels, or raise KeyError."""
        for i, d in enumerate(self.source):
            if doc.same_index(doc.__class__(**d)):
                return i
        else:
            raise KeyError(doc.index_labels)

    def insert(self, doc):
        """Append *doc* (JSON-encoded) to the backing list."""
        doc = to_json(doc.dict())
        self.source.append(doc)

    def update(self, doc):
        """Replace the stored document with matching index labels."""
        for i, d in enumerate(self.source):
            if doc.same_index(doc.__class__(**d)):
                self.source[i] = to_json(doc.dict())
                break
        else:
            from rframe.schema import UpdateError

            raise UpdateError(f"No document with index {doc.index} found.")

    def delete(self, doc):
        """Remove the stored document with matching index labels."""
        del self.source[self._find(doc)]
def to_json(obj):
    """Serialize *obj* to JSON-compatible builtins via the project ``jsonable`` helper."""
    return jsonable(obj)
@singledispatch
def from_json(obj):
    """Inverse of :func:`to_json`; the default case returns the object unchanged.

    Type-specific behavior is added via the ``register`` handlers below.
    """
    return obj
@from_json.register(str)
def from_json_str(obj):
    # Strings matching the module-level datetime pattern are revived as
    # datetimes; everything else passes through unchanged.
    match = datetime_re.match(obj)  # type: ignore
    if match is None:
        return obj
    return parse_datetime(obj)
@from_json.register(list)
def from_json_list(obj):
    """Recursively revive every element of a JSON list."""
    return list(map(from_json, obj))
@from_json.register(tuple)
def from_json_tuple(obj):
    """Recursively revive every element, preserving the tuple type."""
    return tuple(map(from_json, obj))
@from_json.register(dict)
def from_json_dict(obj):
    """Revive a JSON dict; a {'left', 'right'} pair becomes an Interval."""
    if set(obj) == {"left", "right"}:
        left, right = from_json((obj["left"], obj["right"]))
        return Interval[left, right]
    return {key: from_json(value) for key, value in obj.items()}
| 2.125 | 2 |
GameText.py | RenaKunisaki/GhidraScripts | 4 | 12760531 | #Follow GameText array and auto assign names and enums
#@author
#@category StarFox
#@keybinding
#@menupath
#@toolbar
import jarray
from array import array
import re
listing = currentProgram.getListing()
AF = currentProgram.getAddressFactory()
DT = currentProgram.getDataTypeManager()
mem = currentProgram.getMemory()
reEnumName = re.compile(r'[^a-zA-Z0-9_]+')
def addrToInt(addr):
    """Convert a Ghidra Address (whose str() form is hex) to a Python int."""
    return int("%s" % addr, 16)
def intToAddr(addr):
    """Build a Ghidra Address from an integer via the program's AddressFactory."""
    return AF.getAddress("0x{:08X}".format(addr))
def createEnum(name, values, size=None):
    """Create a Ghidra enum datatype from a ``{member_name: value}`` dict and
    register it with the program's data type manager.

    When ``size`` is omitted it is picked from the *number* of entries.
    NOTE(review): width is usually determined by the largest value, not the
    entry count — confirm this heuristic is intended.
    """
    if name is None: name = "autoEnum"
    if size is None:
        size = 1
        if len(values) > 0xFFFF: size = 4
        elif len(values) > 0xFF: size = 2
    enum = ghidra.program.model.data.EnumDataType(name, size)
    for name, val in values.items():
        # Strip characters Ghidra rejects in enum member names.
        name = reEnumName.sub('', name)
        while True:
            try:
                enum.add(name, val)
                break
            except java.lang.IllegalArgumentException:
                # Name collision: disambiguate by appending the hex value.
                # NOTE(review): ``java``/``ghidra`` are not imported in this
                # file — presumably provided by Ghidra's Jython runtime; confirm.
                name = "%s_%X" % (name, val)
    DT.addDataType(enum, ghidra.program.model.data.DataTypeConflictHandler.REPLACE_EMPTY_STRUCTS_OR_RENAME_AND_ADD_HANDLER)
    return enum
def readAddr(addr):
    """Read a 32-bit big-endian pointer from program memory at ``addr`` and
    return it as a Ghidra Address."""
    arr = jarray.zeros(4, "b")
    mem.getBytes(addr, arr)
    # Java bytes are signed; mask each to 0-255 before assembling the word.
    v = (((arr[0] & 0xFF) << 24) |
        ((arr[1] & 0xFF) << 16) |
        ((arr[2] & 0xFF) << 8) |
        (arr[3] & 0xFF))
    return intToAddr(v)
def readString(addr):
    """Read a NUL-terminated byte string from program memory at ``addr``.

    An unreadable byte terminates the string (after logging an error).
    Returns a raw byte string; Shift-JIS decoding is intentionally left out
    (see the commented-out ``.decode`` below).
    """
    if type(addr) is int:
        addr = intToAddr(addr)
    data = []
    while True:
        try:
            b = mem.getByte(addr)
        except ghidra.program.model.mem.MemoryAccessException:
            printf("Error: can't read string from address 0x%X\n", addrToInt(addr))
            b = 0
        if b == 0: break
        elif b < 0x7F:
            # getByte returns a signed Java byte, so 0x80-0xFF read back as
            # negative values and pass this test; only 0x7F (DEL) is skipped.
            data.append(b & 0xFF)
        addr = addr.add(1)
    a = array('B', data)
    return a.tostring()#.decode('shift-jis')
# ---- main script body: walk the GameText array under the cursor ----
# Expects the cursor to sit on an array of GameText structs; annotates each
# entry with a comment/label and finally emits GameTextId enums.
data = listing.getDataAt(currentAddress)
struc = data.getComponent(0).dataType
sLen = struc.getLength()
numTexts = data.length / sLen
texts = {}
for i in range(numTexts):
    entry = data.getComponent(i)
    id = entry.getComponent(0).value.value
    numPhrases = entry.getComponent(1).value.value
    #language = entry.getComponent(5).value.value
    phrases = entry.getComponent(6)
    strs = []
    for j in range(numPhrases):
        # Each phrase is a 32-bit pointer in the phrase table.
        res = readString(readAddr(phrases.value.add(j*4)))
        strs.append(res)
    text = '_'.join(filter(lambda s: s != "" and not s.isspace(), strs))
    label = text.replace(' ', '_')
    # add a comment
    entry.setComment(entry.EOL_COMMENT,
        "[%04X] %s" % (id, '\n'.join(strs)))
    # add a label
    try:
        # BUG FIX: ``False`` was previously inside the %-format tuple
        # ("%04X_%s" with three args raises TypeError), so the bare except
        # silently skipped EVERY label. It is createLabel's third argument
        # (makePrimary).
        createLabel(phrases.value, "GameText%04X_%s" % (id, label), False)
    except:
        pass # probably invalid characters
    texts["%04X_%s" % (id, label)] = id
    printf("%04X: %s\n", id, text)
createEnum("GameTextId", texts)
createEnum("GameTextId32", texts, 4)
restaurants/urls.py | PatrickELee/RestaurantSolver | 0 | 12760532 | <reponame>PatrickELee/RestaurantSolver
from django.urls import path
from . import views
# URL namespace used by {% url 'restaurants:...' %} lookups.
app_name = 'restaurants'
urlpatterns = [
    #Home Page
    #path('/<int:restaurant_id>/', views.index, name='index'),
    path('', views.index, name='index'),
    # Restaurant listing, detail, edit, create, and random-pick pages
    path('restaurants/',views.restaurants, name='restaurants'),
    path('restaurants/<int:restaurant_id>/', views.restaurant, name='restaurant'),
    path('edit_restaurant/<int:restaurant_id>/', views.edit_restaurant, name='edit_restaurant'),
    path('new_restaurant/', views.new_restaurant, name='new_restaurant'),
    path('random_restaurant/', views.random_restaurant, name='random_restaurant'),
    #FAQ Page
    path('faq/', views.faq, name='faq'),
    #About Page
    path('about/', views.about, name='about')
]
api/momo_devc_app/views/profile_views.py | tranminhduc4796/devc_backend | 0 | 12760533 | from rest_framework.generics import ListCreateAPIView, UpdateAPIView
from rest_framework.permissions import IsAuthenticated
from ..models import Profile, User
from ..serializers import ProfileSerializer
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
class ListCreate(ListCreateAPIView):
    """Retrieve or create the authenticated user's profile.

    GET returns the single Profile belonging to ``request.user``;
    POST creates a Profile bound to ``request.user``.
    """

    serializer_class = ProfileSerializer
    permission_classes = [IsAuthenticated]

    def get(self, request, *args, **kwargs):
        """Return the requesting user's profile.

        Bug fix: the previous ``Profile.objects.get(...)`` raised an
        unhandled ``Profile.DoesNotExist`` (HTTP 500) for users without a
        profile; ``get_object_or_404`` turns that into a proper 404.
        """
        profile = get_object_or_404(Profile, user=request.user)
        serializer = self.serializer_class(profile, read_only=True)
        return Response(serializer.data)

    def perform_create(self, serializer):
        """Attach the authenticated user to the newly created profile."""
        serializer.save(user=self.request.user)
class Update(UpdateAPIView):
    """Update the profile belonging to the user identified by ``user_pk``."""

    queryset = Profile.objects.all()
    serializer_class = ProfileSerializer
    permission_classes = [IsAuthenticated]

    def get_object(self):
        """Resolve the target profile via its owning user, 404ing when absent."""
        owner = get_object_or_404(User, pk=self.kwargs['user_pk'])
        profile = get_object_or_404(Profile, user=owner)
        self.check_object_permissions(self.request, profile)
        return profile
| 2.046875 | 2 |
octavia/image/image_base.py | zhangi/octavia | 129 | 12760534 | <reponame>zhangi/octavia
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
class ImageBase(object, metaclass=abc.ABCMeta):
    """Abstract interface that every image driver backend must implement."""

    @abc.abstractmethod
    def get_image_id_by_tag(self, image_tag, image_owner=None):
        """Get image ID by image tag and owner.

        :param image_tag: image tag
        :param image_owner: optional image owner
        :raises: ImageGetException if no images found with given tag
        :return: image id
        """
| 1.859375 | 2 |
quant/observers/t_example.py | doubleDragon/QuantBot | 7 | 12760535 | <reponame>doubleDragon/QuantBot<filename>quant/observers/t_example.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
from quant.observers.basicbot import BasicBot
class T_Example(BasicBot):
    """
    python -m quant.cli -mBitfinex_BCH_USD,Liqui_BCC_BTC,Bitfinex_BTC_USD -oT_Example -f=example -d
    """
    # Minimal observer example: logs each market-depth tick it receives.

    def __init__(self):
        super(T_Example, self).__init__()

    def tick(self, depths):
        # Called by the observer framework with the latest depths snapshot.
        logging.debug("t_test tick invoke")
| 1.953125 | 2 |
protogen/library/python/compiler.py | connermarzen/proto_gen_compiler | 0 | 12760536 | import os
from io import TextIOWrapper
from typing import List
import protogen.util as util
from protogen.compiler import Compiler
from protogen.compiler import tab as tab
from protogen.library.python.std import ACCEPTED_TYPES, PYTHON_TYPES
from protogen.util import PGFile, PyClass
class PythonCompiler(Compiler):
    """Compiler backend that emits Python message classes from parsed PGFiles.

    For every input file it writes ``<header>_proto.py`` into ``outDir`` along
    with a copy of the runtime support module ``message.py``.
    """

    def __init__(self, inFiles: List[str], outDir: str, verbose: bool = False):
        super().__init__(inFiles, outDir, verbose)

    def compile(self):
        """Copy the runtime module, then generate one *_proto.py per input file."""
        import shutil
        shutil.copyfile(os.path.join(os.path.dirname(__file__),
                        'message.py'), self.outDir+'/message.py')
        for item in self.files:
            print('Compiling {} into {}/{}_proto.py'
                  ''.format(item.filename, self.outDir, item.header))
            file = open(self.outDir + '/' + item.header + '_proto.py', 'w')
            self.generateCode(out=file, file=item)
            file.write(os.linesep)
            file.close()

    def printClass(self, out: TextIOWrapper, file: PGFile, pyClass: PyClass,
                   indent: int, root: bool):
        """Emit one generated class (recursively including nested classes)."""
        if root:
            out.write(f"\nclass {pyClass.name}(Serializable, Printable):\n")
        else:
            out.write(
                f"\n{tab*indent}class {pyClass.name}(Serializable, Printable):\n")
        out.write(
            f"\n{tab*(indent+1)}def __init__(self, data: dict = None):\n")
        self.printAttributes(out, file, pyClass, indent+1)
        # Generated __init__ optionally hydrates fields from a dict.
        out.write(f"\n{tab*(indent+2)}if data is not None:\n")
        for item in file.declarations:
            if util.inferParentClass(item) == pyClass.fqname:
                short = util.inferShortName(item)
                v_type, required = file.declarations[item]
                if v_type in ACCEPTED_TYPES:
                    out.write(
                        f"{tab*(indent+3)}self.data['{short}'][0] = data['{short}']\n")
                # local, nested class (needs 'self')
                elif v_type in pyClass.gatherSubclasses('name'):
                    out.write(
                        f"{tab*(indent+3)}self.data['{short}'][0] = self.{v_type}(data['{short}'])\n")
                # local, non-nested class (doesn't need 'self')
                else:
                    out.write(
                        f"{tab*(indent+3)}self.data['{short}'][0] = {v_type}(data['{short}'])\n")
        for item in pyClass.subclasses:
            self.printClass(out, file, item, indent+1, False)
        self.printMethods(out, file, pyClass, indent+1)
        out.write(f"{tab*indent}# End Class {pyClass.name}\n")

    def printAttributes(self, out: TextIOWrapper, file: PGFile, pyClass: PyClass, indent: int):
        """Emit the generated ``self.data`` dict: name -> [value, required, is_class]."""
        out.write(f'{tab*(indent+1)}self.data = {{\n')
        for item in file.declarations:
            if util.inferParentClass(item) == pyClass.fqname:
                v_type, required = file.declarations[item]
                short = util.inferShortName(item)
                # primitive data type
                if v_type == 'list':
                    out.write(
                        f'{tab*(indent+2)}\'{short}\': [[], {required}, False],\n')
                elif v_type == 'map':
                    out.write(
                        f'{tab*(indent+2)}\'{short}\': [{{}}, {required}, False],\n')
                elif v_type in ACCEPTED_TYPES:
                    out.write(
                        f'{tab*(indent+2)}\'{short}\': [None, {required}, False],\n')
                # local, nested class (needs 'self')
                elif v_type in pyClass.gatherSubclasses('name'):
                    out.write(
                        f'{tab*(indent+2)}\'{short}\': [self.{v_type}(), {required}, True],\n')
                # local, non-nested class (doesn't need 'self')
                else:
                    out.write(
                        f'{tab*(indent+2)}\'{short}\': [{v_type}(), {required}, True],\n')
        out.write(f'{tab*(indent+1)}}}\n')

    def printMethods(self, out: TextIOWrapper, file: PGFile, pyClass: PyClass, indent: int):
        """Emit a typed getter and a fluent setter for every field of the class."""
        for item in file.declarations:
            if util.inferParentClass(item) == pyClass.fqname:
                v_type, req = file.declarations[item]
                short = util.inferShortName(item)
                # Get methods
                if v_type in ACCEPTED_TYPES:
                    out.write(
                        f'\n{tab*indent}def get_{short}(self) -> {PYTHON_TYPES[v_type]}:\n')
                else:
                    out.write(
                        f'\n{tab*indent}def get_{short}(self) -> {v_type}:\n')
                out.write(
                    f'{tab*(indent+1)}return self.data[\'{short}\'][0]\n')
                # Set methods (primitive setters also emit a runtime type check)
                if v_type in PYTHON_TYPES:
                    out.write(
                        f'\n{tab*indent}def set_{short}(self, {short}: {PYTHON_TYPES[v_type]}) -> \'{pyClass.name}\':\n'
                        f'{tab*(indent+1)}self._assertType("{short}", {short}, {PYTHON_TYPES[v_type]}, "{v_type}")\n')
                else:
                    out.write(
                        f'\n{tab*indent}def set_{short}(self, {short}: {v_type}) -> \'{pyClass.name}\':\n')
                out.write(
                    f'{tab*(indent+1)}self.data[\'{short}\'][0] = {short}\n'
                    f'{tab*(indent+1)}return self\n')

    def printFactory(self, out: TextIOWrapper, file: PGFile):
        """Emit a <header>Factory class that deserializes bytes into the
        matching root-level generated class."""
        outString = (
            "\n\nclass {}Factory(object):\n"
            "    @staticmethod\n"
            "    def deserialize(data: bytes):\n"
            "        data = Serializable.deserialize(data)\n"
            "        if len(data) > 1:\n"
            "            raise AttributeError('This is likely not a Protogen packet.')\n"
            "\n"
            "        packetType = None\n"
            "        for item in data:\n"
            "            packetType = item[item.rfind('.')+1:]\n"
        )
        out.write(outString.format(file.header))
        for item in file.classes:
            if item.parent is None:  # root-level class
                out.write(f'{tab*3}if packetType == \'{item.name}\':\n'
                          f'{tab*4}return {item.name}(data[item])\n')
        out.write(
            "        else:\n"
            "            raise AttributeError('Respective class not found.')\n")

    def generateCode(self, out: TextIOWrapper, file: PGFile):
        """Write the full generated module: imports, root classes, factory."""
        # out.write("from protogen.library.python.message import Serializable\n"),
        # out.write("from protogen.library.python.message import Printable\n\n")
        out.write("from .message import Printable, Serializable")
        for item in file.classes:
            if item.parent is None:
                self.printClass(out, file, item, 0, True)
        self.printFactory(out, file)
| 2.578125 | 3 |
AirzoneCloud/Device.py | max13fr/airzonecloud | 0 | 12760537 | <reponame>max13fr/airzonecloud
import logging
import time
from typing import Union
from . import AirzoneCloud, Group
from .constants import MODES_CONVERTER
_LOGGER = logging.getLogger(__name__)
class Device:
    """Manage a AirzoneCloud device (thermostat)"""

    # NOTE(review): these class-level mutable defaults are shared between
    # instances until __init__ rebinds them — harmless here since __init__
    # always assigns, but worth keeping in mind.
    _api: "AirzoneCloud" = None
    _group: "Group" = None
    _data: dict = {}
    _state: dict = {}

    def __init__(self, api: "AirzoneCloud", group: "Group", data: dict) -> None:
        """Store API/group references and immediately fetch the device state."""
        self._api = api
        self._group = group
        self._data = data
        # load state
        self.refresh()
        # log
        _LOGGER.info("Init {}".format(self.str_verbose))
        _LOGGER.debug(data)

    def __str__(self) -> str:
        # Short human-readable summary (see str_verbose for ids as well).
        return "Device(name={}, is_connected={}, is_on={}, mode={}, current_temp={}, target_temp={})".format(
            self.name,
            self.is_connected,
            self.is_on,
            self.mode,
            self.current_temperature,
            self.target_temperature,
        )

    @property
    def str_verbose(self) -> str:
        """More verbose description of current device"""
        return "Device(name={}, is_connected={}, is_on={}, mode={}, current_temp={}, target_temp={}, id={}, ws_id={})".format(
            self.name,
            self.is_connected,
            self.is_on,
            self.mode,
            self.current_temperature,
            self.target_temperature,
            self.id,
            self.ws_id,
        )

    @property
    def all_properties(self) -> dict:
        """Return all group properties values"""
        result = {}
        for prop in [
            "id",
            "name",
            "type",
            "ws_id",
            "system_number",
            "zone_number",
            "is_connected",
            "is_on",
            "mode_id",
            "mode",
            "mode_generic",
            "mode_description",
            "modes_availables",
            "modes_availables_generics",
            "current_humidity",
            "current_temperature",
            "target_temperature",
            "min_temperature",
            "max_temperature",
            "step_temperature",
        ]:
            result[prop] = getattr(self, prop)
        return result

    #
    # getters
    #

    @property
    def id(self) -> str:
        """Return device id"""
        return self._data.get("device_id")

    @property
    def name(self) -> str:
        """Return device name"""
        return self._data.get("name")

    @property
    def type(self) -> str:
        """Return device type (az_zone┃aidoo)"""
        return self._data.get("type")

    @property
    def ws_id(self) -> str:
        """Return device webserver id (mac address)"""
        return self._data.get("ws_id")

    @property
    def system_number(self) -> str:
        """Return device system_number"""
        return self._data.get("meta", {}).get("system_number")

    @property
    def zone_number(self) -> str:
        """Return device zone_number"""
        return self._data.get("meta", {}).get("zone_number")

    @property
    def is_connected(self) -> bool:
        """Return if the device is online (True) or offline (False)"""
        return self._state.get("isConnected", False)

    @property
    def is_on(self) -> bool:
        """Return True if the device is on"""
        return self._state.get("power", False)

    @property
    def is_master(self) -> bool:
        """Return True if the device is a master thermostat (allowed to update the mode of all devices)"""
        return len(self.modes_availables_ids) > 0

    @property
    def mode_id(self) -> int:
        """Return device current id mode (0┃1┃2┃3┃4┃5┃6┃7┃8┃9┃10┃11┃12)"""
        return self._state.get("mode", 0)

    @property
    def mode(self) -> str:
        """Return device current mode name (stop | auto | cooling | heating | ventilation | dehumidify | emergency-heating | air-heating | radiant-heating | combined-heating | air-cooling | radiant-cooling | combined-cooling)"""
        return MODES_CONVERTER.get(str(self.mode_id), {}).get("name")

    @property
    def mode_generic(self) -> str:
        """Return device current generic mode (stop | auto | cooling | heating | ventilation | dehumidify | emergency)"""
        return MODES_CONVERTER.get(str(self.mode_id), {}).get("generic")

    @property
    def mode_description(self) -> str:
        """Return device current mode description (pretty name to display)"""
        return MODES_CONVERTER.get(str(self.mode_id), {}).get("description")

    @property
    def modes_availables_ids(self) -> "list[int]":
        """Return device availables modes list ([0┃1┃2┃3┃4┃5┃6┃7┃8┃9┃10┃11┃12, ...])"""
        return self._state.get("mode_available", [])

    @property
    def modes_availables(self) -> "list[str]":
        """Return device availables modes names list ([stop | auto | cooling | heating | ventilation | dehumidify | emergency-heating | air-heating | radiant-heating | combined-heating | air-cooling | radiant-cooling | combined-cooling, ...])"""
        return [
            MODES_CONVERTER.get(str(mode_id), {}).get("name")
            for mode_id in self.modes_availables_ids
        ]

    @property
    def modes_availables_generics(self) -> "list[str]":
        """Return device availables modes generics list ([stop | auto | cooling | heating | ventilation | dehumidify | emergency, ...])"""
        # set() deduplicates generics shared by several concrete modes.
        return list(
            set(
                [
                    MODES_CONVERTER.get(str(mode_id), {}).get("generic")
                    for mode_id in self.modes_availables_ids
                ]
            )
        )

    @property
    def current_temperature(self) -> float:
        """Return device current temperature in °C"""
        return float(self._state.get("local_temp", {}).get("celsius", 0))

    @property
    def current_humidity(self) -> int:
        """Return device current humidity in percentage (0-100)"""
        return int(self._state.get("humidity", 0))

    @property
    def target_temperature(self) -> float:
        """Return device target temperature for current mode"""
        # The state key holding the setpoint depends on the active mode.
        key = MODES_CONVERTER.get(str(self.mode_id), {}).get("setpoint_key")
        return float(self._state.get(key, {}).get("celsius", 0))

    @property
    def min_temperature(self) -> float:
        """Return device minimal temperature for current mode"""
        key = MODES_CONVERTER.get(str(self.mode_id), {}).get("range_key_prefix") + "min"
        return float(self._state.get(key, {}).get("celsius", 0))

    @property
    def max_temperature(self) -> float:
        """Return device maximal temperature for current mode"""
        key = MODES_CONVERTER.get(str(self.mode_id), {}).get("range_key_prefix") + "max"
        return float(self._state.get(key, {}).get("celsius", 0))

    @property
    def step_temperature(self) -> float:
        """Return device step temperature (minimum increase/decrease step)"""
        return float(self._state.get("step", {}).get("celsius", 0.5))

    #
    # setters
    #

    def turn_on(self, auto_refresh: bool = True, delay_refresh: int = 1) -> "Device":
        """Turn device on"""
        _LOGGER.info("call turn_on() on {}".format(self.str_verbose))
        self._set("power", True)
        if auto_refresh:
            time.sleep(delay_refresh)  # wait data refresh by airzone
            self.refresh()
        return self

    def turn_off(self, auto_refresh: bool = True, delay_refresh: int = 1) -> "Device":
        """Turn device off"""
        _LOGGER.info("call turn_off() on {}".format(self.str_verbose))
        self._set("power", False)
        if auto_refresh:
            time.sleep(delay_refresh)  # wait data refresh by airzone
            self.refresh()
        return self

    def set_temperature(
        self, temperature: float, auto_refresh: bool = True, delay_refresh: int = 1
    ) -> "Device":
        """Set target_temperature for current device (degrees celsius)"""
        _LOGGER.info(
            "call set_temperature({}) on {}".format(temperature, self.str_verbose)
        )
        # Clamp the requested value into the device's allowed range.
        if self.min_temperature is not None and temperature < self.min_temperature:
            temperature = self.min_temperature
        if self.max_temperature is not None and temperature > self.max_temperature:
            temperature = self.max_temperature
        self._set("setpoint", temperature)
        if auto_refresh:
            time.sleep(delay_refresh)  # wait data refresh by airzone
            self.refresh()
        return self

    def set_mode(
        self, mode_name: str, auto_refresh: bool = True, delay_refresh: int = 1
    ) -> "Device":
        """Set mode of the device

        :raises ValueError: if the name is unknown or the mode is not
            available on this device (only masters expose available modes).
        """
        _LOGGER.info("call set_mode({}) on {}".format(mode_name, self.str_verbose))
        # search mode id
        mode_id_found = None
        for mode_id, mode in MODES_CONVERTER.items():
            if mode["name"] == mode_name:
                mode_id_found = int(mode_id)
                break
        if mode_id_found is None:
            raise ValueError(
                'mode name "{}" not found for {}'.format(mode_name, self.str_verbose)
            )
        if mode_id_found not in self.modes_availables_ids:
            if len(self.modes_availables_ids) == 0:
                raise ValueError(
                    'mode name "{}" (id: {}) not availables for {} : only master thermostat device can set the mode'.format(
                        mode_name,
                        mode_id_found,
                        self.str_verbose,
                        self.modes_availables,
                    )
                )
            raise ValueError(
                'mode name "{}" (id: {}) not availables for {}. Allowed values: {}'.format(
                    mode_name, mode_id_found, self.str_verbose, self.modes_availables
                )
            )
        self._set("mode", mode_id_found)
        if auto_refresh:
            time.sleep(delay_refresh)  # wait data refresh by airzone
            self.refresh()
        return self

    #
    # parent group
    #

    @property
    def group(self) -> Group:
        """Get parent group"""
        return self._group

    #
    # Refresh
    #

    def refresh(self) -> "Device":
        """Refresh current device states"""
        _LOGGER.debug("call refresh() on {}".format(self.str_verbose))
        self._state = self._api._api_get_device_state(
            self.id, self.group.installation.id
        )
        _LOGGER.debug(self._state)
        return self

    #
    # private
    #

    def _set(self, param: str, value: Union[str, int, float, bool]) -> "Device":
        """Execute a command to the current device (power, mode, setpoint, ...)"""
        _LOGGER.debug("call _set({}, {}) on {}".format(param, value, self.str_verbose))
        # "units": 0 selects celsius on the Airzone API side.
        self._api._api_patch_device(
            self.id, self.group.installation.id, param, value, {"units": 0}
        )
        return self

    def _set_data_refreshed(self, data: dict) -> "Device":
        """Set data refreshed (called by parent AirzoneCloud on refresh_devices())"""
        self._data = data
        _LOGGER.info("Data refreshed for {}".format(self.str_verbose))
        return self
#
# device raw data example
#
# {
# "device_id": "60f5cb9...",
# "meta": {
# "system_number": 1,
# "zone_number": 6
# },
# "type": "az_zone",
# "ws_id": "AA:BB:CC:DD:EE:FF",
# "name": "Salon"
# }
#
# device raw state example
#
# {
# "active": null,
# "aq_active": null,
# "aq_mode_conf": null,
# "aq_mode_values": [],
# "aq_quality": null,
# "aqpm1_0": null,
# "aqpm2_5": null,
# "aqpm10": null,
# "auto_mode": null,
# "connection_date": "2021-11-17T08:44:04.000Z",
# "disconnection_date": "2021-11-16T06:24:11.499Z",
# "eco_conf": "off",
# "eco_values": [
# "off",
# "manual",
# "a",
# "a_p",
# "a_pp"
# ],
# "humidity": 48,
# "isConnected": true,
# "local_temp": {
# "celsius": 20.7,
# "fah": 69
# },
# "mode": 3,
# "mode_available": [
# 2,
# 3,
# 4,
# 5,
# 0
# ],
# "name": "Salon",
# "power": true,
# "range_sp_cool_air_max": {
# "fah": 86,
# "celsius": 30
# },
# "range_sp_cool_air_min": {
# "celsius": 18,
# "fah": 64
# },
# "range_sp_dry_air_max": {
# "fah": 86,
# "celsius": 30
# },
# "range_sp_dry_air_min": {
# "celsius": 18,
# "fah": 64
# },
# "range_sp_emerheat_air_max": {
# "celsius": 30,
# "fah": 86
# },
# "range_sp_emerheat_air_min": {
# "fah": 59,
# "celsius": 15
# },
# "range_sp_hot_air_max": {
# "celsius": 30,
# "fah": 86
# },
# "range_sp_hot_air_min": {
# "fah": 59,
# "celsius": 15
# },
# "range_sp_stop_air_max": {
# "fah": 86,
# "celsius": 30
# },
# "range_sp_stop_air_min": {
# "fah": 59,
# "celsius": 15
# },
# "range_sp_vent_air_max": {
# "fah": 86,
# "celsius": 30
# },
# "range_sp_vent_air_min": {
# "fah": 59,
# "celsius": 15
# },
# "setpoint_air_heat": {
# "celsius": 20,
# "fah": 68
# },
# "setpoint_air_stop": {
# "celsius": 20,
# "fah": 68
# },
# "sleep": 0,
# "sleep_values": [
# 0,
# 30,
# 60,
# 90
# ],
# "speed_conf": null,
# "speed_type": null,
# "speed_values": [],
# "step": {
# "fah": 1,
# "celsius": 0.5
# },
# "usermode_conf": null,
# "warnings": [],
# "zone_sched_available": false
# }
| 2.375 | 2 |
financialaid/migrations/0017_countryincomethreshold_unique.py | Wassaf-Shahzad/micromasters | 32 | 12760538 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-13 20:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a unique constraint to
    # CountryIncomeThreshold.country_code (2-letter ISO code).

    dependencies = [
        ('financialaid', '0016_load_country_income_thresholds'),
    ]

    operations = [
        migrations.AlterField(
            model_name='countryincomethreshold',
            name='country_code',
            field=models.CharField(max_length=2, unique=True),
        ),
    ]
| 1.414063 | 1 |
newproject/mainapp/admin.py | Floou/new-django-project | 0 | 12760539 | <reponame>Floou/new-django-project<filename>newproject/mainapp/admin.py
from django.contrib import admin
from mainapp.models import Team, Trainer,Player , Match
# Expose the core football models in the Django admin site with default options.
admin.site.register(Team)
admin.site.register(Trainer)
admin.site.register(Match)
admin.site.register(Player)
| 1.476563 | 1 |
vice/yields/ccsne/S16/__init__.py | astrobeard/VICEdev | 0 | 12760540 | <reponame>astrobeard/VICEdev<filename>vice/yields/ccsne/S16/__init__.py
r"""
Sukhbold et al. (2016), ApJ, 821, 38 core collapse supernova yields
**Signature**: from vice.yields.ccsne import S16
Importing this module will automatically set the CCSN yield settings for all
elements to the IMF-averaged yields calculated with the Sukhbold et al. (2016)
yield table for [M/H] = 0 stars. This will adopt an upper mass limit of
120 :math:`M_\odot`.
.. tip:: By importing this module, the user does not sacrifice the ability to
specify their yield settings directly.
.. note:: This module is not imported with a simple ``import vice`` statement.
Contents
--------
set_params : <function>
Update the parameters with which the yields are calculated.
engines : module
Stellar explodability as a function of stellar mass under various
explosion engines as determined by the Sukhbold et al. (2016) study.
"""
from __future__ import absolute_import
try:
    # __VICE_SETUP__ is defined during package installation; when present the
    # runtime imports below are skipped.
    __VICE_SETUP__
except NameError:
    __VICE_SETUP__ = False

if not __VICE_SETUP__:

    __all__ = ["engines", "set_params", "test"]
    from ...._globals import _RECOGNIZED_ELEMENTS_
    from .. import fractional as __fractional
    from .. import settings as __settings
    from . import engines
    from .tests import test

    def set_params(**kwargs):
        r"""
        Update the parameters with which the yields are calculated from the
        Sukhbold et al. (2016) [1]_ data.

        **Signature**: vice.yields.ccsne.S16.set_params(\*\*kwargs)

        Parameters
        ----------
        kwargs : varying types
            Keyword arguments to pass to vice.yields.ccsne.fractional.

        Raises
        ------
        * TypeError
            - Recieved a keyword argument "study". This will always be
              "S16/W18" when called from this module.

        Other exceptions are raised by vice.yields.ccsne.fractional.

        Example Code
        ------------
        >>> import vice
        >>> from vice.yields.ccsne import S16
        >>> S16.set_params(m_lower = 0.3, m_upper = 45, IMF = "salpeter")

        .. seealso:: vice.yields.ccsne.fractional

        .. [1] Sukhbold et al. (2016), ApJ, 821, 38
        """
        if "study" in kwargs.keys():
            raise TypeError("Got an unexpected keyword argument: 'study'")
        else:
            # fractional() returns (yield, error); keep the yield only.
            for i in _RECOGNIZED_ELEMENTS_:
                __settings[i] = __fractional(i, study = "S16/W18", **kwargs)[0]

    # Importing this module applies the S16/W18 yields with M_upper = 120 Msun.
    set_params(m_upper = 120)

else:
    pass
| 1.609375 | 2 |
test_knot_injector.py | rdidyk/injector | 0 | 12760541 | import pytest
import knot_injector
class TestInjector(object):
    """Tests for knot_injector.Container registration and injection."""

    @pytest.fixture()
    def provider(self):
        def test_provider(container=None):
            return {'name': 'test'}
        return test_provider

    @pytest.fixture()
    def container(self):
        return knot_injector.Container()

    def test_adding_service(self, container, provider):
        container.service()(provider)
        assert container.provide('test_provider')() == {'name': 'test'}

    def test_adding_factory(self, container, provider):
        container.factory()(provider)
        assert container.provide('test_provider')() == {'name': 'test'}

    def test_adding_non_unique_provider(self, container, provider):
        container.service()(provider)
        with pytest.raises(AttributeError) as ex:
            container.service()(provider)
        # Bug fix: ``str(ex)`` stringifies the pytest ExceptionInfo wrapper
        # (location info), not the exception message — compare ``ex.value``.
        assert str(ex.value) == 'Provider name must be unique'

    def test_non_callable_provider(self, container):
        import random
        container.factory(name='test_prov')(random)
        assert container('test_prov') == random

    def test_injection(self, container, provider):
        import random
        container.service(name='test_provider')(provider)
        container.factory(name='rnd')(random)

        @container.inject
        def test_func(test_provider, rnd, msg, **kwargs):
            return test_provider, rnd, msg, kwargs

        res = test_func(msg="don't obey", answer=42)
        assert res == (
            {'name': 'test'},
            random,
            "don't obey",
            {'answer': 42},
        )
| 2.515625 | 3 |
Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/cmd/redirect.py | bidhata/EquationGroupLeaks | 9 | 12760542 | <gh_stars>1-10
import ops.cmd
import dsz
import dsz.cmd
import util.ip
from ops.cmd import OpsCommandException
# Protocol selector values accepted by the -tcp/-udp flags.
TCP = 'tcp'
UDP = 'udp'
# Direction keywords: which side opens the listening socket.
IMPLANTLISTEN = 'implantlisten'
LPLISTEN = 'lplisten'
# Option names recognized and forwarded to the underlying "redirect" plugin.
VALID_OPTIONS = ['lplisten', 'implantlisten', 'target', 'tcp', 'udp', 'portsharing', 'connections', 'limitconnections', 'sendnotify', 'packetsize']
class RedirectCommand(ops.cmd.DszCommand, ):
    def __init__(self, plugin='redirect', lplisten=None, implantlisten=None, target=None, **optdict):
        """Build a redirect command.

        Exactly one of ``lplisten``/``implantlisten`` may be given; ``True``
        just selects the direction, any other value is handed to the matching
        property setter. Unknown keys in ``optdict`` are dropped before the
        remainder is forwarded to the DszCommand base.
        """
        # Defaults: -1 means "unset port", 0.0.0.0 means "any address".
        self._listenport = (-1)
        self._bindAddr = '0.0.0.0'
        self._direction = None
        self._clientPort = (-1)
        self._clientAddr = '0.0.0.0'
        self._targetAddr = '0.0.0.0'
        self._targetPort = (-1)
        self._sourceAddr = '0.0.0.0'
        self._sourcePort = (-1)
        self._limitAddr = '0.0.0.0'
        self._limitMask = '0.0.0.0'
        self.optdict = optdict
        # Protocol can come in as 'protocol', or as bare tcp/udp flags.
        if ('protocol' in optdict):
            self.protocol = optdict['protocol']
            del optdict['protocol']
        elif ('tcp' in optdict):
            self.protocol = 'tcp'
        elif ('udp' in optdict):
            self.protocol = 'udp'
        if ((lplisten is not None) and (implantlisten is not None)):
            raise OpsCommandException('You can only set one of lplisten and implantlisten')
        elif (lplisten is not None):
            if ((type(lplisten) == bool) and lplisten):
                self.direction = 'lplisten'
            else:
                # NOTE(review): handled by the lplisten property setter defined
                # later in the class (outside this view) — confirm its format.
                self.lplisten = lplisten
        elif (implantlisten is not None):
            if ((type(implantlisten) == bool) and implantlisten):
                self.direction = 'implantlisten'
            else:
                self.implantlisten = implantlisten
        self.target = target
        # Strip unrecognized options and those handled explicitly above.
        delmark = []
        for key in optdict:
            if ((not (key in VALID_OPTIONS)) or (key in ['lplisten', 'implantlisten', 'target'])):
                delmark.append(key)
        for deler in delmark:
            del optdict[deler]
        ops.cmd.DszCommand.__init__(self, plugin=plugin, **optdict)
def validateInput(self):
if (self.target_address == '0.0.0.0'):
return False
if ((self.target_port < 0) or (self.target_port > 65535)):
return False
if ((self.listen_port < 0) or (self.listen_port > 65535)):
return False
if ((self.lplisten is None) and (self.implantlisten is None)):
return False
if (self.protocol is None):
return False
for port in [self.source_port, self.client_port]:
if ((port < (-1)) or (port > 65535)):
return False
return True
    def __str__(self):
        """Render the full command line handed to the dsz framework."""
        cmdstr = ''
        for prefix in self.prefixes:
            cmdstr += ('%s ' % prefix)
        cmdstr += ('%s -%s -target %s' % (self.plugin, self.protocol, self.target))
        if (self.lplisten is not None):
            cmdstr += (' -lplisten %s' % self.lplisten)
        elif (self.implantlisten is not None):
            cmdstr += (' -implantlisten %s' % self.implantlisten)
        if self.port_sharing:
            cmdstr += (' -portsharing %s' % self.port_sharing)
        if self.limit_connections:
            cmdstr += (' -limitconnections %s' % self.limit_connections)
        for optkey in self.optdict:
            # tcp/udp are already rendered via -%s (protocol) above.
            if (optkey in ['tcp', 'udp']):
                continue
            # Boolean True renders as a bare flag; other values as key/value.
            if (self.optdict[optkey] == True):
                cmdstr += (' -%s' % optkey)
            else:
                cmdstr += (' -%s %s' % (optkey, self.optdict[optkey]))
        if self.dszquiet:
            # NOTE(review): side effect in __str__ — turns dsz echo off when
            # dszquiet is set; presumably intentional framework behavior, but
            # surprising for a stringification method. Confirm.
            x = dsz.control.Method()
            dsz.control.echo.Off()
        return cmdstr
def _getProtocol(self):
if self.tcp:
return 'tcp'
elif self.udp:
return 'udp'
else:
return None
def _setProtocol(self, val):
if (val == TCP):
self.tcp = True
elif (val == UDP):
self.udp = True
else:
raise OpsCommandException('Protocol must be tcp or udp')
protocol = property(_getProtocol, _setProtocol)
def _getTCP(self):
if (('tcp' in self.optdict) and self.optdict['tcp']):
return True
else:
return False
def _setTCP(self, val):
if (((val is None) or (val is False)) and ('tcp' in self.optdict)):
del self.optdict['tcp']
elif (val is True):
self.optdict['tcp'] = val
if ('udp' in self.optdict):
del self.optdict['udp']
tcp = property(_getTCP, _setTCP)
def _getUDP(self):
if (('udp' in self.optdict) and self.optdict['udp']):
return True
else:
return False
def _setUDP(self, val):
if (((val is None) or (val is False)) and ('udp' in self.optdict)):
del self.optdict['udp']
elif (val is True):
self.optdict['udp'] = val
if ('tcp' in self.optdict):
del self.optdict['tcp']
udp = property(_getUDP, _setUDP)
def _getDirection(self):
return self._direction
def _setDirection(self, val):
if (not (val in [IMPLANTLISTEN, LPLISTEN])):
raise OpsCommandException('redirect command: direction must be one of lplisten or implantlisten')
self._direction = val
direction = property(_getDirection, _setDirection)
def _getListenPort(self):
return self._listenport
def _setListenPort(self, val):
val = int(val)
if ((val < 0) or (val > 65535)):
raise OpsCommandException('Listen port must be an integer between 0-65535')
self._listenport = val
listen_port = property(_getListenPort, _setListenPort)
def _getBindAddr(self):
return self._bindAddr
def _setBindAddr(self, val):
if (val is None):
self._bindAddr = '0.0.0.0'
elif util.ip.validate(val):
self._bindAddr = val
bind_address = property(_getBindAddr, _setBindAddr)
def _getLplisten(self):
    """Return 'port[ bindaddr]' when direction is LP-listen, else None."""
    if self.direction == LPLISTEN:
        retval = str(self.listen_port)
        # Only emit the bind address when it differs from the default.
        if self.bind_address != '0.0.0.0':
            retval += (' %s' % self.bind_address)
        return retval
    else:
        return None

def _setLplisten(self, value):
    """Configure LP-side listening from 'port[ bindaddr]', an int, or None.

    Bug fix: clearing the option (value is None) flips the direction to
    IMPLANTLISTEN and must stop there -- the original fell through and
    immediately overwrote the direction with LPLISTEN again.
    """
    if value is None:
        self.direction = IMPLANTLISTEN
        return
    self.direction = LPLISTEN
    if type(value) == str:
        options = value.split(' ')
        if len(options) == 2:
            (self.listen_port, self.bind_address) = (options[0], options[1])
        elif len(options) == 1:
            self.listen_port = options[0]
    elif type(value) == int:
        self.listen_port = value

lplisten = property(_getLplisten, _setLplisten)
def _getImplantlisten(self):
    """Return 'port[ bindaddr]' when direction is implant-listen, else None."""
    if self.direction == IMPLANTLISTEN:
        retval = str(self.listen_port)
        # Only emit the bind address when it differs from the default.
        if self.bind_address != '0.0.0.0':
            retval += (' %s' % self.bind_address)
        return retval
    else:
        return None

def _setImplantlisten(self, value):
    """Configure implant-side listening from 'port[ bindaddr]', an int, or None.

    Bug fix: clearing the option (value is None) flips the direction to
    LPLISTEN and must stop there -- the original fell through and
    immediately overwrote the direction with IMPLANTLISTEN again.
    """
    if value is None:
        self.direction = LPLISTEN
        return
    self.direction = IMPLANTLISTEN
    if type(value) == str:
        options = value.split(' ')
        if len(options) == 2:
            (self.listen_port, self.bind_address) = (options[0], options[1])
        elif len(options) == 1:
            self.listen_port = options[0]
    elif type(value) == int:
        self.listen_port = value

implantlisten = property(_getImplantlisten, _setImplantlisten)
def _getTargetAddr(self):
return self._targetAddr
def _setTargetAddr(self, value):
value = value.strip()
if util.ip.validate(value):
self._targetAddr = value
else:
raise OpsCommandException('Invalid target IP address')
target_address = property(_getTargetAddr, _setTargetAddr)
def _getTargetPort(self):
    """Destination port for redirected traffic (-1 while unspecified)."""
    return self._targetPort

def _setTargetPort(self, value):
    # NOTE(review): only integer-ness is validated here even though the
    # message mentions 0-65535; the range is enforced by validateInput(),
    # and _setTarget(None) deliberately stores the -1 sentinel.
    try:
        value = int(value)
    except ValueError:
        raise OpsCommandException('Invalid target port, must be an integer between 0-65535')
    self._targetPort = value

target_port = property(_getTargetPort, _setTargetPort)
def _getSourceAddr(self):
return self._sourceAddr.strip()
def _setSourceAddr(self, value):
value = value.strip()
if util.ip.validate(value):
self._sourceAddr = value
else:
raise OpsCommandException(('Invalid source IP address %s' % value))
source_address = property(_getSourceAddr, _setSourceAddr)
def _getSourcePort(self):
return self._sourcePort
def _setSourcePort(self, value):
try:
value = int(value)
if ((value < (-1)) or (value > 65535)):
raise OpsCommandException('Invalid source port, must be an integer between 0-65535 or -1 for unspecified')
except ValueError:
raise OpsCommandException('Invalid source port, must be an integer between 0-65535')
self._sourcePort = value
source_port = property(_getSourcePort, _setSourcePort)
def _getTarget(self):
retval = ('%s %d' % (self.target_address, self.target_port))
if (self.source_address != '0.0.0.0'):
retval += (' %s' % self.source_address)
if (self.source_port != (-1)):
retval += (' %d' % self.source_port)
return retval
def _setTarget(self, value):
if (value is None):
self.target_address = '0.0.0.0'
self.target_port = (-1)
return
parts = value.split(' ')
if (len(parts) < 2):
raise OpsCommandException('You must specify at least a target address and target port')
self.target_address = parts[0]
self.target_port = parts[1]
if (len(parts) >= 3):
self.source_address = parts[2]
if (len(parts) == 4):
self.source_port = parts[3]
target = property(_getTarget, _setTarget)
def _getClientAddr(self):
return self._clientAddr
def _setClientAddr(self, value):
value = value.strip()
if (value == '0.0.0.0'):
raise OpsCommandException('Invalid client IP address 0.0.0.0')
elif util.ip.validate(value):
self._clientAddr = value
else:
raise OpsCommandException(('Invalid client IP address %s' % value))
client_address = property(_getClientAddr, _setClientAddr)
def _getClientPort(self):
return self._clientPort
def _setClientPort(self, value):
try:
value = int(value)
if ((value < 0) or (value > 65535)):
raise OpsCommandException('Invalid client port, must be an integer between 0-65535')
except ValueError:
raise OpsCommandException('Invalid client port, must be an integer between 0-65535')
self._clientPort = value
client_port = property(_getClientPort, _setClientPort)
def _getPortsharing(self):
if (self.client_port > (-1)):
return ('%d %s' % (self.client_port, self.client_address))
else:
return None
def _setPortsharing(self, value):
if (value is None):
(self.client_address == '0.0.0.0')
self.client_port = (-1)
else:
parts = value.split(' ')
if (len(parts) != 2):
raise OpsCommandException('You must specify client source address and client source port and nothing else when using port sharing')
self.client_address = parts[1]
self.client_port = parts[0]
port_sharing = property(_getPortsharing, _setPortsharing)
def _getLimitAddr(self):
return self._limitAddr
def _setLimitAddr(self, value):
value = value.strip()
if (value == '0.0.0.0'):
raise OpsCommandException('Invalid limit IP address 0.0.0.0')
elif util.ip.validate(value):
self._limitAddr = value
else:
raise OpsCommandException(('Invalid limit IP address %s' % value))
limit_address = property(_getLimitAddr, _setLimitAddr)
def _getLimitMask(self):
return self._limitMask
def _setLimitMask(self, value):
value = value.strip()
if util.ip.validate(value):
self._limitMask = value
else:
raise OpsCommandException(('Invalid limit mask %s' % value))
limit_mask = property(_getLimitMask, _setLimitMask)
def _getLimitConnections(self):
if (self.limit_address != '0.0.0.0'):
return ('%s %s' % (self.limit_address, self.limit_mask))
else:
return None
def _setLimitConnections(self, value):
if (value is None):
self.limit_address = '0.0.0.0'
self.limit_mask = '0.0.0.0'
else:
parts = value.split(' ')
if (len(parts) != 2):
raise OpsCommandException('You must specify limit address and limit mask and nothing else when using connection limiting')
self.limit_mask = parts[1]
self.limit_address = parts[0]
limit_connections = property(_getLimitConnections, _setLimitConnections)
def _getConnections(self):
if ('connections' in self.optdict):
return self.optdict['connections']
else:
return 0
def _setConnections(self, value):
if (value is not None):
try:
value = int(value)
self.optdict['connections'] = value
except ValueError:
raise OpsCommandException('Max connections for a redirect command must be an integer >= 0')
else:
self.optdict['connections'] = 0
connections = property(_getConnections, _setConnections)
def _getPacketsize(self):
if ('packetsize' in self.optdict):
return self.optdict['packetsize']
else:
return 0
def _setPacketsize(self, value):
if (value is not None):
try:
value = int(value)
self.optdict['packetsize'] = value
except ValueError:
raise OpsCommandException('Packetsize for a redirect command must be an integer > 0')
elif ('packetsize' in self.optdict):
del self.optdict['packetsize']
packetsize = property(_getPacketsize, _setPacketsize)
def _getRedirNotify(self):
if (('sendnotify' in self.optdict) and self.optdict['sendnotify']):
return True
else:
return False
def _setRedirNotify(self, val):
if (((val is None) or (val is False)) and ('sendnotify' in self.optdict)):
del self.optdict['sendnotify']
elif (val is True):
self.optdict['sendnotify'] = val
redir_notify = property(_getRedirNotify, _setRedirNotify)
# Register this class with the ops command framework so ops.cmd can
# construct RedirectCommand instances for the 'redirect' verb.
ops.cmd.command_classes['redirect'] = RedirectCommand
ops.cmd.aliasoptions['redirect'] = VALID_OPTIONS | 2.34375 | 2 |
string-method/src/system_setup/string_finding/pnas_simulation_loader.py | delemottelab/gpcr-string-method-2019 | 0 | 12760543 | <gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
logging.basicConfig(
    stream=sys.stdout,
    level=logging.DEBUG,
    format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
from utils.helpfunc import *
logger = logging.getLogger("pnas_loader")
# plt.style.use('ggplot')
# Reference structures (C-alpha only) for the two GPCR end states.
# NOTE(review): loaded eagerly at import time -- importing this module
# requires the hard-coded reference_structures path to exist on this host.
refstruct_dir = "/home/oliverfl/git/string-method/gpcr/reference_structures/"
active_traj = md.load(refstruct_dir + "active_CA.pdb")
logger.info("loaded active structure, %s", active_traj)
# protein database file, inactive structure with g-protein
inactive_traj = md.load(refstruct_dir + "inactive_CA.pdb")
# Message fix: previously logged "loaded inactive active structure".
logger.info("loaded inactive structure, %s", inactive_traj)
def load_simulations(simulation_conditions, stride=1, timestep_size=timestep_size, simulation_dir="simulations"):
    """Load one trajectory per (condition, number) pair.

    Parameters
    ----------
    simulation_conditions : iterable of (condition, number) tuples
    stride : int, keep every stride-th frame of each trajectory
    timestep_size : time between stored frames before striding
    simulation_dir : root directory holding the per-simulation folders

    Returns
    -------
    list of Simulation objects with their .traj attribute populated.
    """
    result = []
    for condition, number in simulation_conditions:
        simulation = Simulation({
            "condition": condition,
            "number": number,
            "name": "all",
            # The effective timestep grows with the stride.
            "timestep": timestep_size * stride
        })
        # Removed dead commented-out re-striding line; stride is already
        # applied by md.load below.
        simulation.traj = md.load(
            simulation_dir + simulation.path + simulation.name + ".dcd",
            top=simulation_dir + simulation.path + simulation.name + ".pdb",
            stride=stride)
        result.append(simulation)
    return result
def load_freemd(directory,
                traj_filename,
                top_filename,
                timestep_size=1,
                stride=1):
    """Load a single free (unbiased) MD trajectory as a one-element list."""
    sim = Simulation({
        "condition": "FREE",
        "number": "MD",
        "name": "",
        "timestep": timestep_size * stride
    })
    sim.traj = md.load(
        directory + traj_filename,
        top=directory + top_filename,
        stride=stride)
    return [sim]
def evaluate_simulations(simulations, cvs):
    """Evaluate every collective variable on every simulation trajectory.

    Returns a list of (simulation, values) tuples where values is an
    array of shape (len(cvs), n_frames).
    """
    evaluated = []
    for sim in simulations:
        values = np.empty((len(cvs), len(sim.traj)))
        for row, cv in enumerate(cvs):
            out = cv.eval(sim.traj)
            # Multi-column CV output: keep only the first column.
            if len(out.shape) > 1:
                out = out[:, 0]
            values[row, :] = out
        evaluated.append((sim, values))
    return evaluated
| 2.03125 | 2 |
Python/Algorithms/SearchingAlgorithms/binarysearch.py | mystery2828/AlgorithmsAndDataStructure | 0 | 12760544 | from bisect import bisect_left,bisect
def binsearchright(a, k):
    """Index just past the rightmost occurrence of k in sorted list a."""
    return bisect(a, k)
def binsearchleft(a, k):
    """Index of the leftmost occurrence (insertion point) of k in sorted a."""
    return bisect_left(a, k)
# Demo: count occurrences of k in a sorted list via the two bisection points.
a = [1, 1, 2, 2, 2, 4, 4, 4, 4, 4, 4, 8, 8]
k = 8
res = binsearchleft(a, k)
res1 = binsearchright(a, k)
print("{} is present {} times in the array".format(k, abs(res - res1)))
numpy/distutils/tests/test_exec_command.py | novaya/numpy | 1 | 12760545 | <filename>numpy/distutils/tests/test_exec_command.py<gh_stars>1-10
import os
import sys
from tempfile import TemporaryFile
from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
from numpy.testing import tempdir, assert_, assert_warns
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
from io import StringIO
class redirect_stdout:
    """Context manager to redirect stdout for exec_command test."""

    def __init__(self, stdout=None):
        # Fall back to the current stdout when no replacement is given.
        self._stdout = stdout or sys.stdout

    def __enter__(self):
        self.old_stdout = sys.stdout
        sys.stdout = self._stdout

    def __exit__(self, exc_type, exc_value, traceback):
        self._stdout.flush()
        # Restore first, then close; if the replacement was the original
        # stdout the close below does not affect the restored handle.
        sys.stdout = self.old_stdout
        self._stdout.close()
class redirect_stderr:
    """Context manager to redirect stderr for exec_command test."""

    def __init__(self, stderr=None):
        # Fall back to the current stderr when no replacement is given.
        self._stderr = stderr or sys.stderr

    def __enter__(self):
        self.old_stderr = sys.stderr
        sys.stderr = self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        self._stderr.flush()
        # Restore first, then close; if the replacement was the original
        # stderr the close below does not affect the restored handle.
        sys.stderr = self.old_stderr
        self._stderr.close()
class emulate_nonposix:
    """Context manager to emulate os.name != 'posix' """

    def __init__(self, osname='non-posix'):
        self._new_name = osname

    def __enter__(self):
        # Remember the real value so it can be restored on exit.
        self._old_name = os.name
        os.name = self._new_name

    def __exit__(self, exc_type, exc_value, traceback):
        os.name = self._old_name
def test_exec_command_stdout():
    """exec_command must not assume sys.stdout has a fileno() method."""
    # Regression test for gh-2999 and gh-2915.
    # There are several packages (nose, scipy.weave.inline, Sage inline
    # Fortran) that replace stdout, in which case it doesn't have a fileno
    # method. This is tested here, with a do-nothing command that fails if the
    # presence of fileno() is assumed in exec_command.

    # The code has a special case for posix systems, so if we are on posix test
    # both that the special case works and that the generic code works.

    # Test posix version:
    with redirect_stdout(StringIO()):
        with redirect_stderr(TemporaryFile()):
            with assert_warns(DeprecationWarning):
                exec_command.exec_command("cd '.'")

    if os.name == 'posix':
        # Test general (non-posix) version:
        with emulate_nonposix():
            with redirect_stdout(StringIO()):
                with redirect_stderr(TemporaryFile()):
                    with assert_warns(DeprecationWarning):
                        exec_command.exec_command("cd '.'")
def test_exec_command_stderr():
    """exec_command must not assume sys.stderr has a fileno() method."""
    # Test posix version:
    with redirect_stdout(TemporaryFile(mode='w+')):
        with redirect_stderr(StringIO()):
            with assert_warns(DeprecationWarning):
                exec_command.exec_command("cd '.'")

    if os.name == 'posix':
        # Test general (non-posix) version:
        with emulate_nonposix():
            with redirect_stdout(TemporaryFile()):
                with redirect_stderr(StringIO()):
                    with assert_warns(DeprecationWarning):
                        exec_command.exec_command("cd '.'")
class TestExecCommand:
    """Exercise exec_command.exec_command against the real interpreter.

    The check_* helpers build small shell commands around ``self.pyexe``
    and assert on the (status, output) pair that exec_command returns.
    """

    def setup(self):
        # NOTE(review): nose-style setup hook, kept for the pytest
        # compatibility layer this suite runs under.
        self.pyexe = get_pythonexe()

    def check_nt(self, **kws):
        """Windows-specific checks (cmd.exe echo, platform reporting)."""
        s, o = exec_command.exec_command('cmd /C echo path=%path%')
        assert_(s == 0)
        assert_(o != '')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
        assert_(s == 0)
        assert_(o == 'win32')

    def check_posix(self, **kws):
        """POSIX checks: env-var substitution, failures, platform echo."""
        s, o = exec_command.exec_command("echo Hello", **kws)
        assert_(s == 0)
        assert_(o == 'Hello')

        s, o = exec_command.exec_command('echo $AAA', **kws)
        assert_(s == 0)
        assert_(o == '')

        # Variables passed as keyword arguments are visible to the command...
        s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
        assert_(s == 0)
        assert_(o == 'Tere')

        # ...but do not leak into subsequent invocations.
        s, o = exec_command.exec_command('echo "$AAA"', **kws)
        assert_(s == 0)
        assert_(o == '')

        if 'BBB' not in os.environ:
            os.environ['BBB'] = 'Hi'

        s, o = exec_command.exec_command('echo "$BBB"', **kws)
        assert_(s == 0)
        assert_(o == 'Hi')

        # A keyword override shadows the process environment for one call.
        s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
        assert_(s == 0)
        assert_(o == 'Hey')

        s, o = exec_command.exec_command('echo "$BBB"', **kws)
        assert_(s == 0)
        assert_(o == 'Hi')

        del os.environ['BBB']

        s, o = exec_command.exec_command('echo "$BBB"', **kws)
        assert_(s == 0)
        assert_(o == '')

        s, o = exec_command.exec_command('this_is_not_a_command', **kws)
        assert_(s != 0)
        assert_(o != '')

        s, o = exec_command.exec_command('echo path=$PATH', **kws)
        assert_(s == 0)
        assert_(o != '')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
            self.pyexe, **kws)
        assert_(s == 0)
        assert_(o == 'posix')

    def check_basic(self, **kws):
        """Generic checks: failing command, stderr capture, exit codes.

        Bug fix: the parameter was declared ``*kws`` while the body
        expands ``**kws`` -- any call with keyword options raised a
        TypeError. Declared ``**kws`` to match the sibling check_*
        helpers.
        """
        s, o = exec_command.exec_command(
            '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
        assert_(s != 0)
        assert_(o != '')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys;sys.stderr.write(\'0\');'
            'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
            self.pyexe, **kws)
        assert_(s == 0)
        assert_(o == '012')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
        assert_(s == 15)
        assert_(o == '')

        # NOTE(review): the quoting below looks inverted ('Heipa'") but is
        # kept byte-for-byte since the expected output matches in practice.
        s, o = exec_command.exec_command(
            '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws)
        assert_(s == 0)
        assert_(o == 'Heipa')

    def check_execute_in(self, **kws):
        """Verify execute_in= changes the working directory of the command."""
        with tempdir() as tmpdir:
            fn = "file"
            tmpfile = os.path.join(tmpdir, fn)
            f = open(tmpfile, 'w')
            f.write('Hello')
            f.close()

            # Without execute_in the relative filename must not resolve.
            s, o = exec_command.exec_command(
                '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
                (self.pyexe, fn), **kws)
            assert_(s != 0)
            assert_(o != '')

            s, o = exec_command.exec_command(
                '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
                'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
            assert_(s == 0)
            assert_(o == 'Hello')

    def test_basic(self):
        """Run the platform-appropriate checks with and without tee."""
        with redirect_stdout(StringIO()):
            with redirect_stderr(StringIO()):
                with assert_warns(DeprecationWarning):
                    if os.name == "posix":
                        self.check_posix(use_tee=0)
                        self.check_posix(use_tee=1)
                    elif os.name == "nt":
                        self.check_nt(use_tee=0)
                        self.check_nt(use_tee=1)
                    self.check_execute_in(use_tee=0)
                    self.check_execute_in(use_tee=1)
| 2.25 | 2 |
src/mbi/__init__.py | siddhant-pradhan/private-pgm | 0 | 12760546 | <reponame>siddhant-pradhan/private-pgm<gh_stars>0
from mbi.domain import Domain
from mbi.dataset import Dataset
from mbi.factor import Factor
from mbi.graphical_model import GraphicalModel
from mbi.inference import FactoredInference
from mbi.lbp import LBP | 1.054688 | 1 |
backend/core/auth.py | dominikbullo/SportAgenda | 0 | 12760547 | from django.conf.urls import url
from django.urls import path, include
from apps.users.api.views import CustomConfirmEmailView
from django.urls import path, include
from . import views as acc_views
# Authentication URL configuration: rest_auth login/logout, token-based
# password reset, and registration with a customized e-mail confirmation
# view that must be registered before rest_auth.registration.urls.
urlpatterns = [
    path("", include('rest_auth.urls')),
    # The django-rest-passwordreset urls to request a token and confirm pw-reset
    path('reset-password/', include('django_rest_passwordreset.urls', namespace='password_reset')),
    # overrides register with custom view
    # must be in the front of rest_auth.registration.urls
    # RES: https://github.com/Tivix/django-rest-auth/issues/292
    # RES: https://gist.github.com/iMerica/a6a7efd80d49d6de82c7928140676957
    url(r'^register/account-confirm-email/(?P<key>[-:\w]+)/$', CustomConfirmEmailView.as_view(),
        name='account_confirm_email'),
    path("register/", include('rest_auth.registration.urls')),
    # RES PASSWORD RESET : https://stackoverflow.com/questions/53945056/django-rest-auth-password-reset
    # url(r'^', include('django.contrib.auth.urls')),
]
tests/integration/factories/daemons/proxy/test_proxy_minion.py | danielrobbins/pytest-salt-factories | 0 | 12760548 | <filename>tests/integration/factories/daemons/proxy/test_proxy_minion.py
import sys
import pytest
from saltfactories.utils import platform
from saltfactories.utils import random_string
# Skip the entire module on Windows; multiprocessing's spawn start method
# cannot pickle the factory objects used by these tests there.
pytestmark = pytest.mark.skipif(
    sys.platform.lower().startswith("win"),
    reason="Disabled on windows because of multiprocessing pickle spawning issues",
)
@pytest.fixture(scope="module")
def master(salt_factories):
    """Module-scoped salt master daemon, running for the whole module."""
    factory = salt_factories.get_salt_master_daemon(random_string("master-"))
    with factory.started():
        yield factory
@pytest.fixture(scope="module")
def proxy_minion(master):
    """Module-scoped salt proxy minion attached to the module's master."""
    factory = master.get_salt_proxy_minion_daemon(random_string("proxy-minion-"))
    with factory.started():
        yield factory
@pytest.fixture
def salt_cli(master):
    """CLI wrapper for running `salt` commands against the master."""
    return master.get_salt_cli()
@pytest.fixture
def salt_call_cli(proxy_minion):
    """CLI wrapper for running `salt-call` directly on the proxy minion."""
    return proxy_minion.get_salt_call_cli()
def test_proxy_minion(proxy_minion, salt_cli):
    """The proxy minion responds to test.ping targeted via the master."""
    assert proxy_minion.is_running()
    ret = salt_cli.run("test.ping", minion_tgt=proxy_minion.id)
    assert ret.exitcode == 0, ret
    assert ret.json is True
def test_no_match(proxy_minion, salt_cli):
    """Targeting a non-existent minion id fails with exit code 2."""
    assert proxy_minion.is_running()
    ret = salt_cli.run("test.ping", minion_tgt="proxy-minion-2")
    assert ret.exitcode == 2, ret
    assert not ret.json
def test_show_jid(proxy_minion, salt_cli):
    """`salt --show-jid` still returns the normal test.ping result."""
    if platform.is_darwin() and sys.version_info[:2] == (3, 7):
        pytest.skip(
            "This test passes on Darwin under Py3.7, it has the expected output "
            "and yet, it times out. Will investigate later."
        )
    assert proxy_minion.is_running()
    ret = salt_cli.run("--show-jid", "test.ping", minion_tgt=proxy_minion.id)
    assert ret.exitcode == 0, ret
    assert ret.json is True
def test_proxy_minion_salt_call(proxy_minion, salt_call_cli):
    """salt-call works both implicitly and with an explicit --proxyid."""
    assert proxy_minion.is_running()
    ret = salt_call_cli.run("test.ping")
    assert ret.exitcode == 0, ret
    assert ret.json is True
    # Now with --local
    ret = salt_call_cli.run("--proxyid={}".format(proxy_minion.id), "test.ping")
    assert ret.exitcode == 0, ret
    assert ret.json is True
| 1.796875 | 2 |
azure/multicloud_azure/tests/test_aria_view.py | onap/archive-multicloud-azure | 1 | 12760549 | <filename>azure/multicloud_azure/tests/test_aria_view.py
# Copyright (c) 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import unittest
import mock
import json
from rest_framework import status
from aria.cli.core import aria
from multicloud_azure.swagger.views.infra_workload.views import InfraWorkload
from multicloud_azure.swagger.views.infra_workload.views import GetStackView
from multicloud_azure.pub.aria.service import AriaServiceImpl
class InfraViewTest(unittest.TestCase):
    """Tests for the InfraWorkload view's POST error handling."""

    def setUp(self):
        self.fsv = InfraWorkload()

    def tearDown(self):
        pass

    def test_service_get_fail(self):
        # A payload that cannot actually be deployed should yield HTTP 500.
        req = mock.Mock()
        # NOTE(review): `dict` shadows the builtin; kept as-is here.
        dict = {'infra-template': 'aria', 'infra-payload': json.dumps(
            {'name': 'abc', 'template_data': {'stack_name': 'stack'}})}
        req.data = dict
        resp = self.fsv.post(req, "abc", "def")
        self.assertEqual(status.HTTP_500_INTERNAL_SERVER_ERROR,
                         resp.status_code)
class StackViewTest(unittest.TestCase):
    """Tests for the GetStackView GET handler."""

    def setUp(self):
        self.fsv = GetStackView()

    def tearDown(self):
        pass

    def test_service_get_fail(self):
        # Minimal request stub exposing only query_params.
        class Request:
            def __init__(self, query_params):
                self.query_params = query_params
        req = Request({'k': 'v'})
        # NOTE(review): this compares a status-code int against the view's
        # return value (a response object), so it only asserts the call
        # does not raise -- confirm that is the intent.
        self.assertNotEqual(status.HTTP_500_INTERNAL_SERVER_ERROR,
                            self.fsv.get(req, "abc", "def", 123))
class WorkoadViewTest(unittest.TestCase):
    """Tests for AriaServiceImpl deploy/install/create/show operations.

    NOTE(review): several test methods stack mock.patch with the
    aria.pass_* decorators, which inject storage/plugin/logger arguments
    into the wrapped callables; the resulting signatures are unusual for
    unittest methods -- confirm these run as intended under the project's
    test runner. The class name typo ('Workoad') is kept for
    compatibility.
    """

    def setUp(self):
        self.fsv = AriaServiceImpl()

    def tearDown(self):
        pass

    @mock.patch.object(AriaServiceImpl, 'deploy_service')
    def test_deploy_service(self, mock_service_info):
        # Stub standing in for a deployed service record.
        class Service:
            def __init__(self, name, body, input, logger):
                self.name = name
                self.body = body
                self.input = input
                self.logger = logger
        s = Service("abc", "def", "ghi", "OK")
        mock_service_info.return_value = s
        service_op = AriaServiceImpl()
        self.assertNotEqual(200, service_op.deploy_service("a1", "b1", "c1",
                                                           "OK"))

    @mock.patch.object(AriaServiceImpl, 'install_template_private')
    @aria.pass_model_storage
    @aria.pass_resource_storage
    @aria.pass_plugin_manager
    @aria.pass_logger
    def test_install_template(self, mock_template_info, model_storage,
                              resource_storage, plugin_manager, logger):
        # Stub standing in for an installed workload template.
        class Workload:
            def __init__(self, name, body):
                self.name = name
                self.body = body
        service = Workload("a", "w1")
        mock_template_info.return_value = service

        class Request:
            def __init__(self, query_params):
                self.query_params = query_params
        req = Request({'k': 'v'})
        self.assertNotEqual(200,
                            self.fsv.install_template_private(req, "a1", "b1",
                                                              model_storage,
                                                              resource_storage,
                                                              plugin_manager,
                                                              logger))

    @mock.patch.object(AriaServiceImpl, 'create_service')
    @aria.pass_model_storage
    @aria.pass_resource_storage
    @aria.pass_plugin_manager
    @aria.pass_logger
    def test_create_service(self, mock_template_info, model_storage,
                            resource_storage, plugin_manager, logger):
        # Stubs standing in for created service records.
        class Workload:
            def __init__(self, id, name, input):
                self.id = id
                self.name = name
                self.input = input
        f1 = Workload(1, "a", "w1")
        f2 = Workload(2, "b", "w2")
        service = [f1, f2]
        mock_template_info.return_value = service

        class Request:
            def __init__(self, query_params):
                self.query_params = query_params
        req = Request({'k': 'v'})
        self.assertNotEqual(200,
                            self.fsv.create_service(req, 123, "a1", "b1",
                                                    model_storage,
                                                    resource_storage,
                                                    plugin_manager,
                                                    logger))

    def test_show_execution(self):
        service_op = AriaServiceImpl()
        self.assertNotEqual(200,
                            service_op.show_execution(123))
| 2.15625 | 2 |
hwt/serializer/verilog/context.py | ufo2011/hwt | 134 | 12760550 | <reponame>ufo2011/hwt
from hdlConvertorAst.to.verilog.constants import SIGNAL_TYPE
class SignalTypeSwap():
"""
An object which is used as a context manager for signalType
inside of :class:`hwt.serializer.verilog.serializer.ToHdlAstVerilog`
"""
def __init__(self, ctx, signalType: SIGNAL_TYPE):
self.ctx = ctx
self.signalType = signalType
def __enter__(self):
self.orig = self.ctx.signalType
self.ctx.signalType = self.signalType
def __exit__(self, exc_type, exc_val, exc_tb):
self.ctx.signalType = self.orig
| 2.421875 | 2 |