_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def RawBytesToScriptHash(raw):
    """
    Get a hash of the provided raw bytes using the ripemd160 algorithm.

    NOTE(review): `raw` is passed through `binascii.unhexlify` first, so it
    appears to be expected as hex-encoded bytes (e.g. b'aabbcc'), not
    arbitrary binary data -- confirm with callers.

    Args:
        raw (bytes): byte array of raw bytes. e.g. b'\xAA\xBB\xCC'

    Returns:
        UInt160:
    """
    # decode hex input, Hash160 it, then decode the hex digest back to raw bytes
    rawh = binascii.unhexlify(raw)
    rawhashstr = binascii.unhexlify(bytes(Crypto.Hash160(rawh), encoding='utf-8'))
    return UInt160(data=rawhashstr)
"resource": ""
} |
def VerifyScripts(verifiable):
    """
    Verify the scripts of the provided `verifiable` object.

    Args:
        verifiable (neo.IO.Mixins.VerifiableMixin):

    Returns:
        bool: True if verification is successful. False otherwise.
    """
    try:
        hashes = verifiable.GetScriptHashesForVerifying()
    except Exception as e:
        logger.debug("couldn't get script hashes %s " % e)
        return False

    # every expected script hash must be matched by exactly one witness
    if len(hashes) != len(verifiable.Scripts):
        logger.debug(f"hash - verification script length mismatch ({len(hashes)}/{len(verifiable.Scripts)})")
        return False

    blockchain = GetBlockchain()

    for i in range(0, len(hashes)):
        verification = verifiable.Scripts[i].VerificationScript

        if len(verification) == 0:
            # no verification script supplied: synthesize an APPCALL to the
            # contract identified by the expected script hash
            sb = ScriptBuilder()
            sb.EmitAppCall(hashes[i].Data)
            verification = sb.ms.getvalue()
        else:
            # the supplied script must hash to the expected script hash
            verification_hash = Crypto.ToScriptHash(verification, unhex=False)
            if hashes[i] != verification_hash:
                logger.debug(f"hash {hashes[i]} does not match verification hash {verification_hash}")
                return False

        state_reader = GetStateReader()
        script_table = CachedScriptTable(DBCollection(blockchain._db, DBPrefix.ST_Contract, ContractState))
        # run invocation + verification scripts in a Verification-triggered engine
        engine = ApplicationEngine(TriggerType.Verification, verifiable, script_table, state_reader, Fixed8.Zero())
        engine.LoadScript(verification)
        invocation = verifiable.Scripts[i].InvocationScript
        engine.LoadScript(invocation)

        try:
            success = engine.Execute()
            state_reader.ExecutionCompleted(engine, success)
        except Exception as e:
            state_reader.ExecutionCompleted(engine, False, e)

        # verification passes only if exactly one truthy value is left on the stack
        if engine.ResultStack.Count != 1 or not engine.ResultStack.Pop().GetBoolean():
            Helper.EmitServiceEvents(state_reader)

            if engine.ResultStack.Count > 0:
                logger.debug(f"Result stack failure! Count: {engine.ResultStack.Count} bool value: {engine.ResultStack.Pop().GetBoolean()}")
            else:
                logger.debug(f"Result stack failure! Count: {engine.ResultStack.Count}")
            return False

        Helper.EmitServiceEvents(state_reader)

    return True
"resource": ""
} |
def GetName(self):
    """
    Get the asset name based on its type.

    Returns:
        str: 'NEO' or 'NEOGas' for the system assets, otherwise the stored name.
    """
    # the two system assets have fixed display names
    if self.AssetType == AssetType.GoverningToken:
        return "NEO"
    if self.AssetType == AssetType.UtilityToken:
        return "NEOGas"

    name = self.Name
    # stored names may be raw bytes; decode them for display
    if type(name) is bytes:
        return name.decode('utf-8')
    return name
"resource": ""
} |
def GenesisBlock() -> Block:
    """
    Create the GenesisBlock.

    Returns:
        BLock:
    """
    # genesis has no predecessor, so the previous hash is all zeroes
    prev_hash = UInt256(data=bytearray(32))
    timestamp = int(datetime(2016, 7, 15, 15, 8, 21, tzinfo=pytz.utc).timestamp())
    index = 0
    consensus_data = 2083236893  # Pay tribute To Bitcoin
    next_consensus = Blockchain.GetConsensusAddress(Blockchain.StandbyValidators())
    script = Witness(bytearray(0), bytearray(PUSHT))

    mt = MinerTransaction()
    mt.Nonce = 2083236893

    # issue the entire SystemShare supply to the standby validators'
    # (n/2 + 1)-of-n multisig address
    output = TransactionOutput(
        Blockchain.SystemShare().Hash,
        Blockchain.SystemShare().Amount,
        Crypto.ToScriptHash(Contract.CreateMultiSigRedeemScript(int(len(Blockchain.StandbyValidators()) / 2) + 1,
                                                                Blockchain.StandbyValidators()))
    )

    it = IssueTransaction([], [output], [], [script])

    return Block(prev_hash, timestamp, index, consensus_data,
                 next_consensus, script,
                 [mt, Blockchain.SystemShare(), Blockchain.SystemCoin(), it],
                 True)
"resource": ""
} |
def Default() -> 'Blockchain':
    """
    Get the default registered blockchain instance.

    Returns:
        obj: Currently set to `neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain`.
    """
    # lazily create the singleton; make sure the genesis block's merkle
    # root is computed before the instance is handed out
    instance = Blockchain._instance
    if instance is None:
        instance = Blockchain()
        Blockchain._instance = instance
        Blockchain.GenesisBlock().RebuildMerkleRoot()
    return instance
"resource": ""
} |
def GetConsensusAddress(validators):
    """
    Get the script hash of the consensus node.

    Args:
        validators (list): of Ellipticcurve.ECPoint's

    Returns:
        UInt160:
    """
    # m-of-n multisig where m = n - floor((n - 1) / 3), i.e. > 2/3 of validators
    total = len(validators)
    required = total - (total - 1) // 3
    redeem_script = Contract.CreateMultiSigRedeemScript(required, validators)
    return Crypto.ToScriptHash(redeem_script)
"resource": ""
} |
def GetSysFeeAmountByHeight(self, height):
    """
    Get the system fee for the specified block.

    Args:
        height (int): block height.

    Returns:
        int:
    """
    # resolve the block hash for this height, then delegate the lookup
    block_hash = self.GetBlockHash(height)
    return self.GetSysFeeAmount(block_hash)
"resource": ""
} |
def DeregisterBlockchain():
    """
    Remove the default blockchain instance and reset all class-level
    configuration back to its default values.
    """
    Blockchain.SECONDS_PER_BLOCK = 15
    Blockchain.DECREMENT_INTERVAL = 2000000
    Blockchain.GENERATION_AMOUNT = [8, 7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    Blockchain._blockchain = None
    Blockchain._validators = []
    Blockchain._genesis_block = None
    # previously assigned twice in this function; the duplicate was removed
    Blockchain._instance = None
    Blockchain._blockrequests = set()
    Blockchain._paused = False
    Blockchain.BlockSearchTries = 0
    Blockchain.CACHELIM = 4000
    Blockchain.CMISSLIM = 5
    Blockchain.LOOPTIME = .1
    Blockchain.PersistCompleted = Events()
    Blockchain.Notify = Events()
"resource": ""
} |
def config_stdio(self, log_configurations: Optional[List[LogConfiguration]] = None, default_level=logging.INFO) -> None:
    """
    Configure the stdio `StreamHandler` levels on the specified loggers.

    If no log configurations are specified then the `default_level` will be applied to all handlers.

    Args:
        log_configurations: a list of (component name, log level) tuples
        default_level: logging level to apply when no log_configurations are specified
    """
    if log_configurations:
        # only apply the requested level to each named component's handler
        for component, level in log_configurations:
            logger = self.loggers.get(self.root + component)
            if logger is None:
                raise ValueError("Failed to configure component. Invalid name: {}".format(component))
            self._restrict_output(logger, level)
    else:
        # nothing specified: apply `default_level` across all known loggers
        for logger in self.loggers.values():
            self._restrict_output(logger, default_level)
"resource": ""
} |
def getLogger(self, component_name: str = None) -> logging.Logger:
    """
    Get the logger instance matching ``component_name`` or create a new one if non-existent.

    Args:
        component_name: a neo-python component name. e.g. network, vm, db

    Returns:
        a logger for the specified component.
    """
    logger_name = self.root + (component_name or 'generic')

    existing = self.loggers.get(logger_name)
    if existing:
        return existing

    new_logger = logging.getLogger(logger_name)
    # the stdio handler only prints INFO+; the logger itself stays at
    # DEBUG so other handlers can capture more detail
    stdio_handler = logging.StreamHandler()
    stdio_handler.setFormatter(LogFormatter())
    stdio_handler.setLevel(logging.INFO)
    new_logger.addHandler(stdio_handler)
    new_logger.setLevel(logging.DEBUG)
    self.loggers[logger_name] = new_logger
    return new_logger
"resource": ""
} |
def write_log(self, message):
    """
    Write a line to the VM instruction log file.

    Args:
        message (str): string message to write to file.
    """
    # only write when logging is enabled and the file handle is usable
    log = self.log_file
    if self._is_write_log and log and not log.closed:
        log.write(message + '\n')
"resource": ""
} |
def ShowUnspentCoins(wallet, asset_id=None, from_addr=None, watch_only=False, do_count=False):
    """
    Show unspent coin objects in the wallet.

    Args:
        wallet (neo.Wallet): wallet to show unspent coins from.
        asset_id (UInt256): a bytearray (len 32) representing an asset on the blockchain.
        from_addr (UInt160): a bytearray (len 20) representing an address.
        watch_only (bool): indicate if this shows coins that are in 'watch only' addresses.
        watch_only (bool): indicate if this shows coins that are in 'watch only' addresses.
        do_count (bool): if True only show a count of unspent assets.

    Returns:
        list: a list of unspent ``neo.Wallet.Coin`` in the wallet
    """
    if wallet is None:
        print("Please open a wallet.")
        return

    # the wallet API encodes 'watch only' as flag value 64 (0 = regular)
    watch_only_flag = 64 if watch_only else 0
    if asset_id:
        unspents = wallet.FindUnspentCoinsByAsset(asset_id, from_addr=from_addr, watch_only_val=watch_only_flag)
    else:
        # BUGFIX: previously passed the raw bool (True == 1) instead of the
        # 64-valued flag, inconsistent with the asset_id branch above.
        unspents = wallet.FindUnspentCoins(from_addr=from_addr, watch_only_val=watch_only_flag)

    if do_count:
        print('\n-----------------------------------------------')
        print('Total Unspent: %s' % len(unspents))
        return unspents

    for unspent in unspents:
        print('\n-----------------------------------------------')
        print(json.dumps(unspent.ToJson(), indent=4))

    if not unspents:
        print("No unspent assets matching the arguments.")

    return unspents
"resource": ""
} |
def FromDBInstance(db_token):
    """
    Get a NEP5Token instance from a database token.

    Args:
        db_token (neo.Implementations.Wallets.peewee.Models.NEP5Token):

    Returns:
        NEP5Token: self.
    """
    # the stored hash is big-endian hex; UInt160 expects the reversed raw bytes
    script_hash_bytes = bytearray(binascii.unhexlify(db_token.ContractHash))
    script_hash_bytes.reverse()
    contract_hash = UInt160(data=script_hash_bytes)

    token = NEP5Token(script=None)
    token.SetScriptHash(contract_hash)
    token.name = db_token.Name
    token.symbol = db_token.Symbol
    token.decimals = db_token.Decimals
    return token
"resource": ""
} |
def Address(self):
    """
    Get the wallet address associated with the token.

    Returns:
        str: base58 encoded string representing the wallet address.
    """
    # compute lazily on first access and cache the result
    cached = self._address
    if cached is None:
        cached = Crypto.ToAddress(self.ScriptHash)
        self._address = cached
    return cached
"resource": ""
} |
def GetBalance(self, wallet, address, as_string=False):
    """
    Get the token balance.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        address (str): public address of the account to get the token balance of.
        as_string (bool): whether the return value should be a string. Default is False, returning an integer.

    Returns:
        int/str: token balance value as int (default), token balanace as string if `as_string` is set to True. 0 if balance retrieval failed.
    """
    # NOTE(review): the success path actually returns a Decimal, not an int,
    # despite the docstring -- confirm with callers before changing either.
    addr = PromptUtils.parse_param(address, wallet)
    if isinstance(addr, UInt160):
        addr = addr.Data
    sb = ScriptBuilder()
    # invoke the NEP-5 'balanceOf' method on the token contract
    sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'balanceOf', [addr])

    tx, fee, results, num_ops, engine_success = test_invoke(sb.ToArray(), wallet, [])
    if engine_success:
        try:
            val = results[0].GetBigInteger()
            # scale the raw integer down by the token's decimal precision
            precision_divisor = pow(10, self.decimals)
            balance = Decimal(val) / Decimal(precision_divisor)
            if as_string:
                formatter_str = '.%sf' % self.decimals
                balance_str = format(balance, formatter_str)
                return balance_str
            return balance
        except Exception as e:
            logger.error("could not get balance: %s " % e)
            traceback.print_stack()
    else:
        addr_str = Crypto.ToAddress(UInt160(data=addr))
        logger.error(
            f"Could not get balance of address {addr_str} for token contract {self.ScriptHash}. VM execution failed. Make sure the contract exists on the network and that it adheres to the NEP-5 standard")

    return 0
"resource": ""
} |
def Transfer(self, wallet, from_addr, to_addr, amount, tx_attributes=None):
    """
    Transfer a specified amount of the NEP5Token to another address.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        from_addr (str): public address of the account to transfer the given amount from.
        to_addr (str): public address of the account to transfer the given amount to.
        amount (int): quantity to send.
        tx_attributes (list): a list of TransactionAtribute objects.

    Returns:
        tuple:
            InvocationTransaction: the transaction.
            int: the transaction fee.
            list: the neo VM evaluationstack results.
    """
    if not tx_attributes:
        tx_attributes = []

    sb = ScriptBuilder()
    # NEP-5 'transfer' takes (from, to, amount)
    sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'transfer',
                                       [PromptUtils.parse_param(from_addr, wallet), PromptUtils.parse_param(to_addr, wallet),
                                        PromptUtils.parse_param(amount)])

    tx, fee, results, num_ops, engine_success = test_invoke(sb.ToArray(), wallet, [], from_addr=from_addr, invoke_attrs=tx_attributes)
    return tx, fee, results
"resource": ""
} |
def TransferFrom(self, wallet, from_addr, to_addr, amount):
    """
    Transfer a specified amount of a token from the wallet specified in the `from_addr` to the `to_addr`
    if the originator `wallet` has been approved to do so.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        from_addr (str): public address of the account to transfer the given amount from.
        to_addr (str): public address of the account to transfer the given amount to.
        amount (int): quantity to send.

    Returns:
        tuple:
            InvocationTransaction: the transaction.
            int: the transaction fee.
            list: the neo VM evaluation stack results.
    """
    # NEP-5 'transferFrom' takes (from, to, amount)
    params = [
        PromptUtils.parse_param(from_addr, wallet),
        PromptUtils.parse_param(to_addr, wallet),
        PromptUtils.parse_param(amount),
    ]
    invoke_args = [self.ScriptHash.ToString(), 'transferFrom', params]
    tx, fee, results, _num_ops, _engine_success = TestInvokeContract(wallet, invoke_args, None, True)
    return tx, fee, results
"resource": ""
} |
def Allowance(self, wallet, owner_addr, requestor_addr):
    """
    Return the amount of tokens that the `requestor_addr` account can transfer from the `owner_addr` account.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        owner_addr (str): public address of the account to transfer the given amount from.
        requestor_addr (str): public address of the account that requests the transfer.

    Returns:
        tuple:
            InvocationTransaction: the transaction.
            int: the transaction fee.
            list: the neo VM evaluation stack results.
    """
    # NEP-5 'allowance' takes (owner, spender)
    params = [
        PromptUtils.parse_param(owner_addr, wallet),
        PromptUtils.parse_param(requestor_addr, wallet),
    ]
    invoke_args = [self.ScriptHash.ToString(), 'allowance', params]
    tx, fee, results, _num_ops, _engine_success = TestInvokeContract(wallet, invoke_args, None, True)
    return tx, fee, results
"resource": ""
} |
def Mint(self, wallet, mint_to_addr, attachment_args, invoke_attrs=None):
    """
    Call the "mintTokens" function of the smart contract.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        mint_to_addr (str): public address of the account to mint the tokens to.
        attachment_args: (list): a list of arguments used to attach neo and/or gas to an invoke, eg ['--attach-gas=10.0','--attach-neo=3']
        invoke_attrs: (list): a list of TransactionAttributes to be attached to the mint transaction

    Returns:
        tuple:
            InvocationTransaction: the transaction.
            int: the transaction fee.
            list: the neo VM evaluation stack results.
    """
    # Robustness: tolerate a None attachment list (previously this raised
    # TypeError on list concatenation).
    if not attachment_args:
        attachment_args = []

    invoke_args = [self.ScriptHash.ToString(), 'mintTokens', []] + attachment_args

    tx, fee, results, num_ops, engine_success = TestInvokeContract(wallet, invoke_args, None, True, from_addr=mint_to_addr, invoke_attrs=invoke_attrs)
    return tx, fee, results
"resource": ""
} |
def CrowdsaleRegister(self, wallet, register_addresses, from_addr=None):
    """
    Register for a crowd sale.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        register_addresses (list): list of public addresses to register for the sale.
        from_addr: optional source address passed through to the invocation
            -- presumably a public address string; TODO confirm with callers.

    Returns:
        tuple:
            InvocationTransaction: the transaction.
            int: the transaction fee.
            list: the neo VM evaluation stack results.
    """
    invoke_args = [self.ScriptHash.ToString(), 'crowdsale_register',
                   [PromptUtils.parse_param(p, wallet) for p in register_addresses]]
    tx, fee, results, num_ops, engine_success = TestInvokeContract(wallet, invoke_args, None, True, from_addr)
    return tx, fee, results
"resource": ""
} |
def DeleteIndex(self, index):
    """
    Remove a spent coin based on its index.

    Args:
        index (int):
    """
    # scan all items; if several share the index, the last one found wins
    match = None
    for item in self.Items:
        if item.index == index:
            match = item
    if match:
        self.Items.remove(match)
"resource": ""
} |
def get_peers(self):
    """Get all known nodes and their 'state' """
    node = NodeLeader.Instance()
    result = {"connected": [], "unconnected": [], "bad": []}
    connected_peers = []

    for peer in node.Peers:
        result['connected'].append({"address": peer.host,
                                    "port": peer.port})
        connected_peers.append("{}:{}".format(peer.host, peer.port))

    # rsplit on the last ':' so addresses containing colons still split correctly
    for addr in node.DEAD_ADDRS:
        host, port = addr.rsplit(':', 1)
        # NOTE(review): 'bad' ports stay strings while 'unconnected' ports
        # are converted to int below -- confirm whether this is intended.
        result['bad'].append({"address": host, "port": port})

    # "UnconnectedPeers" is never used. So a check is needed to
    # verify that a given address:port does not belong to a connected peer
    for addr in node.KNOWN_ADDRS:
        host, port = addr.rsplit(':', 1)
        if addr not in connected_peers:
            result['unconnected'].append({"address": host,
                                          "port": int(port)})
    return result
"resource": ""
} |
def list_address(self):
    """Get information about all the addresses present on the open wallet"""
    def describe(addr_str):
        # an address we hold the key for is not 'watch only', and vice versa
        account = self.wallet.GetAddress(addr_str)
        return {
            "address": addr_str,
            "haskey": not account.IsWatchOnly,
            "label": None,
            "watchonly": account.IsWatchOnly,
        }

    return [describe(a) for a in self.wallet.Addresses]
"resource": ""
} |
def CreateSignatureContract(publicKey):
    """
    Create a signature contract.

    Args:
        publicKey (edcsa.Curve.point): e.g. KeyPair.PublicKey.

    Returns:
        neo.SmartContract.Contract: a Contract instance.
    """
    script = Contract.CreateSignatureRedeemScript(publicKey)
    params = b'\x00'  # single contract parameter: a signature
    # script hash of the compressed (encode_point(True)) public key
    encoded = publicKey.encode_point(True)
    pubkey_hash = Crypto.ToScriptHash(encoded, unhex=True)
    return Contract(script, params, pubkey_hash)
"resource": ""
} |
def Hash(self):
    """
    Get the hash value of the Blockbase.

    Returns:
        UInt256: containing the hash of the data.
    """
    # compute once and cache: double-SHA256 over the unsigned raw data
    if not self.__hash:
        hashdata = self.RawData()
        ba = bytearray(binascii.unhexlify(hashdata))
        hash = bin_dbl_sha256(ba)
        self.__hash = UInt256(data=hash)

    return self.__hash
"resource": ""
} |
def DeserializeUnsigned(self, reader):
    """
    Deserialize unsigned data only.

    Args:
        reader (neo.IO.BinaryReader):
    """
    # field order mirrors SerializeUnsigned and is part of the wire format;
    # it must not be changed
    self.Version = reader.ReadUInt32()
    self.PrevHash = reader.ReadUInt256()
    self.MerkleRoot = reader.ReadUInt256()
    self.Timestamp = reader.ReadUInt32()
    self.Index = reader.ReadUInt32()
    self.ConsensusData = reader.ReadUInt64()
    self.NextConsensus = reader.ReadUInt160()
"resource": ""
} |
def SerializeUnsigned(self, writer):
    """
    Serialize unsigned data only.

    Args:
        writer (neo.IO.BinaryWriter):
    """
    # field order mirrors DeserializeUnsigned and is part of the wire format;
    # it must not be changed
    writer.WriteUInt32(self.Version)
    writer.WriteUInt256(self.PrevHash)
    writer.WriteUInt256(self.MerkleRoot)
    writer.WriteUInt32(self.Timestamp)
    writer.WriteUInt32(self.Index)
    writer.WriteUInt64(self.ConsensusData)
    writer.WriteUInt160(self.NextConsensus)
"resource": ""
} |
def GetScriptHashesForVerifying(self):
    """
    Get the script hash used for verification.

    Raises:
        Exception: if the verification script is invalid, or no header could be retrieved from the Blockchain.

    Returns:
        list: with a single UInt160 representing the next consensus node.
    """
    # if this is the genesis block, we dont have a prev hash!
    if self.PrevHash.Data == bytearray(32):
        # logger.info("verificiation script %s" %(self.Script.ToJson()))
        if type(self.Script.VerificationScript) is bytes:
            return [bytearray(self.Script.VerificationScript)]
        elif type(self.Script.VerificationScript) is bytearray:
            return [self.Script.VerificationScript]
        else:
            raise Exception('Invalid Verification script')

    # non-genesis blocks verify against the previous header's NextConsensus
    prev_header = GetBlockchain().GetHeader(self.PrevHash.ToBytes())
    if prev_header is None:
        raise Exception('Invalid operation')
    return [prev_header.NextConsensus]
"resource": ""
} |
def Verify(self):
    """
    Verify block using the verification script.

    Returns:
        bool: True if valid. False otherwise.
    """
    # BUGFIX: the genesis block is valid by definition and must short-circuit
    # to True; the previous inverted check rejected every non-genesis block.
    if self.Hash.ToBytes() == GetGenesis().Hash.ToBytes():
        return True

    bc = GetBlockchain()

    # only blocks known to the chain can be verified against a stored header
    if not bc.ContainsBlock(self.Index):
        return False

    if self.Index > 0:
        prev_header = bc.GetHeader(self.PrevHash.ToBytes())
        if prev_header is None:
            return False
        # the block must directly extend its predecessor...
        if prev_header.Index + 1 != self.Index:
            return False
        # ...and time must strictly increase
        if prev_header.Timestamp >= self.Timestamp:
            return False

    # this should be done to actually verify the block
    if not Helper.VerifyScripts(self):
        return False

    return True
"resource": ""
} |
def FullTransactions(self):
    """
    Get the list of full Transaction objects.

    Note: Transactions can be trimmed to contain only the header and the hash. This will get the full data if
    trimmed transactions are found.

    Returns:
        list: of neo.Core.TX.Transaction.Transaction objects.
    """
    is_trimmed = False
    try:
        # trimmed blocks store transaction hashes (str) instead of objects
        tx = self.Transactions[0]
        if type(tx) is str:
            is_trimmed = True
    except Exception as e:
        pass

    if not is_trimmed:
        return self.Transactions

    # resolve each hash to its full transaction and cache the result in place
    txs = []
    for hash in self.Transactions:
        tx, height = GetBlockchain().GetTransaction(hash)
        txs.append(tx)
    self.Transactions = txs
    return self.Transactions
"resource": ""
} |
def Header(self):
    """
    Get the block header.

    Returns:
        neo.Core.Header:
    """
    # build lazily from this block's own fields and cache
    header = self._header
    if not header:
        header = Header(self.PrevHash, self.MerkleRoot, self.Timestamp,
                        self.Index, self.ConsensusData, self.NextConsensus, self.Script)
        self._header = header
    return header
"resource": ""
} |
def TotalFees(self):
    """
    Get the total transaction fees in the block.

    Returns:
        Fixed8:
    """
    # accumulate starting from Fixed8 zero so the result stays a Fixed8
    return sum((tx.SystemFee() for tx in self.Transactions), Fixed8.Zero())
"resource": ""
} |
def FromTrimmedData(byts):
    """
    Deserialize a block from raw bytes.

    Args:
        byts:

    Returns:
        Block:
    """
    block = Block()
    block.__is_trimmed = True
    ms = StreamManager.GetStream(byts)
    reader = BinaryReader(ms)

    block.DeserializeUnsigned(reader)
    reader.ReadByte()  # skip the 0x01 witness marker written by Trim()
    witness = Witness()
    witness.Deserialize(reader)
    block.Script = witness

    bc = GetBlockchain()
    tx_list = []
    # a trimmed block stores only tx hashes; resolve each via the blockchain
    for tx_hash in reader.ReadHashes():
        tx = bc.GetTransaction(tx_hash)[0]
        if not tx:
            raise Exception("Could not find transaction!\n Are you running code against a valid Blockchain instance?\n Tests that accesses transactions or size of a block but inherit from NeoTestCase instead of BlockchainFixtureTestCase will not work.")
        tx_list.append(tx)

    if len(tx_list) < 1:
        raise Exception("Invalid block, no transactions found for block %s " % block.Index)

    block.Transactions = tx_list

    StreamManager.ReleaseStream(ms)

    return block
"resource": ""
} |
def RebuildMerkleRoot(self):
    """Rebuild the merkle root of the block"""
    logger.debug("Rebuilding merkle root!")
    # only meaningful when the block actually carries transactions
    if self.Transactions is not None and len(self.Transactions) > 0:
        self.MerkleRoot = MerkleTree.ComputeRoot([tx.Hash for tx in self.Transactions])
"resource": ""
} |
def Trim(self):
    """
    Returns a byte array that contains only the block header and transaction hash.

    Returns:
        bytes:
    """
    ms = StreamManager.GetStream()
    writer = BinaryWriter(ms)
    self.SerializeUnsigned(writer)
    # 0x01 marker precedes the single witness script (see FromTrimmedData)
    writer.WriteByte(1)
    self.Script.Serialize(writer)
    # only the transaction hashes are kept; full tx data is dropped
    writer.WriteHashes([tx.Hash.ToBytes() for tx in self.Transactions])
    retVal = ms.ToArray()
    StreamManager.ReleaseStream(ms)
    return retVal
"resource": ""
} |
def Verify(self, completely=False):
    """
    Verify the integrity of the block.

    Args:
        completely: (Not functional at this time).

    Returns:
        bool: True if valid. False otherwise.
    """
    res = super(Block, self).Verify()
    if not res:
        return False

    from neo.Blockchain import GetBlockchain, GetConsensusAddress

    # first TX has to be a miner transaction. other tx after that cant be miner tx
    if self.Transactions[0].Type != TransactionType.MinerTransaction:
        return False
    for tx in self.Transactions[1:]:
        if tx.Type == TransactionType.MinerTransaction:
            return False

    if completely:
        bc = GetBlockchain()

        if self.NextConsensus != GetConsensusAddress(bc.GetValidators(self.Transactions).ToArray()):
            return False

        for tx in self.Transactions:
            # NOTE(review): this branch is a no-op (`pass`); full per-tx
            # verification is not implemented and the code below always raises
            if not tx.Verify():
                pass
        logger.error("Blocks cannot be fully validated at this moment. please pass completely=False")
        raise NotImplementedError()
        # do this below!
        # foreach(Transaction tx in Transactions)
        # if (!tx.Verify(Transactions.Where(p = > !p.Hash.Equals(tx.Hash)))) return false;
        # Transaction tx_gen = Transactions.FirstOrDefault(p= > p.Type == TransactionType.MinerTransaction);
        # if (tx_gen?.Outputs.Sum(p = > p.Value) != CalculateNetFee(Transactions)) return false;

    return True
"resource": ""
} |
def get_token(wallet: 'Wallet', token_str: str) -> 'NEP5Token.NEP5Token':
    """
    Try to get a NEP-5 token based on the symbol or script_hash

    Args:
        wallet: wallet instance
        token_str: symbol or script_hash (accepts script hash with or without 0x prefix)

    Raises:
        ValueError: if token is not found

    Returns:
        NEP5Token instance if found.
    """
    # normalize: strip an optional '0x' script-hash prefix
    needle = token_str[2:] if token_str.startswith('0x') else token_str

    # a token matches on either its symbol or its script hash string
    match = next(
        (t for t in wallet.GetTokens().values()
         if needle in [t.symbol, t.ScriptHash.ToString()]),
        None,
    )

    if not isinstance(match, NEP5Token.NEP5Token):
        raise ValueError("The given token argument does not represent a known NEP5 token")
    return match
"resource": ""
} |
def Instance(reactor=None):
    """
    Get the local node instance.

    Args:
        reactor: (optional) custom reactor to use in NodeLeader.

    Returns:
        NodeLeader: instance.
    """
    # lazily create the singleton; `reactor` only takes effect on first call
    if NodeLeader._LEAD is None:
        NodeLeader._LEAD = NodeLeader(reactor)
    return NodeLeader._LEAD
"resource": ""
} |
def Setup(self):
    """
    Initialize the local node.

    Returns:

    """
    # Peers: active nodes that we're connected to
    # KNOWN_ADDRS: node addresses that we've learned about from other nodes
    # DEAD_ADDRS: addresses that were performing poorly or we could not establish a connection to
    self.Peers, self.KNOWN_ADDRS, self.DEAD_ADDRS = [], [], []
    self.MissionsGlobal = []
    # random node id roughly within the uint32 range
    self.NodeId = random.randint(1294967200, 4294967200)
"resource": ""
} |
def check_bcr_catchup(self):
    """we're exceeding data request speed vs receive + process"""
    logger.debug(f"Checking if BlockRequests has caught up {len(BC.Default().BlockRequests)}")

    # test, perhaps there's some race condition between slow startup and throttle sync, otherwise blocks will never go down
    for peer in self.Peers:  # type: NeoNode
        # pause all per-peer request loops while we drain the queue
        peer.stop_block_loop(cancel=False)
        peer.stop_peerinfo_loop(cancel=False)
        peer.stop_header_loop(cancel=False)

    if len(BC.Default().BlockRequests) > 0:
        # still requests outstanding: keep peers alive and report progress
        for peer in self.Peers:
            peer.keep_alive()
            peer.health_check(HEARTBEAT_BLOCKS)
            peer_bcr_len = len(peer.myblockrequests)
            # if a peer has cleared its queue then reset heartbeat status to avoid timing out when resuming from "check_bcr" if there's 1 or more really slow peer(s)
            if peer_bcr_len == 0:
                peer.start_outstanding_data_request[HEARTBEAT_BLOCKS] = 0

            print(f"{peer.prefix} request count: {peer_bcr_len}")
            if peer_bcr_len == 1:
                next_hash = BC.Default().GetHeaderHash(self.CurrentBlockheight + 1)
                print(f"{peer.prefix} {peer.myblockrequests} {next_hash}")
    else:
        # we're done catching up. Stop own loop and restart peers
        self.stop_check_bcr_loop()
        self.check_bcr_loop = None
        logger.debug("BlockRequests have caught up...resuming sync")
        for peer in self.Peers:
            peer.ProtocolReady()  # this starts all loops again
            # give a little bit of time between startup of peers
            time.sleep(2)
"resource": ""
} |
def Start(self, seed_list: List[str] = None, skip_seeds: bool = False) -> None:
    """
    Start connecting to the seed list.

    Args:
        seed_list: a list of host:port strings if not supplied use list from `protocol.xxx.json`
        skip_seeds: skip connecting to seed list
    """
    if not seed_list:
        seed_list = settings.SEED_LIST

    logger.debug("Starting up nodeleader")
    if not skip_seeds:
        logger.debug("Attempting to connect to seed list...")
        for bootstrap in seed_list:
            # resolve hostnames so all known addresses are stored as ip:port
            if not is_ip_address(bootstrap):
                host, port = bootstrap.split(':')
                bootstrap = f"{hostname_to_ip(host)}:{port}"
            addr = Address(bootstrap)
            self.KNOWN_ADDRS.append(addr)
            self.SetupConnection(addr)

    logger.debug("Starting up nodeleader: starting peer, mempool, and blockheight check loops")
    # check in on peers every 10 seconds
    self.start_peer_check_loop()
    self.start_memcheck_loop()
    self.start_blockheight_loop()

    if settings.ACCEPT_INCOMING_PEERS and not self.incoming_server_running:
        # factory for incoming connections; each accepted peer gets a NeoNode
        class OneShotFactory(Factory):
            def __init__(self, leader):
                self.leader = leader

            def buildProtocol(self, addr):
                print(f"building new protocol for addr: {addr}")
                # record the incoming address so reconnect logic knows about it
                self.leader.AddKnownAddress(Address(f"{addr.host}:{addr.port}"))
                p = NeoNode(incoming_client=True)
                p.factory = self
                return p

        def listen_err(err):
            print(f"Failed start listening server for reason: {err.value}")

        def listen_ok(value):
            self.incoming_server_running = True

        logger.debug(f"Starting up nodeleader: setting up listen server on port: {settings.NODE_PORT}")
        server_endpoint = TCP4ServerEndpoint(self.reactor, settings.NODE_PORT)
        listenport_deferred = server_endpoint.listen(OneShotFactory(leader=self))
        listenport_deferred.addCallback(listen_ok)
        listenport_deferred.addErrback(listen_err)
"resource": ""
} |
q32841 | NodeLeader.Shutdown | train | def Shutdown(self):
"""Disconnect all connected peers."""
logger.debug("Nodeleader shutting down")
self.stop_peer_check_loop()
self.peer_check_loop_deferred = None
self.stop_check_bcr_loop()
self.check_bcr_loop_deferred = None
self.stop_memcheck_loop()
self.memcheck_loop_deferred = None
self.stop_blockheight_loop()
self.blockheight_loop_deferred = None
for p in self.Peers:
p.Disconnect() | python | {
"resource": ""
} |
q32842 | NodeLeader.AddConnectedPeer | train | def AddConnectedPeer(self, peer):
"""
Add a new connect peer to the known peers list.
Args:
peer (NeoNode): instance.
"""
# if present
self.RemoveFromQueue(peer.address)
self.AddKnownAddress(peer.address)
if len(self.Peers) > settings.CONNECTED_PEER_MAX:
peer.Disconnect("Max connected peers reached", isDead=False)
if peer not in self.Peers:
self.Peers.append(peer)
else:
# either peer is already in the list and it has reconnected before it timed out on our side
# or it's trying to connect multiple times
# or we hit the max connected peer count
self.RemoveKnownAddress(peer.address)
peer.Disconnect() | python | {
"resource": ""
} |
q32843 | NodeLeader.RemoveConnectedPeer | train | def RemoveConnectedPeer(self, peer):
"""
Remove a connected peer from the known peers list.
Args:
peer (NeoNode): instance.
"""
if peer in self.Peers:
self.Peers.remove(peer) | python | {
"resource": ""
} |
q32844 | NodeLeader._monitor_for_zero_connected_peers | train | def _monitor_for_zero_connected_peers(self):
"""
Track if we lost connection to all peers.
Give some retries threshold to allow peers that are in the process of connecting or in the queue to be connected to run
"""
if len(self.Peers) == 0 and len(self.connection_queue) == 0:
if self.peer_zero_count > 2:
logger.debug("Peer count 0 exceeded max retries threshold, restarting...")
self.Restart()
else:
logger.debug(
f"Peer count is 0, allow for retries or queued connections to be established {self.peer_zero_count}")
self.peer_zero_count += 1 | python | {
"resource": ""
} |
q32845 | NodeLeader.InventoryReceived | train | def InventoryReceived(self, inventory):
"""
Process a received inventory.
Args:
inventory (neo.Network.Inventory): expect a Block type.
Returns:
bool: True if processed and verified. False otherwise.
"""
if inventory.Hash.ToBytes() in self._MissedBlocks:
self._MissedBlocks.remove(inventory.Hash.ToBytes())
if inventory is MinerTransaction:
return False
if type(inventory) is Block:
if BC.Default() is None:
return False
if BC.Default().ContainsBlock(inventory.Index):
return False
if not BC.Default().AddBlock(inventory):
return False
else:
if not inventory.Verify(self.MemPool.values()):
return False | python | {
"resource": ""
} |
q32846 | NodeLeader.AddTransaction | train | def AddTransaction(self, tx):
"""
Add a transaction to the memory pool.
Args:
tx (neo.Core.TX.Transaction): instance.
Returns:
bool: True if successfully added. False otherwise.
"""
if BC.Default() is None:
return False
if tx.Hash.ToBytes() in self.MemPool.keys():
return False
if BC.Default().ContainsTransaction(tx.Hash):
return False
if not tx.Verify(self.MemPool.values()):
logger.error("Verifying tx result... failed")
return False
self.MemPool[tx.Hash.ToBytes()] = tx
return True | python | {
"resource": ""
} |
q32847 | NodeLeader.RemoveTransaction | train | def RemoveTransaction(self, tx):
"""
Remove a transaction from the memory pool if it is found on the blockchain.
Args:
tx (neo.Core.TX.Transaction): instance.
Returns:
bool: True if successfully removed. False otherwise.
"""
if BC.Default() is None:
return False
if not BC.Default().ContainsTransaction(tx.Hash):
return False
if tx.Hash.ToBytes() in self.MemPool:
del self.MemPool[tx.Hash.ToBytes()]
return True
return False | python | {
"resource": ""
} |
q32848 | NodeLeader.BlockheightCheck | train | def BlockheightCheck(self):
"""
Checks the current blockheight and finds the peer that prevents advancement
"""
if self.CurrentBlockheight == BC.Default().Height:
if len(self.Peers) > 0:
logger.debug("Blockheight is not advancing ...")
next_hash = BC.Default().GetHeaderHash(self.CurrentBlockheight + 1)
culprit_found = False
for peer in self.Peers:
if next_hash in peer.myblockrequests:
culprit_found = True
peer.Disconnect()
break
# this happens when we're connecting to other nodes that are stuck themselves
if not culprit_found:
for peer in self.Peers:
peer.Disconnect()
else:
self.CurrentBlockheight = BC.Default().Height | python | {
"resource": ""
} |
q32849 | WSSHBridge.open | train | def open(self, hostname, port=22, username=None, password=None,
private_key=None, key_passphrase=None,
allow_agent=False, timeout=None):
""" Open a connection to a remote SSH server
In order to connect, either one of these credentials must be
supplied:
* Password
Password-based authentication
* Private Key
Authenticate using SSH Keys.
If the private key is encrypted, it will attempt to
load it using the passphrase
* Agent
Authenticate using the *local* SSH agent. This is the
one running alongside wsshd on the server side.
"""
try:
pkey = None
if private_key:
pkey = self._load_private_key(private_key, key_passphrase)
self._ssh.connect(
hostname=hostname,
port=port,
username=username,
password=password,
pkey=pkey,
timeout=timeout,
allow_agent=allow_agent,
look_for_keys=False)
except socket.gaierror as e:
self._websocket.send(json.dumps({'error':
'Could not resolve hostname {0}: {1}'.format(
hostname, e.args[1])}))
raise
except Exception as e:
self._websocket.send(json.dumps({'error': e.message or str(e)}))
raise | python | {
"resource": ""
} |
q32850 | WSSHBridge._bridge | train | def _bridge(self, channel):
""" Full-duplex bridge between a websocket and a SSH channel """
channel.setblocking(False)
channel.settimeout(0.0)
self._tasks = [
gevent.spawn(self._forward_inbound, channel),
gevent.spawn(self._forward_outbound, channel)
]
gevent.joinall(self._tasks) | python | {
"resource": ""
} |
q32851 | WSSHBridge.close | train | def close(self):
""" Terminate a bridge session """
gevent.killall(self._tasks, block=True)
self._tasks = []
self._ssh.close() | python | {
"resource": ""
} |
q32852 | WSSHBridge.shell | train | def shell(self, term='xterm'):
""" Start an interactive shell session
This method invokes a shell on the remote SSH server and proxies
traffic to/from both peers.
You must connect to a SSH server using ssh_connect()
prior to starting the session.
"""
channel = self._ssh.invoke_shell(term)
self._bridge(channel)
channel.close() | python | {
"resource": ""
} |
q32853 | command | train | def command(engine, format, filepath=None, renderer=None, formatter=None):
"""Return args list for ``subprocess.Popen`` and name of the rendered file."""
if formatter is not None and renderer is None:
raise RequiredArgumentError('formatter given without renderer')
if engine not in ENGINES:
raise ValueError('unknown engine: %r' % engine)
if format not in FORMATS:
raise ValueError('unknown format: %r' % format)
if renderer is not None and renderer not in RENDERERS:
raise ValueError('unknown renderer: %r' % renderer)
if formatter is not None and formatter not in FORMATTERS:
raise ValueError('unknown formatter: %r' % formatter)
format_arg = [s for s in (format, renderer, formatter) if s is not None]
suffix = '.'.join(reversed(format_arg))
format_arg = ':'.join(format_arg)
cmd = [engine, '-T%s' % format_arg]
rendered = None
if filepath is not None:
cmd.extend(['-O', filepath])
rendered = '%s.%s' % (filepath, suffix)
return cmd, rendered | python | {
"resource": ""
} |
q32854 | render | train | def render(engine, format, filepath, renderer=None, formatter=None, quiet=False):
"""Render file with Graphviz ``engine`` into ``format``, return result filename.
Args:
engine: The layout commmand used for rendering (``'dot'``, ``'neato'``, ...).
format: The output format used for rendering (``'pdf'``, ``'png'``, ...).
filepath: Path to the DOT source file to render.
renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
quiet (bool): Suppress ``stderr`` output.
Returns:
The (possibly relative) path of the rendered file.
Raises:
ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.
graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
graphviz.ExecutableNotFound: If the Graphviz executable is not found.
subprocess.CalledProcessError: If the exit status is non-zero.
"""
cmd, rendered = command(engine, format, filepath, renderer, formatter)
run(cmd, capture_output=True, check=True, quiet=quiet)
return rendered | python | {
"resource": ""
} |
q32855 | pipe | train | def pipe(engine, format, data, renderer=None, formatter=None, quiet=False):
"""Return ``data`` piped through Graphviz ``engine`` into ``format``.
Args:
engine: The layout commmand used for rendering (``'dot'``, ``'neato'``, ...).
format: The output format used for rendering (``'pdf'``, ``'png'``, ...).
data: The binary (encoded) DOT source string to render.
renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
quiet (bool): Suppress ``stderr`` output.
Returns:
Binary (encoded) stdout of the layout command.
Raises:
ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.
graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
graphviz.ExecutableNotFound: If the Graphviz executable is not found.
subprocess.CalledProcessError: If the exit status is non-zero.
"""
cmd, _ = command(engine, format, None, renderer, formatter)
out, _ = run(cmd, input=data, capture_output=True, check=True, quiet=quiet)
return out | python | {
"resource": ""
} |
q32856 | version | train | def version():
"""Return the version number tuple from the ``stderr`` output of ``dot -V``.
Returns:
Two or three ``int`` version ``tuple``.
Raises:
graphviz.ExecutableNotFound: If the Graphviz executable is not found.
subprocess.CalledProcessError: If the exit status is non-zero.
RuntimmeError: If the output cannot be parsed into a version number.
"""
cmd = ['dot', '-V']
out, _ = run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
info = out.decode('ascii')
ma = re.search(r'graphviz version (\d+\.\d+(?:\.\d+)?) ', info)
if ma is None:
raise RuntimeError
return tuple(int(d) for d in ma.group(1).split('.')) | python | {
"resource": ""
} |
q32857 | File.pipe | train | def pipe(self, format=None, renderer=None, formatter=None):
"""Return the source piped through the Graphviz layout command.
Args:
format: The output format used for rendering (``'pdf'``, ``'png'``, etc.).
renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
Returns:
Binary (encoded) stdout of the layout command.
Raises:
ValueError: If ``format``, ``renderer``, or ``formatter`` are not known.
graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
graphviz.ExecutableNotFound: If the Graphviz executable is not found.
subprocess.CalledProcessError: If the exit status is non-zero.
"""
if format is None:
format = self._format
data = text_type(self.source).encode(self._encoding)
out = backend.pipe(self._engine, format, data, renderer, formatter)
return out | python | {
"resource": ""
} |
q32858 | File.save | train | def save(self, filename=None, directory=None):
"""Save the DOT source to file. Ensure the file ends with a newline.
Args:
filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)
directory: (Sub)directory for source saving and rendering.
Returns:
The (possibly relative) path of the saved source file.
"""
if filename is not None:
self.filename = filename
if directory is not None:
self.directory = directory
filepath = self.filepath
tools.mkdirs(filepath)
data = text_type(self.source)
with io.open(filepath, 'w', encoding=self.encoding) as fd:
fd.write(data)
if not data.endswith(u'\n'):
fd.write(u'\n')
return filepath | python | {
"resource": ""
} |
q32859 | File.render | train | def render(self, filename=None, directory=None, view=False, cleanup=False,
format=None, renderer=None, formatter=None):
"""Save the source to file and render with the Graphviz engine.
Args:
filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)
directory: (Sub)directory for source saving and rendering.
view (bool): Open the rendered result with the default application.
cleanup (bool): Delete the source file after rendering.
format: The output format used for rendering (``'pdf'``, ``'png'``, etc.).
renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
Returns:
The (possibly relative) path of the rendered file.
Raises:
ValueError: If ``format``, ``renderer``, or ``formatter`` are not known.
graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
graphviz.ExecutableNotFound: If the Graphviz executable is not found.
subprocess.CalledProcessError: If the exit status is non-zero.
RuntimeError: If viewer opening is requested but not supported.
"""
filepath = self.save(filename, directory)
if format is None:
format = self._format
rendered = backend.render(self._engine, format, filepath, renderer, formatter)
if cleanup:
os.remove(filepath)
if view:
self._view(rendered, self._format)
return rendered | python | {
"resource": ""
} |
q32860 | File.view | train | def view(self, filename=None, directory=None, cleanup=False):
"""Save the source to file, open the rendered result in a viewer.
Args:
filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)
directory: (Sub)directory for source saving and rendering.
cleanup (bool): Delete the source file after rendering.
Returns:
The (possibly relative) path of the rendered file.
Raises:
graphviz.ExecutableNotFound: If the Graphviz executable is not found.
subprocess.CalledProcessError: If the exit status is non-zero.
RuntimeError: If opening the viewer is not supported.
Short-cut method for calling :meth:`.render` with ``view=True``.
"""
return self.render(filename=filename, directory=directory, view=True,
cleanup=cleanup) | python | {
"resource": ""
} |
q32861 | File._view | train | def _view(self, filepath, format):
"""Start the right viewer based on file format and platform."""
methodnames = [
'_view_%s_%s' % (format, backend.PLATFORM),
'_view_%s' % backend.PLATFORM,
]
for name in methodnames:
view_method = getattr(self, name, None)
if view_method is not None:
break
else:
raise RuntimeError('%r has no built-in viewer support for %r '
'on %r platform' % (self.__class__, format, backend.PLATFORM))
view_method(filepath) | python | {
"resource": ""
} |
q32862 | Source.from_file | train | def from_file(cls, filename, directory=None,
format=None, engine=None, encoding=File._encoding):
"""Return an instance with the source string read from the given file.
Args:
filename: Filename for loading/saving the source.
directory: (Sub)directory for source loading/saving and rendering.
format: Rendering output format (``'pdf'``, ``'png'``, ...).
engine: Layout command used (``'dot'``, ``'neato'``, ...).
encoding: Encoding for loading/saving the source.
"""
filepath = os.path.join(directory or '', filename)
if encoding is None:
encoding = locale.getpreferredencoding()
with io.open(filepath, encoding=encoding) as fd:
source = fd.read()
return cls(source, filename, directory, format, engine, encoding) | python | {
"resource": ""
} |
q32863 | quote | train | def quote(identifier,
html=HTML_STRING.match, valid_id=ID.match, dot_keywords=KEYWORDS):
"""Return DOT identifier from string, quote if needed.
>>> quote('')
'""'
>>> quote('spam')
'spam'
>>> quote('spam spam')
'"spam spam"'
>>> quote('-4.2')
'-4.2'
>>> quote('.42')
'.42'
>>> quote('<<b>spam</b>>')
'<<b>spam</b>>'
>>> quote(nohtml('<>'))
'"<>"'
"""
if html(identifier) and not isinstance(identifier, NoHtml):
pass
elif not valid_id(identifier) or identifier.lower() in dot_keywords:
return '"%s"' % identifier.replace('"', '\\"')
return identifier | python | {
"resource": ""
} |
q32864 | quote_edge | train | def quote_edge(identifier):
"""Return DOT edge statement node_id from string, quote if needed.
>>> quote_edge('spam')
'spam'
>>> quote_edge('spam spam:eggs eggs')
'"spam spam":"eggs eggs"'
>>> quote_edge('spam:eggs:s')
'spam:eggs:s'
"""
node, _, rest = identifier.partition(':')
parts = [quote(node)]
if rest:
port, _, compass = rest.partition(':')
parts.append(quote(port))
if compass:
parts.append(compass)
return ':'.join(parts) | python | {
"resource": ""
} |
q32865 | a_list | train | def a_list(label=None, kwargs=None, attributes=None):
"""Return assembled DOT a_list string.
>>> a_list('spam', {'spam': None, 'ham': 'ham ham', 'eggs': ''})
'label=spam eggs="" ham="ham ham"'
"""
result = ['label=%s' % quote(label)] if label is not None else []
if kwargs:
items = ['%s=%s' % (quote(k), quote(v))
for k, v in tools.mapping_items(kwargs) if v is not None]
result.extend(items)
if attributes:
if hasattr(attributes, 'items'):
attributes = tools.mapping_items(attributes)
items = ['%s=%s' % (quote(k), quote(v))
for k, v in attributes if v is not None]
result.extend(items)
return ' '.join(result) | python | {
"resource": ""
} |
q32866 | attr_list | train | def attr_list(label=None, kwargs=None, attributes=None):
"""Return assembled DOT attribute list string.
Sorts ``kwargs`` and ``attributes`` if they are plain dicts (to avoid
unpredictable order from hash randomization in Python 3 versions).
>>> attr_list()
''
>>> attr_list('spam spam', kwargs={'eggs': 'eggs', 'ham': 'ham ham'})
' [label="spam spam" eggs=eggs ham="ham ham"]'
>>> attr_list(kwargs={'spam': None, 'eggs': ''})
' [eggs=""]'
"""
content = a_list(label, kwargs, attributes)
if not content:
return ''
return ' [%s]' % content | python | {
"resource": ""
} |
q32867 | Dot.edge | train | def edge(self, tail_name, head_name, label=None, _attributes=None, **attrs):
"""Create an edge between two nodes.
Args:
tail_name: Start node identifier.
head_name: End node identifier.
label: Caption to be displayed near the edge.
attrs: Any additional edge attributes (must be strings).
"""
tail_name = self._quote_edge(tail_name)
head_name = self._quote_edge(head_name)
attr_list = self._attr_list(label, attrs, _attributes)
line = self._edge % (tail_name, head_name, attr_list)
self.body.append(line) | python | {
"resource": ""
} |
q32868 | Dot.edges | train | def edges(self, tail_head_iter):
"""Create a bunch of edges.
Args:
tail_head_iter: Iterable of ``(tail_name, head_name)`` pairs.
"""
edge = self._edge_plain
quote = self._quote_edge
lines = (edge % (quote(t), quote(h)) for t, h in tail_head_iter)
self.body.extend(lines) | python | {
"resource": ""
} |
q32869 | mkdirs | train | def mkdirs(filename, mode=0o777):
"""Recursively create directories up to the path of ``filename`` as needed."""
dirname = os.path.dirname(filename)
if not dirname:
return
_compat.makedirs(dirname, mode=mode, exist_ok=True) | python | {
"resource": ""
} |
q32870 | mapping_items | train | def mapping_items(mapping, _iteritems=_compat.iteritems):
"""Return an iterator over the ``mapping`` items, sort if it's a plain dict.
>>> list(mapping_items({'spam': 0, 'ham': 1, 'eggs': 2}))
[('eggs', 2), ('ham', 1), ('spam', 0)]
>>> from collections import OrderedDict
>>> list(mapping_items(OrderedDict(enumerate(['spam', 'ham', 'eggs']))))
[(0, 'spam'), (1, 'ham'), (2, 'eggs')]
"""
if type(mapping) is dict:
return iter(sorted(_iteritems(mapping)))
return _iteritems(mapping) | python | {
"resource": ""
} |
q32871 | ServiceCaller.get_adapted_session | train | def get_adapted_session(adapter):
"""
Mounts an adapter capable of communication over HTTP or HTTPS to the supplied session.
:param adapter:
A :class:`requests.adapters.HTTPAdapter` instance
:return:
The adapted :class:`requests.Session` instance
"""
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
return session | python | {
"resource": ""
} |
q32872 | Endpoint.get_formatted_path | train | def get_formatted_path(self, **kwargs):
"""
Format this endpoint's path with the supplied keyword arguments
:return:
The fully-formatted path
:rtype:
str
"""
self._validate_path_placeholders(self.path_placeholders, kwargs)
return self.path.format(**kwargs) | python | {
"resource": ""
} |
q32873 | Endpoint.path_placeholders | train | def path_placeholders(self):
"""
The formattable placeholders from this endpoint's path, in the order they appear.
Example:
>>> endpoint = Endpoint(path='/api/{foo}/{bar}')
>>> endpoint.path_placeholders
['foo', 'bar']
"""
parser = string.Formatter()
return [placeholder_name for _, placeholder_name, _, _ in parser.parse(self.path) if placeholder_name] | python | {
"resource": ""
} |
q32874 | Endpoint.get_merged_params | train | def get_merged_params(self, supplied_params=None):
"""
Merge this endpoint's default parameters with the supplied parameters
:param dict supplied_params:
A dictionary of query parameter, value pairs
:return:
A dictionary of this endpoint's default parameters, merged with the supplied parameters.
Any default parameters which have a value supplied are overridden.
:rtype:
dict
:raises apiron.exceptions.UnfulfilledParameterException:
When a required parameter for this endpoint is not a default param and is not supplied by the caller
"""
supplied_params = supplied_params or {}
empty_params = {
param: supplied_params[param] for param in supplied_params if supplied_params[param] in (None, "")
}
if empty_params:
warnings.warn(
"The {path} endpoint "
"was called with empty parameters: {empty_params}".format(path=self.path, empty_params=empty_params),
RuntimeWarning,
stacklevel=5,
)
unfulfilled_params = {
param for param in self.required_params if param not in supplied_params and param not in self.default_params
}
if unfulfilled_params:
raise UnfulfilledParameterException(self.path, unfulfilled_params)
merged_params = self.default_params.copy()
merged_params.update(supplied_params)
return merged_params | python | {
"resource": ""
} |
q32875 | JsonEndpoint.format_response | train | def format_response(self, response):
"""
Extracts JSON data from the response
:param requests.Response response:
The original response from :mod:`requests`
:return:
The response's JSON content
:rtype:
:class:`dict` if ``preserve_order`` is ``False``
:rtype:
:class:`collections.OrderedDict` if ``preserve_order`` is ``True``
"""
return response.json(object_pairs_hook=collections.OrderedDict if self.preserve_order else None) | python | {
"resource": ""
} |
q32876 | pre_build_check | train | def pre_build_check():
"""
Try to verify build tools
"""
if os.environ.get('CASS_DRIVER_NO_PRE_BUILD_CHECK'):
return True
try:
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from distutils.dist import Distribution
# base build_ext just to emulate compiler option setup
be = build_ext(Distribution())
be.initialize_options()
be.finalize_options()
# First, make sure we have a Python include directory
have_python_include = any(os.path.isfile(os.path.join(p, 'Python.h')) for p in be.include_dirs)
if not have_python_include:
sys.stderr.write("Did not find 'Python.h' in %s.\n" % (be.include_dirs,))
return False
compiler = new_compiler(compiler=be.compiler)
customize_compiler(compiler)
try:
# We must be able to initialize the compiler if it has that method
if hasattr(compiler, "initialize"):
compiler.initialize()
except:
return False
executables = []
if compiler.compiler_type in ('unix', 'cygwin'):
executables = [compiler.executables[exe][0] for exe in ('compiler_so', 'linker_so')]
elif compiler.compiler_type == 'nt':
executables = [getattr(compiler, exe) for exe in ('cc', 'linker')]
if executables:
from distutils.spawn import find_executable
for exe in executables:
if not find_executable(exe):
sys.stderr.write("Failed to find %s for compiler type %s.\n" % (exe, compiler.compiler_type))
return False
except Exception as exc:
sys.stderr.write('%s\n' % str(exc))
sys.stderr.write("Failed pre-build check. Attempting anyway.\n")
# if we are unable to positively id the compiler type, or one of these assumptions fails,
# just proceed as we would have without the check
return True | python | {
"resource": ""
} |
q32877 | BatchQuery.add_callback | train | def add_callback(self, fn, *args, **kwargs):
"""Add a function and arguments to be passed to it to be executed after the batch executes.
A batch can support multiple callbacks.
Note, that if the batch does not execute, the callbacks are not executed.
A callback, thus, is an "on batch success" handler.
:param fn: Callable object
:type fn: callable
:param \*args: Positional arguments to be passed to the callback at the time of execution
:param \*\*kwargs: Named arguments to be passed to the callback at the time of execution
"""
if not callable(fn):
raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn)))
self._callbacks.append((fn, args, kwargs)) | python | {
"resource": ""
} |
q32878 | AbstractQuerySet._fill_result_cache | train | def _fill_result_cache(self):
"""
Fill the result cache with all results.
"""
idx = 0
try:
while True:
idx += 1000
self._fill_result_cache_to_idx(idx)
except StopIteration:
pass
self._count = len(self._result_cache) | python | {
"resource": ""
} |
q32879 | AbstractQuerySet.batch | train | def batch(self, batch_obj):
"""
Set a batch object to run the query on.
Note: running a select query with a batch object will raise an exception
"""
if self._connection:
raise CQLEngineException("Cannot specify the connection on model in batch mode.")
if batch_obj is not None and not isinstance(batch_obj, BatchQuery):
raise CQLEngineException('batch_obj must be a BatchQuery instance or None')
clone = copy.deepcopy(self)
clone._batch = batch_obj
return clone | python | {
"resource": ""
} |
q32880 | AbstractQuerySet.count | train | def count(self):
"""
Returns the number of rows matched by this query.
*Note: This function executes a SELECT COUNT() and has a performance cost on large datasets*
"""
if self._batch:
raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode")
if self._count is None:
query = self._select_query()
query.count = True
result = self._execute(query)
count_row = result.one().popitem()
self._count = count_row[1]
return self._count | python | {
"resource": ""
} |
q32881 | AbstractQuerySet.distinct | train | def distinct(self, distinct_fields=None):
"""
Returns the DISTINCT rows matched by this query.
distinct_fields default to the partition key fields if not specified.
*Note: distinct_fields must be a partition key or a static column*
.. code-block:: python
class Automobile(Model):
manufacturer = columns.Text(partition_key=True)
year = columns.Integer(primary_key=True)
model = columns.Text(primary_key=True)
price = columns.Decimal()
sync_table(Automobile)
# create rows
Automobile.objects.distinct()
# or
Automobile.objects.distinct(['manufacturer'])
"""
clone = copy.deepcopy(self)
if distinct_fields:
clone._distinct_fields = distinct_fields
else:
clone._distinct_fields = [x.column_name for x in self.model._partition_keys.values()]
return clone | python | {
"resource": ""
} |
q32882 | AbstractQuerySet.fetch_size | train | def fetch_size(self, v):
"""
Sets the number of rows that are fetched at a time.
*Note that driver's default fetch size is 5000.*
.. code-block:: python
for user in User.objects().fetch_size(500):
print(user)
"""
if not isinstance(v, six.integer_types):
raise TypeError
if v == self._fetch_size:
return self
if v < 1:
raise QueryException("fetch size less than 1 is not allowed")
clone = copy.deepcopy(self)
clone._fetch_size = v
return clone | python | {
"resource": ""
} |
q32883 | ModelQuerySet.values_list | train | def values_list(self, *fields, **kwargs):
""" Instructs the query set to return tuples, not model instance """
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (kwargs.keys(),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
clone = self.only(fields)
clone._values_list = True
clone._flat_values_list = flat
return clone | python | {
"resource": ""
} |
q32884 | ModelQuerySet.timestamp | train | def timestamp(self, timestamp):
"""
Allows for custom timestamps to be saved with the record.
"""
clone = copy.deepcopy(self)
clone._timestamp = timestamp
return clone | python | {
"resource": ""
} |
q32885 | ModelQuerySet.if_not_exists | train | def if_not_exists(self):
"""
Check the existence of an object before insertion.
If the insertion isn't applied, a LWTException is raised.
"""
if self.model._has_counter:
raise IfNotExistsWithCounterColumn('if_not_exists cannot be used with tables containing counter columns')
clone = copy.deepcopy(self)
clone._if_not_exists = True
return clone | python | {
"resource": ""
} |
q32886 | ModelQuerySet.if_exists | train | def if_exists(self):
"""
Check the existence of an object before an update or delete.
If the update or delete isn't applied, a LWTException is raised.
"""
if self.model._has_counter:
raise IfExistsWithCounterColumn('if_exists cannot be used with tables containing counter columns')
clone = copy.deepcopy(self)
clone._if_exists = True
return clone | python | {
"resource": ""
} |
q32887 | BatchStatement.clear | train | def clear(self):
"""
This is a convenience method to clear a batch statement for reuse.
*Note:* it should not be used concurrently with uncompleted execution futures executing the same
``BatchStatement``.
"""
del self._statements_and_parameters[:]
self.keyspace = None
self.routing_key = None
if self.custom_payload:
self.custom_payload.clear() | python | {
"resource": ""
} |
q32888 | BaseUserType.type_name | train | def type_name(cls):
"""
Returns the type name if it's been defined
otherwise, it creates it from the class name
"""
if cls.__type_name__:
type_name = cls.__type_name__.lower()
else:
camelcase = re.compile(r'([a-z])([A-Z])')
ccase = lambda s: camelcase.sub(lambda v: '{0}_{1}'.format(v.group(1), v.group(2)), s)
type_name = ccase(cls.__name__)
# trim to less than 48 characters or cassandra will complain
type_name = type_name[-48:]
type_name = type_name.lower()
type_name = re.sub(r'^_+', '', type_name)
cls.__type_name__ = type_name
return type_name | python | {
"resource": ""
} |
q32889 | BaseValueManager.changed | train | def changed(self):
"""
Indicates whether or not this value has changed.
:rtype: boolean
"""
if self.explicit:
return self.value != self.previous_value
if isinstance(self.column, BaseContainerColumn):
default_value = self.column.get_default()
if self.column._val_is_null(default_value):
return not self.column._val_is_null(self.value) and self.value != self.previous_value
elif self.previous_value is None:
return self.value != default_value
return self.value != self.previous_value
return False | python | {
"resource": ""
} |
q32890 | Ascii.validate | train | def validate(self, value):
""" Only allow ASCII and None values.
Check against US-ASCII, a.k.a. 7-bit ASCII, a.k.a. ISO646-US, a.k.a.
the Basic Latin block of the Unicode character set.
Source: https://github.com/apache/cassandra/blob
/3dcbe90e02440e6ee534f643c7603d50ca08482b/src/java/org/apache/cassandra
/serializers/AsciiSerializer.java#L29
"""
value = super(Ascii, self).validate(value)
if value:
charset = value if isinstance(
value, (bytearray, )) else map(ord, value)
if not set(range(128)).issuperset(charset):
raise ValidationError(
'{!r} is not an ASCII string.'.format(value))
return value | python | {
"resource": ""
} |
q32891 | Boolean.validate | train | def validate(self, value):
""" Always returns a Python boolean. """
value = super(Boolean, self).validate(value)
if value is not None:
value = bool(value)
return value | python | {
"resource": ""
} |
q32892 | _get_context | train | def _get_context(keyspaces, connections):
"""Return all the execution contexts"""
if keyspaces:
if not isinstance(keyspaces, (list, tuple)):
raise ValueError('keyspaces must be a list or a tuple.')
if connections:
if not isinstance(connections, (list, tuple)):
raise ValueError('connections must be a list or a tuple.')
keyspaces = keyspaces if keyspaces else [None]
connections = connections if connections else [None]
return product(connections, keyspaces) | python | {
"resource": ""
} |
q32893 | create_keyspace_simple | train | def create_keyspace_simple(name, replication_factor, durable_writes=True, connections=None):
"""
Creates a keyspace with SimpleStrategy for replica placement
If the keyspace already exists, it will not be modified.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
:param str name: name of keyspace to create
:param int replication_factor: keyspace replication factor, used with :attr:`~.SimpleStrategy`
:param bool durable_writes: Write log is bypassed if set to False
:param list connections: List of connection names
"""
_create_keyspace(name, durable_writes, 'SimpleStrategy',
{'replication_factor': replication_factor}, connections=connections) | python | {
"resource": ""
} |
q32894 | create_keyspace_network_topology | train | def create_keyspace_network_topology(name, dc_replication_map, durable_writes=True, connections=None):
"""
Creates a keyspace with NetworkTopologyStrategy for replica placement
If the keyspace already exists, it will not be modified.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
:param str name: name of keyspace to create
:param dict dc_replication_map: map of dc_names: replication_factor
:param bool durable_writes: Write log is bypassed if set to False
:param list connections: List of connection names
"""
_create_keyspace(name, durable_writes, 'NetworkTopologyStrategy', dc_replication_map, connections=connections) | python | {
"resource": ""
} |
q32895 | drop_keyspace | train | def drop_keyspace(name, connections=None):
"""
Drops a keyspace, if it exists.
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
:param str name: name of keyspace to drop
:param list connections: List of connection names
"""
if not _allow_schema_modification():
return
if connections:
if not isinstance(connections, (list, tuple)):
raise ValueError('Connections must be a list or a tuple.')
def _drop_keyspace(name, connection=None):
cluster = get_cluster(connection)
if name in cluster.metadata.keyspaces:
execute("DROP KEYSPACE {0}".format(metadata.protect_name(name)), connection=connection)
if connections:
for connection in connections:
_drop_keyspace(name, connection)
else:
_drop_keyspace(name) | python | {
"resource": ""
} |
q32896 | _get_index_name_by_column | train | def _get_index_name_by_column(table, column_name):
"""
Find the index name for a given table and column.
"""
protected_name = metadata.protect_name(column_name)
possible_index_values = [protected_name, "values(%s)" % protected_name]
for index_metadata in table.indexes.values():
options = dict(index_metadata.index_options)
if options.get('target') in possible_index_values:
return index_metadata.name | python | {
"resource": ""
} |
q32897 | _update_options | train | def _update_options(model, connection=None):
"""Updates the table options for the given model if necessary.
:param model: The model to update.
:param connection: Name of the connection to use
:return: `True`, if the options were modified in Cassandra,
`False` otherwise.
:rtype: bool
"""
ks_name = model._get_keyspace()
msg = format_log_context("Checking %s for option differences", keyspace=ks_name, connection=connection)
log.debug(msg, model)
model_options = model.__options__ or {}
table_meta = _get_table_metadata(model, connection=connection)
# go to CQL string first to normalize meta from different versions
existing_option_strings = set(table_meta._make_option_strings(table_meta.options))
existing_options = _options_map_from_strings(existing_option_strings)
model_option_strings = metadata.TableMetadataV3._make_option_strings(model_options)
model_options = _options_map_from_strings(model_option_strings)
update_options = {}
for name, value in model_options.items():
try:
existing_value = existing_options[name]
except KeyError:
msg = format_log_context("Invalid table option: '%s'; known options: %s", keyspace=ks_name, connection=connection)
raise KeyError(msg % (name, existing_options.keys()))
if isinstance(existing_value, six.string_types):
if value != existing_value:
update_options[name] = value
else:
try:
for k, v in value.items():
if existing_value[k] != v:
update_options[name] = value
break
except KeyError:
update_options[name] = value
if update_options:
options = ' AND '.join(metadata.TableMetadataV3._make_option_strings(update_options))
query = "ALTER TABLE {0} WITH {1}".format(model.column_family_name(), options)
execute(query, connection=connection)
return True
return False | python | {
"resource": ""
} |
q32898 | drop_table | train | def drop_table(model, keyspaces=None, connections=None):
"""
Drops the table indicated by the model, if it exists.
If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case.
If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case.
If not specified, it will try to get the connection from the Model.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
"""
context = _get_context(keyspaces, connections)
for connection, keyspace in context:
with query.ContextQuery(model, keyspace=keyspace) as m:
_drop_table(m, connection=connection) | python | {
"resource": ""
} |
q32899 | TwistedConnectionProtocol.dataReceived | train | def dataReceived(self, data):
"""
Callback function that is called when data has been received
on the connection.
Reaches back to the Connection object and queues the data for
processing.
"""
self.connection._iobuf.write(data)
self.connection.handle_read() | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.