INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Get Gnosis Safe Master contract. It should be used to access Safe methods on Proxy contracts.: param w3: Web3 instance: param address: address of the safe contract/ proxy contract: return: Safe Contract
def get_safe_contract(w3: Web3, address=None):
    """
    Get Gnosis Safe Master contract.

    It should be used to access Safe methods on Proxy contracts as well.

    :param w3: Web3 instance
    :param address: address of the safe contract/proxy contract
    :return: Safe Contract
    """
    interface = GNOSIS_SAFE_INTERFACE
    return w3.eth.contract(address, abi=interface['abi'], bytecode=interface['bytecode'])
Get Old Gnosis Safe Master contract. It should be used to access Safe methods on Proxy contracts.: param w3: Web3 instance: param address: address of the safe contract/ proxy contract: return: Safe Contract
def get_old_safe_contract(w3: Web3, address=None):
    """
    Get Old Gnosis Safe Master contract.

    It should be used to access Safe methods on Proxy contracts as well.

    :param w3: Web3 instance
    :param address: address of the safe contract/proxy contract
    :return: Safe Contract
    """
    interface = OLD_GNOSIS_SAFE_INTERFACE
    return w3.eth.contract(address, abi=interface['abi'], bytecode=interface['bytecode'])
Get Paying Proxy Contract. This should be used just for contract creation/ changing master_copy If you want to call Safe methods you should use get_safe_contract with the Proxy address so you can access every method of the Safe: param w3: Web3 instance: param address: address of the proxy contract: return: Paying Proxy Contract
def get_paying_proxy_contract(w3: Web3, address=None):
    """
    Get Paying Proxy Contract.

    This should be used just for contract creation/changing `master_copy`.
    To call Safe methods, use `get_safe_contract` with the Proxy address
    instead, so every Safe method is accessible.

    :param w3: Web3 instance
    :param address: address of the proxy contract
    :return: Paying Proxy Contract
    """
    interface = PAYING_PROXY_INTERFACE
    return w3.eth.contract(address, abi=interface['abi'], bytecode=interface['bytecode'])
Get ERC20 interface: param w3: Web3 instance: param address: address of the proxy contract: return: ERC 20 contract
def get_erc20_contract(w3: Web3, address=None):
    """
    Get ERC20 interface.

    :param w3: Web3 instance
    :param address: address of the proxy contract
    :return: ERC 20 contract
    """
    interface = ERC20_INTERFACE
    return w3.eth.contract(address, abi=interface['abi'], bytecode=interface['bytecode'])
: param signatures: signatures in form of { bytes32 r } { bytes32 s } { uint8 v }: param pos: position of the signature: return: Tuple with v r s
def signature_split(signatures: bytes, pos: int) -> Tuple[int, int, int]:
    """
    Extract one signature from a packed blob of concatenated signatures.

    :param signatures: signatures in form of {bytes32 r}{bytes32 s}{uint8 v}
    :param pos: position of the signature
    :return: Tuple with v, r, s
    """
    offset = pos * 65  # Each packed signature is exactly 65 bytes
    r_bytes = signatures[offset:offset + 32]
    s_bytes = signatures[offset + 32:offset + 64]
    v = signatures[offset + 64]
    return v, int.from_bytes(r_bytes, 'big'), int.from_bytes(s_bytes, 'big')
Convert signature to bytes: param vrs: tuple of v r s: return: signature in form of { bytes32 r } { bytes32 s } { uint8 v }
def signature_to_bytes(vrs: Tuple[int, int, int]) -> bytes:
    """
    Convert a signature tuple to its 65-byte packed representation.

    :param vrs: tuple of v, r, s
    :return: signature in form of {bytes32 r}{bytes32 s}{uint8 v}
    """
    v, r, s = vrs
    return b''.join((r.to_bytes(32, byteorder='big'),
                     s.to_bytes(32, byteorder='big'),
                     v.to_bytes(1, byteorder='big')))
Convert signatures to bytes: param signatures: list of tuples ( v r s ): return: 65 bytes per signature
def signatures_to_bytes(signatures: List[Tuple[int, int, int]]) -> bytes:
    """
    Convert a list of signature tuples to packed bytes.

    :param signatures: list of tuples(v, r, s)
    :return: 65 bytes per signature
    """
    packed = bytearray()
    for vrs in signatures:
        packed += signature_to_bytes(vrs)
    return bytes(packed)
Find v and r valid values for a given s: param s: random value: return: v r
def find_valid_random_signature(s: int) -> Tuple[int, int]:
    """
    Find v and r valid values for a given s.

    :param s: random value
    :return: v, r
    :raises: ValueError: if no valid signature is found after 10000 tries
    """
    for _ in range(10000):
        r = int(os.urandom(31).hex(), 16)
        v = (r % 2) + 27
        if r < secpk1n:
            tx = Transaction(0, 1, 21000, b'', 0, b'', v=v, r=r, s=s)
            try:
                tx.sender  # Recovering the sender raises if the signature is invalid
                return v, r
            except (InvalidTransaction, ValueError):
                logger.debug('Cannot find signature with v=%d r=%d s=%d', v, r, s)
    # Format eagerly: `ValueError('…%d', s)` would keep the message uninterpolated
    raise ValueError('Valid signature not found with s=%d' % s)
: param master_copy: Master Copy of Gnosis Safe already deployed: param initializer: Data initializer to send to GnosisSafe setup method: param funder: Address that should get the payment ( if payment set ): param payment_token: Address if a token is used. If not set 0x0 will be ether: param payment: Payment: return: Transaction dictionary
def _build_proxy_contract_creation_constructor(self, master_copy: str, initializer: bytes, funder: str,
                                               payment_token: str, payment: int) -> ContractConstructor:
    """
    Prepare the constructor call to deploy a new Paying Proxy.

    :param master_copy: Master Copy of Gnosis Safe already deployed
    :param initializer: Data initializer to send to GnosisSafe setup method
    :param funder: Address that should get the payment (if payment set)
    :param payment_token: Address if a token is used. If not set, 0x0 will be ether
    :param payment: Payment
    :return: Transaction dictionary
    """
    # No funder -> normalize to the null address and zero out the payment
    if not funder or funder == NULL_ADDRESS:
        funder = NULL_ADDRESS
        payment = 0
    proxy_contract = get_paying_proxy_contract(self.w3)
    return proxy_contract.constructor(master_copy, initializer, funder, payment_token, payment)
: param master_copy: Master Copy of Gnosis Safe already deployed: param initializer: Data initializer to send to GnosisSafe setup method: param funder: Address that should get the payment ( if payment set ): param payment_token: Address if a token is used. If not set 0x0 will be ether: param payment: Payment: return: Transaction dictionary
def _build_proxy_contract_creation_tx(self, master_copy: str, initializer: bytes, funder: str,
                                      payment_token: str, payment: int, gas: int, gas_price: int,
                                      nonce: int=0):
    """
    Build the web3 tx dictionary that deploys a Paying Proxy.

    :param master_copy: Master Copy of Gnosis Safe already deployed
    :param initializer: Data initializer to send to GnosisSafe setup method
    :param funder: Address that should get the payment (if payment set)
    :param payment_token: Address if a token is used. If not set, 0x0 will be ether
    :param payment: Payment
    :param gas: Gas limit for the deployment tx
    :param gas_price: Gas price for the deployment tx
    :param nonce: Nonce of the deployer account
    :return: Transaction dictionary
    """
    tx_params = {'gas': gas, 'gasPrice': gas_price, 'nonce': nonce}
    constructor = self._build_proxy_contract_creation_constructor(master_copy, initializer, funder,
                                                                  payment_token, payment)
    return constructor.buildTransaction(tx_params)
Use pyethereum Transaction to generate valid tx using a random signature: param tx_dict: Web3 tx dictionary: param s: Signature s value: return: PyEthereum creation tx for the proxy contract
def _build_contract_creation_tx_with_valid_signature(self, tx_dict: Dict[str, any], s: int) -> Transaction:
    """
    Use pyethereum `Transaction` to generate valid tx using a random signature.

    :param tx_dict: Web3 tx dictionary
    :param s: Signature s value
    :return: PyEthereum creation tx for the proxy contract
    :raises: ValueError: if no valid signature is found after 100 tries
    """
    zero_address = HexBytes('0x' + '0' * 40)
    f_address = HexBytes('0x' + 'f' * 40)
    nonce = tx_dict['nonce']
    gas_price = tx_dict['gasPrice']
    gas = tx_dict['gas']
    to = tx_dict.get('to', b'')  # Contract creation should always have `to` empty
    value = tx_dict['value']
    data = tx_dict['data']
    for _ in range(100):
        try:
            v, r = self.find_valid_random_signature(s)
            contract_creation_tx = Transaction(nonce, gas_price, gas, to, value, HexBytes(data), v=v, r=r, s=s)
            sender_address = contract_creation_tx.sender
            contract_address = contract_creation_tx.creates
            # Reject degenerate recovered addresses (all zeros / all f's)
            if sender_address in (zero_address, f_address) or contract_address in (zero_address, f_address):
                raise InvalidTransaction
            return contract_creation_tx
        except InvalidTransaction:
            pass
    # Format eagerly: `ValueError('…%d', s)` would keep the message uninterpolated
    raise ValueError('Valid signature not found with s=%d' % s)
Gas estimation done using web3 and calling the node Payment cannot be estimated as no ether is in the address. So we add some gas later.: param master_copy: Master Copy of Gnosis Safe already deployed: param initializer: Data initializer to send to GnosisSafe setup method: param funder: Address that should get the payment ( if payment set ): param payment_token: Address if a token is used. If not set 0x0 will be ether: return: Total gas estimation
def _estimate_gas(self, master_copy: str, initializer: bytes, funder: str, payment_token: str) -> int: """ Gas estimation done using web3 and calling the node Payment cannot be estimated, as no ether is in the address. So we add some gas later. :param master_copy: Master Copy of Gnosis Safe already deployed :param initializer: Data initializer to send to GnosisSafe setup method :param funder: Address that should get the payment (if payment set) :param payment_token: Address if a token is used. If not set, 0x0 will be ether :return: Total gas estimation """ # Estimate the contract deployment. We cannot estimate the refunding, as the safe address has not any fund gas: int = self._build_proxy_contract_creation_constructor( master_copy, initializer, funder, payment_token, 0).estimateGas() # We estimate the refund as a new tx if payment_token == NULL_ADDRESS: # Same cost to send 1 ether than 1000 gas += self.w3.eth.estimateGas({'to': funder, 'value': 1}) else: # Top should be around 52000 when storage is needed (funder no previous owner of token), # we use value 1 as we are simulating an internal call, and in that calls you don't pay for the data. # If it was a new tx sending 5000 tokens would be more expensive than sending 1 because of data costs try: gas += get_erc20_contract(self.w3, payment_token).functions.transfer(funder, 1).estimateGas({'from': payment_token}) except ValueError as exc: raise InvalidERC20Token from exc return gas
Signed transaction that is compatible with w3. eth. sendRawTransaction Is not used because pyEthereum implementation of Transaction was found to be more robust regarding invalid signatures
def _sign_web3_transaction(tx: Dict[str, any], v: int, r: int, s: int) -> (bytes, HexBytes):
    """
    Build a raw signed transaction compatible with `w3.eth.sendRawTransaction`.

    Not used because the `pyEthereum` implementation of Transaction was found
    to be more robust regarding invalid signatures.
    """
    unsigned_tx = serializable_unsigned_transaction_from_dict(tx)
    # To recover the signing address: ecrecover_to_pub(unsigned_tx.hash(), v, r, s)
    rlp_encoded = encode_transaction(unsigned_tx, vrs=(v, r, s))
    return rlp_encoded, unsigned_tx.hash()
Check if proxy is valid: param address: address of the proxy: return: True if proxy is valid False otherwise
def check_proxy_code(self, address) -> bool:
    """
    Check that the bytecode deployed at `address` matches one of the known proxy codes.

    :param address: address of the proxy
    :return: True if proxy is valid, False otherwise
    """
    deployed_code = self.w3.eth.getCode(address)
    runtime_code_fn = get_proxy_factory_contract(self.w3,
                                                 self.proxy_factory_address).functions.proxyRuntimeCode().call
    # Compare against the paying-proxy bytecode and the factory's runtime code
    return any(deployed_code == code_fn()
               for code_fn in (get_paying_proxy_deployed_bytecode, runtime_code_fn))
Check safe has enough funds to pay for a tx: param safe_address: Address of the safe: param safe_tx_gas: Start gas: param data_gas: Data gas: param gas_price: Gas Price: param gas_token: Gas Token to use token instead of ether for the gas: return: True if enough funds False otherwise
def check_funds_for_tx_gas(self, safe_address: str, safe_tx_gas: int, data_gas: int, gas_price: int,
                           gas_token: str) -> bool:
    """
    Check the Safe holds enough funds (ether or token) to pay for a tx.

    :param safe_address: Address of the safe
    :param safe_tx_gas: Start gas
    :param data_gas: Data gas
    :param gas_price: Gas Price
    :param gas_token: Gas Token, to use token instead of ether for the gas
    :return: True if enough funds, False otherwise
    """
    required = (safe_tx_gas + data_gas) * gas_price
    if gas_token == NULL_ADDRESS:
        balance = self.ethereum_client.get_balance(safe_address)
    else:
        balance = self.ethereum_client.erc20.get_balance(safe_address, gas_token)
    return balance >= required
Deploy master contract. Takes deployer_account ( if unlocked in the node ) or the deployer private key: param deployer_account: Unlocked ethereum account: param deployer_private_key: Private key of an ethereum account: return: deployed contract address
def deploy_master_contract(self, deployer_account=None, deployer_private_key=None) -> str:
    """
    Deploy master contract. Takes deployer_account (if unlocked in the node) or the deployer private key.

    :param deployer_account: Unlocked ethereum account
    :param deployer_private_key: Private key of an ethereum account
    :return: deployed contract address
    """
    assert deployer_account or deployer_private_key
    deployer_address = deployer_account or self.ethereum_client.private_key_to_address(deployer_private_key)
    safe_contract = self.get_contract()
    # Step 1: deploy the master copy contract itself
    tx = safe_contract.constructor().buildTransaction({'from': deployer_address})

    tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key,
                                                             public_key=deployer_account)
    tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60)
    assert tx_receipt.status

    contract_address = tx_receipt.contractAddress

    # Init master copy
    # Step 2: call `setup` so nobody can take ownership of the master copy
    master_safe = self.get_contract(contract_address)
    tx = master_safe.functions.setup(
        # We use 2 owners that nobody controls for the master copy
        ["0x0000000000000000000000000000000000000002", "0x0000000000000000000000000000000000000003"],
        2,  # Threshold. Maximum security
        NULL_ADDRESS,  # Address for optional DELEGATE CALL
        b'',  # Data for optional DELEGATE CALL
        NULL_ADDRESS,  # Payment token
        0,  # Payment
        NULL_ADDRESS  # Refund receiver
    ).buildTransaction({'from': deployer_address})

    tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key,
                                                             public_key=deployer_account)
    tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60)
    assert tx_receipt.status
    logger.info("Deployed and initialized Safe Master Contract=%s by %s", contract_address, deployer_address)
    return contract_address
Deploy proxy contract. Takes deployer_account ( if unlocked in the node ) or the deployer private key: param initializer: Initializer: param deployer_account: Unlocked ethereum account: param deployer_private_key: Private key of an ethereum account: return: deployed contract address
def deploy_paying_proxy_contract(self, initializer=b'', deployer_account=None, deployer_private_key=None) -> str:
    """
    Deploy proxy contract. Takes deployer_account (if unlocked in the node) or the deployer private key.

    :param initializer: Initializer
    :param deployer_account: Unlocked ethereum account
    :param deployer_private_key: Private key of an ethereum account
    :return: deployed contract address
    """
    assert deployer_account or deployer_private_key
    deployer_address = deployer_account or self.ethereum_client.private_key_to_address(deployer_private_key)
    safe_proxy_contract = get_paying_proxy_contract(self.w3)
    # Deploy with no funder/payment: funder and payment_token are NULL_ADDRESS, payment is 0
    tx = safe_proxy_contract.constructor(self.master_copy_address, initializer, NULL_ADDRESS,
                                         NULL_ADDRESS, 0).buildTransaction({'from': deployer_address})
    tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key,
                                                             public_key=deployer_account)
    tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60)
    assert tx_receipt.status
    return tx_receipt.contractAddress
Deploy proxy contract using the Proxy Factory Contract. Takes deployer_account ( if unlocked in the node ) or the deployer private key: param initializer: Initializer: param deployer_account: Unlocked ethereum account: param deployer_private_key: Private key of an ethereum account: return: deployed contract address
def deploy_proxy_contract(self, initializer=b'', deployer_account=None, deployer_private_key=None) -> str:
    """
    Deploy proxy contract using the `Proxy Factory Contract`. Takes deployer_account (if unlocked
    in the node) or the deployer private key.

    :param initializer: Initializer
    :param deployer_account: Unlocked ethereum account
    :param deployer_private_key: Private key of an ethereum account
    :return: deployed contract address
    """
    assert deployer_account or deployer_private_key
    deployer_address = deployer_account or self.ethereum_client.private_key_to_address(deployer_private_key)
    proxy_factory_contract = get_proxy_factory_contract(self.w3, self.proxy_factory_address)
    create_proxy_fn = proxy_factory_contract.functions.createProxy(self.master_copy_address, initializer)
    # `call()` simulates the tx, so we learn the address the proxy will be deployed to
    contract_address = create_proxy_fn.call()
    tx = create_proxy_fn.buildTransaction({'from': deployer_address})
    tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key,
                                                             public_key=deployer_account)
    tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=120)
    assert tx_receipt.status
    return contract_address
Deploy proxy contract using create2 with the Proxy Factory Contract. Takes deployer_account ( if unlocked in the node ) or the deployer_private_key: param salt_nonce: Uint256 for create2 salt: param initializer: Data for safe creation: param gas: Gas: param gas_price: Gas Price: param deployer_private_key: Private key of an ethereum account: return: Tuple ( tx - hash tx deployed contract address )
def deploy_proxy_contract_with_nonce(self, salt_nonce: int, initializer: bytes, gas: int, gas_price: int,
                                     deployer_private_key=None) -> Tuple[bytes, Dict[str, any], str]:
    """
    Deploy proxy contract using `create2` with the `Proxy Factory Contract`, signing with the
    `deployer_private_key`.

    :param salt_nonce: Uint256 for `create2` salt
    :param initializer: Data for safe creation
    :param gas: Gas
    :param gas_price: Gas Price
    :param deployer_private_key: Private key of an ethereum account
    :return: Tuple(tx-hash, tx, deployed contract address)
    """
    assert deployer_private_key
    proxy_factory_contract = get_proxy_factory_contract(self.w3, self.proxy_factory_address)
    create_proxy_fn = proxy_factory_contract.functions.createProxyWithNonce(self.master_copy_address,
                                                                            initializer, salt_nonce)
    # `call()` simulates the tx, so we learn the `create2` address of the proxy
    contract_address = create_proxy_fn.call()
    deployer_account = Account.privateKeyToAccount(deployer_private_key)
    nonce = self.ethereum_client.get_nonce_for_account(deployer_account.address, 'pending')
    # Auto estimation of gas does not work. We use a little more gas just in case
    tx = create_proxy_fn.buildTransaction({'from': deployer_account.address, 'gasPrice': gas_price,
                                           'nonce': nonce, 'gas': gas + 50000})
    signed_tx = deployer_account.signTransaction(tx)
    tx_hash = self.ethereum_client.send_raw_transaction(signed_tx.rawTransaction)
    return tx_hash, tx, contract_address
Deploy proxy factory contract. Takes deployer_account ( if unlocked in the node ) or the deployer private key: param deployer_account: Unlocked ethereum account: param deployer_private_key: Private key of an ethereum account: return: deployed contract address
def deploy_proxy_factory_contract(self, deployer_account=None, deployer_private_key=None) -> str:
    """
    Deploy proxy factory contract. Takes deployer_account (if unlocked in the node) or the deployer
    private key.

    :param deployer_account: Unlocked ethereum account
    :param deployer_private_key: Private key of an ethereum account
    :return: deployed contract address
    """
    assert deployer_account or deployer_private_key
    deployer_address = deployer_account or self.ethereum_client.private_key_to_address(deployer_private_key)
    proxy_factory_contract = get_proxy_factory_contract(self.w3)
    tx = proxy_factory_contract.constructor().buildTransaction({'from': deployer_address})

    tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key,
                                                             public_key=deployer_account)
    tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=120)
    assert tx_receipt.status
    contract_address = tx_receipt.contractAddress
    logger.info("Deployed and initialized Proxy Factory Contract=%s by %s", contract_address, deployer_address)
    return contract_address
Estimate tx gas using safe requiredTxGas method: return: int: Estimated gas: raises: CannotEstimateGas: If gas cannot be estimated: raises: ValueError: Cannot decode received data
def estimate_tx_gas_with_safe(self, safe_address: str, to: str, value: int, data: bytes, operation: int,
                              block_identifier='pending') -> int:
    """
    Estimate tx gas using safe `requiredTxGas` method.

    :return: int: Estimated gas
    :raises: CannotEstimateGas: If gas cannot be estimated
    :raises: ValueError: Cannot decode received data
    """
    data = data or b''

    def parse_revert_data(result: bytes) -> int:
        # Decode the gas estimation out of the revert payload:
        # 4 bytes - error method id
        # 32 bytes - position
        # 32 bytes - length
        # Last 32 bytes - value of revert (if everything went right)
        # NOTE: uses `tx` from the enclosing scope (assigned below before the first call)
        gas_estimation_offset = 4 + 32 + 32
        estimated_gas = result[gas_estimation_offset:]

        # Estimated gas must be 32 bytes
        if len(estimated_gas) != 32:
            logger.warning('Safe=%s Problem estimating gas, returned value is %s for tx=%s',
                           safe_address, result.hex(), tx)
            raise CannotEstimateGas('Received %s for tx=%s' % (result.hex(), tx))

        return int(estimated_gas.hex(), 16)

    # Add 10k, else we will fail in case of nested calls
    try:
        tx = self.get_contract(safe_address).functions.requiredTxGas(
            to,
            value,
            data,
            operation
        ).buildTransaction({
            'from': safe_address,
            'gas': int(1e7),
            'gasPrice': 0,
        })
        # If we build the tx web3 will not try to decode it for us
        # Ganache 6.3.0 and Geth are working like this
        result: HexBytes = self.w3.eth.call(tx, block_identifier=block_identifier)
        return parse_revert_data(result)
    except ValueError as exc:  # Parity
        """
        Parity throws a ValueError, e.g.
        {'code': -32015,
         'message': 'VM execution error.',
         'data': 'Reverted 0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002c4d6574686f642063616e206f6e6c792062652063616c6c65642066726f6d207468697320636f6e74726163740000000000000000000000000000000000000000'}
        """
        error_dict = exc.args[0]
        data = error_dict.get('data')
        if not data:
            raise exc
        elif isinstance(data, str) and 'Reverted ' in data:
            # Parity
            result = HexBytes(data.replace('Reverted ', ''))
            return parse_revert_data(result)

        key = list(data.keys())[0]
        result = data[key]['return']
        if result == '0x0':
            raise exc
        else:
            # Ganache-Cli with no `--noVMErrorsOnRPCResponse` flag enabled
            logger.warning('You should use `--noVMErrorsOnRPCResponse` flag with Ganache-cli')
            estimated_gas_hex = result[138:]

            # Estimated gas must be hex 0x + 64 chars = 66 chars
            assert len(estimated_gas_hex) == 64

            estimated_gas = int(estimated_gas_hex, 16)
            return estimated_gas
Estimate tx gas using web3
def estimate_tx_gas_with_web3(self, safe_address: str, to: str, value: int, data: bytes) -> int:
    """
    Estimate tx gas using the node's own estimation through web3.

    :return: Estimated gas for the internal tx
    """
    client = self.ethereum_client
    return client.estimate_gas(safe_address, to, value, data, block_identifier='pending')
Estimate tx gas. Use the max of calculation using safe method and web3 if operation == CALL or use just the safe calculation otherwise
def estimate_tx_gas(self, safe_address: str, to: str, value: int, data: bytes, operation: int) -> int:
    """
    Estimate tx gas. Use the max of the Safe's own estimation and web3's if operation == CALL,
    or just the Safe estimation otherwise.
    """
    # Costs to route through the proxy and nested calls
    proxy_gas = 1000
    # https://github.com/ethereum/solidity/blob/dfe3193c7382c80f1814247a162663a97c3f5e67/libsolidity/codegen/ExpressionCompiler.cpp#L1764
    # This was `false` before solc 0.4.21 -> `m_context.evmVersion().canOverchargeGasForCall()`
    # So gas needed by caller will be around 35k
    old_call_gas = 35000
    overhead = proxy_gas + old_call_gas

    safe_gas_estimation = self.estimate_tx_gas_with_safe(safe_address, to, value, data, operation) + overhead

    # We cannot estimate DELEGATECALL (different storage)
    if SafeOperation(operation) != SafeOperation.CALL:
        return safe_gas_estimation

    try:
        web3_gas_estimation = self.estimate_tx_gas_with_web3(safe_address, to, value, data) + overhead
    except ValueError:
        web3_gas_estimation = 0
    return max(safe_gas_estimation, web3_gas_estimation)
Estimates the gas for the verification of the signatures and other safe related tasks before and after executing a transaction. Calculation will be the sum of: - Base cost of 15000 gas - 100 of gas per word of data_bytes - Validate the signatures 5000 * threshold ( ecrecover for ecdsa ~ = 4K gas ): param safe_address: Address of the safe: param data_bytes_length: Length of the data ( in bytes so len ( HexBytes ( 0x12 )) would be 1: return: gas costs per signature * threshold of Safe
def estimate_tx_operational_gas(self, safe_address: str, data_bytes_length: int):
    """
    Estimate the gas spent verifying signatures and doing other Safe-related work
    before and after executing a transaction. The result is the sum of:
      - Base cost of 15000 gas
      - 100 gas per 32-byte word of `data_bytes`
      - 5000 gas per required signature (ecrecover for ecdsa ~= 4K gas)

    :param safe_address: Address of the safe
    :param data_bytes_length: Length of the data in bytes (so `len(HexBytes('0x12'))` would be `1`)
    :return: gas costs per signature * threshold of Safe
    """
    base_gas = 15000
    word_gas = (data_bytes_length // 32) * 100
    signature_gas = 5000 * self.retrieve_threshold(safe_address)
    return base_gas + word_gas + signature_gas
Send multisig tx to the Safe: param tx_gas: Gas for the external tx. If not ( safe_tx_gas + data_gas ) * 2 will be used: param tx_gas_price: Gas price of the external tx. If not gas_price will be used: return: Tuple ( tx_hash tx ): raises: InvalidMultisigTx: If user tx cannot go through the Safe
def send_multisig_tx(self, safe_address: str, to: str, value: int, data: bytes,
                     operation: int, safe_tx_gas: int, data_gas: int, gas_price: int,
                     gas_token: str, refund_receiver: str, signatures: bytes,
                     tx_sender_private_key: str, tx_gas=None, tx_gas_price=None,
                     block_identifier='pending') -> Tuple[bytes, Dict[str, any]]:
    """
    Send multisig tx to the Safe.

    :param safe_address: Address of the Safe
    :param signatures: Packed owner signatures authorizing the tx
    :param tx_sender_private_key: Private key of the account sending the external tx
    :param tx_gas: Gas for the external tx. If not, `(safe_tx_gas + data_gas) * 2` will be used
    :param tx_gas_price: Gas price of the external tx. If not, `gas_price` will be used
    :return: Tuple(tx_hash, tx)
    :raises: InvalidMultisigTx: If user tx cannot go through the Safe
    """
    safe_tx = self.build_multisig_tx(safe_address, to, value, data, operation, safe_tx_gas, data_gas,
                                     gas_price, gas_token, refund_receiver, signatures)
    tx_sender_address = Account.privateKeyToAccount(tx_sender_private_key).address
    # Simulate first with `call` so failures surface before gas is spent on `execute`
    safe_tx.call(tx_sender_address=tx_sender_address)
    return safe_tx.execute(tx_sender_private_key=tx_sender_private_key, tx_gas=tx_gas,
                           tx_gas_price=tx_gas_price, block_identifier=block_identifier)
Prepare Safe creation: param owners: Owners of the Safe: param threshold: Minimum number of users required to operate the Safe: param salt_nonce: Nonce for the create2 salt: param gas_price: Gas Price: param payment_receiver: Address to refund when the Safe is created. Address ( 0 ) if no need to refund: param payment_token: Payment token instead of paying the funder with ether. If None Ether will be used: param payment_token_eth_value: Value of payment token per 1 Ether: param fixed_creation_cost: Fixed creation cost of Safe ( Wei )
def build(self, owners: List[str], threshold: int, salt_nonce: int, gas_price: int,
          payment_receiver: Optional[str] = None, payment_token: Optional[str] = None,
          payment_token_eth_value: float = 1.0, fixed_creation_cost: Optional[int] = None):
    """
    Prepare Safe creation.

    :param owners: Owners of the Safe
    :param threshold: Minimum number of users required to operate the Safe
    :param salt_nonce: Nonce for the `create2` salt of the new Safe proxy
    :param gas_price: Gas Price
    :param payment_receiver: Address to refund when the Safe is created. Address(0) if no need to refund
    :param payment_token: Payment token instead of paying the funder with ether. If None Ether will be used
    :param payment_token_eth_value: Value of payment token per 1 Ether
    :param fixed_creation_cost: Fixed creation cost of Safe (Wei)
    """
    assert 0 < threshold <= len(owners)
    payment_receiver = payment_receiver or NULL_ADDRESS
    payment_token = payment_token or NULL_ADDRESS
    assert Web3.isChecksumAddress(payment_receiver)
    assert Web3.isChecksumAddress(payment_token)

    # Get bytes for `setup(address[] calldata _owners, uint256 _threshold, address to, bytes calldata data,
    # address paymentToken, uint256 payment, address payable paymentReceiver)`
    # This initializer will be passed to the ProxyFactory to be called right after proxy is deployed
    # We use `payment=0` as safe has no ether yet and estimation will fail
    safe_setup_data: bytes = self._get_initial_setup_safe_data(owners, threshold, payment_token=payment_token,
                                                               payment_receiver=payment_receiver)

    magic_gas: int = self._calculate_gas(owners, safe_setup_data, payment_token)
    estimated_gas: int = self._estimate_gas(safe_setup_data, salt_nonce, payment_token, payment_receiver)
    logger.debug('Magic gas %d - Estimated gas %d' % (magic_gas, estimated_gas))
    # Take the higher of the manual ("magic") and node-based estimations
    gas = max(magic_gas, estimated_gas)

    # Payment will be safe deploy cost
    payment = self._calculate_refund_payment(gas, gas_price, fixed_creation_cost, payment_token_eth_value)

    # Now we have a estimate for `payment` so we get initialization data again
    safe_setup_data: bytes = self._get_initial_setup_safe_data(owners, threshold, payment_token=payment_token,
                                                               payment=payment,
                                                               payment_receiver=payment_receiver)

    safe_address = self.calculate_create2_address(safe_setup_data, salt_nonce)
    assert int(safe_address, 16), 'Calculated Safe address cannot be the NULL ADDRESS'

    return SafeCreate2Tx(salt_nonce, owners, threshold, self.master_copy_address, self.proxy_factory_address,
                         payment_receiver, payment_token, payment, gas, gas_price, payment_token_eth_value,
                         fixed_creation_cost, safe_address, safe_setup_data)
Calculate gas manually based on tests of previously deployed safes: param owners: Safe owners: param safe_setup_data: Data for proxy setup: param payment_token: If payment token we will need more gas to transfer and maybe storage if first time: return: total gas needed for deployment
def _calculate_gas(owners: List[str], safe_setup_data: bytes, payment_token: str) -> int:
    """
    Calculate gas manually, based on measurements of previously deployed safes.

    :param owners: Safe owners
    :param safe_setup_data: Data for proxy setup
    :param payment_token: If a payment token is used, extra gas is needed for the transfer
                          (and maybe storage if it is the first time)
    :return: total gas needed for deployment
    """
    base_gas = 205000  # Transaction base gas
    data_gas = len(safe_setup_data) * 68  # Data gas
    gas_per_owner = 20000  # Magic number calculated by testing and averaging owners
    owners_gas = len(owners) * gas_per_owner
    # If we already have the token, we don't have to pay for storage, so it will be just 5K instead of 20K.
    # The other 1K is for overhead of making the call
    token_gas = 55000 if payment_token != NULL_ADDRESS else 0
    return base_gas + data_gas + owners_gas + token_gas
Gas estimation done using web3 and calling the node Payment cannot be estimated as no ether is in the address. So we add some gas later.: param initializer: Data initializer to send to GnosisSafe setup method: param salt_nonce: Nonce that will be used to generate the salt to calculate the address of the new proxy contract.: return: Total gas estimation
def _estimate_gas(self, initializer: bytes, salt_nonce: int, payment_token: str,
                  payment_receiver: str) -> int:
    """
    Gas estimation done using web3 and calling the node.

    Payment refund cannot be estimated directly, as the future Safe address holds
    no ether yet, so a refund estimation is added on top of the deployment cost.

    :param initializer: Data initializer to send to GnosisSafe setup method
    :param salt_nonce: Nonce that will be used to generate the salt to calculate
        the address of the new proxy contract.
    :param payment_token: token used for the refund (NULL_ADDRESS for ether)
    :param payment_receiver: address receiving the refund
    :return: Total gas estimation
    """
    # Estimate the proxy deployment itself (no refund included)
    create_fn = self.proxy_factory_contract.functions.createProxyWithNonce
    total_gas: int = create_fn(self.master_copy_address, initializer, salt_nonce).estimateGas()

    # Amount is irrelevant for the gas estimation: sending 1 wei costs the same as 1000
    simulated_payment: int = 1

    # The refund is estimated as if it were a separate tx
    if payment_token == NULL_ADDRESS:
        total_gas += self.w3.eth.estimateGas({'to': payment_receiver, 'value': simulated_payment})
    else:
        # Upper bound is around 52000 when storage is needed (receiver was not a
        # previous holder of the token). A flat value is used because the refund
        # happens as an internal call, and internal calls don't pay for calldata
        total_gas += 55000
    return total_gas
: return: Web3 contract tx prepared for call transact or buildTransaction
def w3_tx(self):
    """
    :return: Web3 contract tx prepared for `call`, `transact` or `buildTransaction`
    """
    contract = get_safe_contract(self.w3, address=self.safe_address)
    exec_transaction = contract.functions.execTransaction
    return exec_transaction(self.to,
                            self.value,
                            self.data,
                            self.operation,
                            self.safe_tx_gas,
                            self.data_gas,
                            self.gas_price,
                            self.gas_token,
                            self.refund_receiver,
                            self.signatures)
: param tx_sender_address:: param tx_gas: Force a gas limit: param block_identifier:: return: 1 if everything ok
def call(self, tx_sender_address: Optional[str] = None, tx_gas: Optional[int] = None,
         block_identifier='pending') -> int:
    """
    Simulate the Safe multisig tx with an `eth_call`.

    :param tx_sender_address: address used as `from` for the simulated call
    :param tx_gas: Force a gas limit
    :param block_identifier: block used for the simulation (e.g. 'pending')
    :return: `1` if everything ok
    :raises: InvalidInternalTx: if the internal tx would fail
    """
    parameters = {}
    if tx_sender_address:
        parameters['from'] = tx_sender_address
    if tx_gas:
        parameters['gas'] = tx_gas
    try:
        success = self.w3_tx.call(parameters, block_identifier=block_identifier)
        if not success:
            raise InvalidInternalTx('Success bit is %d, should be equal to 1' % success)
        return success
    except BadFunctionCallOutput as exc:  # Geth encodes revert data in the error string
        return self._parse_vm_exception(str(exc))
    except ValueError as exc:
        # Parity throws a ValueError, e.g.
        # {'code': -32015, 'message': 'VM execution error.',
        #  'data': 'Reverted 0x08c379a0...'}
        error_dict = exc.args[0]
        data = error_dict.get('data')
        if not data:
            raise exc
        elif isinstance(data, str) and 'Reverted ' in data:  # Parity revert payload
            result = HexBytes(data.replace('Reverted ', ''))
            return self._parse_vm_exception(str(result))
        else:
            # BUGFIX: previously this branch fell through and the method implicitly
            # returned None; unknown node errors must propagate to the caller
            raise exc
Send multisig tx to the Safe: param tx_sender_private_key: Sender private key: param tx_gas: Gas for the external tx. If not, (safe_tx_gas + data_gas) * 2 will be used: param tx_gas_price: Gas price of the external tx. If not, gas_price will be used: param tx_nonce: Force nonce for tx_sender: param block_identifier: latest or pending: return: Tuple (tx_hash, tx): raises: InvalidMultisigTx: If user tx cannot go through the Safe
def execute(self, tx_sender_private_key: str, tx_gas: Optional[int] = None,
            tx_gas_price: Optional[int] = None, tx_nonce: Optional[int] = None,
            block_identifier='pending') -> Tuple[bytes, Dict[str, any]]:
    """
    Send multisig tx to the Safe, wrapped in an external ethereum transaction.

    :param tx_sender_private_key: Sender private key
    :param tx_gas: Gas for the external tx. If not, `(safe_tx_gas + data_gas) * 2` will be used
    :param tx_gas_price: Gas price of the external tx. If not, `gas_price` will be used
    :param tx_nonce: Force nonce for `tx_sender`
    :param block_identifier: `latest` or `pending`
    :return: Tuple(tx_hash, tx)
    :raises: InvalidMultisigTx: If user tx cannot go through the Safe
    """
    sender_address = Account.privateKeyToAccount(tx_sender_private_key).address
    tx_parameters = {
        'from': sender_address,
        # Fall back to wrapped-tx values when the caller does not override them
        'gas': tx_gas or (self.safe_tx_gas + self.data_gas) * 2,
        'gasPrice': tx_gas_price or self.gas_price,
    }
    if tx_nonce is not None:
        tx_parameters['nonce'] = tx_nonce

    self.tx = self.w3_tx.buildTransaction(tx_parameters)
    self.tx_hash = self.ethereum_client.send_unsigned_transaction(
        self.tx,
        private_key=tx_sender_private_key,
        retry=True,
        block_identifier=block_identifier)
    return self.tx_hash, self.tx
Appends towrite to the write queue
async def write(self, towrite: bytes, await_blocking=False):
    """
    Append `towrite` to the write queue.

    >>> await test.write(b"HELLO") # Returns without wait time
    >>> await test.write(b"HELLO", await_blocking = True) # Returns when the bufer is flushed

    :param towrite: Write buffer
    :param await_blocking: wait for everything to be written
    """
    await self._write(towrite)
    if not await_blocking:
        return
    # Caller asked to block until the output buffer is drained
    return await self.flush()
Reads a given number of bytes
async def read(self, num_bytes=0) -> bytes:
    """
    Read a given number of bytes.

    :param num_bytes: How many bytes to read, leave it at default to read
        everything that is available
    :returns: incoming bytes
    """
    requested = num_bytes
    if requested < 1:
        # Default: read whatever is buffered, or block for at least one byte
        requested = self.in_waiting or 1
    return await self._read(requested)
Reads a given number of bytes
async def _read(self, num_bytes) -> bytes: """ Reads a given number of bytes :param num_bytes: How many bytes to read :returns: incoming bytes """ while True: if self.in_waiting < num_bytes: await asyncio.sleep(self._asyncio_sleep_time) else: # Try to read bytes inbytes = self._serial_instance.read(num_bytes) # Just for safety, should never happen if not inbytes: await asyncio.sleep(self._asyncio_sleep_time) else: return inbytes
Reads one line
async def readline(self) -> bytes:
    """
    Read one line.

    >>> # Keeps waiting for a linefeed incase there is none in the buffer
    >>> await test.readline()

    :returns: bytes forming a line
    """
    while True:
        data = self._serial_instance.readline()
        if data:
            return data
        # Nothing buffered yet: yield to the event loop and retry
        await asyncio.sleep(self._asyncio_sleep_time)
Verifies and sends message.
def send(self, message):
    """Verifies and sends message.

    Validates headers, stamps a date if missing, delivers via the open SMTP
    connection (if any), and notifies the `email_dispatched` signal. After
    `max_emails` sends, the SMTP connection is recycled.

    :param message: Message instance.
    :raises BadHeaderError: if any header contains a newline
    """
    assert message.send_to, "No recipients have been added"
    if message.has_bad_headers(self.mail.default_sender):
        raise BadHeaderError
    # Stamp the message with the current time if the caller didn't set one
    if message.date is None:
        message.date = time.time()
    sender = message.sender or self.mail.default_sender
    # self.host may be None (e.g. MAIL_SUPPRESS_SEND); the signal is still fired below
    if self.host:
        self.host.sendmail(sanitize_address(sender) if sender is not None else None,
                           message.send_to,
                           message.as_string(self.mail.default_sender),
                           message.mail_options,
                           message.rcpt_options)
    email_dispatched.send(message, mail=self.mail)
    self.num_emails += 1
    if self.num_emails == self.mail.max_emails:
        # Recycle the SMTP connection after max_emails deliveries
        self.num_emails = 0
        if self.host:
            self.host.quit()
            self.host = self.configure_host()
Creates a MIMEText object with the given subtype ( default: plain ) If the text is unicode the utf - 8 charset is used.
def _mimetext(self, text, subtype='plain'): """Creates a MIMEText object with the given subtype (default: 'plain') If the text is unicode, the utf-8 charset is used. """ charset = self.charset or 'utf-8' return MIMEText(text, _subtype=subtype, _charset=charset)
Creates the email
def as_string(self, default_from=None):
    """Creates the email as a raw RFC 2822 string.

    Chooses the MIME structure from the message content: plain text only,
    multipart with attachments, or multipart/alternative for text+html.

    :param default_from: fallback From address when the message has no sender
    :return: the fully assembled message as a string
    """
    encoding = self.charset or 'utf-8'
    attachments = self.attachments or []

    if len(attachments) == 0 and not self.html:
        # No html content and zero attachments means plain text
        msg = self._mimetext(self.body)
    elif len(attachments) > 0 and not self.html:
        # No html and at least one attachment means multipart
        msg = MIMEMultipart()
        msg.attach(self._mimetext(self.body))
    else:
        # Anything else
        msg = MIMEMultipart()
        alternative = MIMEMultipart('alternative')
        alternative.attach(self._mimetext(self.body, 'plain'))
        alternative.attach(self._mimetext(self.html, 'html'))
        msg.attach(alternative)

    # Only wrap the subject in a Header when an explicit charset was requested
    if self.charset:
        msg['Subject'] = Header(self.subject, encoding)
    else:
        msg['Subject'] = self.subject

    sender = self.sender or default_from
    if sender is not None:
        msg['From'] = sanitize_address(sender, encoding)

    # De-duplicate recipient addresses before joining
    msg['To'] = ', '.join(list(set(sanitize_addresses(self.recipients, encoding))))

    msg['Date'] = formatdate(self.date, localtime=True)
    # see RFC 5322 section 3.6.4.
    msg['Message-ID'] = self.msgId

    if self.cc:
        msg['Cc'] = ', '.join(list(set(sanitize_addresses(self.cc, encoding))))

    if self.reply_to:
        msg['Reply-To'] = sanitize_address(self.reply_to, encoding)

    if self.extra_headers:
        for k, v in self.extra_headers.items():
            msg[k] = v

    for attachment in attachments:
        f = MIMEBase(*attachment.content_type.split('/'))
        f.set_payload(attachment.data)
        encode_base64(f)
        try:
            # Probe whether the filename is pure ASCII; non-ASCII names need
            # the RFC 2231 (charset, language, value) encoding below
            attachment.filename and attachment.filename.encode('ascii')
        except UnicodeEncodeError:
            filename = attachment.filename
            if not PY3:
                filename = filename.encode('utf8')
            f.add_header('Content-Disposition', attachment.disposition,
                         filename=('UTF8', '', filename))
        else:
            f.add_header('Content-Disposition',
                         '%s;filename=%s' % (attachment.disposition, attachment.filename))

        for key, value in attachment.headers:
            f.add_header(key, value)

        msg.attach(f)

    return msg.as_string()
Checks for bad headers, i.e. newlines in subject, sender or recipients.
def has_bad_headers(self, default_from=None):
    """Checks for bad headers i.e. newlines in subject, sender or recipients.

    :param default_from: fallback sender used when the message has none
    :return: True if any checked header value contains CR or LF
    """
    # BUGFIX: guard against None values. Previously, a missing subject or a
    # missing sender (with no default_from) raised TypeError on `c in None`.
    sender = self.sender or default_from or ''
    reply_to = self.reply_to or ''
    for val in [self.subject or '', sender, reply_to] + self.recipients:
        for c in '\r\n':
            if c in val:
                return True
    return False
Adds an attachment to the message.
def attach(self, filename=None, content_type=None, data=None,
           disposition=None, headers=None):
    """Adds an attachment to the message.

    :param filename: filename of attachment
    :param content_type: file mimetype
    :param data: the raw file data
    :param disposition: content-disposition (if any)
    :param headers: extra headers for the attachment part
    """
    new_attachment = Attachment(filename, content_type, data, disposition, headers)
    self.attachments.append(new_attachment)
Records all messages. Use in unit tests for example::
def record_messages(self):
    """Records all messages. Use in unit tests for example::

        with mail.record_messages() as outbox:
            response = app.test_client.get("/email-sending-view/")
            assert len(outbox) == 1
            assert outbox[0].subject == "testing"

    You must have blinker installed in order to use this feature.
    :versionadded: 0.4
    """
    if not email_dispatched:
        raise RuntimeError("blinker must be installed")

    captured = []

    def _capture(message, mail):
        captured.append(message)

    # Subscribe for the lifetime of the `with` block only
    email_dispatched.connect(_capture)
    try:
        yield captured
    finally:
        email_dispatched.disconnect(_capture)
Register Services that can be accessed by this DAL. Upon registration the service is set up.
def register_services(self, **services):
    """
    Register Services that can be accessed by this DAL. Upon registration,
    each service is set up.

    :param services: keyword arguments mapping the name to register the
        Service under to the Service itself.
    :return: self, to allow chaining
    :raises AlreadyExistsException: if a name is already registered
    """
    for name, service in services.items():
        if name in self._services:
            raise AlreadyExistsException('A Service for {} is already registered.'.format(name))
        self._init_service(name, service)
    return self
: param middleware: Middleware in order of execution
def register_context_middleware(self, *middleware):
    """
    Register context middleware.

    :param middleware: Middleware in order of execution; each must be a
        generator callable.
    :raises Exception: if any middleware is not a generator callable
    """
    for candidate in middleware:
        if is_generator(candidate):
            continue
        raise Exception('Middleware {} must be a Python generator callable.'.format(candidate))
    self._middleware.extend(middleware)
Load a configuration module and return a Config
def from_module(module_name):
    """
    Load a configuration module and return a Config.

    Only UPPERCASE attributes of the module are considered settings.
    """
    mod = importlib.import_module(module_name)
    settings = {name: getattr(mod, name) for name in dir(mod) if name.isupper()}
    return Config(settings)
Register resources with the ResourceManager.
def register_resources(self, **resources):
    """
    Register resources with the ResourceManager.

    :param resources: keyword arguments mapping the name to register the
        resource under to the resource itself.
    :return: self, to allow chaining (consistent with ``register_services``)
    :raises AlreadyExistsException: if a name is already registered
    """
    for key, resource in resources.items():
        if key in self._resources:
            # BUGFIX: the message previously said "A Service for ..." (copy-paste
            # from register_services); this registers Resources
            raise AlreadyExistsException('A Resource for {} is already registered.'.format(key))
        self._init_resource(key, resource)
    return self
Raises an exception if value for key is empty.
def require(self, key):
    """
    Return the value for ``key``, raising ValueError if it is empty.
    """
    value = self.get(key)
    if value:
        return value
    raise ValueError('"{}" is empty.'.format(key))
Setup the context. Should only be called by __enter__ ing the context.
def _setup(self):
    """
    Setup the context. Should only be called by __enter__'ing the context.

    Pushes this context on the data manager's stack, runs the setup hook and
    advances every middleware generator to its first yield.

    :raises MiddlewareSetupException: if a middleware returns without yielding
    """
    self.data_manager.ctx_stack.push(self)
    self._setup_hook()
    middleware = self.data_manager.get_middleware(self)
    # Create each middleware generator
    # This just calls each middleware and passes it the current context.
    # The middleware should then yield once.
    self._middleware_generators = [
        (m, m(self)) for m in middleware
    ]
    for middleware, generator in self._middleware_generators:
        try:
            # BUGFIX: generator.next() is Python-2-only; the builtin next()
            # works on both Python 2.6+ and Python 3
            next(generator)
        except StopIteration:
            # Middleware didn't want to setup, but did not
            # raise an exception. Why not?
            raise MiddlewareSetupException('Middleware %s did not yield on setup.' % middleware)
Teardown a Resource or Middleware.
def _exit(self, obj, type, value, traceback): """ Teardown a Resource or Middleware. """ if type is None: # No in-context exception occurred try: obj.next() except StopIteration: # Resource closed as expected return else: raise RuntimeError('{} yielded more than once.'.format(obj)) else: # In-context exception occurred try: obj.throw(type, value, traceback) raise RuntimeError('{} did not close after throw()'.format(obj)) except StopIteration as exc: # Suppress the exception *unless* it's the same exception that # was passed to throw(). This prevents a StopIteration # raised inside the "with" statement from being suppressed return exc is not value except: # only re-raise if it's *not* the exception that was # passed to throw(), because __exit__() must not raise # an exception unless __exit__() itself failed. But # resource.throw() will raise the exception to signal propagation, # so this fixes the impedance mismatch between the throw() protocol # and the __exit__() protocol. # # Middleware or Resources that throw exceptions before yielding # will just rethrow the same exception here which is expected. They # won't have a chance to do anything about the exception though which # seems OK since they never got to the point of being ready anyway. if sys.exc_info()[1] is not value: raise
Hook to setup this service with a specific DataManager.
def setup(self, data_manager): """ Hook to setup this service with a specific DataManager. Will recursively setup sub-services. """ self._data_manager = data_manager if self._data_manager: self._dal = self._data_manager.get_dal() else: self._dal = None for key, service in self._services.items(): service.setup(self._data_manager)
The group index with respect to wavelength.
def ng(self, wavelength):
    '''
    The group index with respect to wavelength.

    Args:
        wavelength (float, list, None): The wavelength(s) the group
            index will be evaluated at.

    Returns:
        float, list: The group index at the target wavelength(s).
    '''
    # n_g = n - lambda * dn/dlambda, with the wavelength converted from nm to m
    first_derivative_term = (wavelength * 1.e-9) * self.nDer1(wavelength)
    return self.n(wavelength) - first_derivative_term
The group velocity dispersion ( GVD ) with respect to wavelength.
def gvd(self, wavelength):
    '''
    The group velocity dispersion (GVD) with respect to wavelength.

    Args:
        wavelength (float, list, None): The wavelength(s) the GVD will
            be evaluated at.

    Returns:
        float, list: The GVD at the target wavelength(s).
    '''
    # GVD = lambda^3 / (2 * pi * c^2) * d^2n/dlambda^2 (wavelength in metres)
    prefactor = (wavelength * 1.e-9)**3. / (2. * spc.pi * spc.c**2.)
    return prefactor * self.nDer2(wavelength)
Helpful function to evaluate Cauchy equations.
def _cauchy_equation(wavelength, coefficients): ''' Helpful function to evaluate Cauchy equations. Args: wavelength (float, list, None): The wavelength(s) the Cauchy equation will be evaluated at. coefficients (list): A list of the coefficients of the Cauchy equation. Returns: float, list: The refractive index at the target wavelength(s). ''' n = 0. for i, c in enumerate(coefficients): exponent = 2*i n += c / wavelength**exponent return n
Main function
def main():
    """
    Main entry point of the backend client.

    Parses the requested action (list/get/add/update/delete) from the
    BackendUpdate instance and exits with: 0 on success, 2 on action failure,
    64 on missing parameters.
    """
    bc = BackendUpdate()
    bc.initialize()
    logger.info("backend_client, version: %s", __version__)
    logger.debug("~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    success = False
    if bc.item_type and bc.action == 'list':
        success = bc.get_resource_list(bc.item_type, bc.item)
    if bc.item_type and bc.action == 'get':
        if bc.list:
            success = bc.get_resource_list(bc.item_type, bc.item)
        else:
            if not bc.item:
                # A 'get' on a single item requires a name
                logger.error("Can not %s a %s with no name!", bc.action, bc.item_type)
                # BUGFIX: fixed misspelled user-facing message ("Perharps")
                logger.error("Perhaps you missed some parameters, run 'alignak-backend-cli -h'")
                exit(64)
            success = bc.get_resource(bc.item_type, bc.item)
    if bc.action in ['add', 'update']:
        success = bc.create_update_resource(bc.item_type, bc.item, bc.action == 'update')
    if bc.action == 'delete':
        success = bc.delete_resource(bc.item_type, bc.item)
    if not success:
        logger.error("%s '%s' %s failed", bc.item_type, bc.item, bc.action)
        if not bc.verbose:
            logger.warning("Set verbose mode to have more information (-v)")
        exit(2)
    exit(0)
Login on backend with username and password
def initialize(self):  # pylint: disable=attribute-defined-outside-init
    """Login on backend with username and password

    Exits the process with code 1 when authentication fails (no token).
    On success, caches for later use: the logged-in user and its default
    realm, the top-level 'All' realm id, and the '24x7' / 'Never'
    timeperiod ids.

    :return: None
    """
    try:
        logger.info("Authenticating...")
        self.backend = Backend(self.backend_url)
        self.backend.login(self.username, self.password)
    except BackendException as exp:  # pragma: no cover, should never happen
        logger.exception("Exception: %s", exp)
        logger.error("Response: %s", exp.response)

    if self.backend.token is None:
        # No token after login means the credentials were refused
        print("Access denied!")
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print("Exiting with error code: 1")
        exit(1)

    logger.info("Authenticated.")

    # Logged-in user and default realm
    users = self.backend.get_all('user',
                                 {'where': json.dumps({'name': self.username})})
    self.logged_in_user = users['_items'][0]
    self.default_realm = self.logged_in_user['_realm']

    # Main realm: the level-0 realm named 'All'
    self.realm_all = None
    realms = self.backend.get_all('realm')
    for r in realms['_items']:
        if r['name'] == 'All' and r['_level'] == 0:
            self.realm_all = r['_id']
            logger.info("Found realm 'All': %s", self.realm_all)
        if r['_id'] == self.default_realm:
            logger.info("Found logged-in user realm: %s", r['name'])

    # Default timeperiods: always ('24x7') and never ('none'/'never')
    self.tp_always = None
    self.tp_never = None
    timeperiods = self.backend.get_all('timeperiod')
    for tp in timeperiods['_items']:
        if tp['name'] == '24x7':
            self.tp_always = tp['_id']
            logger.info("Found TP '24x7': %s", self.tp_always)
        if tp['name'].lower() == 'none' or tp['name'].lower() == 'never':
            self.tp_never = tp['_id']
            logger.info("Found TP 'Never': %s", self.tp_never)
Dump the data to a JSON formatted file: param data: data to be dumped: param filename: name of the file to use. Only the file name not the full path!: return: dumped file absolute file name
def file_dump(self, data, filename):  # pylint: disable=no-self-use
    """
    Dump the data to a JSON formatted file

    :param data: data to be dumped
    :param filename: name of the file to use. Only the file name, not the full path!
    :return: dumped file absolute file name, or None if the write failed
    """
    dump = json.dumps(data, indent=4, separators=(',', ': '), sort_keys=True)
    path = os.path.join(self.folder or os.getcwd(), filename)
    try:
        # BUGFIX: use a context manager so the file handle is closed even if
        # write() raises (the previous open/write/close leaked the handle on error)
        with open(path, "wt") as dfile:
            dfile.write(dump)
        return path
    except (OSError, IndexError) as exp:  # pragma: no cover, should never happen
        logger.exception("Error when writing the list dump file %s : %s", path, str(exp))
    return None
Get a specific resource list
def get_resource_list(self, resource_name, name=''):  # pylint: disable=too-many-locals, too-many-nested-blocks
    """Get a specific resource list

    If name is not None, it may be a request to get the list of the services of an host
    (name formatted as 'host/service').

    :param resource_name: backend endpoint to list (host, service, user, ...)
    :param name: optional 'host/service' selector when listing services of one host
    :return: True on success (even for an empty list), False on error
    """
    try:
        logger.info("Trying to get %s list", resource_name)

        params = {}
        # Templated resources are filtered on the template flag
        if resource_name in ['host', 'service', 'user']:
            params = {'where': json.dumps({'_is_template': self.model})}
        if resource_name == 'service' and name and '/' in name:
            splitted_name = name.split('/')
            # Get host from name
            response2 = self.backend.get(
                'host', params={'where': json.dumps({'name': splitted_name[0],
                                                     '_is_template': self.model})})
            if response2['_items']:
                host = response2['_items'][0]
                logger.info("Got host '%s' for the service '%s'",
                            splitted_name[0], splitted_name[1])
            else:
                logger.warning("Not found host '%s'!", splitted_name[0])
                return False
            # Restrict the service list to the services of that host
            params = {'where': json.dumps({'host': host['_id']})}

        # Ask the backend to embed linked resources when requested
        if self.embedded and resource_name in self.embedded_resources:
            params.update({'embedded': json.dumps(self.embedded_resources[resource_name])})

        rsp = self.backend.get_all(resource_name, params=params)
        if rsp['_items'] and rsp['_status'] == 'OK':
            response = rsp['_items']

            logger.info("-> found %ss", resource_name)

            # Exists in the backend, we got the element
            if not self.dry_run:
                logger.info("-> dumping %ss list", resource_name)
                for item in response:
                    # Filter fields prefixed with an _ (internal backend fields)
                    for field in list(item):
                        if field in ['_created', '_updated', '_etag', '_links', '_status']:
                            item.pop(field)
                            continue

                        # Filter fields prefixed with an _ in embedded items
                        if self.embedded and resource_name in self.embedded_resources and \
                                field in self.embedded_resources[resource_name]:
                            # Embedded items may be a list or a simple dictionary,
                            # always make it a list
                            embedded_items = item[field]
                            if not isinstance(item[field], list):
                                embedded_items = [item[field]]
                            # Filter fields in each embedded item
                            for embedded_item in embedded_items:
                                if not embedded_item:
                                    continue
                                for embedded_field in list(embedded_item):
                                    if embedded_field.startswith('_'):
                                        embedded_item.pop(embedded_field)

                filename = self.file_dump(response,
                                          'alignak-%s-list-%ss.json'
                                          % ('model' if self.model else 'object',
                                             resource_name))
                if filename:
                    logger.info("-> dumped %ss list to %s", resource_name, filename)
            else:
                logger.info("Dry-run mode: should have dumped an %s list", resource_name)

            return True
        else:
            # Empty list is not an error: an empty dump file is still written
            logger.warning("-> %s list is empty", resource_name)
            if not self.dry_run:
                logger.info("-> dumping %ss list", resource_name)
                filename = self.file_dump([],
                                          'alignak-%s-list-%ss.json'
                                          % ('model' if self.model else 'object',
                                             resource_name))
                if filename:
                    logger.info("-> dumped %ss list to %s", resource_name, filename)
            return True
    except BackendException as exp:  # pragma: no cover, should never happen
        logger.exception("Exception: %s", exp)
        logger.error("Response: %s", exp.response)
        print("Get error for '%s' list" % (resource_name))
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print("Exiting with error code: 5")
        return False
Get a specific resource by name
def get_resource(self, resource_name, name):  # pylint: disable=too-many-locals, too-many-nested-blocks
    """Get a specific resource by name

    For hosts, a 'host/service'-like name with a trailing '/' part requests the
    host plus all its services. For services, 'host/service' names the service
    of a given host.

    :param resource_name: backend endpoint (host, service, user, ...)
    :param name: name of the resource, possibly 'host/service' formatted
    :return: True on success, False when not found or on error
    """
    try:
        logger.info("Trying to get %s: '%s'", resource_name, name)

        # 'host/...' on a host means: also fetch the host's services
        services_list = False
        if resource_name == 'host' and '/' in name:
            splitted_name = name.split('/')
            services_list = True
            name = splitted_name[0]

        params = {'where': json.dumps({'name': name})}
        if resource_name in ['host', 'service', 'user']:
            params = {'where': json.dumps({'name': name, '_is_template': self.model})}
        if resource_name == 'service' and '/' in name:
            splitted_name = name.split('/')
            # new_name = splitted_name[0] + '_' + splitted_name[1]
            # name = splitted_name[1]
            # Get host from name
            response2 = self.backend.get(
                'host', params={'where': json.dumps({'name': splitted_name[0]})})
            if response2['_items']:
                host = response2['_items'][0]
                logger.info("Got host '%s' for the service '%s'",
                            splitted_name[0], splitted_name[1])
            else:
                logger.warning("Not found host '%s'!", splitted_name[0])
                return False
            # The service is uniquely identified by (name, host)
            params = {'where': json.dumps({'name': splitted_name[1],
                                           'host': host['_id'],
                                           '_is_template': self.model})}

        if self.embedded and resource_name in self.embedded_resources:
            params.update({'embedded': json.dumps(self.embedded_resources[resource_name])})

        response = self.backend.get(resource_name, params=params)
        if response['_items']:
            response = response['_items'][0]

            logger.info("-> found %s '%s': %s", resource_name, name, response['_id'])

            if services_list:
                # Get services for the host
                params = {'where': json.dumps({'host': response['_id']})}
                if self.embedded and 'service' in self.embedded_resources:
                    params.update(
                        {'embedded': json.dumps(self.embedded_resources['service'])})
                response2 = self.backend.get('service', params=params)
                if response2['_items']:
                    # Attach the services under a pseudo '_services' field
                    response['_services'] = response2['_items']
                    logger.info("Got %d services for host '%s'",
                                len(response2['_items']), splitted_name[0])
                else:
                    logger.warning("Not found host '%s'!", splitted_name[0])
                    return False

            # Exists in the backend, we got the element
            if not self.dry_run:
                logger.info("-> dumping %s: %s", resource_name, name)

                # Filter fields prefixed with an _ (internal backend fields)
                for field in list(response):
                    if field in ['_created', '_updated', '_etag', '_links', '_status']:
                        response.pop(field)
                        continue

                    # Filter fields prefixed with an _ in embedded items
                    if self.embedded and resource_name in self.embedded_resources and \
                            field in self.embedded_resources[resource_name]:
                        logger.info("-> embedded %s", field)
                        # Embedded items may be a list or a simple dictionary,
                        # always make it a list
                        embedded_items = response[field]
                        if not isinstance(response[field], list):
                            embedded_items = [response[field]]
                        # Filter fields in each embedded item
                        for embedded_item in embedded_items:
                            if not embedded_item:
                                continue
                            for embedded_field in list(embedded_item):
                                if embedded_field.startswith('_'):
                                    embedded_item.pop(embedded_field)

                dump = json.dumps(response, indent=4,
                                  separators=(',', ': '), sort_keys=True)
                if not self.quiet:
                    print(dump)

                # Dump file name uses 'host_service' instead of 'host/service'
                if resource_name == 'service' and '/' in name:
                    name = splitted_name[0] + '_' + splitted_name[1]
                filename = self.file_dump(response,
                                          'alignak-object-dump-%s-%s.json'
                                          % (resource_name, name))
                if filename:
                    logger.info("-> dumped %s '%s' to %s", resource_name, name, filename)

                logger.info("-> dumped %s: %s", resource_name, name)
            else:
                if resource_name == 'service' and '/' in name:
                    name = splitted_name[0] + '_' + splitted_name[1]
                logger.info("Dry-run mode: should have dumped an %s '%s'",
                            resource_name, name)

            return True
        else:
            logger.warning("-> %s '%s' not found", resource_name, name)
            return False
    except BackendException as exp:  # pragma: no cover, should never happen
        logger.exception("Exception: %s", exp)
        logger.error("Response: %s", exp.response)
        print("Get error for '%s' : %s" % (resource_name, name))
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print("Exiting with error code: 5")
        return False
Delete a specific resource by name
def delete_resource(self, resource_name, name):
    """Delete a specific resource by name

    When name is None, ALL resources of that type are deleted. For services,
    a 'host/service' name targets one service of a host and 'host/*' targets
    every service of that host.

    :param resource_name: backend endpoint (host, service, user, ...)
    :param name: name of the resource to delete, or None to delete all
    :return: True on success, False when not found or on error
    """
    try:
        logger.info("Trying to get %s: '%s'", resource_name, name)

        if name is None:
            # No name is defined, delete all the resources...
            if not self.dry_run:
                headers = {
                    'Content-Type': 'application/json'
                }
                logger.info("-> deleting all %s", resource_name)
                self.backend.delete(resource_name, headers)
                logger.info("-> deleted all %s", resource_name)
            else:
                response = {'_id': '_fake', '_etag': '_fake'}
                logger.info("Dry-run mode: should have deleted all %s", resource_name)
        else:
            params = {'where': json.dumps({'name': name})}
            if resource_name in ['host', 'service', 'user']:
                params = {'where': json.dumps({'name': name, '_is_template': self.model})}
            if resource_name == 'service' and '/' in name:
                splitted_name = name.split('/')
                name = splitted_name[0] + '_' + splitted_name[1]

                # Get host from name
                response2 = self.backend.get(
                    'host', params={'where': json.dumps({'name': splitted_name[0]})})
                if response2['_items']:
                    host = response2['_items'][0]
                    logger.info("Got host '%s' for the service '%s'",
                                splitted_name[0], splitted_name[1])
                else:
                    logger.warning("Not found host '%s'!", splitted_name[0])
                    return False

                # 'host/*' selects every service of the host
                if splitted_name[1] == '*':
                    params = {'where': json.dumps({'host': host['_id']})}
                else:
                    params = {'where': json.dumps({'name': splitted_name[1],
                                                   'host': host['_id']})}

            response = self.backend.get_all(resource_name, params=params)
            if response['_items']:
                logger.info("-> found %d matching %s",
                            len(response['_items']), resource_name)
                for item in response['_items']:
                    logger.info("-> found %s '%s': %s", resource_name, name, item['name'])

                    # Exists in the backend, we must delete the element...
                    if not self.dry_run:
                        # If-Match with the item's etag is required by the backend
                        headers = {
                            'Content-Type': 'application/json',
                            'If-Match': item['_etag']
                        }
                        logger.info("-> deleting %s: %s", resource_name, item['name'])
                        self.backend.delete(resource_name + '/' + item['_id'], headers)
                        logger.info("-> deleted %s: %s", resource_name, item['name'])
                    else:
                        response = {'_id': '_fake', '_etag': '_fake'}
                        logger.info("Dry-run mode: should have deleted an %s '%s'",
                                    resource_name, name)
                        logger.info("-> deleted: '%s': %s", resource_name, item['_id'])
            else:
                logger.warning("-> %s item '%s' not found", resource_name, name)
                return False
    except BackendException as exp:  # pragma: no cover, should never happen
        logger.exception("Exception: %s", exp)
        logger.error("Response: %s", exp.response)
        print("Deletion error for '%s' : %s" % (resource_name, name))
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print("Exiting with error code: 5")
        return False

    return True
Create or update a specific resource
def create_update_resource(self, resource_name, name, update=False):
    # pylint: disable=too-many-return-statements, too-many-locals
    # pylint: disable=too-many-nested-blocks
    """Create or update a specific resource

    Data may come from the command line / a JSON file (``self.data``) or from
    stdin, and may be a single item or a list of items. Item fields that look
    like links to other backend objects (periods, commands, users, ...) are
    resolved to their backend ``_id``. Provided templates (``self.templates``)
    are attached to each created/updated item.

    :param resource_name: backend resource endpoint (eg. host, user, ...)
    :param name: name of the resource to create/update
    :param update: True to update an existing resource, else will try to create
    :return: True if every provided item was created/updated, else False
    """
    if self.data is None:
        self.data = {}

    # If some data are provided, try to get them
    json_data = None
    if self.data:
        try:
            # Data may be provided on the command line or from a file
            if self.data == 'stdin':
                input_file = sys.stdin
            else:
                path = os.path.join(self.folder or os.getcwd(), self.data)
                input_file = open(path)
            json_data = json.load(input_file)
            logger.info("Got provided data: %s", json_data)
            if input_file is not sys.stdin:
                input_file.close()
        except IOError:
            logger.error("Error reading data file: %s", path)
            return False
        except ValueError:
            # json.load raises ValueError on malformed JSON
            logger.error("Error malformed data file: %s", path)
            return False

    if name is None and json_data is None:
        logger.error("-> can not add/update a %s without a name and/or data!", resource_name)
        return False

    # Manage provided templates
    used_templates = []
    if self.templates is not None:
        logger.info("Searching the %s template(s): %s", resource_name, self.templates)
        for template in self.templates:
            response = self.backend.get(
                resource_name,
                params={'where': json.dumps({'name': template, '_is_template': True})})
            if response['_items']:
                used_templates.append(response['_items'][0]['_id'])
                logger.info("-> found %s template '%s': %s",
                            resource_name, template, response['_items'][0]['_id'])
            else:
                logger.error("-> %s required template not found '%s'",
                             resource_name, template)
                return False

    try:
        if json_data is None:
            # No data provided: create/update a bare named item
            json_data = {'name': name}
        if not isinstance(json_data, list):
            json_data = [json_data]
        logger.info("Got %d %ss", len(json_data), resource_name)

        count = 0
        for json_item in json_data:
            logger.info("-> json item: %s", json_item)

            # Some endpoints do not require a name; all others do
            if resource_name not in ['history', 'userrestrictrole', 'logcheckresult'] \
                    and name is None and ('name' not in json_item or not json_item['name']):
                logger.warning("-> unnamed '%s'!", resource_name)
                continue

            # Manage resource name
            item_name = name
            if 'name' in json_item:
                item_name = json_item['name']

            # Got the item name
            params = {'name': item_name}
            if resource_name == 'service' and 'host' in json_item:
                # Get host from name
                host_search = {'name': json_item['host']}
                if '_is_template' in json_item:
                    host_search.update({'_is_template': json_item['_is_template']})
                logger.info("Host search: %s", host_search)
                resp_host = self.backend.get(
                    'host', params={'where': json.dumps(host_search)})
                if resp_host['_items']:
                    host = resp_host['_items'][0]
                    logger.info("Got host '%s' for the service '%s'",
                                host['name'], item_name)
                else:
                    logger.warning("Host not found: '%s' for the service: %s!",
                                   json_item['host'], item_name)
                    continue
                params = {'name': item_name, 'host': host['_id']}
            if resource_name == 'service' and '/' in item_name:
                # Composite service name: "host/service"
                splitted_name = item_name.split('/')
                # Get host from name
                host_search = {'name': splitted_name[0]}
                if '_is_template' in json_item:
                    host_search.update({'_is_template': json_item['_is_template']})
                resp_host = self.backend.get(
                    'host', params={'where': json.dumps(host_search)})
                if resp_host['_items']:
                    host = resp_host['_items'][0]
                    logger.info("Got host '%s' for the service '%s'",
                                splitted_name[0], splitted_name[1])
                else:
                    logger.warning("Host not found: '%s' for the service: %s!",
                                   splitted_name[0], item_name)
                    continue
                item_name = splitted_name[1]
                params = {'name': item_name, 'host': host['_id']}
            if '_is_template' in json_item:
                params.update({'_is_template': json_item['_is_template']})
            params = {'where': json.dumps(params)}

            if name:
                logger.info("Trying to get %s: '%s', params: %s",
                            resource_name, item_name, params)
            response = self.backend.get(resource_name, params=params)
            if response['_items']:
                found_item = response['_items'][0]
                found_id = found_item['_id']
                found_etag = found_item['_etag']
                logger.info("-> found %s '%s': %s", resource_name, item_name, found_id)
                if not update:
                    logger.warning("-> '%s' %s cannot be created because it already "
                                   "exists!", resource_name, item_name)
                    continue
            else:
                if update:
                    logger.warning("-> '%s' %s cannot be updated because it does not "
                                   "exist!", resource_name, item_name)
                    continue

            # Item data updated with provided information if some
            # Data to update
            item_data = {}
            if self.include_read_data:
                # Include read data if required
                item_data = found_item
            # Json provided data update existing data
            item_data.update(json_item)
            # Name is also updated (eg. for a service...)
            item_data['name'] = item_name

            # Template information if templating is required
            if used_templates:
                item_data.update({'_templates': used_templates,
                                  '_templates_with_services': True})

            for field in item_data.copy():
                logger.debug("Field: %s = %s", field, item_data[field])
                # Filter Eve extra fields
                if field in ['_created', '_updated', '_etag', '_links', '_status']:
                    item_data.pop(field)
                    continue
                # Filter specific backend inner computed fields
                # pylint: disable=fixme
                # todo: list to be completed!
                if field in ['_overall_state_id']:
                    item_data.pop(field)
                    continue

                # Manage potential object link fields
                if field not in ['realm', '_realm', '_templates', 'command', 'host',
                                 'service', 'escalation_period', 'maintenance_period',
                                 'snapshot_period', 'check_period', 'dependency_period',
                                 'notification_period', 'host_notification_period',
                                 'escalation_period', 'service_notification_period',
                                 'host_notification_commands',
                                 'service_notification_commands',
                                 'service_dependencies', 'users', 'usergroups',
                                 'check_command', 'event_handler', 'grafana', 'statsd']:
                    continue

                field_values = item_data[field]
                if not isinstance(item_data[field], list):
                    field_values = [item_data[field]]

                found = None
                for value in field_values:
                    logger.debug(" - %s, single value: %s", field, value)
                    try:
                        # A value that parses as hex is considered an already
                        # resolved backend _id and kept as-is
                        int(value, 16)
                        logger.debug(" - %s, uuid value: %s", field, value)
                        if not isinstance(item_data[field], list):
                            found = value
                        else:
                            if found is None:
                                found = []
                            found.append(value)
                    except TypeError:
                        pass
                    except ValueError:
                        # Not an integer, consider an item name
                        field_params = {'where': json.dumps({'name': value})}
                        logger.debug(" - %s, params: %s", field, field_params)
                        # Map the link field to its backend endpoint
                        if field in ['escalation_period', 'maintenance_period',
                                     'snapshot_period', 'check_period',
                                     'dependency_period', 'notification_period',
                                     'host_notification_period',
                                     'service_notification_period']:
                            response2 = self.backend.get('timeperiod', params=field_params)
                        elif field in ['_realm']:
                            response2 = self.backend.get('realm', params=field_params)
                        elif field in ['service_dependencies']:
                            response2 = self.backend.get('service', params=field_params)
                        elif field in ['users']:
                            response2 = self.backend.get('user', params=field_params)
                        elif field in ['usergroups']:
                            response2 = self.backend.get('usergroup', params=field_params)
                        elif field in ['check_command', 'event_handler',
                                       'service_notification_commands',
                                       'host_notification_commands']:
                            response2 = self.backend.get('command', params=field_params)
                        elif field in ['_templates']:
                            field_params = {'where': json.dumps({'name': value,
                                                                 '_is_template': True})}
                            response2 = self.backend.get(resource_name, params=field_params)
                        else:
                            response2 = self.backend.get(field, params=field_params)

                        if response2['_items']:
                            response2 = response2['_items'][0]
                            logger.info("Replaced %s = %s with found item _id",
                                        field, value)
                            if not isinstance(item_data[field], list):
                                found = response2['_id']
                            else:
                                if found is None:
                                    found = []
                                found.append(response2['_id'])

                if found is None:
                    # No value could be resolved: drop the field entirely
                    logger.warning("Not found %s = %s, removing field!",
                                   field, field_values)
                    item_data.pop(field)
                else:
                    item_data[field] = found

            if resource_name not in ['realm'] and '_realm' not in item_data:
                logger.info("add default realm to the data")
                item_data.update({'_realm': self.default_realm})
            if resource_name in ['realm'] and '_realm' not in item_data:
                logger.info("add parent realm to the data")
                item_data.update({'_parent': self.default_realm})

            if '_id' in item_data:
                item_data.pop('_id')

            if not update:
                # Trying to create a new element
                if not item_data['name']:
                    item_data.pop('name')
                logger.info("-> trying to create the %s: %s.", resource_name, item_name)
                logger.debug("-> with: %s.", item_data)
                if not self.dry_run:
                    try:
                        response = self.backend.post(resource_name, item_data, headers=None)
                    except BackendException as exp:
                        self.item = item_name
                        logger.error("Exception: %s", exp)
                        # logger.error("Response: %s", exp.response)
                        continue
                else:
                    response = {'_status': 'OK', '_id': '_fake', '_etag': '_fake'}
            else:
                if not name:
                    logger.warning("-> can not update '%s' with no name!", resource_name)
                    continue
                # Trying to update an element
                logger.info("-> trying to update the %s: %s.", resource_name, item_name)
                logger.debug("-> with: %s.", item_data)
                if not self.dry_run:
                    try:
                        headers = {'Content-Type': 'application/json',
                                   'If-Match': found_etag}
                        response = self.backend.patch(resource_name + '/' + found_id,
                                                      item_data, headers=headers,
                                                      inception=True)
                    except BackendException as exp:
                        self.item = item_name
                        logger.exception("Exception: %s", exp)
                        # logger.error("Response: %s", exp.response)
                        continue
                else:
                    response = {'_status': 'OK', '_id': '_fake', '_etag': '_fake'}

            if response['_status'] == 'ERR':
                logger.warning("Response: %s", response)
                return False

            if not update:
                # Created a new element
                if not self.dry_run:
                    logger.info("-> created: '%s': %s", resource_name, response['_id'])
                else:
                    logger.info("Dry-run mode: should have created an %s '%s'",
                                resource_name, name)
            else:
                # Updated an element
                if not self.dry_run:
                    logger.info("-> updated: '%s': %s", resource_name, response['_id'])
                else:
                    logger.info("Dry-run mode: should have updated an %s '%s'",
                                resource_name, name)
            count = count + 1
    except BackendException as exp:  # pragma: no cover, should never happen
        logger.exception("Exception: %s", exp)
        logger.error("Response: %s", exp.response)
        print("Creation/update error for '%s' : %s" % (resource_name, name))
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print("Exiting with error code: 5")
        return False

    # Success only when every provided item was handled
    if count == len(json_data):
        return True
    return False
Returns the response from the requested endpoint with the requested method. :param method: str, one of the methods accepted by Requests ('POST', 'GET', ...). :param endpoint: str, the relative endpoint to access. :param params: (optional) dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) JSON to send in the body of the :class:`Request`. :param headers: (optional) dictionary of HTTP headers to send with the :class:`Request`. :return: Requests.response
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None): # pylint: disable=too-many-arguments """ Returns the response from the requested endpoint with the requested method :param method: str. one of the methods accepted by Requests ('POST', 'GET', ...) :param endpoint: str. the relative endpoint to access :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :return: Requests.response """ logger.debug("Parameters for get_response:") logger.debug("\t - endpoint: %s", endpoint) logger.debug("\t - method: %s", method) logger.debug("\t - headers: %s", headers) logger.debug("\t - json: %s", json) logger.debug("\t - params: %s", params) logger.debug("\t - data: %s", data) url = self.get_url(endpoint) # First stage. Errors are connection errors (timeout, no session, ...) try: response = self.session.request(method=method, url=url, headers=headers, json=json, params=params, data=data, proxies=self.proxies, timeout=self.timeout) logger.debug("response headers: %s", response.headers) logger.debug("response content: %s", response.content) except RequestException as e: response = {"_status": "ERR", "_error": {"message": e, "code": BACKEND_ERROR}, "_issues": {"message": e, "code": BACKEND_ERROR}} raise BackendException(code=BACKEND_ERROR, message=e, response=response) else: return response
Decodes and returns the response as JSON (dict), or raises BackendException. :param response: requests.response object. :return: dict
def decode(response): """ Decodes and returns the response as JSON (dict) or raise BackendException :param response: requests.response object :return: dict """ # Second stage. Errors are backend errors (bad login, bad url, ...) try: response.raise_for_status() except requests.HTTPError as e: raise BackendException(code=response.status_code, message=e, response=response) else: resp_json = response.json() # Catch errors not sent in a HTTP error error = resp_json.get('_error', None) if error: raise BackendException(code=error['code'], message=error['message'], response=response) return resp_json
Set token in authentication for next requests. :param token: str, token to set in auth; if None, reinitialize auth
def set_token(self, token): """ Set token in authentification for next requests :param token: str. token to set in auth. If None, reinit auth """ if token: auth = HTTPBasicAuth(token, '') self._token = token self.authenticated = True # TODO: Remove this parameter self.session.auth = auth logger.debug("Using session token: %s", token) else: self._token = None self.authenticated = False self.session.auth = None logger.debug("Session token/auth reinitialised")
Log into the backend and get the token
def login(self, username, password, generate='enabled', proxies=None):
    """
    Log into the backend and get the token

    generate parameter may have following values:
    - enabled: require current token (default)
    - force: force new token generation
    - disabled

    if login is:
    - accepted, returns True
    - refused, returns False

    In case of any error, raises a BackendException

    :param username: login name
    :type username: str
    :param password: password
    :type password: str
    :param generate: Can have these values: enabled | force | disabled
    :type generate: str
    :param proxies: dict of proxy (http and / or https)
    :type proxies: dict
    :return: return True if authentication is successfull, otherwise False
    :rtype: bool
    """
    logger.debug("login for: %s with generate: %s", username, generate)

    if not username or not password:
        raise BackendException(BACKEND_ERROR, "Missing mandatory parameters")

    if proxies:
        # Validate proxy protocols with an explicit check: the previous
        # `assert` would be silently stripped when running python with -O
        for key in proxies.keys():
            if key not in PROXY_PROTOCOLS:
                raise BackendException(BACKEND_ERROR, "Wrong proxy protocol ", key)
        self.proxies = proxies

    endpoint = 'login'
    # Named `credentials` (not `json`) to avoid shadowing the json module
    credentials = {u'username': username, u'password': password}
    if generate == 'force':
        credentials['action'] = 'generate'
        logger.debug("Asking for generating new token")

    response = self.get_response(method='POST', endpoint=endpoint, json=credentials)
    if response.status_code == 401:
        # NOTE(review): this log line includes the password - consider redacting
        logger.error("Backend refused login with params %s", credentials)
        self.set_token(token=None)
        return False
    resp = self.decode(response=response)

    if 'token' in resp:
        self.set_token(token=resp['token'])
        return True

    if generate == 'force':  # pragma: no cover - need specific backend tests
        self.set_token(token=None)
        raise BackendException(BACKEND_ERROR, "Token not provided")
    if generate == 'disabled':  # pragma: no cover - need specific backend tests
        logger.error("Token disabled ... to be implemented!")
        return False
    if generate == 'enabled':  # pragma: no cover - need specific backend tests
        logger.warning("Token enabled, but none provided, require new token generation")
        # Retry once, forcing the backend to generate a new token
        return self.login(username, password, 'force')

    return False
Connect to alignak backend and retrieve all available child endpoints of root
def get_domains(self): """ Connect to alignak backend and retrieve all available child endpoints of root If connection is successful, returns a list of all the resources available in the backend: Each resource is identified with its title and provides its endpoint relative to backend root endpoint.:: [ {u'href': u'loghost', u'title': u'loghost'}, {u'href': u'escalation', u'title': u'escalation'}, ... ] If an error occurs a BackendException is raised. If an exception occurs, it is raised to caller. :return: list of available resources :rtype: list """ resp = self.get('') if "_links" in resp: _links = resp["_links"] if "child" in _links: return _links["child"] return {}
Get all items in the specified endpoint of alignak backend
def get_all(self, endpoint, params=None):
    # pylint: disable=too-many-locals
    """
    Get all items in the specified endpoint of alignak backend

    If an error occurs, a BackendException is raised.

    If the max_results parameter is not specified in parameters, it is set to
    BACKEND_PAGINATION_LIMIT (backend maximum value) to limit requests number.

    When ``self.processes`` > 1, pages are fetched in parallel with one
    multiprocessing.Process per chunk of pages; otherwise pages are walked
    sequentially following the '_links.next' pagination link.

    This method builds a response that always contains: _items and _status::

        {
            u'_items': [
                ...
            ],
            u'_status': u'OK'
        }

    :param endpoint: endpoint (API URL) relative from root endpoint
    :type endpoint: str
    :param params: list of parameters for the backend API
    :type params: dict
    :return: dict of properties
    :rtype: dict
    """
    # Set max results at maximum value supported by the backend to limit
    # requests number
    if not params:
        params = {'max_results': BACKEND_PAGINATION_LIMIT}
    elif params and 'max_results' not in params:
        params['max_results'] = BACKEND_PAGINATION_LIMIT

    # Get first page
    last_page = False
    items = []
    if self.processes == 1:
        while not last_page:
            # Get elements ...
            resp = self.get(endpoint=endpoint, params=params)
            # Response contains:
            # _items:
            # ...
            # _links:
            # self, parent, prev, last, next
            # _meta:
            # - max_results, total, page
            if 'next' in resp['_links']:
                # Go to next page ...
                params['page'] = int(resp['_meta']['page']) + 1
                params['max_results'] = int(resp['_meta']['max_results'])
            else:
                last_page = True
            items.extend(resp['_items'])
    else:
        def get_pages(endpoint, params, pages, out_q):
            """
            Function to get pages loaded by multiprocesses

            :param endpoint: endpoint to get data
            :type endpoint: string
            :param params: parameters for get request
            :type params: dict
            :param pages: range of pages to get
            :type pages: list
            :param out_q: Queue object
            :type out_q: multiprocessing.Queue
            :return: None
            """
            multi_items = []
            for page in pages:
                params['page'] = page
                resp = self.get(endpoint, params)
                multi_items.extend(resp['_items'])
            out_q.put(multi_items)

        # Get first page to learn the total number of pages
        resp = self.get(endpoint, params)
        number_pages = int(math.ceil(
            float(resp['_meta']['total']) / float(resp['_meta']['max_results'])))

        out_q = multiprocessing.Queue()
        # Split the page range into one chunk per worker process
        chunksize = int(math.ceil(number_pages / float(self.processes)))
        procs = []
        for i in range(self.processes):
            begin = i * chunksize
            end = begin + chunksize
            if end > number_pages:
                end = number_pages
            # Pages are 1-based: shift the 0-based chunk bounds
            begin += 1
            end += 1
            p = multiprocessing.Process(target=get_pages,
                                        args=(endpoint, params, range(begin, end), out_q))
            procs.append(p)
            p.start()

        # Collect all results into a single result dict. We know how many
        # dicts with results to expect.
        for i in range(self.processes):
            items.extend(out_q.get())

        # Wait for all worker processes to finish
        for p in procs:
            p.join()

    return {
        '_items': items,
        '_status': 'OK'
    }
Method to update an item
def patch(self, endpoint, data, headers=None, inception=False):
    """
    Method to update an item

    The headers must include an If-Match containing the object _etag.
        headers = {'If-Match': contact_etag}

    The data dictionary contain the fields that must be modified.

    If the patching fails because the _etag object do not match with the
    provided one, a BackendException is raised with code = 412.

    If inception is True, this method makes a new get request on the endpoint
    to refresh the _etag and then a new patch is called.

    If an HTTP 412 error occurs, a BackendException is raised. This exception is:
    - code: 412
    - message: response content
    - response: backend response

    All other HTTP error raises a BackendException.
    If some _issues are provided by the backend, this exception is:
    - code: HTTP error code
    - message: response content
    - response: JSON encoded backend response (including '_issues' dictionary ...)

    If no _issues are provided and an _error is signaled by the backend,
    this exception is:
    - code: backend error code
    - message: backend error message
    - response: JSON encoded backend response

    :param endpoint: endpoint (API URL)
    :type endpoint: str
    :param data: properties of item to update
    :type data: dict
    :param headers: headers (example: Content-Type). 'If-Match' required
    :type headers: dict
    :param inception: if True tries to get the last _etag
    :type inception: bool
    :return: dictionary containing patch response from the backend
    :rtype: dict
    """
    if not headers:
        raise BackendException(BACKEND_ERROR,
                               "Header If-Match required for patching an object")

    response = self.get_response(method='PATCH', endpoint=endpoint,
                                 json=data, headers=headers)

    if response.status_code == 200:
        return self.decode(response=response)

    if response.status_code == 412:
        # 412 means Precondition failed, but confirm ...
        if inception:
            # update etag and retry to patch (retry exactly once:
            # inception=False on the recursive call)
            resp = self.get(endpoint)
            headers = {'If-Match': resp['_etag']}
            return self.patch(endpoint, data=data, headers=headers, inception=False)

        raise BackendException(response.status_code, response.content)
    else:  # pragma: no cover - should never occur
        raise BackendException(response.status_code, response.content)
Method to delete an item or all items
def delete(self, endpoint, headers): """ Method to delete an item or all items headers['If-Match'] must contain the _etag identifier of the element to delete :param endpoint: endpoint (API URL) :type endpoint: str :param headers: headers (example: Content-Type) :type headers: dict :return: response (deletion information) :rtype: dict """ response = self.get_response(method='DELETE', endpoint=endpoint, headers=headers) logger.debug("delete, response: %s", response) if response.status_code != 204: # pragma: no cover - should not happen ... resp = self.decode(response=response) resp = {"_status": "OK"} return resp
Returns True if path1 and path2 refer to the same file.
def samefile(path1, path2): """ Returns True if path1 and path2 refer to the same file. """ # Check if both are on the same volume and have the same file ID info1 = fs.getfileinfo(path1) info2 = fs.getfileinfo(path2) return (info1.dwVolumeSerialNumber == info2.dwVolumeSerialNumber and info1.nFileIndexHigh == info2.nFileIndexHigh and info1.nFileIndexLow == info2.nFileIndexLow)
Given a path, return a pair containing a new REPARSE_DATA_BUFFER and the length of the buffer (not necessarily the same as sizeof, due to packing issues). If no path is provided, the maximum length is assumed.
def new_junction_reparse_buffer(path=None):
    """
    Given a path, return a pair containing a new REPARSE_DATA_BUFFER and the
    length of the buffer (not necessarily the same as sizeof due to packing
    issues).

    If no path is provided, the maximum length is assumed.

    :param path: substitute-name path to embed, or None for a maximum-size buffer
    :return: (buffer, total length in bytes including the reparse header)
    """
    if path is None:
        # The maximum reparse point data buffer length is 16384 bytes. We are a
        # bit conservative here and set a length of 16000 bytes (8000
        # characters) + a few more for the header.
        substnamebufferchars = 8000
    else:
        # 1 more character for the null terminator. Python 2.x calculates
        # len(surrogate pair) = 2, so multiplying this by 2 is the right thing
        # to do.
        substnamebufferchars = len(path) + 1

    # It is amazing how ugly MSDN's version of REPARSE_DATA_BUFFER is:
    # <http://msdn.microsoft.com/en-us/library/windows/hardware/ff552012>. It
    # is a variable-length struct with two strings in the wchar[] buffer at
    # the end. Both are supposed to be null-terminated, and the individual
    # lengths do not include that of the null character, but the total
    # ReparseDataLength does.
    #
    # In our case, only the SubstituteName part of the mount point/junction-
    # specific part is relevant. So we set PrintNameLength to 0, but we still
    # need to allow for one null character, so PrintNameBuffer has length 1.
    class REPARSE_DATA_BUFFER(ctypes.Structure):
        _fields_ = [("ReparseTag", ctypes.c_ulong),
                    ("ReparseDataLength", ctypes.c_ushort),
                    ("Reserved", ctypes.c_ushort),
                    ("SubstituteNameOffset", ctypes.c_ushort),
                    ("SubstituteNameLength", ctypes.c_ushort),
                    ("PrintNameOffset", ctypes.c_ushort),
                    ("PrintNameLength", ctypes.c_ushort),
                    ("SubstituteNameBuffer", ctypes.c_wchar * substnamebufferchars),
                    ("PrintNameBuffer", ctypes.c_wchar * 1)]

    # Length of the substitute name in bytes, excluding the null terminator
    numpathbytes = (substnamebufferchars - 1) * sizeof(ctypes.c_wchar)

    # We can't really use sizeof on the struct because of packing issues.
    # Instead, calculate the size manually
    buffersize = (numpathbytes + (sizeof(ctypes.c_wchar) * 2) +
                  (sizeof(ctypes.c_ushort) * 4))

    if path is None:
        buffer = REPARSE_DATA_BUFFER()
        buffer.ReparseTag = IO_REPARSE_TAG_MOUNT_POINT
    else:
        buffer = REPARSE_DATA_BUFFER(
            IO_REPARSE_TAG_MOUNT_POINT,
            buffersize,
            0,
            # print name offset, length
            0, numpathbytes,
            # substitute name offset, length
            numpathbytes + 2, 0,
            # print name
            path,
            # substitute name
            "")

    return (buffer, buffersize + REPARSE_DATA_BUFFER.SubstituteNameOffset.offset)
Create a junction at link_name pointing to source.
def create(source, link_name): """ Create a junction at link_name pointing to source. """ success = False if not os.path.isdir(source): raise Exception("%s is not a directory" % source) if os.path.exists(link_name): raise Exception("%s: junction link name already exists" % link_name) link_name = os.path.abspath(link_name) os.mkdir(link_name) # Get a handle to the directory hlink = CreateFile(link_name, fs.GENERIC_WRITE, fs.FILE_SHARE_READ | fs.FILE_SHARE_WRITE, None, fs.OPEN_EXISTING, fs.FILE_FLAG_OPEN_REPARSE_POINT | fs.FILE_FLAG_BACKUP_SEMANTICS, None) try: if hlink == fs.INVALID_HANDLE_VALUE: raise WinError() srcvolpath = unparsed_convert(source) (junctioninfo, infolen) = new_junction_reparse_buffer(srcvolpath) dummy = DWORD(0) res = DeviceIoControl( hlink, FSCTL_SET_REPARSE_POINT, byref(junctioninfo), infolen, None, 0, byref(dummy), None) if res == 0: raise WinError() success = True finally: if hlink != fs.INVALID_HANDLE_VALUE: CloseHandle(hlink) if not success: os.rmdir(link_name)
Return information for the volume containing the given path. This is going to be a pair containing (file system, file system flags).
def getvolumeinfo(path): """ Return information for the volume containing the given path. This is going to be a pair containing (file system, file system flags). """ # Add 1 for a trailing backslash if necessary, and 1 for the terminating # null character. volpath = ctypes.create_unicode_buffer(len(path) + 2) rv = GetVolumePathName(path, volpath, len(volpath)) if rv == 0: raise WinError() fsnamebuf = ctypes.create_unicode_buffer(MAX_PATH + 1) fsflags = DWORD(0) rv = GetVolumeInformation(volpath, None, 0, None, None, byref(fsflags), fsnamebuf, len(fsnamebuf)) if rv == 0: raise WinError() return (fsnamebuf.value, fsflags.value)
Sets command name and formatting for subsequent calls to logger
def initialize_logger(args): """Sets command name and formatting for subsequent calls to logger""" global log_filename log_filename = os.path.join(os.getcwd(), "jacquard.log") if args.log_file: _validate_log_file(args.log_file) log_filename = args.log_file logging.basicConfig(format=_FILE_LOG_FORMAT, level="DEBUG", datefmt=_DATE_FORMAT, filename=log_filename) global _verbose if args.verbose: _verbose = args.verbose start_time = datetime.now().strftime(_DATE_FORMAT) global _logging_dict _logging_dict = {'user': getpass.getuser(), 'host': socket.gethostname(), 'start_time': start_time, 'tool': args.subparser_name}
Suppress default exit behavior
def error(self, message): '''Suppress default exit behavior''' message = self._remessage_invalid_subparser(message) raise utils.UsageError(message)
Recognizes and claims MuTect VCFs from the set of all input VCFs.
def claim(self, file_readers): """Recognizes and claims MuTect VCFs form the set of all input VCFs. Each defined caller has a chance to evaluate and claim all the incoming files as something that it can process. Args: file_readers: the collection of currently unclaimed files Returns: A tuple of unclaimed readers and MuTectVcfReaders. """ unclaimed_readers = [] vcf_readers = [] for file_reader in file_readers: if self._is_mutect_vcf(file_reader): vcf_reader = vcf.VcfReader(file_reader) vcf_readers.append(_MutectVcfReader(vcf_reader)) else: unclaimed_readers.append(file_reader) return (unclaimed_readers, vcf_readers)
Returns a standardized column header.
def _get_new_column_header(self, vcf_reader): """Returns a standardized column header. MuTect sample headers include the name of input alignment, which is nice, but doesn't match up with the sample names reported in Strelka or VarScan. To fix this, we replace with NORMAL and TUMOR using the MuTect metadata command line to replace them correctly.""" mutect_dict = self._build_mutect_dict(vcf_reader.metaheaders) new_header_list = [] required_keys = set([self._NORMAL_SAMPLE_KEY, self._TUMOR_SAMPLE_KEY]) mutect_keys = set(mutect_dict.keys()) if not required_keys.issubset(mutect_keys): raise utils.JQException("Unable to determine normal " "and tumor sample ordering " "based on MuTect metaheader.") for field_name in vcf_reader.column_header.split("\t"): if field_name == mutect_dict[self._NORMAL_SAMPLE_KEY]: field_name = "NORMAL" elif field_name == mutect_dict[self._TUMOR_SAMPLE_KEY]: field_name = "TUMOR" new_header_list.append(field_name) return "\t".join(new_header_list)
Build a file path from *paths* and return the contents.
def read(*paths): """Build a file path from *paths* and return the contents.""" with open(os.path.join(*paths), 'r') as filename: return filename.read()
Recognizes and claims VarScan VCFs from the set of all input VCFs.
def claim(self, file_readers): """Recognizes and claims VarScan VCFs form the set of all input VCFs. Each defined caller has a chance to evaluate and claim all the incoming files as something that it can process. Since VarScan can claim high-confidence files as well, this process is significantly more complex than for other callers. Args: file_readers: the collection of currently unclaimed files Returns: A tuple of unclaimed readers and VarScanVcfReaders. """ (prefix_to_readers, filter_files, unclaimed_set) = self._find_varscan_files(file_readers) prefix_by_patients = self._split_prefix_by_patient(prefix_to_readers) self._validate_vcf_readers(prefix_by_patients) vcf_hc_pairs = self._pair_files(prefix_to_readers, filter_files) self._validate_vcf_hc_pairs(vcf_hc_pairs) vcf_readers = self._create_vcf_readers(vcf_hc_pairs) return list(unclaimed_set), vcf_readers
Extract the (float) value of a dependent tag, or None if absent.
def _get_dependent_value(tag_values, dependent_tag_id): '''Extract (float) value of dependent tag or None if absent.''' try: values = tag_values[dependent_tag_id].split(",") return max([float(value) for value in values]) except KeyError: return None except ValueError: return None
Derive mean and stdev.
def _init_population_stats(self, vcf_reader, dependent_tag_id): '''Derive mean and stdev. Adapted from online variance algorithm from Knuth, The Art of Computer Programming, volume 2 Returns: mean and stdev when len(values) > 1, otherwise (None, None) Values rounded to _MAX_PRECISION to ameliorate discrepancies between python versions.''' #pylint: disable=invalid-name n = 0 mean = 0 M2 = 0 try: vcf_reader.open() for vcf_record in vcf_reader.vcf_records(): for tag_values in vcf_record.sample_tag_values.values(): value = self._get_dependent_value(tag_values, dependent_tag_id) if value is not None: n += 1 delta = value - mean mean += delta / n M2 += delta * (value - mean) finally: vcf_reader.close() mean = round(mean, self._MAX_PRECISION) stdev = 0 if n == 0: mean = None stdev = None elif n >= 2: variance = M2/n stdev = round(math.sqrt(variance), self._MAX_PRECISION) return mean, stdev
Allows each caller to claim incoming files as they are recognized.
def claim(self, unclaimed_file_readers):
    """Give each caller a chance to claim incoming files it recognizes.

    Args:
        unclaimed_file_readers: Usually, all files in the input dir.

    Returns:
        A tuple of (unclaimed file readers, claimed VcfReaders). Any
        remaining unclaimed file readers could indicate stray files in
        the input dir.
    """
    claimed = []
    remaining = unclaimed_file_readers
    for caller in self._callers:
        remaining, newly_claimed = caller.claim(remaining)
        claimed.extend(newly_claimed)
    return remaining, claimed
Generates parsed VcfRecord objects.
def vcf_records(self, format_tags=None, qualified=False):
    """Generate parsed VcfRecord objects, one per non-header line.

    Typically called in a for loop to process each vcf record in a
    VcfReader. VcfReader must be opened in advance and closed when
    complete. Skips all headers.

    Args:
        format_tags: when given, each record is passed through
            modify_format_tag before being yielded
        qualified: When True, sample names are prefixed with file name

    Returns:
        Parsed VcfRecord

    Raises:
        StopIteration: when reader is exhausted.
        TypeError: if reader is closed.
    """
    names = self.qualified_sample_names if qualified else self.sample_names
    for line in self._file_reader.read_lines():
        if line.startswith("#"):
            continue
        record = vcf.VcfRecord.parse_record(line, names)
        if format_tags:
            record = self.modify_format_tag(record, format_tags)
        yield record
Similar to follow, but also detects whether the inode of the file has changed, e.g. if it was re-created.
def follow_path(file_path, buffering=-1, encoding=None, errors='strict'):
    """
    Similar to follow, but also looks up if inode of file is changed
    e.g. if it was re-created.

    Returned generator yields strings encoded by using encoding.
    If encoding is not specified, it defaults to
    locale.getpreferredencoding()

    >>> import io
    >>> import os
    >>> f = io.open('test_follow_path.txt', 'w+')
    >>> generator = follow_path('test_follow_path.txt')
    >>> _ = f.write('Line 1\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 1
    >>> _ = f.write('Line 2\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 2
    >>> _ = f.truncate(0)
    >>> _ = f.seek(0)
    >>> _ = f.write('Line 3\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 3
    >>> f.close()
    >>> os.remove('test_follow_path.txt')
    >>> f = io.open('test_follow_path.txt', 'w+')
    >>> _ = f.write('Line 4\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 4
    >>> print(next(generator))
    None
    >>> f.close()
    >>> os.remove('test_follow_path.txt')
    """
    if encoding is None:
        encoding = locale.getpreferredencoding()

    class FollowPathGenerator(object):
        # Wraps a Tailer.follow() generator, re-opening the file whenever
        # the path starts pointing at a different inode (delete/re-create).

        def __init__(self):
            if os.path.isfile(file_path):
                # File already exists: start tailing from its end.
                self.following_file = io.open(file_path, 'rb', buffering)
                self.follow_generator = Tailer(self.following_file, end=True).follow()
                self.follow_from_end_on_open = False
            else:
                # File does not exist yet; it will be opened lazily in next().
                self.following_file = None
                self.follow_generator = None
                self.follow_from_end_on_open = True

        def next(self):
            while True:
                if self.follow_generator:
                    line = next(self.follow_generator)
                else:
                    line = None

                if line is None:
                    # No new data from the current file -- check whether the
                    # path now refers to a different (or no) file.
                    if self.follow_generator:
                        try:
                            is_file_changed = not os.path.isfile(file_path) or os.stat(file_path).st_ino != os.fstat(self.following_file.fileno()).st_ino
                        except OSError:
                            # File could be deleted between isfile and stat invocations, which will make the latter to fail.
                            is_file_changed = True
                        if is_file_changed:
                            # File was deleted or re-created.
                            self.following_file.close()
                            self.following_file = None
                            self.follow_generator = None

                    if not self.follow_generator and os.path.isfile(file_path):
                        # New file is available. Open it.
                        try:
                            self.following_file = io.open(file_path, 'rb', buffering)
                            self.follow_generator = Tailer(self.following_file, end=self.follow_from_end_on_open).follow()
                            self.follow_from_end_on_open = False  # something could be written before we noticed change of file
                        except (IOError, OSError) as e:
                            LOG.info("Unable to tail file: %s", e)
                            if self.following_file:
                                self.following_file.close()
                            self.following_file = None
                            self.follow_generator = None
                            line = None
                        else:
                            line = next(self.follow_generator)

                # Decode raw bytes to text; None (no data) passes through.
                return line.decode(encoding, errors) if line is not None else line

        def __iter__(self):
            return self

        def __next__(self):
            return self.next()

    return FollowPathGenerator()
Split data into lines where lines are separated by LINE_TERMINATORS.
def splitlines(self, data):
    """
    Split data into lines where lines are separated by LINE_TERMINATORS.

    :param data: Any chunk of binary data.
    :return: List of lines without any characters at LINE_TERMINATORS.
    """
    # Build one alternation pattern from all known terminators.
    pattern = b'|'.join(self.LINE_TERMINATORS)
    return re.split(pattern, data)
Read given number of bytes from file. :param read_size: Number of bytes to read; -1 to read all. :return: Number of bytes read and data that was read.
def read(self, read_size=-1):
    """
    Read given number of bytes from file.

    :param read_size: Number of bytes to read. -1 to read all.
    :return: Number of bytes read and data that was read.
    """
    data = self.file.read(read_size)
    return len(data), data
Return line terminator data begins with or None.
def prefix_line_terminator(self, data):
    """
    Return line terminator data begins with or None.
    """
    # Order of LINE_TERMINATORS matters: the first match wins.
    return next((t for t in self.LINE_TERMINATORS if data.startswith(t)),
                None)
Return line terminator data ends with or None.
def suffix_line_terminator(self, data):
    """
    Return line terminator data ends with or None.
    """
    # Order of LINE_TERMINATORS matters: the first match wins.
    return next((t for t in self.LINE_TERMINATORS if data.endswith(t)),
                None)
Seek next line relative to the current file position.
def seek_next_line(self):
    """
    Seek next line relative to the current file position.

    :return: Position of the line or -1 if next line was not found.
    """
    where = self.file.tell()
    offset = 0
    while True:
        # Scan forward one chunk at a time looking for a terminator.
        data_len, data = self.read(self.read_size)
        data_where = 0
        if not data_len:
            break
        # Consider the following example: Foo\r | \nBar where " | " denotes current position,
        # 'Foo\r' is the read part and '\nBar' is the remaining part.
        # We should completely consume terminator "\r\n" by reading one extra byte.
        if b'\r\n' in self.LINE_TERMINATORS and data[-1] == b'\r'[0]:
            terminator_where = self.file.tell()
            terminator_len, terminator_data = self.read(1)
            if terminator_len and terminator_data[0] == b'\n'[0]:
                # The chunk ended mid-"\r\n": append the '\n' to the chunk.
                data_len += 1
                data += b'\n'
            else:
                # Not part of a "\r\n" pair -- rewind the probe byte.
                self.file.seek(terminator_where)
        while data_where < data_len:
            terminator = self.prefix_line_terminator(data[data_where:])
            if terminator:
                # Position the file just past the terminator, i.e. at the
                # start of the next line.
                self.file.seek(where + offset + data_where + len(terminator))
                return self.file.tell()
            else:
                data_where += 1
        offset += data_len
    # No terminator found: leave the file positioned at the end of the
    # scanned region and report failure.
    self.file.seek(where + offset)
    return -1
Seek previous line relative to the current file position.
def seek_previous_line(self):
    """
    Seek previous line relative to the current file position.

    :return: Position of the line or -1 if previous line was not found.
    """
    where = self.file.tell()
    offset = 0
    while True:
        if offset == where:
            # Scanned all the way back to the start of the file.
            break
        # Read one chunk ending at the current scan position; clamp the
        # chunk size so we never seek before the start of the file.
        read_size = self.read_size if self.read_size <= where else where
        self.file.seek(where - offset - read_size, SEEK_SET)
        data_len, data = self.read(read_size)
        # Consider the following example: Foo\r | \nBar where " | " denotes current position,
        # '\nBar' is the read part and 'Foo\r' is the remaining part.
        # We should completely consume terminator "\r\n" by reading one extra byte.
        if b'\r\n' in self.LINE_TERMINATORS and data[0] == b'\n'[0]:
            terminator_where = self.file.tell()
            if terminator_where > data_len + 1:
                # Probe one byte before the chunk for the matching '\r'.
                self.file.seek(where - offset - data_len - 1, SEEK_SET)
                terminator_len, terminator_data = self.read(1)
                if terminator_data[0] == b'\r'[0]:
                    data_len += 1
                    data = b'\r' + data
                self.file.seek(terminator_where)
        # Scan the chunk backwards for a terminator.
        data_where = data_len
        while data_where > 0:
            terminator = self.suffix_line_terminator(data[:data_where])
            if terminator and offset == 0 and data_where == data_len:
                # The last character is a line terminator that finishes current line. Ignore it.
                data_where -= len(terminator)
            elif terminator:
                # Position the file right after the terminator, i.e. at the
                # start of the line preceding the original position.
                self.file.seek(where - offset - (data_len - data_where))
                return self.file.tell()
            else:
                data_where -= 1
        offset += data_len
    if where == 0:
        # Nothing more to read.
        return -1
    else:
        # Very first line.
        self.file.seek(0)
        return 0
Return the last lines of the file.
def tail(self, lines=10):
    """
    Return the last lines of the file.
    """
    # Walk backwards from EOF, one line per step, until either `lines`
    # lines have been skipped or the start of the file is reached.
    self.file.seek(0, SEEK_END)
    for _ in range(lines):
        if self.seek_previous_line() == -1:
            break
    data = self.file.read()
    # Strip a single trailing terminator so the final line does not
    # produce a spurious empty entry; terminators *between* lines are
    # kept so splitlines() can separate them.
    for terminator in self.LINE_TERMINATORS:
        if data.endswith(terminator):
            data = data[:-len(terminator)]
            break
    return self.splitlines(data) if data else []
Return the top lines of the file.
def head(self, lines=10):
    """
    Return the top lines of the file.
    """
    # Walk forward from the start, one line per step, to find where the
    # requested number of lines ends.
    self.file.seek(0)
    for _ in range(lines):
        if self.seek_next_line() == -1:
            break
    end_pos = self.file.tell()
    self.file.seek(0)
    data = self.file.read(end_pos)
    # Strip a single trailing terminator so the final line does not
    # produce a spurious empty entry; terminators *between* lines are
    # kept so splitlines() can separate them.
    for terminator in self.LINE_TERMINATORS:
        if data.endswith(terminator):
            data = data[:-len(terminator)]
            break
    return self.splitlines(data) if data else []
Iterator generator that returns lines as data is added to the file.
def follow(self):
    """
    Iterator generator that returns lines as data is added to the file.

    None will be yielded if no new line is available.
    Caller may either wait and re-try or end iteration.
    """
    # `trailing` is True while we are positioned just after data that did
    # not end with a terminator yet (a partially-written line).
    trailing = True
    while True:
        where = self.file.tell()

        if where > os.fstat(self.file.fileno()).st_size:
            # File was truncated.
            where = 0
            self.file.seek(where)

        line = self.file.readline()

        if line:
            if trailing and line in self.LINE_TERMINATORS:
                # This is just the line terminator added to the end of the file
                # before a new line, ignore.
                trailing = False
                continue

            terminator = self.suffix_line_terminator(line)
            if terminator:
                # Yield the line without its terminator.
                line = line[:-len(terminator)]

            trailing = False
            yield line
        else:
            # No new data: rewind to where we were (readline may have
            # consumed a partial line) and signal "nothing yet".
            trailing = True
            self.file.seek(where)
            yield None
Recognizes and claims Strelka VCFs from the set of all input VCFs.
def claim(self, file_readers):
    """Recognize and claim Strelka VCFs from the set of all input VCFs.

    Each defined caller has a chance to evaluate and claim all the
    incoming files as something that it can process.

    Args:
        file_readers: the collection of currently unclaimed files

    Returns:
        A tuple of unclaimed readers and StrelkaVcfReaders.
    """
    prefix_to_reader, unclaimed = self._find_strelka_files(file_readers)
    readers_by_patient = self._split_prefix_by_patient(prefix_to_reader)
    self._validate_vcf_readers(readers_by_patient)
    claimed = self._create_vcf_readers(prefix_to_reader)
    return (unclaimed, claimed)
Generates parsed VcfRecord objects.
def vcf_records(self, qualified=False):
    """Generate parsed VcfRecord objects, one per non-header line.

    Typically called in a for loop to process each vcf record in a
    VcfReader. VcfReader must be opened in advance and closed when
    complete. Skips all headers.

    Args:
        qualified: When True, sample names are prefixed with file name

    Returns:
        Parsed VcfRecord

    Raises:
        StopIteration: when reader is exhausted.
        TypeError: if reader is closed.
    """
    names = self.qualified_sample_names if qualified else self.sample_names
    data_lines = (line for line in self._file_reader.read_lines()
                  if not line.startswith("#"))
    for line in data_lines:
        yield VcfRecord.parse_record(line, names)
Alternative constructor that parses VcfRecord from VCF string.
def parse_record(cls, vcf_line, sample_names):
    """Alternative constructor that parses VcfRecord from VCF string.

    Aspire to parse/represent the data such that it could be reliably
    round-tripped (INFO fields and FORMAT tags are kept ordered so they
    are not shuffled on output).

    Args:
        vcf_line: the VCF variant record as a string; tab separated
            fields, trailing newlines are ignored. Must have at least 8
            fixed fields (through INFO)
        sample_names: a list of sample name strings; these should match
            the VCF header column

    Returns:
        A mutable VcfRecord.
    """
    fields = vcf_line.rstrip("\r\n").split("\t")
    chrom, pos, rid, ref, alt, qual, rfilter, info = fields[0:8]
    sample_tag_values = {}
    # FORMAT (field 9) plus at least one sample column (field 10+).
    if len(fields) > 9:
        sample_tag_values = VcfRecord._sample_tag_values(sample_names,
                                                         fields[8],
                                                         fields[9:])
    return VcfRecord(chrom, pos, ref, alt, rid, qual, rfilter, info,
                     sample_tag_values)
Creates a sample dict of tag - value dicts for a single variant record.
def _sample_tag_values(cls, sample_names, rformat, sample_fields):
    """Create a sample dict of tag-value dicts for one variant record.

    Args:
        sample_names: list of sample name strings.
        rformat: record format string (from VCF record).
        sample_fields: list of strings where each string is the ':'
            separated format values for an individual sample.

    Returns:
        An ordered dict keyed by sample name; each value is an ordered
        dict of format tag to value. An empty sample field maps its
        first tag to '.'.
    """
    sample_tag_values = OrderedDict()
    tag_names = VcfRecord._format_list(rformat)
    for index, sample_field in enumerate(sample_fields):
        values = sample_field.split(":") if sample_field else "."
        sample_tag_values[sample_names[index]] = OrderedDict(zip(tag_names,
                                                                 values))
    return sample_tag_values
Returns set of format tags.
def format_tags(self):
    """Return the set of format tag names (empty set when no samples)."""
    if not self.sample_tag_values:
        return VcfRecord._EMPTY_SET
    first_sample_tags = next(iter(self.sample_tag_values.values()))
    return set(first_sample_tags)
Adds new info field ( flag or key = value pair ).
def add_info_field(self, field):
    """Adds new info field (flag or key=value pair).

    Args:
        field: String flag (e.g. "SOMATIC") or key-value ("NEW_DP=42").
            Values may themselves contain "=".

    Raises:
        KeyError: if info field already exists
    """
    if field in self.info_dict:
        msg = "New info field [{}] already exists.".format(field)
        raise KeyError(msg)

    if "=" in field:
        # Split on the first '=' only: VCF INFO values may legally
        # contain '=' (plain split would raise ValueError here).
        key, value = field.split("=", 1)
        self.info_dict[key] = value
    else:
        # A bare flag is stored with itself as the value.
        self.info_dict[field] = field

    self._join_info_fields()
Updates info attribute from info dict.
def _join_info_fields(self): """Updates info attribute from info dict.""" if self.info_dict: info_fields = [] if len(self.info_dict) > 1: self.info_dict.pop(".", None) for field, value in self.info_dict.items(): if field == value: info_fields.append(value) else: info_fields.append("=".join([field, value])) self.info = ";".join(info_fields) else: self.info = "."
Returns string representation of format field.
def _format_field(self): """Returns string representation of format field.""" format_field = "." if self.sample_tag_values: first_sample = list(self.sample_tag_values.keys())[0] tag_names = self.sample_tag_values[first_sample].keys() if tag_names: format_field = ":".join(tag_names) return format_field
Returns string representation of sample - format values.
def _sample_field(self, sample): """Returns string representation of sample-format values. Raises: KeyError: if requested sample is not defined. """ tag_values = self.sample_tag_values[sample].values() if tag_values: return ":".join(tag_values) else: return "."