Returns the values needed to validate the transaction's signature_message_fragment value.
def get_signature_validation_trytes(self):
    # type: () -> TryteString
    """
    Returns the values needed to validate the transaction's
    ``signature_message_fragment`` value.
    """
    return (
        self.address.address
        + self.value_as_trytes
        + self.legacy_tag
        + self.timestamp_as_trytes
        + self.current_index_as_trytes
        + self.last_index_as_trytes
    )
Sets the is_confirmed flag for the bundle.
def is_confirmed(self, new_is_confirmed):
    # type: (bool) -> None
    """
    Sets the ``is_confirmed`` flag for the bundle.
    """
    self._is_confirmed = new_is_confirmed

    for txn in self:
        txn.is_confirmed = new_is_confirmed
Attempts to decipher encoded messages from the transactions in the bundle.
def get_messages(self, errors='drop'):
    # type: (Text) -> List[Text]
    """
    Attempts to decipher encoded messages from the transactions in
    the bundle.

    :param errors:
        How to handle trytes that can't be converted, or bytes that
        can't be decoded using UTF-8:

        'drop'
            Drop the trytes from the result.

        'strict'
            Raise an exception.

        'replace'
            Replace with a placeholder character.

        'ignore'
            Omit the invalid tryte/byte sequence.
    """
    decode_errors = 'strict' if errors == 'drop' else errors

    messages = []

    for group in self.group_transactions():
        # Ignore inputs.
        if group[0].value < 0:
            continue

        message_trytes = TryteString(b'')
        for txn in group:
            message_trytes += txn.signature_message_fragment

        if message_trytes:
            try:
                messages.append(message_trytes.decode(decode_errors))
            except (TrytesDecodeError, UnicodeDecodeError):
                if errors != 'drop':
                    raise

    return messages
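A minimal usage sketch (the ``bundle`` variable and message content are hypothetical; in practice the bundle would come from an API call such as PyOTA's ``get_bundles``):

    # ``bundle`` is a Bundle previously retrieved from the Tangle.
    messages = bundle.get_messages(errors='replace')
    for message in messages:
        print(message)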
Returns TryteString representations of the transactions in this bundle.
def as_tryte_strings(self, head_to_tail=False):
    # type: (bool) -> List[TransactionTrytes]
    """
    Returns TryteString representations of the transactions in this
    bundle.

    :param head_to_tail:
        Determines the order of the transactions:

        - ``True``: head txn first, tail txn last.
        - ``False`` (default): tail txn first, head txn last.

        Note that the order is reversed by default, as this is the
        way bundles are typically broadcast to the Tangle.
    """
    transactions = self if head_to_tail else reversed(self)
    return [t.as_tryte_string() for t in transactions]
Groups transactions in the bundle by address.
def group_transactions(self):
    # type: () -> List[List[Transaction]]
    """
    Groups transactions in the bundle by address.
    """
    groups = []

    if self:
        last_txn = self.tail_transaction
        current_group = [last_txn]

        for current_txn in self.transactions[1:]:
            # Transactions are grouped by address, so as long as the
            # address stays consistent from one transaction to
            # another, we are still in the same group.
            if current_txn.address == last_txn.address:
                current_group.append(current_txn)
            else:
                groups.append(current_group)
                current_group = [current_txn]

            last_txn = current_txn

        if current_group:
            groups.append(current_group)

    return groups
Automatically discover commands in the specified package.
def discover_commands(package, recursively=True):
    # type: (Union[ModuleType, Text], bool) -> Dict[Text, 'CommandMeta']
    """
    Automatically discover commands in the specified package.

    :param package:
        Package path or reference.

    :param recursively:
        If True, will descend recursively into sub-packages.

    :return:
        All commands discovered in the specified package, indexed by
        command name (note: not class name).
    """
    # http://stackoverflow.com/a/25562415/
    if isinstance(package, string_types):
        package = import_module(package)  # type: ModuleType

    commands = {}

    for _, name, is_package in walk_packages(package.__path__, package.__name__ + '.'):
        # Loading the module is good enough; the CommandMeta
        # metaclass will ensure that any commands in the module get
        # registered.
        #
        # The package name is passed as a prefix to
        # ``walk_packages``, so that module names do not conflict
        # with other imported packages.
        # Bug: https://github.com/iotaledger/iota.lib.py/issues/63
        sub_package = import_module(name)

        # Index any command classes that we find.
        for (_, obj) in get_members(sub_package):
            if is_class(obj) and isinstance(obj, CommandMeta):
                command_name = getattr(obj, 'command')
                if command_name:
                    commands[command_name] = obj

        if recursively and is_package:
            commands.update(discover_commands(sub_package))

    return commands
Sends the request object to the adapter and returns the response.
def _execute(self, request):
    # type: (dict) -> dict
    """
    Sends the request object to the adapter and returns the
    response.

    The command name will be automatically injected into the request
    before it is sent (note: this will modify the request object).
    """
    request['command'] = self.command
    return self.adapter.send_request(request)
Applies a filter to a value. If the value does not pass the filter, an exception will be raised with lots of contextual info attached to it.
def _apply_filter(value, filter_, failure_message):
    # type: (dict, Optional[f.BaseFilter], Text) -> dict
    """
    Applies a filter to a value.  If the value does not pass the
    filter, an exception will be raised with lots of contextual
    info attached to it.
    """
    if filter_:
        runner = f.FilterRunner(filter_, value)

        if runner.is_valid():
            return runner.cleaned_data
        else:
            raise with_context(
                exc=ValueError(
                    '{message} ({error_codes}) '
                    '(`exc.context["filter_errors"]` '
                    'contains more information).'.format(
                        message=failure_message,
                        error_codes=runner.error_codes,
                    ),
                ),
                context={
                    'filter_errors': runner.get_errors(with_context=True),
                },
            )

    return value
Returns the URL to check job status.
def get_jobs_url(self, job_id):
    # type: (Text) -> Text
    """
    Returns the URL to check job status.

    :param job_id:
        The ID of the job to check.
    """
    return compat.urllib_parse.urlunsplit((
        self.uri.scheme,
        self.uri.netloc,
        self.uri.path.rstrip('/') + '/jobs/' + job_id,
        self.uri.query,
        self.uri.fragment,
    ))
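For reference, here is how ``urlunsplit`` reassembles the pieces; the host, path, and job ID below are hypothetical:

    from urllib.parse import urlsplit, urlunsplit

    uri = urlsplit('https://sandbox.example.com/api/v1/?token=abc')
    url = urlunsplit((
        uri.scheme,
        uri.netloc,
        uri.path.rstrip('/') + '/jobs/' + '12345',
        uri.query,
        uri.fragment,
    ))
    print(url)  # https://sandbox.example.com/api/v1/jobs/12345?token=abc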
Returns all errors found with the bundle.
def errors(self):
    # type: () -> List[Text]
    """
    Returns all errors found with the bundle.
    """
    try:
        self._errors.extend(self._validator)  # type: List[Text]
    except StopIteration:
        pass

    return self._errors
Returns whether the bundle is valid.
def is_valid(self):
    # type: () -> bool
    """
    Returns whether the bundle is valid.
    """
    if not self._errors:
        try:
            # We only have to check for a single error to determine
            # if the bundle is valid or not.
            self._errors.append(next(self._validator))
        except StopIteration:
            pass

    return not self._errors
Creates a generator that does all the work.
def _create_validator(self):
    # type: () -> Generator[Text, None, None]
    """
    Creates a generator that does all the work.
    """
    # Group transactions by address to make it easier to iterate
    # over inputs.
    grouped_transactions = self.bundle.group_transactions()

    # Define a few expected values.
    bundle_hash = self.bundle.hash
    last_index = len(self.bundle) - 1

    # Track a few others as we go along.
    balance = 0

    # Check indices and balance first.
    # Note that we use a counter to keep track of the current index,
    # since at this point we can't trust that the transactions have
    # correct ``current_index`` values.
    counter = 0
    for group in grouped_transactions:
        for txn in group:
            balance += txn.value

            if txn.bundle_hash != bundle_hash:
                yield 'Transaction {i} has invalid bundle hash.'.format(
                    i=counter,
                )

            if txn.current_index != counter:
                yield (
                    'Transaction {i} has invalid current index value '
                    '(expected {i}, actual {actual}).'.format(
                        actual=txn.current_index,
                        i=counter,
                    )
                )

            if txn.last_index != last_index:
                yield (
                    'Transaction {i} has invalid last index value '
                    '(expected {expected}, actual {actual}).'.format(
                        actual=txn.last_index,
                        expected=last_index,
                        i=counter,
                    )
                )

            counter += 1

    # Bundle must be balanced (spends must match inputs).
    if balance != 0:
        yield (
            'Bundle has invalid balance '
            '(expected 0, actual {actual}).'.format(
                actual=balance,
            )
        )

    # Signature validation is only meaningful if the transactions
    # are otherwise valid.
    if not self._errors:
        signature_validation_queue = []  # type: List[List[Transaction]]

        for group in grouped_transactions:
            # Signature validation only applies to inputs.
            if group[0].value >= 0:
                continue

            validate_group_signature = True
            for j, txn in enumerate(group):
                if (j > 0) and (txn.value != 0):
                    # Input is malformed; signature fragments after
                    # the first should have zero value.
                    yield (
                        'Transaction {i} has invalid value '
                        '(expected 0, actual {actual}).'.format(
                            actual=txn.value,

                            # If we get to this point, we know that
                            # the ``current_index`` value for each
                            # transaction can be trusted.
                            i=txn.current_index,
                        )
                    )

                    # We won't be able to validate the signature,
                    # but continue anyway, so that we can check that
                    # the other transactions in the group have the
                    # correct ``value``.
                    validate_group_signature = False
                    continue

            # After collecting the signature fragment from each
            # transaction in the group, queue them up to run through
            # the validator.
            #
            # We have to perform signature validation separately so
            # that we can try different algorithms (for
            # backwards-compatibility).
            #
            # References:
            #
            # - https://github.com/iotaledger/kerl#kerl-integration-in-iota
            if validate_group_signature:
                signature_validation_queue.append(group)

        # Once we've finished checking the attributes from each
        # transaction in the bundle, go back and validate
        # signatures.
        if signature_validation_queue:
            # ``yield from`` is an option here, but for
            # compatibility with Python 2 clients, we will do it the
            # old-fashioned way.
            for error in self._get_bundle_signature_errors(
                    signature_validation_queue
            ):
                yield error
Validates the signature fragments in the bundle.
def _get_bundle_signature_errors(self, groups):
    # type: (List[List[Transaction]]) -> List[Text]
    """
    Validates the signature fragments in the bundle.

    :return:
        List of error messages.  If empty, signature fragments are
        valid.
    """
    # Start with the currently-supported hash algo.
    current_pos = None
    current_errors = []
    for current_pos, group in enumerate(groups):
        error = self._get_group_signature_error(group, SUPPORTED_SPONGE)
        if error:
            current_errors.append(error)

            # Pause and retry with the legacy algo.
            break

    # If validation failed, then go back and try with the legacy
    # algo (only applies if we are currently transitioning to a new
    # algo).
    if current_errors and LEGACY_SPONGE:
        for group in groups:
            # noinspection PyTypeChecker
            if self._get_group_signature_error(group, LEGACY_SPONGE):
                # Legacy algo doesn't work, either; no point in
                # continuing.
                break
        else:
            # If we get here, then we were able to validate the
            # signature fragments successfully using the legacy
            # algorithm.
            return []

    # If we get here, then validation also failed when using the
    # legacy algorithm.
    #
    # At this point, we know that the bundle is invalid, but we will
    # continue validating with the supported algorithm anyway, so
    # that we can return an error message for every invalid input.
    current_errors.extend(filter(None, (
        self._get_group_signature_error(group, SUPPORTED_SPONGE)
        for group in groups[current_pos + 1:]
    )))

    return current_errors
Validates the signature fragments for a group of transactions using the specified sponge type.
def _get_group_signature_error(group, sponge_type):
    # type: (List[Transaction], type) -> Optional[Text]
    """
    Validates the signature fragments for a group of transactions
    using the specified sponge type.

    Note: this method assumes that the transactions in the group
    have already passed basic validation (see
    :py:meth:`_create_validator`).

    :return:
        - ``None``: Indicates that the signature fragments are valid.
        - ``Text``: Error message indicating the fragments are invalid.
    """
    validate_group_signature = validate_signature_fragments(
        fragments=[txn.signature_message_fragment for txn in group],
        hash_=group[0].bundle_hash,
        public_key=group[0].address,
        sponge_type=sponge_type,
    )

    if validate_group_signature:
        return None

    return (
        'Transaction {i} has invalid signature '
        '(using {fragments} fragments).'.format(
            fragments=len(group),
            i=group[0].current_index,
        )
    )
Recursively traverse the Tangle, collecting transactions until we hit a new bundle.
def _traverse_bundle(self, txn_hash, target_bundle_hash=None):
    # type: (TransactionHash, Optional[BundleHash]) -> List[Transaction]
    """
    Recursively traverse the Tangle, collecting transactions until
    we hit a new bundle.

    This method is (usually) faster than ``findTransactions``, and
    it ensures we don't collect transactions from replayed bundles.
    """
    trytes = (
        GetTrytesCommand(self.adapter)(hashes=[txn_hash])['trytes']
    )  # type: List[TryteString]

    if not trytes:
        raise with_context(
            exc=BadApiResponse(
                'Bundle transactions not visible '
                '(``exc.context`` has more info).',
            ),
            context={
                'transaction_hash': txn_hash,
                'target_bundle_hash': target_bundle_hash,
            },
        )

    transaction = Transaction.from_tryte_string(trytes[0])

    if (not target_bundle_hash) and transaction.current_index:
        raise with_context(
            exc=BadApiResponse(
                '``_traverse_bundle`` started with a non-tail transaction '
                '(``exc.context`` has more info).',
            ),
            context={
                'transaction_object': transaction,
                'target_bundle_hash': target_bundle_hash,
            },
        )

    if target_bundle_hash:
        if target_bundle_hash != transaction.bundle_hash:
            # We've hit a different bundle; we can stop now.
            return []
    else:
        target_bundle_hash = transaction.bundle_hash

    if transaction.current_index == transaction.last_index == 0:
        # Bundle only has one transaction.
        return [transaction]

    # Recursively follow the trunk transaction, to fetch the next
    # transaction in the bundle.
    return [transaction] + self._traverse_bundle(
        txn_hash=transaction.trunk_transaction_hash,
        target_bundle_hash=target_bundle_hash,
    )
Starts the REPL.
def _start_repl(api):
    # type: (Iota) -> None
    """
    Starts the REPL.
    """
    banner = (
        'IOTA API client for {uri} ({testnet}) '
        'initialized as variable `api`.\n'
        'Type `help(api)` for list of API commands.'.format(
            testnet='testnet' if api.testnet else 'mainnet',
            uri=api.adapter.get_uri(),
        )
    )

    scope_vars = {'api': api}

    try:
        # noinspection PyUnresolvedReferences
        import IPython
    except ImportError:
        # IPython not available; use regular Python REPL.
        from code import InteractiveConsole
        InteractiveConsole(locals=scope_vars).interact(banner, '')
    else:
        print(banner)
        IPython.start_ipython(argv=[], user_ns=scope_vars)
Generates a random seed using a CSPRNG.
def random(cls, length=Hash.LEN):
    """
    Generates a random seed using a CSPRNG.

    :param length:
        Length of seed, in trytes.

        For maximum security, this should always be set to 81, but
        you can change it if you're 110% sure you know what you're
        doing.

        See https://iota.stackexchange.com/q/249 for more info.
    """
    return super(Seed, cls).random(length)
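A minimal usage sketch (assuming PyOTA exposes ``Seed`` at the import path shown; the default length is 81 trytes):

    from iota.crypto.types import Seed

    seed = Seed.random()
    print(len(seed))  # 81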
Generates the digest used to do the actual signing.
def get_digest(self):
    # type: () -> Digest
    """
    Generates the digest used to do the actual signing.

    Signing keys can have variable length and tend to be quite long,
    which makes them not-well-suited for use in crypto algorithms.

    The digest is essentially the result of running the signing key
    through a PBKDF, yielding a constant-length hash that can be
    used for crypto.
    """
    hashes_per_fragment = FRAGMENT_LENGTH // Hash.LEN

    key_fragments = self.iter_chunks(FRAGMENT_LENGTH)

    # The digest will contain one hash per key fragment.
    digest = [0] * HASH_LENGTH * len(key_fragments)

    # Iterate over each fragment in the key.
    for i, fragment in enumerate(key_fragments):
        fragment_trits = fragment.as_trits()

        key_fragment = [0] * FRAGMENT_LENGTH
        hash_trits = []

        # Within each fragment, iterate over one hash at a time.
        for j in range(hashes_per_fragment):
            hash_start = j * HASH_LENGTH
            hash_end = hash_start + HASH_LENGTH
            hash_trits = fragment_trits[hash_start:hash_end]

            for k in range(26):
                sponge = Kerl()
                sponge.absorb(hash_trits)
                sponge.squeeze(hash_trits)

            key_fragment[hash_start:hash_end] = hash_trits

        # After processing all of the hashes in the fragment,
        # generate a final hash and append it to the digest.
        #
        # Note that we will do this once per fragment in the key, so
        # the longer the key is, the longer the digest will be.
        sponge = Kerl()
        sponge.absorb(key_fragment)
        sponge.squeeze(hash_trits)

        fragment_hash_start = i * HASH_LENGTH
        fragment_hash_end = fragment_hash_start + HASH_LENGTH

        digest[fragment_hash_start:fragment_hash_end] = hash_trits

    return Digest(TryteString.from_trits(digest), self.key_index)
Signs the inputs starting at the specified index.
def sign_input_transactions(self, bundle, start_index):
    # type: (Bundle, int) -> None
    """
    Signs the inputs starting at the specified index.

    :param bundle:
        The bundle that contains the input transactions to sign.

    :param start_index:
        The index of the first input transaction.

        If necessary, the resulting signature will be split across
        subsequent transactions automatically.
    """
    if not bundle.hash:
        raise with_context(
            exc=ValueError('Cannot sign inputs without a bundle hash!'),
            context={
                'bundle': bundle,
                'key_index': self.key_index,
                'start_index': start_index,
            },
        )

    from iota.crypto.signing import SignatureFragmentGenerator
    signature_fragment_generator = (
        SignatureFragmentGenerator(self, bundle.hash)
    )

    # We can only fit one signature fragment into each transaction,
    # so we have to split the entire signature.
    for j in range(self.security_level):
        # Do lots of validation before we attempt to sign the
        # transaction, and attach lots of context info to any
        # exception.
        #
        # This method is likely to be invoked at a very low level in
        # the application, so if anything goes wrong, we want to
        # make sure it's as easy to troubleshoot as possible!
        try:
            txn = bundle[start_index + j]
        except IndexError as e:
            raise with_context(
                exc=e,
                context={
                    'bundle': bundle,
                    'key_index': self.key_index,
                    'current_index': start_index + j,
                },
            )

        # Only inputs can be signed.
        if txn.value > 0:
            raise with_context(
                exc=ValueError(
                    'Attempting to sign non-input transaction #{i} '
                    '(value={value}).'.format(
                        i=txn.current_index,
                        value=txn.value,
                    ),
                ),
                context={
                    'bundle': bundle,
                    'key_index': self.key_index,
                    'start_index': start_index,
                },
            )

        if txn.signature_message_fragment:
            raise with_context(
                exc=ValueError(
                    'Attempting to sign input transaction #{i}, '
                    'but it has a non-empty fragment '
                    '(is it already signed?).'.format(
                        i=txn.current_index,
                    ),
                ),
                context={
                    'bundle': bundle,
                    'key_index': self.key_index,
                    'start_index': start_index,
                },
            )

        txn.signature_message_fragment = next(signature_fragment_generator)
Makes JSON-serializable objects play nice with IPython's default pretty-printer.
def _repr_pretty_(self, p, cycle):
    """
    Makes JSON-serializable objects play nice with IPython's
    default pretty-printer.

    Sadly, :py:func:`pprint.pprint` does not have a similar
    mechanism.

    References:

    - http://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html
    - :py:meth:`IPython.lib.pretty.RepresentationPrinter.pretty`
    - :py:func:`pprint._safe_repr`
    """
    class_name = type(self).__name__

    if cycle:
        p.text('{cls}(...)'.format(
            cls=class_name,
        ))
    else:
        with p.group(
                len(class_name) + 1,
                '{cls}('.format(cls=class_name),
                ')',
        ):
            prepared = self.as_json_compatible()

            if isinstance(prepared, Mapping):
                p.text('**')
            elif isinstance(prepared, Iterable):
                p.text('*')

            p.pretty(prepared)
Absorb trits into the sponge from a buffer.
def absorb(self, trits, offset=0, length=None):
    # type: (MutableSequence[int], int, Optional[int]) -> None
    """
    Absorb trits into the sponge from a buffer.

    :param trits:
        Buffer that contains the trits to absorb.

    :param offset:
        Starting offset in ``trits``.

    :param length:
        Number of trits to absorb.  Defaults to ``len(trits)``.
    """
    # Pad input if necessary, so that it can be divided evenly into
    # hashes.
    # Note that this operation creates a COPY of ``trits``; the
    # incoming buffer is not modified!
    pad = ((len(trits) % TRIT_HASH_LENGTH) or TRIT_HASH_LENGTH)
    trits += [0] * (TRIT_HASH_LENGTH - pad)

    if length is None:
        length = len(trits)

    if length < 1:
        raise with_context(
            exc=ValueError('Invalid length passed to ``absorb``.'),
            context={
                'trits': trits,
                'offset': offset,
                'length': length,
            },
        )

    while offset < length:
        stop = min(offset + TRIT_HASH_LENGTH, length)

        # If we're copying over a full chunk, zero last trit.
        if stop - offset == TRIT_HASH_LENGTH:
            trits[stop - 1] = 0

        signed_nums = conv.convertToBytes(trits[offset:stop])

        # Convert signed bytes into their equivalent unsigned
        # representation, in order to use Python's built-in bytes
        # type.
        unsigned_bytes = bytearray(
            conv.convert_sign(b) for b in signed_nums
        )

        self.k.update(unsigned_bytes)

        offset += TRIT_HASH_LENGTH
Squeeze trits from the sponge into a buffer.
def squeeze(self, trits, offset=0, length=None):
    # type: (MutableSequence[int], int, Optional[int]) -> None
    """
    Squeeze trits from the sponge into a buffer.

    :param trits:
        Buffer that will hold the squeezed trits.

        IMPORTANT: If ``trits`` is too small, it will be extended!

    :param offset:
        Starting offset in ``trits``.

    :param length:
        Number of trits to squeeze from the sponge.

        If not specified, defaults to :py:data:`TRIT_HASH_LENGTH`
        (i.e., by default, we will try to squeeze exactly 1 hash).
    """
    # Pad input if necessary, so that it can be divided evenly into
    # hashes.
    pad = ((len(trits) % TRIT_HASH_LENGTH) or TRIT_HASH_LENGTH)
    trits += [0] * (TRIT_HASH_LENGTH - pad)

    if length is None:
        # By default, we will try to squeeze one hash.
        # Note that this is different than ``absorb``.
        length = len(trits) or TRIT_HASH_LENGTH

    if length < 1:
        raise with_context(
            exc=ValueError('Invalid length passed to ``squeeze``.'),
            context={
                'trits': trits,
                'offset': offset,
                'length': length,
            },
        )

    while offset < length:
        unsigned_hash = self.k.digest()

        if PY2:
            unsigned_hash = map(ord, unsigned_hash)  # type: ignore

        signed_hash = [conv.convert_sign(b) for b in unsigned_hash]

        trits_from_hash = conv.convertToTrits(signed_hash)
        trits_from_hash[TRIT_HASH_LENGTH - 1] = 0

        stop = min(TRIT_HASH_LENGTH, length - offset)
        trits[offset:offset + stop] = trits_from_hash[0:stop]

        flipped_bytes = bytearray(
            conv.convert_sign(~b) for b in unsigned_hash
        )

        # Reset internal state before feeding back in.
        self.reset()
        self.k.update(flipped_bytes)

        offset += TRIT_HASH_LENGTH
Attaches a context value to an Exception.
def with_context(exc, context):
    # type: (Exception, dict) -> Exception
    """
    Attaches a ``context`` value to an Exception.

    Before:

    .. code-block:: python

        exc = Exception('Frog blast the vent core!')
        exc.context = { ... }
        raise exc

    After:

    .. code-block:: python

        raise with_context(
            exc=Exception('Frog blast the vent core!'),
            context={ ... },
        )
    """
    if not hasattr(exc, 'context'):
        exc.context = {}

    exc.context.update(context)

    return exc
Generates a filter chain for validating a security level.
def SecurityLevel():
    """
    Generates a filter chain for validating a security level.
    """
    return (
        f.Type(int)
        | f.Min(1)
        | f.Max(3)
        | f.Optional(default=AddressGenerator.DEFAULT_SECURITY_LEVEL)
    )
Returns a TryteString representation of the transaction.
def as_tryte_string(self):
    # type: () -> TryteString
    """
    Returns a TryteString representation of the transaction.
    """
    if not self.bundle_hash:
        raise with_context(
            exc=RuntimeError(
                'Cannot get TryteString representation of {cls} instance '
                'without a bundle hash; call ``bundle.finalize()`` first '
                '(``exc.context`` has more info).'.format(
                    cls=type(self).__name__,
                ),
            ),
            context={
                'transaction': self,
            },
        )

    return super(ProposedTransaction, self).as_tryte_string()
Increments the transaction's legacy tag, used to fix insecure bundle hashes when finalizing a bundle.
def increment_legacy_tag(self):
    """
    Increments the transaction's legacy tag, used to fix insecure
    bundle hashes when finalizing a bundle.

    References:

    - https://github.com/iotaledger/iota.lib.py/issues/84
    """
    self._legacy_tag = (
        Tag.from_trits(add_trits(self.legacy_tag.as_trits(), [1]))
    )
Determines the most relevant tag for the bundle.
def tag(self):
    # type: () -> Tag
    """
    Determines the most relevant tag for the bundle.
    """
    for txn in reversed(self):  # type: ProposedTransaction
        if txn.tag:
            return txn.tag

    return Tag(b'')
Adds a transaction to the bundle.
def add_transaction(self, transaction):
    # type: (ProposedTransaction) -> None
    """
    Adds a transaction to the bundle.

    If the transaction message is too long, it will be split
    automatically into multiple transactions.
    """
    if self.hash:
        raise RuntimeError('Bundle is already finalized.')

    if transaction.value < 0:
        raise ValueError('Use ``add_inputs`` to add inputs to the bundle.')

    self._transactions.append(ProposedTransaction(
        address=transaction.address,
        value=transaction.value,
        tag=transaction.tag,
        message=transaction.message[:Fragment.LEN],
        timestamp=transaction.timestamp,
    ))

    # If the message is too long to fit in a single transaction, it
    # must be split up into multiple transactions so that it will
    # fit.
    fragment = transaction.message[Fragment.LEN:]
    while fragment:
        self._transactions.append(ProposedTransaction(
            address=transaction.address,
            value=0,
            tag=transaction.tag,
            message=fragment[:Fragment.LEN],
            timestamp=transaction.timestamp,
        ))

        fragment = fragment[Fragment.LEN:]
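A small sketch of the splitting behaviour (assuming PyOTA's public API, a ``TryteString.from_unicode`` helper, and the standard fragment capacity of 2187 trytes; the address and message are hypothetical):

    from iota import Address, ProposedBundle, ProposedTransaction, TryteString

    bundle = ProposedBundle()
    bundle.add_transaction(ProposedTransaction(
        address=Address(b'9' * 81),  # hypothetical address
        value=0,
        # 3000 characters encode to 6000 trytes, which is more than
        # two fragments' worth, so this becomes three transactions.
        message=TryteString.from_unicode('x' * 3000),
    ))
    print(len(bundle))  # 3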
Adds inputs to spend in the bundle.
def add_inputs(self, inputs):
    # type: (Iterable[Address]) -> None
    """
    Adds inputs to spend in the bundle.

    Note that each input may require multiple transactions, in
    order to hold the entire signature.

    :param inputs:
        Addresses to use as the inputs for this bundle.

        .. important::
            Must have ``balance`` and ``key_index`` attributes!
            Use :py:meth:`iota.api.get_inputs` to prepare inputs.
    """
    if self.hash:
        raise RuntimeError('Bundle is already finalized.')

    for addy in inputs:
        if addy.balance is None:
            raise with_context(
                exc=ValueError(
                    'Address {address} has null ``balance`` '
                    '(``exc.context`` has more info).'.format(
                        address=addy,
                    ),
                ),
                context={
                    'address': addy,
                },
            )

        if addy.key_index is None:
            raise with_context(
                exc=ValueError(
                    'Address {address} has null ``key_index`` '
                    '(``exc.context`` has more info).'.format(
                        address=addy,
                    ),
                ),
                context={
                    'address': addy,
                },
            )

        self._create_input_transactions(addy)
Finalizes the bundle, preparing it to be attached to the Tangle.
def finalize(self):
    # type: () -> None
    """
    Finalizes the bundle, preparing it to be attached to the Tangle.
    """
    if self.hash:
        raise RuntimeError('Bundle is already finalized.')

    if not self:
        raise ValueError('Bundle has no transactions.')

    # Quick validation.
    balance = self.balance

    if balance < 0:
        if self.change_address:
            self.add_transaction(ProposedTransaction(
                address=self.change_address,
                value=-balance,
                tag=self.tag,
            ))
        else:
            raise ValueError(
                'Bundle has unspent inputs (balance: {balance}); '
                'use ``send_unspent_inputs_to`` to create '
                'change transaction.'.format(
                    balance=balance,
                ),
            )
    elif balance > 0:
        raise ValueError(
            'Inputs are insufficient to cover bundle spend '
            '(balance: {balance}).'.format(
                balance=balance,
            ),
        )

    # Generate bundle hash.
    while True:
        sponge = Kerl()
        last_index = len(self) - 1

        for i, txn in enumerate(self):
            txn.current_index = i
            txn.last_index = last_index

            sponge.absorb(txn.get_signature_validation_trytes().as_trits())

        bundle_hash_trits = [0] * HASH_LENGTH
        sponge.squeeze(bundle_hash_trits)

        bundle_hash = BundleHash.from_trits(bundle_hash_trits)

        # Check that we generated a secure bundle hash.
        # https://github.com/iotaledger/iota.lib.py/issues/84
        if any(13 in part for part in normalize(bundle_hash)):
            # Increment the legacy tag and try again.
            tail_transaction = (
                self.tail_transaction
            )  # type: ProposedTransaction
            tail_transaction.increment_legacy_tag()
        else:
            break

    # Copy bundle hash to individual transactions.
    for txn in self:
        txn.bundle_hash = bundle_hash

        # Initialize signature/message fragment.
        txn.signature_message_fragment = Fragment(txn.message or b'')
Sign inputs in a finalized bundle.
def sign_inputs(self, key_generator):
    # type: (KeyGenerator) -> None
    """
    Sign inputs in a finalized bundle.
    """
    if not self.hash:
        raise RuntimeError('Cannot sign inputs until bundle is finalized.')

    # Use a counter for the loop so that we can skip ahead as we go.
    i = 0
    while i < len(self):
        txn = self[i]

        if txn.value < 0:
            # In order to sign the input, we need to know the index
            # of the private key used to generate it.
            if txn.address.key_index is None:
                raise with_context(
                    exc=ValueError(
                        'Unable to sign input {input}; '
                        '``key_index`` is None '
                        '(``exc.context`` has more info).'.format(
                            input=txn.address,
                        ),
                    ),
                    context={
                        'transaction': txn,
                    },
                )

            if txn.address.security_level is None:
                raise with_context(
                    exc=ValueError(
                        'Unable to sign input {input}; '
                        '``security_level`` is None '
                        '(``exc.context`` has more info).'.format(
                            input=txn.address,
                        ),
                    ),
                    context={
                        'transaction': txn,
                    },
                )

            self.sign_input_at(i, key_generator.get_key_for(txn.address))

            i += txn.address.security_level
        else:
            # No signature needed (nor even possible, in some
            # cases); skip this transaction.
            i += 1
Signs the input at the specified index.
def sign_input_at(self, start_index, private_key):
    # type: (int, PrivateKey) -> None
    """
    Signs the input at the specified index.

    :param start_index:
        The index of the first input transaction.

        If necessary, the resulting signature will be split across
        multiple transactions automatically (i.e., if an input has
        ``security_level=2``, you still only need to call
        :py:meth:`sign_input_at` once).

    :param private_key:
        The private key that will be used to generate the signature.

        .. important::
            Be sure that the private key was generated using the
            correct seed, or the resulting signature will be
            invalid!
    """
    if not self.hash:
        raise RuntimeError('Cannot sign inputs until bundle is finalized.')

    private_key.sign_input_transactions(self, start_index)
Creates transactions for the specified input address.
def _create_input_transactions(self, addy):
    # type: (Address) -> None
    """
    Creates transactions for the specified input address.
    """
    self._transactions.append(ProposedTransaction(
        address=addy,
        tag=self.tag,

        # Spend the entire address balance; if necessary, we will
        # add a change transaction to the bundle.
        value=-addy.balance,
    ))

    # Signatures require additional transactions to store, due to
    # the transaction length limit.
    # Subtract 1 to account for the transaction we just added.
    for _ in range(addy.security_level - 1):
        self._transactions.append(ProposedTransaction(
            address=addy,
            tag=self.tag,

            # Note zero value; this is a meta transaction.
            value=0,
        ))
Converts between any two standard units of iota.
def convert_value_to_standard_unit(value, symbol='i'):
    # type: (Text, Text) -> float
    """
    Converts between any two standard units of iota.

    :param value:
        Value (affixed) to convert.  For example: '1.618 Mi'.

    :param symbol:
        Unit symbol of iota to convert to.  For example: 'Gi'.

    :return:
        Float as units of given symbol to convert to.
    """
    try:
        # Get input value.
        value_tuple = value.split()
        amount = float(value_tuple[0])
    except (ValueError, IndexError, AttributeError):
        raise with_context(
            ValueError('Value to convert is not valid.'),
            context={
                'value': value,
            },
        )

    try:
        # Set unit symbols and find factor/multiplier.
        unit_symbol_from = value_tuple[1]
        unit_factor_from = float(STANDARD_UNITS[unit_symbol_from])
        unit_factor_to = float(STANDARD_UNITS[symbol])
    except (KeyError, IndexError):
        # Invalid symbol or no factor.
        raise with_context(
            ValueError('Invalid IOTA unit.'),
            context={
                'value': value,
                'symbol': symbol,
            },
        )

    return amount * (unit_factor_from / unit_factor_to)
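For example, converting between megaiota and gigaiota (using the standard unit table, where 'Mi' = 10**6 i and 'Gi' = 10**9 i):

    convert_value_to_standard_unit('1.618 Mi', 'Gi')  # ~0.001618
    convert_value_to_standard_unit('42 Gi', 'i')      # 42000000000.0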
modular_squareroot_in_FQ2(x) returns the value y such that y**2 % q == x, and None if this is not possible. In cases where there are two solutions, the value with higher imaginary component is favored; if both solutions have equal imaginary component, the value with higher real component is favored.
def modular_squareroot_in_FQ2(value: FQ2) -> FQ2:
    """
    ``modular_squareroot_in_FQ2(x)`` returns the value ``y`` such
    that ``y**2 % q == x``, and None if this is not possible.

    In cases where there are two solutions, the value with higher
    imaginary component is favored; if both solutions have equal
    imaginary component, the value with higher real component is
    favored.
    """
    candidate_squareroot = value ** ((FQ2_order + 8) // 16)
    check = candidate_squareroot ** 2 / value
    if check in eighth_roots_of_unity[::2]:
        x1 = candidate_squareroot / eighth_roots_of_unity[eighth_roots_of_unity.index(check) // 2]
        x2 = -x1
        x1_re, x1_im = x1.coeffs
        x2_re, x2_im = x2.coeffs
        return x1 if (x1_im > x2_im or (x1_im == x2_im and x1_re > x2_re)) else x2
    return None
A compressed point is a 384-bit integer with the bit order (c_flag, b_flag, a_flag, x), where the c_flag bit is always set to 1, the b_flag bit indicates infinity when set to 1, the a_flag bit helps determine the y-coordinate when decompressing, and the 381-bit integer x is the x-coordinate of the point.
def compress_G1(pt: G1Uncompressed) -> G1Compressed:
    """
    A compressed point is a 384-bit integer with the bit order
    (c_flag, b_flag, a_flag, x), where the c_flag bit is always set
    to 1, the b_flag bit indicates infinity when set to 1, the
    a_flag bit helps determine the y-coordinate when decompressing,
    and the 381-bit integer x is the x-coordinate of the point.
    """
    if is_inf(pt):
        # Set c_flag = 1 and b_flag = 1; leave a_flag = x = 0.
        return G1Compressed(POW_2_383 + POW_2_382)
    else:
        x, y = normalize(pt)
        # Record y's leftmost bit to the a_flag.
        a_flag = (y.n * 2) // q
        # Set c_flag = 1 and b_flag = 0.
        return G1Compressed(x.n + a_flag * POW_2_381 + POW_2_383)
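The flag layout can be verified with plain integer arithmetic. This sketch just unpacks the compressed point at infinity (the POW_2_* constants mirror the module's: 2**381, 2**382, 2**383):

    POW_2_381, POW_2_382, POW_2_383 = 2**381, 2**382, 2**383

    z = POW_2_383 + POW_2_382               # compressed point at infinity
    c_flag = z // POW_2_383                 # 1 (always set)
    b_flag = (z % POW_2_383) // POW_2_382   # 1 (infinity)
    a_flag = (z % POW_2_382) // POW_2_381   # 0
    x = z % POW_2_381                       # 0
    print(c_flag, b_flag, a_flag, x)        # 1 1 0 0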
Recovers x and y coordinates from the compressed point.
def decompress_G1(z: G1Compressed) -> G1Uncompressed:
    """
    Recovers x and y coordinates from the compressed point.
    """
    # b_flag == 1 indicates the infinity point.
    b_flag = (z % POW_2_383) // POW_2_382
    if b_flag == 1:
        return Z1

    x = z % POW_2_381

    # Try solving the y coordinate from the equation Y^2 = X^3 + b
    # using the quadratic residue.
    y = pow((x**3 + b.n) % q, (q + 1) // 4, q)

    if pow(y, 2, q) != (x**3 + b.n) % q:
        raise ValueError(
            "The given point is not on G1: y**2 = x**3 + b"
        )

    # Choose the y whose leftmost bit is equal to the a_flag.
    a_flag = (z % POW_2_382) // POW_2_381
    if (y * 2) // q != a_flag:
        y = q - y

    return (FQ(x), FQ(y), FQ(1))
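The square-root step relies on q ≡ 3 (mod 4), in which case a**((q + 1) // 4) mod q is a square root of a whenever a is a quadratic residue. A toy check with q = 7:

    q = 7
    a = 2                        # 3**2 = 9 ≡ 2 (mod 7), so 2 is a residue
    y = pow(a, (q + 1) // 4, q)  # y = 4
    assert pow(y, 2, q) == a     # 4**2 = 16 ≡ 2 (mod 7)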
The compressed point (z1, z2) has the bit order z1: (c_flag1, b_flag1, a_flag1, x1) and z2: (c_flag2, b_flag2, a_flag2, x2), where c_flag1 is always set to 1, b_flag1 indicates infinity when set to 1, a_flag1 helps determine the y-coordinate when decompressing, and a_flag2, b_flag2, and c_flag2 are always set to 0.
def compress_G2(pt: G2Uncompressed) -> G2Compressed:
    """
    The compressed point (z1, z2) has the bit order:

        z1: (c_flag1, b_flag1, a_flag1, x1)
        z2: (c_flag2, b_flag2, a_flag2, x2)

    where:

    - c_flag1 is always set to 1,
    - b_flag1 indicates infinity when set to 1,
    - a_flag1 helps determine the y-coordinate when decompressing,
    - a_flag2, b_flag2, and c_flag2 are always set to 0.
    """
    if not is_on_curve(pt, b2):
        raise ValueError(
            "The given point is not on the twisted curve over FQ**2"
        )
    if is_inf(pt):
        return G2Compressed((POW_2_383 + POW_2_382, 0))
    x, y = normalize(pt)
    x_re, x_im = x.coeffs
    y_re, y_im = y.coeffs
    # Record the leftmost bit of y_im to the a_flag1.
    # If y_im happens to be zero, then use the bit of y_re.
    a_flag1 = (y_im * 2) // q if y_im > 0 else (y_re * 2) // q

    # Imaginary part of x goes to z1, real part goes to z2:
    # c_flag1 = 1, b_flag1 = 0.
    z1 = x_im + a_flag1 * POW_2_381 + POW_2_383
    # a_flag2 = b_flag2 = c_flag2 = 0.
    z2 = x_re
    return G2Compressed((z1, z2))
Recovers x and y coordinates from the compressed point (z1, z2).
def decompress_G2(p: G2Compressed) -> G2Uncompressed:
    """
    Recovers x and y coordinates from the compressed point (z1, z2).
    """
    z1, z2 = p

    # b_flag == 1 indicates the infinity point.
    b_flag1 = (z1 % POW_2_383) // POW_2_382
    if b_flag1 == 1:
        return Z2

    x1 = z1 % POW_2_381
    x2 = z2
    # x1 is the imaginary part, x2 is the real part.
    x = FQ2([x2, x1])
    y = modular_squareroot_in_FQ2(x**3 + b2)
    if y is None:
        raise ValueError("Failed to find a modular squareroot")

    # Choose the y whose leftmost bit of the imaginary part is
    # equal to the a_flag1.  If y_im happens to be zero, then use
    # the bit of y_re.
    a_flag1 = (z1 % POW_2_382) // POW_2_381
    y_re, y_im = y.coeffs
    if (y_im > 0 and (y_im * 2) // q != a_flag1) or (y_im == 0 and (y_re * 2) // q != a_flag1):
        y = FQ2((y * -1).coeffs)

    if not is_on_curve((x, y, FQ2([1, 0])), b2):
        raise ValueError(
            "The given point is not on the twisted curve over FQ**2"
        )
    return (x, y, FQ2([1, 0]))
Extended Euclidean algorithm to find modular inverses for integers.
def prime_field_inv(a: int, n: int) -> int:
    """
    Extended Euclidean algorithm to find modular inverses for
    integers.
    """
    if a == 0:
        return 0
    lm, hm = 1, 0
    low, high = a % n, n
    while low > 1:
        r = high // low
        nm, new = hm - lm * r, high - low * r
        lm, low, hm, high = nm, new, lm, low
    return lm % n
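A quick sanity check of the inverse (any prime modulus works; 17 is arbitrary):

    n = 17
    for a in range(1, n):
        assert (a * prime_field_inv(a, n)) % n == 1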
Load a lexicon from a JSON file.
def from_json_file(cls, filename):
    """
    Load a lexicon from a JSON file.

    Args:
        filename (str): The path to a JSON dump.
    """
    with open(filename, 'r') as fp:
        return cls(json.load(fp))
Given a string and a category, finds and combines words into groups based on their proximity.
def find_word_groups(self, text, category, proximity=2):
    """
    Given a string and a category, finds and combines words into
    groups based on their proximity.

    Args:
        text (str): Some text.
        category (str): The name of a word category in the lexicon;
            its entries are regex strings.
        proximity (int): How many characters neighbouring matches
            may be separated by and still be combined.

    Returns:
        list. The combined strings it found.

    Example:
        COLOURS = [r"red(?:dish)?", r"grey(?:ish)?", r"green(?:ish)?"]
        s = 'GREYISH-GREEN limestone with RED or GREY sandstone.'
        find_word_groups(s, COLOURS) --> ['greyish green', 'red', 'grey']
    """
    f = re.IGNORECASE
    words = getattr(self, category)
    regex = re.compile(r'(\b' + r'\b|\b'.join(words) + r'\b)', flags=f)
    candidates = regex.finditer(text)

    starts, ends = [], []
    groups = []

    for item in candidates:
        starts.append(item.span()[0])
        ends.append(item.span()[1])
        groups.append(item.group().lower())

    new_starts = []  # As a check only.
    new_groups = []  # This is what I want.
    skip = False
    for i, g in enumerate(groups):
        if skip:
            skip = False
            continue
        if (i < len(groups) - 1) and (starts[i + 1] - ends[i] <= proximity):
            if g[-1] == '-':
                sep = ''  # Don't insert spaces after hyphens.
            else:
                sep = ' '
            new_groups.append(g + sep + groups[i + 1])
            new_starts.append(starts[i])
            skip = True
        else:
            if g not in new_groups:
                new_groups.append(g)
                new_starts.append(starts[i])
            skip = False

    return new_groups
Given a string and a dict of synonyms, returns the 'preferred' word. Case insensitive.
def find_synonym(self, word):
    """
    Given a string and a dict of synonyms, returns the 'preferred'
    word.  Case insensitive.

    Args:
        word (str): A word.

    Returns:
        str: The preferred word, or the input word if not found.

    Example:
        >>> syn = {'snake': ['python', 'adder']}
        >>> find_synonym('adder', syn)
        'snake'
        >>> find_synonym('rattler', syn)
        'rattler'

    TODO:
        Make it handle case, returning the same case it received.
    """
    if word and self.synonyms:
        # Make the reverse look-up table.
        reverse_lookup = {}
        for k, v in self.synonyms.items():
            for i in v:
                reverse_lookup[i.lower()] = k.lower()

        # Now check words against this table.
        if word.lower() in reverse_lookup:
            return reverse_lookup[word.lower()]

    return word
Parse a piece of text and replace any abbreviations with their full word equivalents. Uses the lexicon.abbreviations dictionary to find abbreviations.
def expand_abbreviations(self, text):
    """
    Parse a piece of text and replace any abbreviations with their
    full word equivalents.  Uses the lexicon.abbreviations
    dictionary to find abbreviations.

    Args:
        text (str): The text to parse.

    Returns:
        str: The text with abbreviations replaced.
    """
    if not self.abbreviations:
        raise LexiconError("No abbreviations in lexicon.")

    def chunks(data, SIZE=25):
        """
        Regex only supports 100 groups for munging callbacks, so we
        have to chunk the abbreviation dictionary.
        """
        it = iter(data)
        for i in range(0, len(data), SIZE):
            yield {k: data[k] for k in islice(it, SIZE)}

    def cb(g):
        """Regex callback."""
        return self.abbreviations.get(g.group(0)) or g.group(0)

    # Special cases.
    # TODO: We should handle these with a special set of
    # replacements that are made before the others.
    text = re.sub(r'w/', r'wi', text)

    # Main loop.
    for subdict in chunks(self.abbreviations):
        regex = r'(\b' + r'\b)|(\b'.join(subdict.keys()) + r'\b)'
        text = re.sub(regex, cb, text)

    return text
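A hedged usage sketch, assuming Lexicon can be constructed from a plain dict of categories (as ``from_json_file`` suggests); the abbreviation entries are hypothetical, and real ones would come from a loaded lexicon. Note the special-case rule that rewrites 'w/' to 'wi':

    lex = Lexicon({'abbreviations': {'gry': 'grey', 'sst': 'sandstone'}})
    lex.expand_abbreviations('gry sst w/ shale')
    # 'grey sandstone wi shale'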
Takes a piece of text representing a lithologic description for one component, e.g. 'Red vf-f sandstone', and turns it into a dictionary of attributes.
def get_component(self, text, required=False, first_only=True):
    """
    Takes a piece of text representing a lithologic description for
    one component, e.g. "Red vf-f sandstone", and turns it into a
    dictionary of attributes.

    TODO:
        Generalize this so that we can use any types of word, as
        specified in the lexicon.
    """
    component = {}
    for i, (category, words) in enumerate(self.__dict__.items()):
        # There is probably a more elegant way to do this.
        if category in SPECIAL:
            # There are special entries in the lexicon.
            continue

        groups = self.find_word_groups(text, category)
        if groups and first_only:
            groups = groups[:1]
        elif not groups:
            groups = [None]
            if required:
                with warnings.catch_warnings():
                    warnings.simplefilter("always")
                    w = "No lithology in lexicon matching '{0}'"
                    warnings.warn(w.format(text))

        filtered = [self.find_synonym(i) for i in groups]
        if first_only:
            component[category] = filtered[0]
        else:
            component[category] = filtered

    return component
Split a description into parts, each of which can be turned into a single component.
def split_description(self, text):
    """
    Split a description into parts, each of which can be turned
    into a single component.
    """
    # Protect some special sequences.
    t = re.sub(r'(\d) ?in\. ', r'\1 inch ', text)  # Protect.
    t = re.sub(r'(\d) ?ft\. ', r'\1 feet ', t)  # Protect.

    # Transform all part delimiters to the first splitter.
    words = getattr(self, 'splitters')
    try:
        splitter = words[0].strip()
    except:
        splitter = 'with'
    t = re.sub(r'\,?\;?\.? ?((under)?(less than)? \d+%) (?=\w)',
               r' ' + splitter + r' \1 ',
               t)

    # Split.
    f = re.IGNORECASE
    pattern = re.compile(r'(?:' + r'|'.join(words) + r')', flags=f)
    parts = filter(None, pattern.split(t))

    return [i.strip() for i in parts]
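A hedged example of the intended behaviour (assuming 'with' is among the lexicon's splitters):

    lex.split_description('Grey sandstone with minor shale')
    # ['Grey sandstone', 'minor shale']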
Lists the categories in the lexicon, except the optional categories.
def categories(self):
    """
    Lists the categories in the lexicon, except the optional
    categories.

    Returns:
        list: A list of strings of category names.
    """
    keys = [k for k in self.__dict__.keys() if k not in SPECIAL]
    return keys
Jupyter Notebook magic repr function.
def _repr_html_(self):
    """
    Jupyter Notebook magic repr function.
    """
    rows, c = '', ''
    s = '<tr><td><strong>{k}</strong></td><td style="{stl}">{v}</td></tr>'
    for k, v in self.__dict__.items():
        if k == '_colour':
            k = 'colour'
            c = utils.text_colour_for_hex(v)
            style = 'color:{}; background-color:{}'.format(c, v)
        else:
            style = 'color:black; background-color:white'
        if k == 'component':
            try:
                v = v._repr_html_()
            except AttributeError:
                v = v.__repr__()
        rows += s.format(k=k, v=v, stl=style)
    html = '<table>{}</table>'.format(rows)
    return html
Jupyter Notebook magic repr function as a row; used by Legend._repr_html_().
def _repr_html_row_(self, keys):
    """
    Jupyter Notebook magic repr function as a row; used by
    ``Legend._repr_html_()``.
    """
    tr, th, c = '', '', ''
    r = '<td style="{stl}">{v}</td>'
    h = '<th>{k}</th>'
    for k in keys:
        v = self.__dict__.get(k)
        if k == '_colour':
            k = 'colour'
            c = utils.text_colour_for_hex(v)
            style = 'color:{}; background-color:{}'.format(c, v)
        else:
            style = 'color:black; background-color:white'
        if k == 'component':
            try:
                v = v._repr_html_()
            except AttributeError:
                v = v.__repr__()
        tr += r.format(v=v, stl=style)
        th += h.format(k=k)
    return th, tr
Returns a minimal Decor with a random colour.
def random(cls, component):
    """
    Returns a minimal Decor with a random colour.
    """
    colour = random.sample([i for i in range(256)], 3)
    return cls({'colour': colour,
                'component': component,
                'width': 1.0})
Make a simple plot of the Decor.
def plot(self, fmt=None, fig=None, ax=None):
    """
    Make a simple plot of the Decor.

    Args:
        fmt (str): A Python format string for the component
            summaries.
        fig (Pyplot figure): A figure, optional. Use either fig or
            ax, not both.
        ax (Pyplot axis): An axis, optional. Use either fig or ax,
            not both.

    Returns:
        fig or ax or None. If you pass in an ax, you get it back.
        If you pass in a fig, you get it. If you pass nothing, the
        function creates a plot object as a side-effect.
    """
    u = 4  # Aspect ratio of decor plot.
    v = 0.25  # Ratio of decor tile width.

    r = None
    if (fig is None) and (ax is None):
        fig = plt.figure(figsize=(u, 1))
    else:
        r = fig

    if ax is None:
        ax = fig.add_axes([0.1 * v, 0.1, 0.8 * v, 0.8])
    else:
        r = ax

    rect1 = patches.Rectangle((0, 0), u * v, u * v,
                              color=self.colour,
                              lw=1,
                              hatch=self.hatch,
                              ec='k')
    ax.add_patch(rect1)

    ax.text(1.0 + 0.1 * v * u, u * v * 0.5,
            self.component.summary(fmt=fmt),
            fontsize=max(u, 15),
            verticalalignment='center',
            horizontalalignment='left')

    ax.set_xlim([0, u * v])
    ax.set_ylim([0, u * v])
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax.invert_yaxis()

    return r
Jupyter Notebook magic repr function.
def _repr_html_(self):
    """
    Jupyter Notebook magic repr function.
    """
    all_keys = list(set(itertools.chain(*[d.keys for d in self])))
    rows = ''
    for decor in self:
        th, tr = decor._repr_html_row_(keys=all_keys)
        rows += '<tr>{}</tr>'.format(tr)
    header = '<tr>{}</tr>'.format(th)
    html = '<table>{}{}</table>'.format(header, rows)
    return html
Generate a default legend.
def builtin(cls, name):
    """
    Generate a default legend.

    Args:
        name (str): The name of the legend you want. Not case
            sensitive.

            'nsdoe': Nova Scotia Dept. of Energy
            'canstrat': Canstrat
            'nagmdm__6_2': USGS N. Am. Geol. Map Data Model 6.2
            'nagmdm__6_1': USGS N. Am. Geol. Map Data Model 6.1
            'nagmdm__4_3': USGS N. Am. Geol. Map Data Model 4.3
            'sgmc': USGS State Geologic Map Compilation

            Default: 'nagmdm__6_2'.

    Returns:
        Legend: The legend stored in `defaults.py`.
    """
    names = {
        'nsdoe': LEGEND__NSDOE,
        'canstrat': LEGEND__Canstrat,
        'nagmdm__6_2': LEGEND__NAGMDM__6_2,
        'nagmdm__6_1': LEGEND__NAGMDM__6_1,
        'nagmdm__4_3': LEGEND__NAGMDM__4_3,
        'sgmc': LEGEND__SGMC,
    }
    return cls.from_csv(text=names[name.lower()])
Generate a default timescale legend.
def builtin_timescale(cls, name):
    """
    Generate a default timescale legend.

    Args:
        name (str): The name of the timescale you want: 'isc',
            'usgs_isc', or 'dnag'.

    Returns:
        Legend: The timescale stored in `defaults.py`.
    """
    names = {
        'isc': TIMESCALE__ISC,
        'usgs_isc': TIMESCALE__USGS_ISC,
        'dnag': TIMESCALE__DNAG,
    }
    return cls.from_csv(text=names[name.lower()])
Generate a random legend for a given list of components.
def random(cls, components, width=False, colour=None):
    """
    Generate a random legend for a given list of components.

    Args:
        components (list or Striplog): A list of components. If you
            pass a Striplog, it will use the primary components. If
            you pass a component on its own, you will get a random
            Decor.
        width (bool): Also generate widths for the components,
            based on the order in which they are encountered.
        colour (str): If you want to give the Decors all the same
            colour, provide a hex string.

    Returns:
        Legend or Decor: A legend (or Decor) with random colours.

    TODO:
        It might be convenient to have a partial method to generate
        an 'empty' legend. Might be an easy way for someone to
        start with a template, since it'll have the components in
        it already.
    """
    try:
        # Treating as a Striplog.
        list_of_Decors = [Decor.random(c)
                          for c in [i[0] for i in components.unique if i[0]]]
    except:
        try:
            list_of_Decors = [Decor.random(c) for c in components.copy()]
        except:
            # It's a single component.
            list_of_Decors = [Decor.random(components)]

    if colour is not None:
        for d in list_of_Decors:
            d.colour = colour

    if width:
        for i, d in enumerate(list_of_Decors):
            d.width = i + 1

    return cls(list_of_Decors)
A slightly easier way to make legends from images.
def from_image(cls, filename, components, ignore=None,
               col_offset=0.1, row_offset=2):
    """
    A slightly easier way to make legends from images.

    Args:
        filename (str)
        components (list)
        ignore (list): Colours to ignore, e.g. "#FFFFFF" to ignore
            white.
        col_offset (Number): If < 1, interpreted as proportion of
            way across the image. If > 1, interpreted as pixels
            from left.
        row_offset (int): Number of pixels to skip at the top of
            each interval.
    """
    if ignore is None:
        ignore = []

    rgb = utils.loglike_from_image(filename, offset=col_offset)
    loglike = np.array([utils.rgb_to_hex(t) for t in rgb])

    # Get the pixels and colour values at 'tops' (i.e. changes).
    _, hexes = utils.tops_from_loglike(loglike, offset=row_offset)

    # Reduce to unique colours.
    hexes_reduced = []
    for h in hexes:
        if h not in hexes_reduced:
            if h not in ignore:
                hexes_reduced.append(h)

    list_of_Decors = []
    for i, c in enumerate(components):
        d = Decor({'colour': hexes_reduced[i], 'component': c})
        list_of_Decors.append(d)

    return cls(list_of_Decors)
Read CSV text and generate a Legend.
def from_csv(cls, filename=None, text=None):
    """
    Read CSV text and generate a Legend.

    Args:
        filename (str): The path to a CSV file.
        text (str): The CSV string.

    In the first row, list the properties. Precede the properties
    of the component with 'comp ' or 'component '. For example:

        colour, width, comp lithology, comp colour
        #FFFFFF, 0, ,
        #F7E9A6, 3, Sandstone, Grey
        #FF99CC, 2, Anhydrite,
        ... etc

    Note:
        To edit a legend, the easiest thing to do is probably this:

        - `legend.to_csv()`
        - Edit the legend, call it `new_legend`.
        - `legend = Legend.from_csv(text=new_legend)`
    """
    if (filename is None) and (text is None):
        raise LegendError("You must provide a filename or CSV text.")

    if filename is not None:
        with open(filename, 'r') as f:
            text = f.read()

    try:
        f = StringIO(text)  # Python 3
    except TypeError:
        f = StringIO(unicode(text))  # Python 2

    r = csv.DictReader(f, skipinitialspace=True)

    list_of_Decors, components = [], []
    kind = 'component'
    for row in r:
        d, component = {}, {}
        for (k, v) in row.items():
            if k in [None, '']:
                continue
            if v in [None, '']:
                if k.lower() not in ['color', 'colour']:
                    continue
            if k[:4].lower() == 'comp':
                prop = ' '.join(k.split()[1:])
                if v.lower() == 'true':
                    component[prop] = True
                elif v.lower() == 'false':
                    component[prop] = False
                else:
                    try:
                        component[prop] = float(v)
                    except ValueError:
                        component[prop] = v.lower()
            elif k[:5].lower() == 'curve':
                prop = ' '.join(k.split()[1:])
                component[prop] = v.lower()
                kind = 'curve'
            else:
                try:
                    d[k] = float(v)
                except ValueError:
                    d[k] = v.lower()

        this_component = Component(component)
        d[kind] = this_component

        # Check for duplicates and warn.
        if this_component in components:
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                w = "This legend contains duplicate components."
                warnings.warn(w)
        components.append(this_component)

        # Append to the master list and continue.
        list_of_Decors.append(Decor(d))

    return cls(list_of_Decors)
Renders a legend as a CSV string.
def to_csv(self):
    """
    Renders a legend as a CSV string.

    No arguments.

    Returns:
        str: The legend as a CSV.
    """
    # We can't delegate this to Decor because we need to know the
    # superset of all Decor properties. There may be lots of
    # blanks.
    header = []
    component_header = []
    for row in self:
        for j in row.__dict__.keys():
            if j == '_colour':
                j = 'colour'
            header.append(j)
        for k in row.component.__dict__.keys():
            component_header.append(k)
    header = set(header)
    component_header = set(component_header)
    header.remove('component')

    header_row = ''
    has_colour = False  # Track whether to emit the colour column.
    if 'colour' in header:
        header_row += 'colour,'
        header.remove('colour')
        has_colour = True
    for item in header:
        header_row += item + ','
    for item in component_header:
        header_row += 'component ' + item + ','

    # Now we have a header row! Phew.
    # Next we'll go back over the legend and collect everything.
    result = header_row.strip(',') + '\n'
    for row in self:
        if has_colour:
            result += row.__dict__.get('_colour', '') + ','
        for item in header:
            result += str(row.__dict__.get(item, '')) + ','
        for item in component_header:
            result += str(row.component.__dict__.get(item, '')) + ','
        result += '\n'

    return result
The maximum width of all the Decors in the Legend. This is needed to scale a Legend or Striplog when plotting with widths turned on.
def max_width(self):
    """
    The maximum width of all the Decors in the Legend. This is
    needed to scale a Legend or Striplog when plotting with widths
    turned on.
    """
    try:
        maximum = max([row.width for row in self.__list
                       if row.width is not None])
        return maximum
    except:
        return 0
Get the decor for a component.
def get_decor(self, c, match_only=None):
    """
    Get the decor for a component.

    Args:
        c (component): The component to look up.
        match_only (list of str): The component attributes to
            include in the comparison. Default: All of them.

    Returns:
        Decor. The matching Decor from the Legend, or None if not
        found.
    """
    if isinstance(c, Component):
        if c:
            if match_only:
                # Filter the component to only the specified
                # attributes.
                c = Component({k: getattr(c, k, None)
                               for k in match_only})
            for decor in self.__list:
                try:
                    if c == decor.component:
                        return decor
                except AttributeError:
                    continue
    else:
        for decor in self.__list:
            try:
                if getattr(c, 'mnemonic').lower() == decor.curve.mnemonic:
                    return decor
            except AttributeError:
                continue
    return Decor({'colour': '#eeeeee', 'component': Component()})
Get the attribute of a component.
def getattr(self, c, attr, default=None, match_only=None):
    """
    Get the attribute of a component.

    Args:
        c (component): The component to look up.
        attr (str): The attribute to get.
        default (str): What to return in the event of no match.
        match_only (list of str): The component attributes to
            include in the comparison. Default: All of them.

    Returns:
        obj. The specified attribute of the matching Decor in the
        Legend.
    """
    matching_decor = self.get_decor(c, match_only=match_only)

    try:
        return getattr(matching_decor, attr)
    except AttributeError:
        return default
Get the display colour of a component. Wraps getattr().
def get_colour(self, c, default='#eeeeee', match_only=None):
    """
    Get the display colour of a component. Wraps `getattr()`.

    Development note:
        Cannot define this as a `partial()` because I want to
        maintain the order of arguments in `getattr()`.

    Args:
        c (component): The component to look up.
        default (str): The colour to return in the event of no
            match.
        match_only (list of str): The component attributes to
            include in the comparison. Default: All of them.

    Returns:
        str. The hex string of the matching Decor in the Legend.
    """
    return self.getattr(c=c,
                        attr='colour',
                        default=default,
                        match_only=match_only)
Get the display width of a component. Wraps getattr().
def get_width(self, c, default=0, match_only=None):
    """
    Get the display width of a component. Wraps `getattr()`.

    Development note:
        Cannot define this as a `partial()` because I want to
        maintain the order of arguments in `getattr()`.

    Args:
        c (component): The component to look up.
        default (float): The width to return in the event of no
            match.
        match_only (list of str): The component attributes to
            include in the comparison. Default: All of them.

    Returns:
        float. The width of the matching Decor in the Legend.
    """
    return self.getattr(c=c,
                        attr='width',
                        default=default,
                        match_only=match_only)
Get the component corresponding to a display colour. This is for generating a Striplog object from a colour image of a striplog.
def get_component(self, colour, tolerance=0, default=None):
    """
    Get the component corresponding to a display colour. This is
    for generating a Striplog object from a colour image of a
    striplog.

    Args:
        colour (str): The hex colour string to look up.
        tolerance (float): The colourspace distance within which to
            match.
        default (component or None): The component to return in the
            event of no match.

    Returns:
        component. The component best matching the provided colour.
    """
    if not (0 <= tolerance <= np.sqrt(195075)):
        raise LegendError('Tolerance must be between 0 and 441.67')

    for decor in self.__list:
        if colour.lower() == decor.colour:
            return decor.component

    # If we're here, we didn't find one yet.
    r1, g1, b1 = utils.hex_to_rgb(colour)

    # Start with a best match of black.
    best_match = '#000000'
    best_match_dist = np.sqrt(r1**2. + g1**2. + b1**2.)

    # Now compare to each colour in the legend.
    for decor in self.__list:
        r2, g2, b2 = decor.rgb
        distance = np.sqrt((r2 - r1)**2. + (g2 - g1)**2. + (b2 - b1)**2.)
        if distance < best_match_dist:
            best_match = decor.component
            best_match_dist = distance
            best_match_colour = decor.colour

    if best_match_dist <= tolerance:
        return best_match
    else:
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            w = "No match found for {0} ".format(colour.lower())
            w += "with tolerance of {0}. Best match is ".format(tolerance)
            w += "{0}, {1}".format(best_match.summary(), best_match_colour)
            w += ", d={0}".format(best_match_dist)
            warnings.warn(w)
        return default
Make a simple plot of the legend.
def plot(self, fmt=None):
    """
    Make a simple plot of the legend.

    Simply calls Decor.plot() on all of its members.

    TODO: Build a more attractive plot.
    """
    for d in self.__list:
        d.plot(fmt=fmt)
    return None
Jupyter Notebook magic repr function.
def _repr_html_(self):
    """
    Jupyter Notebook magic repr function.
    """
    rows = ''
    s = '<tr><td><strong>{k}</strong></td><td>{v}</td></tr>'
    for k, v in self.__dict__.items():
        rows += s.format(k=k, v=v)
    html = '<table>{}</table>'.format(rows)
    return html
Generate a Component from a text string using a Lexicon.
def from_text(cls, text, lexicon, required=None, first_only=True):
    """
    Generate a Component from a text string, using a Lexicon.

    Args:
        text (str): The text string to parse.
        lexicon (Lexicon): The dictionary to use for the categories
            and lexemes.
        required (str): An attribute that we must have. If a
            required attribute is missing from the component, then
            None is returned.
        first_only (bool): Whether to only take the first match of
            a lexeme against the text string.

    Returns:
        Component: A Component object, or None if there was no
        must-have field.
    """
    component = lexicon.get_component(text, first_only=first_only)
    if required and (required not in component):
        return None
    else:
        return cls(component)
Given a format string, return a summary description of a component.
def summary(self, fmt=None, initial=True, default=''):
        """
        Given a format string, return a summary description of a component.

        Args:
            fmt (str): Describes the format with a string. If no format is
                given, you will just get a list of attributes. If you give the
                empty string (''), you'll get ``default`` back. By default this
                gives you the empty string, effectively suppressing the
                summary.
            initial (bool): Whether to capitalize the first letter. Default is
                True.
            default (str): What to give if there's no component defined.

        Returns:
            str: A summary string.

        Example:
            r = Component({'colour': 'Red',
                           'grainsize': 'VF-F',
                           'lithology': 'Sandstone'})

            r.summary()  -->  'Red, vf-f, sandstone'
        """
        if default and not self.__dict__:
            return default

        if fmt == '':
            return default

        keys = [k for k, v in self.__dict__.items() if v != '']
        f = fmt or '{' + '}, {'.join(keys) + '}'

        try:
            summary = CustomFormatter().format(f, **self.__dict__)
        except KeyError as e:
            raise ComponentError("Error building summary, " + str(e))

        if summary and initial and not fmt:
            summary = summary[0].upper() + summary[1:]

        return summary
Graceful deprecation for old class name.
def Rock(*args, **kwargs): """ Graceful deprecation for old class name. """ with warnings.catch_warnings(): warnings.simplefilter("always") w = "The 'Rock' class was renamed 'Component'. " w += "Please update your code." warnings.warn(w, DeprecationWarning, stacklevel=2) return Component(*args, **kwargs)
Processes a single row from the file.
def _process_row(text, columns): """ Processes a single row from the file. """ if not text: return # Construct the column dictionary that maps each field to # its start, its length, and its read and write functions. coldict = {k: {'start': s, 'len': l, 'read': r, 'write': w} for k, (s, l, r, w) in columns.items()} # Now collect the item item = {} for field in coldict: value = _get_field(text, coldict, field) if value is not None: item[field] = value return item
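A minimal, self-contained sketch of the fixed-width idea behind the function above. The column spec here is hypothetical and the real ``columns_`` tables and ``_get_field()`` helper are not shown; each field maps to a (start, length, read, write) tuple.

def read_fixed_width(text, columns):
    # columns maps field -> (start, length, read function, write function).
    item = {}
    for field, (start, length, read, _) in columns.items():
        raw = text[start:start + length].strip()
        if raw:
            item[field] = read(raw)
    return item

columns_demo = {'card': (0, 1, int, str), 'top': (1, 7, float, str)}
read_fixed_width('7 123.50', columns_demo)  # -> {'card': 7, 'top': 123.5}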
Read all the rows and return a dict of the results.
def parse_canstrat(text):
    """
    Read all the rows and return a dict of the results.
    """
    result = {}
    for row in text.split('\n'):
        if not row:
            continue
        if len(row) < 8:
            # Not a real record.
            continue

        # Read the metadata for this row.
        row_header = _process_row(row, columns_) or {'card': None}
        card = row_header['card']

        # Now we know the card type for this row, we can process it.
        if card is not None:
            item = _process_row(row, columns[card])
            this_list = result.get(card, [])
            this_list.append(item)
            result[card] = this_list

    # Flatten if possible.
    for c, d in result.items():
        if len(d) == 1:
            result[c] = d[0]

    return result
Still unsure about the best way to do this, hence cruft.
def get_template(name): """ Still unsure about best way to do this, hence cruft. """ text = re.sub(r'\r\n', r'\n', name) text = re.sub(r'\{([FISDE°].*?)\}', r'{{\1}}', text) return text
Private method. Checks if striplog is monotonically increasing in depth.
def __strict(self):
        """
        Private method. Checks if striplog is monotonically increasing in
        depth.

        Returns:
            Bool.
        """
        def conc(a, b):
            return a + b

        # Flatten the interval boundaries into a single array of depths.
        b = np.array(reduce(conc, [[i.top.z, i.base.z] for i in self]))

        return all(np.diff(b) >= 0)
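The same check, as a standalone sketch with made-up boundary depths:

import numpy as np

# Tops and bases of three intervals, flattened in order.
b = np.array([10.0, 20.0, 20.0, 35.0, 35.0, 50.0])
print(bool(np.all(np.diff(b) >= 0)))  # True: monotonically increasing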
Property. Summarize a Striplog with some statistics.
def unique(self):
        """
        Property. Summarize a Striplog with some statistics.

        Returns:
            List. A list of (Component, total thickness) tuples.
        """
        all_rx = set([iv.primary for iv in self])
        table = {r: 0 for r in all_rx}
        for iv in self:
            table[iv.primary] += iv.thickness

        return sorted(table.items(), key=operator.itemgetter(1), reverse=True)
Property.
def top(self): """ Property. """ # For backwards compatibility. with warnings.catch_warnings(): warnings.simplefilter("always") w = "Striplog.top is deprecated; please use Striplog.unique" warnings.warn(w, DeprecationWarning, stacklevel=2) return self.unique
Private method. Take a sequence of tops in an arbitrary dimension and provide a list of intervals from which a striplog can be made.
def __intervals_from_tops(self, tops, values, basis, components, field=None, ignore_nan=True): """ Private method. Take a sequence of tops in an arbitrary dimension, and provide a list of intervals from which a striplog can be made. This is only intended to be used by ``from_image()``. Args: tops (iterable). A list of floats. values (iterable). A list of values to look up. basis (iterable). A list of components. components (iterable). A list of Components. Returns: List. A list of Intervals. """ # Scale tops to actual depths. length = float(basis.size) start, stop = basis[0], basis[-1] tops = [start + (p/(length-1)) * (stop-start) for p in tops] bases = tops[1:] + [stop] list_of_Intervals = [] for i, t in enumerate(tops): v, c, d = values[i], [], {} if ignore_nan and np.isnan(v): continue if (field is not None): d = {field: v} if components is not None: try: c = [deepcopy(components[int(v)])] except IndexError: c = [] if c and (c[0] is None): c = [] interval = Interval(t, bases[i], data=d, components=c) list_of_Intervals.append(interval) return list_of_Intervals
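The scaling step above maps pixel rows to depths with ``start + (p / (length - 1)) * (stop - start)``. A worked example with made-up numbers:

# A 400-pixel column spanning 100-200 m: pixel 0 -> 100 m, pixel 399 -> 200 m.
length, start, stop = 400.0, 100.0, 200.0
tops_px = [0, 120, 300]
tops_z = [start + (p / (length - 1)) * (stop - start) for p in tops_px]
# -> [100.0, 130.08, 175.19] (approximately)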
Private function. Make sure we have what we need to make a striplog.
def _clean_longitudinal_data(cls, data, null=None): """ Private function. Make sure we have what we need to make a striplog. """ # Rename 'depth' or 'MD' if ('top' not in data.keys()): data['top'] = data.pop('depth', data.pop('MD', None)) # Sort everything idx = list(data.keys()).index('top') values = sorted(zip(*data.values()), key=lambda x: x[idx]) data = {k: list(v) for k, v in zip(data.keys(), zip(*values))} if data['top'] is None: raise StriplogError('Could not get tops.') # Get rid of null-like values if specified. if null is not None: for k, v in data.items(): data[k] = [i if i != null else None for i in v] return data
Makes a striplog from a Petrel text file.
def from_petrel(cls, filename, stop=None, points=False, null=None, function=None, include=None, exclude=None, remap=None, ignore=None): """ Makes a striplog from a Petrel text file. Returns: striplog. """ result = utils.read_petrel(filename, function=function, remap=remap, ) data = cls._clean_longitudinal_data(result, null=null ) list_of_Intervals = cls._build_list_of_Intervals(data, stop=stop, points=points, include=include, exclude=exclude, ignore=ignore ) if list_of_Intervals: return cls(list_of_Intervals) return None
Private function. Takes a data dictionary and reconstructs a list of Intervals from it.
def _build_list_of_Intervals(cls, data_dict, stop=None, points=False, include=None, exclude=None, ignore=None, lexicon=None): """ Private function. Takes a data dictionary and reconstructs a list of Intervals from it. Args: data_dict (dict) stop (float): Where to end the last interval. points (bool) include (dict) exclude (dict) ignore (list) lexicon (Lexicon) Returns: list. """ include = include or {} exclude = exclude or {} ignore = ignore or [] # Reassemble as list of dicts all_data = [] for data in zip(*data_dict.values()): all_data.append({k: v for k, v in zip(data_dict.keys(), data)}) # Sort all_data = sorted(all_data, key=lambda x: x['top']) # Filter down: wanted_data = [] for dictionary in all_data: keep = True delete = [] for k, v in dictionary.items(): incl = include.get(k, utils.null_default(True)) excl = exclude.get(k, utils.null_default(False)) if k in ignore: delete.append(k) if not incl(v): keep = False if excl(v): keep = False if delete: for key in delete: _ = dictionary.pop(key, None) if keep: wanted_data.append(dictionary) # Fill in if not points: for i, iv in enumerate(wanted_data): if iv.get('base', None) is None: try: # To set from next interval iv['base'] = wanted_data[i+1]['top'] except (IndexError, KeyError): # It's the last interval if stop is not None: thick = stop - iv['top'] else: thick = 1 iv['base'] = iv['top'] + thick # Build the list of intervals to pass to __init__() list_of_Intervals = [] for iv in wanted_data: top = iv.pop('top') base = iv.pop('base', None) descr = iv.pop('description', '') if iv: c, d = {}, {} for k, v in iv.items(): if (k[:5].lower() == 'comp ') or (k[:9].lower() == 'component'): k = re.sub(r'comp(?:onent)? ', '', k, flags=re.I) c[k] = v # It's a component else: if v is not None: d[k] = v # It's data comp = [Component(c)] if c else None this = Interval(**{'top': top, 'base': base, 'description': descr, 'data': d, 'components': comp}) else: this = Interval(**{'top': top, 'base': base, 'description': descr, 'lexicon': lexicon}) list_of_Intervals.append(this) return list_of_Intervals
Load from a CSV file or text.
def from_csv(cls, filename=None, text=None, dlm=',', lexicon=None, points=False, include=None, exclude=None, remap=None, function=None, null=None, ignore=None, source=None, stop=None, fieldnames=None): """ Load from a CSV file or text. """ if (filename is None) and (text is None): raise StriplogError("You must provide a filename or CSV text.") if (filename is not None): if source is None: source = filename with open(filename, 'r') as f: text = f.read() source = source or 'CSV' # Deal with multiple spaces in space delimited file. if dlm == ' ': text = re.sub(r'[ \t]+', ' ', text) if fieldnames is not None: text = dlm.join(fieldnames) + '\n' + text try: f = StringIO(text) # Python 3 except TypeError: f = StringIO(unicode(text)) # Python 2 reader = csv.DictReader(f, delimiter=dlm) # Reorganize the data to make fixing it easier. reorg = {k.strip().lower(): [] for k in reader.fieldnames if k is not None} t = f.tell() for key in reorg: f.seek(t) for r in reader: s = {k.strip().lower(): v.strip() for k, v in r.items()} try: reorg[key].append(float(s[key])) except ValueError: reorg[key].append(s[key]) f.close() remap = remap or {} for k, v in remap.items(): reorg[v] = reorg.pop(k) data = cls._clean_longitudinal_data(reorg, null=null) list_of_Intervals = cls._build_list_of_Intervals(data, points=points, lexicon=lexicon, include=include, exclude=exclude, ignore=ignore, stop=stop) return cls(list_of_Intervals, source=source)
Convert a CSV string into a striplog. Expects 2 or 3 fields: top, description OR top, base, description
def from_descriptions(cls, text,
                          lexicon=None,
                          source='CSV',
                          dlm=',',
                          points=False,
                          abbreviations=False,
                          complete=False,
                          order='depth',
                          columns=None,
                          ):
        """
        Convert a CSV string into a striplog. Expects 2 or 3 fields:
            top, description
            OR
            top, base, description

        Args:
            text (str): The input text, given by ``well.other``.
            lexicon (Lexicon): A lexicon, required to extract components.
            source (str): A source. Default: 'CSV'.
            dlm (str): The delimiter, given by ``well.dlm``. Default: ','
            points (bool): Whether to treat as points or as intervals.
            abbreviations (bool): Whether to expand abbreviations in the
                description. Default: False.
            complete (bool): Whether to make 'blank' intervals, or just leave
                gaps. Default: False.
            order (str): The order, 'depth' or 'elevation'. Default: 'depth'.
            columns (tuple or list): The names of the columns.

        Returns:
            Striplog: A ``striplog`` object.

        Example:
            # TOP       BOT        LITH
            312.34,     459.61,    Sandstone
            459.71,     589.61,    Limestone
            589.71,     827.50,    Green shale
            827.60,     1010.84,   Fine sandstone
        """
        text = re.sub(r'(\n+|\r\n|\r)', '\n', text.strip())

        as_strings = []
        try:
            f = StringIO(text)  # Python 3
        except TypeError:
            f = StringIO(unicode(text))  # Python 2
        reader = csv.reader(f, delimiter=dlm, skipinitialspace=True)
        for row in reader:
            as_strings.append(row)
        f.close()

        if not columns:
            if order[0].lower() == 'e':
                columns = ('base', 'top', 'description')
            else:
                columns = ('top', 'base', 'description')

        result = {k: [] for k in columns}

        # Set the indices for the fields.
        tix = columns.index('top')
        bix = columns.index('base')
        dix = columns.index('description')

        for i, row in enumerate(as_strings):

            # NB: this shortcut only works for rows with a missing BASE,
            # i.e. (top, description) pairs.
            if len(row) == 2:
                row = [row[0], None, row[1]]

            # TOP
            this_top = float(row[tix])

            # BASE
            # Base is null: use next top if this isn't the end.
            if row[1] is None:
                if i < len(as_strings)-1:
                    this_base = float(as_strings[i+1][0])  # Next top.
                else:
                    this_base = this_top + 1  # Default to 1 m thick at end.
            else:
                this_base = float(row[bix])

            # DESCRIPTION
            this_descr = row[dix].strip()

            # Deal with making intervals or points...
            if not points:
                # Insert intervals where needed.
                if complete and (i > 0) and (this_top != result['base'][-1]):
                    result['top'].append(result['base'][-1])
                    result['base'].append(this_top)
                    result['description'].append('')
            else:
                this_base = None  # Gets set to Top in striplog creation

            # ASSIGN
            result['top'].append(this_top)
            result['base'].append(this_base)
            result['description'].append(this_descr)

        # Build the list.
        list_of_Intervals = []
        for i, t in enumerate(result['top']):
            b = result['base'][i]
            d = result['description'][i]
            interval = Interval(t, b, description=d,
                                lexicon=lexicon,
                                abbreviations=abbreviations)
            list_of_Intervals.append(interval)

        return cls(list_of_Intervals, source=source)
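A sketch of calling this with the sample data from the docstring; the default lexicon is an assumption.

from striplog import Striplog, Lexicon

text = """312.34, 459.61, Sandstone
459.71, 589.61, Limestone
589.71, 827.50, Green shale
827.60, 1010.84, Fine sandstone"""

s = Striplog.from_descriptions(text, lexicon=Lexicon.default())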
Read an image and generate Striplog.
def from_image(cls, filename, start, stop, legend,
                   source="Image",
                   col_offset=0.1,
                   row_offset=2,
                   tolerance=0):
        """
        Read an image and generate Striplog.

        Args:
            filename (str): An image file, preferably high-res PNG.
            start (float or int): The depth at the top of the image.
            stop (float or int): The depth at the bottom of the image.
            legend (Legend): A legend to look up the components in.
            source (str): A source for the data. Default: 'Image'.
            col_offset (Number): The proportion of the way across the image
                from which to extract the pixel column. Default: 0.1 (i.e. 10%).
            row_offset (int): The number of pixels to skip at the top of each
                change in colour. Default: 2.
            tolerance (float): The Euclidean distance between hex colours,
                which has a maximum (black to white) of 441.67 in base 10.
                Default: 0.

        Returns:
            Striplog: The ``striplog`` object.
        """
        rgb = utils.loglike_from_image(filename, col_offset)
        loglike = np.array([utils.rgb_to_hex(t) for t in rgb])

        # Get the pixels and colour values at 'tops' (i.e. changes).
        tops, hexes = utils.tops_from_loglike(loglike, offset=row_offset)

        # If there are consecutive tops, we assume it's because there is a
        # single-pixel row that we don't want. So take the second one only.
        # We used to do this reduction in ``utils.tops_from_loglike()`` but
        # it was preventing us from making intervals only one sample thick.
        nonconsecutive = np.append(np.diff(tops), 2)
        tops = tops[nonconsecutive > 1]
        hexes = hexes[nonconsecutive > 1]

        # Get the set of unique colours.
        hexes_reduced = list(set(hexes))

        # Get the components corresponding to the colours.
        components = [legend.get_component(h, tolerance=tolerance)
                      for h in hexes_reduced]

        # Turn them into integers.
        values = [hexes_reduced.index(i) for i in hexes]

        basis = np.linspace(start, stop, loglike.size)

        list_of_Intervals = cls.__intervals_from_tops(tops,
                                                      values,
                                                      basis,
                                                      components)

        return cls(list_of_Intervals, source=source)
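A usage sketch; the filename is hypothetical and the built-in legend is assumed to cover the colours in the image.

from striplog import Striplog, Legend

legend = Legend.builtin('NSDOE')  # assuming this built-in legend exists
s = Striplog.from_image('strip.png',  # hypothetical filename
                        start=0, stop=500,
                        legend=legend,
                        tolerance=25)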
For backwards compatibility.
def from_img(cls, *args, **kwargs): """ For backwards compatibility. """ with warnings.catch_warnings(): warnings.simplefilter("always") w = "from_img() is deprecated; please use from_image()" warnings.warn(w) return cls.from_image(*args, **kwargs)
DEPRECATING.
def _from_array(cls, a, lexicon=None, source="", points=False, abbreviations=False): """ DEPRECATING. Turn an array-like into a Striplog. It should have the following format (where ``base`` is optional): [(top, base, description), (top, base, description), ... ] Args: a (array-like): A list of lists or of tuples, or an array. lexicon (Lexicon): A language dictionary to extract structured objects from the descriptions. source (str): The source of the data. Default: ''. points (bool): Whether to treat as point data. Default: False. Returns: Striplog: The ``striplog`` object. """ with warnings.catch_warnings(): warnings.simplefilter("always") w = "from_array() is deprecated." warnings.warn(w, DeprecationWarning, stacklevel=2) csv_text = '' for interval in a: interval = [str(i) for i in interval] if (len(interval) < 2) or (len(interval) > 3): raise StriplogError('Elements must have 2 or 3 items') descr = interval[-1].strip('" ') interval[-1] = '"' + descr + '"' csv_text += ', '.join(interval) + '\n' return cls.from_descriptions(csv_text, lexicon, source=source, points=points, abbreviations=abbreviations)
Turn a 1D array into a striplog, given a cutoff.
def from_log(cls, log,
                 cutoff=None,
                 components=None,
                 legend=None,
                 legend_field=None,
                 field=None,
                 right=False,
                 basis=None,
                 source='Log'):
        """
        Turn a 1D array into a striplog, given a cutoff.

        Args:
            log (array-like): A 1D array or a list of integers.
            cutoff (number or array-like): The log value(s) at which to bin
                the log. Optional.
            components (array-like): A list of components. Use this or
                ``legend``.
            legend (``Legend``): A legend object. Use this or ``components``.
            legend_field (str): If you're not trying to match against
                components, then you can match the log values to this field in
                the Decors.
            field (str): The field in the Interval's ``data`` to store the log
                values as.
            right (bool): Which side of the cutoff to send things that are
                equal to, i.e. right on, the cutoff.
            basis (array-like): A depth basis for the log, so striplog knows
                where to put the boundaries.
            source (str): The source of the data. Default 'Log'.

        Returns:
            Striplog: The ``striplog`` object.
        """
        if (components is None) and (legend is None) and (field is None):
            m = 'You must provide a list of components, a legend, or a field.'
            raise StriplogError(m)

        if (legend is not None) and (legend_field is None):
            try:  # To treat it like a legend.
                components = [deepcopy(decor.component) for decor in legend]
            except AttributeError:  # It's just a list of components.
                pass

        if legend_field is not None:
            field_values = [getattr(d, legend_field, 0) for d in legend]
            components = [Component() for i in range(int(max(field_values)+1))]
            for i, decor in enumerate(legend):
                components[i] = deepcopy(decor.component)

        if cutoff is not None:

            # First make sure we have enough components.
            try:
                n = len(cutoff)
            except TypeError:
                n = 1
            if len(components) < n+1:
                m = 'For n cutoffs, you need to provide at least '
                m += 'n+1 components.'
                raise StriplogError(m)

            # Digitize.
            try:  # To use cutoff as a list.
                a = np.digitize(log, cutoff, right)
            except ValueError:  # It's just a number.
                a = np.digitize(log, [cutoff], right)

        else:
            a = np.copy(log)

        tops, values = utils.tops_from_loglike(a)

        if basis is None:
            m = 'You must provide a depth or elevation basis.'
            raise StriplogError(m)

        list_of_Intervals = cls.__intervals_from_tops(tops,
                                                      values,
                                                      basis,
                                                      components,
                                                      field=field
                                                      )

        return cls(list_of_Intervals, source=source)
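A sketch of binning a gamma-ray-like curve into three components with two cutoffs; all the values here are made up. Note that n cutoffs require at least n+1 components.

import numpy as np
from striplog import Striplog, Component

gr = np.array([20, 25, 80, 85, 120, 130, 60, 30])  # made-up log
basis = np.linspace(1000, 1007, 8)                 # its depths
comps = [Component({'lithology': 'sandstone'}),    # bin 0: below 50
         Component({'lithology': 'siltstone'}),    # bin 1: 50-100
         Component({'lithology': 'shale'})]        # bin 2: above 100

s = Striplog.from_log(gr, cutoff=[50, 100], components=comps, basis=basis)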
Turn LAS3 lithology section into a Striplog.
def from_las3(cls, string, lexicon=None, source="LAS", dlm=',', abbreviations=False): """ Turn LAS3 'lithology' section into a Striplog. Args: string (str): A section from an LAS3 file. lexicon (Lexicon): The language for conversion to components. source (str): A source for the data. dlm (str): The delimiter. abbreviations (bool): Whether to expand abbreviations. Returns: Striplog: The ``striplog`` object. Note: Handles multiple 'Data' sections. It would be smarter for it to handle one at a time, and to deal with parsing the multiple sections in the Well object. Does not read an actual LAS file. Use the Well object for that. """ f = re.DOTALL | re.IGNORECASE regex = r'\~\w+?_Data.+?\n(.+?)(?:\n\n+|\n*\~|\n*$)' pattern = re.compile(regex, flags=f) text = pattern.search(string).group(1) s = re.search(r'\.(.+?)\: ?.+?source', string) if s: source = s.group(1).strip() return cls.from_descriptions(text, lexicon, source=source, dlm=dlm, abbreviations=abbreviations)
Eat a Canstrat DAT file and make a striplog.
def from_canstrat(cls, filename, source='canstrat'): """ Eat a Canstrat DAT file and make a striplog. """ with open(filename) as f: dat = f.read() data = parse_canstrat(dat) list_of_Intervals = [] for d in data[7]: # 7 is the 'card type' for lithology info. if d.pop('skip'): continue top = d.pop('top') base = d.pop('base') comps = [Component({'lithology': d['rtc'], 'colour': d['colour_name'] })] iv = Interval(top=top, base=base, components=comps, data=d) list_of_Intervals.append(iv) return cls(list_of_Intervals, source=source)
Returns a shallow copy.
def copy(self): """Returns a shallow copy.""" return Striplog([i.copy() for i in self], order=self.order, source=self.source)
Returns a CSV string built from the summaries of the Intervals.
def to_csv(self, filename=None, as_text=True, use_descriptions=False,
               dlm=",", header=True):
        """
        Returns a CSV string built from the summaries of the Intervals.

        Args:
            filename (str): The name of a file to write. If None (default),
                the CSV is returned as text.
            as_text (bool): Whether to return the CSV as a string. Ignored
                if ``filename`` is given.
            use_descriptions (bool): Whether to use descriptions instead
                of summaries, if available.
            dlm (str): The delimiter.
            header (bool): Whether to form a header row.

        Returns:
            str: A string of comma-separated values.
        """
        if (filename is None) and (not as_text):
            raise StriplogError("You must provide a filename or set as_text to True.")

        if filename is not None:
            as_text = False

        if as_text:
            output = StringIO()
        else:
            output = open(filename, 'w')

        fieldnames = ['Top', 'Base', 'Component']
        writer = csv.DictWriter(output,
                                delimiter=dlm,
                                fieldnames=fieldnames,
                                quoting=csv.QUOTE_MINIMAL)

        if header:
            writer.writeheader()

        for i in self.__list:
            if use_descriptions and i.description:
                text = i.description
            elif i.primary:
                text = i.primary.summary()
            else:
                text = ''
            data = {j: k for j, k in zip(fieldnames, [i.top.z, i.base.z, text])}
            writer.writerow(data)

        if as_text:
            return output.getvalue()
        else:
            output.close()
            return None
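A usage sketch, assuming ``s`` is a Striplog; the filename is hypothetical.

csv_text = s.to_csv()            # CSV as a string
s.to_csv(filename='strip.csv')   # or write a file instead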
Returns an LAS 3.0 section string.
def to_las3(self, use_descriptions=False, dlm=",", source="Striplog"):
        """
        Returns an LAS 3.0 section string.

        Args:
            use_descriptions (bool): Whether to use descriptions instead
                of summaries, if available.
            dlm (str): The delimiter.
            source (str): The source of the data.

        Returns:
            str: A string forming the Lithology section of an LAS3 file.
        """
        data = self.to_csv(use_descriptions=use_descriptions,
                           dlm=dlm,
                           header=False)

        return templates.section.format(name='Lithology',
                                        short="LITH",
                                        source=source,
                                        data=data)
Return a fully sampled log from a striplog. Useful for crossplotting with log data, for example.
def to_log(self,
               step=1.0,
               start=None,
               stop=None,
               basis=None,
               field=None,
               field_function=None,
               dtype=None,
               table=None,
               legend=None,
               legend_field=None,
               match_only=None,
               undefined=0,
               return_meta=False
               ):
        """
        Return a fully sampled log from a striplog. Useful for crossplotting
        with log data, for example.

        Args:
            step (float): The step size. Default: 1.0.
            start (float): The start depth of the new log. You will want to
                match the logs, so use the start depth from the LAS file.
                Default: The basis if provided, else the start of the striplog.
            stop (float): The stop depth of the new log. Use the stop depth
                of the LAS file. Default: The basis if provided, else the stop
                depth of the striplog.
            basis (array-like): A complete depth basis to sample at; if given,
                it overrides ``start``, ``stop`` and ``step``.
            field (str): If you want the data to come from one of the
                attributes of the components in the striplog, provide it.
            field_function (function): Provide a function to apply to the
                field you are asking for. It's up to you to make sure the
                function does what you want.
            dtype (dtype): The NumPy dtype of the result, when ``field`` or
                ``legend_field`` is being used.
            table (list): A look-up table of values. If not provided, one is
                built from the legend, the field, or the striplog itself.
            legend (Legend): If you want the codes to come from a legend,
                provide one. Otherwise the codes come from the log, using
                integers in the order of prevalence. If you use a legend,
                they are assigned in the order of the legend.
            legend_field (str): If you want to get a log representing one of
                the fields in the legend, such as 'width' or 'grainsize'.
            match_only (list): If you only want to match some attributes of
                the Components (e.g. lithology), provide a list of those you
                want to match.
            undefined (number): What to fill in where no value can be
                determined, e.g. ``-999.25`` or ``np.nan``. Default 0.
            return_meta (bool): If ``True``, also return the depth basis
                (np.linspace), and the component table.

        Returns:
            ndarray: If ``return_meta`` was ``True``, you get:

                * The log data as an array of ints.
                * The depth basis as an array of floats.
                * A list of the components in the order matching the ints.

            If ``return_meta`` was ``False`` (the default), you only get the
            log data.
        """
        # Make the preparations.
        if basis is not None:
            start, stop = basis[0], basis[-1]
            step = basis[1] - start
        else:
            start = start or self.start.z
            stop = stop or self.stop.z
            pts = np.ceil((stop - start)/step) + 1
            basis = np.linspace(start, stop, int(pts))

        if (field is not None) or (legend_field is not None):
            result = np.zeros_like(basis, dtype=dtype)
        else:
            result = np.zeros_like(basis, dtype=int)

        if np.isnan(undefined):
            try:
                result[:] = np.nan
            except ValueError:
                pass  # Array type is int; it cannot hold NaN.

        # If needed, make a look-up table for the log values.
        if table is None:
            table = [Component({})]
            if legend:
                table += [j.component for j in legend]
            elif field:
                s = set([iv.data.get(field) for iv in self])
                table = [None] + list(filter(None, s))
            else:
                table += [j[0] for j in self.unique]

        # Adjust the table if necessary. Go over all the components in the
        # table list, and remove elements that are not in the match list.
        # Careful! This results in a new table, with components that may not
        # be in the original list of components.
        if match_only is not None:
            if not isinstance(match_only, (list, tuple, set,)):
                raise StriplogError("match_only should be a list, not a string")
            table_new = []
            for c in table:
                if c == '':
                    continue  # No idea why sometimes there's a ''
                c_new = Component({k: v for k, v in c.__dict__.items()
                                   if k in match_only})
                # Only add unique, and preserve order.
                if c_new not in table_new:
                    table_new.append(c_new)
            table = table_new
        else:
            match_only = []

        start_ix = self.read_at(start, index=True)
        stop_ix = self.read_at(stop, index=True)
        if stop_ix is not None:
            stop_ix += 1

        # Assign the values.
        for i in self[start_ix:stop_ix]:
            c = i.primary
            if match_only:
                c = Component({k: getattr(c, k, None)
                               for k in match_only})

            if legend and legend_field:  # Use the legend field.
                try:
                    key = legend.getattr(c, legend_field, undefined)
                    key = key or undefined
                except ValueError:
                    key = undefined
            elif field:  # Get data directly from that field in iv.data.
                f = field_function or utils.null
                try:
                    v = f(i.data.get(field, undefined)) or undefined
                    key = table.index(v)
                except ValueError:
                    key = undefined
            else:  # Use the lookup table.
                try:
                    key = table.index(c) or undefined
                except ValueError:
                    key = undefined

            top_index = int(np.ceil((max(start, i.top.z)-start)/step))
            base_index = int(np.ceil((min(stop, i.base.z)-start)/step))

            try:
                result[top_index:base_index+1] = key
            except (TypeError, ValueError):
                # Have a list or array or something.
                result[top_index:base_index+1] = key[0]

        if return_meta:
            return result, basis, table
        else:
            return result
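A sketch of sampling a striplog into a regularly spaced log, assuming ``s`` is a Striplog; with ``return_meta`` you also get the depth basis and the code table.

log, z, table = s.to_log(step=0.5, return_meta=True)
# log: integer code per sample; z: depth basis; table[code] -> Component.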
Plotting, but only for points (as opposed to intervals).
def plot_points(self, ax, legend=None, field=None, field_function=None, undefined=0, **kwargs): """ Plotting, but only for points (as opposed to intervals). """ ys = [iv.top.z for iv in self] if field is not None: f = field_function or utils.null xs = [f(iv.data.get(field, undefined)) for iv in self] else: xs = [1 for iv in self] ax.set_xlim((min(xs), max(xs))) for x, y in zip(xs, ys): ax.axhline(y, color='lightgray', zorder=0) ax.scatter(xs, ys, clip_on=False, **kwargs) return ax
Plotting, but only for tops (as opposed to intervals).
def plot_tops(self, ax, legend=None, field=None, **kwargs): """ Plotting, but only for tops (as opposed to intervals). """ if field is None: raise StriplogError('You must provide a field to plot.') ys = [iv.top.z for iv in self] try: try: ts = [getattr(iv.primary, field) for iv in self] except: ts = [iv.data.get(field) for iv in self] except: raise StriplogError('Could not retrieve field.') for y, t in zip(ys, ts): ax.axhline(y, color='lightblue', lw=3, zorder=0) ax.text(0.1, y-max(ys)/200, t, ha='left') return ax
Plotting, but only for a single field (as opposed to intervals).
def plot_field(self, ax, legend=None, field=None, **kwargs):
        """
        Plotting, but only for a single field (as opposed to intervals).
        """
        if field is None:
            raise StriplogError('You must provide a field to plot.')
        try:
            try:
                xs = [getattr(iv.primary, field) for iv in self]
            except:
                xs = [iv.data.get(field) for iv in self]
        except:
            raise StriplogError('Could not retrieve field.')

        for iv, x in zip(self.__list, xs):
            _, ymin = utils.axis_transform(ax, 0, iv.base.z,
                                           ylim=(self.start.z, self.stop.z),
                                           inverse=True)
            _, ymax = utils.axis_transform(ax, 0, iv.top.z,
                                           ylim=(self.start.z, self.stop.z),
                                           inverse=True)
            ax.axvline(x, ymin=ymin, ymax=ymax)
        return ax
Plotting, but only the Rectangles. You have to set up the figure. Returns a matplotlib axis object.
def plot_axis(self, ax, legend, ladder=False, default_width=1, match_only=None, colour=None, colour_function=None, cmap=None, default=None, width_field=None, **kwargs ): """ Plotting, but only the Rectangles. You have to set up the figure. Returns a matplotlib axis object. Args: ax (axis): The matplotlib axis to plot into. legend (Legend): The Legend to use for colours, etc. ladder (bool): Whether to use widths or not. Default False. default_width (int): A width for the plot if not using widths. Default 1. match_only (list): A list of strings matching the attributes you want to compare when plotting. colour (str): Which data field to use for colours. cmap (cmap): Matplotlib colourmap. Default ``viridis``. default (float): The default (null) value. width_field (str): The field to use for the width of the patches. **kwargs are passed through to matplotlib's ``patches.Rectangle``. Returns: axis: The matplotlib.pyplot axis. """ default_c = None patches = [] for iv in self.__list: origin = (0, iv.top.z) d = legend.get_decor(iv.primary, match_only=match_only) thick = iv.base.z - iv.top.z if ladder: if width_field is not None: w = iv.data.get(width_field, 1) w = default_width * w/self.max_field(width_field) default_c = 'gray' elif legend is not None: w = d.width or default_width try: w = default_width * w/legend.max_width except: w = default_width else: w = default_width # Allow override of lw this_patch_kwargs = kwargs.copy() lw = this_patch_kwargs.pop('lw', 0) ec = this_patch_kwargs.pop('ec', 'k') fc = this_patch_kwargs.pop('fc', None) or default_c or d.colour if colour is None: rect = mpl.patches.Rectangle(origin, w, thick, fc=fc, lw=lw, hatch=d.hatch, ec=ec, # edgecolour for hatching **this_patch_kwargs) ax.add_patch(rect) else: rect = mpl.patches.Rectangle(origin, w, thick, lw=lw, ec=ec, # edgecolour for hatching **this_patch_kwargs) patches.append(rect) if colour is not None: cmap = cmap or 'viridis' p = mpl.collections.PatchCollection(patches, cmap=cmap, lw=lw) p.set_array(self.get_data(colour, colour_function, default=default)) ax.add_collection(p) cb = plt.colorbar(p) # orientation='horizontal' only really works with ticks=[0, 0.1, 0.2] say cb.outline.set_linewidth(0) return ax
Get data from the striplog.
def get_data(self, field, function=None, default=None): """ Get data from the striplog. """ f = function or utils.null data = [] for iv in self: d = iv.data.get(field) if d is None: if default is not None: d = default else: d = np.nan data.append(f(d)) return np.array(data)
Hands-free plotting.
def plot(self,
             legend=None,
             width=1.5,
             ladder=True,
             aspect=10,
             ticks=(1, 10),
             match_only=None,
             ax=None,
             return_fig=False,
             colour=None,
             cmap='viridis',
             default=None,
             style='intervals',
             field=None,
             **kwargs):
        """
        Hands-free plotting.

        Args:
            legend (Legend): The Legend to use for colours, etc.
            width (float): The width of the plot, in inches. Default 1.5.
            ladder (bool): Whether to use widths or not. Default True.
            aspect (int): The aspect ratio of the plot. Default 10.
            ticks (int or tuple): The (minor, major) tick interval for depth.
                Only the major interval is labeled. Default (1, 10).
            match_only (list): A list of strings matching the attributes you
                want to compare when plotting.
            ax (ax): A matplotlib axis to plot onto. If you pass this, it will
                be returned. Optional.
            return_fig (bool): Whether or not to return the matplotlib ``fig``
                object. Default False.
            colour (str): Which data field to use for colours.
            cmap (cmap): Matplotlib colourmap. Default ``viridis``.
            default (float): The default (null) value for colour data.
            style (str): One of 'intervals' (default), 'points', 'field', or
                'tops'.
            field (str): The data field to use for the 'points', 'field' and
                'tops' styles, or for widths in the 'intervals' style.

            **kwargs are passed through to matplotlib's ``patches.Rectangle``.

        Returns:
            None. Unless you specify ``return_fig=True`` or pass in an ``ax``.
        """
        if legend is None:
            legend = Legend.random(self.components)

        if style.lower() == 'tops':
            # Make sure width is at least 3 for 'tops' style.
            width = max([3, width])

        if ax is None:
            return_ax = False
            fig = plt.figure(figsize=(width, aspect*width))
            ax = fig.add_axes([0.35, 0.05, 0.6, 0.95])
        else:
            return_ax = True

        if (self.order == 'none') or (style.lower() == 'points'):
            # Then this is a set of points.
            ax = self.plot_points(ax=ax, legend=legend, field=field, **kwargs)
        elif style.lower() == 'field':
            if field is None:
                raise StriplogError('You must provide a field to plot.')
            ax = self.plot_field(ax=ax, legend=legend, field=field)
        elif style.lower() == 'tops':
            ax = self.plot_tops(ax=ax, legend=legend, field=field)
            ax.set_xticks([])
        else:
            ax = self.plot_axis(ax=ax,
                                legend=legend,
                                ladder=ladder,
                                default_width=width,
                                match_only=kwargs.get('match_only',
                                                      match_only),
                                colour=colour,
                                cmap=cmap,
                                default=default,
                                width_field=field,
                                **kwargs
                                )
            ax.set_xlim([0, width])
            ax.set_xticks([])

        # Rely on interval order.
        lower, upper = self[-1].base.z, self[0].top.z
        rng = abs(upper - lower)

        ax.set_ylim([lower, upper])

        # Make sure ticks is a tuple.
        try:
            ticks = tuple(ticks)
        except TypeError:
            ticks = (1, ticks)

        # Avoid MAXTICKS error.
        while rng/ticks[0] > 250:
            mi, ma = 10*ticks[0], ticks[1]
            if ma <= mi:
                ma = 10 * mi
            ticks = (mi, ma)

        # Carry on plotting...
        minorLocator = mpl.ticker.MultipleLocator(ticks[0])
        ax.yaxis.set_minor_locator(minorLocator)

        majorLocator = mpl.ticker.MultipleLocator(ticks[1])
        majorFormatter = mpl.ticker.FormatStrFormatter('%d')
        ax.yaxis.set_major_locator(majorLocator)
        ax.yaxis.set_major_formatter(majorFormatter)

        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.yaxis.set_ticks_position('left')
        ax.get_yaxis().set_tick_params(which='both', direction='out')

        # Optional title.
        title = getattr(self, 'title', None)
        if title is not None:
            ax.set_title(title)

        ax.patch.set_alpha(0)

        if return_ax:
            return ax
        elif return_fig:
            return fig
        else:
            return
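Minimal usage, assuming ``s`` is a Striplog and ``legend`` is a Legend:

import matplotlib.pyplot as plt

s.plot(legend=legend, aspect=5, ticks=(5, 25))
plt.show()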
Get the interval at a particular depth (though this might be an elevation or age or anything).
def read_at(self, d, index=False):
        """
        Get the interval at a particular 'depth' (though this might be an
        elevation or age or anything).

        Args:
            d (Number): The 'depth' to query.
            index (bool): Whether to return the index instead of the interval.

        Returns:
            Interval: The interval, or if ``index==True`` the index of the
                interval, at the specified 'depth', or ``None`` if the depth is
                outside the striplog's range.
        """
        for i, iv in enumerate(self):
            if iv.spans(d):
                return i if index else iv
        return None
For backwards compatibility.
def depth(self, d): """ For backwards compatibility. """ with warnings.catch_warnings(): warnings.simplefilter("always") w = "depth() is deprecated; please use read_at()" warnings.warn(w) return self.read_at(d)
Extract a log into the components of a striplog.
def extract(self, log, basis, name, function=None): """ 'Extract' a log into the components of a striplog. Args: log (array_like). A log or other 1D data. basis (array_like). The depths or elevations of the log samples. name (str). The name of the attribute to store in the components. function (function). A function that takes an array as the only input, and returns whatever you want to store in the 'name' attribute of the primary component. Returns: None. The function works on the striplog in place. """ # Build a dict of {index: [log values]} to keep track. intervals = {} previous_ix = -1 for i, z in enumerate(basis): ix = self.read_at(z, index=True) if ix is None: continue if ix == previous_ix: intervals[ix].append(log[i]) else: intervals[ix] = [log[i]] previous_ix = ix # Set the requested attribute in the primary comp of each interval. for ix, data in intervals.items(): f = function or utils.null d = f(np.array(data)) self[ix].data[name] = d return None
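A sketch of use, assuming ``s`` is a Striplog and ``gr``/``basis`` are a log and its depths (hypothetical names):

import numpy as np

s.extract(gr, basis=basis, name='GR', function=np.mean)
print(s[0].data['GR'])  # mean gamma-ray over the first interval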