| signature | body | docstring | id |
|---|---|---|---|
def batch_query_state_changes(<EOL>self,<EOL>batch_size: int,<EOL>filters: List[Tuple[str, Any]] = None,<EOL>logical_and: bool = True,<EOL>) -> Iterator[List[StateChangeRecord]]:
|
limit = batch_size<EOL>offset = <NUM_LIT:0><EOL>result_length = <NUM_LIT:1><EOL>while result_length != <NUM_LIT:0>:<EOL><INDENT>result = self._get_state_changes(<EOL>limit=limit,<EOL>offset=offset,<EOL>filters=filters,<EOL>logical_and=logical_and,<EOL>)<EOL>result_length = len(result)<EOL>offset += result_length<EOL>yield result<EOL><DEDENT>
|
Batch-query state change records with a given batch size and an optional filter.
This is a generator function that yields each batch to the caller to work with.
|
f9396:c3:m15
|
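The `batch_query_state_changes` body above (literals elided in this dump) implements plain limit/offset pagination. Below is a minimal, self-contained sketch of the same loop, with a list standing in for the SQLite-backed `_get_state_changes` call (`batch_query` is an illustrative name, not part of the original API); note that, like the original, it yields one final empty batch before stopping.

```python
from typing import Iterator, List, Sequence, TypeVar

T = TypeVar("T")

def batch_query(records: Sequence[T], batch_size: int) -> Iterator[List[T]]:
    """Yield successive batches, mirroring the limit/offset loop above."""
    offset = 0
    result_length = 1  # primed so the loop runs at least once
    while result_length != 0:
        # stands in for self._get_state_changes(limit=batch_size, offset=offset, ...)
        result = list(records[offset:offset + batch_size])
        result_length = len(result)
        offset += result_length
        yield result

# usage: the final yielded batch is empty, exactly like the original loop
for batch in batch_query(list(range(7)), batch_size=3):
    print(batch)   # [0, 1, 2], [3, 4, 5], [6], []
```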
def update_state_changes(self, state_changes_data: List[Tuple[str, int]]) -> None:
|
cursor = self.conn.cursor()<EOL>cursor.executemany(<EOL>'<STR_LIT>',<EOL>state_changes_data,<EOL>)<EOL>self.maybe_commit()<EOL>
|
Given a list of state change data/identifier tuples, update them in the DB
|
f9396:c3:m16
|
def _get_event_records(<EOL>self,<EOL>limit: int = None,<EOL>offset: int = None,<EOL>filters: List[Tuple[str, Any]] = None,<EOL>logical_and: bool = True,<EOL>) -> List[EventRecord]:
|
cursor = self._form_and_execute_json_query(<EOL>query='<STR_LIT>',<EOL>limit=limit,<EOL>offset=offset,<EOL>filters=filters,<EOL>logical_and=logical_and,<EOL>)<EOL>result = [<EOL>EventRecord(<EOL>event_identifier=row[<NUM_LIT:0>],<EOL>state_change_identifier=row[<NUM_LIT:1>],<EOL>data=row[<NUM_LIT:2>],<EOL>) for row in cursor<EOL>]<EOL>return result<EOL>
|
Return a batch of event records.
The batch size can be tweaked with the `limit` and `offset` arguments.
Additionally, the returned events can be filtered with the `filters`
parameter to search for specific data in the event data.
|
f9396:c3:m19
|
def batch_query_event_records(<EOL>self,<EOL>batch_size: int,<EOL>filters: List[Tuple[str, Any]] = None,<EOL>logical_and: bool = True,<EOL>) -> Iterator[List[EventRecord]]:
|
limit = batch_size<EOL>offset = <NUM_LIT:0><EOL>result_length = <NUM_LIT:1><EOL>while result_length != <NUM_LIT:0>:<EOL><INDENT>result = self._get_event_records(<EOL>limit=limit,<EOL>offset=offset,<EOL>filters=filters,<EOL>logical_and=logical_and,<EOL>)<EOL>result_length = len(result)<EOL>offset += result_length<EOL>yield result<EOL><DEDENT>
|
Batch-query event records with a given batch size and an optional filter.
This is a generator function that yields each batch to the caller to work with.
|
f9396:c3:m20
|
def update_events(self, events_data: List[Tuple[str, int]]) -> None:
|
cursor = self.conn.cursor()<EOL>cursor.executemany(<EOL>'<STR_LIT>',<EOL>events_data,<EOL>)<EOL>self.maybe_commit()<EOL>
|
Given a list of event data/identifier tuples, update them in the DB
|
f9396:c3:m21
|
def update_snapshots(self, snapshots_data: List[Tuple[str, int]]):
|
cursor = self.conn.cursor()<EOL>cursor.executemany(<EOL>'<STR_LIT>',<EOL>snapshots_data,<EOL>)<EOL>self.maybe_commit()<EOL>
|
Given a list of snapshot data, update them in the DB.
`snapshots_data` should be a list of (snapshot data, identifier) tuples,
in that order.
|
f9396:c3:m27
|
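A hedged sketch of how `update_snapshots` could be driven. The SQL statement and table/column names below are illustrative stand-ins (the real statement is elided as `<STR_LIT>` in the dump), but the `(data, identifier)` parameter order matches the docstring above.

```python
import sqlite3

# Illustrative schema; the real statement and table name are elided in the dump.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE state_snapshot (identifier INTEGER PRIMARY KEY, data TEXT)")
conn.executemany(
    "INSERT INTO state_snapshot (identifier, data) VALUES (?, ?)",
    [(1, "old-1"), (2, "old-2")],
)

# Parameter order follows the docstring: (snapshot data, identifier).
snapshots_data = [("new-1", 1), ("new-2", 2)]
conn.executemany(
    "UPDATE state_snapshot SET data = ? WHERE identifier = ?",
    snapshots_data,
)
conn.commit()
print(conn.execute("SELECT identifier, data FROM state_snapshot").fetchall())
# [(1, 'new-1'), (2, 'new-2')]
```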
def write_events(self, state_change_identifier, events, log_time):
|
events_data = [<EOL>(None, state_change_identifier, log_time, self.serializer.serialize(event))<EOL>for event in events<EOL>]<EOL>return super().write_events(events_data)<EOL>
|
Save events.
Args:
state_change_identifier: Id of the state change that generated these events.
events: List of Event objects.
|
f9396:c4:m3
|
def get_snapshot_closest_to_state_change(<EOL>self,<EOL>state_change_identifier: int,<EOL>) -> Tuple[int, Any]:
|
row = super().get_snapshot_closest_to_state_change(state_change_identifier)<EOL>if row[<NUM_LIT:1>]:<EOL><INDENT>last_applied_state_change_id = row[<NUM_LIT:0>]<EOL>snapshot_state = self.serializer.deserialize(row[<NUM_LIT:1>])<EOL>result = (last_applied_state_change_id, snapshot_state)<EOL><DEDENT>else:<EOL><INDENT>result = (<NUM_LIT:0>, None)<EOL><DEDENT>return result<EOL>
|
Get the snapshot closest to the state change with the provided ID.
|
f9396:c4:m5
|
def _add_onchain_locksroot_to_channel_new_state_changes(<EOL>storage: SQLiteStorage,<EOL>) -> None:
|
batch_size = <NUM_LIT:50><EOL>batch_query = storage.batch_query_state_changes(<EOL>batch_size=batch_size,<EOL>filters=[<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>],<EOL>)<EOL>for state_changes_batch in batch_query:<EOL><INDENT>updated_state_changes = list()<EOL>for state_change in state_changes_batch:<EOL><INDENT>state_change_data = json.loads(state_change.data)<EOL>channel_state = state_change_data['<STR_LIT>']<EOL>msg = '<STR_LIT>'<EOL>assert '<STR_LIT>' not in channel_state['<STR_LIT>'], msg<EOL>msg = '<STR_LIT>'<EOL>assert '<STR_LIT>' not in channel_state['<STR_LIT>'], msg<EOL>channel_state['<STR_LIT>']['<STR_LIT>'] = serialize_bytes(EMPTY_MERKLE_ROOT)<EOL>channel_state['<STR_LIT>']['<STR_LIT>'] = serialize_bytes(<EOL>EMPTY_MERKLE_ROOT,<EOL>)<EOL>updated_state_changes.append((<EOL>json.dumps(state_change_data),<EOL>state_change.state_change_identifier,<EOL>))<EOL><DEDENT>storage.update_state_changes(updated_state_changes)<EOL><DEDENT>
|
Adds `onchain_locksroot` to our_state/partner_state in
ContractReceiveChannelNew's channel_state object.
|
f9397:m2
|
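The migration above follows a pattern that recurs throughout this file: query state changes in batches, decode the JSON payload, add the missing field, and hand `(data, identifier)` tuples back to `update_state_changes`. A generic sketch of that pattern, with hypothetical field names and an in-memory record list instead of `SQLiteStorage`:

```python
import json
from typing import Callable, Dict, List, NamedTuple, Tuple

class Record(NamedTuple):            # stand-in for StateChangeRecord
    state_change_identifier: int
    data: str                        # JSON-encoded state change

def migrate_in_batches(
    records: List[Record],
    transform: Callable[[Dict], Dict],
    batch_size: int = 50,
) -> List[Tuple[str, int]]:
    """Apply `transform` to each JSON payload and collect (data, id) update
    tuples, mirroring the batch loop in the migration above."""
    updates: List[Tuple[str, int]] = []
    for start in range(0, len(records), batch_size):
        for record in records[start:start + batch_size]:
            payload = transform(json.loads(record.data))
            updates.append((json.dumps(payload), record.state_change_identifier))
    return updates

def add_empty_locksroot(payload: Dict) -> Dict:
    # Hypothetical key names; the real literals are elided in the dump.
    payload.setdefault("channel_state", {}).setdefault("our_state", {})["onchain_locksroot"] = "0x" + "00" * 32
    return payload

records = [Record(1, json.dumps({"channel_state": {"our_state": {}}}))]
print(migrate_in_batches(records, add_empty_locksroot))
```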
def _add_onchain_locksroot_to_channel_settled_state_changes(<EOL>raiden: RaidenService,<EOL>storage: SQLiteStorage,<EOL>) -> None:
|
batch_size = <NUM_LIT:50><EOL>batch_query = storage.batch_query_state_changes(<EOL>batch_size=batch_size,<EOL>filters=[<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>],<EOL>)<EOL>for state_changes_batch in batch_query:<EOL><INDENT>updated_state_changes = list()<EOL>for state_change in state_changes_batch:<EOL><INDENT>state_change_data = json.loads(state_change.data)<EOL>msg = '<STR_LIT>'<EOL>assert '<STR_LIT>' not in state_change_data, msg<EOL>msg = '<STR_LIT>'<EOL>assert '<STR_LIT>' not in state_change_data, msg<EOL>token_network_identifier = state_change_data['<STR_LIT>']<EOL>channel_identifier = state_change_data['<STR_LIT>']<EOL>channel_new_state_change = _find_channel_new_state_change(<EOL>storage=storage,<EOL>token_network_address=token_network_identifier,<EOL>channel_identifier=channel_identifier,<EOL>)<EOL>if not channel_new_state_change.data:<EOL><INDENT>raise RaidenUnrecoverableError(<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>',<EOL>)<EOL><DEDENT>channel_state_data = json.loads(channel_new_state_change.data)<EOL>new_channel_state = channel_state_data['<STR_LIT>']<EOL>canonical_identifier = CanonicalIdentifier(<EOL>chain_identifier=-<NUM_LIT:1>,<EOL>token_network_address=to_canonical_address(token_network_identifier),<EOL>channel_identifier=int(channel_identifier),<EOL>)<EOL>our_locksroot, partner_locksroot = get_onchain_locksroots(<EOL>chain=raiden.chain,<EOL>canonical_identifier=canonical_identifier,<EOL>participant1=to_canonical_address(new_channel_state['<STR_LIT>']['<STR_LIT:address>']),<EOL>participant2=to_canonical_address(new_channel_state['<STR_LIT>']['<STR_LIT:address>']),<EOL>block_identifier='<STR_LIT>',<EOL>)<EOL>state_change_data['<STR_LIT>'] = serialize_bytes(<EOL>our_locksroot,<EOL>)<EOL>state_change_data['<STR_LIT>'] = serialize_bytes(<EOL>partner_locksroot,<EOL>)<EOL>updated_state_changes.append((<EOL>json.dumps(state_change_data),<EOL>state_change.state_change_identifier,<EOL>))<EOL><DEDENT>storage.update_state_changes(updated_state_changes)<EOL><DEDENT>
|
Adds `our_onchain_locksroot` and `partner_onchain_locksroot` to
ContractReceiveChannelSettled.
|
f9397:m3
|
def _add_onchain_locksroot_to_snapshot(<EOL>raiden: RaidenService,<EOL>storage: SQLiteStorage,<EOL>snapshot_record: StateChangeRecord,<EOL>) -> str:
|
snapshot = json.loads(snapshot_record.data)<EOL>for payment_network in snapshot.get('<STR_LIT>', dict()).values():<EOL><INDENT>for token_network in payment_network.get('<STR_LIT>', list()):<EOL><INDENT>channelidentifiers_to_channels = token_network.get(<EOL>'<STR_LIT>',<EOL>dict(),<EOL>)<EOL>for channel in channelidentifiers_to_channels.values():<EOL><INDENT>our_locksroot, partner_locksroot = _get_onchain_locksroots(<EOL>raiden=raiden,<EOL>storage=storage,<EOL>token_network=token_network,<EOL>channel=channel,<EOL>)<EOL>channel['<STR_LIT>']['<STR_LIT>'] = serialize_bytes(our_locksroot)<EOL>channel['<STR_LIT>']['<STR_LIT>'] = serialize_bytes(partner_locksroot)<EOL><DEDENT><DEDENT><DEDENT>return json.dumps(snapshot, indent=<NUM_LIT:4>), snapshot_record.identifier<EOL>
|
Add `onchain_locksroot` to each NettingChannelEndState
|
f9397:m4
|
def _transform_snapshot(raw_snapshot: Dict[Any, Any]) -> str:
|
snapshot = json.loads(raw_snapshot)<EOL>secrethash_to_task = snapshot['<STR_LIT>']['<STR_LIT>']<EOL>for task in secrethash_to_task.values():<EOL><INDENT>if task['<STR_LIT>'] != '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>mediator_state = task.get('<STR_LIT>')<EOL>assert '<STR_LIT>' not in mediator_state<EOL>mediator_state['<STR_LIT>'] = []<EOL>waiting_transfer = mediator_state.get('<STR_LIT>')<EOL>if waiting_transfer is None:<EOL><INDENT>continue<EOL><DEDENT>transfer = waiting_transfer.get('<STR_LIT>')<EOL>token_network_identifier = transfer['<STR_LIT>']['<STR_LIT>']<EOL>token_network = get_token_network_by_identifier(<EOL>snapshot,<EOL>token_network_identifier,<EOL>)<EOL>channel_identifier = transfer['<STR_LIT>']['<STR_LIT>']<EOL>channel = token_network.get('<STR_LIT>').get(channel_identifier)<EOL>if not channel:<EOL><INDENT>raise ChannelNotFound(<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>',<EOL>)<EOL><DEDENT>mediator_state['<STR_LIT>'] = [<EOL>RouteState.from_dict({<EOL>'<STR_LIT>': channel['<STR_LIT>']['<STR_LIT:address>'],<EOL>'<STR_LIT>': channel_identifier,<EOL>}).to_dict(),<EOL>]<EOL><DEDENT>return json.dumps(snapshot)<EOL>
|
This migration upgrades the object:
- `MediatorTransferState` such that a list of routes is added
to the state to be able to route a waiting transfer in case the
receiving node comes back online.
|
f9398:m1
|
def recover_chain_id(storage: SQLiteStorage) -> ChainID:
|
action_init_chain = json.loads(storage.get_state_changes(limit=<NUM_LIT:1>, offset=<NUM_LIT:0>)[<NUM_LIT:0>])<EOL>assert action_init_chain['<STR_LIT>'] == '<STR_LIT>'<EOL>return action_init_chain['<STR_LIT>']<EOL>
|
We can reasonably assume that any database has only one value for `chain_id` at this point
in time.
|
f9399:m16
|
def _transform_snapshot(raw_snapshot):
|
snapshot = json.loads(raw_snapshot)<EOL>secrethash_to_task = snapshot['<STR_LIT>']['<STR_LIT>']<EOL>for secrethash, task in secrethash_to_task.items():<EOL><INDENT>if task['<STR_LIT>'] != '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>transfer_secrethash = task['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL>task['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>task['<STR_LIT>']['<STR_LIT>'] = {<EOL>transfer_secrethash: task['<STR_LIT>']['<STR_LIT>'],<EOL>}<EOL>del task['<STR_LIT>']['<STR_LIT>']<EOL>secrethash_to_task[secrethash] = task<EOL><DEDENT>return json.dumps(snapshot, indent=<NUM_LIT:4>)<EOL>
|
Version 16 data model:
- The top level is always a `ChainState` object. This object always
has a `PaymentMappingState`, whose attribute `secrethashes_to_task` is a
dictionary that may be empty.
- `secrethashes_to_task` may contain `InitiatorTask`s. These objects always
have a `manager_state: InitiatorPaymentState`, which always has
`initiator: InitiatorTransferState`.
This migration upgrades the objects:
- `InitiatorPaymentState`, which may be contained in `secrethashes_to_task`.
In version 16 these objects had a single `initiator` object,
whereas in version 17 this was changed to a `Dict[SecretHash, 'InitiatorTransferState']`.
- `InitiatorTransferState` has a new attribute `transfer_state`.
|
f9400:m0
|
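A small before/after sketch of the v16 → v17 transformation described above, on a plain dict. The key names (`manager_state`, `initiator`, `transfer`, `lock`, `secrethash`, `transfer_state`) follow the docstring and the shape of the body, but the exact literals are elided in this dump, so treat them and the `"transfer_pending"` value as assumptions:

```python
import json

# Minimal v16-shaped InitiatorTask fragment.
task = {
    "manager_state": {
        "initiator": {
            "transfer": {"lock": {"secrethash": "0xabc"}},
        },
    },
}

initiator = task["manager_state"]["initiator"]
secrethash = initiator["transfer"]["lock"]["secrethash"]

# v17: add the new transfer_state attribute and key the transfer by secrethash.
initiator["transfer_state"] = "transfer_pending"
task["manager_state"]["initiator_transfers"] = {secrethash: initiator}
del task["manager_state"]["initiator"]

print(json.dumps(task, indent=2))
```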
def upgrade_v16_to_v17(storage: SQLiteStorage, old_version: int, current_version: int, **kwargs):
|
if old_version == SOURCE_VERSION:<EOL><INDENT>_transform_snapshots(storage)<EOL><DEDENT>return TARGET_VERSION<EOL>
|
InitiatorPaymentState was changed so that the "initiator"
attribute is renamed to "initiator_transfers" and converted to a dictionary keyed by secrethash.
|
f9400:m2
|
def _transform_snapshot(raw_snapshot: str) -> str:
|
snapshot = json.loads(raw_snapshot)<EOL>for task in snapshot['<STR_LIT>']['<STR_LIT>'].values():<EOL><INDENT>if '<STR_LIT>' in task['<STR_LIT>']:<EOL><INDENT>for initiator in task['<STR_LIT>']['<STR_LIT>'].values():<EOL><INDENT>msg = '<STR_LIT>'<EOL>assert '<STR_LIT>' not in initiator['<STR_LIT>'], msg<EOL>initiator['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT:0>'<EOL><DEDENT><DEDENT><DEDENT>msg = '<STR_LIT>'<EOL>assert '<STR_LIT>' not in snapshot, msg<EOL>ids_to_addrs = dict()<EOL>for payment_network in snapshot['<STR_LIT>'].values():<EOL><INDENT>for token_network in payment_network['<STR_LIT>']:<EOL><INDENT>ids_to_addrs[token_network['<STR_LIT:address>']] = payment_network['<STR_LIT:address>']<EOL><DEDENT><DEDENT>snapshot['<STR_LIT>'] = ids_to_addrs<EOL>for payment_network in snapshot['<STR_LIT>'].values():<EOL><INDENT>for token_network in payment_network['<STR_LIT>']:<EOL><INDENT>for channel_state in token_network['<STR_LIT>'].values():<EOL><INDENT>msg = '<STR_LIT>'<EOL>assert '<STR_LIT>' not in channel_state, msg<EOL>channel_state['<STR_LIT>'] = '<STR_LIT:0>'<EOL><DEDENT><DEDENT><DEDENT>return json.dumps(snapshot)<EOL>
|
The transformation step does the following:
1. Adds `allocated_fee` to all initiator tasks
2. Adds `mediation_fee` to all channels
3. Populates `tokennetworkaddresses_to_paymentnetworkaddresses` in the chain state
|
f9401:m0
|
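A sketch of step 3 of the transformation above: deriving the token-network-address → payment-network-address mapping from a snapshot-shaped dict. The key names are assumptions, since the actual literals are elided in this dump:

```python
snapshot = {
    "identifiers_to_paymentnetworks": {
        "0xpay1": {
            "address": "0xpay1",
            "tokennetworks": [{"address": "0xtok1"}, {"address": "0xtok2"}],
        },
    },
}

# Build the reverse index: each token network points back to its payment network.
ids_to_addrs = {}
for payment_network in snapshot["identifiers_to_paymentnetworks"].values():
    for token_network in payment_network["tokennetworks"]:
        ids_to_addrs[token_network["address"]] = payment_network["address"]

snapshot["tokennetworkaddresses_to_paymentnetworkaddresses"] = ids_to_addrs
print(ids_to_addrs)   # {'0xtok1': '0xpay1', '0xtok2': '0xpay1'}
```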
def _update_statechanges(storage: SQLiteStorage):
|
batch_size = <NUM_LIT:50><EOL>batch_query = storage.batch_query_state_changes(<EOL>batch_size=batch_size,<EOL>filters=[<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>],<EOL>)<EOL>for state_changes_batch in batch_query:<EOL><INDENT>updated_state_changes = list()<EOL>for state_change in state_changes_batch:<EOL><INDENT>data = json.loads(state_change.data)<EOL>msg = '<STR_LIT>'<EOL>assert '<STR_LIT>' not in data['<STR_LIT>'], msg<EOL>data['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT:0>'<EOL>updated_state_changes.append((<EOL>json.dumps(data),<EOL>state_change.state_change_identifier,<EOL>))<EOL><DEDENT>storage.update_state_changes(updated_state_changes)<EOL><DEDENT>batch_query = storage.batch_query_state_changes(<EOL>batch_size=batch_size,<EOL>filters=[<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>],<EOL>)<EOL>for state_changes_batch in batch_query:<EOL><INDENT>updated_state_changes = list()<EOL>for state_change in state_changes_batch:<EOL><INDENT>data = json.loads(state_change.data)<EOL>msg = '<STR_LIT>'<EOL>assert '<STR_LIT>' not in data['<STR_LIT>'], msg<EOL>data['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT:0>'<EOL>updated_state_changes.append((<EOL>json.dumps(data),<EOL>state_change.state_change_identifier,<EOL>))<EOL><DEDENT>storage.update_state_changes(updated_state_changes)<EOL><DEDENT>
|
Update each ContractReceiveChannelNew's channel_state member
by setting the `mediation_fee` that was added to the NettingChannelState
|
f9401:m2
|
def _add_blockhash_to_state_changes(storage: SQLiteStorage, cache: BlockHashCache) -> None:
|
batch_size = <NUM_LIT:50><EOL>batch_query = storage.batch_query_state_changes(<EOL>batch_size=batch_size,<EOL>filters=[<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>],<EOL>logical_and=False,<EOL>)<EOL>for state_changes_batch in batch_query:<EOL><INDENT>query_records = []<EOL>for state_change in state_changes_batch:<EOL><INDENT>data = json.loads(state_change.data)<EOL>assert '<STR_LIT>' not in data, '<STR_LIT>'<EOL>record = BlockQueryAndUpdateRecord(<EOL>block_number=int(data['<STR_LIT>']),<EOL>data=data,<EOL>state_change_identifier=state_change.state_change_identifier,<EOL>cache=cache,<EOL>)<EOL>query_records.append(record)<EOL><DEDENT>updated_state_changes = []<EOL>pool_generator = Pool(batch_size).imap(<EOL>_query_blocknumber_and_update_statechange_data,<EOL>query_records,<EOL>)<EOL>for entry in pool_generator:<EOL><INDENT>updated_state_changes.append(entry)<EOL><DEDENT>storage.update_state_changes(updated_state_changes)<EOL><DEDENT>
|
Adds blockhash to ContractReceiveXXX and ActionInitChain state changes
|
f9402:m1
|
def _add_blockhash_to_events(storage: SQLiteStorage, cache: BlockHashCache) -> None:
|
batch_query = storage.batch_query_event_records(<EOL>batch_size=<NUM_LIT>,<EOL>filters=[('<STR_LIT>', '<STR_LIT>')],<EOL>)<EOL>for events_batch in batch_query:<EOL><INDENT>updated_events = []<EOL>for event in events_batch:<EOL><INDENT>data = json.loads(event.data)<EOL>assert '<STR_LIT>' not in data, '<STR_LIT>'<EOL>matched_state_changes = storage.get_statechanges_by_identifier(<EOL>from_identifier=event.state_change_identifier,<EOL>to_identifier=event.state_change_identifier,<EOL>)<EOL>result_length = len(matched_state_changes)<EOL>msg = '<STR_LIT>'<EOL>assert result_length == <NUM_LIT:1>, msg<EOL>statechange_data = json.loads(matched_state_changes[<NUM_LIT:0>])<EOL>if '<STR_LIT>' in statechange_data:<EOL><INDENT>data['<STR_LIT>'] = statechange_data['<STR_LIT>']<EOL><DEDENT>elif '<STR_LIT>' in statechange_data:<EOL><INDENT>block_number = int(statechange_data['<STR_LIT>'])<EOL>data['<STR_LIT>'] = cache.get(block_number)<EOL><DEDENT>updated_events.append((<EOL>json.dumps(data),<EOL>event.event_identifier,<EOL>))<EOL><DEDENT>storage.update_events(updated_events)<EOL><DEDENT>
|
Adds blockhash to all ContractSendXXX events
|
f9402:m2
|
def _transform_snapshot(<EOL>raw_snapshot: str,<EOL>storage: SQLiteStorage,<EOL>cache: BlockHashCache,<EOL>) -> str:
|
snapshot = json.loads(raw_snapshot)<EOL>block_number = int(snapshot['<STR_LIT>'])<EOL>snapshot['<STR_LIT>'] = cache.get(block_number)<EOL>pending_transactions = snapshot['<STR_LIT>']<EOL>new_pending_transactions = []<EOL>for transaction_data in pending_transactions:<EOL><INDENT>if '<STR_LIT>' not in transaction_data['<STR_LIT>']:<EOL><INDENT>raise InvalidDBData(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>",<EOL>)<EOL><DEDENT>event_record = storage.get_latest_event_by_data_field(<EOL>filters=transaction_data,<EOL>)<EOL>if not event_record.data:<EOL><INDENT>raise InvalidDBData(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>)<EOL><DEDENT>event_record_data = json.loads(event_record.data)<EOL>transaction_data['<STR_LIT>'] = event_record_data['<STR_LIT>']<EOL>new_pending_transactions.append(transaction_data)<EOL><DEDENT>snapshot['<STR_LIT>'] = new_pending_transactions<EOL>return json.dumps(snapshot)<EOL>
|
Upgrades a single snapshot by adding the blockhash to it and to any pending transactions
|
f9402:m3
|
def _transform_snapshots_for_blockhash(storage: SQLiteStorage, cache: BlockHashCache) -> None:
|
snapshots = storage.get_snapshots()<EOL>snapshot_records = [<EOL>TransformSnapshotRecord(<EOL>data=snapshot.data,<EOL>identifier=snapshot.identifier,<EOL>storage=storage,<EOL>cache=cache,<EOL>)<EOL>for snapshot in snapshots<EOL>]<EOL>pool_generator = Pool(len(snapshots)).imap(_do_transform_snapshot, snapshot_records)<EOL>updated_snapshots_data = []<EOL>for result in pool_generator:<EOL><INDENT>updated_snapshots_data.append(result)<EOL><DEDENT>storage.update_snapshots(updated_snapshots_data)<EOL>
|
Upgrades the snapshots by adding the blockhash to them and to any pending transactions
|
f9402:m5
|
def get(self, block_number: BlockNumber) -> str:
|
if block_number in self.mapping:<EOL><INDENT>return self.mapping[block_number]<EOL><DEDENT>block_hash = self.web3.eth.getBlock(block_number)['<STR_LIT>']<EOL>block_hash = block_hash.hex()<EOL>self.mapping[block_number] = block_hash<EOL>return block_hash<EOL>
|
Given a block number, return the hex representation of its block hash.
|
f9402:c0:m1
|
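`BlockHashCache.get` is a simple memoized lookup: return the cached hash if present, otherwise fetch the block once and remember its hash. A self-contained sketch with a fake fetcher in place of the web3 call:

```python
from typing import Callable, Dict

class BlockHashCache:
    """Minimal memoizing lookup, mirroring the `get` method above.
    `fetch_block_hash` stands in for web3.eth.getBlock(block_number)['hash']."""

    def __init__(self, fetch_block_hash: Callable[[int], bytes]) -> None:
        self._fetch = fetch_block_hash
        self.mapping: Dict[int, str] = {}

    def get(self, block_number: int) -> str:
        if block_number in self.mapping:
            return self.mapping[block_number]          # cache hit: no RPC call
        block_hash = self._fetch(block_number).hex()   # cache miss: one lookup
        self.mapping[block_number] = block_hash
        return block_hash

# usage with a fake fetcher (a real one would query the ethereum client)
fake_fetch = lambda number: number.to_bytes(32, "big")
cache = BlockHashCache(fake_fetch)
assert cache.get(7) == cache.get(7)   # second call is served from the cache
```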
def channel_state_until_state_change(<EOL>raiden,<EOL>canonical_identifier: CanonicalIdentifier,<EOL>state_change_identifier: int,<EOL>) -> typing.Optional[NettingChannelState]:
|
wal = restore_to_state_change(<EOL>transition_function=node.state_transition,<EOL>storage=raiden.wal.storage,<EOL>state_change_identifier=state_change_identifier,<EOL>)<EOL>msg = '<STR_LIT>'<EOL>assert wal.state_manager.current_state is not None, msg<EOL>chain_state = wal.state_manager.current_state<EOL>channel_state = views.get_channelstate_by_canonical_identifier(<EOL>chain_state=chain_state,<EOL>canonical_identifier=canonical_identifier,<EOL>)<EOL>if not channel_state:<EOL><INDENT>raise RaidenUnrecoverableError(<EOL>f"<STR_LIT>",<EOL>)<EOL><DEDENT>return channel_state<EOL>
|
Go through WAL state changes until a certain balance hash is found.
|
f9403:m0
|
def filter_db_names(paths: List[str]) -> List[str]:
|
return [<EOL>db_path<EOL>for db_path in paths<EOL>if VERSION_RE.match(os.path.basename(db_path))<EOL>]<EOL>
|
Returns a filtered list of `paths`, where every name matches our format.
Args:
paths: A list of file names.
|
f9404:m1
|
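A usage sketch for `filter_db_names`. `VERSION_RE` is not shown in this dump, so the pattern below (matching names like `v16_log.db`) is an assumption used only for illustration:

```python
import os
import re
from typing import List

# Assumed pattern; the real VERSION_RE is defined elsewhere in the module.
VERSION_RE = re.compile(r"^v(\d+)_log\.db$")

def filter_db_names(paths: List[str]) -> List[str]:
    # Keep only paths whose basename matches the versioned database format.
    return [
        db_path
        for db_path in paths
        if VERSION_RE.match(os.path.basename(db_path))
    ]

print(filter_db_names(["/data/v16_log.db", "/data/v17_log.db", "/data/notes.txt"]))
# ['/data/v16_log.db', '/data/v17_log.db']
```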
def log_and_dispatch(self, state_change):
|
with self._lock:<EOL><INDENT>timestamp = datetime.utcnow().isoformat(timespec='<STR_LIT>')<EOL>state_change_id = self.storage.write_state_change(state_change, timestamp)<EOL>self.state_change_id = state_change_id<EOL>events = self.state_manager.dispatch(state_change)<EOL>self.storage.write_events(state_change_id, events, timestamp)<EOL><DEDENT>return events<EOL>
|
Log and apply a state change.
This function first writes the state change to the write-ahead log, so that
in case of a node crash the state change can be recovered and replayed
to restore the node state.
Events produced by applying the state change are also saved.
|
f9406:c0:m1
|
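The write-ahead-log pattern described above has three steps under one lock: persist the state change, dispatch it to the state machine, then persist the produced events, so a crash between steps can be replayed from storage. A minimal in-memory sketch (names such as `InMemoryStorage` are illustrative, not part of the original API):

```python
import threading
from datetime import datetime
from typing import Any, Callable, List, Tuple

class InMemoryStorage:
    """Stand-in for the SQLite-backed storage used by the WAL."""
    def __init__(self) -> None:
        self.state_changes: List[Tuple[int, Any, str]] = []
        self.events: List[Tuple[int, Any, str]] = []

    def write_state_change(self, state_change: Any, timestamp: str) -> int:
        self.state_changes.append((len(self.state_changes) + 1, state_change, timestamp))
        return len(self.state_changes)

    def write_events(self, state_change_id: int, events: List[Any], timestamp: str) -> None:
        self.events.extend((state_change_id, event, timestamp) for event in events)

class WriteAheadLog:
    def __init__(self, storage: InMemoryStorage, dispatch: Callable[[Any], List[Any]]) -> None:
        self.storage = storage
        self.dispatch = dispatch
        self._lock = threading.Lock()

    def log_and_dispatch(self, state_change: Any) -> List[Any]:
        with self._lock:
            timestamp = datetime.utcnow().isoformat(timespec="milliseconds")
            # 1. persist the state change, 2. apply it, 3. persist the events
            state_change_id = self.storage.write_state_change(state_change, timestamp)
            events = self.dispatch(state_change)
            self.storage.write_events(state_change_id, events, timestamp)
        return events

wal = WriteAheadLog(InMemoryStorage(), dispatch=lambda sc: [f"event-for-{sc}"])
print(wal.log_and_dispatch("Block(5)"))   # ['event-for-Block(5)']
```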
def snapshot(self):
|
with self._lock:<EOL><INDENT>current_state = self.state_manager.current_state<EOL>state_change_id = self.state_change_id<EOL>if state_change_id:<EOL><INDENT>self.storage.write_state_snapshot(state_change_id, current_state)<EOL><DEDENT><DEDENT>
|
Snapshot the application state.
Snapshots are used to restore the application state, either after a
restart or a crash.
|
f9406:c0:m2
|
def check_version(current_version: str):
|
app_version = parse_version(current_version)<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>_do_check_version(app_version)<EOL><DEDENT>except requests.exceptions.HTTPError as herr:<EOL><INDENT>click.secho('<STR_LIT>', fg='<STR_LIT>')<EOL>print(herr)<EOL><DEDENT>except ValueError as verr:<EOL><INDENT>click.secho('<STR_LIT>', fg='<STR_LIT>')<EOL>print(verr)<EOL><DEDENT>finally:<EOL><INDENT>gevent.sleep(CHECK_VERSION_INTERVAL)<EOL><DEDENT><DEDENT>
|
Check periodically for a new release
|
f9407:m1
|
def check_gas_reserve(raiden):
|
while True:<EOL><INDENT>has_enough_balance, estimated_required_balance = gas_reserve.has_enough_gas_reserve(<EOL>raiden,<EOL>channels_to_open=<NUM_LIT:1>,<EOL>)<EOL>estimated_required_balance_eth = Web3.fromWei(estimated_required_balance, '<STR_LIT>')<EOL>if not has_enough_balance:<EOL><INDENT>log.info('<STR_LIT>', required_wei=estimated_required_balance)<EOL>click.secho(<EOL>(<EOL>'<STR_LIT>'<EOL>"<STR_LIT>"<EOL>f'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>fg='<STR_LIT>',<EOL>)<EOL><DEDENT>gevent.sleep(CHECK_GAS_RESERVE_INTERVAL)<EOL><DEDENT>
|
Check periodically for gas reserve in the account
|
f9407:m2
|
def check_rdn_deposits(raiden, user_deposit_proxy: UserDeposit):
|
while True:<EOL><INDENT>rei_balance = user_deposit_proxy.effective_balance(raiden.address, "<STR_LIT>")<EOL>rdn_balance = to_rdn(rei_balance)<EOL>if rei_balance < MIN_REI_THRESHOLD:<EOL><INDENT>click.secho(<EOL>(<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>),<EOL>fg='<STR_LIT>',<EOL>)<EOL><DEDENT>gevent.sleep(CHECK_RDN_MIN_DEPOSIT_INTERVAL)<EOL><DEDENT>
|
Check periodically for RDN deposits in the user-deposits contract
|
f9407:m3
|
def check_network_id(network_id, web3: Web3):
|
while True:<EOL><INDENT>current_id = int(web3.version.network)<EOL>if network_id != current_id:<EOL><INDENT>raise RuntimeError(<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>',<EOL>)<EOL><DEDENT>gevent.sleep(CHECK_NETWORK_ID_INTERVAL)<EOL><DEDENT>
|
Check periodically if the underlying ethereum client's network id has changed
|
f9407:m4
|
def is_primed(self):
|
return bool(self.chain_id and self.known_block_number is not None)<EOL>
|
True if the first_run has been called.
|
f9407:c0:m4
|
def register_callback(self, callback):
|
if not callable(callback):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.callbacks.append(callback)<EOL>
|
Register a new callback.
Note:
The callback will be executed in the AlarmTask context and for
this reason it should not block; otherwise block changes can be missed.
|
f9407:c0:m5
|
def remove_callback(self, callback):
|
if callback in self.callbacks:<EOL><INDENT>self.callbacks.remove(callback)<EOL><DEDENT>
|
Remove callback from the list of callbacks if it exists
|
f9407:c0:m6
|
def first_run(self, known_block_number):
|
assert self.callbacks, '<STR_LIT>'<EOL>latest_block = self.chain.get_block(block_identifier='<STR_LIT>')<EOL>log.debug(<EOL>'<STR_LIT>',<EOL>known_block_number=known_block_number,<EOL>latest_block_number=latest_block['<STR_LIT>'],<EOL>latest_gas_limit=latest_block['<STR_LIT>'],<EOL>latest_block_hash=to_hex(latest_block['<STR_LIT>']),<EOL>)<EOL>self.known_block_number = known_block_number<EOL>self.chain_id = self.chain.network_id<EOL>self._maybe_run_callbacks(latest_block)<EOL>
|
Blocking call to update the local state, if necessary.
|
f9407:c0:m8
|
def get_contract_events(<EOL>chain: BlockChainService,<EOL>abi: Dict,<EOL>contract_address: Address,<EOL>topics: Optional[List[str]],<EOL>from_block: BlockSpecification,<EOL>to_block: BlockSpecification,<EOL>) -> List[Dict]:
|
verify_block_number(from_block, '<STR_LIT>')<EOL>verify_block_number(to_block, '<STR_LIT>')<EOL>events = chain.client.get_filter_events(<EOL>contract_address,<EOL>topics=topics,<EOL>from_block=from_block,<EOL>to_block=to_block,<EOL>)<EOL>result = []<EOL>for event in events:<EOL><INDENT>decoded_event = dict(decode_event(abi, event))<EOL>if event.get('<STR_LIT>'):<EOL><INDENT>decoded_event['<STR_LIT>'] = event['<STR_LIT>']<EOL>del decoded_event['<STR_LIT>']<EOL><DEDENT>result.append(decoded_event)<EOL><DEDENT>return result<EOL>
|
Query the blockchain for all events of the smart contract at
`contract_address` that match the filters `topics`, `from_block`, and
`to_block`.
|
f9409:m1
|
def decode_event_to_internal(abi, log_event):
|
<EOL>decoded_event = decode_event(abi, log_event)<EOL>if not decoded_event:<EOL><INDENT>raise UnknownEventType()<EOL><DEDENT>data = dict(decoded_event)<EOL>args = dict(data['<STR_LIT:args>'])<EOL>data['<STR_LIT:args>'] = args<EOL>data['<STR_LIT>'] = log_event.pop('<STR_LIT>')<EOL>data['<STR_LIT>'] = log_event.pop('<STR_LIT>')<EOL>data['<STR_LIT>'] = bytes(log_event.pop('<STR_LIT>'))<EOL>assert data['<STR_LIT>'], '<STR_LIT>'<EOL>assert data['<STR_LIT>'], '<STR_LIT>'<EOL>event = data['<STR_LIT>']<EOL>if event == EVENT_TOKEN_NETWORK_CREATED:<EOL><INDENT>args['<STR_LIT>'] = to_canonical_address(args['<STR_LIT>'])<EOL>args['<STR_LIT>'] = to_canonical_address(args['<STR_LIT>'])<EOL><DEDENT>elif event == ChannelEvent.OPENED:<EOL><INDENT>args['<STR_LIT>'] = to_canonical_address(args['<STR_LIT>'])<EOL>args['<STR_LIT>'] = to_canonical_address(args['<STR_LIT>'])<EOL><DEDENT>elif event == ChannelEvent.DEPOSIT:<EOL><INDENT>args['<STR_LIT>'] = to_canonical_address(args['<STR_LIT>'])<EOL><DEDENT>elif event == ChannelEvent.BALANCE_PROOF_UPDATED:<EOL><INDENT>args['<STR_LIT>'] = to_canonical_address(args['<STR_LIT>'])<EOL><DEDENT>elif event == ChannelEvent.CLOSED:<EOL><INDENT>args['<STR_LIT>'] = to_canonical_address(args['<STR_LIT>'])<EOL><DEDENT>elif event == ChannelEvent.UNLOCKED:<EOL><INDENT>args['<STR_LIT>'] = to_canonical_address(args['<STR_LIT>'])<EOL>args['<STR_LIT>'] = to_canonical_address(args['<STR_LIT>'])<EOL><DEDENT>return Event(<EOL>originating_contract=to_canonical_address(log_event['<STR_LIT:address>']),<EOL>event_data=data,<EOL>)<EOL>
|
Enforce the binary for internal usage.
|
f9409:m5
|
def poll_blockchain_events(self, block_number: typing.BlockNumber):
|
for event_listener in self.event_listeners:<EOL><INDENT>assert isinstance(event_listener.filter, StatelessFilter)<EOL>for log_event in event_listener.filter.get_new_entries(block_number):<EOL><INDENT>yield decode_event_to_internal(event_listener.abi, log_event)<EOL><DEDENT><DEDENT>
|
Poll for new blockchain events up to `block_number`.
|
f9409:c1:m1
|
def enable(self, app=None):
|
self.manager.set_inputhook(inputhook_gevent)<EOL>self._current_gui = GUI_GEVENT<EOL>return app<EOL>
|
Enable event loop integration with gevent.
Args:
app: Ignored; it is only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes:
This method sets the PyOS_InputHook for gevent, which allows
gevent greenlets to run in the background while interactively using
IPython.
|
f9414:c0:m1
|
def disable(self):
|
self.manager.clear_inputhook()<EOL>
|
Disable event loop integration with gevent.
This merely sets PyOS_InputHook to NULL.
|
f9414:c0:m2
|
def register_token(<EOL>self,<EOL>registry_address_hex: typing.AddressHex,<EOL>token_address_hex: typing.AddressHex,<EOL>retry_timeout: typing.NetworkTimeout = DEFAULT_RETRY_TIMEOUT,<EOL>) -> TokenNetwork:
|
registry_address = decode_hex(registry_address_hex)<EOL>token_address = decode_hex(token_address_hex)<EOL>registry = self._raiden.chain.token_network_registry(registry_address)<EOL>contracts_version = self._raiden.contract_manager.contracts_version<EOL>if contracts_version == DEVELOPMENT_CONTRACT_VERSION:<EOL><INDENT>token_network_address = registry.add_token_with_limits(<EOL>token_address=token_address,<EOL>channel_participant_deposit_limit=UINT256_MAX,<EOL>token_network_deposit_limit=UINT256_MAX,<EOL>)<EOL><DEDENT>else:<EOL><INDENT>token_network_address = registry.add_token_without_limits(<EOL>token_address=token_address,<EOL>)<EOL><DEDENT>waiting.wait_for_payment_network(<EOL>self._raiden,<EOL>registry.address,<EOL>token_address,<EOL>retry_timeout,<EOL>)<EOL>return self._raiden.chain.token_network(token_network_address)<EOL>
|
Register a token with the raiden token manager.
Args:
registry_address_hex (string): a hex encoded registry address.
token_address_hex (string): a hex encoded token address.
Returns:
The token network proxy.
|
f9414:c2:m2
|
def open_channel_with_funding(<EOL>self,<EOL>registry_address_hex,<EOL>token_address_hex,<EOL>peer_address_hex,<EOL>total_deposit,<EOL>settle_timeout=None,<EOL>):
|
<EOL>registry_address = decode_hex(registry_address_hex)<EOL>peer_address = decode_hex(peer_address_hex)<EOL>token_address = decode_hex(token_address_hex)<EOL>try:<EOL><INDENT>self._discovery.get(peer_address)<EOL><DEDENT>except KeyError:<EOL><INDENT>print('<STR_LIT>'.format(peer_address_hex))<EOL>return None<EOL><DEDENT>self._api.channel_open(<EOL>registry_address,<EOL>token_address,<EOL>peer_address,<EOL>settle_timeout=settle_timeout,<EOL>)<EOL>return self._api.set_total_channel_deposit(<EOL>registry_address,<EOL>token_address,<EOL>peer_address,<EOL>total_deposit,<EOL>)<EOL>
|
Convenience method to open a channel.
Args:
registry_address_hex (str): hex encoded address of the registry for the channel.
token_address_hex (str): hex encoded address of the token for the channel.
peer_address_hex (str): hex encoded address of the channel peer.
total_deposit (int): amount of total funding for the channel.
settle_timeout (int): amount of blocks for the settle time (if None use app defaults).
Return:
netting_channel: the (newly opened) netting channel object.
|
f9414:c2:m3
|
def wait_for_contract(self, contract_address_hex, timeout=None):
|
contract_address = decode_hex(contract_address_hex)<EOL>start_time = time.time()<EOL>result = self._raiden.chain.client.web3.eth.getCode(<EOL>to_checksum_address(contract_address),<EOL>)<EOL>current_time = time.time()<EOL>while not result:<EOL><INDENT>if timeout and start_time + timeout > current_time:<EOL><INDENT>return False<EOL><DEDENT>result = self._raiden.chain.client.web3.eth.getCode(<EOL>to_checksum_address(contract_address),<EOL>)<EOL>gevent.sleep(<NUM_LIT:0.5>)<EOL>current_time = time.time()<EOL><DEDENT>return len(result) > <NUM_LIT:0><EOL>
|
Wait until a contract is mined.
Args:
contract_address_hex (string): hex encoded address of the contract
timeout (int): time to wait for the contract to get mined
Returns:
True if the contract got mined, False otherwise
f9414:c2:m4
|
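`wait_for_contract` is a poll-until-deadline loop around `eth.getCode`. A generic sketch of that pattern follows; note that the deadline comparison in the dumped body (`start_time + timeout > current_time`) appears inverted, so the sketch uses the conventional check instead. `wait_until` and `contract_deployed` are hypothetical names for illustration:

```python
import time
from typing import Callable, Optional

def wait_until(condition: Callable[[], bool], timeout: Optional[float] = None, poll: float = 0.5) -> bool:
    """Poll `condition` until it is truthy or the deadline passes.
    Uses the conventional deadline check: give up once elapsed time exceeds the timeout."""
    deadline = None if timeout is None else time.time() + timeout
    while not condition():
        if deadline is not None and time.time() > deadline:
            return False
        time.sleep(poll)
    return True

# usage: a stand-in for "contract code is present at the address"
code = b""
def contract_deployed() -> bool:
    return len(code) > 0

print(wait_until(contract_deployed, timeout=1))   # False: nothing ever deployed
```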
def options(func):
|
<EOL>options_ = [<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default=lambda: os.path.join(os.path.expanduser('<STR_LIT>'), '<STR_LIT>'),<EOL>type=click.Path(<EOL>exists=False,<EOL>dir_okay=True,<EOL>file_okay=False,<EOL>writable=True,<EOL>resolve_path=True,<EOL>allow_dash=False,<EOL>),<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default=os.path.join('<STR_LIT>', '<STR_LIT>'),<EOL>type=PathRelativePath(<EOL>file_okay=True,<EOL>dir_okay=False,<EOL>exists=False,<EOL>readable=True,<EOL>resolve_path=True,<EOL>),<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>default=None,<EOL>type=click.Path(exists=True),<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>default=None,<EOL>type=ADDRESS_TYPE,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default=None,<EOL>type=click.File(lazy=True),<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>type=ADDRESS_TYPE,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>type=ADDRESS_TYPE,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>type=ADDRESS_TYPE,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>type=ADDRESS_TYPE,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>type=ADDRESS_TYPE,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>is_flag=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>type=click.Choice(['<STR_LIT>', '<STR_LIT>']),<EOL>default='<STR_LIT>',<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>type=NetworkChoiceType([<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>]),<EOL>default='<STR_LIT>',<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>type=EnumChoiceType(Environment),<EOL>default=Environment.PRODUCTION.value,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>is_flag=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>is_flag=True,<EOL>),<EOL>option_group(<EOL>'<STR_LIT>',<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default=True,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>type=GasPriceChoiceType(['<STR_LIT>', '<STR_LIT>']),<EOL>default='<STR_LIT>',<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>default='<STR_LIT>', 
<EOL>type=str,<EOL>show_default=True,<EOL>),<EOL>),<EOL>option_group(<EOL>'<STR_LIT>',<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>type=EnumChoiceType(RoutingMode),<EOL>default=RoutingMode.BASIC.value,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>default='<STR_LIT>',<EOL>type=str,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>type=str,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default=DEFAULT_PATHFINDING_MAX_PATHS,<EOL>type=int,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default=DEFAULT_PATHFINDING_MAX_FEE,<EOL>type=int,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default=DEFAULT_PATHFINDING_IOU_TIMEOUT,<EOL>type=int,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>is_flag=True,<EOL>),<EOL>),<EOL>option_group(<EOL>'<STR_LIT>',<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default='<STR_LIT>'.format(INITIAL_PORT),<EOL>type=str,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>default=<NUM_LIT:30>,<EOL>type=int,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>default=<NUM_LIT>,<EOL>type=int,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>type=NATChoiceType(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:none>', '<STR_LIT>']),<EOL>default='<STR_LIT>',<EOL>show_default=True,<EOL>option_group='<STR_LIT>',<EOL>),<EOL>),<EOL>option_group(<EOL>'<STR_LIT>',<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>default='<STR_LIT>',<EOL>type=MatrixServerType(['<STR_LIT>', 
'<STR_LIT>']),<EOL>show_default=True,<EOL>),<EOL>),<EOL>option_group(<EOL>'<STR_LIT>',<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>type=LOG_LEVEL_CONFIG_TYPE,<EOL>default='<STR_LIT>',<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default=None,<EOL>type=str,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>is_flag=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>is_flag=True,<EOL>),<EOL>),<EOL>option_group(<EOL>'<STR_LIT>',<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default=True,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default='<STR_LIT>',<EOL>type=str,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>default='<STR_LIT>',<EOL>type=str,<EOL>show_default=True,<EOL>),<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>default=True,<EOL>show_default=True,<EOL>),<EOL>),<EOL>option_group(<EOL>'<STR_LIT>',<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>is_flag=True,<EOL>default=False,<EOL>),<EOL>),<EOL>option_group(<EOL>'<STR_LIT>',<EOL>option(<EOL>'<STR_LIT>',<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>default=None,<EOL>type=str,<EOL>show_default=True,<EOL>),<EOL>),<EOL>]<EOL>for option_ in reversed(options_):<EOL><INDENT>func = option_(func)<EOL><DEDENT>return func<EOL>
|
Having the common app options as a decorator facilitates reuse.
|
f9415:m0
|
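The `options` decorator above builds a list of click options and applies them in `reversed()` order so they decorate the command exactly as if they had been stacked by hand. A short sketch of that reuse pattern using `click.option` directly, with two illustrative option names that are not from the original:

```python
import click

def common_options(func):
    """Apply a shared set of click options to any command (same pattern as above):
    applying in reverse keeps the options in declaration order."""
    options_ = [
        click.option("--datadir", default="~/.demo", show_default=True, help="Data directory."),
        click.option("--log-level", default="INFO", show_default=True, help="Logging level."),
    ]
    for option_ in reversed(options_):
        func = option_(func)
    return func

@click.command()
@common_options
def run(datadir, log_level):
    click.echo(f"datadir={datadir} log_level={log_level}")

if __name__ == "__main__":
    run()
```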
@run.command()<EOL>@option(<EOL>'<STR_LIT>',<EOL>is_flag=True,<EOL>help='<STR_LIT>',<EOL>)<EOL>def version(short):
|
if short:<EOL><INDENT>print(get_system_spec()['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>print(json.dumps(<EOL>get_system_spec(),<EOL>indent=<NUM_LIT:2>,<EOL>))<EOL><DEDENT>
|
Print version information and exit.
|
f9415:m2
|
@run.command(<EOL>help=(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>)<EOL>@click.option('<STR_LIT>', type=ADDRESS_TYPE, required=True)<EOL>@click.pass_context<EOL>def echonode(ctx, token_address):
|
EchoNodeRunner(ctx.obj, ctx, token_address).run()<EOL>
|
Start a raiden Echo Node that will send received transfers back to the initiator.
|
f9415:m4
|
def setup_network_id_or_exit(<EOL>config: Dict[str, Any],<EOL>given_network_id: int,<EOL>web3: Web3,<EOL>) -> Tuple[int, bool]:
|
node_network_id = int(web3.version.network) <EOL>known_given_network_id = given_network_id in ID_TO_NETWORKNAME<EOL>known_node_network_id = node_network_id in ID_TO_NETWORKNAME<EOL>if node_network_id != given_network_id:<EOL><INDENT>if known_given_network_id and known_node_network_id:<EOL><INDENT>click.secho(<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>"<STR_LIT>",<EOL>fg='<STR_LIT>',<EOL>)<EOL><DEDENT>else:<EOL><INDENT>click.secho(<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>"<STR_LIT>",<EOL>fg='<STR_LIT>',<EOL>)<EOL><DEDENT>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>config['<STR_LIT>'] = given_network_id<EOL>return given_network_id, known_node_network_id<EOL>
|
Takes the given network id and checks it against the connected network.
If they don't match, exits the program with an error. If they do, adds it
to the configuration and then returns it along with whether it is a known network.
|
f9417:m1
|
def setup_environment(config: Dict[str, Any], environment_type: Environment) -> None:
|
<EOL>if environment_type == Environment.PRODUCTION:<EOL><INDENT>config['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] = True<EOL><DEDENT>config['<STR_LIT>'] = environment_type<EOL>print(f'<STR_LIT>')<EOL>
|
Sets the config depending on the environment type
|
f9417:m2
|
def setup_contracts_or_exit(<EOL>config: Dict[str, Any],<EOL>network_id: int,<EOL>) -> Dict[str, Any]:
|
environment_type = config['<STR_LIT>']<EOL>not_allowed = ( <EOL>network_id == <NUM_LIT:1> and<EOL>environment_type == Environment.DEVELOPMENT<EOL>)<EOL>if not_allowed:<EOL><INDENT>click.secho(<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>',<EOL>fg='<STR_LIT>',<EOL>)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>contracts = dict()<EOL>contracts_version = environment_type_to_contracts_version(environment_type)<EOL>config['<STR_LIT>'] = contracts_precompiled_path(contracts_version)<EOL>if network_id in ID_TO_NETWORKNAME and ID_TO_NETWORKNAME[network_id] != '<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>deployment_data = get_contracts_deployment_info(<EOL>chain_id=network_id,<EOL>version=contracts_version,<EOL>)<EOL><DEDENT>except ValueError:<EOL><INDENT>return contracts, False<EOL><DEDENT>contracts = deployment_data['<STR_LIT>']<EOL><DEDENT>return contracts<EOL>
|
Sets the contract deployment data depending on the network id and environment type.
If an invalid combination of network id and environment type is provided, exits
the program with an error.
|
f9417:m3
|
def setup_proxies_or_exit(<EOL>config: Dict[str, Any],<EOL>tokennetwork_registry_contract_address: str,<EOL>secret_registry_contract_address: str,<EOL>endpoint_registry_contract_address: str,<EOL>user_deposit_contract_address: str,<EOL>service_registry_contract_address: str,<EOL>blockchain_service: BlockChainService,<EOL>contracts: Dict[str, Any],<EOL>routing_mode: RoutingMode,<EOL>pathfinding_service_address: str,<EOL>pathfinding_eth_address: str,<EOL>) -> Proxies:
|
node_network_id = config['<STR_LIT>']<EOL>environment_type = config['<STR_LIT>']<EOL>contract_addresses_given = (<EOL>tokennetwork_registry_contract_address is not None and<EOL>secret_registry_contract_address is not None and<EOL>endpoint_registry_contract_address is not None<EOL>)<EOL>if not contract_addresses_given and not bool(contracts):<EOL><INDENT>click.secho(<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>",<EOL>fg='<STR_LIT>',<EOL>)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>try:<EOL><INDENT>token_network_registry = blockchain_service.token_network_registry(<EOL>tokennetwork_registry_contract_address or to_canonical_address(<EOL>contracts[CONTRACT_TOKEN_NETWORK_REGISTRY]['<STR_LIT:address>'],<EOL>),<EOL>)<EOL><DEDENT>except ContractVersionMismatch as e:<EOL><INDENT>handle_contract_version_mismatch(e)<EOL><DEDENT>except AddressWithoutCode:<EOL><INDENT>handle_contract_no_code('<STR_LIT>', tokennetwork_registry_contract_address)<EOL><DEDENT>except AddressWrongContract:<EOL><INDENT>handle_contract_wrong_address(<EOL>'<STR_LIT>',<EOL>tokennetwork_registry_contract_address,<EOL>)<EOL><DEDENT>try:<EOL><INDENT>secret_registry = blockchain_service.secret_registry(<EOL>secret_registry_contract_address or to_canonical_address(<EOL>contracts[CONTRACT_SECRET_REGISTRY]['<STR_LIT:address>'],<EOL>),<EOL>)<EOL><DEDENT>except ContractVersionMismatch as e:<EOL><INDENT>handle_contract_version_mismatch(e)<EOL><DEDENT>except AddressWithoutCode:<EOL><INDENT>handle_contract_no_code('<STR_LIT>', secret_registry_contract_address)<EOL><DEDENT>except AddressWrongContract:<EOL><INDENT>handle_contract_wrong_address('<STR_LIT>', secret_registry_contract_address)<EOL><DEDENT>if user_deposit_contract_address is not None:<EOL><INDENT>contracts[CONTRACT_USER_DEPOSIT] = user_deposit_contract_address<EOL><DEDENT>if service_registry_contract_address is not None:<EOL><INDENT>contracts[CONTRACT_SERVICE_REGISTRY] = (<EOL>service_registry_contract_address<EOL>)<EOL><DEDENT>user_deposit = None<EOL>should_use_user_deposit = (<EOL>environment_type == Environment.DEVELOPMENT and<EOL>ID_TO_NETWORKNAME.get(node_network_id) != '<STR_LIT>' and<EOL>CONTRACT_USER_DEPOSIT in contracts<EOL>)<EOL>if should_use_user_deposit:<EOL><INDENT>try:<EOL><INDENT>user_deposit = blockchain_service.user_deposit(<EOL>user_deposit_contract_address or to_canonical_address(<EOL>contracts[CONTRACT_USER_DEPOSIT]['<STR_LIT:address>'],<EOL>),<EOL>)<EOL><DEDENT>except ContractVersionMismatch as e:<EOL><INDENT>handle_contract_version_mismatch(e)<EOL><DEDENT>except AddressWithoutCode:<EOL><INDENT>handle_contract_no_code('<STR_LIT>', user_deposit_contract_address)<EOL><DEDENT>except AddressWrongContract:<EOL><INDENT>handle_contract_wrong_address('<STR_LIT>', user_deposit_contract_address)<EOL><DEDENT><DEDENT>service_registry = None<EOL>if CONTRACT_SERVICE_REGISTRY in contracts or service_registry_contract_address:<EOL><INDENT>try:<EOL><INDENT>service_registry = blockchain_service.service_registry(<EOL>service_registry_contract_address or to_canonical_address(<EOL>contracts[CONTRACT_SERVICE_REGISTRY]['<STR_LIT:address>'],<EOL>),<EOL>)<EOL><DEDENT>except ContractVersionMismatch as e:<EOL><INDENT>handle_contract_version_mismatch(e)<EOL><DEDENT>except AddressWithoutCode:<EOL><INDENT>handle_contract_no_code('<STR_LIT>', service_registry_contract_address)<EOL><DEDENT>except AddressWrongContract:<EOL><INDENT>handle_contract_wrong_address('<STR_LIT>', service_registry_contract_address)<EOL><DEDENT><DEDENT>if routing_mode == RoutingMode.PFS:<EOL><INDENT>if 
environment_type == Environment.PRODUCTION:<EOL><INDENT>click.secho(<EOL>'<STR_LIT>',<EOL>fg='<STR_LIT>',<EOL>)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if not service_registry and not pathfinding_service_address:<EOL><INDENT>click.secho(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>fg='<STR_LIT>',<EOL>)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>pfs_config = configure_pfs_or_exit(<EOL>pfs_address=pathfinding_service_address,<EOL>pfs_eth_address=pathfinding_eth_address,<EOL>routing_mode=routing_mode,<EOL>service_registry=service_registry,<EOL>)<EOL>msg = '<STR_LIT>'<EOL>assert pfs_config.eth_address is not None, msg<EOL>config['<STR_LIT>']['<STR_LIT>'] = pfs_config.url<EOL>config['<STR_LIT>']['<STR_LIT>'] = pfs_config.eth_address<EOL>config['<STR_LIT>']['<STR_LIT>'] = pfs_config.fee<EOL><DEDENT>else:<EOL><INDENT>config['<STR_LIT>']['<STR_LIT>'] = None<EOL>config['<STR_LIT>']['<STR_LIT>'] = None<EOL><DEDENT>proxies = Proxies(<EOL>token_network_registry=token_network_registry,<EOL>secret_registry=secret_registry,<EOL>user_deposit=user_deposit,<EOL>service_registry=service_registry,<EOL>)<EOL>return proxies<EOL>
|
Initialize and set up the contract proxies.
Depending on the contract addresses provided via the CLI, the routing mode,
the environment type and the network id, try to initialize the proxies.
Returns the initialized proxies or exits the application with an error if
there is a problem.
Also, depending on the given arguments, populates the config with PFS-related settings.
f9417:m7
|
def _startup_hook(self):
|
pass<EOL>
|
Hook that is called after startup is finished. Intended for subclass usage.
|
f9418:c0:m2
|
def _shutdown_hook(self):
|
pass<EOL>
|
Hook that is called just before shutdown. Intended for subclass usage.
|
f9418:c0:m3
|
def get_sender_expiration_threshold(lock: LockType) -> BlockNumber:
|
return BlockNumber(<EOL>lock.expiration + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS * <NUM_LIT:2>,<EOL>)<EOL>
|
Returns the block number at which the sender can send the remove-expired-lock message.
The remove lock expired message will be rejected if the expiration block
has not been confirmed. Additionally, the sender can account for possible
delays in the receiver, so a few additional blocks are used to avoid hanging the channel.
|
f9421:m0
|
def get_receiver_expiration_threshold(lock: HashTimeLockState) -> BlockNumber:
|
return BlockNumber(<EOL>lock.expiration + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,<EOL>)<EOL>
|
Returns the block number at which a remove lock expired can be accepted.
The receiver must wait for the block at which the lock expires to be confirmed.
This is necessary to handle reorgs which could hide a secret registration.
|
f9421:m1
|
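The two thresholds above differ only by the confirmation multiplier: the sender waits `expiration + 2 * DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS` blocks, the receiver `expiration + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS`. A small worked sketch; the value of the confirmation constant here is an assumption for illustration:

```python
# Assumed confirmation count; the real constant lives in the raiden constants module.
DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS = 5

def sender_expiration_threshold(lock_expiration: int) -> int:
    # the sender waits two confirmation windows before sending the lock-expired message
    return lock_expiration + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS * 2

def receiver_expiration_threshold(lock_expiration: int) -> int:
    # the receiver only needs the expiration block itself to be confirmed
    return lock_expiration + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS

expiration = 100
print(sender_expiration_threshold(expiration))    # 110
print(receiver_expiration_threshold(expiration))  # 105
```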
def is_lock_pending(<EOL>end_state: NettingChannelEndState,<EOL>secrethash: SecretHash,<EOL>) -> bool:
|
return (<EOL>secrethash in end_state.secrethashes_to_lockedlocks or<EOL>secrethash in end_state.secrethashes_to_unlockedlocks or<EOL>secrethash in end_state.secrethashes_to_onchain_unlockedlocks<EOL>)<EOL>
|
True if the `secrethash` corresponds to a lock that is pending to be claimed
and has not expired.
|
f9421:m2
|
def is_deposit_confirmed(<EOL>channel_state: NettingChannelState,<EOL>block_number: BlockNumber,<EOL>) -> bool:
|
if not channel_state.deposit_transaction_queue:<EOL><INDENT>return False<EOL><DEDENT>return is_transaction_confirmed(<EOL>channel_state.deposit_transaction_queue[<NUM_LIT:0>].block_number,<EOL>block_number,<EOL>)<EOL>
|
True if the block which mined the deposit transaction has been
confirmed.
|
f9421:m3
|
def is_lock_locked(<EOL>end_state: NettingChannelEndState,<EOL>secrethash: SecretHash,<EOL>) -> bool:
|
return secrethash in end_state.secrethashes_to_lockedlocks<EOL>
|
True if the `secrethash` is for a lock with an unknown secret.
|
f9421:m4
|
def is_lock_expired(<EOL>end_state: NettingChannelEndState,<EOL>lock: LockType,<EOL>block_number: BlockNumber,<EOL>lock_expiration_threshold: BlockNumber,<EOL>) -> SuccessOrError:
|
secret_registered_on_chain = lock.secrethash in end_state.secrethashes_to_onchain_unlockedlocks<EOL>if secret_registered_on_chain:<EOL><INDENT>return (False, '<STR_LIT>')<EOL><DEDENT>if block_number < lock_expiration_threshold:<EOL><INDENT>msg = (<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>)<EOL>return (False, msg)<EOL><DEDENT>return (True, None)<EOL>
|
Determine whether a lock has expired.
The lock has expired if both:
- The secret was not registered on-chain in time.
- The current block exceeds the lock's expiration + confirmation blocks.
|
f9421:m5
|
def is_secret_known(<EOL>end_state: NettingChannelEndState,<EOL>secrethash: SecretHash,<EOL>) -> bool:
|
return (<EOL>secrethash in end_state.secrethashes_to_unlockedlocks or<EOL>secrethash in end_state.secrethashes_to_onchain_unlockedlocks<EOL>)<EOL>
|
True if the `secrethash` is for a lock with a known secret.
|
f9421:m7
|
def is_secret_known_offchain(<EOL>end_state: NettingChannelEndState,<EOL>secrethash: SecretHash,<EOL>) -> bool:
|
return secrethash in end_state.secrethashes_to_unlockedlocks<EOL>
|
True if the `secrethash` is for a lock whose secret is known off-chain.
|
f9421:m8
|
def is_secret_known_onchain(<EOL>end_state: NettingChannelEndState,<EOL>secrethash: SecretHash,<EOL>) -> bool:
|
return secrethash in end_state.secrethashes_to_onchain_unlockedlocks<EOL>
|
True if the `secrethash` is for a lock whose secret is known on-chain.
|
f9421:m9
|
def get_secret(<EOL>end_state: NettingChannelEndState,<EOL>secrethash: SecretHash,<EOL>) -> Optional[Secret]:
|
partial_unlock_proof = end_state.secrethashes_to_unlockedlocks.get(secrethash)<EOL>if partial_unlock_proof is None:<EOL><INDENT>partial_unlock_proof = end_state.secrethashes_to_onchain_unlockedlocks.get(secrethash)<EOL><DEDENT>if partial_unlock_proof is not None:<EOL><INDENT>return partial_unlock_proof.secret<EOL><DEDENT>return None<EOL>
|
Returns `secret` if the `secrethash` is for a lock with a known secret.
|
f9421:m10
|
def is_balance_proof_safe_for_onchain_operations(<EOL>balance_proof: BalanceProofSignedState,<EOL>) -> bool:
|
total_amount = balance_proof.transferred_amount + balance_proof.locked_amount<EOL>return total_amount <= UINT256_MAX<EOL>
|
Check if the balance proof would overflow onchain.
|
f9421:m12
|
def is_balance_proof_usable_onchain(<EOL>received_balance_proof: BalanceProofSignedState,<EOL>channel_state: NettingChannelState,<EOL>sender_state: NettingChannelEndState,<EOL>) -> SuccessOrError:
|
expected_nonce = get_next_nonce(sender_state)<EOL>is_valid_signature_, signature_msg = is_valid_signature(<EOL>received_balance_proof,<EOL>sender_state.address,<EOL>)<EOL>result: SuccessOrError<EOL>if get_status(channel_state) != CHANNEL_STATE_OPENED:<EOL><INDENT>msg = f'<STR_LIT>'<EOL>result = (False, msg)<EOL><DEDENT>elif received_balance_proof.channel_identifier != channel_state.identifier:<EOL><INDENT>msg = (<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>)<EOL>result = (False, msg)<EOL><DEDENT>elif received_balance_proof.token_network_identifier != channel_state.token_network_identifier:<EOL><INDENT>msg = (<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>)<EOL>result = (False, msg)<EOL><DEDENT>elif received_balance_proof.chain_id != channel_state.chain_id:<EOL><INDENT>msg = (<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>)<EOL>result = (False, msg)<EOL><DEDENT>elif not is_balance_proof_safe_for_onchain_operations(received_balance_proof):<EOL><INDENT>transferred_amount_after_unlock = (<EOL>received_balance_proof.transferred_amount +<EOL>received_balance_proof.locked_amount<EOL>)<EOL>msg = (<EOL>f"<STR_LIT>"<EOL>f"<STR_LIT>"<EOL>)<EOL>result = (False, msg)<EOL><DEDENT>elif received_balance_proof.nonce != expected_nonce:<EOL><INDENT>msg = (<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>)<EOL>result = (False, msg)<EOL><DEDENT>elif not is_valid_signature_:<EOL><INDENT>result = (False, signature_msg)<EOL><DEDENT>else:<EOL><INDENT>result = (True, None)<EOL><DEDENT>return result<EOL>
|
Checks that the balance proof can be used on-chain.
For a balance proof to be valid it must be newer than the previous one,
i.e. the nonce must increase, the signature must tie the balance proof to
the correct channel, and the values must not result in an under/overflow
onchain.
Important: This predicate does not validate all the message fields. The
fields locksroot, transferred_amount, and locked_amount **MUST** be
validated elsewhere based on the message type.
|
f9421:m15
|
def get_batch_unlock_gain(<EOL>channel_state: NettingChannelState,<EOL>) -> UnlockGain:
|
gain_from_partner_locks = TokenAmount(sum(<EOL>unlock.lock.amount<EOL>for unlock in channel_state.partner_state.secrethashes_to_onchain_unlockedlocks.values()<EOL>))<EOL>"""<STR_LIT>"""<EOL>our_locked_locks_amount = sum(<EOL>lock.amount<EOL>for lock in channel_state.our_state.secrethashes_to_lockedlocks.values()<EOL>)<EOL>our_unclaimed_locks_amount = sum(<EOL>lock.amount for lock in channel_state.our_state.secrethashes_to_unlockedlocks.values()<EOL>)<EOL>gain_from_our_locks = TokenAmount(our_locked_locks_amount + our_unclaimed_locks_amount)<EOL>return UnlockGain(<EOL>from_partner_locks=gain_from_partner_locks,<EOL>from_our_locks=gain_from_our_locks,<EOL>)<EOL>
|
Collect amounts for unlocked/unclaimed locks and onchain unlocked locks.
Note: this function does not check expiry, so the values only make sense during settlement.
Returns:
gain_from_partner_locks: locks amount received and unlocked on-chain
gain_from_our_locks: locks amount which are unlocked or unclaimed
|
f9421:m24
|
def get_distributable(<EOL>sender: NettingChannelEndState,<EOL>receiver: NettingChannelEndState,<EOL>) -> TokenAmount:
|
_, _, transferred_amount, locked_amount = get_current_balanceproof(sender)<EOL>distributable = get_balance(sender, receiver) - get_amount_locked(sender)<EOL>overflow_limit = max(<EOL>UINT256_MAX - transferred_amount - locked_amount,<EOL><NUM_LIT:0>,<EOL>)<EOL>return TokenAmount(min(overflow_limit, distributable))<EOL>
|
Return the amount of tokens that can be used by the `sender`.
The returned value is limited to a UINT256, since that is the representation
used in the smart contracts and we cannot use a larger value. The limit is
enforced on transferred_amount + locked_amount to avoid overflows. This is
an additional security check.
|
f9421:m28
|
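`get_distributable` caps the spendable amount twice: by the channel balance minus the sender's locked tokens, and by the uint256 headroom left for `transferred_amount + locked_amount`. A worked sketch of the same arithmetic on plain integers:

```python
UINT256_MAX = 2 ** 256 - 1

def get_distributable(balance: int, amount_locked: int, transferred_amount: int, locked_amount: int) -> int:
    """Same arithmetic as above, on plain integers instead of channel end states."""
    distributable = balance - amount_locked
    # headroom left before transferred_amount + locked_amount would overflow uint256
    overflow_limit = max(UINT256_MAX - transferred_amount - locked_amount, 0)
    return min(overflow_limit, distributable)

# ordinary case: the channel balance is the binding constraint
print(get_distributable(balance=100, amount_locked=30, transferred_amount=10, locked_amount=30))  # 70
# pathological case: values near UINT256_MAX are capped by the overflow limit
print(get_distributable(balance=100, amount_locked=0, transferred_amount=UINT256_MAX - 5, locked_amount=0))  # 5
```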
def get_batch_unlock(<EOL>end_state: NettingChannelEndState,<EOL>) -> Optional[MerkleTreeLeaves]:
|
if len(end_state.merkletree.layers[LEAVES]) == <NUM_LIT:0>: <EOL><INDENT>return None<EOL><DEDENT>lockhashes_to_locks = dict()<EOL>lockhashes_to_locks.update({<EOL>lock.lockhash: lock<EOL>for secrethash, lock in end_state.secrethashes_to_lockedlocks.items()<EOL>})<EOL>lockhashes_to_locks.update({<EOL>proof.lock.lockhash: proof.lock<EOL>for secrethash, proof in end_state.secrethashes_to_unlockedlocks.items()<EOL>})<EOL>lockhashes_to_locks.update({<EOL>proof.lock.lockhash: proof.lock<EOL>for secrethash, proof in end_state.secrethashes_to_onchain_unlockedlocks.items()<EOL>})<EOL>ordered_locks = [<EOL>lockhashes_to_locks[LockHash(lockhash)]<EOL>for lockhash in end_state.merkletree.layers[LEAVES]<EOL>]<EOL>return cast(MerkleTreeLeaves, ordered_locks)<EOL>
|
Unlock proof for an entire merkle tree of pending locks
The unlock proof contains all the merkle tree data, tightly packed, needed by the token
network contract to verify the secret expiry and calculate the token amounts to transfer.
|
f9421:m29
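A small sketch of the ordering step above: each pending lock is looked up by its lockhash and emitted in the exact order of the merkle tree leaves. The string locks and byte lockhashes are placeholders for the real objects.

lockhashes_to_locks = {b'h2': 'lock-2', b'h1': 'lock-1'}
leaves = [b'h1', b'h2']                     # order fixed by the merkle tree
ordered = [lockhashes_to_locks[h] for h in leaves]
assert ordered == ['lock-1', 'lock-2']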
|
def get_lock(<EOL>end_state: NettingChannelEndState,<EOL>secrethash: SecretHash,<EOL>) -> Optional[HashTimeLockState]:
|
lock = end_state.secrethashes_to_lockedlocks.get(secrethash)<EOL>if not lock:<EOL><INDENT>partial_unlock = end_state.secrethashes_to_unlockedlocks.get(secrethash)<EOL>if not partial_unlock:<EOL><INDENT>partial_unlock = end_state.secrethashes_to_onchain_unlockedlocks.get(secrethash)<EOL><DEDENT>if partial_unlock:<EOL><INDENT>lock = partial_unlock.lock<EOL><DEDENT><DEDENT>assert isinstance(lock, HashTimeLockState) or lock is None<EOL>return lock<EOL>
|
Return the lock corresponding to `secrethash` or None if the lock is
unknown.
|
f9421:m30
|
def lock_exists_in_either_channel_side(<EOL>channel_state: NettingChannelState,<EOL>secrethash: SecretHash,<EOL>) -> bool:
|
lock = get_lock(channel_state.our_state, secrethash)<EOL>if not lock:<EOL><INDENT>lock = get_lock(channel_state.partner_state, secrethash)<EOL><DEDENT>return lock is not None<EOL>
|
Check if the lock with `secrethash` exists in either our state or the partner's state
|
f9421:m31
|
def _del_lock(end_state: NettingChannelEndState, secrethash: SecretHash) -> None:
|
assert is_lock_pending(end_state, secrethash)<EOL>_del_unclaimed_lock(end_state, secrethash)<EOL>if secrethash in end_state.secrethashes_to_onchain_unlockedlocks:<EOL><INDENT>del end_state.secrethashes_to_onchain_unlockedlocks[secrethash]<EOL><DEDENT>
|
Removes the lock from the indexing structures.
Note:
This won't change the merkletree!
|
f9421:m37
|
def compute_merkletree_with(<EOL>merkletree: MerkleTreeState,<EOL>lockhash: LockHash,<EOL>) -> Optional[MerkleTreeState]:
|
<EOL>result = None<EOL>leaves = merkletree.layers[LEAVES]<EOL>if lockhash not in leaves:<EOL><INDENT>leaves = list(leaves)<EOL>leaves.append(Keccak256(lockhash))<EOL>result = MerkleTreeState(compute_layers(leaves))<EOL><DEDENT>return result<EOL>
|
Register the given lockhash with the existing merkle tree.
|
f9421:m42
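A sketch of the "only add the leaf if it is not already registered" rule above, with the leaves kept as a plain list of bytes and the layer recomputation left out (both simplifying assumptions for the example).

def sketch_add_leaf(leaves, lockhash):
    if lockhash in leaves:
        return None                 # already registered: no new tree is built
    return leaves + [lockhash]      # the real code turns this into new tree layers

assert sketch_add_leaf([b'a', b'b'], b'a') is None
assert sketch_add_leaf([b'a', b'b'], b'c') == [b'a', b'b', b'c']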
|
def register_offchain_secret(<EOL>channel_state: NettingChannelState,<EOL>secret: Secret,<EOL>secrethash: SecretHash,<EOL>) -> None:
|
our_state = channel_state.our_state<EOL>partner_state = channel_state.partner_state<EOL>register_secret_endstate(our_state, secret, secrethash)<EOL>register_secret_endstate(partner_state, secret, secrethash)<EOL>
|
This will register the secret and set the lock to the unlocked state.
Even though the lock is unlocked it is *not* claimed. The capacity will
increase once the next balance proof is received.
|
f9421:m54
|
def register_onchain_secret(<EOL>channel_state: NettingChannelState,<EOL>secret: Secret,<EOL>secrethash: SecretHash,<EOL>secret_reveal_block_number: BlockNumber,<EOL>delete_lock: bool = True,<EOL>) -> None:
|
our_state = channel_state.our_state<EOL>partner_state = channel_state.partner_state<EOL>register_onchain_secret_endstate(<EOL>our_state,<EOL>secret,<EOL>secrethash,<EOL>secret_reveal_block_number,<EOL>delete_lock,<EOL>)<EOL>register_onchain_secret_endstate(<EOL>partner_state,<EOL>secret,<EOL>secrethash,<EOL>secret_reveal_block_number,<EOL>delete_lock,<EOL>)<EOL>
|
This will register the onchain secret and set the lock to the unlocked state.
Even though the lock is unlocked it is *not* claimed. The capacity will
increase once the next balance proof is received.
|
f9421:m55
|
def handle_receive_lock_expired(<EOL>channel_state: NettingChannelState,<EOL>state_change: ReceiveLockExpired,<EOL>block_number: BlockNumber,<EOL>) -> TransitionResult[NettingChannelState]:
|
is_valid, msg, merkletree = is_valid_lock_expired(<EOL>state_change=state_change,<EOL>channel_state=channel_state,<EOL>sender_state=channel_state.partner_state,<EOL>receiver_state=channel_state.our_state,<EOL>block_number=block_number,<EOL>)<EOL>events: List[Event] = list()<EOL>if is_valid:<EOL><INDENT>assert merkletree, '<STR_LIT>'<EOL>channel_state.partner_state.balance_proof = state_change.balance_proof<EOL>channel_state.partner_state.merkletree = merkletree<EOL>_del_unclaimed_lock(channel_state.partner_state, state_change.secrethash)<EOL>send_processed = SendProcessed(<EOL>recipient=state_change.balance_proof.sender,<EOL>channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,<EOL>message_identifier=state_change.message_identifier,<EOL>)<EOL>events = [send_processed]<EOL><DEDENT>else:<EOL><INDENT>assert msg, '<STR_LIT>'<EOL>invalid_lock_expired = EventInvalidReceivedLockExpired(<EOL>secrethash=state_change.secrethash,<EOL>reason=msg,<EOL>)<EOL>events = [invalid_lock_expired]<EOL><DEDENT>return TransitionResult(channel_state, events)<EOL>
|
Remove expired locks from channel states.
|
f9421:m59
|
def handle_receive_lockedtransfer(<EOL>channel_state: NettingChannelState,<EOL>mediated_transfer: LockedTransferSignedState,<EOL>) -> EventsOrError:
|
events: List[Event]<EOL>is_valid, msg, merkletree = is_valid_lockedtransfer(<EOL>mediated_transfer,<EOL>channel_state,<EOL>channel_state.partner_state,<EOL>channel_state.our_state,<EOL>)<EOL>if is_valid:<EOL><INDENT>assert merkletree, '<STR_LIT>'<EOL>channel_state.partner_state.balance_proof = mediated_transfer.balance_proof<EOL>channel_state.partner_state.merkletree = merkletree<EOL>lock = mediated_transfer.lock<EOL>channel_state.partner_state.secrethashes_to_lockedlocks[lock.secrethash] = lock<EOL>send_processed = SendProcessed(<EOL>recipient=mediated_transfer.balance_proof.sender,<EOL>channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,<EOL>message_identifier=mediated_transfer.message_identifier,<EOL>)<EOL>events = [send_processed]<EOL><DEDENT>else:<EOL><INDENT>assert msg, '<STR_LIT>'<EOL>invalid_locked = EventInvalidReceivedLockedTransfer(<EOL>payment_identifier=mediated_transfer.payment_identifier,<EOL>reason=msg,<EOL>)<EOL>events = [invalid_locked]<EOL><DEDENT>return is_valid, events, msg<EOL>
|
Register the latest known transfer.
The receiver needs to use this method to update the container with a
_valid_ transfer, otherwise the locksroot will not contain the pending
transfer. The receiver needs to ensure that the merkle root has the
secrethash included, otherwise it won't be able to claim it.
|
f9421:m60
|
def all_neighbour_nodes(chain_state: ChainState) -> Set[Address]:
|
addresses = set()<EOL>for payment_network in chain_state.identifiers_to_paymentnetworks.values():<EOL><INDENT>for token_network in payment_network.tokenidentifiers_to_tokennetworks.values():<EOL><INDENT>channel_states = token_network.channelidentifiers_to_channels.values()<EOL>for channel_state in channel_states:<EOL><INDENT>addresses.add(channel_state.partner_state.address)<EOL><DEDENT><DEDENT><DEDENT>return addresses<EOL>
|
Return the identifiers for all nodes across all payment networks which
have a channel open with this one.
|
f9422:m0
|
def get_token_network_identifiers(<EOL>chain_state: ChainState,<EOL>payment_network_id: PaymentNetworkID,<EOL>) -> List[TokenNetworkID]:
|
payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id)<EOL>if payment_network is not None:<EOL><INDENT>return [<EOL>token_network.address<EOL>for token_network in payment_network.tokenidentifiers_to_tokennetworks.values()<EOL>]<EOL><DEDENT>return list()<EOL>
|
Return the list of token networks registered with the given payment network.
|
f9422:m14
|
def get_token_identifiers(<EOL>chain_state: ChainState,<EOL>payment_network_id: PaymentNetworkID,<EOL>) -> List[TokenAddress]:
|
payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id)<EOL>if payment_network is not None:<EOL><INDENT>return [<EOL>token_address<EOL>for token_address in payment_network.tokenaddresses_to_tokenidentifiers.keys()<EOL>]<EOL><DEDENT>return list()<EOL>
|
Return the list of tokens registered with the given payment network.
|
f9422:m15
|
def get_channelstate_for(<EOL>chain_state: ChainState,<EOL>payment_network_id: PaymentNetworkID,<EOL>token_address: TokenAddress,<EOL>partner_address: Address,<EOL>) -> Optional[NettingChannelState]:
|
token_network = get_token_network_by_token_address(<EOL>chain_state,<EOL>payment_network_id,<EOL>token_address,<EOL>)<EOL>channel_state = None<EOL>if token_network:<EOL><INDENT>channels = [<EOL>token_network.channelidentifiers_to_channels[channel_id]<EOL>for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]<EOL>]<EOL>states = filter_channels_by_status(<EOL>channels,<EOL>[CHANNEL_STATE_UNUSABLE],<EOL>)<EOL>if states:<EOL><INDENT>channel_state = states[-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>return channel_state<EOL>
|
Return the NettingChannelState if it exists, None otherwise.
|
f9422:m19
|
def get_channelstate_by_token_network_and_partner(<EOL>chain_state: ChainState,<EOL>token_network_id: TokenNetworkID,<EOL>partner_address: Address,<EOL>) -> Optional[NettingChannelState]:
|
token_network = get_token_network_by_identifier(<EOL>chain_state,<EOL>token_network_id,<EOL>)<EOL>channel_state = None<EOL>if token_network:<EOL><INDENT>channels = [<EOL>token_network.channelidentifiers_to_channels[channel_id]<EOL>for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]<EOL>]<EOL>states = filter_channels_by_status(<EOL>channels,<EOL>[CHANNEL_STATE_UNUSABLE],<EOL>)<EOL>if states:<EOL><INDENT>channel_state = states[-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>return channel_state<EOL>
|
Return the NettingChannelState if it exists, None otherwise.
|
f9422:m20
|
def get_channelstate_by_canonical_identifier(<EOL>chain_state: ChainState,<EOL>canonical_identifier: CanonicalIdentifier,<EOL>) -> Optional[NettingChannelState]:
|
token_network = get_token_network_by_identifier(<EOL>chain_state,<EOL>TokenNetworkID(canonical_identifier.token_network_address),<EOL>)<EOL>channel_state = None<EOL>if token_network:<EOL><INDENT>channel_state = token_network.channelidentifiers_to_channels.get(<EOL>canonical_identifier.channel_identifier,<EOL>)<EOL><DEDENT>return channel_state<EOL>
|
Return the NettingChannelState if it exists, None otherwise.
|
f9422:m21
|
def get_channelstate_filter(<EOL>chain_state: ChainState,<EOL>payment_network_id: PaymentNetworkID,<EOL>token_address: TokenAddress,<EOL>filter_fn: Callable,<EOL>) -> List[NettingChannelState]:
|
token_network = get_token_network_by_token_address(<EOL>chain_state,<EOL>payment_network_id,<EOL>token_address,<EOL>)<EOL>result: List[NettingChannelState] = []<EOL>if not token_network:<EOL><INDENT>return result<EOL><DEDENT>for channel_state in token_network.channelidentifiers_to_channels.values():<EOL><INDENT>if filter_fn(channel_state):<EOL><INDENT>result.append(channel_state)<EOL><DEDENT><DEDENT>return result<EOL>
|
Return the state of channels that match the condition in `filter_fn`
|
f9422:m22
|
def get_channelstate_open(<EOL>chain_state: ChainState,<EOL>payment_network_id: PaymentNetworkID,<EOL>token_address: TokenAddress,<EOL>) -> List[NettingChannelState]:
|
return get_channelstate_filter(<EOL>chain_state,<EOL>payment_network_id,<EOL>token_address,<EOL>lambda channel_state: channel.get_status(channel_state) == CHANNEL_STATE_OPENED,<EOL>)<EOL>
|
Return the state of open channels in a token network.
|
f9422:m23
|
def get_channelstate_closing(<EOL>chain_state: ChainState,<EOL>payment_network_id: PaymentNetworkID,<EOL>token_address: TokenAddress,<EOL>) -> List[NettingChannelState]:
|
return get_channelstate_filter(<EOL>chain_state,<EOL>payment_network_id,<EOL>token_address,<EOL>lambda channel_state: channel.get_status(channel_state) == CHANNEL_STATE_CLOSING,<EOL>)<EOL>
|
Return the state of closing channels in a token network.
|
f9422:m24
|
def get_channelstate_closed(<EOL>chain_state: ChainState,<EOL>payment_network_id: PaymentNetworkID,<EOL>token_address: TokenAddress,<EOL>) -> List[NettingChannelState]:
|
return get_channelstate_filter(<EOL>chain_state,<EOL>payment_network_id,<EOL>token_address,<EOL>lambda channel_state: channel.get_status(channel_state) == CHANNEL_STATE_CLOSED,<EOL>)<EOL>
|
Return the state of closed channels in a token network.
|
f9422:m25
|
def get_channelstate_settling(<EOL>chain_state: ChainState,<EOL>payment_network_id: PaymentNetworkID,<EOL>token_address: TokenAddress,<EOL>) -> List[NettingChannelState]:
|
return get_channelstate_filter(<EOL>chain_state,<EOL>payment_network_id,<EOL>token_address,<EOL>lambda channel_state: channel.get_status(channel_state) == CHANNEL_STATE_SETTLING,<EOL>)<EOL>
|
Return the state of settling channels in a token network.
|
f9422:m26
|
def get_channelstate_settled(<EOL>chain_state: ChainState,<EOL>payment_network_id: PaymentNetworkID,<EOL>token_address: TokenAddress,<EOL>) -> List[NettingChannelState]:
|
return get_channelstate_filter(<EOL>chain_state,<EOL>payment_network_id,<EOL>token_address,<EOL>lambda channel_state: channel.get_status(channel_state) == CHANNEL_STATE_SETTLED,<EOL>)<EOL>
|
Return the state of settled channels in a token network.
|
f9422:m27
|
def role_from_transfer_task(transfer_task: TransferTask) -> str:
|
if isinstance(transfer_task, InitiatorTask):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if isinstance(transfer_task, MediatorTask):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if isinstance(transfer_task, TargetTask):<EOL><INDENT>return '<STR_LIT:target>'<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>
|
Return the role the node plays in the transfer. Raises ValueError for unknown task types.
|
f9422:m28
|
def secret_from_transfer_task(<EOL>transfer_task: Optional[TransferTask],<EOL>secrethash: SecretHash,<EOL>) -> Optional[Secret]:
|
assert isinstance(transfer_task, InitiatorTask)<EOL>transfer_state = transfer_task.manager_state.initiator_transfers[secrethash]<EOL>if transfer_state is None:<EOL><INDENT>return None<EOL><DEDENT>return transfer_state.transfer_description.secret<EOL>
|
Return the secret for the transfer, None on EMPTY_SECRET.
|
f9422:m29
|
def get_transfer_role(chain_state: ChainState, secrethash: SecretHash) -> Optional[str]:
|
task = chain_state.payment_mapping.secrethashes_to_task.get(secrethash)<EOL>if not task:<EOL><INDENT>return None<EOL><DEDENT>return role_from_transfer_task(task)<EOL>
|
Returns 'initiator', 'mediator' or 'target' to signify the role the node has
in a transfer. If a transfer task is not found for the secrethash then the
function returns None
|
f9422:m30
|
def filter_channels_by_status(<EOL>channel_states: List[NettingChannelState],<EOL>exclude_states: Optional[List[str]] = None,<EOL>) -> List[NettingChannelState]:
|
if exclude_states is None:<EOL><INDENT>exclude_states = []<EOL><DEDENT>states = []<EOL>for channel_state in channel_states:<EOL><INDENT>if channel.get_status(channel_state) not in exclude_states:<EOL><INDENT>states.append(channel_state)<EOL><DEDENT><DEDENT>return states<EOL>
|
Filter the list of channels by excluding ones
for which the state exists in `exclude_states`.
|
f9422:m37
|
def detect_balance_proof_change(<EOL>old_state: ChainState,<EOL>current_state: ChainState,<EOL>) -> Iterator[Union[BalanceProofSignedState, BalanceProofUnsignedState]]:
|
if old_state == current_state:<EOL><INDENT>return<EOL><DEDENT>for payment_network_identifier in current_state.identifiers_to_paymentnetworks:<EOL><INDENT>try:<EOL><INDENT>old_payment_network = old_state.identifiers_to_paymentnetworks.get(<EOL>payment_network_identifier,<EOL>)<EOL><DEDENT>except AttributeError:<EOL><INDENT>old_payment_network = None<EOL><DEDENT>current_payment_network = current_state.identifiers_to_paymentnetworks[<EOL>payment_network_identifier<EOL>]<EOL>if old_payment_network == current_payment_network:<EOL><INDENT>continue<EOL><DEDENT>for token_network_identifier in current_payment_network.tokenidentifiers_to_tokennetworks:<EOL><INDENT>if old_payment_network:<EOL><INDENT>old_token_network = old_payment_network.tokenidentifiers_to_tokennetworks.get(<EOL>token_network_identifier,<EOL>)<EOL><DEDENT>else:<EOL><INDENT>old_token_network = None<EOL><DEDENT>current_token_network = current_payment_network.tokenidentifiers_to_tokennetworks[<EOL>token_network_identifier<EOL>]<EOL>if old_token_network == current_token_network:<EOL><INDENT>continue<EOL><DEDENT>for channel_identifier in current_token_network.channelidentifiers_to_channels:<EOL><INDENT>if old_token_network:<EOL><INDENT>old_channel = old_token_network.channelidentifiers_to_channels.get(<EOL>channel_identifier,<EOL>)<EOL><DEDENT>else:<EOL><INDENT>old_channel = None<EOL><DEDENT>current_channel = current_token_network.channelidentifiers_to_channels[<EOL>channel_identifier<EOL>]<EOL>if current_channel == old_channel:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>partner_state_updated = (<EOL>current_channel.partner_state.balance_proof is not None and<EOL>(<EOL>old_channel is None or<EOL>old_channel.partner_state.balance_proof !=<EOL>current_channel.partner_state.balance_proof<EOL>)<EOL>)<EOL>if partner_state_updated:<EOL><INDENT>assert current_channel.partner_state.balance_proof, MYPY_ANNOTATION<EOL>yield current_channel.partner_state.balance_proof<EOL><DEDENT>our_state_updated = (<EOL>current_channel.our_state.balance_proof is not None and<EOL>(<EOL>old_channel is None or<EOL>old_channel.our_state.balance_proof !=<EOL>current_channel.our_state.balance_proof<EOL>)<EOL>)<EOL>if our_state_updated:<EOL><INDENT>assert current_channel.our_state.balance_proof, MYPY_ANNOTATION<EOL>yield current_channel.our_state.balance_proof<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>
|
Compare two states for any received balance_proofs that are not in `old_state`.
|
f9422:m38
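A self-contained sketch of the comparison rule above, using flat dicts in place of the nested ChainState objects (an assumption made to keep the example runnable): a channel's balance proof is yielded when it is set and differs from the old snapshot.

def sketch_changed_proofs(old_channels, new_channels):
    for channel_id, new_proof in new_channels.items():
        old_proof = old_channels.get(channel_id)
        if new_proof is not None and new_proof != old_proof:
            yield new_proof

old = {1: 'proof-a', 2: None}
new = {1: 'proof-a', 2: 'proof-b', 3: 'proof-c'}
assert sorted(sketch_changed_proofs(old, new)) == ['proof-b', 'proof-c']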
|
def events_for_unlock_lock(<EOL>initiator_state: InitiatorTransferState,<EOL>channel_state: NettingChannelState,<EOL>secret: Secret,<EOL>secrethash: SecretHash,<EOL>pseudo_random_generator: random.Random,<EOL>) -> List[Event]:
|
<EOL>transfer_description = initiator_state.transfer_description<EOL>message_identifier = message_identifier_from_prng(pseudo_random_generator)<EOL>unlock_lock = channel.send_unlock(<EOL>channel_state=channel_state,<EOL>message_identifier=message_identifier,<EOL>payment_identifier=transfer_description.payment_identifier,<EOL>secret=secret,<EOL>secrethash=secrethash,<EOL>)<EOL>payment_sent_success = EventPaymentSentSuccess(<EOL>payment_network_identifier=channel_state.payment_network_identifier,<EOL>token_network_identifier=TokenNetworkID(channel_state.token_network_identifier),<EOL>identifier=transfer_description.payment_identifier,<EOL>amount=transfer_description.amount,<EOL>target=transfer_description.target,<EOL>secret=secret,<EOL>)<EOL>unlock_success = EventUnlockSuccess(<EOL>transfer_description.payment_identifier,<EOL>transfer_description.secrethash,<EOL>)<EOL>return [unlock_lock, payment_sent_success, unlock_success]<EOL>
|
Unlocks the lock offchain, and emits the events for the successful payment.
|
f9423:m0
|
def handle_block(<EOL>initiator_state: InitiatorTransferState,<EOL>state_change: Block,<EOL>channel_state: NettingChannelState,<EOL>pseudo_random_generator: random.Random,<EOL>) -> TransitionResult[InitiatorTransferState]:
|
secrethash = initiator_state.transfer.lock.secrethash<EOL>locked_lock = channel_state.our_state.secrethashes_to_lockedlocks.get(secrethash)<EOL>if not locked_lock:<EOL><INDENT>if channel_state.partner_state.secrethashes_to_lockedlocks.get(secrethash):<EOL><INDENT>return TransitionResult(initiator_state, list())<EOL><DEDENT>else:<EOL><INDENT>return TransitionResult(None, list())<EOL><DEDENT><DEDENT>lock_expiration_threshold = BlockNumber(<EOL>locked_lock.expiration + DEFAULT_WAIT_BEFORE_LOCK_REMOVAL,<EOL>)<EOL>lock_has_expired, _ = channel.is_lock_expired(<EOL>end_state=channel_state.our_state,<EOL>lock=locked_lock,<EOL>block_number=state_change.block_number,<EOL>lock_expiration_threshold=lock_expiration_threshold,<EOL>)<EOL>events: List[Event] = list()<EOL>if lock_has_expired:<EOL><INDENT>is_channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED<EOL>if is_channel_open:<EOL><INDENT>expired_lock_events = channel.events_for_expired_lock(<EOL>channel_state=channel_state,<EOL>locked_lock=locked_lock,<EOL>pseudo_random_generator=pseudo_random_generator,<EOL>)<EOL>events.extend(expired_lock_events)<EOL><DEDENT>if initiator_state.received_secret_request:<EOL><INDENT>reason = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>reason = '<STR_LIT>'<EOL><DEDENT>transfer_description = initiator_state.transfer_description<EOL>payment_identifier = transfer_description.payment_identifier<EOL>payment_failed = EventPaymentSentFailed(<EOL>payment_network_identifier=transfer_description.payment_network_identifier,<EOL>token_network_identifier=transfer_description.token_network_identifier,<EOL>identifier=payment_identifier,<EOL>target=transfer_description.target,<EOL>reason=reason,<EOL>)<EOL>unlock_failed = EventUnlockFailed(<EOL>identifier=payment_identifier,<EOL>secrethash=initiator_state.transfer_description.secrethash,<EOL>reason=reason,<EOL>)<EOL>lock_exists = channel.lock_exists_in_either_channel_side(<EOL>channel_state=channel_state,<EOL>secrethash=secrethash,<EOL>)<EOL>return TransitionResult(<EOL>initiator_state if lock_exists else None,<EOL>events + [payment_failed, unlock_failed],<EOL>)<EOL><DEDENT>else:<EOL><INDENT>return TransitionResult(initiator_state, events)<EOL><DEDENT>
|
Checks if the lock has expired; if it has, sends a remove-expired-lock
message and emits the failure events.
|
f9423:m1
|
def get_initial_lock_expiration(<EOL>block_number: BlockNumber,<EOL>reveal_timeout: BlockTimeout,<EOL>) -> BlockExpiration:
|
return BlockExpiration(block_number + reveal_timeout * <NUM_LIT:2>)<EOL>
|
Returns the expiration used for all hash-time-locks in a transfer.
|
f9423:m2
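A worked example of the rule above with made-up numbers: the lock expiration is the current block number plus twice the reveal timeout.

block_number = 1_000
reveal_timeout = 50
lock_expiration = block_number + reveal_timeout * 2
assert lock_expiration == 1_100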
|
def next_channel_from_routes(<EOL>available_routes: List[RouteState],<EOL>channelidentifiers_to_channels: ChannelMap,<EOL>transfer_amount: PaymentAmount,<EOL>) -> Optional[NettingChannelState]:
|
for route in available_routes:<EOL><INDENT>channel_identifier = route.channel_identifier<EOL>channel_state = channelidentifiers_to_channels.get(channel_identifier)<EOL>if not channel_state:<EOL><INDENT>continue<EOL><DEDENT>if channel.get_status(channel_state) != CHANNEL_STATE_OPENED:<EOL><INDENT>continue<EOL><DEDENT>pending_transfers = channel.get_number_of_pending_transfers(channel_state.our_state)<EOL>if pending_transfers >= MAXIMUM_PENDING_TRANSFERS:<EOL><INDENT>continue<EOL><DEDENT>distributable = channel.get_distributable(<EOL>channel_state.our_state,<EOL>channel_state.partner_state,<EOL>)<EOL>if transfer_amount > distributable:<EOL><INDENT>continue<EOL><DEDENT>if channel.is_valid_amount(channel_state.our_state, transfer_amount):<EOL><INDENT>return channel_state<EOL><DEDENT><DEDENT>return None<EOL>
|
Returns the first channel that can be used to start the transfer.
The routing service can race with local changes, so the recommended routes
must be validated.
|
f9423:m3
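A sketch of the per-route checks above, with channels reduced to plain dicts; the field names and the MAXIMUM_PENDING_TRANSFERS value are assumptions made for the example, not the real constants or types.

MAXIMUM_PENDING_TRANSFERS = 160  # assumed value for the example

def sketch_pick_channel(candidate_channels, transfer_amount):
    for channel in candidate_channels:
        if channel['status'] != 'opened':
            continue
        if channel['pending_transfers'] >= MAXIMUM_PENDING_TRANSFERS:
            continue
        if transfer_amount > channel['distributable']:
            continue
        return channel
    return None

candidates = [
    {'status': 'closed', 'pending_transfers': 0, 'distributable': 100},
    {'status': 'opened', 'pending_transfers': 3, 'distributable': 100},
]
assert sketch_pick_channel(candidates, 40) is candidates[1]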
|
def send_lockedtransfer(<EOL>transfer_description: TransferDescriptionWithSecretState,<EOL>channel_state: NettingChannelState,<EOL>message_identifier: MessageID,<EOL>block_number: BlockNumber,<EOL>) -> SendLockedTransfer:
|
assert channel_state.token_network_identifier == transfer_description.token_network_identifier<EOL>lock_expiration = get_initial_lock_expiration(<EOL>block_number,<EOL>channel_state.reveal_timeout,<EOL>)<EOL>total_amount = PaymentWithFeeAmount(<EOL>transfer_description.amount + transfer_description.allocated_fee,<EOL>)<EOL>lockedtransfer_event = channel.send_lockedtransfer(<EOL>channel_state=channel_state,<EOL>initiator=transfer_description.initiator,<EOL>target=transfer_description.target,<EOL>amount=total_amount,<EOL>message_identifier=message_identifier,<EOL>payment_identifier=transfer_description.payment_identifier,<EOL>expiration=lock_expiration,<EOL>secrethash=transfer_description.secrethash,<EOL>)<EOL>return lockedtransfer_event<EOL>
|
Create a mediated transfer using the given channel.
|
f9423:m5
|
def handle_offchain_secretreveal(<EOL>initiator_state: InitiatorTransferState,<EOL>state_change: ReceiveSecretReveal,<EOL>channel_state: NettingChannelState,<EOL>pseudo_random_generator: random.Random,<EOL>) -> TransitionResult[InitiatorTransferState]:
|
iteration: TransitionResult[InitiatorTransferState]<EOL>valid_reveal = is_valid_secret_reveal(<EOL>state_change=state_change,<EOL>transfer_secrethash=initiator_state.transfer_description.secrethash,<EOL>secret=state_change.secret,<EOL>)<EOL>sent_by_partner = state_change.sender == channel_state.partner_state.address<EOL>is_channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED<EOL>if valid_reveal and is_channel_open and sent_by_partner:<EOL><INDENT>events = events_for_unlock_lock(<EOL>initiator_state=initiator_state,<EOL>channel_state=channel_state,<EOL>secret=state_change.secret,<EOL>secrethash=state_change.secrethash,<EOL>pseudo_random_generator=pseudo_random_generator,<EOL>)<EOL>iteration = TransitionResult(None, events)<EOL><DEDENT>else:<EOL><INDENT>events = list()<EOL>iteration = TransitionResult(initiator_state, events)<EOL><DEDENT>return iteration<EOL>
|
Once the next hop proves it knows the secret, the initiator can unlock
the mediated transfer.
This will validate the secret, and if valid a new balance proof is sent to
the next hop with the current lock removed from the merkle tree and the
transferred amount updated.
|
f9423:m7
|
def handle_onchain_secretreveal(<EOL>initiator_state: InitiatorTransferState,<EOL>state_change: ContractReceiveSecretReveal,<EOL>channel_state: NettingChannelState,<EOL>pseudo_random_generator: random.Random,<EOL>) -> TransitionResult[InitiatorTransferState]:
|
iteration: TransitionResult[InitiatorTransferState]<EOL>secret = state_change.secret<EOL>secrethash = initiator_state.transfer_description.secrethash<EOL>is_valid_secret = is_valid_secret_reveal(<EOL>state_change=state_change,<EOL>transfer_secrethash=secrethash,<EOL>secret=secret,<EOL>)<EOL>is_channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED<EOL>is_lock_expired = state_change.block_number > initiator_state.transfer.lock.expiration<EOL>is_lock_unlocked = (<EOL>is_valid_secret and<EOL>not is_lock_expired<EOL>)<EOL>if is_lock_unlocked:<EOL><INDENT>channel.register_onchain_secret(<EOL>channel_state=channel_state,<EOL>secret=secret,<EOL>secrethash=secrethash,<EOL>secret_reveal_block_number=state_change.block_number,<EOL>)<EOL><DEDENT>if is_lock_unlocked and is_channel_open:<EOL><INDENT>events = events_for_unlock_lock(<EOL>initiator_state,<EOL>channel_state,<EOL>state_change.secret,<EOL>state_change.secrethash,<EOL>pseudo_random_generator,<EOL>)<EOL>iteration = TransitionResult(None, events)<EOL><DEDENT>else:<EOL><INDENT>events = list()<EOL>iteration = TransitionResult(initiator_state, events)<EOL><DEDENT>return iteration<EOL>
|
When a secret is revealed on-chain all nodes learn the secret.
This checks that the on-chain secret corresponds to the one used by the
initiator, and if valid a new balance proof is sent to the next hop with
the current lock removed from the merkle tree and the transferred amount
updated.
|
f9423:m8
|
def events_for_onchain_secretreveal(<EOL>target_state: TargetTransferState,<EOL>channel_state: NettingChannelState,<EOL>block_number: BlockNumber,<EOL>block_hash: BlockHash,<EOL>) -> List[Event]:
|
transfer = target_state.transfer<EOL>expiration = transfer.lock.expiration<EOL>safe_to_wait, _ = is_safe_to_wait(<EOL>expiration,<EOL>channel_state.reveal_timeout,<EOL>block_number,<EOL>)<EOL>secret_known_offchain = channel.is_secret_known_offchain(<EOL>channel_state.partner_state,<EOL>transfer.lock.secrethash,<EOL>)<EOL>has_onchain_reveal_started = (<EOL>target_state.state == TargetTransferState.ONCHAIN_SECRET_REVEAL<EOL>)<EOL>if not safe_to_wait and secret_known_offchain and not has_onchain_reveal_started:<EOL><INDENT>target_state.state = TargetTransferState.ONCHAIN_SECRET_REVEAL<EOL>secret = channel.get_secret(<EOL>channel_state.partner_state,<EOL>transfer.lock.secrethash,<EOL>)<EOL>assert secret, '<STR_LIT>'<EOL>return secret_registry.events_for_onchain_secretreveal(<EOL>channel_state=channel_state,<EOL>secret=secret,<EOL>expiration=expiration,<EOL>block_hash=block_hash,<EOL>)<EOL><DEDENT>return list()<EOL>
|
Emits the event for revealing the secret on-chain if the transfer
cannot be settled off-chain.
|
f9424:m1
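A sketch of the decision above: the secret is registered on-chain only when it is no longer safe to keep waiting, the secret is already known off-chain, and an on-chain reveal has not already been started.

def sketch_should_reveal_onchain(safe_to_wait, secret_known_offchain, reveal_started):
    return (not safe_to_wait) and secret_known_offchain and (not reveal_started)

assert sketch_should_reveal_onchain(False, True, False) is True
assert sketch_should_reveal_onchain(True, True, False) is False
assert sketch_should_reveal_onchain(False, False, False) is False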
|
def handle_inittarget(<EOL>state_change: ActionInitTarget,<EOL>channel_state: NettingChannelState,<EOL>pseudo_random_generator: random.Random,<EOL>block_number: BlockNumber,<EOL>) -> TransitionResult[TargetTransferState]:
|
transfer = state_change.transfer<EOL>route = state_change.route<EOL>assert channel_state.identifier == transfer.balance_proof.channel_identifier<EOL>is_valid, channel_events, errormsg = channel.handle_receive_lockedtransfer(<EOL>channel_state,<EOL>transfer,<EOL>)<EOL>if is_valid:<EOL><INDENT>target_state = TargetTransferState(route, transfer)<EOL>safe_to_wait, _ = is_safe_to_wait(<EOL>transfer.lock.expiration,<EOL>channel_state.reveal_timeout,<EOL>block_number,<EOL>)<EOL>if safe_to_wait:<EOL><INDENT>message_identifier = message_identifier_from_prng(pseudo_random_generator)<EOL>recipient = transfer.initiator<EOL>secret_request = SendSecretRequest(<EOL>recipient=Address(recipient),<EOL>channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,<EOL>message_identifier=message_identifier,<EOL>payment_identifier=transfer.payment_identifier,<EOL>amount=transfer.lock.amount,<EOL>expiration=transfer.lock.expiration,<EOL>secrethash=transfer.lock.secrethash,<EOL>)<EOL>channel_events.append(secret_request)<EOL><DEDENT>iteration = TransitionResult(target_state, channel_events)<EOL><DEDENT>else:<EOL><INDENT>unlock_failed = EventUnlockClaimFailed(<EOL>identifier=transfer.payment_identifier,<EOL>secrethash=transfer.lock.secrethash,<EOL>reason=errormsg,<EOL>)<EOL>channel_events.append(unlock_failed)<EOL>iteration = TransitionResult(None, channel_events)<EOL><DEDENT>return iteration<EOL>
|
Handles an ActionInitTarget state change.
|
f9424:m2
|