id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
234,300
hyperledger/indy-plenum
plenum/server/node.py
Node.processOrdered
def processOrdered(self, ordered: Ordered):
    """
    Execute an Ordered message received from a replica.

    :param ordered: an ordered request batch
    :return: True when the batch came from the master replica and was
        executed; False when the instance is unknown or the batch came
        from a backup replica (backups' batches are only fed to the
        monitor, never executed)
    """
    # Guard: the ordering instance must still exist on this node.
    if ordered.instId not in self.instances.ids:
        logger.warning('{} got ordered request for instance {} which '
                       'does not exist'.format(self, ordered.instId))
        return False

    if ordered.instId != self.instances.masterId:
        # Requests from backup replicas are not executed
        logger.trace("{} got ordered requests from backup replica {}"
                     .format(self, ordered.instId))
        # Backup batches still count toward throughput monitoring.
        with self.metrics.measure_time(MetricsName.MONITOR_REQUEST_ORDERED_TIME):
            self.monitor.requestOrdered(ordered.valid_reqIdr + ordered.invalid_reqIdr,
                                        ordered.instId,
                                        self.requests,
                                        byMaster=False)
        return False

    logger.trace("{} got ordered requests from master replica"
                 .format(self))
    logger.debug("{} executing Ordered batch {} {} of {} requests; state root {}; txn root {}"
                 .format(self.name,
                         ordered.viewNo,
                         ordered.ppSeqNo,
                         len(ordered.valid_reqIdr),
                         ordered.stateRootHash,
                         ordered.txnRootHash))

    three_pc_batch = ThreePcBatch.from_ordered(ordered)
    # No uncommitted audit root means the 3PC batch was never (re-)applied
    # locally, so the stashed requests must be applied before execution.
    if self.db_manager.ledgers[AUDIT_LEDGER_ID].uncommittedRootHash is None:
        # if we order request during view change
        # in between catchup rounds, then the 3PC batch will not be applied,
        # since it was reverted before catchup started, and only COMMITs were
        # processed in between catchup that led to this ORDERED msg
        logger.info("{} applying stashed requests for batch {} {} of {} requests; state root {}; txn root {}"
                    .format(self.name,
                            three_pc_batch.view_no,
                            three_pc_batch.pp_seq_no,
                            len(three_pc_batch.valid_digests),
                            three_pc_batch.state_root,
                            three_pc_batch.txn_root))
        self.apply_stashed_reqs(three_pc_batch)

    # Commit the batch; must happen before the monitor is notified so the
    # monitor observes only executed master batches with byMaster=True.
    self.executeBatch(three_pc_batch,
                      ordered.valid_reqIdr,
                      ordered.invalid_reqIdr,
                      ordered.auditTxnRootHash)

    with self.metrics.measure_time(MetricsName.MONITOR_REQUEST_ORDERED_TIME):
        self.monitor.requestOrdered(ordered.valid_reqIdr + ordered.invalid_reqIdr,
                                    ordered.instId,
                                    self.requests,
                                    byMaster=True)

    return True
python
def processOrdered(self, ordered: Ordered): """ Execute ordered request :param ordered: an ordered request :return: whether executed """ if ordered.instId not in self.instances.ids: logger.warning('{} got ordered request for instance {} which ' 'does not exist'.format(self, ordered.instId)) return False if ordered.instId != self.instances.masterId: # Requests from backup replicas are not executed logger.trace("{} got ordered requests from backup replica {}" .format(self, ordered.instId)) with self.metrics.measure_time(MetricsName.MONITOR_REQUEST_ORDERED_TIME): self.monitor.requestOrdered(ordered.valid_reqIdr + ordered.invalid_reqIdr, ordered.instId, self.requests, byMaster=False) return False logger.trace("{} got ordered requests from master replica" .format(self)) logger.debug("{} executing Ordered batch {} {} of {} requests; state root {}; txn root {}" .format(self.name, ordered.viewNo, ordered.ppSeqNo, len(ordered.valid_reqIdr), ordered.stateRootHash, ordered.txnRootHash)) three_pc_batch = ThreePcBatch.from_ordered(ordered) if self.db_manager.ledgers[AUDIT_LEDGER_ID].uncommittedRootHash is None: # if we order request during view change # in between catchup rounds, then the 3PC batch will not be applied, # since it was reverted before catchup started, and only COMMITs were # processed in between catchup that led to this ORDERED msg logger.info("{} applying stashed requests for batch {} {} of {} requests; state root {}; txn root {}" .format(self.name, three_pc_batch.view_no, three_pc_batch.pp_seq_no, len(three_pc_batch.valid_digests), three_pc_batch.state_root, three_pc_batch.txn_root)) self.apply_stashed_reqs(three_pc_batch) self.executeBatch(three_pc_batch, ordered.valid_reqIdr, ordered.invalid_reqIdr, ordered.auditTxnRootHash) with self.metrics.measure_time(MetricsName.MONITOR_REQUEST_ORDERED_TIME): self.monitor.requestOrdered(ordered.valid_reqIdr + ordered.invalid_reqIdr, ordered.instId, self.requests, byMaster=True) return True
[ "def", "processOrdered", "(", "self", ",", "ordered", ":", "Ordered", ")", ":", "if", "ordered", ".", "instId", "not", "in", "self", ".", "instances", ".", "ids", ":", "logger", ".", "warning", "(", "'{} got ordered request for instance {} which '", "'does not exist'", ".", "format", "(", "self", ",", "ordered", ".", "instId", ")", ")", "return", "False", "if", "ordered", ".", "instId", "!=", "self", ".", "instances", ".", "masterId", ":", "# Requests from backup replicas are not executed", "logger", ".", "trace", "(", "\"{} got ordered requests from backup replica {}\"", ".", "format", "(", "self", ",", "ordered", ".", "instId", ")", ")", "with", "self", ".", "metrics", ".", "measure_time", "(", "MetricsName", ".", "MONITOR_REQUEST_ORDERED_TIME", ")", ":", "self", ".", "monitor", ".", "requestOrdered", "(", "ordered", ".", "valid_reqIdr", "+", "ordered", ".", "invalid_reqIdr", ",", "ordered", ".", "instId", ",", "self", ".", "requests", ",", "byMaster", "=", "False", ")", "return", "False", "logger", ".", "trace", "(", "\"{} got ordered requests from master replica\"", ".", "format", "(", "self", ")", ")", "logger", ".", "debug", "(", "\"{} executing Ordered batch {} {} of {} requests; state root {}; txn root {}\"", ".", "format", "(", "self", ".", "name", ",", "ordered", ".", "viewNo", ",", "ordered", ".", "ppSeqNo", ",", "len", "(", "ordered", ".", "valid_reqIdr", ")", ",", "ordered", ".", "stateRootHash", ",", "ordered", ".", "txnRootHash", ")", ")", "three_pc_batch", "=", "ThreePcBatch", ".", "from_ordered", "(", "ordered", ")", "if", "self", ".", "db_manager", ".", "ledgers", "[", "AUDIT_LEDGER_ID", "]", ".", "uncommittedRootHash", "is", "None", ":", "# if we order request during view change", "# in between catchup rounds, then the 3PC batch will not be applied,", "# since it was reverted before catchup started, and only COMMITs were", "# processed in between catchup that led to this ORDERED msg", "logger", ".", "info", "(", "\"{} applying stashed requests 
for batch {} {} of {} requests; state root {}; txn root {}\"", ".", "format", "(", "self", ".", "name", ",", "three_pc_batch", ".", "view_no", ",", "three_pc_batch", ".", "pp_seq_no", ",", "len", "(", "three_pc_batch", ".", "valid_digests", ")", ",", "three_pc_batch", ".", "state_root", ",", "three_pc_batch", ".", "txn_root", ")", ")", "self", ".", "apply_stashed_reqs", "(", "three_pc_batch", ")", "self", ".", "executeBatch", "(", "three_pc_batch", ",", "ordered", ".", "valid_reqIdr", ",", "ordered", ".", "invalid_reqIdr", ",", "ordered", ".", "auditTxnRootHash", ")", "with", "self", ".", "metrics", ".", "measure_time", "(", "MetricsName", ".", "MONITOR_REQUEST_ORDERED_TIME", ")", ":", "self", ".", "monitor", ".", "requestOrdered", "(", "ordered", ".", "valid_reqIdr", "+", "ordered", ".", "invalid_reqIdr", ",", "ordered", ".", "instId", ",", "self", ".", "requests", ",", "byMaster", "=", "True", ")", "return", "True" ]
Execute ordered request :param ordered: an ordered request :return: whether executed
[ "Execute", "ordered", "request" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L2673-L2735
234,301
hyperledger/indy-plenum
plenum/server/node.py
Node.force_process_ordered
def force_process_ordered(self):
    """
    Pull every Ordered message the replicas are still holding (out of
    turn) and process each one immediately.

    This is a rare operation, used around catchup: either just before
    catchup starts while the node is participating (so it can report
    the latest ledger status), or when a non-participating node is
    about to begin a catchup round and the replicas' ordered messages
    need to be pushed through the stashed-ordered-requests path with
    the appropriate checks.
    """
    for inst_id, pending in self.replicas.take_ordereds_out_of_turn():
        handled = 0
        # enumerate(..., 1) leaves `handled` equal to the number of
        # messages processed (0 when `pending` is empty).
        for handled, ordered_msg in enumerate(pending, 1):
            self.try_processing_ordered(ordered_msg)
        logger.info('{} processed {} Ordered batches for instance {} '
                    'before starting catch up'
                    .format(self, handled, inst_id))
python
def force_process_ordered(self): """ Take any messages from replica that have been ordered and process them, this should be done rarely, like before catchup starts so a more current LedgerStatus can be sent. can be called either 1. when node is participating, this happens just before catchup starts so the node can have the latest ledger status or 2. when node is not participating but a round of catchup is about to be started, here is forces all the replica ordered messages to be appended to the stashed ordered requests and the stashed ordered requests are processed with appropriate checks """ for instance_id, messages in self.replicas.take_ordereds_out_of_turn(): num_processed = 0 for message in messages: self.try_processing_ordered(message) num_processed += 1 logger.info('{} processed {} Ordered batches for instance {} ' 'before starting catch up' .format(self, num_processed, instance_id))
[ "def", "force_process_ordered", "(", "self", ")", ":", "for", "instance_id", ",", "messages", "in", "self", ".", "replicas", ".", "take_ordereds_out_of_turn", "(", ")", ":", "num_processed", "=", "0", "for", "message", "in", "messages", ":", "self", ".", "try_processing_ordered", "(", "message", ")", "num_processed", "+=", "1", "logger", ".", "info", "(", "'{} processed {} Ordered batches for instance {} '", "'before starting catch up'", ".", "format", "(", "self", ",", "num_processed", ",", "instance_id", ")", ")" ]
Take any messages from replica that have been ordered and process them, this should be done rarely, like before catchup starts so a more current LedgerStatus can be sent. can be called either 1. when node is participating, this happens just before catchup starts so the node can have the latest ledger status or 2. when node is not participating but a round of catchup is about to be started, here is forces all the replica ordered messages to be appended to the stashed ordered requests and the stashed ordered requests are processed with appropriate checks
[ "Take", "any", "messages", "from", "replica", "that", "have", "been", "ordered", "and", "process", "them", "this", "should", "be", "done", "rarely", "like", "before", "catchup", "starts", "so", "a", "more", "current", "LedgerStatus", "can", "be", "sent", ".", "can", "be", "called", "either", "1", ".", "when", "node", "is", "participating", "this", "happens", "just", "before", "catchup", "starts", "so", "the", "node", "can", "have", "the", "latest", "ledger", "status", "or", "2", ".", "when", "node", "is", "not", "participating", "but", "a", "round", "of", "catchup", "is", "about", "to", "be", "started", "here", "is", "forces", "all", "the", "replica", "ordered", "messages", "to", "be", "appended", "to", "the", "stashed", "ordered", "requests", "and", "the", "stashed", "ordered", "requests", "are", "processed", "with", "appropriate", "checks" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L2737-L2758
234,302
hyperledger/indy-plenum
plenum/server/node.py
Node.processEscalatedException
def processEscalatedException(self, ex):
    """
    Handle an exception that a Replica escalated to this node.

    Suspicious-node exceptions are reported; anything else is a
    programming error and is re-raised wrapped in a RuntimeError.
    """
    if not isinstance(ex, SuspiciousNode):
        raise RuntimeError("unhandled replica-escalated exception") from ex
    self.reportSuspiciousNodeEx(ex)
python
def processEscalatedException(self, ex): """ Process an exception escalated from a Replica """ if isinstance(ex, SuspiciousNode): self.reportSuspiciousNodeEx(ex) else: raise RuntimeError("unhandled replica-escalated exception") from ex
[ "def", "processEscalatedException", "(", "self", ",", "ex", ")", ":", "if", "isinstance", "(", "ex", ",", "SuspiciousNode", ")", ":", "self", ".", "reportSuspiciousNodeEx", "(", "ex", ")", "else", ":", "raise", "RuntimeError", "(", "\"unhandled replica-escalated exception\"", ")", "from", "ex" ]
Process an exception escalated from a Replica
[ "Process", "an", "exception", "escalated", "from", "a", "Replica" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L2766-L2773
234,303
hyperledger/indy-plenum
plenum/server/node.py
Node.lost_master_primary
def lost_master_primary(self):
    """
    Record when the connection to the master primary was lost and
    schedule a primary connection check, which in turn can send a
    view change message.
    """
    master_inst_id = self.master_replica.instId
    lost_at = time.perf_counter()
    self.primaries_disconnection_times[master_inst_id] = lost_at
    self._schedule_view_change()
python
def lost_master_primary(self): """ Schedule an primary connection check which in turn can send a view change message """ self.primaries_disconnection_times[self.master_replica.instId] = time.perf_counter() self._schedule_view_change()
[ "def", "lost_master_primary", "(", "self", ")", ":", "self", ".", "primaries_disconnection_times", "[", "self", ".", "master_replica", ".", "instId", "]", "=", "time", ".", "perf_counter", "(", ")", "self", ".", "_schedule_view_change", "(", ")" ]
Schedule an primary connection check which in turn can send a view change message
[ "Schedule", "an", "primary", "connection", "check", "which", "in", "turn", "can", "send", "a", "view", "change", "message" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3134-L3140
234,304
hyperledger/indy-plenum
plenum/server/node.py
Node.executeBatch
def executeBatch(self, three_pc_batch: ThreePcBatch,
                 valid_reqs_keys: List,
                 invalid_reqs_keys: List,
                 audit_txn_root) -> None:
    """
    Commit an ordered 3PC batch: run pre-commit hooks, execute the
    ledger-specific committer, mark requests executed, run post-commit
    hooks, and forward a BatchCommitted message to observers.

    :param three_pc_batch: the batch to commit; its txn/state roots are
        converted to strings in place before committing
    :param valid_reqs_keys: keys of valid client requests in the batch
    :param invalid_reqs_keys: keys of invalid client requests in the batch
    :param audit_txn_root: audit ledger txn root to include in the
        BatchCommitted message sent to observers
    :raises Exception: re-raises whatever the ledger committer raises
    """
    # We need hashes in apply and str in commit
    three_pc_batch.txn_root = Ledger.hashToStr(three_pc_batch.txn_root)
    three_pc_batch.state_root = Ledger.hashToStr(three_pc_batch.state_root)

    # Pre-commit hooks: one per valid request, then one for the batch.
    for req_key in valid_reqs_keys:
        self.execute_hook(NodeHooks.PRE_REQUEST_COMMIT, req_key=req_key,
                          pp_time=three_pc_batch.pp_time,
                          state_root=three_pc_batch.state_root,
                          txn_root=three_pc_batch.txn_root)
    self.execute_hook(NodeHooks.PRE_BATCH_COMMITTED,
                      ledger_id=three_pc_batch.ledger_id,
                      pp_time=three_pc_batch.pp_time,
                      reqs_keys=valid_reqs_keys,
                      state_root=three_pc_batch.state_root,
                      txn_root=three_pc_batch.txn_root)

    # Commit via the executer registered for this ledger; a failure here
    # is logged with full batch context and re-raised.
    try:
        committedTxns = self.get_executer(three_pc_batch.ledger_id)(three_pc_batch)
    except Exception as exc:
        logger.error(
            "{} commit failed for batch request, error {}, view no {}, "
            "ppSeqNo {}, ledger {}, state root {}, txn root {}, "
            "requests: {}".format(
                self, repr(exc), three_pc_batch.view_no,
                three_pc_batch.pp_seq_no, three_pc_batch.ledger_id,
                three_pc_batch.state_root, three_pc_batch.txn_root,
                [req_idr for req_idr in valid_reqs_keys]
            )
        )
        raise

    # Both valid and invalid requests are considered executed.
    for req_key in valid_reqs_keys + invalid_reqs_keys:
        if req_key in self.requests:
            self.mark_request_as_executed(self.requests[req_key].request)
        else:
            # Means that this request is dropped from the main requests queue due to timeout,
            # but anyway it is ordered and executed normally
            logger.debug('{} normally executed request {} which object has been dropped '
                         'from the requests queue'.format(self, req_key))
            pass

    # TODO is it possible to get len(committedTxns) != len(valid_reqs)
    # someday
    if not committedTxns:
        return

    logger.debug("{} committed batch request, view no {}, ppSeqNo {}, "
                 "ledger {}, state root {}, txn root {}, requests: {}".
                 format(self, three_pc_batch.view_no, three_pc_batch.pp_seq_no,
                        three_pc_batch.ledger_id, three_pc_batch.state_root,
                        three_pc_batch.txn_root,
                        [key for key in valid_reqs_keys]))

    # Post-commit hook per committed transaction.
    for txn in committedTxns:
        self.execute_hook(NodeHooks.POST_REQUEST_COMMIT, txn=txn,
                          pp_time=three_pc_batch.pp_time,
                          state_root=three_pc_batch.state_root,
                          txn_root=three_pc_batch.txn_root)

    first_txn_seq_no = get_seq_no(committedTxns[0])
    last_txn_seq_no = get_seq_no(committedTxns[-1])

    # Build the observer payload; if any request object is already gone,
    # skip notifying observers entirely rather than send a partial list.
    reqs = []
    reqs_list_built = True
    for req_key in valid_reqs_keys:
        if req_key in self.requests:
            reqs.append(self.requests[req_key].request.as_dict)
        else:
            logger.warning("Could not build requests list for observers due to non-existent requests")
            reqs_list_built = False
            break

    if reqs_list_built:
        batch_committed_msg = BatchCommitted(reqs,
                                             three_pc_batch.ledger_id,
                                             0,
                                             three_pc_batch.view_no,
                                             three_pc_batch.pp_seq_no,
                                             three_pc_batch.pp_time,
                                             three_pc_batch.state_root,
                                             three_pc_batch.txn_root,
                                             first_txn_seq_no,
                                             last_txn_seq_no,
                                             audit_txn_root,
                                             three_pc_batch.primaries)
        self._observable.append_input(batch_committed_msg, self.name)
python
def executeBatch(self, three_pc_batch: ThreePcBatch, valid_reqs_keys: List, invalid_reqs_keys: List, audit_txn_root) -> None: """ Execute the REQUEST sent to this Node :param view_no: the view number (See glossary) :param pp_time: the time at which PRE-PREPARE was sent :param valid_reqs: list of valid client requests keys :param valid_reqs: list of invalid client requests keys """ # We need hashes in apply and str in commit three_pc_batch.txn_root = Ledger.hashToStr(three_pc_batch.txn_root) three_pc_batch.state_root = Ledger.hashToStr(three_pc_batch.state_root) for req_key in valid_reqs_keys: self.execute_hook(NodeHooks.PRE_REQUEST_COMMIT, req_key=req_key, pp_time=three_pc_batch.pp_time, state_root=three_pc_batch.state_root, txn_root=three_pc_batch.txn_root) self.execute_hook(NodeHooks.PRE_BATCH_COMMITTED, ledger_id=three_pc_batch.ledger_id, pp_time=three_pc_batch.pp_time, reqs_keys=valid_reqs_keys, state_root=three_pc_batch.state_root, txn_root=three_pc_batch.txn_root) try: committedTxns = self.get_executer(three_pc_batch.ledger_id)(three_pc_batch) except Exception as exc: logger.error( "{} commit failed for batch request, error {}, view no {}, " "ppSeqNo {}, ledger {}, state root {}, txn root {}, " "requests: {}".format( self, repr(exc), three_pc_batch.view_no, three_pc_batch.pp_seq_no, three_pc_batch.ledger_id, three_pc_batch.state_root, three_pc_batch.txn_root, [req_idr for req_idr in valid_reqs_keys] ) ) raise for req_key in valid_reqs_keys + invalid_reqs_keys: if req_key in self.requests: self.mark_request_as_executed(self.requests[req_key].request) else: # Means that this request is dropped from the main requests queue due to timeout, # but anyway it is ordered and executed normally logger.debug('{} normally executed request {} which object has been dropped ' 'from the requests queue'.format(self, req_key)) pass # TODO is it possible to get len(committedTxns) != len(valid_reqs) # someday if not committedTxns: return logger.debug("{} committed batch request, 
view no {}, ppSeqNo {}, " "ledger {}, state root {}, txn root {}, requests: {}". format(self, three_pc_batch.view_no, three_pc_batch.pp_seq_no, three_pc_batch.ledger_id, three_pc_batch.state_root, three_pc_batch.txn_root, [key for key in valid_reqs_keys])) for txn in committedTxns: self.execute_hook(NodeHooks.POST_REQUEST_COMMIT, txn=txn, pp_time=three_pc_batch.pp_time, state_root=three_pc_batch.state_root, txn_root=three_pc_batch.txn_root) first_txn_seq_no = get_seq_no(committedTxns[0]) last_txn_seq_no = get_seq_no(committedTxns[-1]) reqs = [] reqs_list_built = True for req_key in valid_reqs_keys: if req_key in self.requests: reqs.append(self.requests[req_key].request.as_dict) else: logger.warning("Could not build requests list for observers due to non-existent requests") reqs_list_built = False break if reqs_list_built: batch_committed_msg = BatchCommitted(reqs, three_pc_batch.ledger_id, 0, three_pc_batch.view_no, three_pc_batch.pp_seq_no, three_pc_batch.pp_time, three_pc_batch.state_root, three_pc_batch.txn_root, first_txn_seq_no, last_txn_seq_no, audit_txn_root, three_pc_batch.primaries) self._observable.append_input(batch_committed_msg, self.name)
[ "def", "executeBatch", "(", "self", ",", "three_pc_batch", ":", "ThreePcBatch", ",", "valid_reqs_keys", ":", "List", ",", "invalid_reqs_keys", ":", "List", ",", "audit_txn_root", ")", "->", "None", ":", "# We need hashes in apply and str in commit", "three_pc_batch", ".", "txn_root", "=", "Ledger", ".", "hashToStr", "(", "three_pc_batch", ".", "txn_root", ")", "three_pc_batch", ".", "state_root", "=", "Ledger", ".", "hashToStr", "(", "three_pc_batch", ".", "state_root", ")", "for", "req_key", "in", "valid_reqs_keys", ":", "self", ".", "execute_hook", "(", "NodeHooks", ".", "PRE_REQUEST_COMMIT", ",", "req_key", "=", "req_key", ",", "pp_time", "=", "three_pc_batch", ".", "pp_time", ",", "state_root", "=", "three_pc_batch", ".", "state_root", ",", "txn_root", "=", "three_pc_batch", ".", "txn_root", ")", "self", ".", "execute_hook", "(", "NodeHooks", ".", "PRE_BATCH_COMMITTED", ",", "ledger_id", "=", "three_pc_batch", ".", "ledger_id", ",", "pp_time", "=", "three_pc_batch", ".", "pp_time", ",", "reqs_keys", "=", "valid_reqs_keys", ",", "state_root", "=", "three_pc_batch", ".", "state_root", ",", "txn_root", "=", "three_pc_batch", ".", "txn_root", ")", "try", ":", "committedTxns", "=", "self", ".", "get_executer", "(", "three_pc_batch", ".", "ledger_id", ")", "(", "three_pc_batch", ")", "except", "Exception", "as", "exc", ":", "logger", ".", "error", "(", "\"{} commit failed for batch request, error {}, view no {}, \"", "\"ppSeqNo {}, ledger {}, state root {}, txn root {}, \"", "\"requests: {}\"", ".", "format", "(", "self", ",", "repr", "(", "exc", ")", ",", "three_pc_batch", ".", "view_no", ",", "three_pc_batch", ".", "pp_seq_no", ",", "three_pc_batch", ".", "ledger_id", ",", "three_pc_batch", ".", "state_root", ",", "three_pc_batch", ".", "txn_root", ",", "[", "req_idr", "for", "req_idr", "in", "valid_reqs_keys", "]", ")", ")", "raise", "for", "req_key", "in", "valid_reqs_keys", "+", "invalid_reqs_keys", ":", "if", "req_key", "in", "self", ".", "requests", ":", 
"self", ".", "mark_request_as_executed", "(", "self", ".", "requests", "[", "req_key", "]", ".", "request", ")", "else", ":", "# Means that this request is dropped from the main requests queue due to timeout,", "# but anyway it is ordered and executed normally", "logger", ".", "debug", "(", "'{} normally executed request {} which object has been dropped '", "'from the requests queue'", ".", "format", "(", "self", ",", "req_key", ")", ")", "pass", "# TODO is it possible to get len(committedTxns) != len(valid_reqs)", "# someday", "if", "not", "committedTxns", ":", "return", "logger", ".", "debug", "(", "\"{} committed batch request, view no {}, ppSeqNo {}, \"", "\"ledger {}, state root {}, txn root {}, requests: {}\"", ".", "format", "(", "self", ",", "three_pc_batch", ".", "view_no", ",", "three_pc_batch", ".", "pp_seq_no", ",", "three_pc_batch", ".", "ledger_id", ",", "three_pc_batch", ".", "state_root", ",", "three_pc_batch", ".", "txn_root", ",", "[", "key", "for", "key", "in", "valid_reqs_keys", "]", ")", ")", "for", "txn", "in", "committedTxns", ":", "self", ".", "execute_hook", "(", "NodeHooks", ".", "POST_REQUEST_COMMIT", ",", "txn", "=", "txn", ",", "pp_time", "=", "three_pc_batch", ".", "pp_time", ",", "state_root", "=", "three_pc_batch", ".", "state_root", ",", "txn_root", "=", "three_pc_batch", ".", "txn_root", ")", "first_txn_seq_no", "=", "get_seq_no", "(", "committedTxns", "[", "0", "]", ")", "last_txn_seq_no", "=", "get_seq_no", "(", "committedTxns", "[", "-", "1", "]", ")", "reqs", "=", "[", "]", "reqs_list_built", "=", "True", "for", "req_key", "in", "valid_reqs_keys", ":", "if", "req_key", "in", "self", ".", "requests", ":", "reqs", ".", "append", "(", "self", ".", "requests", "[", "req_key", "]", ".", "request", ".", "as_dict", ")", "else", ":", "logger", ".", "warning", "(", "\"Could not build requests list for observers due to non-existent requests\"", ")", "reqs_list_built", "=", "False", "break", "if", "reqs_list_built", ":", 
"batch_committed_msg", "=", "BatchCommitted", "(", "reqs", ",", "three_pc_batch", ".", "ledger_id", ",", "0", ",", "three_pc_batch", ".", "view_no", ",", "three_pc_batch", ".", "pp_seq_no", ",", "three_pc_batch", ".", "pp_time", ",", "three_pc_batch", ".", "state_root", ",", "three_pc_batch", ".", "txn_root", ",", "first_txn_seq_no", ",", "last_txn_seq_no", ",", "audit_txn_root", ",", "three_pc_batch", ".", "primaries", ")", "self", ".", "_observable", ".", "append_input", "(", "batch_committed_msg", ",", "self", ".", "name", ")" ]
Execute the REQUEST sent to this Node :param view_no: the view number (See glossary) :param pp_time: the time at which PRE-PREPARE was sent :param valid_reqs: list of valid client requests keys :param valid_reqs: list of invalid client requests keys
[ "Execute", "the", "REQUEST", "sent", "to", "this", "Node" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3251-L3346
234,305
hyperledger/indy-plenum
plenum/server/node.py
Node.addNewRole
def addNewRole(self, txn):
    """
    Add a new client or steward to this node based on the transaction.

    :param txn: a NYM-style transaction whose payload carries the target
        nym, optional verkey and optional role
    """
    # If the client authenticator is a simple authenticator then add verkey.
    # For a custom authenticator, handle appropriately.
    # NOTE: The following code should not be used in production
    if isinstance(self.clientAuthNr.core_authenticator, SimpleAuthNr):
        txn_data = get_payload_data(txn)
        identifier = txn_data[TARGET_NYM]
        verkey = txn_data.get(VERKEY)
        v = DidVerifier(verkey, identifier=identifier)
        if identifier not in self.clientAuthNr.core_authenticator.clients:
            role = txn_data.get(ROLE)
            # Both STEWARD and TRUSTEE (or no role) are acceptable here.
            # Fix: the old message claimed only STEWARD was allowed, which
            # contradicted the check below.
            if role not in (STEWARD, TRUSTEE, None):
                logger.debug("Role if present must be {} or {} and not {}".
                             format(Roles.STEWARD.name, Roles.TRUSTEE.name,
                                    role))
                return
            self.clientAuthNr.core_authenticator.addIdr(identifier,
                                                        verkey=v.verkey,
                                                        role=role)
python
def addNewRole(self, txn): """ Adds a new client or steward to this node based on transaction type. """ # If the client authenticator is a simple authenticator then add verkey. # For a custom authenticator, handle appropriately. # NOTE: The following code should not be used in production if isinstance(self.clientAuthNr.core_authenticator, SimpleAuthNr): txn_data = get_payload_data(txn) identifier = txn_data[TARGET_NYM] verkey = txn_data.get(VERKEY) v = DidVerifier(verkey, identifier=identifier) if identifier not in self.clientAuthNr.core_authenticator.clients: role = txn_data.get(ROLE) if role not in (STEWARD, TRUSTEE, None): logger.debug("Role if present must be {} and not {}". format(Roles.STEWARD.name, role)) return self.clientAuthNr.core_authenticator.addIdr(identifier, verkey=v.verkey, role=role)
[ "def", "addNewRole", "(", "self", ",", "txn", ")", ":", "# If the client authenticator is a simple authenticator then add verkey.", "# For a custom authenticator, handle appropriately.", "# NOTE: The following code should not be used in production", "if", "isinstance", "(", "self", ".", "clientAuthNr", ".", "core_authenticator", ",", "SimpleAuthNr", ")", ":", "txn_data", "=", "get_payload_data", "(", "txn", ")", "identifier", "=", "txn_data", "[", "TARGET_NYM", "]", "verkey", "=", "txn_data", ".", "get", "(", "VERKEY", ")", "v", "=", "DidVerifier", "(", "verkey", ",", "identifier", "=", "identifier", ")", "if", "identifier", "not", "in", "self", ".", "clientAuthNr", ".", "core_authenticator", ".", "clients", ":", "role", "=", "txn_data", ".", "get", "(", "ROLE", ")", "if", "role", "not", "in", "(", "STEWARD", ",", "TRUSTEE", ",", "None", ")", ":", "logger", ".", "debug", "(", "\"Role if present must be {} and not {}\"", ".", "format", "(", "Roles", ".", "STEWARD", ".", "name", ",", "role", ")", ")", "return", "self", ".", "clientAuthNr", ".", "core_authenticator", ".", "addIdr", "(", "identifier", ",", "verkey", "=", "v", ".", "verkey", ",", "role", "=", "role", ")" ]
Adds a new client or steward to this node based on transaction type.
[ "Adds", "a", "new", "client", "or", "steward", "to", "this", "node", "based", "on", "transaction", "type", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3441-L3461
234,306
hyperledger/indy-plenum
plenum/server/node.py
Node.ensureKeysAreSetup
def ensureKeysAreSetup(self):
    """
    Verify that this node's keys exist in the local STP keep.

    Raises KeysNotFoundException (REx) when they are not found, with a
    hint pointing at the key generation script.
    """
    if areKeysSetup(self.name, self.keys_dir):
        return
    raise REx(REx.reason.format(self.name) + self.keygenScript)
python
def ensureKeysAreSetup(self): """ Check whether the keys are setup in the local STP keep. Raises KeysNotFoundException if not found. """ if not areKeysSetup(self.name, self.keys_dir): raise REx(REx.reason.format(self.name) + self.keygenScript)
[ "def", "ensureKeysAreSetup", "(", "self", ")", ":", "if", "not", "areKeysSetup", "(", "self", ".", "name", ",", "self", ".", "keys_dir", ")", ":", "raise", "REx", "(", "REx", ".", "reason", ".", "format", "(", "self", ".", "name", ")", "+", "self", ".", "keygenScript", ")" ]
Check whether the keys are setup in the local STP keep. Raises KeysNotFoundException if not found.
[ "Check", "whether", "the", "keys", "are", "setup", "in", "the", "local", "STP", "keep", ".", "Raises", "KeysNotFoundException", "if", "not", "found", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3478-L3484
234,307
hyperledger/indy-plenum
plenum/server/node.py
Node.reportSuspiciousNodeEx
def reportSuspiciousNodeEx(self, ex: SuspiciousNode):
    """
    Report suspicion on a node using the details carried by the
    SuspiciousNode exception.
    """
    suspect = ex.node
    why = ex.reason
    suspicion_code = ex.code
    message = ex.offendingMsg
    self.reportSuspiciousNode(suspect, why, suspicion_code, message)
python
def reportSuspiciousNodeEx(self, ex: SuspiciousNode): """ Report suspicion on a node on the basis of an exception """ self.reportSuspiciousNode(ex.node, ex.reason, ex.code, ex.offendingMsg)
[ "def", "reportSuspiciousNodeEx", "(", "self", ",", "ex", ":", "SuspiciousNode", ")", ":", "self", ".", "reportSuspiciousNode", "(", "ex", ".", "node", ",", "ex", ".", "reason", ",", "ex", ".", "code", ",", "ex", ".", "offendingMsg", ")" ]
Report suspicion on a node on the basis of an exception
[ "Report", "suspicion", "on", "a", "node", "on", "the", "basis", "of", "an", "exception" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3492-L3496
234,308
hyperledger/indy-plenum
plenum/server/node.py
Node.reportSuspiciousNode
def reportSuspiciousNode(self, nodeName: str, reason=None, code: int = None, offendingMsg=None): """ Report suspicion on a node and add it to this node's blacklist. :param nodeName: name of the node to report suspicion on :param reason: the reason for suspicion """ logger.warning("{} raised suspicion on node {} for {}; suspicion code " "is {}".format(self, nodeName, reason, code)) # TODO need a more general solution here # TODO: Should not blacklist client on a single InvalidSignature. # Should track if a lot of requests with incorrect signatures have been # made in a short amount of time, only then blacklist client. # if code == InvalidSignature.code: # self.blacklistNode(nodeName, # reason=InvalidSignature.reason, # code=InvalidSignature.code) # TODO: Consider blacklisting nodes again. # if code in self.suspicions: # self.blacklistNode(nodeName, # reason=self.suspicions[code], # code=code) if code in (s.code for s in (Suspicions.PPR_DIGEST_WRONG, Suspicions.PPR_REJECT_WRONG, Suspicions.PPR_TXN_WRONG, Suspicions.PPR_STATE_WRONG, Suspicions.PPR_PLUGIN_EXCEPTION, Suspicions.PPR_SUB_SEQ_NO_WRONG, Suspicions.PPR_NOT_FINAL, Suspicions.PPR_WITH_ORDERED_REQUEST, Suspicions.PPR_AUDIT_TXN_ROOT_HASH_WRONG, Suspicions.PPR_BLS_MULTISIG_WRONG, Suspicions.PPR_TIME_WRONG, )): logger.display('{}{} got one of primary suspicions codes {}'.format(VIEW_CHANGE_PREFIX, self, code)) self.view_changer.on_suspicious_primary(Suspicions.get_by_code(code)) if offendingMsg: self.discard(offendingMsg, reason, logger.debug)
python
def reportSuspiciousNode(self, nodeName: str, reason=None, code: int = None, offendingMsg=None): """ Report suspicion on a node and add it to this node's blacklist. :param nodeName: name of the node to report suspicion on :param reason: the reason for suspicion """ logger.warning("{} raised suspicion on node {} for {}; suspicion code " "is {}".format(self, nodeName, reason, code)) # TODO need a more general solution here # TODO: Should not blacklist client on a single InvalidSignature. # Should track if a lot of requests with incorrect signatures have been # made in a short amount of time, only then blacklist client. # if code == InvalidSignature.code: # self.blacklistNode(nodeName, # reason=InvalidSignature.reason, # code=InvalidSignature.code) # TODO: Consider blacklisting nodes again. # if code in self.suspicions: # self.blacklistNode(nodeName, # reason=self.suspicions[code], # code=code) if code in (s.code for s in (Suspicions.PPR_DIGEST_WRONG, Suspicions.PPR_REJECT_WRONG, Suspicions.PPR_TXN_WRONG, Suspicions.PPR_STATE_WRONG, Suspicions.PPR_PLUGIN_EXCEPTION, Suspicions.PPR_SUB_SEQ_NO_WRONG, Suspicions.PPR_NOT_FINAL, Suspicions.PPR_WITH_ORDERED_REQUEST, Suspicions.PPR_AUDIT_TXN_ROOT_HASH_WRONG, Suspicions.PPR_BLS_MULTISIG_WRONG, Suspicions.PPR_TIME_WRONG, )): logger.display('{}{} got one of primary suspicions codes {}'.format(VIEW_CHANGE_PREFIX, self, code)) self.view_changer.on_suspicious_primary(Suspicions.get_by_code(code)) if offendingMsg: self.discard(offendingMsg, reason, logger.debug)
[ "def", "reportSuspiciousNode", "(", "self", ",", "nodeName", ":", "str", ",", "reason", "=", "None", ",", "code", ":", "int", "=", "None", ",", "offendingMsg", "=", "None", ")", ":", "logger", ".", "warning", "(", "\"{} raised suspicion on node {} for {}; suspicion code \"", "\"is {}\"", ".", "format", "(", "self", ",", "nodeName", ",", "reason", ",", "code", ")", ")", "# TODO need a more general solution here", "# TODO: Should not blacklist client on a single InvalidSignature.", "# Should track if a lot of requests with incorrect signatures have been", "# made in a short amount of time, only then blacklist client.", "# if code == InvalidSignature.code:", "# self.blacklistNode(nodeName,", "# reason=InvalidSignature.reason,", "# code=InvalidSignature.code)", "# TODO: Consider blacklisting nodes again.", "# if code in self.suspicions:", "# self.blacklistNode(nodeName,", "# reason=self.suspicions[code],", "# code=code)", "if", "code", "in", "(", "s", ".", "code", "for", "s", "in", "(", "Suspicions", ".", "PPR_DIGEST_WRONG", ",", "Suspicions", ".", "PPR_REJECT_WRONG", ",", "Suspicions", ".", "PPR_TXN_WRONG", ",", "Suspicions", ".", "PPR_STATE_WRONG", ",", "Suspicions", ".", "PPR_PLUGIN_EXCEPTION", ",", "Suspicions", ".", "PPR_SUB_SEQ_NO_WRONG", ",", "Suspicions", ".", "PPR_NOT_FINAL", ",", "Suspicions", ".", "PPR_WITH_ORDERED_REQUEST", ",", "Suspicions", ".", "PPR_AUDIT_TXN_ROOT_HASH_WRONG", ",", "Suspicions", ".", "PPR_BLS_MULTISIG_WRONG", ",", "Suspicions", ".", "PPR_TIME_WRONG", ",", ")", ")", ":", "logger", ".", "display", "(", "'{}{} got one of primary suspicions codes {}'", ".", "format", "(", "VIEW_CHANGE_PREFIX", ",", "self", ",", "code", ")", ")", "self", ".", "view_changer", ".", "on_suspicious_primary", "(", "Suspicions", ".", "get_by_code", "(", "code", ")", ")", "if", "offendingMsg", ":", "self", ".", "discard", "(", "offendingMsg", ",", "reason", ",", "logger", ".", "debug", ")" ]
Report suspicion on a node and add it to this node's blacklist. :param nodeName: name of the node to report suspicion on :param reason: the reason for suspicion
[ "Report", "suspicion", "on", "a", "node", "and", "add", "it", "to", "this", "node", "s", "blacklist", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3498-L3543
234,309
hyperledger/indy-plenum
plenum/server/node.py
Node.reportSuspiciousClient
def reportSuspiciousClient(self, clientName: str, reason): """ Report suspicion on a client and add it to this node's blacklist. :param clientName: name of the client to report suspicion on :param reason: the reason for suspicion """ logger.warning("{} raised suspicion on client {} for {}" .format(self, clientName, reason)) self.blacklistClient(clientName)
python
def reportSuspiciousClient(self, clientName: str, reason): """ Report suspicion on a client and add it to this node's blacklist. :param clientName: name of the client to report suspicion on :param reason: the reason for suspicion """ logger.warning("{} raised suspicion on client {} for {}" .format(self, clientName, reason)) self.blacklistClient(clientName)
[ "def", "reportSuspiciousClient", "(", "self", ",", "clientName", ":", "str", ",", "reason", ")", ":", "logger", ".", "warning", "(", "\"{} raised suspicion on client {} for {}\"", ".", "format", "(", "self", ",", "clientName", ",", "reason", ")", ")", "self", ".", "blacklistClient", "(", "clientName", ")" ]
Report suspicion on a client and add it to this node's blacklist. :param clientName: name of the client to report suspicion on :param reason: the reason for suspicion
[ "Report", "suspicion", "on", "a", "client", "and", "add", "it", "to", "this", "node", "s", "blacklist", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3545-L3554
234,310
hyperledger/indy-plenum
plenum/server/node.py
Node.blacklistClient
def blacklistClient(self, clientName: str, reason: str = None, code: int = None): """ Add the client specified by `clientName` to this node's blacklist """ msg = "{} blacklisting client {}".format(self, clientName) if reason: msg += " for reason {}".format(reason) logger.display(msg) self.clientBlacklister.blacklist(clientName)
python
def blacklistClient(self, clientName: str, reason: str = None, code: int = None): """ Add the client specified by `clientName` to this node's blacklist """ msg = "{} blacklisting client {}".format(self, clientName) if reason: msg += " for reason {}".format(reason) logger.display(msg) self.clientBlacklister.blacklist(clientName)
[ "def", "blacklistClient", "(", "self", ",", "clientName", ":", "str", ",", "reason", ":", "str", "=", "None", ",", "code", ":", "int", "=", "None", ")", ":", "msg", "=", "\"{} blacklisting client {}\"", ".", "format", "(", "self", ",", "clientName", ")", "if", "reason", ":", "msg", "+=", "\" for reason {}\"", ".", "format", "(", "reason", ")", "logger", ".", "display", "(", "msg", ")", "self", ".", "clientBlacklister", ".", "blacklist", "(", "clientName", ")" ]
Add the client specified by `clientName` to this node's blacklist
[ "Add", "the", "client", "specified", "by", "clientName", "to", "this", "node", "s", "blacklist" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3565-L3574
234,311
hyperledger/indy-plenum
plenum/server/node.py
Node.blacklistNode
def blacklistNode(self, nodeName: str, reason: str = None, code: int = None): """ Add the node specified by `nodeName` to this node's blacklist """ msg = "{} blacklisting node {}".format(self, nodeName) if reason: msg += " for reason {}".format(reason) if code: msg += " for code {}".format(code) logger.display(msg) self.nodeBlacklister.blacklist(nodeName)
python
def blacklistNode(self, nodeName: str, reason: str = None, code: int = None): """ Add the node specified by `nodeName` to this node's blacklist """ msg = "{} blacklisting node {}".format(self, nodeName) if reason: msg += " for reason {}".format(reason) if code: msg += " for code {}".format(code) logger.display(msg) self.nodeBlacklister.blacklist(nodeName)
[ "def", "blacklistNode", "(", "self", ",", "nodeName", ":", "str", ",", "reason", ":", "str", "=", "None", ",", "code", ":", "int", "=", "None", ")", ":", "msg", "=", "\"{} blacklisting node {}\"", ".", "format", "(", "self", ",", "nodeName", ")", "if", "reason", ":", "msg", "+=", "\" for reason {}\"", ".", "format", "(", "reason", ")", "if", "code", ":", "msg", "+=", "\" for code {}\"", ".", "format", "(", "code", ")", "logger", ".", "display", "(", "msg", ")", "self", ".", "nodeBlacklister", ".", "blacklist", "(", "nodeName", ")" ]
Add the node specified by `nodeName` to this node's blacklist
[ "Add", "the", "node", "specified", "by", "nodeName", "to", "this", "node", "s", "blacklist" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3585-L3595
234,312
hyperledger/indy-plenum
plenum/server/node.py
Node.logstats
def logstats(self): """ Print the node's current statistics to log. """ lines = [ "node {} current stats".format(self), "--------------------------------------------------------", "node inbox size : {}".format(len(self.nodeInBox)), "client inbox size : {}".format(len(self.clientInBox)), "age (seconds) : {}".format(time.time() - self.created), "next check for reconnect: {}".format(time.perf_counter() - self.nodestack.nextCheck), "node connections : {}".format(self.nodestack.conns), "f : {}".format(self.f), "master instance : {}".format(self.instances.masterId), "replicas : {}".format(len(self.replicas)), "view no : {}".format(self.viewNo), "rank : {}".format(self.rank), "msgs to replicas : {}".format(self.replicas.sum_inbox_len), "msgs to view changer : {}".format(len(self.msgsToViewChanger)), "action queue : {} {}".format(len(self.actionQueue), id(self.actionQueue)), "action queue stash : {} {}".format(len(self.aqStash), id(self.aqStash)), ] logger.info("\n".join(lines), extra={"cli": False})
python
def logstats(self): """ Print the node's current statistics to log. """ lines = [ "node {} current stats".format(self), "--------------------------------------------------------", "node inbox size : {}".format(len(self.nodeInBox)), "client inbox size : {}".format(len(self.clientInBox)), "age (seconds) : {}".format(time.time() - self.created), "next check for reconnect: {}".format(time.perf_counter() - self.nodestack.nextCheck), "node connections : {}".format(self.nodestack.conns), "f : {}".format(self.f), "master instance : {}".format(self.instances.masterId), "replicas : {}".format(len(self.replicas)), "view no : {}".format(self.viewNo), "rank : {}".format(self.rank), "msgs to replicas : {}".format(self.replicas.sum_inbox_len), "msgs to view changer : {}".format(len(self.msgsToViewChanger)), "action queue : {} {}".format(len(self.actionQueue), id(self.actionQueue)), "action queue stash : {} {}".format(len(self.aqStash), id(self.aqStash)), ] logger.info("\n".join(lines), extra={"cli": False})
[ "def", "logstats", "(", "self", ")", ":", "lines", "=", "[", "\"node {} current stats\"", ".", "format", "(", "self", ")", ",", "\"--------------------------------------------------------\"", ",", "\"node inbox size : {}\"", ".", "format", "(", "len", "(", "self", ".", "nodeInBox", ")", ")", ",", "\"client inbox size : {}\"", ".", "format", "(", "len", "(", "self", ".", "clientInBox", ")", ")", ",", "\"age (seconds) : {}\"", ".", "format", "(", "time", ".", "time", "(", ")", "-", "self", ".", "created", ")", ",", "\"next check for reconnect: {}\"", ".", "format", "(", "time", ".", "perf_counter", "(", ")", "-", "self", ".", "nodestack", ".", "nextCheck", ")", ",", "\"node connections : {}\"", ".", "format", "(", "self", ".", "nodestack", ".", "conns", ")", ",", "\"f : {}\"", ".", "format", "(", "self", ".", "f", ")", ",", "\"master instance : {}\"", ".", "format", "(", "self", ".", "instances", ".", "masterId", ")", ",", "\"replicas : {}\"", ".", "format", "(", "len", "(", "self", ".", "replicas", ")", ")", ",", "\"view no : {}\"", ".", "format", "(", "self", ".", "viewNo", ")", ",", "\"rank : {}\"", ".", "format", "(", "self", ".", "rank", ")", ",", "\"msgs to replicas : {}\"", ".", "format", "(", "self", ".", "replicas", ".", "sum_inbox_len", ")", ",", "\"msgs to view changer : {}\"", ".", "format", "(", "len", "(", "self", ".", "msgsToViewChanger", ")", ")", ",", "\"action queue : {} {}\"", ".", "format", "(", "len", "(", "self", ".", "actionQueue", ")", ",", "id", "(", "self", ".", "actionQueue", ")", ")", ",", "\"action queue stash : {} {}\"", ".", "format", "(", "len", "(", "self", ".", "aqStash", ")", ",", "id", "(", "self", ".", "aqStash", ")", ")", ",", "]", "logger", ".", "info", "(", "\"\\n\"", ".", "join", "(", "lines", ")", ",", "extra", "=", "{", "\"cli\"", ":", "False", "}", ")" ]
Print the node's current statistics to log.
[ "Print", "the", "node", "s", "current", "statistics", "to", "log", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3685-L3711
234,313
hyperledger/indy-plenum
plenum/server/node.py
Node.logNodeInfo
def logNodeInfo(self): """ Print the node's info to log for the REST backend to read. """ self.nodeInfo['data'] = self.collectNodeInfo() with closing(open(os.path.join(self.ledger_dir, 'node_info'), 'w')) \ as logNodeInfoFile: logNodeInfoFile.write(json.dumps(self.nodeInfo['data']))
python
def logNodeInfo(self): """ Print the node's info to log for the REST backend to read. """ self.nodeInfo['data'] = self.collectNodeInfo() with closing(open(os.path.join(self.ledger_dir, 'node_info'), 'w')) \ as logNodeInfoFile: logNodeInfoFile.write(json.dumps(self.nodeInfo['data']))
[ "def", "logNodeInfo", "(", "self", ")", ":", "self", ".", "nodeInfo", "[", "'data'", "]", "=", "self", ".", "collectNodeInfo", "(", ")", "with", "closing", "(", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "ledger_dir", ",", "'node_info'", ")", ",", "'w'", ")", ")", "as", "logNodeInfoFile", ":", "logNodeInfoFile", ".", "write", "(", "json", ".", "dumps", "(", "self", ".", "nodeInfo", "[", "'data'", "]", ")", ")" ]
Print the node's info to log for the REST backend to read.
[ "Print", "the", "node", "s", "info", "to", "log", "for", "the", "REST", "backend", "to", "read", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3738-L3746
234,314
hyperledger/indy-plenum
plenum/common/perf_util.py
get_collection_sizes
def get_collection_sizes(obj, collections: Optional[Tuple]=None, get_only_non_empty=False): """ Iterates over `collections` of the gives object and gives its byte size and number of items in collection """ from pympler import asizeof collections = collections or (list, dict, set, deque, abc.Sized) if not isinstance(collections, tuple): collections = tuple(collections) result = [] for attr_name in dir(obj): attr = getattr(obj, attr_name) if isinstance(attr, collections) and ( not get_only_non_empty or len(attr) > 0): result.append( (attr_name, len(attr), asizeof.asizeof(attr, detail=1))) return result
python
def get_collection_sizes(obj, collections: Optional[Tuple]=None, get_only_non_empty=False): """ Iterates over `collections` of the gives object and gives its byte size and number of items in collection """ from pympler import asizeof collections = collections or (list, dict, set, deque, abc.Sized) if not isinstance(collections, tuple): collections = tuple(collections) result = [] for attr_name in dir(obj): attr = getattr(obj, attr_name) if isinstance(attr, collections) and ( not get_only_non_empty or len(attr) > 0): result.append( (attr_name, len(attr), asizeof.asizeof(attr, detail=1))) return result
[ "def", "get_collection_sizes", "(", "obj", ",", "collections", ":", "Optional", "[", "Tuple", "]", "=", "None", ",", "get_only_non_empty", "=", "False", ")", ":", "from", "pympler", "import", "asizeof", "collections", "=", "collections", "or", "(", "list", ",", "dict", ",", "set", ",", "deque", ",", "abc", ".", "Sized", ")", "if", "not", "isinstance", "(", "collections", ",", "tuple", ")", ":", "collections", "=", "tuple", "(", "collections", ")", "result", "=", "[", "]", "for", "attr_name", "in", "dir", "(", "obj", ")", ":", "attr", "=", "getattr", "(", "obj", ",", "attr_name", ")", "if", "isinstance", "(", "attr", ",", "collections", ")", "and", "(", "not", "get_only_non_empty", "or", "len", "(", "attr", ")", ">", "0", ")", ":", "result", ".", "append", "(", "(", "attr_name", ",", "len", "(", "attr", ")", ",", "asizeof", ".", "asizeof", "(", "attr", ",", "detail", "=", "1", ")", ")", ")", "return", "result" ]
Iterates over `collections` of the gives object and gives its byte size and number of items in collection
[ "Iterates", "over", "collections", "of", "the", "gives", "object", "and", "gives", "its", "byte", "size", "and", "number", "of", "items", "in", "collection" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/perf_util.py#L52-L70
234,315
hyperledger/indy-plenum
ledger/error.py
returns_true_or_raises
def returns_true_or_raises(f): """A safety net. Decorator for functions that are only allowed to return True or raise an exception. Args: f: A function whose only expected return value is True. Returns: A wrapped functions whose guaranteed only return value is True. """ @functools.wraps(f) def wrapped(*args, **kwargs): ret = f(*args, **kwargs) if ret is not True: raise RuntimeError("Unexpected return value %r" % ret) return True return wrapped
python
def returns_true_or_raises(f): """A safety net. Decorator for functions that are only allowed to return True or raise an exception. Args: f: A function whose only expected return value is True. Returns: A wrapped functions whose guaranteed only return value is True. """ @functools.wraps(f) def wrapped(*args, **kwargs): ret = f(*args, **kwargs) if ret is not True: raise RuntimeError("Unexpected return value %r" % ret) return True return wrapped
[ "def", "returns_true_or_raises", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "ret", "is", "not", "True", ":", "raise", "RuntimeError", "(", "\"Unexpected return value %r\"", "%", "ret", ")", "return", "True", "return", "wrapped" ]
A safety net. Decorator for functions that are only allowed to return True or raise an exception. Args: f: A function whose only expected return value is True. Returns: A wrapped functions whose guaranteed only return value is True.
[ "A", "safety", "net", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/ledger/error.py#L126-L144
234,316
hyperledger/indy-plenum
plenum/server/instances.py
Instances.backupIds
def backupIds(self) -> Sequence[int]: """ Return the list of replicas that don't belong to the master protocol instance """ return [id for id in self.started.keys() if id != 0]
python
def backupIds(self) -> Sequence[int]: """ Return the list of replicas that don't belong to the master protocol instance """ return [id for id in self.started.keys() if id != 0]
[ "def", "backupIds", "(", "self", ")", "->", "Sequence", "[", "int", "]", ":", "return", "[", "id", "for", "id", "in", "self", ".", "started", ".", "keys", "(", ")", "if", "id", "!=", "0", "]" ]
Return the list of replicas that don't belong to the master protocol instance
[ "Return", "the", "list", "of", "replicas", "that", "don", "t", "belong", "to", "the", "master", "protocol", "instance" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/instances.py#L37-L42
234,317
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger._hasViewChangeQuorum
def _hasViewChangeQuorum(self): # This method should just be present for master instance. """ Checks whether n-f nodes completed view change and whether one of them is the next primary """ num_of_ready_nodes = len(self._view_change_done) diff = self.quorum - num_of_ready_nodes if diff > 0: logger.info('{} needs {} ViewChangeDone messages'.format(self, diff)) return False logger.info("{} got view change quorum ({} >= {})". format(self.name, num_of_ready_nodes, self.quorum)) return True
python
def _hasViewChangeQuorum(self): # This method should just be present for master instance. """ Checks whether n-f nodes completed view change and whether one of them is the next primary """ num_of_ready_nodes = len(self._view_change_done) diff = self.quorum - num_of_ready_nodes if diff > 0: logger.info('{} needs {} ViewChangeDone messages'.format(self, diff)) return False logger.info("{} got view change quorum ({} >= {})". format(self.name, num_of_ready_nodes, self.quorum)) return True
[ "def", "_hasViewChangeQuorum", "(", "self", ")", ":", "# This method should just be present for master instance.", "num_of_ready_nodes", "=", "len", "(", "self", ".", "_view_change_done", ")", "diff", "=", "self", ".", "quorum", "-", "num_of_ready_nodes", "if", "diff", ">", "0", ":", "logger", ".", "info", "(", "'{} needs {} ViewChangeDone messages'", ".", "format", "(", "self", ",", "diff", ")", ")", "return", "False", "logger", ".", "info", "(", "\"{} got view change quorum ({} >= {})\"", ".", "format", "(", "self", ".", "name", ",", "num_of_ready_nodes", ",", "self", ".", "quorum", ")", ")", "return", "True" ]
Checks whether n-f nodes completed view change and whether one of them is the next primary
[ "Checks", "whether", "n", "-", "f", "nodes", "completed", "view", "change", "and", "whether", "one", "of", "them", "is", "the", "next", "primary" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L234-L248
234,318
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger.process_instance_change_msg
def process_instance_change_msg(self, instChg: InstanceChange, frm: str) -> None: """ Validate and process an instance change request. :param instChg: the instance change request :param frm: the name of the node that sent this `msg` """ if frm not in self.provider.connected_nodes(): self.provider.discard( instChg, "received instance change request: {} from {} " "which is not in connected list: {}".format( instChg, frm, self.provider.connected_nodes()), logger.info) return logger.info("{} received instance change request: {} from {}".format(self, instChg, frm)) # TODO: add sender to blacklist? if not isinstance(instChg.viewNo, int): self.provider.discard( instChg, "{}field view_no has incorrect type: {}".format( VIEW_CHANGE_PREFIX, type(instChg.viewNo))) elif instChg.viewNo <= self.view_no: self.provider.discard( instChg, "Received instance change request with view no {} " "which is not more than its view no {}".format( instChg.viewNo, self.view_no), logger.info) else: # Record instance changes for views but send instance change # only when found master to be degraded. if quorum of view changes # found then change view even if master not degraded self._on_verified_instance_change_msg(instChg, frm) if self.instance_changes.has_inst_chng_from(instChg.viewNo, self.name): logger.info("{} received instance change message {} but has already " "sent an instance change message".format(self, instChg)) elif not self.provider.is_master_degraded(): logger.info("{} received instance change message {} but did not " "find the master to be slow".format(self, instChg)) else: logger.display("{}{} found master degraded after receiving instance change" " message from {}".format(VIEW_CHANGE_PREFIX, self, frm)) self.sendInstanceChange(instChg.viewNo)
python
def process_instance_change_msg(self, instChg: InstanceChange, frm: str) -> None: """ Validate and process an instance change request. :param instChg: the instance change request :param frm: the name of the node that sent this `msg` """ if frm not in self.provider.connected_nodes(): self.provider.discard( instChg, "received instance change request: {} from {} " "which is not in connected list: {}".format( instChg, frm, self.provider.connected_nodes()), logger.info) return logger.info("{} received instance change request: {} from {}".format(self, instChg, frm)) # TODO: add sender to blacklist? if not isinstance(instChg.viewNo, int): self.provider.discard( instChg, "{}field view_no has incorrect type: {}".format( VIEW_CHANGE_PREFIX, type(instChg.viewNo))) elif instChg.viewNo <= self.view_no: self.provider.discard( instChg, "Received instance change request with view no {} " "which is not more than its view no {}".format( instChg.viewNo, self.view_no), logger.info) else: # Record instance changes for views but send instance change # only when found master to be degraded. if quorum of view changes # found then change view even if master not degraded self._on_verified_instance_change_msg(instChg, frm) if self.instance_changes.has_inst_chng_from(instChg.viewNo, self.name): logger.info("{} received instance change message {} but has already " "sent an instance change message".format(self, instChg)) elif not self.provider.is_master_degraded(): logger.info("{} received instance change message {} but did not " "find the master to be slow".format(self, instChg)) else: logger.display("{}{} found master degraded after receiving instance change" " message from {}".format(VIEW_CHANGE_PREFIX, self, frm)) self.sendInstanceChange(instChg.viewNo)
[ "def", "process_instance_change_msg", "(", "self", ",", "instChg", ":", "InstanceChange", ",", "frm", ":", "str", ")", "->", "None", ":", "if", "frm", "not", "in", "self", ".", "provider", ".", "connected_nodes", "(", ")", ":", "self", ".", "provider", ".", "discard", "(", "instChg", ",", "\"received instance change request: {} from {} \"", "\"which is not in connected list: {}\"", ".", "format", "(", "instChg", ",", "frm", ",", "self", ".", "provider", ".", "connected_nodes", "(", ")", ")", ",", "logger", ".", "info", ")", "return", "logger", ".", "info", "(", "\"{} received instance change request: {} from {}\"", ".", "format", "(", "self", ",", "instChg", ",", "frm", ")", ")", "# TODO: add sender to blacklist?", "if", "not", "isinstance", "(", "instChg", ".", "viewNo", ",", "int", ")", ":", "self", ".", "provider", ".", "discard", "(", "instChg", ",", "\"{}field view_no has incorrect type: {}\"", ".", "format", "(", "VIEW_CHANGE_PREFIX", ",", "type", "(", "instChg", ".", "viewNo", ")", ")", ")", "elif", "instChg", ".", "viewNo", "<=", "self", ".", "view_no", ":", "self", ".", "provider", ".", "discard", "(", "instChg", ",", "\"Received instance change request with view no {} \"", "\"which is not more than its view no {}\"", ".", "format", "(", "instChg", ".", "viewNo", ",", "self", ".", "view_no", ")", ",", "logger", ".", "info", ")", "else", ":", "# Record instance changes for views but send instance change", "# only when found master to be degraded. 
if quorum of view changes", "# found then change view even if master not degraded", "self", ".", "_on_verified_instance_change_msg", "(", "instChg", ",", "frm", ")", "if", "self", ".", "instance_changes", ".", "has_inst_chng_from", "(", "instChg", ".", "viewNo", ",", "self", ".", "name", ")", ":", "logger", ".", "info", "(", "\"{} received instance change message {} but has already \"", "\"sent an instance change message\"", ".", "format", "(", "self", ",", "instChg", ")", ")", "elif", "not", "self", ".", "provider", ".", "is_master_degraded", "(", ")", ":", "logger", ".", "info", "(", "\"{} received instance change message {} but did not \"", "\"find the master to be slow\"", ".", "format", "(", "self", ",", "instChg", ")", ")", "else", ":", "logger", ".", "display", "(", "\"{}{} found master degraded after receiving instance change\"", "\" message from {}\"", ".", "format", "(", "VIEW_CHANGE_PREFIX", ",", "self", ",", "frm", ")", ")", "self", ".", "sendInstanceChange", "(", "instChg", ".", "viewNo", ")" ]
Validate and process an instance change request. :param instChg: the instance change request :param frm: the name of the node that sent this `msg`
[ "Validate", "and", "process", "an", "instance", "change", "request", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L373-L416
234,319
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger.process_vchd_msg
def process_vchd_msg(self, msg: ViewChangeDone, sender: str) -> bool: """ Processes ViewChangeDone messages. Once n-f messages have been received, decides on a primary for specific replica. :param msg: ViewChangeDone message :param sender: the name of the node from which this message was sent """ logger.info("{}'s primary selector started processing of ViewChangeDone msg from {} : {}". format(self.name, sender, msg)) view_no = msg.viewNo if self.view_no != view_no: self.provider.discard(msg, '{} got Primary from {} for view no {} ' 'whereas current view no is {}'. format(self, sender, view_no, self.view_no), logMethod=logger.info) return False new_primary_name = msg.name if new_primary_name == self.previous_master_primary: self.provider.discard(msg, '{} got Primary from {} for {} who was primary of ' 'master in previous view too'. format(self, sender, new_primary_name), logMethod=logger.info) return False # Since a node can send ViewChangeDone more than one time self._on_verified_view_change_done_msg(msg, sender) # TODO why do we check that after the message tracking if self.provider.has_primary(): self.provider.discard(msg, "it already decided primary which is {}". format(self.provider.current_primary_name()), logger.info) return False self._start_selection()
python
def process_vchd_msg(self, msg: ViewChangeDone, sender: str) -> bool: """ Processes ViewChangeDone messages. Once n-f messages have been received, decides on a primary for specific replica. :param msg: ViewChangeDone message :param sender: the name of the node from which this message was sent """ logger.info("{}'s primary selector started processing of ViewChangeDone msg from {} : {}". format(self.name, sender, msg)) view_no = msg.viewNo if self.view_no != view_no: self.provider.discard(msg, '{} got Primary from {} for view no {} ' 'whereas current view no is {}'. format(self, sender, view_no, self.view_no), logMethod=logger.info) return False new_primary_name = msg.name if new_primary_name == self.previous_master_primary: self.provider.discard(msg, '{} got Primary from {} for {} who was primary of ' 'master in previous view too'. format(self, sender, new_primary_name), logMethod=logger.info) return False # Since a node can send ViewChangeDone more than one time self._on_verified_view_change_done_msg(msg, sender) # TODO why do we check that after the message tracking if self.provider.has_primary(): self.provider.discard(msg, "it already decided primary which is {}". format(self.provider.current_primary_name()), logger.info) return False self._start_selection()
[ "def", "process_vchd_msg", "(", "self", ",", "msg", ":", "ViewChangeDone", ",", "sender", ":", "str", ")", "->", "bool", ":", "logger", ".", "info", "(", "\"{}'s primary selector started processing of ViewChangeDone msg from {} : {}\"", ".", "format", "(", "self", ".", "name", ",", "sender", ",", "msg", ")", ")", "view_no", "=", "msg", ".", "viewNo", "if", "self", ".", "view_no", "!=", "view_no", ":", "self", ".", "provider", ".", "discard", "(", "msg", ",", "'{} got Primary from {} for view no {} '", "'whereas current view no is {}'", ".", "format", "(", "self", ",", "sender", ",", "view_no", ",", "self", ".", "view_no", ")", ",", "logMethod", "=", "logger", ".", "info", ")", "return", "False", "new_primary_name", "=", "msg", ".", "name", "if", "new_primary_name", "==", "self", ".", "previous_master_primary", ":", "self", ".", "provider", ".", "discard", "(", "msg", ",", "'{} got Primary from {} for {} who was primary of '", "'master in previous view too'", ".", "format", "(", "self", ",", "sender", ",", "new_primary_name", ")", ",", "logMethod", "=", "logger", ".", "info", ")", "return", "False", "# Since a node can send ViewChangeDone more than one time", "self", ".", "_on_verified_view_change_done_msg", "(", "msg", ",", "sender", ")", "# TODO why do we check that after the message tracking", "if", "self", ".", "provider", ".", "has_primary", "(", ")", ":", "self", ".", "provider", ".", "discard", "(", "msg", ",", "\"it already decided primary which is {}\"", ".", "format", "(", "self", ".", "provider", ".", "current_primary_name", "(", ")", ")", ",", "logger", ".", "info", ")", "return", "False", "self", ".", "_start_selection", "(", ")" ]
Processes ViewChangeDone messages. Once n-f messages have been received, decides on a primary for specific replica. :param msg: ViewChangeDone message :param sender: the name of the node from which this message was sent
[ "Processes", "ViewChangeDone", "messages", ".", "Once", "n", "-", "f", "messages", "have", "been", "received", "decides", "on", "a", "primary", "for", "specific", "replica", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L418-L452
234,320
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger.sendInstanceChange
def sendInstanceChange(self, view_no: int, suspicion=Suspicions.PRIMARY_DEGRADED): """ Broadcast an instance change request to all the remaining nodes :param view_no: the view number when the instance change is requested """ # If not found any sent instance change messages in last # `ViewChangeWindowSize` seconds or the last sent instance change # message was sent long enough ago then instance change message can be # sent otherwise no. canSendInsChange, cooldown = self.insChngThrottler.acquire() if canSendInsChange: logger.info( "{}{} sending an instance change with view_no {}" " since {}".format( VIEW_CHANGE_PREFIX, self, view_no, suspicion.reason)) logger.info("{}{} metrics for monitor: {}" .format(MONITORING_PREFIX, self, self.provider.pretty_metrics())) msg = self._create_instance_change_msg(view_no, suspicion.code) self.send(msg) # record instance change vote for self and try to change the view # if quorum is reached self._on_verified_instance_change_msg(msg, self.name) else: logger.info("{} cannot send instance change sooner then {} seconds".format(self, cooldown))
python
def sendInstanceChange(self, view_no: int, suspicion=Suspicions.PRIMARY_DEGRADED): """ Broadcast an instance change request to all the remaining nodes :param view_no: the view number when the instance change is requested """ # If not found any sent instance change messages in last # `ViewChangeWindowSize` seconds or the last sent instance change # message was sent long enough ago then instance change message can be # sent otherwise no. canSendInsChange, cooldown = self.insChngThrottler.acquire() if canSendInsChange: logger.info( "{}{} sending an instance change with view_no {}" " since {}".format( VIEW_CHANGE_PREFIX, self, view_no, suspicion.reason)) logger.info("{}{} metrics for monitor: {}" .format(MONITORING_PREFIX, self, self.provider.pretty_metrics())) msg = self._create_instance_change_msg(view_no, suspicion.code) self.send(msg) # record instance change vote for self and try to change the view # if quorum is reached self._on_verified_instance_change_msg(msg, self.name) else: logger.info("{} cannot send instance change sooner then {} seconds".format(self, cooldown))
[ "def", "sendInstanceChange", "(", "self", ",", "view_no", ":", "int", ",", "suspicion", "=", "Suspicions", ".", "PRIMARY_DEGRADED", ")", ":", "# If not found any sent instance change messages in last", "# `ViewChangeWindowSize` seconds or the last sent instance change", "# message was sent long enough ago then instance change message can be", "# sent otherwise no.", "canSendInsChange", ",", "cooldown", "=", "self", ".", "insChngThrottler", ".", "acquire", "(", ")", "if", "canSendInsChange", ":", "logger", ".", "info", "(", "\"{}{} sending an instance change with view_no {}\"", "\" since {}\"", ".", "format", "(", "VIEW_CHANGE_PREFIX", ",", "self", ",", "view_no", ",", "suspicion", ".", "reason", ")", ")", "logger", ".", "info", "(", "\"{}{} metrics for monitor: {}\"", ".", "format", "(", "MONITORING_PREFIX", ",", "self", ",", "self", ".", "provider", ".", "pretty_metrics", "(", ")", ")", ")", "msg", "=", "self", ".", "_create_instance_change_msg", "(", "view_no", ",", "suspicion", ".", "code", ")", "self", ".", "send", "(", "msg", ")", "# record instance change vote for self and try to change the view", "# if quorum is reached", "self", ".", "_on_verified_instance_change_msg", "(", "msg", ",", "self", ".", "name", ")", "else", ":", "logger", ".", "info", "(", "\"{} cannot send instance change sooner then {} seconds\"", ".", "format", "(", "self", ",", "cooldown", ")", ")" ]
Broadcast an instance change request to all the remaining nodes :param view_no: the view number when the instance change is requested
[ "Broadcast", "an", "instance", "change", "request", "to", "all", "the", "remaining", "nodes" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L475-L505
234,321
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger._canViewChange
def _canViewChange(self, proposedViewNo: int) -> (bool, str): """ Return whether there's quorum for view change for the proposed view number and its view is less than or equal to the proposed view """ msg = None quorum = self.quorums.view_change.value if not self.instance_changes.has_quorum(proposedViewNo, quorum): msg = '{} has no quorum for view {}'.format(self, proposedViewNo) elif not proposedViewNo > self.view_no: msg = '{} is in higher view more than {}'.format( self, proposedViewNo) return not bool(msg), msg
python
def _canViewChange(self, proposedViewNo: int) -> (bool, str): """ Return whether there's quorum for view change for the proposed view number and its view is less than or equal to the proposed view """ msg = None quorum = self.quorums.view_change.value if not self.instance_changes.has_quorum(proposedViewNo, quorum): msg = '{} has no quorum for view {}'.format(self, proposedViewNo) elif not proposedViewNo > self.view_no: msg = '{} is in higher view more than {}'.format( self, proposedViewNo) return not bool(msg), msg
[ "def", "_canViewChange", "(", "self", ",", "proposedViewNo", ":", "int", ")", "->", "(", "bool", ",", "str", ")", ":", "msg", "=", "None", "quorum", "=", "self", ".", "quorums", ".", "view_change", ".", "value", "if", "not", "self", ".", "instance_changes", ".", "has_quorum", "(", "proposedViewNo", ",", "quorum", ")", ":", "msg", "=", "'{} has no quorum for view {}'", ".", "format", "(", "self", ",", "proposedViewNo", ")", "elif", "not", "proposedViewNo", ">", "self", ".", "view_no", ":", "msg", "=", "'{} is in higher view more than {}'", ".", "format", "(", "self", ",", "proposedViewNo", ")", "return", "not", "bool", "(", "msg", ")", ",", "msg" ]
Return whether there's quorum for view change for the proposed view number and its view is less than or equal to the proposed view
[ "Return", "whether", "there", "s", "quorum", "for", "view", "change", "for", "the", "proposed", "view", "number", "and", "its", "view", "is", "less", "than", "or", "equal", "to", "the", "proposed", "view" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L548-L561
234,322
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger.start_view_change
def start_view_change(self, proposed_view_no: int, continue_vc=False): """ Trigger the view change process. :param proposed_view_no: the new view number after view change. """ # TODO: consider moving this to pool manager # TODO: view change is a special case, which can have different # implementations - we need to make this logic pluggable if self.pre_vc_strategy and (not continue_vc): self.pre_view_change_in_progress = True self.pre_vc_strategy.prepare_view_change(proposed_view_no) return elif self.pre_vc_strategy: self.pre_vc_strategy.on_strategy_complete() self.previous_view_no = self.view_no self.view_no = proposed_view_no self.pre_view_change_in_progress = False self.view_change_in_progress = True self.previous_master_primary = self.provider.current_primary_name() self.set_defaults() self._process_vcd_for_future_view() self.initInsChngThrottling() self.provider.notify_view_change_start() self.provider.start_catchup()
python
def start_view_change(self, proposed_view_no: int, continue_vc=False): """ Trigger the view change process. :param proposed_view_no: the new view number after view change. """ # TODO: consider moving this to pool manager # TODO: view change is a special case, which can have different # implementations - we need to make this logic pluggable if self.pre_vc_strategy and (not continue_vc): self.pre_view_change_in_progress = True self.pre_vc_strategy.prepare_view_change(proposed_view_no) return elif self.pre_vc_strategy: self.pre_vc_strategy.on_strategy_complete() self.previous_view_no = self.view_no self.view_no = proposed_view_no self.pre_view_change_in_progress = False self.view_change_in_progress = True self.previous_master_primary = self.provider.current_primary_name() self.set_defaults() self._process_vcd_for_future_view() self.initInsChngThrottling() self.provider.notify_view_change_start() self.provider.start_catchup()
[ "def", "start_view_change", "(", "self", ",", "proposed_view_no", ":", "int", ",", "continue_vc", "=", "False", ")", ":", "# TODO: consider moving this to pool manager", "# TODO: view change is a special case, which can have different", "# implementations - we need to make this logic pluggable", "if", "self", ".", "pre_vc_strategy", "and", "(", "not", "continue_vc", ")", ":", "self", ".", "pre_view_change_in_progress", "=", "True", "self", ".", "pre_vc_strategy", ".", "prepare_view_change", "(", "proposed_view_no", ")", "return", "elif", "self", ".", "pre_vc_strategy", ":", "self", ".", "pre_vc_strategy", ".", "on_strategy_complete", "(", ")", "self", ".", "previous_view_no", "=", "self", ".", "view_no", "self", ".", "view_no", "=", "proposed_view_no", "self", ".", "pre_view_change_in_progress", "=", "False", "self", ".", "view_change_in_progress", "=", "True", "self", ".", "previous_master_primary", "=", "self", ".", "provider", ".", "current_primary_name", "(", ")", "self", ".", "set_defaults", "(", ")", "self", ".", "_process_vcd_for_future_view", "(", ")", "self", ".", "initInsChngThrottling", "(", ")", "self", ".", "provider", ".", "notify_view_change_start", "(", ")", "self", ".", "provider", ".", "start_catchup", "(", ")" ]
Trigger the view change process. :param proposed_view_no: the new view number after view change.
[ "Trigger", "the", "view", "change", "process", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L563-L591
234,323
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger._verify_primary
def _verify_primary(self, new_primary, ledger_info): """ This method is called when sufficient number of ViewChangeDone received and makes steps to switch to the new primary """ expected_primary = self.provider.next_primary_name() if new_primary != expected_primary: logger.error("{}{} expected next primary to be {}, but majority " "declared {} instead for view {}" .format(PRIMARY_SELECTION_PREFIX, self.name, expected_primary, new_primary, self.view_no)) return False self._primary_verified = True return True
python
def _verify_primary(self, new_primary, ledger_info): """ This method is called when sufficient number of ViewChangeDone received and makes steps to switch to the new primary """ expected_primary = self.provider.next_primary_name() if new_primary != expected_primary: logger.error("{}{} expected next primary to be {}, but majority " "declared {} instead for view {}" .format(PRIMARY_SELECTION_PREFIX, self.name, expected_primary, new_primary, self.view_no)) return False self._primary_verified = True return True
[ "def", "_verify_primary", "(", "self", ",", "new_primary", ",", "ledger_info", ")", ":", "expected_primary", "=", "self", ".", "provider", ".", "next_primary_name", "(", ")", "if", "new_primary", "!=", "expected_primary", ":", "logger", ".", "error", "(", "\"{}{} expected next primary to be {}, but majority \"", "\"declared {} instead for view {}\"", ".", "format", "(", "PRIMARY_SELECTION_PREFIX", ",", "self", ".", "name", ",", "expected_primary", ",", "new_primary", ",", "self", ".", "view_no", ")", ")", "return", "False", "self", ".", "_primary_verified", "=", "True", "return", "True" ]
This method is called when sufficient number of ViewChangeDone received and makes steps to switch to the new primary
[ "This", "method", "is", "called", "when", "sufficient", "number", "of", "ViewChangeDone", "received", "and", "makes", "steps", "to", "switch", "to", "the", "new", "primary" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L691-L705
234,324
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger._send_view_change_done_message
def _send_view_change_done_message(self): """ Sends ViewChangeDone message to other protocol participants """ new_primary_name = self.provider.next_primary_name() ledger_summary = self.provider.ledger_summary() message = ViewChangeDone(self.view_no, new_primary_name, ledger_summary) logger.info("{} is sending ViewChangeDone msg to all : {}".format(self, message)) self.send(message) self._on_verified_view_change_done_msg(message, self.name)
python
def _send_view_change_done_message(self): """ Sends ViewChangeDone message to other protocol participants """ new_primary_name = self.provider.next_primary_name() ledger_summary = self.provider.ledger_summary() message = ViewChangeDone(self.view_no, new_primary_name, ledger_summary) logger.info("{} is sending ViewChangeDone msg to all : {}".format(self, message)) self.send(message) self._on_verified_view_change_done_msg(message, self.name)
[ "def", "_send_view_change_done_message", "(", "self", ")", ":", "new_primary_name", "=", "self", ".", "provider", ".", "next_primary_name", "(", ")", "ledger_summary", "=", "self", ".", "provider", ".", "ledger_summary", "(", ")", "message", "=", "ViewChangeDone", "(", "self", ".", "view_no", ",", "new_primary_name", ",", "ledger_summary", ")", "logger", ".", "info", "(", "\"{} is sending ViewChangeDone msg to all : {}\"", ".", "format", "(", "self", ",", "message", ")", ")", "self", ".", "send", "(", "message", ")", "self", ".", "_on_verified_view_change_done_msg", "(", "message", ",", "self", ".", "name", ")" ]
Sends ViewChangeDone message to other protocol participants
[ "Sends", "ViewChangeDone", "message", "to", "other", "protocol", "participants" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L708-L721
234,325
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger.get_msgs_for_lagged_nodes
def get_msgs_for_lagged_nodes(self) -> List[ViewChangeDone]: # Should not return a list, only done for compatibility with interface """ Returns the last accepted `ViewChangeDone` message. If no view change has happened returns ViewChangeDone with view no 0 to a newly joined node """ # TODO: Consider a case where more than one node joins immediately, # then one of the node might not have an accepted # ViewChangeDone message messages = [] accepted = self._accepted_view_change_done_message if accepted: messages.append(ViewChangeDone(self.last_completed_view_no, *accepted)) elif self.name in self._view_change_done: messages.append(ViewChangeDone(self.last_completed_view_no, *self._view_change_done[self.name])) else: logger.info('{} has no ViewChangeDone message to send for view {}'. format(self, self.view_no)) return messages
python
def get_msgs_for_lagged_nodes(self) -> List[ViewChangeDone]: # Should not return a list, only done for compatibility with interface """ Returns the last accepted `ViewChangeDone` message. If no view change has happened returns ViewChangeDone with view no 0 to a newly joined node """ # TODO: Consider a case where more than one node joins immediately, # then one of the node might not have an accepted # ViewChangeDone message messages = [] accepted = self._accepted_view_change_done_message if accepted: messages.append(ViewChangeDone(self.last_completed_view_no, *accepted)) elif self.name in self._view_change_done: messages.append(ViewChangeDone(self.last_completed_view_no, *self._view_change_done[self.name])) else: logger.info('{} has no ViewChangeDone message to send for view {}'. format(self, self.view_no)) return messages
[ "def", "get_msgs_for_lagged_nodes", "(", "self", ")", "->", "List", "[", "ViewChangeDone", "]", ":", "# Should not return a list, only done for compatibility with interface", "# TODO: Consider a case where more than one node joins immediately,", "# then one of the node might not have an accepted", "# ViewChangeDone message", "messages", "=", "[", "]", "accepted", "=", "self", ".", "_accepted_view_change_done_message", "if", "accepted", ":", "messages", ".", "append", "(", "ViewChangeDone", "(", "self", ".", "last_completed_view_no", ",", "*", "accepted", ")", ")", "elif", "self", ".", "name", "in", "self", ".", "_view_change_done", ":", "messages", ".", "append", "(", "ViewChangeDone", "(", "self", ".", "last_completed_view_no", ",", "*", "self", ".", "_view_change_done", "[", "self", ".", "name", "]", ")", ")", "else", ":", "logger", ".", "info", "(", "'{} has no ViewChangeDone message to send for view {}'", ".", "format", "(", "self", ",", "self", ".", "view_no", ")", ")", "return", "messages" ]
Returns the last accepted `ViewChangeDone` message. If no view change has happened returns ViewChangeDone with view no 0 to a newly joined node
[ "Returns", "the", "last", "accepted", "ViewChangeDone", "message", ".", "If", "no", "view", "change", "has", "happened", "returns", "ViewChangeDone", "with", "view", "no", "0", "to", "a", "newly", "joined", "node" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L724-L744
234,326
hyperledger/indy-plenum
plenum/common/messages/fields.py
FieldBase.validate
def validate(self, val): """ Performs basic validation of field value and then passes it for specific validation. :param val: field value to validate :return: error message or None """ if self.nullable and val is None: return type_er = self.__type_check(val) if type_er: return type_er spec_err = self._specific_validation(val) if spec_err: return spec_err
python
def validate(self, val): """ Performs basic validation of field value and then passes it for specific validation. :param val: field value to validate :return: error message or None """ if self.nullable and val is None: return type_er = self.__type_check(val) if type_er: return type_er spec_err = self._specific_validation(val) if spec_err: return spec_err
[ "def", "validate", "(", "self", ",", "val", ")", ":", "if", "self", ".", "nullable", "and", "val", "is", "None", ":", "return", "type_er", "=", "self", ".", "__type_check", "(", "val", ")", "if", "type_er", ":", "return", "type_er", "spec_err", "=", "self", ".", "_specific_validation", "(", "val", ")", "if", "spec_err", ":", "return", "spec_err" ]
Performs basic validation of field value and then passes it for specific validation. :param val: field value to validate :return: error message or None
[ "Performs", "basic", "validation", "of", "field", "value", "and", "then", "passes", "it", "for", "specific", "validation", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/messages/fields.py#L51-L68
234,327
hyperledger/indy-plenum
plenum/common/signer_did.py
DidSigner.sign
def sign(self, msg: Dict) -> Dict: """ Return a signature for the given message. """ ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm]) bsig = self.naclSigner.signature(ser) sig = base58.b58encode(bsig).decode("utf-8") return sig
python
def sign(self, msg: Dict) -> Dict: """ Return a signature for the given message. """ ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm]) bsig = self.naclSigner.signature(ser) sig = base58.b58encode(bsig).decode("utf-8") return sig
[ "def", "sign", "(", "self", ",", "msg", ":", "Dict", ")", "->", "Dict", ":", "ser", "=", "serialize_msg_for_signing", "(", "msg", ",", "topLevelKeysToIgnore", "=", "[", "f", ".", "SIG", ".", "nm", "]", ")", "bsig", "=", "self", ".", "naclSigner", ".", "signature", "(", "ser", ")", "sig", "=", "base58", ".", "b58encode", "(", "bsig", ")", ".", "decode", "(", "\"utf-8\"", ")", "return", "sig" ]
Return a signature for the given message.
[ "Return", "a", "signature", "for", "the", "given", "message", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/signer_did.py#L122-L129
234,328
hyperledger/indy-plenum
plenum/server/replica.py
Replica.lastPrePrepareSeqNo
def lastPrePrepareSeqNo(self, n): """ This will _lastPrePrepareSeqNo to values greater than its previous values else it will not. To forcefully override as in case of `revert`, directly set `self._lastPrePrepareSeqNo` """ if n > self._lastPrePrepareSeqNo: self._lastPrePrepareSeqNo = n else: self.logger.debug( '{} cannot set lastPrePrepareSeqNo to {} as its ' 'already {}'.format( self, n, self._lastPrePrepareSeqNo))
python
def lastPrePrepareSeqNo(self, n): """ This will _lastPrePrepareSeqNo to values greater than its previous values else it will not. To forcefully override as in case of `revert`, directly set `self._lastPrePrepareSeqNo` """ if n > self._lastPrePrepareSeqNo: self._lastPrePrepareSeqNo = n else: self.logger.debug( '{} cannot set lastPrePrepareSeqNo to {} as its ' 'already {}'.format( self, n, self._lastPrePrepareSeqNo))
[ "def", "lastPrePrepareSeqNo", "(", "self", ",", "n", ")", ":", "if", "n", ">", "self", ".", "_lastPrePrepareSeqNo", ":", "self", ".", "_lastPrePrepareSeqNo", "=", "n", "else", ":", "self", ".", "logger", ".", "debug", "(", "'{} cannot set lastPrePrepareSeqNo to {} as its '", "'already {}'", ".", "format", "(", "self", ",", "n", ",", "self", ".", "_lastPrePrepareSeqNo", ")", ")" ]
This will _lastPrePrepareSeqNo to values greater than its previous values else it will not. To forcefully override as in case of `revert`, directly set `self._lastPrePrepareSeqNo`
[ "This", "will", "_lastPrePrepareSeqNo", "to", "values", "greater", "than", "its", "previous", "values", "else", "it", "will", "not", ".", "To", "forcefully", "override", "as", "in", "case", "of", "revert", "directly", "set", "self", ".", "_lastPrePrepareSeqNo" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L515-L527
234,329
hyperledger/indy-plenum
plenum/server/replica.py
Replica.primaryName
def primaryName(self, value: Optional[str]) -> None: """ Set the value of isPrimary. :param value: the value to set isPrimary to """ if value is not None: self.warned_no_primary = False self.primaryNames[self.viewNo] = value self.compact_primary_names() if value != self._primaryName: self._primaryName = value self.logger.info("{} setting primaryName for view no {} to: {}". format(self, self.viewNo, value)) if value is None: # Since the GC needs to happen after a primary has been # decided. return self._gc_before_new_view() if self.__should_reset_watermarks_before_new_view(): self._reset_watermarks_before_new_view()
python
def primaryName(self, value: Optional[str]) -> None: """ Set the value of isPrimary. :param value: the value to set isPrimary to """ if value is not None: self.warned_no_primary = False self.primaryNames[self.viewNo] = value self.compact_primary_names() if value != self._primaryName: self._primaryName = value self.logger.info("{} setting primaryName for view no {} to: {}". format(self, self.viewNo, value)) if value is None: # Since the GC needs to happen after a primary has been # decided. return self._gc_before_new_view() if self.__should_reset_watermarks_before_new_view(): self._reset_watermarks_before_new_view()
[ "def", "primaryName", "(", "self", ",", "value", ":", "Optional", "[", "str", "]", ")", "->", "None", ":", "if", "value", "is", "not", "None", ":", "self", ".", "warned_no_primary", "=", "False", "self", ".", "primaryNames", "[", "self", ".", "viewNo", "]", "=", "value", "self", ".", "compact_primary_names", "(", ")", "if", "value", "!=", "self", ".", "_primaryName", ":", "self", ".", "_primaryName", "=", "value", "self", ".", "logger", ".", "info", "(", "\"{} setting primaryName for view no {} to: {}\"", ".", "format", "(", "self", ",", "self", ".", "viewNo", ",", "value", ")", ")", "if", "value", "is", "None", ":", "# Since the GC needs to happen after a primary has been", "# decided.", "return", "self", ".", "_gc_before_new_view", "(", ")", "if", "self", ".", "__should_reset_watermarks_before_new_view", "(", ")", ":", "self", ".", "_reset_watermarks_before_new_view", "(", ")" ]
Set the value of isPrimary. :param value: the value to set isPrimary to
[ "Set", "the", "value", "of", "isPrimary", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L598-L618
234,330
hyperledger/indy-plenum
plenum/server/replica.py
Replica.get_lowest_probable_prepared_certificate_in_view
def get_lowest_probable_prepared_certificate_in_view( self, view_no) -> Optional[int]: """ Return lowest pp_seq_no of the view for which can be prepared but choose from unprocessed PRE-PREPAREs and PREPAREs. """ # TODO: Naive implementation, dont need to iterate over the complete # data structures, fix this later seq_no_pp = SortedList() # pp_seq_no of PRE-PREPAREs # pp_seq_no of PREPAREs with count of PREPAREs for each seq_no_p = set() for (v, p) in self.prePreparesPendingPrevPP: if v == view_no: seq_no_pp.add(p) if v > view_no: break for (v, p), pr in self.preparesWaitingForPrePrepare.items(): if v == view_no and len(pr) >= self.quorums.prepare.value: seq_no_p.add(p) for n in seq_no_pp: if n in seq_no_p: return n return None
python
def get_lowest_probable_prepared_certificate_in_view( self, view_no) -> Optional[int]: """ Return lowest pp_seq_no of the view for which can be prepared but choose from unprocessed PRE-PREPAREs and PREPAREs. """ # TODO: Naive implementation, dont need to iterate over the complete # data structures, fix this later seq_no_pp = SortedList() # pp_seq_no of PRE-PREPAREs # pp_seq_no of PREPAREs with count of PREPAREs for each seq_no_p = set() for (v, p) in self.prePreparesPendingPrevPP: if v == view_no: seq_no_pp.add(p) if v > view_no: break for (v, p), pr in self.preparesWaitingForPrePrepare.items(): if v == view_no and len(pr) >= self.quorums.prepare.value: seq_no_p.add(p) for n in seq_no_pp: if n in seq_no_p: return n return None
[ "def", "get_lowest_probable_prepared_certificate_in_view", "(", "self", ",", "view_no", ")", "->", "Optional", "[", "int", "]", ":", "# TODO: Naive implementation, dont need to iterate over the complete", "# data structures, fix this later", "seq_no_pp", "=", "SortedList", "(", ")", "# pp_seq_no of PRE-PREPAREs", "# pp_seq_no of PREPAREs with count of PREPAREs for each", "seq_no_p", "=", "set", "(", ")", "for", "(", "v", ",", "p", ")", "in", "self", ".", "prePreparesPendingPrevPP", ":", "if", "v", "==", "view_no", ":", "seq_no_pp", ".", "add", "(", "p", ")", "if", "v", ">", "view_no", ":", "break", "for", "(", "v", ",", "p", ")", ",", "pr", "in", "self", ".", "preparesWaitingForPrePrepare", ".", "items", "(", ")", ":", "if", "v", "==", "view_no", "and", "len", "(", "pr", ")", ">=", "self", ".", "quorums", ".", "prepare", ".", "value", ":", "seq_no_p", ".", "add", "(", "p", ")", "for", "n", "in", "seq_no_pp", ":", "if", "n", "in", "seq_no_p", ":", "return", "n", "return", "None" ]
Return lowest pp_seq_no of the view for which can be prepared but choose from unprocessed PRE-PREPAREs and PREPAREs.
[ "Return", "lowest", "pp_seq_no", "of", "the", "view", "for", "which", "can", "be", "prepared", "but", "choose", "from", "unprocessed", "PRE", "-", "PREPAREs", "and", "PREPAREs", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L692-L717
234,331
hyperledger/indy-plenum
plenum/server/replica.py
Replica.is_primary_in_view
def is_primary_in_view(self, viewNo: int) -> Optional[bool]: """ Return whether this replica was primary in the given view """ if viewNo not in self.primaryNames: return False return self.primaryNames[viewNo] == self.name
python
def is_primary_in_view(self, viewNo: int) -> Optional[bool]: """ Return whether this replica was primary in the given view """ if viewNo not in self.primaryNames: return False return self.primaryNames[viewNo] == self.name
[ "def", "is_primary_in_view", "(", "self", ",", "viewNo", ":", "int", ")", "->", "Optional", "[", "bool", "]", ":", "if", "viewNo", "not", "in", "self", ".", "primaryNames", ":", "return", "False", "return", "self", ".", "primaryNames", "[", "viewNo", "]", "==", "self", ".", "name" ]
Return whether this replica was primary in the given view
[ "Return", "whether", "this", "replica", "was", "primary", "in", "the", "given", "view" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L747-L753
234,332
hyperledger/indy-plenum
plenum/server/replica.py
Replica.processReqDuringBatch
def processReqDuringBatch( self, req: Request, cons_time: int): """ This method will do dynamic validation and apply requests. If there is any errors during validation it would be raised """ if self.isMaster: self.node.doDynamicValidation(req) self.node.applyReq(req, cons_time)
python
def processReqDuringBatch( self, req: Request, cons_time: int): """ This method will do dynamic validation and apply requests. If there is any errors during validation it would be raised """ if self.isMaster: self.node.doDynamicValidation(req) self.node.applyReq(req, cons_time)
[ "def", "processReqDuringBatch", "(", "self", ",", "req", ":", "Request", ",", "cons_time", ":", "int", ")", ":", "if", "self", ".", "isMaster", ":", "self", ".", "node", ".", "doDynamicValidation", "(", "req", ")", "self", ".", "node", ".", "applyReq", "(", "req", ",", "cons_time", ")" ]
This method will do dynamic validation and apply requests. If there is any errors during validation it would be raised
[ "This", "method", "will", "do", "dynamic", "validation", "and", "apply", "requests", ".", "If", "there", "is", "any", "errors", "during", "validation", "it", "would", "be", "raised" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L902-L912
234,333
hyperledger/indy-plenum
plenum/server/replica.py
Replica.serviceQueues
def serviceQueues(self, limit=None): """ Process `limit` number of messages in the inBox. :param limit: the maximum number of messages to process :return: the number of messages successfully processed """ # TODO should handle SuspiciousNode here r = self.dequeue_pre_prepares() r += self.inBoxRouter.handleAllSync(self.inBox, limit) r += self.send_3pc_batch() r += self._serviceActions() return r
python
def serviceQueues(self, limit=None): """ Process `limit` number of messages in the inBox. :param limit: the maximum number of messages to process :return: the number of messages successfully processed """ # TODO should handle SuspiciousNode here r = self.dequeue_pre_prepares() r += self.inBoxRouter.handleAllSync(self.inBox, limit) r += self.send_3pc_batch() r += self._serviceActions() return r
[ "def", "serviceQueues", "(", "self", ",", "limit", "=", "None", ")", ":", "# TODO should handle SuspiciousNode here", "r", "=", "self", ".", "dequeue_pre_prepares", "(", ")", "r", "+=", "self", ".", "inBoxRouter", ".", "handleAllSync", "(", "self", ".", "inBox", ",", "limit", ")", "r", "+=", "self", ".", "send_3pc_batch", "(", ")", "r", "+=", "self", ".", "_serviceActions", "(", ")", "return", "r" ]
Process `limit` number of messages in the inBox. :param limit: the maximum number of messages to process :return: the number of messages successfully processed
[ "Process", "limit", "number", "of", "messages", "in", "the", "inBox", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1043-L1055
234,334
hyperledger/indy-plenum
plenum/server/replica.py
Replica.tryPrepare
def tryPrepare(self, pp: PrePrepare): """ Try to send the Prepare message if the PrePrepare message is ready to be passed into the Prepare phase. """ rv, msg = self.canPrepare(pp) if rv: self.doPrepare(pp) else: self.logger.debug("{} cannot send PREPARE since {}".format(self, msg))
python
def tryPrepare(self, pp: PrePrepare): """ Try to send the Prepare message if the PrePrepare message is ready to be passed into the Prepare phase. """ rv, msg = self.canPrepare(pp) if rv: self.doPrepare(pp) else: self.logger.debug("{} cannot send PREPARE since {}".format(self, msg))
[ "def", "tryPrepare", "(", "self", ",", "pp", ":", "PrePrepare", ")", ":", "rv", ",", "msg", "=", "self", ".", "canPrepare", "(", "pp", ")", "if", "rv", ":", "self", ".", "doPrepare", "(", "pp", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"{} cannot send PREPARE since {}\"", ".", "format", "(", "self", ",", "msg", ")", ")" ]
Try to send the Prepare message if the PrePrepare message is ready to be passed into the Prepare phase.
[ "Try", "to", "send", "the", "Prepare", "message", "if", "the", "PrePrepare", "message", "is", "ready", "to", "be", "passed", "into", "the", "Prepare", "phase", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1293-L1302
234,335
hyperledger/indy-plenum
plenum/server/replica.py
Replica.processPrepare
def processPrepare(self, prepare: Prepare, sender: str) -> None: """ Validate and process the PREPARE specified. If validation is successful, create a COMMIT and broadcast it. :param prepare: a PREPARE msg :param sender: name of the node that sent the PREPARE """ key = (prepare.viewNo, prepare.ppSeqNo) self.logger.debug("{} received PREPARE{} from {}".format(self, key, sender)) # TODO move this try/except up higher try: if self.validatePrepare(prepare, sender): self.addToPrepares(prepare, sender) self.stats.inc(TPCStat.PrepareRcvd) self.logger.debug("{} processed incoming PREPARE {}".format( self, (prepare.viewNo, prepare.ppSeqNo))) else: # TODO let's have isValidPrepare throw an exception that gets # handled and possibly logged higher self.logger.trace("{} cannot process incoming PREPARE".format(self)) except SuspiciousNode as ex: self.report_suspicious_node(ex)
python
def processPrepare(self, prepare: Prepare, sender: str) -> None: """ Validate and process the PREPARE specified. If validation is successful, create a COMMIT and broadcast it. :param prepare: a PREPARE msg :param sender: name of the node that sent the PREPARE """ key = (prepare.viewNo, prepare.ppSeqNo) self.logger.debug("{} received PREPARE{} from {}".format(self, key, sender)) # TODO move this try/except up higher try: if self.validatePrepare(prepare, sender): self.addToPrepares(prepare, sender) self.stats.inc(TPCStat.PrepareRcvd) self.logger.debug("{} processed incoming PREPARE {}".format( self, (prepare.viewNo, prepare.ppSeqNo))) else: # TODO let's have isValidPrepare throw an exception that gets # handled and possibly logged higher self.logger.trace("{} cannot process incoming PREPARE".format(self)) except SuspiciousNode as ex: self.report_suspicious_node(ex)
[ "def", "processPrepare", "(", "self", ",", "prepare", ":", "Prepare", ",", "sender", ":", "str", ")", "->", "None", ":", "key", "=", "(", "prepare", ".", "viewNo", ",", "prepare", ".", "ppSeqNo", ")", "self", ".", "logger", ".", "debug", "(", "\"{} received PREPARE{} from {}\"", ".", "format", "(", "self", ",", "key", ",", "sender", ")", ")", "# TODO move this try/except up higher", "try", ":", "if", "self", ".", "validatePrepare", "(", "prepare", ",", "sender", ")", ":", "self", ".", "addToPrepares", "(", "prepare", ",", "sender", ")", "self", ".", "stats", ".", "inc", "(", "TPCStat", ".", "PrepareRcvd", ")", "self", ".", "logger", ".", "debug", "(", "\"{} processed incoming PREPARE {}\"", ".", "format", "(", "self", ",", "(", "prepare", ".", "viewNo", ",", "prepare", ".", "ppSeqNo", ")", ")", ")", "else", ":", "# TODO let's have isValidPrepare throw an exception that gets", "# handled and possibly logged higher", "self", ".", "logger", ".", "trace", "(", "\"{} cannot process incoming PREPARE\"", ".", "format", "(", "self", ")", ")", "except", "SuspiciousNode", "as", "ex", ":", "self", ".", "report_suspicious_node", "(", "ex", ")" ]
Validate and process the PREPARE specified. If validation is successful, create a COMMIT and broadcast it. :param prepare: a PREPARE msg :param sender: name of the node that sent the PREPARE
[ "Validate", "and", "process", "the", "PREPARE", "specified", ".", "If", "validation", "is", "successful", "create", "a", "COMMIT", "and", "broadcast", "it", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1306-L1329
234,336
hyperledger/indy-plenum
plenum/server/replica.py
Replica.processCommit
def processCommit(self, commit: Commit, sender: str) -> None: """ Validate and process the COMMIT specified. If validation is successful, return the message to the node. :param commit: an incoming COMMIT message :param sender: name of the node that sent the COMMIT """ self.logger.debug("{} received COMMIT{} from {}".format( self, (commit.viewNo, commit.ppSeqNo), sender)) if self.validateCommit(commit, sender): self.stats.inc(TPCStat.CommitRcvd) self.addToCommits(commit, sender) self.logger.debug("{} processed incoming COMMIT{}".format( self, (commit.viewNo, commit.ppSeqNo)))
python
def processCommit(self, commit: Commit, sender: str) -> None: """ Validate and process the COMMIT specified. If validation is successful, return the message to the node. :param commit: an incoming COMMIT message :param sender: name of the node that sent the COMMIT """ self.logger.debug("{} received COMMIT{} from {}".format( self, (commit.viewNo, commit.ppSeqNo), sender)) if self.validateCommit(commit, sender): self.stats.inc(TPCStat.CommitRcvd) self.addToCommits(commit, sender) self.logger.debug("{} processed incoming COMMIT{}".format( self, (commit.viewNo, commit.ppSeqNo)))
[ "def", "processCommit", "(", "self", ",", "commit", ":", "Commit", ",", "sender", ":", "str", ")", "->", "None", ":", "self", ".", "logger", ".", "debug", "(", "\"{} received COMMIT{} from {}\"", ".", "format", "(", "self", ",", "(", "commit", ".", "viewNo", ",", "commit", ".", "ppSeqNo", ")", ",", "sender", ")", ")", "if", "self", ".", "validateCommit", "(", "commit", ",", "sender", ")", ":", "self", ".", "stats", ".", "inc", "(", "TPCStat", ".", "CommitRcvd", ")", "self", ".", "addToCommits", "(", "commit", ",", "sender", ")", "self", ".", "logger", ".", "debug", "(", "\"{} processed incoming COMMIT{}\"", ".", "format", "(", "self", ",", "(", "commit", ".", "viewNo", ",", "commit", ".", "ppSeqNo", ")", ")", ")" ]
Validate and process the COMMIT specified. If validation is successful, return the message to the node. :param commit: an incoming COMMIT message :param sender: name of the node that sent the COMMIT
[ "Validate", "and", "process", "the", "COMMIT", "specified", ".", "If", "validation", "is", "successful", "return", "the", "message", "to", "the", "node", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1333-L1348
234,337
hyperledger/indy-plenum
plenum/server/replica.py
Replica.tryCommit
def tryCommit(self, prepare: Prepare): """ Try to commit if the Prepare message is ready to be passed into the commit phase. """ rv, reason = self.canCommit(prepare) if rv: self.doCommit(prepare) else: self.logger.debug("{} cannot send COMMIT since {}".format(self, reason))
python
def tryCommit(self, prepare: Prepare): """ Try to commit if the Prepare message is ready to be passed into the commit phase. """ rv, reason = self.canCommit(prepare) if rv: self.doCommit(prepare) else: self.logger.debug("{} cannot send COMMIT since {}".format(self, reason))
[ "def", "tryCommit", "(", "self", ",", "prepare", ":", "Prepare", ")", ":", "rv", ",", "reason", "=", "self", ".", "canCommit", "(", "prepare", ")", "if", "rv", ":", "self", ".", "doCommit", "(", "prepare", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"{} cannot send COMMIT since {}\"", ".", "format", "(", "self", ",", "reason", ")", ")" ]
Try to commit if the Prepare message is ready to be passed into the commit phase.
[ "Try", "to", "commit", "if", "the", "Prepare", "message", "is", "ready", "to", "be", "passed", "into", "the", "commit", "phase", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1350-L1359
234,338
hyperledger/indy-plenum
plenum/server/replica.py
Replica.tryOrder
def tryOrder(self, commit: Commit): """ Try to order if the Commit message is ready to be ordered. """ canOrder, reason = self.canOrder(commit) if canOrder: self.logger.trace("{} returning request to node".format(self)) self.doOrder(commit) else: self.logger.debug("{} cannot return request to node: {}".format(self, reason)) return canOrder
python
def tryOrder(self, commit: Commit): """ Try to order if the Commit message is ready to be ordered. """ canOrder, reason = self.canOrder(commit) if canOrder: self.logger.trace("{} returning request to node".format(self)) self.doOrder(commit) else: self.logger.debug("{} cannot return request to node: {}".format(self, reason)) return canOrder
[ "def", "tryOrder", "(", "self", ",", "commit", ":", "Commit", ")", ":", "canOrder", ",", "reason", "=", "self", ".", "canOrder", "(", "commit", ")", "if", "canOrder", ":", "self", ".", "logger", ".", "trace", "(", "\"{} returning request to node\"", ".", "format", "(", "self", ")", ")", "self", ".", "doOrder", "(", "commit", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"{} cannot return request to node: {}\"", ".", "format", "(", "self", ",", "reason", ")", ")", "return", "canOrder" ]
Try to order if the Commit message is ready to be ordered.
[ "Try", "to", "order", "if", "the", "Commit", "message", "is", "ready", "to", "be", "ordered", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1361-L1371
234,339
hyperledger/indy-plenum
plenum/server/replica.py
Replica.nonFinalisedReqs
def nonFinalisedReqs(self, reqKeys: List[Tuple[str, int]]): """ Check if there are any requests which are not finalised, i.e for which there are not enough PROPAGATEs """ return {key for key in reqKeys if not self.requests.is_finalised(key)}
python
def nonFinalisedReqs(self, reqKeys: List[Tuple[str, int]]): """ Check if there are any requests which are not finalised, i.e for which there are not enough PROPAGATEs """ return {key for key in reqKeys if not self.requests.is_finalised(key)}
[ "def", "nonFinalisedReqs", "(", "self", ",", "reqKeys", ":", "List", "[", "Tuple", "[", "str", ",", "int", "]", "]", ")", ":", "return", "{", "key", "for", "key", "in", "reqKeys", "if", "not", "self", ".", "requests", ".", "is_finalised", "(", "key", ")", "}" ]
Check if there are any requests which are not finalised, i.e for which there are not enough PROPAGATEs
[ "Check", "if", "there", "are", "any", "requests", "which", "are", "not", "finalised", "i", ".", "e", "for", "which", "there", "are", "not", "enough", "PROPAGATEs" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1428-L1433
234,340
hyperledger/indy-plenum
plenum/server/replica.py
Replica._can_process_pre_prepare
def _can_process_pre_prepare(self, pre_prepare: PrePrepare, sender: str) -> Optional[int]: """ Decide whether this replica is eligible to process a PRE-PREPARE. :param pre_prepare: a PRE-PREPARE msg to process :param sender: the name of the node that sent the PRE-PREPARE msg """ # TODO: Check whether it is rejecting PRE-PREPARE from previous view # PRE-PREPARE should not be sent from non primary if not self.isMsgFromPrimary(pre_prepare, sender): return PP_CHECK_NOT_FROM_PRIMARY # Already has a PRE-PREPARE with same 3 phase key if (pre_prepare.viewNo, pre_prepare.ppSeqNo) in self.prePrepares: return PP_CHECK_DUPLICATE if not self.is_pre_prepare_time_acceptable(pre_prepare, sender): return PP_CHECK_WRONG_TIME if compare_3PC_keys((pre_prepare.viewNo, pre_prepare.ppSeqNo), self.__last_pp_3pc) > 0: return PP_CHECK_OLD # ignore old pre-prepare if self.nonFinalisedReqs(pre_prepare.reqIdr): return PP_CHECK_REQUEST_NOT_FINALIZED if not self.__is_next_pre_prepare(pre_prepare.viewNo, pre_prepare.ppSeqNo): return PP_CHECK_NOT_NEXT if f.POOL_STATE_ROOT_HASH.nm in pre_prepare and \ pre_prepare.poolStateRootHash != self.stateRootHash(POOL_LEDGER_ID): return PP_CHECK_INCORRECT_POOL_STATE_ROOT # BLS multi-sig: status = self._bls_bft_replica.validate_pre_prepare(pre_prepare, sender) if status is not None: return status return None
python
def _can_process_pre_prepare(self, pre_prepare: PrePrepare, sender: str) -> Optional[int]: """ Decide whether this replica is eligible to process a PRE-PREPARE. :param pre_prepare: a PRE-PREPARE msg to process :param sender: the name of the node that sent the PRE-PREPARE msg """ # TODO: Check whether it is rejecting PRE-PREPARE from previous view # PRE-PREPARE should not be sent from non primary if not self.isMsgFromPrimary(pre_prepare, sender): return PP_CHECK_NOT_FROM_PRIMARY # Already has a PRE-PREPARE with same 3 phase key if (pre_prepare.viewNo, pre_prepare.ppSeqNo) in self.prePrepares: return PP_CHECK_DUPLICATE if not self.is_pre_prepare_time_acceptable(pre_prepare, sender): return PP_CHECK_WRONG_TIME if compare_3PC_keys((pre_prepare.viewNo, pre_prepare.ppSeqNo), self.__last_pp_3pc) > 0: return PP_CHECK_OLD # ignore old pre-prepare if self.nonFinalisedReqs(pre_prepare.reqIdr): return PP_CHECK_REQUEST_NOT_FINALIZED if not self.__is_next_pre_prepare(pre_prepare.viewNo, pre_prepare.ppSeqNo): return PP_CHECK_NOT_NEXT if f.POOL_STATE_ROOT_HASH.nm in pre_prepare and \ pre_prepare.poolStateRootHash != self.stateRootHash(POOL_LEDGER_ID): return PP_CHECK_INCORRECT_POOL_STATE_ROOT # BLS multi-sig: status = self._bls_bft_replica.validate_pre_prepare(pre_prepare, sender) if status is not None: return status return None
[ "def", "_can_process_pre_prepare", "(", "self", ",", "pre_prepare", ":", "PrePrepare", ",", "sender", ":", "str", ")", "->", "Optional", "[", "int", "]", ":", "# TODO: Check whether it is rejecting PRE-PREPARE from previous view", "# PRE-PREPARE should not be sent from non primary", "if", "not", "self", ".", "isMsgFromPrimary", "(", "pre_prepare", ",", "sender", ")", ":", "return", "PP_CHECK_NOT_FROM_PRIMARY", "# Already has a PRE-PREPARE with same 3 phase key", "if", "(", "pre_prepare", ".", "viewNo", ",", "pre_prepare", ".", "ppSeqNo", ")", "in", "self", ".", "prePrepares", ":", "return", "PP_CHECK_DUPLICATE", "if", "not", "self", ".", "is_pre_prepare_time_acceptable", "(", "pre_prepare", ",", "sender", ")", ":", "return", "PP_CHECK_WRONG_TIME", "if", "compare_3PC_keys", "(", "(", "pre_prepare", ".", "viewNo", ",", "pre_prepare", ".", "ppSeqNo", ")", ",", "self", ".", "__last_pp_3pc", ")", ">", "0", ":", "return", "PP_CHECK_OLD", "# ignore old pre-prepare", "if", "self", ".", "nonFinalisedReqs", "(", "pre_prepare", ".", "reqIdr", ")", ":", "return", "PP_CHECK_REQUEST_NOT_FINALIZED", "if", "not", "self", ".", "__is_next_pre_prepare", "(", "pre_prepare", ".", "viewNo", ",", "pre_prepare", ".", "ppSeqNo", ")", ":", "return", "PP_CHECK_NOT_NEXT", "if", "f", ".", "POOL_STATE_ROOT_HASH", ".", "nm", "in", "pre_prepare", "and", "pre_prepare", ".", "poolStateRootHash", "!=", "self", ".", "stateRootHash", "(", "POOL_LEDGER_ID", ")", ":", "return", "PP_CHECK_INCORRECT_POOL_STATE_ROOT", "# BLS multi-sig:", "status", "=", "self", ".", "_bls_bft_replica", ".", "validate_pre_prepare", "(", "pre_prepare", ",", "sender", ")", "if", "status", "is", "not", "None", ":", "return", "status", "return", "None" ]
Decide whether this replica is eligible to process a PRE-PREPARE. :param pre_prepare: a PRE-PREPARE msg to process :param sender: the name of the node that sent the PRE-PREPARE msg
[ "Decide", "whether", "this", "replica", "is", "eligible", "to", "process", "a", "PRE", "-", "PREPARE", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1539-L1579
234,341
hyperledger/indy-plenum
plenum/server/replica.py
Replica.addToPrePrepares
def addToPrePrepares(self, pp: PrePrepare) -> None: """ Add the specified PRE-PREPARE to this replica's list of received PRE-PREPAREs and try sending PREPARE :param pp: the PRE-PREPARE to add to the list """ key = (pp.viewNo, pp.ppSeqNo) self.prePrepares[key] = pp self.lastPrePrepareSeqNo = pp.ppSeqNo self.last_accepted_pre_prepare_time = pp.ppTime self.dequeue_prepares(*key) self.dequeue_commits(*key) self.stats.inc(TPCStat.PrePrepareRcvd) self.tryPrepare(pp)
python
def addToPrePrepares(self, pp: PrePrepare) -> None: """ Add the specified PRE-PREPARE to this replica's list of received PRE-PREPAREs and try sending PREPARE :param pp: the PRE-PREPARE to add to the list """ key = (pp.viewNo, pp.ppSeqNo) self.prePrepares[key] = pp self.lastPrePrepareSeqNo = pp.ppSeqNo self.last_accepted_pre_prepare_time = pp.ppTime self.dequeue_prepares(*key) self.dequeue_commits(*key) self.stats.inc(TPCStat.PrePrepareRcvd) self.tryPrepare(pp)
[ "def", "addToPrePrepares", "(", "self", ",", "pp", ":", "PrePrepare", ")", "->", "None", ":", "key", "=", "(", "pp", ".", "viewNo", ",", "pp", ".", "ppSeqNo", ")", "self", ".", "prePrepares", "[", "key", "]", "=", "pp", "self", ".", "lastPrePrepareSeqNo", "=", "pp", ".", "ppSeqNo", "self", ".", "last_accepted_pre_prepare_time", "=", "pp", ".", "ppTime", "self", ".", "dequeue_prepares", "(", "*", "key", ")", "self", ".", "dequeue_commits", "(", "*", "key", ")", "self", ".", "stats", ".", "inc", "(", "TPCStat", ".", "PrePrepareRcvd", ")", "self", ".", "tryPrepare", "(", "pp", ")" ]
Add the specified PRE-PREPARE to this replica's list of received PRE-PREPAREs and try sending PREPARE :param pp: the PRE-PREPARE to add to the list
[ "Add", "the", "specified", "PRE", "-", "PREPARE", "to", "this", "replica", "s", "list", "of", "received", "PRE", "-", "PREPAREs", "and", "try", "sending", "PREPARE" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1581-L1595
234,342
hyperledger/indy-plenum
plenum/server/replica.py
Replica.canPrepare
def canPrepare(self, ppReq) -> (bool, str): """ Return whether the batch of requests in the PRE-PREPARE can proceed to the PREPARE step. :param ppReq: any object with identifier and requestId attributes """ if self.has_sent_prepare(ppReq): return False, 'has already sent PREPARE for {}'.format(ppReq) return True, ''
python
def canPrepare(self, ppReq) -> (bool, str): """ Return whether the batch of requests in the PRE-PREPARE can proceed to the PREPARE step. :param ppReq: any object with identifier and requestId attributes """ if self.has_sent_prepare(ppReq): return False, 'has already sent PREPARE for {}'.format(ppReq) return True, ''
[ "def", "canPrepare", "(", "self", ",", "ppReq", ")", "->", "(", "bool", ",", "str", ")", ":", "if", "self", ".", "has_sent_prepare", "(", "ppReq", ")", ":", "return", "False", ",", "'has already sent PREPARE for {}'", ".", "format", "(", "ppReq", ")", "return", "True", ",", "''" ]
Return whether the batch of requests in the PRE-PREPARE can proceed to the PREPARE step. :param ppReq: any object with identifier and requestId attributes
[ "Return", "whether", "the", "batch", "of", "requests", "in", "the", "PRE", "-", "PREPARE", "can", "proceed", "to", "the", "PREPARE", "step", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1600-L1609
234,343
hyperledger/indy-plenum
plenum/server/replica.py
Replica.validatePrepare
def validatePrepare(self, prepare: Prepare, sender: str) -> bool: """ Return whether the PREPARE specified is valid. :param prepare: the PREPARE to validate :param sender: the name of the node that sent the PREPARE :return: True if PREPARE is valid, False otherwise """ key = (prepare.viewNo, prepare.ppSeqNo) primaryStatus = self.isPrimaryForMsg(prepare) ppReq = self.getPrePrepare(*key) # If a non primary replica and receiving a PREPARE request before a # PRE-PREPARE request, then proceed # PREPARE should not be sent from primary if self.isMsgFromPrimary(prepare, sender): raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare) # If non primary replica if primaryStatus is False: if self.prepares.hasPrepareFrom(prepare, sender): raise SuspiciousNode( sender, Suspicions.DUPLICATE_PR_SENT, prepare) # If PRE-PREPARE not received for the PREPARE, might be slow # network if not ppReq: self.enqueue_prepare(prepare, sender) self._setup_last_ordered_for_non_master() return False # If primary replica if primaryStatus is True: if self.prepares.hasPrepareFrom(prepare, sender): raise SuspiciousNode( sender, Suspicions.DUPLICATE_PR_SENT, prepare) # If PRE-PREPARE was not sent for this PREPARE, certainly # malicious behavior elif not ppReq: raise SuspiciousNode( sender, Suspicions.UNKNOWN_PR_SENT, prepare) if primaryStatus is None and not ppReq: self.enqueue_prepare(prepare, sender) self._setup_last_ordered_for_non_master() return False if prepare.digest != ppReq.digest: raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare) elif prepare.stateRootHash != ppReq.stateRootHash: raise SuspiciousNode(sender, Suspicions.PR_STATE_WRONG, prepare) elif prepare.txnRootHash != ppReq.txnRootHash: raise SuspiciousNode(sender, Suspicions.PR_TXN_WRONG, prepare) elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash: raise SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG, prepare) try: self.execute_hook(ReplicaHooks.VALIDATE_PR, prepare, ppReq) except Exception 
as ex: self.logger.warning('{} encountered exception in replica ' 'hook {} : {}'. format(self, ReplicaHooks.VALIDATE_PR, ex)) raise SuspiciousNode(sender, Suspicions.PR_PLUGIN_EXCEPTION, prepare) # BLS multi-sig: self._bls_bft_replica.validate_prepare(prepare, sender) return True
python
def validatePrepare(self, prepare: Prepare, sender: str) -> bool: """ Return whether the PREPARE specified is valid. :param prepare: the PREPARE to validate :param sender: the name of the node that sent the PREPARE :return: True if PREPARE is valid, False otherwise """ key = (prepare.viewNo, prepare.ppSeqNo) primaryStatus = self.isPrimaryForMsg(prepare) ppReq = self.getPrePrepare(*key) # If a non primary replica and receiving a PREPARE request before a # PRE-PREPARE request, then proceed # PREPARE should not be sent from primary if self.isMsgFromPrimary(prepare, sender): raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare) # If non primary replica if primaryStatus is False: if self.prepares.hasPrepareFrom(prepare, sender): raise SuspiciousNode( sender, Suspicions.DUPLICATE_PR_SENT, prepare) # If PRE-PREPARE not received for the PREPARE, might be slow # network if not ppReq: self.enqueue_prepare(prepare, sender) self._setup_last_ordered_for_non_master() return False # If primary replica if primaryStatus is True: if self.prepares.hasPrepareFrom(prepare, sender): raise SuspiciousNode( sender, Suspicions.DUPLICATE_PR_SENT, prepare) # If PRE-PREPARE was not sent for this PREPARE, certainly # malicious behavior elif not ppReq: raise SuspiciousNode( sender, Suspicions.UNKNOWN_PR_SENT, prepare) if primaryStatus is None and not ppReq: self.enqueue_prepare(prepare, sender) self._setup_last_ordered_for_non_master() return False if prepare.digest != ppReq.digest: raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare) elif prepare.stateRootHash != ppReq.stateRootHash: raise SuspiciousNode(sender, Suspicions.PR_STATE_WRONG, prepare) elif prepare.txnRootHash != ppReq.txnRootHash: raise SuspiciousNode(sender, Suspicions.PR_TXN_WRONG, prepare) elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash: raise SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG, prepare) try: self.execute_hook(ReplicaHooks.VALIDATE_PR, prepare, ppReq) except Exception 
as ex: self.logger.warning('{} encountered exception in replica ' 'hook {} : {}'. format(self, ReplicaHooks.VALIDATE_PR, ex)) raise SuspiciousNode(sender, Suspicions.PR_PLUGIN_EXCEPTION, prepare) # BLS multi-sig: self._bls_bft_replica.validate_prepare(prepare, sender) return True
[ "def", "validatePrepare", "(", "self", ",", "prepare", ":", "Prepare", ",", "sender", ":", "str", ")", "->", "bool", ":", "key", "=", "(", "prepare", ".", "viewNo", ",", "prepare", ".", "ppSeqNo", ")", "primaryStatus", "=", "self", ".", "isPrimaryForMsg", "(", "prepare", ")", "ppReq", "=", "self", ".", "getPrePrepare", "(", "*", "key", ")", "# If a non primary replica and receiving a PREPARE request before a", "# PRE-PREPARE request, then proceed", "# PREPARE should not be sent from primary", "if", "self", ".", "isMsgFromPrimary", "(", "prepare", ",", "sender", ")", ":", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "PR_FRM_PRIMARY", ",", "prepare", ")", "# If non primary replica", "if", "primaryStatus", "is", "False", ":", "if", "self", ".", "prepares", ".", "hasPrepareFrom", "(", "prepare", ",", "sender", ")", ":", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "DUPLICATE_PR_SENT", ",", "prepare", ")", "# If PRE-PREPARE not received for the PREPARE, might be slow", "# network", "if", "not", "ppReq", ":", "self", ".", "enqueue_prepare", "(", "prepare", ",", "sender", ")", "self", ".", "_setup_last_ordered_for_non_master", "(", ")", "return", "False", "# If primary replica", "if", "primaryStatus", "is", "True", ":", "if", "self", ".", "prepares", ".", "hasPrepareFrom", "(", "prepare", ",", "sender", ")", ":", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "DUPLICATE_PR_SENT", ",", "prepare", ")", "# If PRE-PREPARE was not sent for this PREPARE, certainly", "# malicious behavior", "elif", "not", "ppReq", ":", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "UNKNOWN_PR_SENT", ",", "prepare", ")", "if", "primaryStatus", "is", "None", "and", "not", "ppReq", ":", "self", ".", "enqueue_prepare", "(", "prepare", ",", "sender", ")", "self", ".", "_setup_last_ordered_for_non_master", "(", ")", "return", "False", "if", "prepare", ".", "digest", "!=", "ppReq", ".", "digest", ":", "raise", 
"SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "PR_DIGEST_WRONG", ",", "prepare", ")", "elif", "prepare", ".", "stateRootHash", "!=", "ppReq", ".", "stateRootHash", ":", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "PR_STATE_WRONG", ",", "prepare", ")", "elif", "prepare", ".", "txnRootHash", "!=", "ppReq", ".", "txnRootHash", ":", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "PR_TXN_WRONG", ",", "prepare", ")", "elif", "prepare", ".", "auditTxnRootHash", "!=", "ppReq", ".", "auditTxnRootHash", ":", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "PR_AUDIT_TXN_ROOT_HASH_WRONG", ",", "prepare", ")", "try", ":", "self", ".", "execute_hook", "(", "ReplicaHooks", ".", "VALIDATE_PR", ",", "prepare", ",", "ppReq", ")", "except", "Exception", "as", "ex", ":", "self", ".", "logger", ".", "warning", "(", "'{} encountered exception in replica '", "'hook {} : {}'", ".", "format", "(", "self", ",", "ReplicaHooks", ".", "VALIDATE_PR", ",", "ex", ")", ")", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "PR_PLUGIN_EXCEPTION", ",", "prepare", ")", "# BLS multi-sig:", "self", ".", "_bls_bft_replica", ".", "validate_prepare", "(", "prepare", ",", "sender", ")", "return", "True" ]
Return whether the PREPARE specified is valid. :param prepare: the PREPARE to validate :param sender: the name of the node that sent the PREPARE :return: True if PREPARE is valid, False otherwise
[ "Return", "whether", "the", "PREPARE", "specified", "is", "valid", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1611-L1682
234,344
hyperledger/indy-plenum
plenum/server/replica.py
Replica.addToPrepares
def addToPrepares(self, prepare: Prepare, sender: str): """ Add the specified PREPARE to this replica's list of received PREPAREs and try sending COMMIT :param prepare: the PREPARE to add to the list """ # BLS multi-sig: self._bls_bft_replica.process_prepare(prepare, sender) self.prepares.addVote(prepare, sender) self.dequeue_commits(prepare.viewNo, prepare.ppSeqNo) self.tryCommit(prepare)
python
def addToPrepares(self, prepare: Prepare, sender: str): """ Add the specified PREPARE to this replica's list of received PREPAREs and try sending COMMIT :param prepare: the PREPARE to add to the list """ # BLS multi-sig: self._bls_bft_replica.process_prepare(prepare, sender) self.prepares.addVote(prepare, sender) self.dequeue_commits(prepare.viewNo, prepare.ppSeqNo) self.tryCommit(prepare)
[ "def", "addToPrepares", "(", "self", ",", "prepare", ":", "Prepare", ",", "sender", ":", "str", ")", ":", "# BLS multi-sig:", "self", ".", "_bls_bft_replica", ".", "process_prepare", "(", "prepare", ",", "sender", ")", "self", ".", "prepares", ".", "addVote", "(", "prepare", ",", "sender", ")", "self", ".", "dequeue_commits", "(", "prepare", ".", "viewNo", ",", "prepare", ".", "ppSeqNo", ")", "self", ".", "tryCommit", "(", "prepare", ")" ]
Add the specified PREPARE to this replica's list of received PREPAREs and try sending COMMIT :param prepare: the PREPARE to add to the list
[ "Add", "the", "specified", "PREPARE", "to", "this", "replica", "s", "list", "of", "received", "PREPAREs", "and", "try", "sending", "COMMIT" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1684-L1696
234,345
hyperledger/indy-plenum
plenum/server/replica.py
Replica.canCommit
def canCommit(self, prepare: Prepare) -> (bool, str): """ Return whether the specified PREPARE can proceed to the Commit step. Decision criteria: - If this replica has got just n-f-1 PREPARE requests then commit request. - If less than n-f-1 PREPARE requests then probably there's no consensus on the request; don't commit - If more than n-f-1 then already sent COMMIT; don't commit :param prepare: the PREPARE """ quorum = self.quorums.prepare.value if not self.prepares.hasQuorum(prepare, quorum): return False, 'does not have prepare quorum for {}'.format(prepare) if self.hasCommitted(prepare): return False, 'has already sent COMMIT for {}'.format(prepare) return True, ''
python
def canCommit(self, prepare: Prepare) -> (bool, str): """ Return whether the specified PREPARE can proceed to the Commit step. Decision criteria: - If this replica has got just n-f-1 PREPARE requests then commit request. - If less than n-f-1 PREPARE requests then probably there's no consensus on the request; don't commit - If more than n-f-1 then already sent COMMIT; don't commit :param prepare: the PREPARE """ quorum = self.quorums.prepare.value if not self.prepares.hasQuorum(prepare, quorum): return False, 'does not have prepare quorum for {}'.format(prepare) if self.hasCommitted(prepare): return False, 'has already sent COMMIT for {}'.format(prepare) return True, ''
[ "def", "canCommit", "(", "self", ",", "prepare", ":", "Prepare", ")", "->", "(", "bool", ",", "str", ")", ":", "quorum", "=", "self", ".", "quorums", ".", "prepare", ".", "value", "if", "not", "self", ".", "prepares", ".", "hasQuorum", "(", "prepare", ",", "quorum", ")", ":", "return", "False", ",", "'does not have prepare quorum for {}'", ".", "format", "(", "prepare", ")", "if", "self", ".", "hasCommitted", "(", "prepare", ")", ":", "return", "False", ",", "'has already sent COMMIT for {}'", ".", "format", "(", "prepare", ")", "return", "True", ",", "''" ]
Return whether the specified PREPARE can proceed to the Commit step. Decision criteria: - If this replica has got just n-f-1 PREPARE requests then commit request. - If less than n-f-1 PREPARE requests then probably there's no consensus on the request; don't commit - If more than n-f-1 then already sent COMMIT; don't commit :param prepare: the PREPARE
[ "Return", "whether", "the", "specified", "PREPARE", "can", "proceed", "to", "the", "Commit", "step", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1740-L1759
234,346
hyperledger/indy-plenum
plenum/server/replica.py
Replica.validateCommit
def validateCommit(self, commit: Commit, sender: str) -> bool: """ Return whether the COMMIT specified is valid. :param commit: the COMMIT to validate :return: True if `request` is valid, False otherwise """ key = (commit.viewNo, commit.ppSeqNo) if not self.has_prepared(key): self.enqueue_commit(commit, sender) return False if self.commits.hasCommitFrom(commit, sender): raise SuspiciousNode(sender, Suspicions.DUPLICATE_CM_SENT, commit) # BLS multi-sig: pre_prepare = self.getPrePrepare(commit.viewNo, commit.ppSeqNo) why_not = self._bls_bft_replica.validate_commit(commit, sender, pre_prepare) if why_not == BlsBftReplica.CM_BLS_SIG_WRONG: self.logger.warning("{} discard Commit message from " "{}:{}".format(self, sender, commit)) raise SuspiciousNode(sender, Suspicions.CM_BLS_SIG_WRONG, commit) elif why_not is not None: self.logger.warning("Unknown error code returned for bls commit " "validation {}".format(why_not)) return True
python
def validateCommit(self, commit: Commit, sender: str) -> bool: """ Return whether the COMMIT specified is valid. :param commit: the COMMIT to validate :return: True if `request` is valid, False otherwise """ key = (commit.viewNo, commit.ppSeqNo) if not self.has_prepared(key): self.enqueue_commit(commit, sender) return False if self.commits.hasCommitFrom(commit, sender): raise SuspiciousNode(sender, Suspicions.DUPLICATE_CM_SENT, commit) # BLS multi-sig: pre_prepare = self.getPrePrepare(commit.viewNo, commit.ppSeqNo) why_not = self._bls_bft_replica.validate_commit(commit, sender, pre_prepare) if why_not == BlsBftReplica.CM_BLS_SIG_WRONG: self.logger.warning("{} discard Commit message from " "{}:{}".format(self, sender, commit)) raise SuspiciousNode(sender, Suspicions.CM_BLS_SIG_WRONG, commit) elif why_not is not None: self.logger.warning("Unknown error code returned for bls commit " "validation {}".format(why_not)) return True
[ "def", "validateCommit", "(", "self", ",", "commit", ":", "Commit", ",", "sender", ":", "str", ")", "->", "bool", ":", "key", "=", "(", "commit", ".", "viewNo", ",", "commit", ".", "ppSeqNo", ")", "if", "not", "self", ".", "has_prepared", "(", "key", ")", ":", "self", ".", "enqueue_commit", "(", "commit", ",", "sender", ")", "return", "False", "if", "self", ".", "commits", ".", "hasCommitFrom", "(", "commit", ",", "sender", ")", ":", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "DUPLICATE_CM_SENT", ",", "commit", ")", "# BLS multi-sig:", "pre_prepare", "=", "self", ".", "getPrePrepare", "(", "commit", ".", "viewNo", ",", "commit", ".", "ppSeqNo", ")", "why_not", "=", "self", ".", "_bls_bft_replica", ".", "validate_commit", "(", "commit", ",", "sender", ",", "pre_prepare", ")", "if", "why_not", "==", "BlsBftReplica", ".", "CM_BLS_SIG_WRONG", ":", "self", ".", "logger", ".", "warning", "(", "\"{} discard Commit message from \"", "\"{}:{}\"", ".", "format", "(", "self", ",", "sender", ",", "commit", ")", ")", "raise", "SuspiciousNode", "(", "sender", ",", "Suspicions", ".", "CM_BLS_SIG_WRONG", ",", "commit", ")", "elif", "why_not", "is", "not", "None", ":", "self", ".", "logger", ".", "warning", "(", "\"Unknown error code returned for bls commit \"", "\"validation {}\"", ".", "format", "(", "why_not", ")", ")", "return", "True" ]
Return whether the COMMIT specified is valid. :param commit: the COMMIT to validate :return: True if `request` is valid, False otherwise
[ "Return", "whether", "the", "COMMIT", "specified", "is", "valid", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1761-L1790
234,347
hyperledger/indy-plenum
plenum/server/replica.py
Replica.addToCommits
def addToCommits(self, commit: Commit, sender: str): """ Add the specified COMMIT to this replica's list of received commit requests. :param commit: the COMMIT to add to the list :param sender: the name of the node that sent the COMMIT """ # BLS multi-sig: self._bls_bft_replica.process_commit(commit, sender) self.commits.addVote(commit, sender) self.tryOrder(commit)
python
def addToCommits(self, commit: Commit, sender: str): """ Add the specified COMMIT to this replica's list of received commit requests. :param commit: the COMMIT to add to the list :param sender: the name of the node that sent the COMMIT """ # BLS multi-sig: self._bls_bft_replica.process_commit(commit, sender) self.commits.addVote(commit, sender) self.tryOrder(commit)
[ "def", "addToCommits", "(", "self", ",", "commit", ":", "Commit", ",", "sender", ":", "str", ")", ":", "# BLS multi-sig:", "self", ".", "_bls_bft_replica", ".", "process_commit", "(", "commit", ",", "sender", ")", "self", ".", "commits", ".", "addVote", "(", "commit", ",", "sender", ")", "self", ".", "tryOrder", "(", "commit", ")" ]
Add the specified COMMIT to this replica's list of received commit requests. :param commit: the COMMIT to add to the list :param sender: the name of the node that sent the COMMIT
[ "Add", "the", "specified", "COMMIT", "to", "this", "replica", "s", "list", "of", "received", "commit", "requests", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1792-L1804
234,348
hyperledger/indy-plenum
plenum/server/replica.py
Replica.canOrder
def canOrder(self, commit: Commit) -> Tuple[bool, Optional[str]]: """ Return whether the specified commitRequest can be returned to the node. Decision criteria: - If have got just n-f Commit requests then return request to node - If less than n-f of commit requests then probably don't have consensus on the request; don't return request to node - If more than n-f then already returned to node; don't return request to node :param commit: the COMMIT """ quorum = self.quorums.commit.value if not self.commits.hasQuorum(commit, quorum): return False, "no quorum ({}): {} commits where f is {}". \ format(quorum, commit, self.f) key = (commit.viewNo, commit.ppSeqNo) if self.has_already_ordered(*key): return False, "already ordered" if commit.ppSeqNo > 1 and not self.all_prev_ordered(commit): viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo if viewNo not in self.stashed_out_of_order_commits: self.stashed_out_of_order_commits[viewNo] = {} self.stashed_out_of_order_commits[viewNo][ppSeqNo] = commit self.startRepeating(self.process_stashed_out_of_order_commits, self.config.PROCESS_STASHED_OUT_OF_ORDER_COMMITS_INTERVAL) return False, "stashing {} since out of order". \ format(commit) return True, None
python
def canOrder(self, commit: Commit) -> Tuple[bool, Optional[str]]: """ Return whether the specified commitRequest can be returned to the node. Decision criteria: - If have got just n-f Commit requests then return request to node - If less than n-f of commit requests then probably don't have consensus on the request; don't return request to node - If more than n-f then already returned to node; don't return request to node :param commit: the COMMIT """ quorum = self.quorums.commit.value if not self.commits.hasQuorum(commit, quorum): return False, "no quorum ({}): {} commits where f is {}". \ format(quorum, commit, self.f) key = (commit.viewNo, commit.ppSeqNo) if self.has_already_ordered(*key): return False, "already ordered" if commit.ppSeqNo > 1 and not self.all_prev_ordered(commit): viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo if viewNo not in self.stashed_out_of_order_commits: self.stashed_out_of_order_commits[viewNo] = {} self.stashed_out_of_order_commits[viewNo][ppSeqNo] = commit self.startRepeating(self.process_stashed_out_of_order_commits, self.config.PROCESS_STASHED_OUT_OF_ORDER_COMMITS_INTERVAL) return False, "stashing {} since out of order". \ format(commit) return True, None
[ "def", "canOrder", "(", "self", ",", "commit", ":", "Commit", ")", "->", "Tuple", "[", "bool", ",", "Optional", "[", "str", "]", "]", ":", "quorum", "=", "self", ".", "quorums", ".", "commit", ".", "value", "if", "not", "self", ".", "commits", ".", "hasQuorum", "(", "commit", ",", "quorum", ")", ":", "return", "False", ",", "\"no quorum ({}): {} commits where f is {}\"", ".", "format", "(", "quorum", ",", "commit", ",", "self", ".", "f", ")", "key", "=", "(", "commit", ".", "viewNo", ",", "commit", ".", "ppSeqNo", ")", "if", "self", ".", "has_already_ordered", "(", "*", "key", ")", ":", "return", "False", ",", "\"already ordered\"", "if", "commit", ".", "ppSeqNo", ">", "1", "and", "not", "self", ".", "all_prev_ordered", "(", "commit", ")", ":", "viewNo", ",", "ppSeqNo", "=", "commit", ".", "viewNo", ",", "commit", ".", "ppSeqNo", "if", "viewNo", "not", "in", "self", ".", "stashed_out_of_order_commits", ":", "self", ".", "stashed_out_of_order_commits", "[", "viewNo", "]", "=", "{", "}", "self", ".", "stashed_out_of_order_commits", "[", "viewNo", "]", "[", "ppSeqNo", "]", "=", "commit", "self", ".", "startRepeating", "(", "self", ".", "process_stashed_out_of_order_commits", ",", "self", ".", "config", ".", "PROCESS_STASHED_OUT_OF_ORDER_COMMITS_INTERVAL", ")", "return", "False", ",", "\"stashing {} since out of order\"", ".", "format", "(", "commit", ")", "return", "True", ",", "None" ]
Return whether the specified commitRequest can be returned to the node. Decision criteria: - If have got just n-f Commit requests then return request to node - If less than n-f of commit requests then probably don't have consensus on the request; don't return request to node - If more than n-f then already returned to node; don't return request to node :param commit: the COMMIT
[ "Return", "whether", "the", "specified", "commitRequest", "can", "be", "returned", "to", "the", "node", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1806-L1839
234,349
hyperledger/indy-plenum
plenum/server/replica.py
Replica.all_prev_ordered
def all_prev_ordered(self, commit: Commit): """ Return True if all previous COMMITs have been ordered """ # TODO: This method does a lot of work, choose correct data # structures to make it efficient. viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo if self.last_ordered_3pc == (viewNo, ppSeqNo - 1): # Last ordered was in same view as this COMMIT return True # if some PREPAREs/COMMITs were completely missed in the same view toCheck = set() toCheck.update(set(self.sentPrePrepares.keys())) toCheck.update(set(self.prePrepares.keys())) toCheck.update(set(self.prepares.keys())) toCheck.update(set(self.commits.keys())) for (v, p) in toCheck: if v < viewNo and (v, p) not in self.ordered: # Have commits from previous view that are unordered. return False if v == viewNo and p < ppSeqNo and (v, p) not in self.ordered: # If unordered commits are found with lower ppSeqNo then this # cannot be ordered. return False return True
python
def all_prev_ordered(self, commit: Commit): """ Return True if all previous COMMITs have been ordered """ # TODO: This method does a lot of work, choose correct data # structures to make it efficient. viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo if self.last_ordered_3pc == (viewNo, ppSeqNo - 1): # Last ordered was in same view as this COMMIT return True # if some PREPAREs/COMMITs were completely missed in the same view toCheck = set() toCheck.update(set(self.sentPrePrepares.keys())) toCheck.update(set(self.prePrepares.keys())) toCheck.update(set(self.prepares.keys())) toCheck.update(set(self.commits.keys())) for (v, p) in toCheck: if v < viewNo and (v, p) not in self.ordered: # Have commits from previous view that are unordered. return False if v == viewNo and p < ppSeqNo and (v, p) not in self.ordered: # If unordered commits are found with lower ppSeqNo then this # cannot be ordered. return False return True
[ "def", "all_prev_ordered", "(", "self", ",", "commit", ":", "Commit", ")", ":", "# TODO: This method does a lot of work, choose correct data", "# structures to make it efficient.", "viewNo", ",", "ppSeqNo", "=", "commit", ".", "viewNo", ",", "commit", ".", "ppSeqNo", "if", "self", ".", "last_ordered_3pc", "==", "(", "viewNo", ",", "ppSeqNo", "-", "1", ")", ":", "# Last ordered was in same view as this COMMIT", "return", "True", "# if some PREPAREs/COMMITs were completely missed in the same view", "toCheck", "=", "set", "(", ")", "toCheck", ".", "update", "(", "set", "(", "self", ".", "sentPrePrepares", ".", "keys", "(", ")", ")", ")", "toCheck", ".", "update", "(", "set", "(", "self", ".", "prePrepares", ".", "keys", "(", ")", ")", ")", "toCheck", ".", "update", "(", "set", "(", "self", ".", "prepares", ".", "keys", "(", ")", ")", ")", "toCheck", ".", "update", "(", "set", "(", "self", ".", "commits", ".", "keys", "(", ")", ")", ")", "for", "(", "v", ",", "p", ")", "in", "toCheck", ":", "if", "v", "<", "viewNo", "and", "(", "v", ",", "p", ")", "not", "in", "self", ".", "ordered", ":", "# Have commits from previous view that are unordered.", "return", "False", "if", "v", "==", "viewNo", "and", "p", "<", "ppSeqNo", "and", "(", "v", ",", "p", ")", "not", "in", "self", ".", "ordered", ":", "# If unordered commits are found with lower ppSeqNo then this", "# cannot be ordered.", "return", "False", "return", "True" ]
Return True if all previous COMMITs have been ordered
[ "Return", "True", "if", "all", "previous", "COMMITs", "have", "been", "ordered" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1841-L1869
234,350
hyperledger/indy-plenum
plenum/server/replica.py
Replica.process_checkpoint
def process_checkpoint(self, msg: Checkpoint, sender: str) -> bool: """ Process checkpoint messages :return: whether processed (True) or stashed (False) """ self.logger.info('{} processing checkpoint {} from {}'.format(self, msg, sender)) result, reason = self.validator.validate_checkpoint_msg(msg) if result == DISCARD: self.discard(msg, "{} discard message {} from {} " "with the reason: {}".format(self, msg, sender, reason), self.logger.trace) elif result == PROCESS: self._do_process_checkpoint(msg, sender) else: self.logger.debug("{} stashing checkpoint message {} with " "the reason: {}".format(self, msg, reason)) self.stasher.stash((msg, sender), result) return False return True
python
def process_checkpoint(self, msg: Checkpoint, sender: str) -> bool: """ Process checkpoint messages :return: whether processed (True) or stashed (False) """ self.logger.info('{} processing checkpoint {} from {}'.format(self, msg, sender)) result, reason = self.validator.validate_checkpoint_msg(msg) if result == DISCARD: self.discard(msg, "{} discard message {} from {} " "with the reason: {}".format(self, msg, sender, reason), self.logger.trace) elif result == PROCESS: self._do_process_checkpoint(msg, sender) else: self.logger.debug("{} stashing checkpoint message {} with " "the reason: {}".format(self, msg, reason)) self.stasher.stash((msg, sender), result) return False return True
[ "def", "process_checkpoint", "(", "self", ",", "msg", ":", "Checkpoint", ",", "sender", ":", "str", ")", "->", "bool", ":", "self", ".", "logger", ".", "info", "(", "'{} processing checkpoint {} from {}'", ".", "format", "(", "self", ",", "msg", ",", "sender", ")", ")", "result", ",", "reason", "=", "self", ".", "validator", ".", "validate_checkpoint_msg", "(", "msg", ")", "if", "result", "==", "DISCARD", ":", "self", ".", "discard", "(", "msg", ",", "\"{} discard message {} from {} \"", "\"with the reason: {}\"", ".", "format", "(", "self", ",", "msg", ",", "sender", ",", "reason", ")", ",", "self", ".", "logger", ".", "trace", ")", "elif", "result", "==", "PROCESS", ":", "self", ".", "_do_process_checkpoint", "(", "msg", ",", "sender", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"{} stashing checkpoint message {} with \"", "\"the reason: {}\"", ".", "format", "(", "self", ",", "msg", ",", "reason", ")", ")", "self", ".", "stasher", ".", "stash", "(", "(", "msg", ",", "sender", ")", ",", "result", ")", "return", "False", "return", "True" ]
Process checkpoint messages :return: whether processed (True) or stashed (False)
[ "Process", "checkpoint", "messages" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L2050-L2069
234,351
hyperledger/indy-plenum
plenum/server/replica.py
Replica._process_stashed_pre_prepare_for_time_if_possible
def _process_stashed_pre_prepare_for_time_if_possible( self, key: Tuple[int, int]): """ Check if any PRE-PREPAREs that were stashed since their time was not acceptable, can now be accepted since enough PREPAREs are received """ self.logger.debug('{} going to process stashed PRE-PREPAREs with ' 'incorrect times'.format(self)) q = self.quorums.f if len(self.preparesWaitingForPrePrepare[key]) > q: times = [pr.ppTime for (pr, _) in self.preparesWaitingForPrePrepare[key]] most_common_time, freq = mostCommonElement(times) if self.quorums.timestamp.is_reached(freq): self.logger.debug('{} found sufficient PREPAREs for the ' 'PRE-PREPARE{}'.format(self, key)) stashed_pp = self.pre_prepares_stashed_for_incorrect_time pp, sender, done = stashed_pp[key] if done: self.logger.debug('{} already processed PRE-PREPARE{}'.format(self, key)) return True # True is set since that will indicate to `is_pre_prepare_time_acceptable` # that sufficient PREPAREs are received stashed_pp[key] = (pp, sender, True) self.process_three_phase_msg(pp, sender) return True return False
python
def _process_stashed_pre_prepare_for_time_if_possible( self, key: Tuple[int, int]): """ Check if any PRE-PREPAREs that were stashed since their time was not acceptable, can now be accepted since enough PREPAREs are received """ self.logger.debug('{} going to process stashed PRE-PREPAREs with ' 'incorrect times'.format(self)) q = self.quorums.f if len(self.preparesWaitingForPrePrepare[key]) > q: times = [pr.ppTime for (pr, _) in self.preparesWaitingForPrePrepare[key]] most_common_time, freq = mostCommonElement(times) if self.quorums.timestamp.is_reached(freq): self.logger.debug('{} found sufficient PREPAREs for the ' 'PRE-PREPARE{}'.format(self, key)) stashed_pp = self.pre_prepares_stashed_for_incorrect_time pp, sender, done = stashed_pp[key] if done: self.logger.debug('{} already processed PRE-PREPARE{}'.format(self, key)) return True # True is set since that will indicate to `is_pre_prepare_time_acceptable` # that sufficient PREPAREs are received stashed_pp[key] = (pp, sender, True) self.process_three_phase_msg(pp, sender) return True return False
[ "def", "_process_stashed_pre_prepare_for_time_if_possible", "(", "self", ",", "key", ":", "Tuple", "[", "int", ",", "int", "]", ")", ":", "self", ".", "logger", ".", "debug", "(", "'{} going to process stashed PRE-PREPAREs with '", "'incorrect times'", ".", "format", "(", "self", ")", ")", "q", "=", "self", ".", "quorums", ".", "f", "if", "len", "(", "self", ".", "preparesWaitingForPrePrepare", "[", "key", "]", ")", ">", "q", ":", "times", "=", "[", "pr", ".", "ppTime", "for", "(", "pr", ",", "_", ")", "in", "self", ".", "preparesWaitingForPrePrepare", "[", "key", "]", "]", "most_common_time", ",", "freq", "=", "mostCommonElement", "(", "times", ")", "if", "self", ".", "quorums", ".", "timestamp", ".", "is_reached", "(", "freq", ")", ":", "self", ".", "logger", ".", "debug", "(", "'{} found sufficient PREPAREs for the '", "'PRE-PREPARE{}'", ".", "format", "(", "self", ",", "key", ")", ")", "stashed_pp", "=", "self", ".", "pre_prepares_stashed_for_incorrect_time", "pp", ",", "sender", ",", "done", "=", "stashed_pp", "[", "key", "]", "if", "done", ":", "self", ".", "logger", ".", "debug", "(", "'{} already processed PRE-PREPARE{}'", ".", "format", "(", "self", ",", "key", ")", ")", "return", "True", "# True is set since that will indicate to `is_pre_prepare_time_acceptable`", "# that sufficient PREPAREs are received", "stashed_pp", "[", "key", "]", "=", "(", "pp", ",", "sender", ",", "True", ")", "self", ".", "process_three_phase_msg", "(", "pp", ",", "sender", ")", "return", "True", "return", "False" ]
Check if any PRE-PREPAREs that were stashed since their time was not acceptable, can now be accepted since enough PREPAREs are received
[ "Check", "if", "any", "PRE", "-", "PREPAREs", "that", "were", "stashed", "since", "their", "time", "was", "not", "acceptable", "can", "now", "be", "accepted", "since", "enough", "PREPAREs", "are", "received" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L2742-L2768
234,352
hyperledger/indy-plenum
plenum/server/replica.py
Replica._remove_till_caught_up_3pc
def _remove_till_caught_up_3pc(self, last_caught_up_3PC): """ Remove any 3 phase messages till the last ordered key and also remove any corresponding request keys """ outdated_pre_prepares = {} for key, pp in self.prePrepares.items(): if compare_3PC_keys(key, last_caught_up_3PC) >= 0: outdated_pre_prepares[key] = pp for key, pp in self.sentPrePrepares.items(): if compare_3PC_keys(key, last_caught_up_3PC) >= 0: outdated_pre_prepares[key] = pp self.logger.trace('{} going to remove messages for {} 3PC keys'.format( self, len(outdated_pre_prepares))) for key, pp in outdated_pre_prepares.items(): self.batches.pop(key, None) self.sentPrePrepares.pop(key, None) self.prePrepares.pop(key, None) self.prepares.pop(key, None) self.commits.pop(key, None) self._discard_ordered_req_keys(pp)
python
def _remove_till_caught_up_3pc(self, last_caught_up_3PC): """ Remove any 3 phase messages till the last ordered key and also remove any corresponding request keys """ outdated_pre_prepares = {} for key, pp in self.prePrepares.items(): if compare_3PC_keys(key, last_caught_up_3PC) >= 0: outdated_pre_prepares[key] = pp for key, pp in self.sentPrePrepares.items(): if compare_3PC_keys(key, last_caught_up_3PC) >= 0: outdated_pre_prepares[key] = pp self.logger.trace('{} going to remove messages for {} 3PC keys'.format( self, len(outdated_pre_prepares))) for key, pp in outdated_pre_prepares.items(): self.batches.pop(key, None) self.sentPrePrepares.pop(key, None) self.prePrepares.pop(key, None) self.prepares.pop(key, None) self.commits.pop(key, None) self._discard_ordered_req_keys(pp)
[ "def", "_remove_till_caught_up_3pc", "(", "self", ",", "last_caught_up_3PC", ")", ":", "outdated_pre_prepares", "=", "{", "}", "for", "key", ",", "pp", "in", "self", ".", "prePrepares", ".", "items", "(", ")", ":", "if", "compare_3PC_keys", "(", "key", ",", "last_caught_up_3PC", ")", ">=", "0", ":", "outdated_pre_prepares", "[", "key", "]", "=", "pp", "for", "key", ",", "pp", "in", "self", ".", "sentPrePrepares", ".", "items", "(", ")", ":", "if", "compare_3PC_keys", "(", "key", ",", "last_caught_up_3PC", ")", ">=", "0", ":", "outdated_pre_prepares", "[", "key", "]", "=", "pp", "self", ".", "logger", ".", "trace", "(", "'{} going to remove messages for {} 3PC keys'", ".", "format", "(", "self", ",", "len", "(", "outdated_pre_prepares", ")", ")", ")", "for", "key", ",", "pp", "in", "outdated_pre_prepares", ".", "items", "(", ")", ":", "self", ".", "batches", ".", "pop", "(", "key", ",", "None", ")", "self", ".", "sentPrePrepares", ".", "pop", "(", "key", ",", "None", ")", "self", ".", "prePrepares", ".", "pop", "(", "key", ",", "None", ")", "self", ".", "prepares", ".", "pop", "(", "key", ",", "None", ")", "self", ".", "commits", ".", "pop", "(", "key", ",", "None", ")", "self", ".", "_discard_ordered_req_keys", "(", "pp", ")" ]
Remove any 3 phase messages till the last ordered key and also remove any corresponding request keys
[ "Remove", "any", "3", "phase", "messages", "till", "the", "last", "ordered", "key", "and", "also", "remove", "any", "corresponding", "request", "keys" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L2843-L2865
234,353
hyperledger/indy-plenum
plenum/server/replica.py
Replica._remove_ordered_from_queue
def _remove_ordered_from_queue(self, last_caught_up_3PC=None): """ Remove any Ordered that the replica might be sending to node which is less than or equal to `last_caught_up_3PC` if `last_caught_up_3PC` is passed else remove all ordered, needed in catchup """ to_remove = [] for i, msg in enumerate(self.outBox): if isinstance(msg, Ordered) and \ (not last_caught_up_3PC or compare_3PC_keys((msg.viewNo, msg.ppSeqNo), last_caught_up_3PC) >= 0): to_remove.append(i) self.logger.trace('{} going to remove {} Ordered messages from outbox'.format(self, len(to_remove))) # Removing Ordered from queue but returning `Ordered` in order that # they should be processed. removed = [] for i in reversed(to_remove): removed.insert(0, self.outBox[i]) del self.outBox[i] return removed
python
def _remove_ordered_from_queue(self, last_caught_up_3PC=None): """ Remove any Ordered that the replica might be sending to node which is less than or equal to `last_caught_up_3PC` if `last_caught_up_3PC` is passed else remove all ordered, needed in catchup """ to_remove = [] for i, msg in enumerate(self.outBox): if isinstance(msg, Ordered) and \ (not last_caught_up_3PC or compare_3PC_keys((msg.viewNo, msg.ppSeqNo), last_caught_up_3PC) >= 0): to_remove.append(i) self.logger.trace('{} going to remove {} Ordered messages from outbox'.format(self, len(to_remove))) # Removing Ordered from queue but returning `Ordered` in order that # they should be processed. removed = [] for i in reversed(to_remove): removed.insert(0, self.outBox[i]) del self.outBox[i] return removed
[ "def", "_remove_ordered_from_queue", "(", "self", ",", "last_caught_up_3PC", "=", "None", ")", ":", "to_remove", "=", "[", "]", "for", "i", ",", "msg", "in", "enumerate", "(", "self", ".", "outBox", ")", ":", "if", "isinstance", "(", "msg", ",", "Ordered", ")", "and", "(", "not", "last_caught_up_3PC", "or", "compare_3PC_keys", "(", "(", "msg", ".", "viewNo", ",", "msg", ".", "ppSeqNo", ")", ",", "last_caught_up_3PC", ")", ">=", "0", ")", ":", "to_remove", ".", "append", "(", "i", ")", "self", ".", "logger", ".", "trace", "(", "'{} going to remove {} Ordered messages from outbox'", ".", "format", "(", "self", ",", "len", "(", "to_remove", ")", ")", ")", "# Removing Ordered from queue but returning `Ordered` in order that", "# they should be processed.", "removed", "=", "[", "]", "for", "i", "in", "reversed", "(", "to_remove", ")", ":", "removed", ".", "insert", "(", "0", ",", "self", ".", "outBox", "[", "i", "]", ")", "del", "self", ".", "outBox", "[", "i", "]", "return", "removed" ]
Remove any Ordered that the replica might be sending to node which is less than or equal to `last_caught_up_3PC` if `last_caught_up_3PC` is passed else remove all ordered, needed in catchup
[ "Remove", "any", "Ordered", "that", "the", "replica", "might", "be", "sending", "to", "node", "which", "is", "less", "than", "or", "equal", "to", "last_caught_up_3PC", "if", "last_caught_up_3PC", "is", "passed", "else", "remove", "all", "ordered", "needed", "in", "catchup" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L2867-L2888
234,354
hyperledger/indy-plenum
plenum/server/replica.py
Replica._remove_stashed_checkpoints
def _remove_stashed_checkpoints(self, till_3pc_key=None): """ Remove stashed received checkpoints up to `till_3pc_key` if provided, otherwise remove all stashed received checkpoints """ if till_3pc_key is None: self.stashedRecvdCheckpoints.clear() self.logger.info('{} removing all stashed checkpoints'.format(self)) return for view_no in list(self.stashedRecvdCheckpoints.keys()): if view_no < till_3pc_key[0]: self.logger.info('{} removing stashed checkpoints for view {}'.format(self, view_no)) del self.stashedRecvdCheckpoints[view_no] elif view_no == till_3pc_key[0]: for (s, e) in list(self.stashedRecvdCheckpoints[view_no].keys()): if e <= till_3pc_key[1]: self.logger.info('{} removing stashed checkpoints: ' 'viewNo={}, seqNoStart={}, seqNoEnd={}'. format(self, view_no, s, e)) del self.stashedRecvdCheckpoints[view_no][(s, e)] if len(self.stashedRecvdCheckpoints[view_no]) == 0: del self.stashedRecvdCheckpoints[view_no]
python
def _remove_stashed_checkpoints(self, till_3pc_key=None): """ Remove stashed received checkpoints up to `till_3pc_key` if provided, otherwise remove all stashed received checkpoints """ if till_3pc_key is None: self.stashedRecvdCheckpoints.clear() self.logger.info('{} removing all stashed checkpoints'.format(self)) return for view_no in list(self.stashedRecvdCheckpoints.keys()): if view_no < till_3pc_key[0]: self.logger.info('{} removing stashed checkpoints for view {}'.format(self, view_no)) del self.stashedRecvdCheckpoints[view_no] elif view_no == till_3pc_key[0]: for (s, e) in list(self.stashedRecvdCheckpoints[view_no].keys()): if e <= till_3pc_key[1]: self.logger.info('{} removing stashed checkpoints: ' 'viewNo={}, seqNoStart={}, seqNoEnd={}'. format(self, view_no, s, e)) del self.stashedRecvdCheckpoints[view_no][(s, e)] if len(self.stashedRecvdCheckpoints[view_no]) == 0: del self.stashedRecvdCheckpoints[view_no]
[ "def", "_remove_stashed_checkpoints", "(", "self", ",", "till_3pc_key", "=", "None", ")", ":", "if", "till_3pc_key", "is", "None", ":", "self", ".", "stashedRecvdCheckpoints", ".", "clear", "(", ")", "self", ".", "logger", ".", "info", "(", "'{} removing all stashed checkpoints'", ".", "format", "(", "self", ")", ")", "return", "for", "view_no", "in", "list", "(", "self", ".", "stashedRecvdCheckpoints", ".", "keys", "(", ")", ")", ":", "if", "view_no", "<", "till_3pc_key", "[", "0", "]", ":", "self", ".", "logger", ".", "info", "(", "'{} removing stashed checkpoints for view {}'", ".", "format", "(", "self", ",", "view_no", ")", ")", "del", "self", ".", "stashedRecvdCheckpoints", "[", "view_no", "]", "elif", "view_no", "==", "till_3pc_key", "[", "0", "]", ":", "for", "(", "s", ",", "e", ")", "in", "list", "(", "self", ".", "stashedRecvdCheckpoints", "[", "view_no", "]", ".", "keys", "(", ")", ")", ":", "if", "e", "<=", "till_3pc_key", "[", "1", "]", ":", "self", ".", "logger", ".", "info", "(", "'{} removing stashed checkpoints: '", "'viewNo={}, seqNoStart={}, seqNoEnd={}'", ".", "format", "(", "self", ",", "view_no", ",", "s", ",", "e", ")", ")", "del", "self", ".", "stashedRecvdCheckpoints", "[", "view_no", "]", "[", "(", "s", ",", "e", ")", "]", "if", "len", "(", "self", ".", "stashedRecvdCheckpoints", "[", "view_no", "]", ")", "==", "0", ":", "del", "self", ".", "stashedRecvdCheckpoints", "[", "view_no", "]" ]
Remove stashed received checkpoints up to `till_3pc_key` if provided, otherwise remove all stashed received checkpoints
[ "Remove", "stashed", "received", "checkpoints", "up", "to", "till_3pc_key", "if", "provided", "otherwise", "remove", "all", "stashed", "received", "checkpoints" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L2890-L2914
234,355
hyperledger/indy-plenum
stp_core/network/util.py
checkPortAvailable
def checkPortAvailable(ha): """Checks whether the given port is available""" # Not sure why OS would allow binding to one type and not other. # Checking for port available for TCP and UDP. sockTypes = (socket.SOCK_DGRAM, socket.SOCK_STREAM) for typ in sockTypes: sock = socket.socket(socket.AF_INET, typ) try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(ha) if typ == socket.SOCK_STREAM: l_onoff = 1 l_linger = 0 sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', l_onoff, l_linger)) except OSError as exc: if exc.errno in [ errno.EADDRINUSE, errno.EADDRNOTAVAIL, WS_SOCKET_BIND_ERROR_ALREADY_IN_USE, WS_SOCKET_BIND_ERROR_NOT_AVAILABLE ]: raise PortNotAvailable(ha) else: raise exc finally: sock.close()
python
def checkPortAvailable(ha): """Checks whether the given port is available""" # Not sure why OS would allow binding to one type and not other. # Checking for port available for TCP and UDP. sockTypes = (socket.SOCK_DGRAM, socket.SOCK_STREAM) for typ in sockTypes: sock = socket.socket(socket.AF_INET, typ) try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(ha) if typ == socket.SOCK_STREAM: l_onoff = 1 l_linger = 0 sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', l_onoff, l_linger)) except OSError as exc: if exc.errno in [ errno.EADDRINUSE, errno.EADDRNOTAVAIL, WS_SOCKET_BIND_ERROR_ALREADY_IN_USE, WS_SOCKET_BIND_ERROR_NOT_AVAILABLE ]: raise PortNotAvailable(ha) else: raise exc finally: sock.close()
[ "def", "checkPortAvailable", "(", "ha", ")", ":", "# Not sure why OS would allow binding to one type and not other.", "# Checking for port available for TCP and UDP.", "sockTypes", "=", "(", "socket", ".", "SOCK_DGRAM", ",", "socket", ".", "SOCK_STREAM", ")", "for", "typ", "in", "sockTypes", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "typ", ")", "try", ":", "sock", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "sock", ".", "bind", "(", "ha", ")", "if", "typ", "==", "socket", ".", "SOCK_STREAM", ":", "l_onoff", "=", "1", "l_linger", "=", "0", "sock", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_LINGER", ",", "struct", ".", "pack", "(", "'ii'", ",", "l_onoff", ",", "l_linger", ")", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errno", "in", "[", "errno", ".", "EADDRINUSE", ",", "errno", ".", "EADDRNOTAVAIL", ",", "WS_SOCKET_BIND_ERROR_ALREADY_IN_USE", ",", "WS_SOCKET_BIND_ERROR_NOT_AVAILABLE", "]", ":", "raise", "PortNotAvailable", "(", "ha", ")", "else", ":", "raise", "exc", "finally", ":", "sock", ".", "close", "(", ")" ]
Checks whether the given port is available
[ "Checks", "whether", "the", "given", "port", "is", "available" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/network/util.py#L19-L44
234,356
hyperledger/indy-plenum
stp_core/network/util.py
evenCompare
def evenCompare(a: str, b: str) -> bool: """ A deterministic but more evenly distributed comparator than simple alphabetical. Useful when comparing consecutive strings and an even distribution is needed. Provides an even chance of returning true as often as false """ ab = a.encode('utf-8') bb = b.encode('utf-8') ac = crypto_hash_sha256(ab) bc = crypto_hash_sha256(bb) return ac < bc
python
def evenCompare(a: str, b: str) -> bool: """ A deterministic but more evenly distributed comparator than simple alphabetical. Useful when comparing consecutive strings and an even distribution is needed. Provides an even chance of returning true as often as false """ ab = a.encode('utf-8') bb = b.encode('utf-8') ac = crypto_hash_sha256(ab) bc = crypto_hash_sha256(bb) return ac < bc
[ "def", "evenCompare", "(", "a", ":", "str", ",", "b", ":", "str", ")", "->", "bool", ":", "ab", "=", "a", ".", "encode", "(", "'utf-8'", ")", "bb", "=", "b", ".", "encode", "(", "'utf-8'", ")", "ac", "=", "crypto_hash_sha256", "(", "ab", ")", "bc", "=", "crypto_hash_sha256", "(", "bb", ")", "return", "ac", "<", "bc" ]
A deterministic but more evenly distributed comparator than simple alphabetical. Useful when comparing consecutive strings and an even distribution is needed. Provides an even chance of returning true as often as false
[ "A", "deterministic", "but", "more", "evenly", "distributed", "comparator", "than", "simple", "alphabetical", ".", "Useful", "when", "comparing", "consecutive", "strings", "and", "an", "even", "distribution", "is", "needed", ".", "Provides", "an", "even", "chance", "of", "returning", "true", "as", "often", "as", "false" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/network/util.py#L47-L57
234,357
kylejusticemagnuson/pyti
pyti/keltner_bands.py
center_band
def center_band(close_data, high_data, low_data, period): """ Center Band. Formula: CB = SMA(TP) """ tp = typical_price(close_data, high_data, low_data) cb = sma(tp, period) return cb
python
def center_band(close_data, high_data, low_data, period): """ Center Band. Formula: CB = SMA(TP) """ tp = typical_price(close_data, high_data, low_data) cb = sma(tp, period) return cb
[ "def", "center_band", "(", "close_data", ",", "high_data", ",", "low_data", ",", "period", ")", ":", "tp", "=", "typical_price", "(", "close_data", ",", "high_data", ",", "low_data", ")", "cb", "=", "sma", "(", "tp", ",", "period", ")", "return", "cb" ]
Center Band. Formula: CB = SMA(TP)
[ "Center", "Band", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/keltner_bands.py#L23-L32
234,358
kylejusticemagnuson/pyti
pyti/simple_moving_average.py
simple_moving_average
def simple_moving_average(data, period): """ Simple Moving Average. Formula: SUM(data / N) """ catch_errors.check_for_period_error(data, period) # Mean of Empty Slice RuntimeWarning doesn't affect output so it is # supressed with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) sma = [np.mean(data[idx-(period-1):idx+1]) for idx in range(0, len(data))] sma = fill_for_noncomputable_vals(data, sma) return sma
python
def simple_moving_average(data, period): """ Simple Moving Average. Formula: SUM(data / N) """ catch_errors.check_for_period_error(data, period) # Mean of Empty Slice RuntimeWarning doesn't affect output so it is # supressed with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) sma = [np.mean(data[idx-(period-1):idx+1]) for idx in range(0, len(data))] sma = fill_for_noncomputable_vals(data, sma) return sma
[ "def", "simple_moving_average", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "# Mean of Empty Slice RuntimeWarning doesn't affect output so it is", "# supressed", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "category", "=", "RuntimeWarning", ")", "sma", "=", "[", "np", ".", "mean", "(", "data", "[", "idx", "-", "(", "period", "-", "1", ")", ":", "idx", "+", "1", "]", ")", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "data", ")", ")", "]", "sma", "=", "fill_for_noncomputable_vals", "(", "data", ",", "sma", ")", "return", "sma" ]
Simple Moving Average. Formula: SUM(data / N)
[ "Simple", "Moving", "Average", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/simple_moving_average.py#L9-L23
234,359
kylejusticemagnuson/pyti
pyti/average_true_range_percent.py
average_true_range_percent
def average_true_range_percent(close_data, period): """ Average True Range Percent. Formula: ATRP = (ATR / CLOSE) * 100 """ catch_errors.check_for_period_error(close_data, period) atrp = (atr(close_data, period) / np.array(close_data)) * 100 return atrp
python
def average_true_range_percent(close_data, period): """ Average True Range Percent. Formula: ATRP = (ATR / CLOSE) * 100 """ catch_errors.check_for_period_error(close_data, period) atrp = (atr(close_data, period) / np.array(close_data)) * 100 return atrp
[ "def", "average_true_range_percent", "(", "close_data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "close_data", ",", "period", ")", "atrp", "=", "(", "atr", "(", "close_data", ",", "period", ")", "/", "np", ".", "array", "(", "close_data", ")", ")", "*", "100", "return", "atrp" ]
Average True Range Percent. Formula: ATRP = (ATR / CLOSE) * 100
[ "Average", "True", "Range", "Percent", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/average_true_range_percent.py#L9-L18
234,360
kylejusticemagnuson/pyti
pyti/on_balance_volume.py
on_balance_volume
def on_balance_volume(close_data, volume): """ On Balance Volume. Formula: start = 1 if CLOSEt > CLOSEt-1 obv = obvt-1 + volumet elif CLOSEt < CLOSEt-1 obv = obvt-1 - volumet elif CLOSEt == CLOSTt-1 obv = obvt-1 """ catch_errors.check_for_input_len_diff(close_data, volume) obv = np.zeros(len(volume)) obv[0] = 1 for idx in range(1, len(obv)): if close_data[idx] > close_data[idx-1]: obv[idx] = obv[idx-1] + volume[idx] elif close_data[idx] < close_data[idx-1]: obv[idx] = obv[idx-1] - volume[idx] elif close_data[idx] == close_data[idx-1]: obv[idx] = obv[idx-1] return obv
python
def on_balance_volume(close_data, volume): """ On Balance Volume. Formula: start = 1 if CLOSEt > CLOSEt-1 obv = obvt-1 + volumet elif CLOSEt < CLOSEt-1 obv = obvt-1 - volumet elif CLOSEt == CLOSTt-1 obv = obvt-1 """ catch_errors.check_for_input_len_diff(close_data, volume) obv = np.zeros(len(volume)) obv[0] = 1 for idx in range(1, len(obv)): if close_data[idx] > close_data[idx-1]: obv[idx] = obv[idx-1] + volume[idx] elif close_data[idx] < close_data[idx-1]: obv[idx] = obv[idx-1] - volume[idx] elif close_data[idx] == close_data[idx-1]: obv[idx] = obv[idx-1] return obv
[ "def", "on_balance_volume", "(", "close_data", ",", "volume", ")", ":", "catch_errors", ".", "check_for_input_len_diff", "(", "close_data", ",", "volume", ")", "obv", "=", "np", ".", "zeros", "(", "len", "(", "volume", ")", ")", "obv", "[", "0", "]", "=", "1", "for", "idx", "in", "range", "(", "1", ",", "len", "(", "obv", ")", ")", ":", "if", "close_data", "[", "idx", "]", ">", "close_data", "[", "idx", "-", "1", "]", ":", "obv", "[", "idx", "]", "=", "obv", "[", "idx", "-", "1", "]", "+", "volume", "[", "idx", "]", "elif", "close_data", "[", "idx", "]", "<", "close_data", "[", "idx", "-", "1", "]", ":", "obv", "[", "idx", "]", "=", "obv", "[", "idx", "-", "1", "]", "-", "volume", "[", "idx", "]", "elif", "close_data", "[", "idx", "]", "==", "close_data", "[", "idx", "-", "1", "]", ":", "obv", "[", "idx", "]", "=", "obv", "[", "idx", "-", "1", "]", "return", "obv" ]
On Balance Volume. Formula: start = 1 if CLOSEt > CLOSEt-1 obv = obvt-1 + volumet elif CLOSEt < CLOSEt-1 obv = obvt-1 - volumet elif CLOSEt == CLOSTt-1 obv = obvt-1
[ "On", "Balance", "Volume", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/on_balance_volume.py#L7-L30
234,361
kylejusticemagnuson/pyti
pyti/rate_of_change.py
rate_of_change
def rate_of_change(data, period): """ Rate of Change. Formula: (Close - Close n periods ago) / (Close n periods ago) * 100 """ catch_errors.check_for_period_error(data, period) rocs = [((data[idx] - data[idx - (period - 1)]) / data[idx - (period - 1)]) * 100 for idx in range(period - 1, len(data))] rocs = fill_for_noncomputable_vals(data, rocs) return rocs
python
def rate_of_change(data, period): """ Rate of Change. Formula: (Close - Close n periods ago) / (Close n periods ago) * 100 """ catch_errors.check_for_period_error(data, period) rocs = [((data[idx] - data[idx - (period - 1)]) / data[idx - (period - 1)]) * 100 for idx in range(period - 1, len(data))] rocs = fill_for_noncomputable_vals(data, rocs) return rocs
[ "def", "rate_of_change", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "rocs", "=", "[", "(", "(", "data", "[", "idx", "]", "-", "data", "[", "idx", "-", "(", "period", "-", "1", ")", "]", ")", "/", "data", "[", "idx", "-", "(", "period", "-", "1", ")", "]", ")", "*", "100", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "rocs", "=", "fill_for_noncomputable_vals", "(", "data", ",", "rocs", ")", "return", "rocs" ]
Rate of Change. Formula: (Close - Close n periods ago) / (Close n periods ago) * 100
[ "Rate", "of", "Change", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/rate_of_change.py#L7-L19
234,362
kylejusticemagnuson/pyti
pyti/average_true_range.py
average_true_range
def average_true_range(close_data, period): """ Average True Range. Formula: ATRt = ATRt-1 * (n - 1) + TRt / n """ tr = true_range(close_data, period) atr = smoothed_moving_average(tr, period) atr[0:period-1] = tr[0:period-1] return atr
python
def average_true_range(close_data, period): """ Average True Range. Formula: ATRt = ATRt-1 * (n - 1) + TRt / n """ tr = true_range(close_data, period) atr = smoothed_moving_average(tr, period) atr[0:period-1] = tr[0:period-1] return atr
[ "def", "average_true_range", "(", "close_data", ",", "period", ")", ":", "tr", "=", "true_range", "(", "close_data", ",", "period", ")", "atr", "=", "smoothed_moving_average", "(", "tr", ",", "period", ")", "atr", "[", "0", ":", "period", "-", "1", "]", "=", "tr", "[", "0", ":", "period", "-", "1", "]", "return", "atr" ]
Average True Range. Formula: ATRt = ATRt-1 * (n - 1) + TRt / n
[ "Average", "True", "Range", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/average_true_range.py#L8-L18
234,363
kylejusticemagnuson/pyti
pyti/relative_strength_index.py
relative_strength_index
def relative_strength_index(data, period): """ Relative Strength Index. Formula: RSI = 100 - (100 / 1 + (prevGain/prevLoss)) """ catch_errors.check_for_period_error(data, period) period = int(period) changes = [data_tup[1] - data_tup[0] for data_tup in zip(data[::1], data[1::1])] filtered_gain = [val < 0 for val in changes] gains = [0 if filtered_gain[idx] is True else changes[idx] for idx in range(0, len(filtered_gain))] filtered_loss = [val > 0 for val in changes] losses = [0 if filtered_loss[idx] is True else abs(changes[idx]) for idx in range(0, len(filtered_loss))] avg_gain = np.mean(gains[:period]) avg_loss = np.mean(losses[:period]) rsi = [] if avg_loss == 0: rsi.append(100) else: rs = avg_gain / avg_loss rsi.append(100 - (100 / (1 + rs))) for idx in range(1, len(data) - period): avg_gain = ((avg_gain * (period - 1) + gains[idx + (period - 1)]) / period) avg_loss = ((avg_loss * (period - 1) + losses[idx + (period - 1)]) / period) if avg_loss == 0: rsi.append(100) else: rs = avg_gain / avg_loss rsi.append(100 - (100 / (1 + rs))) rsi = fill_for_noncomputable_vals(data, rsi) return rsi
python
def relative_strength_index(data, period): """ Relative Strength Index. Formula: RSI = 100 - (100 / 1 + (prevGain/prevLoss)) """ catch_errors.check_for_period_error(data, period) period = int(period) changes = [data_tup[1] - data_tup[0] for data_tup in zip(data[::1], data[1::1])] filtered_gain = [val < 0 for val in changes] gains = [0 if filtered_gain[idx] is True else changes[idx] for idx in range(0, len(filtered_gain))] filtered_loss = [val > 0 for val in changes] losses = [0 if filtered_loss[idx] is True else abs(changes[idx]) for idx in range(0, len(filtered_loss))] avg_gain = np.mean(gains[:period]) avg_loss = np.mean(losses[:period]) rsi = [] if avg_loss == 0: rsi.append(100) else: rs = avg_gain / avg_loss rsi.append(100 - (100 / (1 + rs))) for idx in range(1, len(data) - period): avg_gain = ((avg_gain * (period - 1) + gains[idx + (period - 1)]) / period) avg_loss = ((avg_loss * (period - 1) + losses[idx + (period - 1)]) / period) if avg_loss == 0: rsi.append(100) else: rs = avg_gain / avg_loss rsi.append(100 - (100 / (1 + rs))) rsi = fill_for_noncomputable_vals(data, rsi) return rsi
[ "def", "relative_strength_index", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "changes", "=", "[", "data_tup", "[", "1", "]", "-", "data_tup", "[", "0", "]", "for", "data_tup", "in", "zip", "(", "data", "[", ":", ":", "1", "]", ",", "data", "[", "1", ":", ":", "1", "]", ")", "]", "filtered_gain", "=", "[", "val", "<", "0", "for", "val", "in", "changes", "]", "gains", "=", "[", "0", "if", "filtered_gain", "[", "idx", "]", "is", "True", "else", "changes", "[", "idx", "]", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "filtered_gain", ")", ")", "]", "filtered_loss", "=", "[", "val", ">", "0", "for", "val", "in", "changes", "]", "losses", "=", "[", "0", "if", "filtered_loss", "[", "idx", "]", "is", "True", "else", "abs", "(", "changes", "[", "idx", "]", ")", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "filtered_loss", ")", ")", "]", "avg_gain", "=", "np", ".", "mean", "(", "gains", "[", ":", "period", "]", ")", "avg_loss", "=", "np", ".", "mean", "(", "losses", "[", ":", "period", "]", ")", "rsi", "=", "[", "]", "if", "avg_loss", "==", "0", ":", "rsi", ".", "append", "(", "100", ")", "else", ":", "rs", "=", "avg_gain", "/", "avg_loss", "rsi", ".", "append", "(", "100", "-", "(", "100", "/", "(", "1", "+", "rs", ")", ")", ")", "for", "idx", "in", "range", "(", "1", ",", "len", "(", "data", ")", "-", "period", ")", ":", "avg_gain", "=", "(", "(", "avg_gain", "*", "(", "period", "-", "1", ")", "+", "gains", "[", "idx", "+", "(", "period", "-", "1", ")", "]", ")", "/", "period", ")", "avg_loss", "=", "(", "(", "avg_loss", "*", "(", "period", "-", "1", ")", "+", "losses", "[", "idx", "+", "(", "period", "-", "1", ")", "]", ")", "/", "period", ")", "if", "avg_loss", "==", "0", ":", "rsi", ".", "append", "(", "100", ")", "else", ":", "rs", "=", "avg_gain", "/", "avg_loss", "rsi", ".", "append", "(", "100", "-", "(", "100", "/", "(", "1", 
"+", "rs", ")", ")", ")", "rsi", "=", "fill_for_noncomputable_vals", "(", "data", ",", "rsi", ")", "return", "rsi" ]
Relative Strength Index. Formula: RSI = 100 - (100 / 1 + (prevGain/prevLoss))
[ "Relative", "Strength", "Index", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/relative_strength_index.py#L9-L51
234,364
kylejusticemagnuson/pyti
pyti/vertical_horizontal_filter.py
vertical_horizontal_filter
def vertical_horizontal_filter(data, period): """ Vertical Horizontal Filter. Formula: ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1)) """ catch_errors.check_for_period_error(data, period) vhf = [abs(np.max(data[idx+1-period:idx+1]) - np.min(data[idx+1-period:idx+1])) / sum([abs(data[idx+1-period:idx+1][i] - data[idx+1-period:idx+1][i-1]) for i in range(0, len(data[idx+1-period:idx+1]))]) for idx in range(period - 1, len(data))] vhf = fill_for_noncomputable_vals(data, vhf) return vhf
python
def vertical_horizontal_filter(data, period): """ Vertical Horizontal Filter. Formula: ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1)) """ catch_errors.check_for_period_error(data, period) vhf = [abs(np.max(data[idx+1-period:idx+1]) - np.min(data[idx+1-period:idx+1])) / sum([abs(data[idx+1-period:idx+1][i] - data[idx+1-period:idx+1][i-1]) for i in range(0, len(data[idx+1-period:idx+1]))]) for idx in range(period - 1, len(data))] vhf = fill_for_noncomputable_vals(data, vhf) return vhf
[ "def", "vertical_horizontal_filter", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "vhf", "=", "[", "abs", "(", "np", ".", "max", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "-", "np", ".", "min", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", "/", "sum", "(", "[", "abs", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", "[", "i", "]", "-", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", "[", "i", "-", "1", "]", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "vhf", "=", "fill_for_noncomputable_vals", "(", "data", ",", "vhf", ")", "return", "vhf" ]
Vertical Horizontal Filter. Formula: ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1))
[ "Vertical", "Horizontal", "Filter", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/vertical_horizontal_filter.py#L8-L22
234,365
kylejusticemagnuson/pyti
pyti/ultimate_oscillator.py
buying_pressure
def buying_pressure(close_data, low_data): """ Buying Pressure. Formula: BP = current close - min() """ catch_errors.check_for_input_len_diff(close_data, low_data) bp = [close_data[idx] - np.min([low_data[idx], close_data[idx-1]]) for idx in range(1, len(close_data))] bp = fill_for_noncomputable_vals(close_data, bp) return bp
python
def buying_pressure(close_data, low_data): """ Buying Pressure. Formula: BP = current close - min() """ catch_errors.check_for_input_len_diff(close_data, low_data) bp = [close_data[idx] - np.min([low_data[idx], close_data[idx-1]]) for idx in range(1, len(close_data))] bp = fill_for_noncomputable_vals(close_data, bp) return bp
[ "def", "buying_pressure", "(", "close_data", ",", "low_data", ")", ":", "catch_errors", ".", "check_for_input_len_diff", "(", "close_data", ",", "low_data", ")", "bp", "=", "[", "close_data", "[", "idx", "]", "-", "np", ".", "min", "(", "[", "low_data", "[", "idx", "]", ",", "close_data", "[", "idx", "-", "1", "]", "]", ")", "for", "idx", "in", "range", "(", "1", ",", "len", "(", "close_data", ")", ")", "]", "bp", "=", "fill_for_noncomputable_vals", "(", "close_data", ",", "bp", ")", "return", "bp" ]
Buying Pressure. Formula: BP = current close - min()
[ "Buying", "Pressure", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/ultimate_oscillator.py#L9-L19
234,366
kylejusticemagnuson/pyti
pyti/ultimate_oscillator.py
ultimate_oscillator
def ultimate_oscillator(close_data, low_data): """ Ultimate Oscillator. Formula: UO = 100 * ((4 * AVG7) + (2 * AVG14) + AVG28) / (4 + 2 + 1) """ a7 = 4 * average_7(close_data, low_data) a14 = 2 * average_14(close_data, low_data) a28 = average_28(close_data, low_data) uo = 100 * ((a7 + a14 + a28) / 7) return uo
python
def ultimate_oscillator(close_data, low_data): """ Ultimate Oscillator. Formula: UO = 100 * ((4 * AVG7) + (2 * AVG14) + AVG28) / (4 + 2 + 1) """ a7 = 4 * average_7(close_data, low_data) a14 = 2 * average_14(close_data, low_data) a28 = average_28(close_data, low_data) uo = 100 * ((a7 + a14 + a28) / 7) return uo
[ "def", "ultimate_oscillator", "(", "close_data", ",", "low_data", ")", ":", "a7", "=", "4", "*", "average_7", "(", "close_data", ",", "low_data", ")", "a14", "=", "2", "*", "average_14", "(", "close_data", ",", "low_data", ")", "a28", "=", "average_28", "(", "close_data", ",", "low_data", ")", "uo", "=", "100", "*", "(", "(", "a7", "+", "a14", "+", "a28", ")", "/", "7", ")", "return", "uo" ]
Ultimate Oscillator. Formula: UO = 100 * ((4 * AVG7) + (2 * AVG14) + AVG28) / (4 + 2 + 1)
[ "Ultimate", "Oscillator", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/ultimate_oscillator.py#L62-L73
234,367
kylejusticemagnuson/pyti
pyti/aroon.py
aroon_up
def aroon_up(data, period): """ Aroon Up. Formula: AROONUP = (((PERIOD) - (PERIODS since PERIOD high)) / (PERIOD)) * 100 """ catch_errors.check_for_period_error(data, period) period = int(period) a_up = [((period - list(reversed(data[idx+1-period:idx+1])).index(np.max(data[idx+1-period:idx+1]))) / float(period)) * 100 for idx in range(period-1, len(data))] a_up = fill_for_noncomputable_vals(data, a_up) return a_up
python
def aroon_up(data, period): """ Aroon Up. Formula: AROONUP = (((PERIOD) - (PERIODS since PERIOD high)) / (PERIOD)) * 100 """ catch_errors.check_for_period_error(data, period) period = int(period) a_up = [((period - list(reversed(data[idx+1-period:idx+1])).index(np.max(data[idx+1-period:idx+1]))) / float(period)) * 100 for idx in range(period-1, len(data))] a_up = fill_for_noncomputable_vals(data, a_up) return a_up
[ "def", "aroon_up", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "a_up", "=", "[", "(", "(", "period", "-", "list", "(", "reversed", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", ".", "index", "(", "np", ".", "max", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", ")", "/", "float", "(", "period", ")", ")", "*", "100", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "a_up", "=", "fill_for_noncomputable_vals", "(", "data", ",", "a_up", ")", "return", "a_up" ]
Aroon Up. Formula: AROONUP = (((PERIOD) - (PERIODS since PERIOD high)) / (PERIOD)) * 100
[ "Aroon", "Up", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/aroon.py#L8-L22
234,368
kylejusticemagnuson/pyti
pyti/aroon.py
aroon_down
def aroon_down(data, period): """ Aroon Down. Formula: AROONDWN = (((PERIOD) - (PERIODS SINCE PERIOD LOW)) / (PERIOD)) * 100 """ catch_errors.check_for_period_error(data, period) period = int(period) a_down = [((period - list(reversed(data[idx+1-period:idx+1])).index(np.min(data[idx+1-period:idx+1]))) / float(period)) * 100 for idx in range(period-1, len(data))] a_down = fill_for_noncomputable_vals(data, a_down) return a_down
python
def aroon_down(data, period): """ Aroon Down. Formula: AROONDWN = (((PERIOD) - (PERIODS SINCE PERIOD LOW)) / (PERIOD)) * 100 """ catch_errors.check_for_period_error(data, period) period = int(period) a_down = [((period - list(reversed(data[idx+1-period:idx+1])).index(np.min(data[idx+1-period:idx+1]))) / float(period)) * 100 for idx in range(period-1, len(data))] a_down = fill_for_noncomputable_vals(data, a_down) return a_down
[ "def", "aroon_down", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "a_down", "=", "[", "(", "(", "period", "-", "list", "(", "reversed", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", ".", "index", "(", "np", ".", "min", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", ")", "/", "float", "(", "period", ")", ")", "*", "100", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "a_down", "=", "fill_for_noncomputable_vals", "(", "data", ",", "a_down", ")", "return", "a_down" ]
Aroon Down. Formula: AROONDWN = (((PERIOD) - (PERIODS SINCE PERIOD LOW)) / (PERIOD)) * 100
[ "Aroon", "Down", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/aroon.py#L25-L39
234,369
kylejusticemagnuson/pyti
pyti/price_channels.py
upper_price_channel
def upper_price_channel(data, period, upper_percent): """ Upper Price Channel. Formula: upc = EMA(t) * (1 + upper_percent / 100) """ catch_errors.check_for_period_error(data, period) emas = ema(data, period) upper_channel = [val * (1+float(upper_percent)/100) for val in emas] return upper_channel
python
def upper_price_channel(data, period, upper_percent): """ Upper Price Channel. Formula: upc = EMA(t) * (1 + upper_percent / 100) """ catch_errors.check_for_period_error(data, period) emas = ema(data, period) upper_channel = [val * (1+float(upper_percent)/100) for val in emas] return upper_channel
[ "def", "upper_price_channel", "(", "data", ",", "period", ",", "upper_percent", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "emas", "=", "ema", "(", "data", ",", "period", ")", "upper_channel", "=", "[", "val", "*", "(", "1", "+", "float", "(", "upper_percent", ")", "/", "100", ")", "for", "val", "in", "emas", "]", "return", "upper_channel" ]
Upper Price Channel. Formula: upc = EMA(t) * (1 + upper_percent / 100)
[ "Upper", "Price", "Channel", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/price_channels.py#L8-L19
234,370
kylejusticemagnuson/pyti
pyti/price_channels.py
lower_price_channel
def lower_price_channel(data, period, lower_percent): """ Lower Price Channel. Formula: lpc = EMA(t) * (1 - lower_percent / 100) """ catch_errors.check_for_period_error(data, period) emas = ema(data, period) lower_channel = [val * (1-float(lower_percent)/100) for val in emas] return lower_channel
python
def lower_price_channel(data, period, lower_percent): """ Lower Price Channel. Formula: lpc = EMA(t) * (1 - lower_percent / 100) """ catch_errors.check_for_period_error(data, period) emas = ema(data, period) lower_channel = [val * (1-float(lower_percent)/100) for val in emas] return lower_channel
[ "def", "lower_price_channel", "(", "data", ",", "period", ",", "lower_percent", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "emas", "=", "ema", "(", "data", ",", "period", ")", "lower_channel", "=", "[", "val", "*", "(", "1", "-", "float", "(", "lower_percent", ")", "/", "100", ")", "for", "val", "in", "emas", "]", "return", "lower_channel" ]
Lower Price Channel. Formula: lpc = EMA(t) * (1 - lower_percent / 100)
[ "Lower", "Price", "Channel", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/price_channels.py#L22-L33
234,371
kylejusticemagnuson/pyti
pyti/exponential_moving_average.py
exponential_moving_average
def exponential_moving_average(data, period): """ Exponential Moving Average. Formula: p0 + (1 - w) * p1 + (1 - w)^2 * p2 + (1 + w)^3 * p3 +... / 1 + (1 - w) + (1 - w)^2 + (1 - w)^3 +... where: w = 2 / (N + 1) """ catch_errors.check_for_period_error(data, period) emas = [exponential_moving_average_helper( data[idx - period + 1:idx + 1], period) for idx in range(period - 1, len(data))] emas = fill_for_noncomputable_vals(data, emas) return emas
python
def exponential_moving_average(data, period): """ Exponential Moving Average. Formula: p0 + (1 - w) * p1 + (1 - w)^2 * p2 + (1 + w)^3 * p3 +... / 1 + (1 - w) + (1 - w)^2 + (1 - w)^3 +... where: w = 2 / (N + 1) """ catch_errors.check_for_period_error(data, period) emas = [exponential_moving_average_helper( data[idx - period + 1:idx + 1], period) for idx in range(period - 1, len(data))] emas = fill_for_noncomputable_vals(data, emas) return emas
[ "def", "exponential_moving_average", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "emas", "=", "[", "exponential_moving_average_helper", "(", "data", "[", "idx", "-", "period", "+", "1", ":", "idx", "+", "1", "]", ",", "period", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "emas", "=", "fill_for_noncomputable_vals", "(", "data", ",", "emas", ")", "return", "emas" ]
Exponential Moving Average. Formula: p0 + (1 - w) * p1 + (1 - w)^2 * p2 + (1 + w)^3 * p3 +... / 1 + (1 - w) + (1 - w)^2 + (1 - w)^3 +... where: w = 2 / (N + 1)
[ "Exponential", "Moving", "Average", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/exponential_moving_average.py#L7-L21
234,372
kylejusticemagnuson/pyti
pyti/commodity_channel_index.py
commodity_channel_index
def commodity_channel_index(close_data, high_data, low_data, period): """ Commodity Channel Index. Formula: CCI = (TP - SMA(TP)) / (0.015 * Mean Deviation) """ catch_errors.check_for_input_len_diff(close_data, high_data, low_data) catch_errors.check_for_period_error(close_data, period) tp = typical_price(close_data, high_data, low_data) cci = ((tp - sma(tp, period)) / (0.015 * np.mean(np.absolute(tp - np.mean(tp))))) return cci
python
def commodity_channel_index(close_data, high_data, low_data, period): """ Commodity Channel Index. Formula: CCI = (TP - SMA(TP)) / (0.015 * Mean Deviation) """ catch_errors.check_for_input_len_diff(close_data, high_data, low_data) catch_errors.check_for_period_error(close_data, period) tp = typical_price(close_data, high_data, low_data) cci = ((tp - sma(tp, period)) / (0.015 * np.mean(np.absolute(tp - np.mean(tp))))) return cci
[ "def", "commodity_channel_index", "(", "close_data", ",", "high_data", ",", "low_data", ",", "period", ")", ":", "catch_errors", ".", "check_for_input_len_diff", "(", "close_data", ",", "high_data", ",", "low_data", ")", "catch_errors", ".", "check_for_period_error", "(", "close_data", ",", "period", ")", "tp", "=", "typical_price", "(", "close_data", ",", "high_data", ",", "low_data", ")", "cci", "=", "(", "(", "tp", "-", "sma", "(", "tp", ",", "period", ")", ")", "/", "(", "0.015", "*", "np", ".", "mean", "(", "np", ".", "absolute", "(", "tp", "-", "np", ".", "mean", "(", "tp", ")", ")", ")", ")", ")", "return", "cci" ]
Commodity Channel Index. Formula: CCI = (TP - SMA(TP)) / (0.015 * Mean Deviation)
[ "Commodity", "Channel", "Index", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/commodity_channel_index.py#L10-L22
234,373
kylejusticemagnuson/pyti
pyti/williams_percent_r.py
williams_percent_r
def williams_percent_r(close_data): """ Williams %R. Formula: wr = (HighestHigh - close / HighestHigh - LowestLow) * -100 """ highest_high = np.max(close_data) lowest_low = np.min(close_data) wr = [((highest_high - close) / (highest_high - lowest_low)) * -100 for close in close_data] return wr
python
def williams_percent_r(close_data): """ Williams %R. Formula: wr = (HighestHigh - close / HighestHigh - LowestLow) * -100 """ highest_high = np.max(close_data) lowest_low = np.min(close_data) wr = [((highest_high - close) / (highest_high - lowest_low)) * -100 for close in close_data] return wr
[ "def", "williams_percent_r", "(", "close_data", ")", ":", "highest_high", "=", "np", ".", "max", "(", "close_data", ")", "lowest_low", "=", "np", ".", "min", "(", "close_data", ")", "wr", "=", "[", "(", "(", "highest_high", "-", "close", ")", "/", "(", "highest_high", "-", "lowest_low", ")", ")", "*", "-", "100", "for", "close", "in", "close_data", "]", "return", "wr" ]
Williams %R. Formula: wr = (HighestHigh - close / HighestHigh - LowestLow) * -100
[ "Williams", "%R", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/williams_percent_r.py#L5-L15
234,374
kylejusticemagnuson/pyti
pyti/moving_average_convergence_divergence.py
moving_average_convergence_divergence
def moving_average_convergence_divergence(data, short_period, long_period): """ Moving Average Convergence Divergence. Formula: EMA(DATA, P1) - EMA(DATA, P2) """ catch_errors.check_for_period_error(data, short_period) catch_errors.check_for_period_error(data, long_period) macd = ema(data, short_period) - ema(data, long_period) return macd
python
def moving_average_convergence_divergence(data, short_period, long_period): """ Moving Average Convergence Divergence. Formula: EMA(DATA, P1) - EMA(DATA, P2) """ catch_errors.check_for_period_error(data, short_period) catch_errors.check_for_period_error(data, long_period) macd = ema(data, short_period) - ema(data, long_period) return macd
[ "def", "moving_average_convergence_divergence", "(", "data", ",", "short_period", ",", "long_period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "short_period", ")", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "long_period", ")", "macd", "=", "ema", "(", "data", ",", "short_period", ")", "-", "ema", "(", "data", ",", "long_period", ")", "return", "macd" ]
Moving Average Convergence Divergence. Formula: EMA(DATA, P1) - EMA(DATA, P2)
[ "Moving", "Average", "Convergence", "Divergence", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/moving_average_convergence_divergence.py#L8-L19
234,375
kylejusticemagnuson/pyti
pyti/money_flow_index.py
money_flow_index
def money_flow_index(close_data, high_data, low_data, volume, period): """ Money Flow Index. Formula: MFI = 100 - (100 / (1 + PMF / NMF)) """ catch_errors.check_for_input_len_diff( close_data, high_data, low_data, volume ) catch_errors.check_for_period_error(close_data, period) mf = money_flow(close_data, high_data, low_data, volume) tp = typical_price(close_data, high_data, low_data) flow = [tp[idx] > tp[idx-1] for idx in range(1, len(tp))] pf = [mf[idx] if flow[idx] else 0 for idx in range(0, len(flow))] nf = [mf[idx] if not flow[idx] else 0 for idx in range(0, len(flow))] pmf = [sum(pf[idx+1-period:idx+1]) for idx in range(period-1, len(pf))] nmf = [sum(nf[idx+1-period:idx+1]) for idx in range(period-1, len(nf))] # Dividing by 0 is not an issue, it turns the value into NaN which we would # want in that case with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) money_ratio = np.array(pmf) / np.array(nmf) mfi = 100 - (100 / (1 + money_ratio)) mfi = fill_for_noncomputable_vals(close_data, mfi) return mfi
python
def money_flow_index(close_data, high_data, low_data, volume, period): """ Money Flow Index. Formula: MFI = 100 - (100 / (1 + PMF / NMF)) """ catch_errors.check_for_input_len_diff( close_data, high_data, low_data, volume ) catch_errors.check_for_period_error(close_data, period) mf = money_flow(close_data, high_data, low_data, volume) tp = typical_price(close_data, high_data, low_data) flow = [tp[idx] > tp[idx-1] for idx in range(1, len(tp))] pf = [mf[idx] if flow[idx] else 0 for idx in range(0, len(flow))] nf = [mf[idx] if not flow[idx] else 0 for idx in range(0, len(flow))] pmf = [sum(pf[idx+1-period:idx+1]) for idx in range(period-1, len(pf))] nmf = [sum(nf[idx+1-period:idx+1]) for idx in range(period-1, len(nf))] # Dividing by 0 is not an issue, it turns the value into NaN which we would # want in that case with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) money_ratio = np.array(pmf) / np.array(nmf) mfi = 100 - (100 / (1 + money_ratio)) mfi = fill_for_noncomputable_vals(close_data, mfi) return mfi
[ "def", "money_flow_index", "(", "close_data", ",", "high_data", ",", "low_data", ",", "volume", ",", "period", ")", ":", "catch_errors", ".", "check_for_input_len_diff", "(", "close_data", ",", "high_data", ",", "low_data", ",", "volume", ")", "catch_errors", ".", "check_for_period_error", "(", "close_data", ",", "period", ")", "mf", "=", "money_flow", "(", "close_data", ",", "high_data", ",", "low_data", ",", "volume", ")", "tp", "=", "typical_price", "(", "close_data", ",", "high_data", ",", "low_data", ")", "flow", "=", "[", "tp", "[", "idx", "]", ">", "tp", "[", "idx", "-", "1", "]", "for", "idx", "in", "range", "(", "1", ",", "len", "(", "tp", ")", ")", "]", "pf", "=", "[", "mf", "[", "idx", "]", "if", "flow", "[", "idx", "]", "else", "0", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "flow", ")", ")", "]", "nf", "=", "[", "mf", "[", "idx", "]", "if", "not", "flow", "[", "idx", "]", "else", "0", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "flow", ")", ")", "]", "pmf", "=", "[", "sum", "(", "pf", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "pf", ")", ")", "]", "nmf", "=", "[", "sum", "(", "nf", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "nf", ")", ")", "]", "# Dividing by 0 is not an issue, it turns the value into NaN which we would", "# want in that case", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "category", "=", "RuntimeWarning", ")", "money_ratio", "=", "np", ".", "array", "(", "pmf", ")", "/", "np", ".", "array", "(", "nmf", ")", "mfi", "=", "100", "-", "(", "100", "/", "(", "1", "+", "money_ratio", ")", ")", "mfi", "=", "fill_for_noncomputable_vals", "(", "close_data", ",", "mfi", ")", "return", "mfi" ]
Money Flow Index. Formula: MFI = 100 - (100 / (1 + PMF / NMF))
[ "Money", "Flow", "Index", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/money_flow_index.py#L11-L43
234,376
kylejusticemagnuson/pyti
pyti/typical_price.py
typical_price
def typical_price(close_data, high_data, low_data): """ Typical Price. Formula: TPt = (HIGHt + LOWt + CLOSEt) / 3 """ catch_errors.check_for_input_len_diff(close_data, high_data, low_data) tp = [(high_data[idx] + low_data[idx] + close_data[idx]) / 3 for idx in range(0, len(close_data))] return np.array(tp)
python
def typical_price(close_data, high_data, low_data): """ Typical Price. Formula: TPt = (HIGHt + LOWt + CLOSEt) / 3 """ catch_errors.check_for_input_len_diff(close_data, high_data, low_data) tp = [(high_data[idx] + low_data[idx] + close_data[idx]) / 3 for idx in range(0, len(close_data))] return np.array(tp)
[ "def", "typical_price", "(", "close_data", ",", "high_data", ",", "low_data", ")", ":", "catch_errors", ".", "check_for_input_len_diff", "(", "close_data", ",", "high_data", ",", "low_data", ")", "tp", "=", "[", "(", "high_data", "[", "idx", "]", "+", "low_data", "[", "idx", "]", "+", "close_data", "[", "idx", "]", ")", "/", "3", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "close_data", ")", ")", "]", "return", "np", ".", "array", "(", "tp", ")" ]
Typical Price. Formula: TPt = (HIGHt + LOWt + CLOSEt) / 3
[ "Typical", "Price", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/typical_price.py#L7-L16
234,377
kylejusticemagnuson/pyti
pyti/true_range.py
true_range
def true_range(close_data, period): """ True Range. Formula: TRt = MAX(abs(Ht - Lt), abs(Ht - Ct-1), abs(Lt - Ct-1)) """ catch_errors.check_for_period_error(close_data, period) tr = [np.max([np.max(close_data[idx+1-period:idx+1]) - np.min(close_data[idx+1-period:idx+1]), abs(np.max(close_data[idx+1-period:idx+1]) - close_data[idx-1]), abs(np.min(close_data[idx+1-period:idx+1]) - close_data[idx-1])]) for idx in range(period-1, len(close_data))] tr = fill_for_noncomputable_vals(close_data, tr) return tr
python
def true_range(close_data, period): """ True Range. Formula: TRt = MAX(abs(Ht - Lt), abs(Ht - Ct-1), abs(Lt - Ct-1)) """ catch_errors.check_for_period_error(close_data, period) tr = [np.max([np.max(close_data[idx+1-period:idx+1]) - np.min(close_data[idx+1-period:idx+1]), abs(np.max(close_data[idx+1-period:idx+1]) - close_data[idx-1]), abs(np.min(close_data[idx+1-period:idx+1]) - close_data[idx-1])]) for idx in range(period-1, len(close_data))] tr = fill_for_noncomputable_vals(close_data, tr) return tr
[ "def", "true_range", "(", "close_data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "close_data", ",", "period", ")", "tr", "=", "[", "np", ".", "max", "(", "[", "np", ".", "max", "(", "close_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "-", "np", ".", "min", "(", "close_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ",", "abs", "(", "np", ".", "max", "(", "close_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "-", "close_data", "[", "idx", "-", "1", "]", ")", ",", "abs", "(", "np", ".", "min", "(", "close_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "-", "close_data", "[", "idx", "-", "1", "]", ")", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "close_data", ")", ")", "]", "tr", "=", "fill_for_noncomputable_vals", "(", "close_data", ",", "tr", ")", "return", "tr" ]
True Range. Formula: TRt = MAX(abs(Ht - Lt), abs(Ht - Ct-1), abs(Lt - Ct-1))
[ "True", "Range", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/true_range.py#L8-L24
234,378
kylejusticemagnuson/pyti
pyti/double_smoothed_stochastic.py
double_smoothed_stochastic
def double_smoothed_stochastic(data, period): """ Double Smoothed Stochastic. Formula: dss = 100 * EMA(Close - Lowest Low) / EMA(Highest High - Lowest Low) """ catch_errors.check_for_period_error(data, period) lows = [data[idx] - np.min(data[idx+1-period:idx+1]) for idx in range(period-1, len(data))] sm_lows = ema(ema(lows, period), period) highs = [np.max(data[idx+1-period:idx+1]) - np.min(data[idx+1-period:idx+1]) for idx in range(period-1, len(data))] sm_highs = ema(ema(highs, period), period) dss = (sm_lows / sm_highs) * 100 dss = fill_for_noncomputable_vals(data, dss) return dss
python
def double_smoothed_stochastic(data, period): """ Double Smoothed Stochastic. Formula: dss = 100 * EMA(Close - Lowest Low) / EMA(Highest High - Lowest Low) """ catch_errors.check_for_period_error(data, period) lows = [data[idx] - np.min(data[idx+1-period:idx+1]) for idx in range(period-1, len(data))] sm_lows = ema(ema(lows, period), period) highs = [np.max(data[idx+1-period:idx+1]) - np.min(data[idx+1-period:idx+1]) for idx in range(period-1, len(data))] sm_highs = ema(ema(highs, period), period) dss = (sm_lows / sm_highs) * 100 dss = fill_for_noncomputable_vals(data, dss) return dss
[ "def", "double_smoothed_stochastic", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "lows", "=", "[", "data", "[", "idx", "]", "-", "np", ".", "min", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "sm_lows", "=", "ema", "(", "ema", "(", "lows", ",", "period", ")", ",", "period", ")", "highs", "=", "[", "np", ".", "max", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "-", "np", ".", "min", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "sm_highs", "=", "ema", "(", "ema", "(", "highs", ",", "period", ")", ",", "period", ")", "dss", "=", "(", "sm_lows", "/", "sm_highs", ")", "*", "100", "dss", "=", "fill_for_noncomputable_vals", "(", "data", ",", "dss", ")", "return", "dss" ]
Double Smoothed Stochastic. Formula: dss = 100 * EMA(Close - Lowest Low) / EMA(Highest High - Lowest Low)
[ "Double", "Smoothed", "Stochastic", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/double_smoothed_stochastic.py#L11-L26
234,379
kylejusticemagnuson/pyti
pyti/volume_adjusted_moving_average.py
volume_adjusted_moving_average
def volume_adjusted_moving_average(close_data, volume, period): """ Volume Adjusted Moving Average. Formula: VAMA = SUM(CLOSE * VolumeRatio) / period """ catch_errors.check_for_input_len_diff(close_data, volume) catch_errors.check_for_period_error(close_data, period) avg_vol = np.mean(volume) vol_incr = avg_vol * 0.67 vol_ratio = [val / vol_incr for val in volume] close_vol = np.array(close_data) * vol_ratio vama = [sum(close_vol[idx+1-period:idx+1]) / period for idx in range(period-1, len(close_data))] vama = fill_for_noncomputable_vals(close_data, vama) return vama
python
def volume_adjusted_moving_average(close_data, volume, period): """ Volume Adjusted Moving Average. Formula: VAMA = SUM(CLOSE * VolumeRatio) / period """ catch_errors.check_for_input_len_diff(close_data, volume) catch_errors.check_for_period_error(close_data, period) avg_vol = np.mean(volume) vol_incr = avg_vol * 0.67 vol_ratio = [val / vol_incr for val in volume] close_vol = np.array(close_data) * vol_ratio vama = [sum(close_vol[idx+1-period:idx+1]) / period for idx in range(period-1, len(close_data))] vama = fill_for_noncomputable_vals(close_data, vama) return vama
[ "def", "volume_adjusted_moving_average", "(", "close_data", ",", "volume", ",", "period", ")", ":", "catch_errors", ".", "check_for_input_len_diff", "(", "close_data", ",", "volume", ")", "catch_errors", ".", "check_for_period_error", "(", "close_data", ",", "period", ")", "avg_vol", "=", "np", ".", "mean", "(", "volume", ")", "vol_incr", "=", "avg_vol", "*", "0.67", "vol_ratio", "=", "[", "val", "/", "vol_incr", "for", "val", "in", "volume", "]", "close_vol", "=", "np", ".", "array", "(", "close_data", ")", "*", "vol_ratio", "vama", "=", "[", "sum", "(", "close_vol", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "/", "period", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "close_data", ")", ")", "]", "vama", "=", "fill_for_noncomputable_vals", "(", "close_data", ",", "vama", ")", "return", "vama" ]
Volume Adjusted Moving Average. Formula: VAMA = SUM(CLOSE * VolumeRatio) / period
[ "Volume", "Adjusted", "Moving", "Average", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/volume_adjusted_moving_average.py#L8-L24
234,380
kylejusticemagnuson/pyti
pyti/double_exponential_moving_average.py
double_exponential_moving_average
def double_exponential_moving_average(data, period): """ Double Exponential Moving Average. Formula: DEMA = 2*EMA - EMA(EMA) """ catch_errors.check_for_period_error(data, period) dema = (2 * ema(data, period)) - ema(ema(data, period), period) return dema
python
def double_exponential_moving_average(data, period): """ Double Exponential Moving Average. Formula: DEMA = 2*EMA - EMA(EMA) """ catch_errors.check_for_period_error(data, period) dema = (2 * ema(data, period)) - ema(ema(data, period), period) return dema
[ "def", "double_exponential_moving_average", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "dema", "=", "(", "2", "*", "ema", "(", "data", ",", "period", ")", ")", "-", "ema", "(", "ema", "(", "data", ",", "period", ")", ",", "period", ")", "return", "dema" ]
Double Exponential Moving Average. Formula: DEMA = 2*EMA - EMA(EMA)
[ "Double", "Exponential", "Moving", "Average", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/double_exponential_moving_average.py#L8-L18
234,381
kylejusticemagnuson/pyti
pyti/triangular_moving_average.py
triangular_moving_average
def triangular_moving_average(data, period): """ Triangular Moving Average. Formula: TMA = SMA(SMA()) """ catch_errors.check_for_period_error(data, period) tma = sma(sma(data, period), period) return tma
python
def triangular_moving_average(data, period): """ Triangular Moving Average. Formula: TMA = SMA(SMA()) """ catch_errors.check_for_period_error(data, period) tma = sma(sma(data, period), period) return tma
[ "def", "triangular_moving_average", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "tma", "=", "sma", "(", "sma", "(", "data", ",", "period", ")", ",", "period", ")", "return", "tma" ]
Triangular Moving Average. Formula: TMA = SMA(SMA())
[ "Triangular", "Moving", "Average", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/triangular_moving_average.py#L8-L18
234,382
kylejusticemagnuson/pyti
pyti/weighted_moving_average.py
weighted_moving_average
def weighted_moving_average(data, period): """ Weighted Moving Average. Formula: (P1 + 2 P2 + 3 P3 + ... + n Pn) / K where K = (1+2+...+n) = n(n+1)/2 and Pn is the most recent price """ catch_errors.check_for_period_error(data, period) k = (period * (period + 1)) / 2.0 wmas = [] for idx in range(0, len(data)-period+1): product = [data[idx + period_idx] * (period_idx + 1) for period_idx in range(0, period)] wma = sum(product) / k wmas.append(wma) wmas = fill_for_noncomputable_vals(data, wmas) return wmas
python
def weighted_moving_average(data, period): """ Weighted Moving Average. Formula: (P1 + 2 P2 + 3 P3 + ... + n Pn) / K where K = (1+2+...+n) = n(n+1)/2 and Pn is the most recent price """ catch_errors.check_for_period_error(data, period) k = (period * (period + 1)) / 2.0 wmas = [] for idx in range(0, len(data)-period+1): product = [data[idx + period_idx] * (period_idx + 1) for period_idx in range(0, period)] wma = sum(product) / k wmas.append(wma) wmas = fill_for_noncomputable_vals(data, wmas) return wmas
[ "def", "weighted_moving_average", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "k", "=", "(", "period", "*", "(", "period", "+", "1", ")", ")", "/", "2.0", "wmas", "=", "[", "]", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "data", ")", "-", "period", "+", "1", ")", ":", "product", "=", "[", "data", "[", "idx", "+", "period_idx", "]", "*", "(", "period_idx", "+", "1", ")", "for", "period_idx", "in", "range", "(", "0", ",", "period", ")", "]", "wma", "=", "sum", "(", "product", ")", "/", "k", "wmas", ".", "append", "(", "wma", ")", "wmas", "=", "fill_for_noncomputable_vals", "(", "data", ",", "wmas", ")", "return", "wmas" ]
Weighted Moving Average. Formula: (P1 + 2 P2 + 3 P3 + ... + n Pn) / K where K = (1+2+...+n) = n(n+1)/2 and Pn is the most recent price
[ "Weighted", "Moving", "Average", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/weighted_moving_average.py#L7-L25
234,383
kylejusticemagnuson/pyti
pyti/ichimoku_cloud.py
conversion_base_line_helper
def conversion_base_line_helper(data, period): """ The only real difference between TenkanSen and KijunSen is the period value """ catch_errors.check_for_period_error(data, period) cblh = [(np.max(data[idx+1-period:idx+1]) + np.min(data[idx+1-period:idx+1])) / 2 for idx in range(period-1, len(data))] cblh = fill_for_noncomputable_vals(data, cblh) return cblh
python
def conversion_base_line_helper(data, period): """ The only real difference between TenkanSen and KijunSen is the period value """ catch_errors.check_for_period_error(data, period) cblh = [(np.max(data[idx+1-period:idx+1]) + np.min(data[idx+1-period:idx+1])) / 2 for idx in range(period-1, len(data))] cblh = fill_for_noncomputable_vals(data, cblh) return cblh
[ "def", "conversion_base_line_helper", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "cblh", "=", "[", "(", "np", ".", "max", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "+", "np", ".", "min", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", "/", "2", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "cblh", "=", "fill_for_noncomputable_vals", "(", "data", ",", "cblh", ")", "return", "cblh" ]
The only real difference between TenkanSen and KijunSen is the period value
[ "The", "only", "real", "difference", "between", "TenkanSen", "and", "KijunSen", "is", "the", "period", "value" ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/ichimoku_cloud.py#L8-L17
234,384
kylejusticemagnuson/pyti
pyti/chande_momentum_oscillator.py
chande_momentum_oscillator
def chande_momentum_oscillator(close_data, period): """ Chande Momentum Oscillator. Formula: cmo = 100 * ((sum_up - sum_down) / (sum_up + sum_down)) """ catch_errors.check_for_period_error(close_data, period) close_data = np.array(close_data) moving_period_diffs = [[(close_data[idx+1-period:idx+1][i] - close_data[idx+1-period:idx+1][i-1]) for i in range(1, len(close_data[idx+1-period:idx+1]))] for idx in range(0, len(close_data))] sum_up = [] sum_down = [] for period_diffs in moving_period_diffs: ups = [val if val > 0 else 0 for val in period_diffs] sum_up.append(sum(ups)) downs = [abs(val) if val < 0 else 0 for val in period_diffs] sum_down.append(sum(downs)) sum_up = np.array(sum_up) sum_down = np.array(sum_down) # numpy is able to handle dividing by zero and makes those calculations # nans which is what we want, so we safely suppress the RuntimeWarning with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) cmo = 100 * ((sum_up - sum_down) / (sum_up + sum_down)) return cmo
python
def chande_momentum_oscillator(close_data, period): """ Chande Momentum Oscillator. Formula: cmo = 100 * ((sum_up - sum_down) / (sum_up + sum_down)) """ catch_errors.check_for_period_error(close_data, period) close_data = np.array(close_data) moving_period_diffs = [[(close_data[idx+1-period:idx+1][i] - close_data[idx+1-period:idx+1][i-1]) for i in range(1, len(close_data[idx+1-period:idx+1]))] for idx in range(0, len(close_data))] sum_up = [] sum_down = [] for period_diffs in moving_period_diffs: ups = [val if val > 0 else 0 for val in period_diffs] sum_up.append(sum(ups)) downs = [abs(val) if val < 0 else 0 for val in period_diffs] sum_down.append(sum(downs)) sum_up = np.array(sum_up) sum_down = np.array(sum_down) # numpy is able to handle dividing by zero and makes those calculations # nans which is what we want, so we safely suppress the RuntimeWarning with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) cmo = 100 * ((sum_up - sum_down) / (sum_up + sum_down)) return cmo
[ "def", "chande_momentum_oscillator", "(", "close_data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "close_data", ",", "period", ")", "close_data", "=", "np", ".", "array", "(", "close_data", ")", "moving_period_diffs", "=", "[", "[", "(", "close_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", "[", "i", "]", "-", "close_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", "[", "i", "-", "1", "]", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "close_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", "]", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "close_data", ")", ")", "]", "sum_up", "=", "[", "]", "sum_down", "=", "[", "]", "for", "period_diffs", "in", "moving_period_diffs", ":", "ups", "=", "[", "val", "if", "val", ">", "0", "else", "0", "for", "val", "in", "period_diffs", "]", "sum_up", ".", "append", "(", "sum", "(", "ups", ")", ")", "downs", "=", "[", "abs", "(", "val", ")", "if", "val", "<", "0", "else", "0", "for", "val", "in", "period_diffs", "]", "sum_down", ".", "append", "(", "sum", "(", "downs", ")", ")", "sum_up", "=", "np", ".", "array", "(", "sum_up", ")", "sum_down", "=", "np", ".", "array", "(", "sum_down", ")", "# numpy is able to handle dividing by zero and makes those calculations", "# nans which is what we want, so we safely suppress the RuntimeWarning", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "category", "=", "RuntimeWarning", ")", "cmo", "=", "100", "*", "(", "(", "sum_up", "-", "sum_down", ")", "/", "(", "sum_up", "+", "sum_down", ")", ")", "return", "cmo" ]
Chande Momentum Oscillator. Formula: cmo = 100 * ((sum_up - sum_down) / (sum_up + sum_down))
[ "Chande", "Momentum", "Oscillator", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/chande_momentum_oscillator.py#L8-L37
234,385
kylejusticemagnuson/pyti
pyti/price_oscillator.py
price_oscillator
def price_oscillator(data, short_period, long_period): """ Price Oscillator. Formula: (short EMA - long EMA / long EMA) * 100 """ catch_errors.check_for_period_error(data, short_period) catch_errors.check_for_period_error(data, long_period) ema_short = ema(data, short_period) ema_long = ema(data, long_period) po = ((ema_short - ema_long) / ema_long) * 100 return po
python
def price_oscillator(data, short_period, long_period): """ Price Oscillator. Formula: (short EMA - long EMA / long EMA) * 100 """ catch_errors.check_for_period_error(data, short_period) catch_errors.check_for_period_error(data, long_period) ema_short = ema(data, short_period) ema_long = ema(data, long_period) po = ((ema_short - ema_long) / ema_long) * 100 return po
[ "def", "price_oscillator", "(", "data", ",", "short_period", ",", "long_period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "short_period", ")", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "long_period", ")", "ema_short", "=", "ema", "(", "data", ",", "short_period", ")", "ema_long", "=", "ema", "(", "data", ",", "long_period", ")", "po", "=", "(", "(", "ema_short", "-", "ema_long", ")", "/", "ema_long", ")", "*", "100", "return", "po" ]
Price Oscillator. Formula: (short EMA - long EMA / long EMA) * 100
[ "Price", "Oscillator", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/price_oscillator.py#L8-L22
234,386
kylejusticemagnuson/pyti
pyti/catch_errors.py
check_for_period_error
def check_for_period_error(data, period): """ Check for Period Error. This method checks if the developer is trying to enter a period that is larger than the data set being entered. If that is the case an exception is raised with a custom message that informs the developer that their period is greater than the data set. """ period = int(period) data_len = len(data) if data_len < period: raise Exception("Error: data_len < period")
python
def check_for_period_error(data, period): """ Check for Period Error. This method checks if the developer is trying to enter a period that is larger than the data set being entered. If that is the case an exception is raised with a custom message that informs the developer that their period is greater than the data set. """ period = int(period) data_len = len(data) if data_len < period: raise Exception("Error: data_len < period")
[ "def", "check_for_period_error", "(", "data", ",", "period", ")", ":", "period", "=", "int", "(", "period", ")", "data_len", "=", "len", "(", "data", ")", "if", "data_len", "<", "period", ":", "raise", "Exception", "(", "\"Error: data_len < period\"", ")" ]
Check for Period Error. This method checks if the developer is trying to enter a period that is larger than the data set being entered. If that is the case an exception is raised with a custom message that informs the developer that their period is greater than the data set.
[ "Check", "for", "Period", "Error", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/catch_errors.py#L2-L14
234,387
kylejusticemagnuson/pyti
pyti/catch_errors.py
check_for_input_len_diff
def check_for_input_len_diff(*args): """ Check for Input Length Difference. This method checks if multiple data sets that are inputted are all the same size. If they are not the same length an error is raised with a custom message that informs the developer that the data set's lengths are not the same. """ arrays_len = [len(arr) for arr in args] if not all(a == arrays_len[0] for a in arrays_len): err_msg = ("Error: mismatched data lengths, check to ensure that all " "input data is the same length and valid") raise Exception(err_msg)
python
def check_for_input_len_diff(*args): """ Check for Input Length Difference. This method checks if multiple data sets that are inputted are all the same size. If they are not the same length an error is raised with a custom message that informs the developer that the data set's lengths are not the same. """ arrays_len = [len(arr) for arr in args] if not all(a == arrays_len[0] for a in arrays_len): err_msg = ("Error: mismatched data lengths, check to ensure that all " "input data is the same length and valid") raise Exception(err_msg)
[ "def", "check_for_input_len_diff", "(", "*", "args", ")", ":", "arrays_len", "=", "[", "len", "(", "arr", ")", "for", "arr", "in", "args", "]", "if", "not", "all", "(", "a", "==", "arrays_len", "[", "0", "]", "for", "a", "in", "arrays_len", ")", ":", "err_msg", "=", "(", "\"Error: mismatched data lengths, check to ensure that all \"", "\"input data is the same length and valid\"", ")", "raise", "Exception", "(", "err_msg", ")" ]
Check for Input Length Difference. This method checks if multiple data sets that are inputted are all the same size. If they are not the same length an error is raised with a custom message that informs the developer that the data set's lengths are not the same.
[ "Check", "for", "Input", "Length", "Difference", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/catch_errors.py#L17-L30
234,388
kylejusticemagnuson/pyti
pyti/bollinger_bands.py
upper_bollinger_band
def upper_bollinger_band(data, period, std_mult=2.0): """ Upper Bollinger Band. Formula: u_bb = SMA(t) + STD(SMA(t-n:t)) * std_mult """ catch_errors.check_for_period_error(data, period) period = int(period) simple_ma = sma(data, period)[period-1:] upper_bb = [] for idx in range(len(data) - period + 1): std_dev = np.std(data[idx:idx + period]) upper_bb.append(simple_ma[idx] + std_dev * std_mult) upper_bb = fill_for_noncomputable_vals(data, upper_bb) return np.array(upper_bb)
python
def upper_bollinger_band(data, period, std_mult=2.0): """ Upper Bollinger Band. Formula: u_bb = SMA(t) + STD(SMA(t-n:t)) * std_mult """ catch_errors.check_for_period_error(data, period) period = int(period) simple_ma = sma(data, period)[period-1:] upper_bb = [] for idx in range(len(data) - period + 1): std_dev = np.std(data[idx:idx + period]) upper_bb.append(simple_ma[idx] + std_dev * std_mult) upper_bb = fill_for_noncomputable_vals(data, upper_bb) return np.array(upper_bb)
[ "def", "upper_bollinger_band", "(", "data", ",", "period", ",", "std_mult", "=", "2.0", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "simple_ma", "=", "sma", "(", "data", ",", "period", ")", "[", "period", "-", "1", ":", "]", "upper_bb", "=", "[", "]", "for", "idx", "in", "range", "(", "len", "(", "data", ")", "-", "period", "+", "1", ")", ":", "std_dev", "=", "np", ".", "std", "(", "data", "[", "idx", ":", "idx", "+", "period", "]", ")", "upper_bb", ".", "append", "(", "simple_ma", "[", "idx", "]", "+", "std_dev", "*", "std_mult", ")", "upper_bb", "=", "fill_for_noncomputable_vals", "(", "data", ",", "upper_bb", ")", "return", "np", ".", "array", "(", "upper_bb", ")" ]
Upper Bollinger Band. Formula: u_bb = SMA(t) + STD(SMA(t-n:t)) * std_mult
[ "Upper", "Bollinger", "Band", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/bollinger_bands.py#L11-L29
234,389
kylejusticemagnuson/pyti
pyti/bollinger_bands.py
middle_bollinger_band
def middle_bollinger_band(data, period, std=2.0): """ Middle Bollinger Band. Formula: m_bb = sma() """ catch_errors.check_for_period_error(data, period) period = int(period) mid_bb = sma(data, period) return mid_bb
python
def middle_bollinger_band(data, period, std=2.0): """ Middle Bollinger Band. Formula: m_bb = sma() """ catch_errors.check_for_period_error(data, period) period = int(period) mid_bb = sma(data, period) return mid_bb
[ "def", "middle_bollinger_band", "(", "data", ",", "period", ",", "std", "=", "2.0", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "mid_bb", "=", "sma", "(", "data", ",", "period", ")", "return", "mid_bb" ]
Middle Bollinger Band. Formula: m_bb = sma()
[ "Middle", "Bollinger", "Band", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/bollinger_bands.py#L32-L44
234,390
kylejusticemagnuson/pyti
pyti/bollinger_bands.py
lower_bollinger_band
def lower_bollinger_band(data, period, std=2.0): """ Lower Bollinger Band. Formula: u_bb = SMA(t) - STD(SMA(t-n:t)) * std_mult """ catch_errors.check_for_period_error(data, period) period = int(period) simple_ma = sma(data, period)[period-1:] lower_bb = [] for idx in range(len(data) - period + 1): std_dev = np.std(data[idx:idx + period]) lower_bb.append(simple_ma[idx] - std_dev * std) lower_bb = fill_for_noncomputable_vals(data, lower_bb) return np.array(lower_bb)
python
def lower_bollinger_band(data, period, std=2.0): """ Lower Bollinger Band. Formula: u_bb = SMA(t) - STD(SMA(t-n:t)) * std_mult """ catch_errors.check_for_period_error(data, period) period = int(period) simple_ma = sma(data, period)[period-1:] lower_bb = [] for idx in range(len(data) - period + 1): std_dev = np.std(data[idx:idx + period]) lower_bb.append(simple_ma[idx] - std_dev * std) lower_bb = fill_for_noncomputable_vals(data, lower_bb) return np.array(lower_bb)
[ "def", "lower_bollinger_band", "(", "data", ",", "period", ",", "std", "=", "2.0", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "simple_ma", "=", "sma", "(", "data", ",", "period", ")", "[", "period", "-", "1", ":", "]", "lower_bb", "=", "[", "]", "for", "idx", "in", "range", "(", "len", "(", "data", ")", "-", "period", "+", "1", ")", ":", "std_dev", "=", "np", ".", "std", "(", "data", "[", "idx", ":", "idx", "+", "period", "]", ")", "lower_bb", ".", "append", "(", "simple_ma", "[", "idx", "]", "-", "std_dev", "*", "std", ")", "lower_bb", "=", "fill_for_noncomputable_vals", "(", "data", ",", "lower_bb", ")", "return", "np", ".", "array", "(", "lower_bb", ")" ]
Lower Bollinger Band. Formula: u_bb = SMA(t) - STD(SMA(t-n:t)) * std_mult
[ "Lower", "Bollinger", "Band", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/bollinger_bands.py#L47-L65
234,391
kylejusticemagnuson/pyti
pyti/bollinger_bands.py
percent_bandwidth
def percent_bandwidth(data, period, std=2.0): """ Percent Bandwidth. Formula: %_bw = data() - l_bb() / bb_range() """ catch_errors.check_for_period_error(data, period) period = int(period) percent_bandwidth = ((np.array(data) - lower_bollinger_band(data, period, std)) / bb_range(data, period, std) ) return percent_bandwidth
python
def percent_bandwidth(data, period, std=2.0): """ Percent Bandwidth. Formula: %_bw = data() - l_bb() / bb_range() """ catch_errors.check_for_period_error(data, period) period = int(period) percent_bandwidth = ((np.array(data) - lower_bollinger_band(data, period, std)) / bb_range(data, period, std) ) return percent_bandwidth
[ "def", "percent_bandwidth", "(", "data", ",", "period", ",", "std", "=", "2.0", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "percent_bandwidth", "=", "(", "(", "np", ".", "array", "(", "data", ")", "-", "lower_bollinger_band", "(", "data", ",", "period", ",", "std", ")", ")", "/", "bb_range", "(", "data", ",", "period", ",", "std", ")", ")", "return", "percent_bandwidth" ]
Percent Bandwidth. Formula: %_bw = data() - l_bb() / bb_range()
[ "Percent", "Bandwidth", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/bollinger_bands.py#L102-L117
234,392
kylejusticemagnuson/pyti
pyti/standard_deviation.py
standard_deviation
def standard_deviation(data, period): """ Standard Deviation. Formula: std = sqrt(avg(abs(x - avg(x))^2)) """ catch_errors.check_for_period_error(data, period) stds = [np.std(data[idx+1-period:idx+1], ddof=1) for idx in range(period-1, len(data))] stds = fill_for_noncomputable_vals(data, stds) return stds
python
def standard_deviation(data, period): """ Standard Deviation. Formula: std = sqrt(avg(abs(x - avg(x))^2)) """ catch_errors.check_for_period_error(data, period) stds = [np.std(data[idx+1-period:idx+1], ddof=1) for idx in range(period-1, len(data))] stds = fill_for_noncomputable_vals(data, stds) return stds
[ "def", "standard_deviation", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "stds", "=", "[", "np", ".", "std", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ",", "ddof", "=", "1", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "stds", "=", "fill_for_noncomputable_vals", "(", "data", ",", "stds", ")", "return", "stds" ]
Standard Deviation. Formula: std = sqrt(avg(abs(x - avg(x))^2))
[ "Standard", "Deviation", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/standard_deviation.py#L8-L20
234,393
kylejusticemagnuson/pyti
pyti/detrended_price_oscillator.py
detrended_price_oscillator
def detrended_price_oscillator(data, period): """ Detrended Price Oscillator. Formula: DPO = DATA[i] - Avg(DATA[period/2 + 1]) """ catch_errors.check_for_period_error(data, period) period = int(period) dop = [data[idx] - np.mean(data[idx+1-(int(period/2)+1):idx+1]) for idx in range(period-1, len(data))] dop = fill_for_noncomputable_vals(data, dop) return dop
python
def detrended_price_oscillator(data, period): """ Detrended Price Oscillator. Formula: DPO = DATA[i] - Avg(DATA[period/2 + 1]) """ catch_errors.check_for_period_error(data, period) period = int(period) dop = [data[idx] - np.mean(data[idx+1-(int(period/2)+1):idx+1]) for idx in range(period-1, len(data))] dop = fill_for_noncomputable_vals(data, dop) return dop
[ "def", "detrended_price_oscillator", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "dop", "=", "[", "data", "[", "idx", "]", "-", "np", ".", "mean", "(", "data", "[", "idx", "+", "1", "-", "(", "int", "(", "period", "/", "2", ")", "+", "1", ")", ":", "idx", "+", "1", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "dop", "=", "fill_for_noncomputable_vals", "(", "data", ",", "dop", ")", "return", "dop" ]
Detrended Price Oscillator. Formula: DPO = DATA[i] - Avg(DATA[period/2 + 1])
[ "Detrended", "Price", "Oscillator", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/detrended_price_oscillator.py#L8-L19
234,394
kylejusticemagnuson/pyti
pyti/smoothed_moving_average.py
smoothed_moving_average
def smoothed_moving_average(data, period): """ Smoothed Moving Average. Formula: smma = avg(data(n)) - avg(data(n)/n) + data(t)/n """ catch_errors.check_for_period_error(data, period) series = pd.Series(data) return series.ewm(alpha = 1.0/period).mean().values.flatten()
python
def smoothed_moving_average(data, period): """ Smoothed Moving Average. Formula: smma = avg(data(n)) - avg(data(n)/n) + data(t)/n """ catch_errors.check_for_period_error(data, period) series = pd.Series(data) return series.ewm(alpha = 1.0/period).mean().values.flatten()
[ "def", "smoothed_moving_average", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "series", "=", "pd", ".", "Series", "(", "data", ")", "return", "series", ".", "ewm", "(", "alpha", "=", "1.0", "/", "period", ")", ".", "mean", "(", ")", ".", "values", ".", "flatten", "(", ")" ]
Smoothed Moving Average. Formula: smma = avg(data(n)) - avg(data(n)/n) + data(t)/n
[ "Smoothed", "Moving", "Average", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/smoothed_moving_average.py#L9-L18
234,395
kylejusticemagnuson/pyti
pyti/chaikin_money_flow.py
chaikin_money_flow
def chaikin_money_flow(close_data, high_data, low_data, volume, period): """ Chaikin Money Flow. Formula: CMF = SUM[(((Cn - Ln) - (Hn - Cn)) / (Hn - Ln)) * V] / SUM(Vn) """ catch_errors.check_for_input_len_diff( close_data, high_data, low_data, volume) catch_errors.check_for_period_error(close_data, period) close_data = np.array(close_data) high_data = np.array(high_data) low_data = np.array(low_data) volume = np.array(volume) cmf = [sum((((close_data[idx+1-period:idx+1] - low_data[idx+1-period:idx+1]) - (high_data[idx+1-period:idx+1] - close_data[idx+1-period:idx+1])) / (high_data[idx+1-period:idx+1] - low_data[idx+1-period:idx+1])) * volume[idx+1-period:idx+1]) / sum(volume[idx+1-period:idx+1]) for idx in range(period-1, len(close_data))] cmf = fill_for_noncomputable_vals(close_data, cmf) return cmf
python
def chaikin_money_flow(close_data, high_data, low_data, volume, period): """ Chaikin Money Flow. Formula: CMF = SUM[(((Cn - Ln) - (Hn - Cn)) / (Hn - Ln)) * V] / SUM(Vn) """ catch_errors.check_for_input_len_diff( close_data, high_data, low_data, volume) catch_errors.check_for_period_error(close_data, period) close_data = np.array(close_data) high_data = np.array(high_data) low_data = np.array(low_data) volume = np.array(volume) cmf = [sum((((close_data[idx+1-period:idx+1] - low_data[idx+1-period:idx+1]) - (high_data[idx+1-period:idx+1] - close_data[idx+1-period:idx+1])) / (high_data[idx+1-period:idx+1] - low_data[idx+1-period:idx+1])) * volume[idx+1-period:idx+1]) / sum(volume[idx+1-period:idx+1]) for idx in range(period-1, len(close_data))] cmf = fill_for_noncomputable_vals(close_data, cmf) return cmf
[ "def", "chaikin_money_flow", "(", "close_data", ",", "high_data", ",", "low_data", ",", "volume", ",", "period", ")", ":", "catch_errors", ".", "check_for_input_len_diff", "(", "close_data", ",", "high_data", ",", "low_data", ",", "volume", ")", "catch_errors", ".", "check_for_period_error", "(", "close_data", ",", "period", ")", "close_data", "=", "np", ".", "array", "(", "close_data", ")", "high_data", "=", "np", ".", "array", "(", "high_data", ")", "low_data", "=", "np", ".", "array", "(", "low_data", ")", "volume", "=", "np", ".", "array", "(", "volume", ")", "cmf", "=", "[", "sum", "(", "(", "(", "(", "close_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", "-", "low_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "-", "(", "high_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", "-", "close_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", "/", "(", "high_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", "-", "low_data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", "*", "volume", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "/", "sum", "(", "volume", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "close_data", ")", ")", "]", "cmf", "=", "fill_for_noncomputable_vals", "(", "close_data", ",", "cmf", ")", "return", "cmf" ]
Chaikin Money Flow. Formula: CMF = SUM[(((Cn - Ln) - (Hn - Cn)) / (Hn - Ln)) * V] / SUM(Vn)
[ "Chaikin", "Money", "Flow", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/chaikin_money_flow.py#L8-L28
234,396
kylejusticemagnuson/pyti
pyti/hull_moving_average.py
hull_moving_average
def hull_moving_average(data, period): """ Hull Moving Average. Formula: HMA = WMA(2*WMA(n/2) - WMA(n)), sqrt(n) """ catch_errors.check_for_period_error(data, period) hma = wma( 2 * wma(data, int(period/2)) - wma(data, period), int(np.sqrt(period)) ) return hma
python
def hull_moving_average(data, period): """ Hull Moving Average. Formula: HMA = WMA(2*WMA(n/2) - WMA(n)), sqrt(n) """ catch_errors.check_for_period_error(data, period) hma = wma( 2 * wma(data, int(period/2)) - wma(data, period), int(np.sqrt(period)) ) return hma
[ "def", "hull_moving_average", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "hma", "=", "wma", "(", "2", "*", "wma", "(", "data", ",", "int", "(", "period", "/", "2", ")", ")", "-", "wma", "(", "data", ",", "period", ")", ",", "int", "(", "np", ".", "sqrt", "(", "period", ")", ")", ")", "return", "hma" ]
Hull Moving Average. Formula: HMA = WMA(2*WMA(n/2) - WMA(n)), sqrt(n)
[ "Hull", "Moving", "Average", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/hull_moving_average.py#L9-L20
234,397
kylejusticemagnuson/pyti
pyti/standard_variance.py
standard_variance
def standard_variance(data, period): """ Standard Variance. Formula: (Ct - AVGt)^2 / N """ catch_errors.check_for_period_error(data, period) sv = [np.var(data[idx+1-period:idx+1], ddof=1) for idx in range(period-1, len(data))] sv = fill_for_noncomputable_vals(data, sv) return sv
python
def standard_variance(data, period): """ Standard Variance. Formula: (Ct - AVGt)^2 / N """ catch_errors.check_for_period_error(data, period) sv = [np.var(data[idx+1-period:idx+1], ddof=1) for idx in range(period-1, len(data))] sv = fill_for_noncomputable_vals(data, sv) return sv
[ "def", "standard_variance", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "sv", "=", "[", "np", ".", "var", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ",", "ddof", "=", "1", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "sv", "=", "fill_for_noncomputable_vals", "(", "data", ",", "sv", ")", "return", "sv" ]
Standard Variance. Formula: (Ct - AVGt)^2 / N
[ "Standard", "Variance", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/standard_variance.py#L8-L19
234,398
kylejusticemagnuson/pyti
pyti/directional_indicators.py
calculate_up_moves
def calculate_up_moves(high_data): """ Up Move. Formula: UPMOVE = Ht - Ht-1 """ up_moves = [high_data[idx] - high_data[idx-1] for idx in range(1, len(high_data))] return [np.nan] + up_moves
python
def calculate_up_moves(high_data): """ Up Move. Formula: UPMOVE = Ht - Ht-1 """ up_moves = [high_data[idx] - high_data[idx-1] for idx in range(1, len(high_data))] return [np.nan] + up_moves
[ "def", "calculate_up_moves", "(", "high_data", ")", ":", "up_moves", "=", "[", "high_data", "[", "idx", "]", "-", "high_data", "[", "idx", "-", "1", "]", "for", "idx", "in", "range", "(", "1", ",", "len", "(", "high_data", ")", ")", "]", "return", "[", "np", ".", "nan", "]", "+", "up_moves" ]
Up Move. Formula: UPMOVE = Ht - Ht-1
[ "Up", "Move", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/directional_indicators.py#L13-L21
234,399
kylejusticemagnuson/pyti
pyti/directional_indicators.py
calculate_down_moves
def calculate_down_moves(low_data): """ Down Move. Formula: DWNMOVE = Lt-1 - Lt """ down_moves = [low_data[idx-1] - low_data[idx] for idx in range(1, len(low_data))] return [np.nan] + down_moves
python
def calculate_down_moves(low_data): """ Down Move. Formula: DWNMOVE = Lt-1 - Lt """ down_moves = [low_data[idx-1] - low_data[idx] for idx in range(1, len(low_data))] return [np.nan] + down_moves
[ "def", "calculate_down_moves", "(", "low_data", ")", ":", "down_moves", "=", "[", "low_data", "[", "idx", "-", "1", "]", "-", "low_data", "[", "idx", "]", "for", "idx", "in", "range", "(", "1", ",", "len", "(", "low_data", ")", ")", "]", "return", "[", "np", ".", "nan", "]", "+", "down_moves" ]
Down Move. Formula: DWNMOVE = Lt-1 - Lt
[ "Down", "Move", "." ]
2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/directional_indicators.py#L24-L32