code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
LOGGER.debug('SchemaCache.index >>>')

rv = self._seq_no2schema_key
LOGGER.debug('SchemaCache.index <<< %s', rv)
return rv
def index(self) -> dict
Return dict mapping content sequence numbers to schema keys. :return: dict mapping sequence numbers to schema keys
12.300857
6.868917
1.7908
LOGGER.debug('SchemaCache.schema_key_for >>> seq_no: %s', seq_no)

rv = self._seq_no2schema_key.get(seq_no, None)
LOGGER.debug('SchemaCache.schema_key_for <<< %s', rv)
return rv
def schema_key_for(self, seq_no: int) -> SchemaKey
Get schema key for schema by sequence number if known, None for no such schema in cache. :param seq_no: sequence number :return: corresponding schema key or None
3.067198
2.6512
1.156909
LOGGER.debug('SchemaCache.schemata >>>')
LOGGER.debug('SchemaCache.schemata <<<')
return [self._schema_key2schema[s_key] for s_key in self._schema_key2schema]  # keys are schema keys, not seq nos
def schemata(self) -> list
Return list with schemata in cache. :return: list of schemata
6.600978
5.560213
1.187181
LOGGER.debug('SchemaCache.clear >>>')

self._schema_key2schema = {}
self._seq_no2schema_key = {}

LOGGER.debug('SchemaCache.clear <<<')
def clear(self) -> None
Clear the cache.
8.446942
7.111752
1.187744
LOGGER.debug(
    'RevoCacheEntry.get_delta_json >>> rr_delta_builder: %s, fro: %s, to: %s',
    rr_delta_builder.__name__,
    fro,
    to)

rv = await self._get_update(rr_delta_builder, fro, to, True)
LOGGER.debug('RevoCacheEntry.get_delta_json <<< %s', rv)
return rv
async def get_delta_json( self, rr_delta_builder: Callable[['HolderProver', str, int, int, dict], Awaitable[Tuple[str, int]]], fro: int, to: int) -> (str, int)
Get rev reg delta json, and its timestamp on the distributed ledger, from cached rev reg delta frames list or distributed ledger, updating cache as necessary. Raise BadRevStateTime if caller asks for a delta to the future. On return of any previously existing rev reg delta frame, always update its query time beforehand. :param rr_delta_builder: callback to build rev reg delta if need be (specify anchor instance's _build_rr_delta()) :param fro: least time (epoch seconds) of interest; lower-bounds 'to' on frame housing return data :param to: greatest time (epoch seconds) of interest; upper-bounds returned revocation delta timestamp :return: rev reg delta json and ledger timestamp (epoch seconds)
2.808626
2.8879
0.972549
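A usage sketch for get_delta_json() above: fetch the freshest delta no later than now. It assumes an open von_anchor HolderProver anchor and a rev reg id already present in REVO_CACHE; the helper name is hypothetical, while _build_rr_delta is the builder the docstring itself prescribes.

from time import time

async def latest_delta_json(hprover: 'HolderProver', rr_id: str) -> (str, int):
    now = int(time())
    # delta json plus its ledger timestamp, from cache or ledger as needed
    return await REVO_CACHE[rr_id].get_delta_json(hprover._build_rr_delta, now, now)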
LOGGER.debug(
    'RevoCacheEntry.get_state_json >>> rr_state_builder: %s, fro: %s, to: %s',
    rr_state_builder.__name__,
    fro,
    to)

rv = await self._get_update(rr_state_builder, fro, to, False)
LOGGER.debug('RevoCacheEntry.get_state_json <<< %s', rv)
return rv
async def get_state_json( self, rr_state_builder: Callable[['Verifier', str, int], Awaitable[Tuple[str, int]]], fro: int, to: int) -> (str, int)
Get rev reg state json, and its timestamp on the distributed ledger, from cached rev reg state frames list or distributed ledger, updating cache as necessary. Raise BadRevStateTime if caller asks for a state in the future. On return of any previously existing rev reg state frame, always update its query time beforehand. :param rr_state_builder: callback to build rev reg state if need be (specify anchor instance's _build_rr_state()) :param fro: least time (epoch seconds) of interest; lower-bounds 'to' on frame housing return data :param to: greatest time (epoch seconds) of interest; upper-bounds returned revocation state timestamp :return: rev reg state json and ledger timestamp (epoch seconds)
2.913775
2.878708
1.012182
LOGGER.debug('clear >>>')

with SCHEMA_CACHE.lock:
    SCHEMA_CACHE.clear()
with CRED_DEF_CACHE.lock:
    CRED_DEF_CACHE.clear()
with REVO_CACHE.lock:
    REVO_CACHE.clear()

LOGGER.debug('clear <<<')
def clear() -> None
Clear all archivable caches in memory.
4.377911
3.712078
1.179369
LOGGER.debug('archive >>> base_dir: %s', base_dir)

rv = int(time())
timestamp_dir = join(base_dir, str(rv))
makedirs(timestamp_dir, exist_ok=True)

with SCHEMA_CACHE.lock:
    with open(join(timestamp_dir, 'schema'), 'w') as archive:
        print(json.dumps(SCHEMA_CACHE.schemata()), file=archive)

with CRED_DEF_CACHE.lock:
    with open(join(timestamp_dir, 'cred_def'), 'w') as archive:
        print(json.dumps(CRED_DEF_CACHE), file=archive)

with REVO_CACHE.lock:
    with open(join(timestamp_dir, 'revocation'), 'w') as archive:
        revo_cache_dict = {}
        for rr_id in REVO_CACHE:
            revo_cache_dict[rr_id] = {
                'rev_reg_def': REVO_CACHE[rr_id].rev_reg_def,
                'rr_delta_frames': [vars(f) for f in REVO_CACHE[rr_id].rr_delta_frames],
                'rr_state_frames': [vars(f) for f in REVO_CACHE[rr_id].rr_state_frames]
            }
        print(json.dumps(revo_cache_dict), file=archive)

LOGGER.debug('archive <<< %s', rv)
return rv
def archive(base_dir: str) -> int
Archive schema, cred def, revocation caches to disk as json. :param base_dir: archive base directory :return: timestamp (epoch seconds) used as subdirectory
2.436935
2.107322
1.156414
LOGGER.debug('purge_archives >>> base_dir: %s, retain_latest: %s', base_dir, retain_latest)

if isdir(base_dir):
    timestamps = sorted([int(t) for t in listdir(base_dir) if t.isdigit()])
    if retain_latest and timestamps:
        timestamps.pop()
    for timestamp in timestamps:
        timestamp_dir = join(base_dir, str(timestamp))
        rmtree(timestamp_dir)
        LOGGER.info('Purged archive cache directory %s', timestamp_dir)

LOGGER.debug('purge_archives <<<')
def purge_archives(base_dir: str, retain_latest: bool = False) -> None
Erase all (or nearly all) cache archives. :param base_dir: archive base directory :param retain_latest: retain latest archive if present, purge all others
2.6843
2.413689
1.112115
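archive() and purge_archives() above combine into a snapshot-and-trim pattern; a minimal sketch, assuming the caches are already populated (the base directory is illustrative).

from tempfile import mkdtemp

base_dir = mkdtemp()  # illustrative archive root
timestamp = archive(base_dir)  # snapshot: writes schema, cred_def, revocation files under base_dir/<timestamp>
purge_archives(base_dir, retain_latest=True)  # trim: drop every snapshot except the newest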
rv = {
    canon_pairwise_tag(tag): raw(pairwise.metadata[tag]) for tag in pairwise.metadata or {}
}
rv['~their_did'] = pairwise.their_did
rv['~their_verkey'] = pairwise.their_verkey
rv['~my_did'] = pairwise.my_did
rv['~my_verkey'] = pairwise.my_verkey

if not StorageRecord.ok_tags(rv):
    raise BadRecord('Pairwise metadata {} must map strings to strings'.format(rv))

return rv
def pairwise_info2tags(pairwise: PairwiseInfo) -> dict
Given pairwise info with metadata mapping tags to values, return corresponding indy-sdk non_secrets record tags dict to store same in wallet (via non_secrets) unencrypted (for WQL search options). Canonicalize metadata values to strings via raw() for WQL fitness. Raise BadRecord if metadata does not coerce into non_secrets API tags spec of {str:str}. :param pairwise: pairwise info with metadata dict mapping tags to values :return: corresponding non_secrets tags dict marked for unencrypted storage
4.945764
3.418523
1.446754
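To make the canonicalization above concrete: one metadata entry plus the four DID/verkey fields come back as a flat {str:str} tags dict marked unencrypted via '~' prefixes. All values below are placeholders, and the '~' prefix on the metadata tag assumes canon_pairwise_tag() behaves as the literal '~their_did' keys in the code suggest.

pwise = PairwiseInfo(
    'TheirDid000000000000001',  # placeholder remote DID
    'TheirVerkey00000000000000000000000001',  # placeholder remote verkey
    'MyDid00000000000000001',
    'MyVerkey0000000000000000000000000001',
    {'epoch': 1234567890})
tags = pairwise_info2tags(pwise)
# tags == {
#     '~epoch': '1234567890',  # metadata value canonicalized to string via raw()
#     '~their_did': 'TheirDid000000000000001',
#     '~their_verkey': 'TheirVerkey00000000000000000000000001',
#     '~my_did': 'MyDid00000000000000001',
#     '~my_verkey': 'MyVerkey0000000000000000000000000001'
# }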
return PairwiseInfo(
    storec.id,  # = their did
    storec.value,  # = their verkey
    storec.tags['~my_did'],
    storec.tags['~my_verkey'],
    {
        tag[tag.startswith('~'):]: storec.tags[tag] for tag in (storec.tags or {})  # strip any leading '~'
    })
def storage_record2pairwise_info(storec: StorageRecord) -> PairwiseInfo
Given indy-sdk non_secrets implementation of pairwise storage record dict, return corresponding PairwiseInfo. :param storec: (non-secret) storage record to convert to PairwiseInfo :return: PairwiseInfo on record DIDs, verkeys, metadata
6.51721
5.904422
1.103784
assert {'name', 'id'} & {k for k in config}

return {
    'id': config.get('name', config.get('id')),
    'storage_type': config.get('storage_type', self.default_storage_type),
    'freshness_time': config.get('freshness_time', self.default_freshness_time)
}
def _config2indy(self, config: dict) -> dict
Given a configuration dict with indy and possibly more configuration values, return the corresponding indy wallet configuration dict from current default and input values. :param config: input configuration :return: configuration dict for indy wallet
3.882448
3.491819
1.11187
rv = {k: config.get(k, self._defaults[k]) for k in ('auto_create', 'auto_remove')}
rv['access'] = access or self.default_access
for key in ('seed', 'did', 'link_secret_label'):
    if key in config:
        rv[key] = config[key]
return rv
def _config2von(self, config: dict, access: str = None) -> dict
Given a configuration dict with indy and possibly more configuration values, return the corresponding VON wallet configuration dict from current default and input values. :param config: input configuration :param access: access credentials value :return: configuration dict for VON wallet with VON-specific entries
5.411078
4.074049
1.328182
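A worked example of the merge above; the manager's default values shown (auto_create True, auto_remove False) are illustrative assumptions, not documented defaults.

config = {'name': 'alice', 'seed': '00000000000000000000000000000000', 'auto_remove': True}
von_config = w_mgr._config2von(config, access='secret')
# von_config == {
#     'auto_create': True,   # manager default (assumed)
#     'auto_remove': True,   # from input config
#     'access': 'secret',
#     'seed': '00000000000000000000000000000000'
# }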
LOGGER.debug('WalletManager.create >>> config %s, access %s, replace %s', config, access, replace)

assert {'name', 'id'} & {k for k in config}
wallet_name = config.get('name', config.get('id'))
if replace:
    von_wallet = self.get(config, access)
    if not await von_wallet.remove():
        LOGGER.debug('WalletManager.create <!< Failed to remove wallet %s for replacement', wallet_name)
        raise ExtantWallet('Failed to remove wallet {} for replacement'.format(wallet_name))

indy_config = self._config2indy(config)
von_config = self._config2von(config, access)

rv = Wallet(indy_config, von_config)
await rv.create()
LOGGER.debug('WalletManager.create <<< %s', rv)
return rv
async def create(self, config: dict = None, access: str = None, replace: bool = False) -> Wallet
Create wallet on input name with given configuration and access credential value. Raise ExtantWallet if wallet on input name exists already and replace parameter is False. Raise BadAccess on replacement for bad access credentials value. FAIR WARNING: specifying replace=True attempts to remove any matching wallet before proceeding; to succeed, the existing wallet must use the same access credentials that the input configuration has. :param config: configuration data for both indy-sdk and VON anchor wallet: - 'name' or 'id': wallet name - 'storage_type': storage type - 'freshness_time': freshness time - 'did': (optional) DID to use - 'seed': (optional) seed to use - 'auto_create': whether to create the wallet on first open (persists past close, can work with auto_remove) - 'auto_remove': whether to remove the wallet on next close - 'link_secret_label': (optional) link secret label to use to create link secret :param access: indy wallet access credential ('key') value, if different than default :param replace: whether to replace old wallet if it exists :return: wallet created
3.963847
3.198076
1.239448
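A minimal creation sketch against create() above. WalletManager construction is not shown in this section, so default construction is an assumption; the wallet id and access value are placeholders.

import asyncio

async def main() -> None:
    w_mgr = WalletManager()  # assumption: default construction suffices
    wallet = await w_mgr.create({'id': 'alice', 'auto_remove': False}, access='secret')
    async with wallet:  # Wallet supports async context management, as Wallet.create() later in this section shows
        print(wallet.did, wallet.verkey)

asyncio.run(main())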
LOGGER.debug('WalletManager.get >>> config %s, access %s', config, access)

rv = Wallet(
    self._config2indy(config),
    self._config2von(config, access))

LOGGER.debug('WalletManager.get <<< %s', rv)
return rv
def get(self, config: dict, access: str = None) -> Wallet
Instantiate and return VON anchor wallet object on given configuration, respecting wallet manager default configuration values. :param config: configuration data for both indy-sdk and VON anchor wallet: - 'name' or 'id': wallet name - 'storage_type': storage type - 'freshness_time': freshness time - 'did': (optional) DID to use - 'seed': (optional) seed to use - 'auto_create': whether to create the wallet on first open (persists past close, can work with auto_remove) - 'auto_remove': whether to remove the wallet on next close - 'link_secret_label': (optional) link secret label to use to create link secret :param access: indy access credentials value :return: VON anchor wallet
5.824901
4.672369
1.24667
LOGGER.debug('WalletManager.reseed_local >>> local_wallet %s', local_wallet)

await local_wallet.reseed_init(next_seed)
rv = await local_wallet.reseed_apply()

LOGGER.debug('WalletManager.reseed_local <<< %s', rv)
return rv
async def reseed_local(self, local_wallet: Wallet, next_seed: str = None) -> DIDInfo
Generate and apply new key, in wallet only, for local DID based on input seed (default random). Raise WalletState if wallet is closed. Note that this operation does not update the corresponding NYM on the ledger: for VON anchors anchored to the ledger, use von_anchor.BaseAnchor.reseed(). :param local_wallet: VON anchor wallet without NYM on ledger :param next_seed: incoming replacement seed (default random) :return: DIDInfo with new verification key and metadata for DID
4.27451
3.873799
1.103441
LOGGER.debug('WalletManager.export_wallet >>> von_wallet %s, path %s', von_wallet, path)

if not von_wallet.handle:
    LOGGER.debug('WalletManager.export_wallet <!< Wallet %s is closed', von_wallet.name)
    raise WalletState('Wallet {} is closed'.format(von_wallet.name))

await wallet.export_wallet(
    von_wallet.handle,
    json.dumps({
        'path': path,
        **von_wallet.access_creds
    }))

LOGGER.debug('WalletManager.export_wallet <<<')
async def export_wallet(self, von_wallet: Wallet, path: str) -> None
Export an existing VON anchor wallet. Raise WalletState if wallet is closed. :param von_wallet: open wallet :param path: path to which to export wallet
3.730546
3.011771
1.238655
LOGGER.debug('WalletManager.import_wallet >>> indy_config %s, path: %s', indy_config, path)

try:
    await wallet.import_wallet(
        json.dumps(indy_config),
        json.dumps({'key': access or self.default_access}),
        json.dumps({'path': path, 'key': access or self.default_access}))
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.CommonInvalidStructure:  # indy-sdk raises on bad access
        LOGGER.debug(
            'WalletManager.import_wallet <!< bad access credential value for wallet %s',
            indy_config.get('id', '(no id)'))
        raise BadAccess('Bad access credential value for wallet {}'.format(indy_config.get('id', '(no id)')))
    LOGGER.debug(
        'WalletManager.import_wallet <!< indy error code %s on wallet %s import',
        x_indy.error_code,
        indy_config.get('id', '(no id)'))
    raise

LOGGER.debug('WalletManager.import_wallet <<<')
async def import_wallet(self, indy_config: dict, path: str, access: str = None) -> None
Import a VON anchor wallet. Raise BadAccess on bad access credential value. :param indy_config: indy wallet configuration to use, with: - 'id' - 'storage_type' (optional) - 'storage_config' (optional) :param path: path from which to import wallet file :param access: indy access credentials value (default value from wallet manager)
2.613663
2.29867
1.137033
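export_wallet() and import_wallet() above pair into a round trip; a sketch with an illustrative path. Removing the wallet before import reflects an assumption that indy-sdk refuses to import over an existing wallet; export and import must use the same access credentials.

async def roundtrip(w_mgr: WalletManager, von_wallet: Wallet) -> None:
    path = '/tmp/wallet.export'  # illustrative path
    await w_mgr.export_wallet(von_wallet, path)  # wallet must be open
    await von_wallet.close()
    await w_mgr.remove(von_wallet)  # assumption: the import target must not pre-exist
    await w_mgr.import_wallet({'id': von_wallet.name}, path)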
LOGGER.debug('WalletManager.reset >>> von_wallet %s', von_wallet)

if not von_wallet.handle:
    LOGGER.debug('WalletManager.reset <!< Wallet %s is closed', von_wallet.name)
    raise WalletState('Wallet {} is closed'.format(von_wallet.name))

w_config = von_wallet.config  # wallet under reset, no need to make copy
w_config['did'] = von_wallet.did
w_config['seed'] = seed
w_config['auto_create'] = von_wallet.auto_create  # in case both auto_remove+auto_create set (create every open)
w_config['auto_remove'] = von_wallet.auto_remove

label = await von_wallet.get_link_secret_label()
if label:
    w_config['link_secret_label'] = label

await von_wallet.close()
if not von_wallet.auto_remove:
    await self.remove(von_wallet)

rv = await self.create(w_config, von_wallet.access)
await rv.open()

LOGGER.debug('WalletManager.reset <<< %s', rv)
return rv
async def reset(self, von_wallet: Wallet, seed: str = None) -> Wallet
Close and delete (open) VON anchor wallet and then create, open, and return replacement on current link secret. Note that this operation effectively destroys private keys for keyed data structures such as credential offers or credential definitions. Raise WalletState if the wallet is closed. :param von_wallet: open wallet :param seed: seed to use for new wallet (default random) :return: replacement wallet
4.137765
3.544917
1.167239
LOGGER.debug('WalletManager.remove >>> wallet %s', von_wallet)

await von_wallet.remove()

LOGGER.debug('WalletManager.remove <<<')
async def remove(self, von_wallet: Wallet) -> None
Remove serialized wallet if it exists. Raise WalletState if wallet is open. :param von_wallet: (closed) wallet to remove
6.199957
5.512574
1.124694
LOGGER.debug(
    'WalletManager.register_storage_library >>> storage_type %s, c_library %s, entry_point %s',
    storage_type,
    c_library,
    entry_point)

try:
    stg_lib = CDLL(c_library)
    result = stg_lib[entry_point]()
    if result:
        LOGGER.debug(
            'WalletManager.register_storage_library <!< indy error code %s on storage library entry at %s',
            result,
            entry_point)
        raise IndyError(result)
    LOGGER.info('Loaded storage library type %s (%s)', storage_type, c_library)
except IndyError as x_indy:
    LOGGER.debug(
        'WalletManager.register_storage_library <!< indy error code %s on load of storage library %s %s',
        x_indy.error_code,
        storage_type,
        c_library)
    raise

LOGGER.debug('WalletManager.register_storage_library <<<')
async def register_storage_library(storage_type: str, c_library: str, entry_point: str) -> None
Load a wallet storage plug-in. An indy-sdk wallet storage plug-in is a shared library; relying parties must explicitly load it before creating or opening a wallet with the plug-in. The implementation loads the dynamic library and calls its entry point directly, standing in for the indy-sdk wrapper's register_wallet_storage_library(storage_type: str, c_library: str, fn_pfx: str). :param storage_type: wallet storage type :param c_library: plug-in library :param entry_point: function to initialize the library
2.766637
2.586195
1.069771
if not invalidate:
    return
if kwargs.get("raw", False):
    return
if sender is MigrationRecorder.Migration:
    return
if issubclass(sender, Model):
    obj = kwargs["instance"]
    if isinstance(obj, Model):
        # get_for_model itself is cached
        try:
            ct = ContentType.objects.get_for_model(sender)
        except RuntimeError:
            # This happens when ultracache is being used by another product
            # during a test run.
            return

        if kwargs.get("created", False):
            # Expire cache keys that contain objects of this content type
            key = "ucache-ct-%s" % ct.id
            to_delete = cache.get(key, [])
            if to_delete:
                try:
                    cache.delete_many(to_delete)
                except NotImplementedError:
                    for k in to_delete:
                        cache.delete(k)
            cache.delete(key)

            # Purge paths in reverse caching proxy that contain objects of
            # this content type.
            key = "ucache-ct-pth-%s" % ct.id
            if purger is not None:
                for li in cache.get(key, []):
                    purger(li[0], li[1])
            cache.delete(key)

        else:
            # Expire cache keys
            key = "ucache-%s-%s" % (ct.id, obj.pk)
            to_delete = cache.get(key, [])
            if to_delete:
                try:
                    cache.delete_many(to_delete)
                except NotImplementedError:
                    for k in to_delete:
                        cache.delete(k)
            cache.delete(key)

            # Purge paths in reverse caching proxy
            key = "ucache-pth-%s-%s" % (ct.id, obj.pk)
            if purger is not None:
                for li in cache.get(key, []):
                    purger(li[0], li[1])
            cache.delete(key)
def on_post_save(sender, **kwargs)
Expire ultracache cache keys affected by this object
2.890828
2.750111
1.051168
if not invalidate:
    return
if kwargs.get("raw", False):
    return
if sender is MigrationRecorder.Migration:
    return
if issubclass(sender, Model):
    obj = kwargs["instance"]
    if isinstance(obj, Model):
        # get_for_model itself is cached
        try:
            ct = ContentType.objects.get_for_model(sender)
        except RuntimeError:
            # This happens when ultracache is being used by another product
            # during a test run.
            return

        # Expire cache keys
        key = "ucache-%s-%s" % (ct.id, obj.pk)
        to_delete = cache.get(key, [])
        if to_delete:
            try:
                cache.delete_many(to_delete)
            except NotImplementedError:
                for k in to_delete:
                    cache.delete(k)
        cache.delete(key)

        # Invalidate paths in reverse caching proxy
        key = "ucache-pth-%s-%s" % (ct.id, obj.pk)
        if purger is not None:
            for li in cache.get(key, []):
                purger(li[0], li[1])
        cache.delete(key)
def on_post_delete(sender, **kwargs)
Expire ultracache cache keys affected by this object
4.648169
4.298223
1.081416
LOGGER.debug('Wallet.create_signing_key >>> seed: [SEED], metadata: %s', metadata)

if not self.handle:
    LOGGER.debug('Wallet.create_signing_key <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

try:
    verkey = await crypto.create_key(self.handle, json.dumps({'seed': seed} if seed else {}))
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.WalletItemAlreadyExists:
        LOGGER.debug('Wallet.create_signing_key <!< Verification key already present in wallet %s', self.name)
        raise ExtantRecord('Verification key already present in wallet {}'.format(self.name))
    LOGGER.debug('Wallet.create_signing_key <!< indy-sdk raised error %s', x_indy.error_code)
    raise

await crypto.set_key_metadata(self.handle, verkey, json.dumps(metadata or {}))  # coerce None to empty

rv = KeyInfo(verkey, metadata or {})
LOGGER.debug('Wallet.create_signing_key <<< %s', rv)
return rv
async def create_signing_key(self, seed: str = None, metadata: dict = None) -> KeyInfo
Create a new signing key pair. Raise WalletState if wallet is closed, ExtantRecord if verification key already exists. :param seed: optional seed allowing deterministic key creation :param metadata: optional metadata to store with key pair :return: KeyInfo for new key pair
2.879786
2.306201
1.248714
LOGGER.debug('Wallet.get_signing_key >>> verkey: %s', verkey)

if not self.handle:
    LOGGER.debug('Wallet.get_signing_key <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

try:
    metadata = await crypto.get_key_metadata(self.handle, verkey)
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.WalletItemNotFound:
        LOGGER.debug('Wallet.get_signing_key <!< Verification key %s not in wallet %s', verkey, self.name)
        raise AbsentRecord('Verification key not in wallet {}'.format(self.name))
    LOGGER.debug('Wallet.get_signing_key <!< indy-sdk raised error %s', x_indy.error_code)
    raise

rv = KeyInfo(verkey, json.loads(metadata) if metadata else {})
LOGGER.debug('Wallet.get_signing_key <<< %s', rv)
return rv
async def get_signing_key(self, verkey: str) -> KeyInfo
Get signing key pair for input verification key. Raise WalletState if wallet is closed, AbsentRecord for no such key pair. :param verkey: verification key of key pair :return: KeyInfo for key pair
2.646971
2.27963
1.161141
LOGGER.debug('Wallet.create_local_did >>> seed: [SEED] loc_did: %s metadata: %s', loc_did, metadata)

cfg = {}
if seed:
    cfg['seed'] = seed
if loc_did:
    cfg['did'] = loc_did

if not self.handle:
    LOGGER.debug('Wallet.create_local_did <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

try:
    (created_did, verkey) = await did.create_and_store_my_did(self.handle, json.dumps(cfg))
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.DidAlreadyExistsError:
        LOGGER.debug('Wallet.create_local_did <!< DID %s already present in wallet %s', loc_did, self.name)
        raise ExtantRecord('Local DID {} already present in wallet {}'.format(loc_did, self.name))
    LOGGER.debug('Wallet.create_local_did <!< indy-sdk raised error %s', x_indy.error_code)
    raise

now = int(time())
loc_did_metadata = {**(metadata or {}), 'since': now, 'modified': now}
await did.set_did_metadata(self.handle, created_did, json.dumps(loc_did_metadata))

rv = DIDInfo(created_did, verkey, loc_did_metadata)
LOGGER.debug('Wallet.create_local_did <<< %s', rv)
return rv
async def create_local_did(self, seed: str = None, loc_did: str = None, metadata: dict = None) -> DIDInfo
Create and store a new local DID for use in pairwise DID relations. :param seed: seed from which to create (default random) :param loc_did: local DID value (default None to let indy-sdk generate) :param metadata: metadata to associate with the local DID (operation always sets 'since', 'modified' epoch timestamps) :return: DIDInfo for new local DID
2.46734
2.261248
1.091141
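A short sketch of local DID creation above, assuming an open von_anchor Wallet; the metadata content is a placeholder.

async def demo_local_did(wallet: Wallet) -> None:
    # random seed and indy-generated DID by default;
    # create_local_did adds 'since'/'modified' epoch timestamps itself
    did_info = await wallet.create_local_did(metadata={'note': 'scratch DID'})
    print(did_info.did, did_info.verkey, did_info.metadata)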
LOGGER.debug('Wallet.replace_local_did_metadata >>> loc_did: %s, metadata: %s', loc_did, metadata)

old = await self.get_local_did(loc_did)  # raises exceptions if applicable

now = int(time())
loc_did_metadata = {**(metadata or {}), 'since': (old.metadata or {}).get('since', now), 'modified': now}
try:
    await did.set_did_metadata(self.handle, loc_did, json.dumps(loc_did_metadata))
except IndyError as x_indy:
    LOGGER.debug('Wallet.replace_local_did_metadata <!< indy-sdk raised error %s', x_indy.error_code)
    raise

rv = await self.get_local_did(loc_did)
LOGGER.debug('Wallet.replace_local_did_metadata <<< %s', rv)
return rv
async def replace_local_did_metadata(self, loc_did: str, metadata: dict) -> DIDInfo
Replace the metadata associated with a local DID. Raise WalletState if wallet is closed, AbsentRecord for no such local DID. :param loc_did: local DID of interest :param metadata: new metadata to store :return: DIDInfo for local DID after write
3.377781
3.244906
1.040949
LOGGER.debug('Wallet.get_local_dids >>>')

dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle))  # list; coroutine must be awaited

rv = []
for did_with_meta in dids_with_meta:
    meta = json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {}
    if meta.get('anchor', False):
        continue  # exclude anchor DIDs past and present
    rv.append(DIDInfo(did_with_meta['did'], did_with_meta['verkey'], meta))

LOGGER.debug('Wallet.get_local_dids <<< %s', rv)
return rv
async def get_local_dids(self) -> Sequence[DIDInfo]
Get list of DIDInfos for local DIDs. :return: list of local DIDInfos
3.373125
3.348016
1.0075
LOGGER.debug('Wallet.get_local_did >>> loc: %s', loc)

if not self.handle:
    LOGGER.debug('Wallet.get_local_did <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

if ok_did(loc):  # it's a DID
    try:
        did_with_meta = json.loads(await did.get_my_did_with_meta(self.handle, loc))
        rv = DIDInfo(
            did_with_meta['did'],
            did_with_meta['verkey'],
            json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {})  # nudge None to empty
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletItemNotFound:
            LOGGER.debug('Wallet.get_local_did <!< DID %s not present in wallet %s', loc, self.name)
            raise AbsentRecord('Local DID {} not present in wallet {}'.format(loc, self.name))
        LOGGER.debug('Wallet.get_local_did <!< indy-sdk raised error %s', x_indy.error_code)
        raise
else:  # it's a verkey
    dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle))  # list
    for did_with_meta in dids_with_meta:
        if did_with_meta['verkey'] == loc:
            rv = DIDInfo(
                did_with_meta['did'],
                did_with_meta['verkey'],
                json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {})
            break
    else:
        LOGGER.debug('Wallet.get_local_did <!< Wallet %s has no local DID for verkey %s', self.name, loc)
        raise AbsentRecord('Wallet {} has no local DID for verkey {}'.format(self.name, loc))

LOGGER.debug('Wallet.get_local_did <<< %s', rv)
return rv
async def get_local_did(self, loc: str) -> DIDInfo
Get local DID info by local DID or verification key. Raise AbsentRecord for no such local DID. :param loc: DID or verification key of interest :return: DIDInfo for local DID
2.060932
1.95645
1.053404
LOGGER.debug('Wallet.get_anchor_did >>>')

if not self.handle:
    LOGGER.debug('Wallet.get_anchor_did <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

rv = None
dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle))  # list

latest = 0
for did_with_meta in dids_with_meta:
    try:
        meta = json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {}
        if not meta.get('anchor', False):
            continue
        if isinstance(meta, dict) and meta.get('since', -1) > latest:
            latest = meta['since']  # track high-water mark, else any anchor DID with since > 0 would win
            rv = did_with_meta.get('did')
    except json.decoder.JSONDecodeError:
        continue  # it's not an anchor DID, carry on

LOGGER.debug('Wallet.get_anchor_did <<< %s', rv)
return rv
async def get_anchor_did(self) -> str
Get current anchor DID by metadata, None for not yet set. :return: DID
3.295964
3.15876
1.043436
LOGGER.debug('Wallet.create_link_secret >>> label: %s', label)

if not self.handle:
    LOGGER.debug('Wallet.create_link_secret <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

try:
    await anoncreds.prover_create_master_secret(self.handle, label)
    await self._write_link_secret_label(label)
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError:
        LOGGER.warning(
            'Wallet %s link secret already current: abstaining from updating label record', self.name)
        await self._write_link_secret_label(label)
    else:
        LOGGER.debug(
            'Wallet.create_link_secret <!< cannot create link secret for wallet %s, indy error code %s',
            self.name,
            x_indy.error_code)
        raise

LOGGER.debug('Wallet.create_link_secret <<<')
async def create_link_secret(self, label: str) -> None
Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the current link secret does not already correspond to the input link secret label. Raise WalletState if wallet is closed; raise any other IndyError on failure to set link secret in wallet. :param label: label for link secret; indy-sdk uses label to generate link secret
3.14712
2.633736
1.194926
LOGGER.debug('Wallet._write_link_secret_label >>> %s', label)

if await self.get_link_secret_label() == label:
    LOGGER.info('Wallet._write_link_secret_label abstaining - already current')
else:
    await self.write_non_secret(StorageRecord(
        TYPE_LINK_SECRET_LABEL,
        label,
        tags=None,
        ident=str(int(time()))))  # indy requires str

LOGGER.debug('Wallet._write_link_secret_label <<<')
async def _write_link_secret_label(self, label) -> None
Update non-secret storage record with link secret label. :param label: link secret label
6.193484
5.314698
1.16535
LOGGER.debug('Wallet.get_link_secret_label >>>')

if not self.handle:
    LOGGER.debug('Wallet.get_link_secret_label <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

rv = None
records = await self.get_non_secret(TYPE_LINK_SECRET_LABEL)
if records:
    rv = records[str(max(int(k) for k in records))].value  # str to int, max, and back again

LOGGER.debug('Wallet.get_link_secret_label <<< %s', rv)
return rv
async def get_link_secret_label(self) -> str
Get current link secret label from non-secret storage records; return None for no match. :return: link secret label from latest non-secret storage record, or None for none
5.569408
4.517105
1.23296
LOGGER.debug('Wallet.open >>>')

created = False
while True:
    try:
        self._handle = await wallet.open_wallet(
            json.dumps(self.config),
            json.dumps(self.access_creds))
        LOGGER.info('Opened wallet %s on handle %s', self.name, self.handle)
        break
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletNotFoundError:
            if created:
                LOGGER.debug('Wallet.open() <!< Wallet %s not found after creation', self.name)
                raise AbsentWallet('Wallet {} not found after creation'.format(self.name))
            if self.auto_create:
                await self.create()
                created = True  # so a repeat miss raises AbsentWallet rather than looping forever
                continue
            else:
                LOGGER.debug('Wallet.open() <!< Wallet %s not found', self.name)
                raise AbsentWallet('Wallet {} not found'.format(self.name))
        elif x_indy.error_code == ErrorCode.WalletAlreadyOpenedError:
            LOGGER.debug('Wallet.open() <!< Wallet %s is already open', self.name)
            raise WalletState('Wallet {} is already open'.format(self.name))
        elif x_indy.error_code == ErrorCode.WalletAccessFailed:
            LOGGER.debug('Wallet.open() <!< Bad access credentials value for wallet %s', self.name)
            raise BadAccess('Bad access credentials value for wallet {}'.format(self.name))
        LOGGER.debug('Wallet %s open raised indy error %s', self.name, x_indy.error_code)
        raise

self.did = await self.get_anchor_did()
self.verkey = await did.key_for_local_did(self.handle, self.did) if self.did else None
LOGGER.info('Wallet %s got verkey %s for existing DID %s', self.name, self.verkey, self.did)

LOGGER.debug('Wallet.open <<<')
return self
async def open(self) -> 'Wallet'
Explicit entry. Open wallet as configured, for later closure via close(). For use when keeping wallet open across multiple calls. Raise any IndyError causing failure to open wallet, WalletState if wallet already open, or AbsentWallet on attempt to enter wallet not yet created. :return: current object
2.348415
2.220572
1.057573
LOGGER.debug('Wallet.create >>>')

try:
    await wallet.create_wallet(
        config=json.dumps(self.config),
        credentials=json.dumps(self.access_creds))
    LOGGER.info('Created wallet %s', self.name)
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.WalletAlreadyExistsError:
        LOGGER.debug('Wallet.create <!< Wallet %s already exists', self.name)
        raise ExtantWallet('Wallet {} already exists'.format(self.name))
    LOGGER.debug(
        'Wallet.create <!< indy error code %s on creation of wallet %s',
        x_indy.error_code,
        self.name)
    raise

auto_remove = self.auto_remove
self.auto_remove = False  # defer past this creation process
async with self:
    did_info = await self.create_local_did(
        self._von_config.get('seed', None),
        self._von_config.get('did', None),
        {'anchor': True})
    self.did = did_info.did
    self.verkey = did_info.verkey
    if 'link_secret_label' in self._von_config:
        await self.create_link_secret(self._von_config['link_secret_label'])
self.auto_remove = auto_remove

LOGGER.debug('Wallet.create <<<')
async def create(self) -> None
Persist the wallet. Raise ExtantWallet if it already exists. Actuators should prefer WalletManager.create() to calling this method directly - the wallet manager filters wallet configuration through preset defaults.
3.281208
3.006346
1.091427
LOGGER.debug('Wallet.close >>>')

if not self.handle:
    LOGGER.warning('Abstaining from closing wallet %s: already closed', self.name)
else:
    LOGGER.debug('Closing wallet %s', self.name)
    await wallet.close_wallet(self.handle)
    self._handle = None
    if self.auto_remove:
        LOGGER.info('Automatically removing wallet %s', self.name)
        await self.remove()
self._handle = None

LOGGER.debug('Wallet.close <<<')
async def close(self) -> None
Explicit exit. Close wallet (and delete if so configured).
3.755757
3.203277
1.172473
LOGGER.debug('Wallet.remove >>>')

if self.handle:
    LOGGER.debug('Wallet.remove <!< Wallet %s is open', self.name)
    raise WalletState('Wallet {} is open'.format(self.name))

rv = True
try:
    LOGGER.info('Attempting to remove wallet: %s', self.name)
    await wallet.delete_wallet(
        json.dumps(self.config),
        json.dumps(self.access_creds))
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.WalletNotFoundError:
        LOGGER.info('Wallet %s not present; abstaining from removal', self.name)
    else:
        LOGGER.info('Failed wallet %s removal; indy-sdk error code %s', self.name, x_indy.error_code)
        rv = False

LOGGER.debug('Wallet.remove <<< %s', rv)
return rv
async def remove(self) -> bool
Remove serialized wallet, best effort, if it exists. Return whether wallet absent after operation (removal successful or else not present a priori). Raise WalletState if wallet is open. :return: whether wallet gone from persistent storage
3.582574
3.045738
1.176258
LOGGER.debug(
    'Wallet.write_pairwise >>> their_did: %s, their_verkey: %s, my_did: %s, metadata: %s, replace_meta: %s',
    their_did,
    their_verkey,
    my_did,
    metadata,
    replace_meta)

if their_verkey is None:
    match = await self.get_pairwise(their_did)
    if not match:
        LOGGER.debug(
            'Wallet.write_pairwise <!< Wallet %s has no pairwise DID on %s to update',
            self.name,
            their_did)
        raise AbsentRecord('Wallet {} has no pairwise DID on {} to update'.format(self.name, their_did))
    their_verkey = [pwise for pwise in match.values()][0].their_verkey

try:
    await did.store_their_did(self.handle, json.dumps({'did': their_did, 'verkey': their_verkey}))
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.WalletItemAlreadyExists:
        pass  # exists already, carry on
    else:
        LOGGER.debug(
            'Wallet.write_pairwise <!< Wallet %s write of their_did %s raised indy error code %s',
            self.name,
            their_did,
            x_indy.error_code)
        raise

if my_did:
    my_did_info = await self.get_local_did(my_did)  # raises AbsentRecord if no such local did
else:
    my_did_info = await self.create_local_did(None, None, {'pairwise_for': their_did})

pairwise = PairwiseInfo(their_did, their_verkey, my_did_info.did, my_did_info.verkey, metadata)
try:
    storec = await self.write_non_secret(
        StorageRecord(TYPE_PAIRWISE, their_verkey, tags=pairwise_info2tags(pairwise), ident=their_did),
        replace_meta)
except BadRecord:
    LOGGER.debug(
        'Wallet.write_pairwise <!< Pairwise metadata %s does not coerce into flat {str:str} tags dict',
        pairwise.metadata)
    raise

rv = storage_record2pairwise_info(storec)
LOGGER.debug('Wallet.write_pairwise <<< %s', rv)
return rv
async def write_pairwise( self, their_did: str, their_verkey: str = None, my_did: str = None, metadata: dict = None, replace_meta: bool = False) -> PairwiseInfo
Store a pairwise DID for a secure connection. Use verification key for local DID in wallet if supplied; otherwise, create one first. If local DID specified but not present, raise AbsentRecord. With supplied metadata, replace or augment and overwrite any existing metadata for the pairwise relation if one already exists in the wallet. Always include local and remote DIDs and keys in metadata to allow for WQL search. Raise AbsentRecord on call to update a non-existent record. Raise BadRecord if metadata does not coerce into non-secrets API tags specification {str:str}. :param their_did: remote DID :param their_verkey: remote verification key (default None is OK if updating an existing pairwise DID) :param my_did: local DID :param metadata: metadata for pairwise connection :param replace_meta: whether to (True) replace or (False) augment and overwrite existing metadata :return: resulting PairwiseInfo
2.828482
2.583862
1.094672
LOGGER.debug('Wallet.delete_pairwise >>> their_did: %s', their_did)

if not ok_did(their_did):
    LOGGER.debug('Wallet.delete_pairwise <!< Bad DID %s', their_did)
    raise BadIdentifier('Bad DID {}'.format(their_did))

await self.delete_non_secret(TYPE_PAIRWISE, their_did)

LOGGER.debug('Wallet.delete_pairwise <<<')
async def delete_pairwise(self, their_did: str) -> None
Remove a pairwise DID record by its remote DID. Silently return if no such record is present. Raise WalletState for closed wallet, or BadIdentifier for invalid pairwise DID. :param their_did: remote DID marking pairwise DID to remove
3.357203
2.97092
1.130021
LOGGER.debug('Wallet.get_pairwise >>> pairwise_filt: %s', pairwise_filt)

if not self.handle:
    LOGGER.debug('Wallet.get_pairwise <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

storecs = await self.get_non_secret(
    TYPE_PAIRWISE,
    pairwise_filt if ok_did(pairwise_filt) or not pairwise_filt else json.loads(pairwise_filt),
    canon_pairwise_wql)
rv = {k: storage_record2pairwise_info(storecs[k]) for k in storecs}  # touch up tags, mute leading ~

LOGGER.debug('Wallet.get_pairwise <<< %s', rv)
return rv
async def get_pairwise(self, pairwise_filt: str = None) -> dict
Return dict mapping each pairwise DID of interest in wallet to its pairwise info, or, for no filter specified, mapping them all. If wallet has no such item, return empty dict. :param pairwise_filt: remote DID of interest, or WQL json (default all) :return: dict mapping remote DIDs to PairwiseInfo
6.418128
5.397105
1.18918
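write_pairwise() and get_pairwise() above cooperate through the '~'-prefixed tags; a sketch with placeholder DIDs, verkeys, and metadata, assuming an open von_anchor Wallet.

async def demo_pairwise(wallet: Wallet) -> None:
    await wallet.write_pairwise(
        'TheirDid000000000000001',  # placeholder remote DID
        'TheirVerkey00000000000000000000000001',  # placeholder remote verkey
        metadata={'org': 'acme'})
    by_did = await wallet.get_pairwise('TheirDid000000000000001')  # lookup by remote DID
    by_wql = await wallet.get_pairwise(json.dumps({'org': 'acme'}))  # WQL: canon_pairwise_wql adds the '~'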
LOGGER.debug('Wallet.write_non_secret >>> storec: %s, replace_meta: %s', storec, replace_meta)

if not self.handle:
    LOGGER.debug('Wallet.write_non_secret <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

if not StorageRecord.ok_tags(storec.tags):
    LOGGER.debug('Wallet.write_non_secret <!< bad storage record tags %s; use flat {str: str} dict', storec)
    raise BadRecord('Bad storage record tags {}; use flat {{str:str}} dict'.format(storec))

try:
    record = json.loads(await non_secrets.get_wallet_record(
        self.handle,
        storec.type,
        storec.id,
        json.dumps({
            'retrieveType': False,
            'retrieveValue': True,
            'retrieveTags': True
        })))
    if record['value'] != storec.value:
        await non_secrets.update_wallet_record_value(
            self.handle,
            storec.type,
            storec.id,
            storec.value)
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.WalletItemNotFound:
        await non_secrets.add_wallet_record(
            self.handle,
            storec.type,
            storec.id,
            storec.value,
            json.dumps(storec.tags) if storec.tags else None)
    else:
        LOGGER.debug(
            'Wallet.write_non_secret <!< Wallet lookup raised indy error code %s',
            x_indy.error_code)
        raise
else:
    if (record['tags'] or None) != storec.tags:  # record maps no tags to {}, not None
        tags = (storec.tags or {}) if replace_meta else {**record['tags'], **(storec.tags or {})}
        await non_secrets.update_wallet_record_tags(
            self.handle,
            storec.type,
            storec.id,
            json.dumps(tags))  # indy-sdk takes '{}' instead of None for null tags

record = json.loads(await non_secrets.get_wallet_record(
    self.handle,
    storec.type,
    storec.id,
    json.dumps({
        'retrieveType': False,
        'retrieveValue': True,
        'retrieveTags': True
    })))

rv = StorageRecord(storec.type, record['value'], tags=record.get('tags', None), ident=record['id'])
LOGGER.debug('Wallet.write_non_secret <<< %s', rv)
return rv
async def write_non_secret(self, storec: StorageRecord, replace_meta: bool = False) -> StorageRecord
Add or update non-secret storage record to the wallet; return resulting wallet non-secret record. :param storec: non-secret storage record :param replace_meta: whether to replace any existing metadata on matching record or to augment it :return: non-secret storage record as it appears in the wallet after write
2.595328
2.494882
1.040261
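A sketch of the add/augment/replace semantics of write_non_secret() above; record type, identifier, and tags are placeholders, and the wallet is assumed open.

async def demo_non_secret(wallet: Wallet) -> None:
    rec = StorageRecord('my-type', 'value-1', tags={'a': '1'}, ident='rec-0')  # placeholders
    await wallet.write_non_secret(rec)  # adds the record

    rec = StorageRecord('my-type', 'value-1', tags={'b': '2'}, ident='rec-0')
    augmented = await wallet.write_non_secret(rec)  # tags now {'a': '1', 'b': '2'}
    replaced = await wallet.write_non_secret(rec, replace_meta=True)  # tags now {'b': '2'}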
LOGGER.debug('Wallet.delete_non_secret >>> typ: %s, ident: %s', typ, ident)

if not self.handle:
    LOGGER.debug('Wallet.delete_non_secret <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

try:
    await non_secrets.delete_wallet_record(self.handle, typ, ident)
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.WalletItemNotFound:
        LOGGER.info('Wallet.delete_non_secret <!< no record for type %s on identifier %s', typ, ident)
    else:
        LOGGER.debug(
            'Wallet.delete_non_secret <!< deletion of %s record on identifier %s raised indy error code %s',
            typ,
            ident,
            x_indy.error_code)
        raise

LOGGER.debug('Wallet.delete_non_secret <<<')
async def delete_non_secret(self, typ: str, ident: str) -> None
Remove a non-secret record by its type and identifier. Silently return if no such record is present. Raise WalletState for closed wallet. :param typ: non-secret storage record type :param ident: non-secret storage record identifier
2.668588
2.30108
1.159711
LOGGER.debug('Wallet.get_non_secret >>> typ: %s, filt: %s, canon_wql: %s', typ, filt, canon_wql)

if not self.handle:
    LOGGER.debug('Wallet.get_non_secret <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

records = []
if isinstance(filt, str):  # ordinary lookup by value
    try:
        records = [json.loads(await non_secrets.get_wallet_record(
            self.handle,
            typ,
            filt,
            json.dumps({
                'retrieveType': False,
                'retrieveValue': True,
                'retrieveTags': True
            })))]
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletItemNotFound:
            pass
        else:
            LOGGER.debug(
                'Wallet.get_non_secret <!< Wallet %s lookup raised indy exception %s',
                self.name,
                x_indy.error_code)
            raise
else:
    canon = canon_wql or canon_non_secret_wql
    s_handle = await non_secrets.open_wallet_search(
        self.handle,
        typ,
        json.dumps(canon(filt or {})),
        json.dumps({
            'retrieveRecords': True,
            'retrieveTotalCount': True,
            'retrieveType': False,
            'retrieveValue': True,
            'retrieveTags': True
        }))

    records = []
    cardinality = int(json.loads(
        await non_secrets.fetch_wallet_search_next_records(self.handle, s_handle, 0))['totalCount'])
    chunk = min(cardinality, limit or cardinality, Wallet.DEFAULT_CHUNK)
    if limit:
        cardinality = min(limit, cardinality)
    try:
        while len(records) != cardinality:
            batch = json.loads(
                await non_secrets.fetch_wallet_search_next_records(self.handle, s_handle, chunk))['records']
            records.extend(batch)
            if len(batch) < chunk:
                break
        if len(records) != cardinality:
            LOGGER.warning(
                'Non-secret search/limit indicated %s results but fetched %s',
                cardinality,
                len(records))
    finally:
        await non_secrets.close_wallet_search(s_handle)

rv = {record['id']: StorageRecord(typ, record['value'], record['tags'], record['id']) for record in records}
LOGGER.debug('Wallet.get_non_secret <<< %s', rv)
return rv
async def get_non_secret( self, typ: str, filt: Union[dict, str] = None, canon_wql: Callable[[dict], dict] = None, limit: int = None) -> dict
Return dict mapping each non-secret storage record of interest by identifier or, for no filter specified, mapping them all. If wallet has no such item, return empty dict. :param typ: non-secret storage record type :param filt: non-secret storage record identifier or WQL json (default all) :param canon_wql: WQL canonicalization function (default von_anchor.canon.canon_non_secret_wql()) :param limit: maximum number of results to return (default no limit) :return: dict mapping identifiers to non-secret storage records
2.868724
2.676671
1.071751
LOGGER.debug(
    'Wallet.encrypt >>> message: %s, authn: %s, to_verkey: %s, from_verkey: %s',
    message,
    authn,
    to_verkey,
    from_verkey)

if not message:
    LOGGER.debug('Wallet.encrypt <!< No message to encrypt')
    raise AbsentMessage('No message to encrypt')

if not self.handle:
    LOGGER.debug('Wallet.encrypt <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

if authn:
    rv = await crypto.auth_crypt(self.handle, from_verkey or self.verkey, to_verkey or self.verkey, message)
else:
    rv = await crypto.anon_crypt(to_verkey or self.verkey, message)

LOGGER.debug('Wallet.encrypt <<< %s', rv)
return rv
async def encrypt( self, message: bytes, authn: bool = False, to_verkey: str = None, from_verkey: str = None) -> bytes
Encrypt plaintext for owner of DID, anonymously or via authenticated encryption scheme. Raise AbsentMessage for missing message, or WalletState if wallet is closed. :param message: plaintext, as bytes :param authn: whether to use authenticated encryption scheme :param to_verkey: verification key of recipient, None for anchor's own :param from_verkey: verification key of sender for authenticated encryption, None for anchor's own :return: ciphertext, as bytes
2.314026
1.979817
1.168808
LOGGER.debug(
    'Wallet.decrypt >>> ciphertext: %s, authn_check: %s, to_verkey: %s, from_verkey: %s',
    ciphertext,
    authn_check,
    to_verkey,
    from_verkey)

if not ciphertext:
    LOGGER.debug('Wallet.decrypt <!< No ciphertext to decrypt')
    raise AbsentMessage('No ciphertext to decrypt')

if not self.handle:
    LOGGER.debug('Wallet.decrypt <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

sender_verkey = None
if authn_check is None:
    plaintext = await crypto.anon_decrypt(self.handle, to_verkey or self.verkey, ciphertext)
else:
    (sender_verkey, plaintext) = await crypto.auth_decrypt(self.handle, to_verkey or self.verkey, ciphertext)
    if authn_check and sender_verkey != (from_verkey or self.verkey):
        LOGGER.debug('Wallet.decrypt <!< Authentication revealed unexpected sender key on decryption')
        raise BadKey('Authentication revealed unexpected sender key on decryption')

rv = (plaintext, sender_verkey)
LOGGER.debug('Wallet.decrypt <<< %s', rv)
return rv
async def decrypt( self, ciphertext: bytes, authn_check: bool = None, to_verkey: str = None, from_verkey: str = None) -> (bytes, str)
Decrypt ciphertext and optionally authenticate sender. Raise BadKey if authentication operation checks and reveals sender key distinct from input sender verification key. Raise AbsentMessage for missing ciphertext, or WalletState if wallet is closed. :param ciphertext: ciphertext, as bytes :param authn_check: True to authenticate and check sender verification key, False to authenticate and return sender verification key for client to decide fitness, or None to use anonymous decryption :param to_verkey: recipient verification key, default anchor's own :param from_verkey: sender verification key, ignored for anonymous decryption, default anchor's own for authenticated decryption :return: decrypted bytes and sender verification key (None for anonymous decryption)
2.315295
1.970538
1.174955
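The encrypt()/decrypt() pair above round-trips as follows; alice and bob are hypothetical open wallets.

async def demo_crypt(alice: Wallet, bob: Wallet) -> None:
    # anonymous: decrypt recovers no sender verkey
    ciphertext = await alice.encrypt(b'hello', to_verkey=bob.verkey)
    (plaintext, sender_verkey) = await bob.decrypt(ciphertext)  # sender_verkey is None

    # authenticated: recipient checks the revealed sender key
    ciphertext = await alice.encrypt(b'hello', authn=True, to_verkey=bob.verkey)
    (plaintext, sender_verkey) = await bob.decrypt(
        ciphertext,
        authn_check=True,
        from_verkey=alice.verkey)  # raises BadKey on sender-key mismatch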
LOGGER.debug('Wallet.sign >>> message: %s, verkey: %s', message, verkey)

if not message:
    LOGGER.debug('Wallet.sign <!< No message to sign')
    raise AbsentMessage('No message to sign')

if not self.handle:
    LOGGER.debug('Wallet.sign <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

rv = await crypto.crypto_sign(self.handle, verkey or self.verkey, message)

LOGGER.debug('Wallet.sign <<< %s', rv)
return rv
async def sign(self, message: bytes, verkey: str = None) -> bytes
Derive signing key and sign message; return signature. Raise AbsentMessage for missing message, or WalletState if wallet is closed. :param message: content to sign, as bytes :param verkey: verification key corresponding to private signing key (default anchor's own) :return: signature, as bytes
3.016111
2.332914
1.292851
LOGGER.debug('Wallet.verify >>> message: %s, signature: %s, verkey: %s', message, signature, verkey)

if not message:
    LOGGER.debug('Wallet.verify <!< No message to verify')
    raise AbsentMessage('No message to verify')

if not signature:
    LOGGER.debug('Wallet.verify <!< No signature to verify')
    raise AbsentMessage('No signature to verify')

if not self.handle:
    LOGGER.debug('Wallet.verify <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

rv = await crypto.crypto_verify(verkey or self.verkey, message, signature)

LOGGER.debug('Wallet.verify <<< %s', rv)
return rv
async def verify(self, message: bytes, signature: bytes, verkey: str = None) -> bool
Verify signature against input signer verification key (default anchor's own). Raise AbsentMessage for missing message or signature, or WalletState if wallet is closed. :param message: content to verify, as bytes :param signature: signature, as bytes :param verkey: signer verification key (default anchor's own) :return: whether signature is valid
2.459748
2.050442
1.199618
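Sign/verify round trip for the two methods above, against the anchor's default keys; a sketch assuming an open von_anchor Wallet.

async def demo_sign(wallet: Wallet) -> None:
    signature = await wallet.sign(b'attest this')  # anchor's own signing key by default
    assert await wallet.verify(b'attest this', signature)
    assert not await wallet.verify(b'tampered', signature)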
LOGGER.debug(
    'Wallet.pack >>> message: %s, recip_verkeys: %s, sender_verkey: %s',
    message,
    recip_verkeys,
    sender_verkey)

if message is None:
    LOGGER.debug('Wallet.pack <!< No message to pack')
    raise AbsentMessage('No message to pack')

rv = await crypto.pack_message(
    self.handle,
    message,
    [recip_verkeys] if isinstance(recip_verkeys, str) else list(recip_verkeys or [self.verkey]),
    sender_verkey)

LOGGER.debug('Wallet.pack <<< %s', rv)
return rv
async def pack( self, message: str, recip_verkeys: Union[str, Sequence[str]] = None, sender_verkey: str = None) -> bytes
Pack a message for one or more recipients (default anchor only). Raise AbsentMessage for missing message, or WalletState if wallet is closed. :param message: message to pack :param recip_verkeys: verification keys of recipients (default anchor's own, only) :param sender_verkey: sender verification key (default anonymous encryption) :return: packed message
2.669831
2.234544
1.194799
LOGGER.debug('Wallet.unpack >>> ciphertext: %s', ciphertext)

if not ciphertext:
    LOGGER.debug('Wallet.unpack <!< No ciphertext to unpack')
    raise AbsentMessage('No ciphertext to unpack')

try:
    unpacked = json.loads(await crypto.unpack_message(self.handle, ciphertext))
except IndyError as x_indy:
    if x_indy.error_code == ErrorCode.WalletItemNotFound:
        LOGGER.debug('Wallet.unpack <!< Wallet %s has no local key to unpack ciphertext', self.name)
        raise AbsentRecord('Wallet {} has no local key to unpack ciphertext'.format(self.name))
    LOGGER.debug('Wallet.unpack <!< Wallet %s unpack() raised indy error code %s', self.name, x_indy.error_code)
    raise

rv = (unpacked['message'], unpacked.get('sender_verkey', None), unpacked.get('recipient_verkey', None))
LOGGER.debug('Wallet.unpack <<< %s', rv)
return rv
async def unpack(self, ciphertext: bytes) -> (str, str, str)
Unpack a message. Return triple with cleartext, sender verification key, and recipient verification key. Raise AbsentMessage for missing ciphertext, or WalletState if wallet is closed. Raise AbsentRecord if wallet has no key to unpack ciphertext. :param ciphertext: JWE-like formatted message as pack() produces :return: cleartext, sender verification key, recipient verification key
3.338061
2.609951
1.278974
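pack() and unpack() above round-trip similarly; alice and bob are again hypothetical open wallets.

async def demo_pack(alice: Wallet, bob: Wallet) -> None:
    # authenticated pack: unpack recovers alice's verkey
    packed = await alice.pack('hello', recip_verkeys=bob.verkey, sender_verkey=alice.verkey)
    (cleartext, sender_verkey, recip_verkey) = await bob.unpack(packed)

    # anonymous pack: no sender verkey supplied, none recovered
    packed = await alice.pack('hello', recip_verkeys=bob.verkey)
    (cleartext, sender_verkey, recip_verkey) = await bob.unpack(packed)  # sender_verkey is None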
LOGGER.debug('Wallet.reseed_init >>> next_seed: [SEED]')

if not self.handle:
    LOGGER.debug('Wallet.reseed_init <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

rv = await did.replace_keys_start(self.handle, self.did, json.dumps({'seed': next_seed} if next_seed else {}))

LOGGER.debug('Wallet.reseed_init <<< %s', rv)
return rv
async def reseed_init(self, next_seed: str = None) -> str
Begin reseed operation: generate new key. Raise WalletState if wallet is closed. :param next_seed: incoming replacement seed (default random) :return: new verification key
5.006967
3.63145
1.378779
LOGGER.debug('Wallet.reseed_apply >>>')

if not self.handle:
    LOGGER.debug('Wallet.reseed_apply <!< Wallet %s is closed', self.name)
    raise WalletState('Wallet {} is closed'.format(self.name))

await did.replace_keys_apply(self.handle, self.did)
self.verkey = await did.key_for_local_did(self.handle, self.did)

now = int(time())
rv = DIDInfo(self.did, self.verkey, {'anchor': True, 'since': now, 'modified': now})
await did.set_did_metadata(self.handle, self.did, json.dumps(rv.metadata))
LOGGER.info('Wallet %s set seed hash metadata for DID %s', self.name, self.did)

LOGGER.debug('Wallet.reseed_apply <<< %s', rv)
return rv
async def reseed_apply(self) -> DIDInfo
Replace verification key with new verification key from reseed operation. Raise WalletState if wallet is closed. :return: DIDInfo with new verification key and metadata for DID
3.725185
3.313556
1.124226
LOGGER.debug('Origin.send_schema >>> schema_data_json: %s', schema_data_json)

schema_data = json.loads(schema_data_json)
for attr in schema_data['attr_names']:
    if not re.match(r'(?=[^- ])[-_a-zA-Z0-9 ]+(?<=[^- ])$', attr) or attr.strip().lower() == 'hash':
        LOGGER.debug('Origin.send_schema <!< Bad attribute name [%s]', attr)
        raise BadAttribute('Bad attribute name [{}]'.format(attr))

s_id = schema_id(self.did, schema_data['name'], schema_data['version'])
s_key = schema_key(s_id)

rv_json = None
with SCHEMA_CACHE.lock:
    try:
        rv_json = await self.get_schema(s_key)
        LOGGER.error(
            'Schema %s version %s already exists on ledger for origin-did %s: not sending',
            schema_data['name'],
            schema_data['version'],
            self.did)
    except AbsentSchema:  # OK - about to create and send it
        (_, schema_json) = await anoncreds.issuer_create_schema(
            self.did,
            schema_data['name'],
            schema_data['version'],
            json.dumps(schema_data['attr_names']))
        req_json = await ledger.build_schema_request(self.did, schema_json)
        await self._sign_submit(req_json)

        for _ in range(16):  # reasonable timeout
            try:
                rv_json = await self.get_schema(s_key)  # adds to cache
                break
            except AbsentSchema:
                await sleep(1)
                LOGGER.info('Sent schema %s to ledger, waiting 1s for its appearance', s_id)

        if not rv_json:
            LOGGER.debug('Origin.send_schema <!< timed out waiting on sent schema %s', s_id)
            raise BadLedgerTxn('Timed out waiting on sent schema {}'.format(s_id))

LOGGER.debug('Origin.send_schema <<< %s', rv_json)
return rv_json
async def send_schema(self, schema_data_json: str) -> str
Send schema to ledger, then retrieve it as written to the ledger and return it. Raise BadLedgerTxn on failure. Raise BadAttribute for attribute name with spaces or reserved for indy-sdk. If schema already exists on ledger, log error and return schema. :param schema_data_json: schema data json with name, version, attribute names; e.g., :: { 'name': 'my-schema', 'version': '1.234', 'attr_names': ['favourite_drink', 'height', 'last_visit_date'] } :return: schema json as written to ledger (or existed a priori)
3.34237
2.795847
1.195477
LOGGER.debug('NominalAnchor.least_role >>>')

rv = Role.USER

LOGGER.debug('NominalAnchor.least_role <<< %s', rv)
return rv
def least_role() -> Role
Return the indy-sdk null role for a tails sync anchor, which does not need write access. :return: USER role
10.815067
6.618578
1.634047
LOGGER.debug('OrgHubAnchor.close >>>')

archive_caches = False
if self.config.get('archive-holder-prover-caches-on-close', False):
    archive_caches = True
    await self.load_cache_for_proof(False)
if self.config.get('archive-verifier-caches-on-close', {}):
    archive_caches = True
    await self.load_cache_for_verification(False)
if archive_caches:
    ArchivableCaches.archive(self.dir_cache)
    ArchivableCaches.purge_archives(self.dir_cache, True)

# Do not close wallet independently: allow for sharing open wallet over many anchor lifetimes
# await self.wallet.close()  # 1.7.8

# Do not close pool independently: let relying party decide when to go on-line and off-line

for path_rr_id in Tails.links(self._dir_tails):
    rr_id = basename(path_rr_id)
    try:
        await HolderProver._sync_revoc_for_proof(self, rr_id)
    except ClosedPool:
        LOGGER.warning('OrgHubAnchor sync-revoc on close required ledger for %s but pool was closed', rr_id)

LOGGER.debug('OrgHubAnchor.close <<<')
async def close(self) -> None
Explicit exit. If so configured, populate cache to prove for any creds on schemata, cred defs, and rev regs marked of interest in configuration at initialization, archive cache, and purge prior cache archives.
8.071318
7.23921
1.114945
for k in [qk for qk in query]:  # copy: iteration alters query keys
    attr_match = re.match('attr::([^:]+)::(marker|value)$', k)
    if isinstance(query[k], dict):  # only subqueries are dicts: recurse
        query[k] = canon_cred_wql(query[k])
    if k == '$or':
        if not isinstance(query[k], list):
            raise BadWalletQuery('Bad WQL; $or value must be a list in {}'.format(json.dumps(query)))
        query[k] = [canon_cred_wql(subq) for subq in query[k]]
    if attr_match:
        qkey = 'attr::{}::{}'.format(canon(attr_match.group(1)), canon(attr_match.group(2)))
        query[qkey] = query.pop(k)
        tag_value = query[qkey]
        if isinstance(tag_value, dict) and len(tag_value) == 1:
            if '$in' in tag_value:
                tag_value['$in'] = [raw(val) for val in tag_value.pop('$in')]
            else:
                wql_op = set(tag_value.keys()).pop()  # $neq, $gt, $gte, etc.
                tag_value[wql_op] = raw(tag_value[wql_op])
        else:  # equality
            query[qkey] = raw(query[qkey])

return query
def canon_cred_wql(query: dict) -> dict
Canonicalize WQL attribute marker and value keys for input to indy-sdk wallet credential filtration. Canonicalize comparison values to proper indy-sdk raw values as per raw(). Raise BadWalletQuery for WQL mapping '$or' to non-list. :param query: WQL query :return: canonicalized WQL query dict
3.602821
3.18666
1.130595
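A worked sketch of the canonicalization, assuming canon() strips spaces from and lower-cases attribute names, and raw() stringifies comparison values:

# Worked sketch (assumes canon() strips spaces and lower-cases, raw() stringifies)
query = {
    'attr::Favourite Drink::value': 'martini',
    '$or': [
        {'attr::Height::value': {'$gt': 180}},
        {'attr::Height::value': {'$in': [175, 176]}}
    ]
}
canon_cred_wql(query)
# {
#     'attr::favouritedrink::value': 'martini',
#     '$or': [
#         {'attr::height::value': {'$gt': '180'}},
#         {'attr::height::value': {'$in': ['175', '176']}}
#     ]
# }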
if not query:
    return {
        '~their_did': {
            '$neq': ''
        }
    }
for k in [qk for qk in query]:  # copy: iteration alters query keys
    if isinstance(query[k], dict):  # only subqueries are dicts: recurse
        query[k] = canon_pairwise_wql(query[k])
    if k == '$or':
        if not isinstance(query[k], list):
            raise BadWalletQuery('Bad WQL; $or value must be a list in {}'.format(json.dumps(query)))
        query[k] = [canon_pairwise_wql(subq) for subq in query[k]]
    elif k == '$not':
        query[k] = canon_pairwise_wql(query.pop(k))
    elif k not in WQL_1_OPS:
        qkey = canon_pairwise_tag(k)
        query[qkey] = query.pop(k)
        tag_value = query[qkey]
        if isinstance(tag_value, dict) and len(tag_value) == 1:
            if '$in' in tag_value:
                tag_value['$in'] = [raw(val) for val in tag_value['$in']]
            else:
                wql_op = set(tag_value.keys()).pop()  # $neq, $gt, $gte, etc.
                tag_value[wql_op] = raw(tag_value[wql_op])
        else:
            query[qkey] = raw(query.pop(qkey))
return query
def canon_pairwise_wql(query: dict = None) -> dict
Canonicalize WQL tags to unencrypted storage specification. Canonicalize comparison values to strings via raw(). Raise BadWalletQuery for WQL mapping '$or' to non-list. :param query: WQL query :return: canonicalized WQL query dict
3.405453
2.935706
1.160012
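For example (a sketch assuming canon_pairwise_tag() prefixes '~' to mark tags for unencrypted storage):

# Sketch (assumes canon_pairwise_tag() prefixes '~' for unencrypted storage tags)
canon_pairwise_wql()  # {'~their_did': {'$neq': ''}}: match all pairwise relations
canon_pairwise_wql({'their_role': 'issuer'})  # {'~their_role': 'issuer'}, value via raw()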
try: with open(filename) as fh_req: return [line.strip() for line in fh_req if line.strip() and not line.startswith('#')] except FileNotFoundError: print('File not found: {}'.format(realpath(filename)), file=stderr) raise
def parse_requirements(filename)
Load requirements from a pip requirements file, skipping comments and blank lines.

:param filename: file name with requirements to parse
:return: list of requirement strings
3.504601
3.612953
0.97001
LOGGER.debug('NodePoolManager.add_config >>> name: %s, genesis: %s', name, genesis)
if name in await self.list():
    LOGGER.debug('NodePoolManager.add_config <!< Node pool %s configuration already present', name)
    raise ExtantPool('Node pool {} configuration already present'.format(name))
genesis_tmp = None
path_gen = realpath(expanduser(expandvars(genesis)))
try:
    if not isfile(path_gen):
        genesis_tmp = NamedTemporaryFile(mode='w+b', buffering=0, delete=False)
        with genesis_tmp:
            genesis_tmp.write(genesis.encode())
    await pool.create_pool_ledger_config(
        name,
        json.dumps({
            'genesis_txn': path_gen if isfile(path_gen) else genesis_tmp.name
        }))
finally:
    if genesis_tmp:
        remove(genesis_tmp.name)
LOGGER.debug('NodePoolManager.add_config <<<')
async def add_config(self, name: str, genesis: str = None) -> None
Given pool name and genesis transaction path or data, add node pool configuration to indy home directory. Raise ExtantPool if node pool configuration on input name already exists. :param name: pool name :param genesis: genesis transaction path or raw data
3.459804
2.842484
1.217176
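Usage may pass either a genesis transaction file path or the raw genesis transaction data; a hedged sketch, where `manager` is a hypothetical NodePoolManager instance:

# Hypothetical usage sketch: 'manager' is an assumed NodePoolManager instance
async def demo(manager, genesis_txn_data):
    # from a genesis transaction file on disk:
    await manager.add_config('my-pool', '~/.indy-cli/networks/my-net/pool_transactions_genesis')
    # or from raw genesis transaction data (serialized to a temp file internally):
    await manager.add_config('my-pool-2', genesis_txn_data)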
LOGGER.debug('NodePoolManager.list >>>') rv = [p['pool'] for p in await pool.list_pools()] LOGGER.debug('NodePoolManager.list <<< %s', rv) return rv
async def list(self) -> List[str]
Return list of pool names configured, empty list for none. :return: list of pool names.
7.092717
5.873394
1.207601
LOGGER.debug('NodePoolManager.get >>> name: %s, config: %s', name, config)
rv = NodePool(name, self.protocol, config)
LOGGER.debug('NodePoolManager.get <<< %s', rv)
return rv
def get(self, name: str, config: dict = None) -> NodePool
Return node pool on input name and optional configuration.

:param name: name of configured pool
:param config: pool configuration with optional 'timeout' int, 'extended_timeout' int, 'preordered_nodes' array of strings
:return: node pool
6.15786
6.619282
0.930291
LOGGER.debug('NodePoolManager.remove >>> name: %s', name)
try:
    await pool.delete_pool_ledger_config(name)
except IndyError as x_indy:
    LOGGER.info('Abstaining from node pool removal; indy-sdk error code %s', x_indy.error_code)
LOGGER.debug('NodePoolManager.remove <<<')
async def remove(self, name: str) -> None
Remove serialized pool info if it exists. Abstain from removing open node pool.

:param name: name of pool configuration to remove
7.022555
4.92781
1.425086
return {k: self._pubkey[k] for k in self._pubkey if self._pubkey[k].authn}
def authnkey(self) -> dict
Accessor for public keys marked as authentication keys, by identifier.

:return: dict mapping identifiers to public keys marked as authentication keys
7.909725
4.324795
1.828925
if isinstance(item, Service):
    self.service[item.id] = item
elif isinstance(item, PublicKey):
    self.pubkey[item.id] = item
else:
    raise BadDIDDocItem('Cannot add item {} to DIDDoc on DID {}'.format(item, self.did))
return self
def set(self, item: Union[Service, PublicKey]) -> 'DIDDoc'
Add or replace service or public key; return current DIDDoc. Raise BadDIDDocItem if input item is neither service nor public key. :param item: service or public key to set :return: current DIDDoc
3.024364
2.685719
1.126091
return { '@context': DIDDoc.CONTEXT, 'id': canon_ref(self.did, self.did), 'publicKey': [pubkey.to_dict() for pubkey in self.pubkey.values()], 'authentication': [{ 'type': pubkey.type.authn_type, 'publicKey': canon_ref(self.did, pubkey.id) } for pubkey in self.pubkey.values() if pubkey.authn], 'service': [service.to_dict() for service in self.service.values()] }
def serialize(self) -> dict
Dump current object to a JSON-compatible dictionary. :return: dict representation of current DIDDoc
3.357543
3.025607
1.109709
rv = [] for tag in [tags] if isinstance(tags, str) else list(tags): for svc_key in service.get(tag, {}): canon_key = canon_ref(self.did, svc_key) pubkey = None if '#' in svc_key: if canon_key in self.pubkey: pubkey = self.pubkey[canon_key] else: # service key refers to another DID doc LOGGER.debug( 'DIDDoc.add_service_pubkeys <!< DID document %s has no public key %s', self.did, svc_key) raise AbsentDIDDocItem('DID document {} has no public key {}'.format(self.did, svc_key)) else: for existing_pubkey in self.pubkey.values(): if existing_pubkey.value == svc_key: pubkey = existing_pubkey break else: pubkey = PublicKey( self.did, ident=svc_key[-9:-1], # industrial-grade uniqueness value=svc_key) self._pubkey[pubkey.id] = pubkey if pubkey and pubkey not in rv: # perverse case: could specify same key multiple ways; append once rv.append(pubkey) return rv
def add_service_pubkeys(self, service: dict, tags: Union[Sequence[str], str]) -> List[PublicKey]
Add public keys specified in service. Return public keys so discovered. Raise AbsentDIDDocItem for public key reference not present in DID document. :param service: service from DID document :param tags: potential tags marking public keys of type of interest - the standard is still coalescing :return: list of public keys that service specification in DID document identifies.
4.433198
3.907525
1.134528
rv = None
if 'id' in did_doc:
    rv = DIDDoc(did_doc['id'])
else:  # get DID to serve as DID document identifier from first public key
    if 'publicKey' not in did_doc:
        LOGGER.debug('DIDDoc.deserialize <!< no identifier in DID document')
        raise AbsentDIDDocItem('No identifier in DID document')
    for pubkey in did_doc['publicKey']:
        pubkey_did = canon_did(resource(pubkey['id']))
        if ok_did(pubkey_did):
            rv = DIDDoc(pubkey_did)
            break
    else:
        LOGGER.debug('DIDDoc.deserialize <!< no identifier in DID document')
        raise AbsentDIDDocItem('No identifier in DID document')
for pubkey in did_doc['publicKey']:  # include public keys and authentication keys by reference
    pubkey_type = PublicKeyType.get(pubkey['type'])
    authn = any(
        canon_ref(rv.did, ak.get('publicKey', '')) == canon_ref(rv.did, pubkey['id'])
        for ak in did_doc.get('authentication', []) if isinstance(ak.get('publicKey', None), str))
    key = PublicKey(  # initialization canonicalizes id
        rv.did,
        pubkey['id'],
        pubkey[pubkey_type.specifier],
        pubkey_type,
        canon_did(pubkey['controller']),
        authn)
    rv.pubkey[key.id] = key
for akey in did_doc.get('authentication', []):  # include embedded authentication keys
    if akey.get('publicKey', None):
        continue  # already included among public keys by reference
    pubkey_type = PublicKeyType.get(akey['type'])
    key = PublicKey(  # initialization canonicalizes id
        rv.did,
        akey['id'],
        akey[pubkey_type.specifier],
        pubkey_type,
        canon_did(akey['controller']),
        True)
    rv.pubkey[key.id] = key
for service in did_doc.get('service', []):
    endpoint = service['serviceEndpoint']
    svc = Service(  # initialization canonicalizes id
        rv.did,
        service.get('id', canon_ref(rv.did, 'assigned-service-{}'.format(len(rv.service)), ';')),
        service['type'],
        rv.add_service_pubkeys(service, 'recipientKeys'),
        rv.add_service_pubkeys(service, ['mediatorKeys', 'routingKeys']),
        canon_ref(rv.did, endpoint, ';') if ';' in endpoint else endpoint,
        service.get('priority', None))
    rv.service[svc.id] = svc
return rv
def deserialize(cls, did_doc: dict) -> 'DIDDoc'
Construct DIDDoc object from dict representation. Raise BadIdentifier for bad DID.

:param did_doc: DIDDoc dict representation
:return: DIDDoc from input dict
3.409677
3.433169
0.993157
return Protocol.V_13 if version == Protocol.V_13.value.name else Protocol.DEFAULT
def get(version: str) -> 'Protocol'
Return enum instance corresponding to input version value ('1.6' etc.)
11.233223
9.489699
1.183728
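A worked sketch, assuming Protocol.V_13.value.name == '1.3':

# Worked sketch (assumes Protocol.V_13.value.name == '1.3')
assert Protocol.get('1.3') == Protocol.V_13
assert Protocol.get('1.8') == Protocol.DEFAULT  # any other version maps to the default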
if for_box_id: return '' if self == Protocol.V_13 else ':tag' return 'tag'
def cd_id_tag(self, for_box_id: bool = False) -> str
Return (place-holder) credential definition identifier tag for current version of node protocol. At present, von_anchor always uses the tag of 'tag' if the protocol calls for one. :param for_box_id: whether to prefix a colon, if current protocol uses one, in constructing a cred def id or rev reg id. :return: cred def id tag
20.305996
12.035062
1.687237
return '{}:3:CL:{}{}'.format( # 3 marks indy cred def id, CL is sig type issuer_did, schema_seq_no, self.cd_id_tag(True))
def cred_def_id(self, issuer_did: str, schema_seq_no: int) -> str
Return credential definition identifier for input issuer DID and schema sequence number. :param issuer_did: DID of credential definition issuer :param schema_seq_no: schema sequence number :return: credential definition identifier
14.277624
14.242385
1.002474
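The resulting layout is '<issuer-did>:3:CL:<schema-seq-no><tag>'; a worked sketch, with 'R17v42T4pk' as an illustrative DID:

# Worked sketch of the cred def id layout ('R17v42T4pk' is illustrative)
assert Protocol.DEFAULT.cred_def_id('R17v42T4pk', 19) == 'R17v42T4pk:3:CL:19:tag'
assert Protocol.V_13.cred_def_id('R17v42T4pk', 19) == 'R17v42T4pk:3:CL:19'  # V_13 omits the tag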
rv = None if self == Protocol.V_13: rv = SchemaKey(txn['identifier'], txn['data']['name'], txn['data']['version']) else: txn_txn = txn.get('txn', None) or txn # may have already run this txn through txn2data() below rv = SchemaKey( txn_txn['metadata']['from'], txn_txn['data']['data']['name'], txn_txn['data']['data']['version']) return rv
def txn_data2schema_key(self, txn: dict) -> SchemaKey
Return schema key from ledger transaction data. :param txn: get-schema transaction (by sequence number) :return: schema key identified
5.604516
5.412226
1.035529
rv_json = json.dumps({}) if self == Protocol.V_13: rv_json = json.dumps(txn['result'].get('data', {})) else: rv_json = json.dumps((txn['result'].get('data', {}) or {}).get('txn', {})) # "data": null for no such txn return rv_json
def txn2data(self, txn: dict) -> str
Given ledger transaction, return its data json. :param txn: transaction as dict :return: transaction data json
6.761711
6.129995
1.103053
rv = None if self == Protocol.V_13: rv = txn['result']['txnTime'] else: rv = txn['result']['txnMetadata']['txnTime'] return rv
def txn2epoch(self, txn: dict) -> int
Given ledger transaction, return its epoch time. :param txn: transaction as dict :return: transaction time
8.283708
6.870111
1.20576
txn_data = genesis_txn['data'] if self == Protocol.V_13 else genesis_txn['txn']['data']['data'] return (txn_data['node_ip'], txn_data['node_port'])
def genesis_host_port(self, genesis_txn: dict) -> tuple
Given a genesis transaction, return its node host and port. :param genesis_txn: genesis transaction as dict :return: node host and port
6.460598
6.045745
1.068619
LOGGER.debug('Tails.open >>>') self._reader_handle = await blob_storage.open_reader('default', self._tails_config_json) LOGGER.debug('Tails.open <<<') return self
async def open(self) -> 'Tails'
Open reader handle and return current object. :return: current object
7.546237
6.538414
1.154139
LOGGER.debug('Tails.ok_hash >>> token: %s', token) rv = re.match('[{}]{{42,44}}$'.format(B58), token) is not None LOGGER.debug('Tails.ok_hash <<< %s', rv) return rv
def ok_hash(token: str) -> bool
Whether input token looks like a valid tails hash. :param token: candidate string :return: whether input token looks like a valid tails hash
7.329083
5.064758
1.447075
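For example, assuming B58 names the standard base58 alphabet (no '0', 'O', 'I' or 'l'):

# Worked sketch (assumes B58 is the standard base58 alphabet)
assert Tails.ok_hash('3MLjUFQz9x9n5u8MTfX1PtWPpx74YiV3A6mHmK5rTkGv')  # 44 base58 chars
assert not Tails.ok_hash('not-a-tails-hash!')  # '-' and '!' are not base58
assert not Tails.ok_hash('abc')  # too short: fewer than 42 characters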
LOGGER.debug('Tails.associate >>> base_dir: %s, rr_id: %s, tails_hash: %s', base_dir, rr_id, tails_hash) if not ok_rev_reg_id(rr_id): LOGGER.debug('Tails.associate <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) if not Tails.ok_hash(tails_hash): LOGGER.debug('Tails.associate <!< Bad tails hash %s', tails_hash) raise BadIdentifier('Bad tails hash {}'.format(tails_hash)) cd_id = rev_reg_id2cred_def_id(rr_id) directory = join(base_dir, cd_id) cwd = getcwd() makedirs(directory, exist_ok=True) chdir(directory) symlink(tails_hash, rr_id) chdir(cwd) LOGGER.debug('Tails.associate <<<')
def associate(base_dir: str, rr_id: str, tails_hash: str) -> None
Create symbolic link to tails file named tails_hash for rev reg id rr_id.

:param base_dir: base directory for tails files, thereafter split by cred def id
:param rr_id: rev reg id
:param tails_hash: hash of tails file, serving as file name
2.435398
2.187264
1.113445
LOGGER.debug('Tails.dir >>> base_dir: %s, rr_id: %s', base_dir, rr_id) if not ok_rev_reg_id(rr_id): LOGGER.debug('Tails.dir <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) rv = join(base_dir, rev_reg_id2cred_def_id(rr_id)) LOGGER.debug('Tails.dir <<< %s', rv) return rv
def dir(base_dir: str, rr_id: str) -> str
Return correct subdirectory of input base dir for artifacts corresponding to input rev reg id.

:param base_dir: base directory for tails files, thereafter split by cred def id
:param rr_id: rev reg id
:return: path to subdirectory of base dir for artifacts on input rev reg id
3.327741
2.526316
1.317231
LOGGER.debug('Tails.linked >>> base_dir: %s, rr_id: %s', base_dir, rr_id) if not ok_rev_reg_id(rr_id): LOGGER.debug('Tails.linked <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) cd_id = rev_reg_id2cred_def_id(rr_id) link = join(base_dir, cd_id, rr_id) rv = join(base_dir, cd_id, readlink(link)) if islink(link) else None LOGGER.debug('Tails.linked <<< %s', rv) return rv
def linked(base_dir: str, rr_id: str) -> str
Get, from the specified directory, the path to the tails file associated with the input revocation registry identifier, or None for no such file. :param base_dir: base directory for tails files, thereafter split by cred def id :param rr_id: rev reg id :return: (stringified) path to tails file of interest, or None for no such file.
3.48223
2.639282
1.319385
LOGGER.debug('Tails.links >>> base_dir: %s, issuer_did: %s', base_dir, issuer_did) if issuer_did and not ok_did(issuer_did): LOGGER.debug('Tails.links <!< Bad DID %s', issuer_did) raise BadIdentifier('Bad DID {}'.format(issuer_did)) rv = set() for dir_path, dir_names, file_names in walk(base_dir, topdown=True): dir_names[:] = [d for d in dir_names if not d.startswith('.')] for file_name in file_names: if islink(join(dir_path, file_name)) and (not issuer_did or ok_rev_reg_id(file_name, issuer_did)): rv.add(join(dir_path, file_name)) LOGGER.debug('Tails.links <<< %s', rv) return rv
def links(base_dir: str, issuer_did: str = None) -> set
Return set of all paths to symbolic links (rev reg ids) associating their respective tails files, in specified base tails directory recursively (omitting the .hopper subdirectory), on input issuer DID if specified. :param base_dir: base directory for tails files, thereafter split by cred def id :param issuer_did: issuer DID of interest :return: set of paths to symbolic links associating tails files
2.259552
2.082491
1.085024
LOGGER.debug('Tails.unlinked >>> base_dir: %s', base_dir) rv = set() for dir_path, dir_names, file_names in walk(base_dir, topdown=True): dir_names[:] = [d for d in dir_names if not d.startswith('.')] for file_name in file_names: if isfile(join(dir_path, file_name)) and Tails.ok_hash(file_name): rv.add(join(dir_path, file_name)) rv -= {join(dirname(path_link), readlink(path_link)) for path_link in Tails.links(base_dir)} LOGGER.debug('Tails.unlinked <<< %s', rv) return rv
def unlinked(base_dir: str) -> set
Return all paths to tails files, in specified tails base directory recursively (omitting the .hopper subdirectory), without symbolic links associating revocation registry identifiers. At an Issuer, tails files should not persist long without revocation registry identifier association via symbolic link. At a HolderProver, a newly downloaded tails file stays unlinked until the anchor stores a credential or creates a proof needing it, or else the anchor restarts. :param base_dir: base directory for tails files, thereafter split by cred def id :return: set of paths to tails files with no local symbolic links to them
2.878534
2.495269
1.153596
LOGGER.debug('Tails.next_tag >>> base_dir: %s, cd_id: %s', base_dir, cd_id)
if not ok_cred_def_id(cd_id):
    LOGGER.debug('Tails.next_tag <!< Bad cred def id %s', cd_id)
    raise BadIdentifier('Bad cred def id {}'.format(cd_id))
tag = 1 + max([int(rev_reg_id2tag(basename(f))) for f in Tails.links(base_dir) if cd_id in basename(f)] + [-1])  # -1: next tag is '0' if no tags so far
size = min(2**(tag + 6), Tails.MAX_SIZE)
rv = (str(tag), size)  # stringify tag as per signature and rev reg id format
LOGGER.debug('Tails.next_tag <<< %s', rv)
return rv
def next_tag(base_dir: str, cd_id: str) -> (str, int)
Return the next tag name available for a new rev reg id on input cred def id in base directory, and suggested size of associated rev reg. :param base_dir: base directory for tails files, thereafter split by cred def id :param cd_id: credential definition identifier of interest :return: stringified least non-negative integer not yet used in a rev reg id associated with a tails file in base directory, and recommendation for next size to use
4.818086
3.827523
1.2588
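The suggested size doubles per tag from a floor of 64, capped at Tails.MAX_SIZE; a worked sketch:

# Worked sketch of the size suggestion min(2**(tag + 6), Tails.MAX_SIZE):
# no tails files yet for cd_id       -> ('0', 64)
# tails files exist for tags 0 and 1 -> ('2', 256)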
LOGGER.debug('Tails.current_rev_reg_id >>> base_dir: %s, cd_id: %s', base_dir, cd_id) if not ok_cred_def_id(cd_id): LOGGER.debug('Tails.current_rev_reg_id <!< Bad cred def id %s', cd_id) raise BadIdentifier('Bad cred def id {}'.format(cd_id)) tags = [int(rev_reg_id2tag(basename(f))) for f in Tails.links(base_dir) if cd_id in basename(f)] if not tags: raise AbsentTails('No tails files present for cred def id {}'.format(cd_id)) rv = rev_reg_id(cd_id, str(max(tags))) # ensure 10 > 9, not '9' > '10' LOGGER.debug('Tails.current_rev_reg_id <<< %s', rv) return rv
def current_rev_reg_id(base_dir: str, cd_id: str) -> str
Return the current revocation registry identifier for input credential definition identifier, in input directory. Raise AbsentTails if no corresponding tails file, signifying no such revocation registry defined. :param base_dir: base directory for tails files, thereafter split by cred def id :param cd_id: credential definition identifier of interest :return: identifier for current revocation registry on input credential definition identifier
3.781987
3.258533
1.160641
config = json.loads(self._tails_config_json) return join(config['base_dir'], config['file'])
def path(self) -> str
Accessor for (stringified) path to current tails file. :return: (stringified) path to current tails file.
15.17517
9.357669
1.621683
if isinstance(orig, int) and -I32_BOUND <= orig < I32_BOUND:
    return str(int(orig))  # python bools are ints
try:
    i32orig = int(str(orig))  # don't encode floats as ints
    if -I32_BOUND <= i32orig < I32_BOUND:
        return str(i32orig)
except (ValueError, TypeError):
    pass
rv = int.from_bytes(sha256(raw(orig).encode()).digest(), 'big')
while -I32_BOUND <= rv < I32_BOUND:
    rv = int.from_bytes(sha256(raw(rv).encode()).digest(), 'big')  # sha256 maps no 32-bit int to another: terminates
return str(rv)
def encode(orig: Any) -> str
Encode credential attribute value, purely stringifying any int32 and leaving numeric int32 strings alone, but mapping any other input to a stringified 256-bit (but not 32-bit) integer. Predicates in indy-sdk operate on int32 values properly only when their encoded values match their raw values. :param orig: original value to encode :return: encoded value
3.975882
3.692807
1.076656
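Worked examples of the scheme, assuming raw() stringifies its input (so that raw('Alice') == 'Alice'):

# Worked sketch (assumes raw('Alice') == 'Alice')
from hashlib import sha256

assert encode(1234) == '1234'    # int32 passes through, stringified
assert encode(True) == '1'       # python bools are ints
assert encode('-0056') == '-56'  # numeric int32 string normalizes
assert encode('Alice') == str(int.from_bytes(sha256('Alice'.encode()).digest(), 'big'))
assert int(encode('Alice')) >= 2**31  # non-int32 input maps outside int32 range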
for pred in Predicate: if relation.upper() in (pred.value.fortran, pred.value.wql.upper(), pred.value.math): return pred return None
def get(relation: str) -> 'Predicate'
Return enum instance corresponding to input relation string, or None for no match.

:param relation: relation string
:return: corresponding enum instance, or None
12.532135
9.936096
1.261274
if isinstance(value, (bool, int)): return int(value) return int(str(value))
def to_int(value: Any) -> int
Cast a value as its equivalent int for indy predicate argument. Raise ValueError for any input but int, stringified int, or boolean.

:param value: value to coerce
:return: int value
6.372038
4.661023
1.36709
if token is None: return Role.USER for role in Role: if role == Role.ROLE_REMOVE: continue # ROLE_REMOVE is not a sensible role to parse from any configuration if isinstance(token, int) and token in role.value: return role if str(token).upper() == role.name or token in (str(v) for v in role.value): # could be numeric string return role return None
def get(token: Union[str, int] = None) -> 'Role'
Return enum instance corresponding to input token, or None for no match.

:param token: token identifying role to indy-sdk: 'STEWARD', 'TRUSTEE', 'TRUST_ANCHOR', '' or None, or a corresponding numeric value or numeric string
:return: enum instance corresponding to input token, or None
6.426092
6.31336
1.017856
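Worked examples of token resolution:

# Worked sketch of token resolution
assert Role.get('TRUSTEE') == Role.TRUSTEE
assert Role.get('trustee') == Role.TRUSTEE  # name match is case-insensitive
assert Role.get() == Role.USER              # absence of token maps to USER
assert Role.get('no-such-role') is None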
return self.value[0] if self in (Role.USER, Role.ROLE_REMOVE) else self.name
def token(self) -> str
Return token identifying role to indy-sdk. :return: token: 'STEWARD', 'TRUSTEE', 'TRUST_ANCHOR', or None (for USER)
24.797869
18.307585
1.354513
def decorator(view_func):
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(view_or_request, *args, **kwargs):

        # The type of the request gets muddled when using a function based
        # decorator. We must use a function based decorator so it can be
        # used in urls.py.
        request = getattr(view_or_request, "request", view_or_request)

        if not hasattr(_thread_locals, "ultracache_request"):
            setattr(_thread_locals, "ultracache_request", request)

        # If request not GET or HEAD never cache
        if request.method.lower() not in ("get", "head"):
            return view_func(view_or_request, *args, **kwargs)

        # If request contains messages never cache
        msg_count = 0
        try:
            msg_count = len(request._messages)
        except (AttributeError, TypeError):
            pass
        if msg_count:
            return view_func(view_or_request, *args, **kwargs)

        # Compute a cache key
        li = [str(view_or_request.__class__), view_func.__name__]

        # request.get_full_path is implicitly added if no other request
        # path is provided. get_full_path includes the querystring and is
        # the more conservative approach but makes it trivially easy for a
        # request to bust through the cache.
        if not set(params).intersection(set((
            "request.get_full_path()", "request.path", "request.path_info"
        ))):
            li.append(request.get_full_path())

        if "django.contrib.sites" in settings.INSTALLED_APPS:
            li.append(get_current_site_pk(request))

        # Pre-sort kwargs
        keys = list(kwargs.keys())
        keys.sort()
        for key in keys:
            li.append("%s,%s" % (key, kwargs[key]))

        # Extend cache key with custom variables
        for param in params:
            if not isinstance(param, str):
                param = str(param)
            li.append(eval(param))

        s = ":".join([str(part) for part in li])
        hashed = hashlib.md5(s.encode("utf-8")).hexdigest()
        cache_key = "ucache-get-%s" % hashed
        cached = cache.get(cache_key, None)
        if cached is None:
            # The get view as outermost caller may bluntly set _ultracache
            request._ultracache = []
            response = view_func(view_or_request, *args, **kwargs)
            content = None
            if isinstance(response, TemplateResponse):
                content = response.render().rendered_content
            elif isinstance(response, HttpResponse):
                content = response.content
            if content is not None:
                headers = getattr(response, "_headers", {})
                cache.set(
                    cache_key,
                    {"content": content, "headers": headers},
                    timeout
                )
                cache_meta(request, cache_key)
        else:
            response = HttpResponse(cached["content"])
            # The cached headers structure has a non-obvious format
            for k, v in cached["headers"].items():
                response[v[0]] = v[1]

        return response

    return _wrapped_view

return decorator
def cached_get(timeout, *params)
Decorator applied specifically to a view's get method
3.737104
3.662661
1.020325
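A hedged usage sketch on a Django class-based view; the string params are eval'd with the request in scope, so 'request.user.id' (illustrative) keys the cache per user:

# Hypothetical usage sketch on a Django class-based view
from django.views.generic import TemplateView

class HomeView(TemplateView):
    template_name = 'home.html'  # illustrative template

    @cached_get(300, 'request.user.id')  # cache per user for 300 seconds
    def get(self, request, *args, **kwargs):
        return super().get(request, *args, **kwargs)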
def decorator(cls): class WrappedClass(cls): def __init__(self, *args, **kwargs): super(WrappedClass, self).__init__(*args, **kwargs) @cached_get(timeout, *params) def get(self, *args, **kwargs): return super(WrappedClass, self).get(*args, **kwargs) return WrappedClass return decorator
def ultracache(timeout, *params)
Decorator applied to a view class. The get method is decorated implicitly.
2.282994
2.022013
1.12907
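Equivalently, the class decorator wraps get() implicitly; a hedged sketch:

# Hypothetical usage sketch: get() is decorated implicitly
from django.views.generic import TemplateView

@ultracache(300, 'request.path')
class ProductView(TemplateView):
    template_name = 'product.html'  # illustrative template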
LOGGER.debug('Issuer.open >>>') await super().open() for path_rr_id in Tails.links(self.dir_tails, self.did): await self._sync_revoc_for_issue(basename(path_rr_id)) LOGGER.debug('Issuer.open <<<') return self
async def open(self) -> 'Issuer'
Explicit entry. Perform ancestor opening operations, then synchronize revocation registries to tails tree content.

:return: current object
10.75239
9.172164
1.172285
LOGGER.debug('Issuer._send_rev_reg_def >>> rr_id: %s', rr_id) dir_tails_rr_id = self.rrb.dir_tails_top(rr_id) dir_target = self.rrb.dir_tails_target(rr_id) if not Tails.linked(dir_tails_rr_id, rr_id): LOGGER.debug( 'Issuer._send_rev_reg_def <!< Tails file for rev reg %s not ready in dir %s', rr_id, dir_target) raise AbsentRevReg('Tails file for rev reg {} not ready in dir {}'.format(rr_id, dir_target)) file_rr_def = join(dir_target, 'rr_def.json') if not isfile(file_rr_def): LOGGER.debug('Issuer._send_rev_reg_def <!< Rev reg def file %s not present', file_rr_def) raise AbsentRevReg('Rev reg def file {} not present'.format(file_rr_def)) with open(file_rr_def, 'r') as fh_rr_def: rr_def_json = fh_rr_def.read() file_rr_ent = join(dir_target, 'rr_ent.json') if not isfile(file_rr_ent): LOGGER.debug('Issuer._send_rev_reg_def <!< Rev reg entry file %s not present', file_rr_ent) raise AbsentRevReg('Rev reg entry file {} not present'.format(file_rr_ent)) with open(file_rr_ent, 'r') as fh_rr_ent: rr_ent_json = fh_rr_ent.read() file_tails = Tails.linked(dir_tails_rr_id, rr_id) if not file_tails: LOGGER.debug('Issuer._send_rev_reg_def <!< Tails link %s not present in dir %s', rr_id, dir_target) raise AbsentTails('Tails link {} not present in dir {}'.format(rr_id, dir_target)) if self.rrbx: dir_cd_id = join(self.dir_tails, rev_reg_id2cred_def_id(rr_id)) makedirs(dir_cd_id, exist_ok=True) rename(file_tails, join(dir_cd_id, basename(file_tails))) with REVO_CACHE.lock: rr_def_req_json = await ledger.build_revoc_reg_def_request(self.did, rr_def_json) await self._sign_submit(rr_def_req_json) await self.get_rev_reg_def(rr_id) # add to cache en passant rr_ent_req_json = await ledger.build_revoc_reg_entry_request(self.did, rr_id, 'CL_ACCUM', rr_ent_json) await self._sign_submit(rr_ent_req_json) if self.rrbx: Tails.associate(self.dir_tails, rr_id, basename(file_tails)) rmtree(dir_tails_rr_id) else: remove(file_rr_def) remove(file_rr_ent) LOGGER.debug('Issuer._send_rev_reg_def <<<')
async def _send_rev_reg_def(self, rr_id: str) -> None
Move tails file from hopper; deserialize revocation registry definition and initial entry; send to ledger and cache revocation registry definition. Operation serializes to subdirectory within tails hopper directory; symbolic link presence signals completion. Raise AbsentRevReg if revocation registry is not ready in hopper, or AbsentTails if tails file is not yet linked by its revocation registry identifier. :param rr_id: revocation registry identifier
2.228908
2.10067
1.061046
LOGGER.debug('Issuer._set_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size) assert self.rrbx dir_hopper_rr_id = join(self.rrb.dir_tails_hopper, rr_id) while Tails.linked(dir_hopper_rr_id, rr_id) is None: await asyncio.sleep(1) await self._send_rev_reg_def(rr_id) cd_id = rev_reg_id2cred_def_id(rr_id) (next_tag, rr_size_suggested) = Tails.next_tag(self.dir_tails, cd_id) rr_id = rev_reg_id(cd_id, next_tag) self.rrb.mark_in_progress(rr_id, rr_size or rr_size_suggested) LOGGER.debug('Issuer._set_rev_reg <<<')
async def _set_rev_reg(self, rr_id: str, rr_size: int) -> None
Move precomputed revocation registry data from hopper into place within tails directory, then mark the next revocation registry on the cred def as in progress for the external revocation registry builder.

:param rr_id: revocation registry identifier
:param rr_size: revocation registry size, in case creation required
4.779446
4.28128
1.116359
LOGGER.debug('Issuer._sync_revoc_for_issue >>> rr_id: %s, rr_size: %s', rr_id, rr_size) if not ok_rev_reg_id(rr_id): LOGGER.debug('Issuer._sync_revoc_for_issue <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) (cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id) try: await self.get_cred_def(cd_id) except AbsentCredDef: LOGGER.debug( 'Issuer._sync_revoc_for_issue <!< tails tree %s may be for another ledger; no cred def found on %s', self.dir_tails, cd_id) raise AbsentCredDef('Tails tree {} may be for another ledger; no cred def found on {}'.format( self.dir_tails, cd_id)) with REVO_CACHE.lock: revo_cache_entry = REVO_CACHE.get(rr_id, None) tails = None if revo_cache_entry is None else revo_cache_entry.tails if tails is None: # it's a new revocation registry, or not yet set in cache try: tails = await Tails(self.dir_tails, cd_id, tag).open() except AbsentTails: # it's a new revocation registry if self.rrbx: await self._set_rev_reg(rr_id, rr_size) else: await self.rrb.create_rev_reg(rr_id, rr_size) await self._send_rev_reg_def(rr_id) tails = await Tails(self.dir_tails, cd_id, tag).open() # symlink should exist now if revo_cache_entry is None: REVO_CACHE[rr_id] = RevoCacheEntry(None, tails) else: REVO_CACHE[rr_id].tails = tails LOGGER.debug('Issuer._sync_revoc_for_issue <<<')
async def _sync_revoc_for_issue(self, rr_id: str, rr_size: int = None) -> None
Create revocation registry if need be for input revocation registry identifier; open and cache tails file reader. :param rr_id: revocation registry identifier :param rr_size: if new revocation registry necessary, its size (default as per RevRegBuilder.create_rev_reg())
2.684647
2.630571
1.020557
LOGGER.debug('Issuer.path_tails >>> rr_id: %s', rr_id)
if not ok_rev_reg_id(rr_id):
    LOGGER.debug('Issuer.path_tails <!< Bad rev reg id %s', rr_id)
    raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rv = Tails.linked(self.dir_tails, rr_id)
LOGGER.debug('Issuer.path_tails <<< %s', rv)
return rv
def path_tails(self, rr_id: str) -> str
Return path to tails file for input revocation registry identifier. :param rr_id: revocation registry identifier of interest :return: path to tails file for input revocation registry identifier
3.887284
3.445838
1.12811
LOGGER.debug( 'Issuer._create_cred_def >>> schema: %s, ledger_cred_def: %s, revo: %s', schema, ledger_cred_def, revo) cred_def_json = '{}' private_key_ok = True try: (_, cred_def_json) = await anoncreds.issuer_create_and_store_credential_def( self.wallet.handle, self.did, # issuer DID json.dumps(schema), self.pool.protocol.cd_id_tag(False), # expect only one cred def per schema and issuer 'CL', json.dumps({'support_revocation': revo})) if ledger_cred_def: private_key_ok = False LOGGER.warning( 'New cred def on %s in wallet shadows existing one on ledger: private key not usable', cred_def_id(self.did, schema['seqNo'], self.pool.protocol)) # carry on though, this anchor may have other capacities so public key may be good enough except IndyError as x_indy: if x_indy.error_code == ErrorCode.AnoncredsCredDefAlreadyExistsError: if ledger_cred_def: LOGGER.info( 'Issuer wallet %s reusing existing cred def on schema %s version %s', self.name, schema['name'], schema['version']) else: LOGGER.debug('Issuer._create_cred_def <!< corrupt wallet %s', self.name) raise CorruptWallet('Corrupt Issuer wallet {} has cred def on schema {} not on ledger'.format( self.name, schema['id'])) else: LOGGER.debug( 'Issuer._create_cred_def <!< cannot store cred def in wallet %s: indy error code %s', self.name, x_indy.error_code) raise rv = (cred_def_json, private_key_ok) LOGGER.debug('Issuer._create_cred_def <<< %s', rv) return rv
async def _create_cred_def(self, schema: dict, ledger_cred_def: dict, revo: bool) -> (str, bool)
Create credential definition in wallet as part of the send_cred_def() sequence. Return cred def json, and whether the cred def private key is OK to continue with the sequence, propagating the cred def and revocation registry info to the ledger.

:param schema: schema on which to create cred def
:param ledger_cred_def: credential definition as ledger has it (typically, None)
:param revo: whether cred def supports revocation
:return: cred def json and whether local cred def private key is OK, hence cred def is OK to send to the ledger
3.598093
3.447338
1.043731
LOGGER.debug('Issuer.create_cred_offer >>> schema_seq_no: %s', schema_seq_no) if not self.wallet.handle: LOGGER.debug('Issuer.create_cred_offer <!< Wallet %s is closed', self.name) raise WalletState('Wallet {} is closed'.format(self.name)) if not self.pool: LOGGER.debug('Issuer.create_cred_offer <!< issuer %s has no pool', self.name) raise AbsentPool('Issuer {} has no pool: cannot create cred offer'.format(self.name)) rv = None cd_id = cred_def_id(self.did, schema_seq_no, self.pool.protocol) try: rv = await anoncreds.issuer_create_credential_offer(self.wallet.handle, cd_id) except IndyError as x_indy: if x_indy.error_code == ErrorCode.WalletNotFoundError: LOGGER.debug( 'Issuer.create_cred_offer <!< did not issue cred definition from wallet %s', self.name) raise CorruptWallet('Cannot create cred offer: did not issue cred definition from wallet {}'.format( self.name)) LOGGER.debug( 'Issuer.create_cred_offer <!< cannot create cred offer, indy error code %s', x_indy.error_code) raise LOGGER.debug('Issuer.create_cred_offer <<< %s', rv) return rv
async def create_cred_offer(self, schema_seq_no: int) -> str
Create credential offer as Issuer for given schema. Raise CorruptWallet if the wallet has no private key for the corresponding credential definition. Raise WalletState for closed wallet. :param schema_seq_no: schema sequence number :return: credential offer json for use in storing credentials at HolderProver.
2.427794
2.217435
1.094866
LOGGER.debug('Issuer.revoke_cred >>> rr_id: %s, cr_id: %s', rr_id, cr_id) if not self.wallet.handle: LOGGER.debug('Issuer.revoke_cred <!< Wallet %s is closed', self.name) raise WalletState('Wallet {} is closed'.format(self.name)) if not ok_rev_reg_id(rr_id): LOGGER.debug('Issuer.revoke_cred <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) tails_reader_handle = (await Tails( self.dir_tails, *rev_reg_id2cred_def_id_tag(rr_id)).open()).reader_handle try: rrdelta_json = await anoncreds.issuer_revoke_credential( self.wallet.handle, tails_reader_handle, rr_id, cr_id) except IndyError as x_indy: LOGGER.debug( 'Issuer.revoke_cred <!< Could not revoke revoc reg id %s, cred rev id %s: indy error code %s', rr_id, cr_id, x_indy.error_code) raise BadRevocation( 'Could not revoke revoc reg id {}, cred rev id {}: indy error code {}'.format( rr_id, cr_id, x_indy.error_code)) rr_ent_req_json = await ledger.build_revoc_reg_entry_request(self.did, rr_id, 'CL_ACCUM', rrdelta_json) resp_json = await self._sign_submit(rr_ent_req_json) # raises AbsentPool or ClosedPool if applicable resp = json.loads(resp_json) rv = self.pool.protocol.txn2epoch(resp) LOGGER.debug('Issuer.revoke_cred <<< %s', rv) return rv
async def revoke_cred(self, rr_id: str, cr_id: str) -> int
Revoke credential that input revocation registry identifier and credential revocation identifier specify. Return (epoch seconds) time of revocation. Raise AbsentTails if no tails file is available for input revocation registry identifier. Raise WalletState for closed wallet. Raise BadRevocation if issuer cannot revoke specified credential for any other reason (e.g., did not issue it, already revoked it). :param rr_id: revocation registry identifier :param cr_id: credential revocation identifier :return: time of revocation, in epoch seconds
3.187411
2.817575
1.13126
LOGGER.debug('Issuer.get_box_ids_issued >>>') cd_ids = [ d for d in listdir(self.dir_tails) if isdir(join(self.dir_tails, d)) and ok_cred_def_id(d, self.did)] s_ids = [] for cd_id in cd_ids: try: s_ids.append(json.loads(await self.get_schema(cred_def_id2seq_no(cd_id)))['id']) except AbsentSchema: LOGGER.error( 'Issuer %s has issued cred def %s but no corresponding schema on ledger', self.name, cd_id) rr_ids = [basename(link) for link in Tails.links(self.dir_tails, self.did)] rv = json.dumps({ 'schema_id': s_ids, 'cred_def_id': cd_ids, 'rev_reg_id': rr_ids }) LOGGER.debug('Issuer.get_box_ids_issued <<< %s', rv) return rv
async def get_box_ids_issued(self) -> str
Return json object on lists of all unique box identifiers (schema identifiers, credential definition identifiers, and revocation registry identifiers) for all credential definitions and credentials issued; e.g.,

::

    {
        "schema_id": [
            "R17v42T4pk...:2:tombstone:1.2",
            ...
        ],
        "cred_def_id": [
            "R17v42T4pk...:3:CL:19:tag",
            ...
        ],
        "rev_reg_id": [
            "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0",
            "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1",
            ...
        ]
    }

An issuer must issue a credential definition to include its schema identifier in the returned values; the schema identifier in isolation belongs properly to an Origin, not necessarily to an Issuer. The operation may be useful for a Verifier anchor going off-line to seed its cache before doing so.

:return: json object with lists of schema identifiers, credential definition identifiers, and revocation registry identifiers issued
3.837304
3.27231
1.172659
return ref.split(delimiter if delimiter else '#')[0]
def resource(ref: str, delimiter: str = None) -> str
Given a (URI) reference, return up to its delimiter (exclusively), or all of it if there is none.

:param ref: reference
:param delimiter: delimiter character (default None maps to '#'; pass ';' for references that delimit identifiers with a semicolon)
:return: reference up to delimiter, or whole reference if no delimiter present
16.105051
9.111184
1.767613
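Worked examples ('did:sov:LjgpST2rjsox' is an illustrative DID reference):

# Worked sketch of delimiter handling
assert resource('did:sov:LjgpST2rjsox#keys-1') == 'did:sov:LjgpST2rjsox'
assert resource('did:sov:LjgpST2rjsox;spec/1', ';') == 'did:sov:LjgpST2rjsox'
assert resource('did:sov:LjgpST2rjsox') == 'did:sov:LjgpST2rjsox'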