Dataset schema: _id (string, 2-7 chars); title (string, 1-88 chars); partition (string, 3 classes); text (string, 75-19.8k chars); language (string, 1 class); meta_information (dict)
q11000
ppjson
train
def ppjson(dumpit: Any, elide_to: int = None) -> str:
    """
    JSON pretty printer, whether already json-encoded or not.

    :param dumpit: object to pretty-print
    :param elide_to: optional maximum length including ellipses ('...')
    :return: json pretty-print
    """

    if elide_to is not None:
        elide_to = max(elide_to, 3)  # make room for ellipses '...'
    try:
        rv = json.dumps(json.loads(dumpit) if isinstance(dumpit, str) else dumpit, indent=4)
    except TypeError:
        rv = '{}'.format(pformat(dumpit, indent=4, width=120))
    return rv if elide_to is None or len(rv) <= elide_to else '{}...'.format(rv[0 : elide_to - 3])
python
{ "resource": "" }
q11001
do_wait
train
def do_wait(coro: Callable) -> Any:
    """
    Perform asynchronous operation; await then return the result.

    :param coro: coroutine to await
    :return: coroutine result
    """

    event_loop = None
    try:
        event_loop = asyncio.get_event_loop()
    except RuntimeError:
        event_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(event_loop)
    return event_loop.run_until_complete(coro)
python
{ "resource": "" }
q11002
Stopwatch.mark
train
def mark(self, digits: int = None) -> float:
    """
    Return time in seconds since last mark, reset, or construction.

    :param digits: number of fractional decimal digits to retain (default as constructed)
    """

    self._mark[:] = [self._mark[1], time()]
    rv = self._mark[1] - self._mark[0]

    if digits is not None and digits > 0:
        rv = round(rv, digits)
    elif digits == 0 or self._digits == 0:
        rv = int(rv)
    elif self._digits is not None and self._digits > 0:
        rv = round(rv, self._digits)

    return rv
python
{ "resource": "" }
q11003
StorageRecordSearch.open
train
async def open(self) -> None:
    """
    Begin the search operation.
    """

    LOGGER.debug('StorageRecordSearch.open >>>')

    if self.opened:
        LOGGER.debug('StorageRecordSearch.open <!< Search is already opened')
        raise BadSearch('Search is already opened')

    if not self._wallet.opened:
        LOGGER.debug('StorageRecordSearch.open <!< Wallet %s is closed', self._wallet.name)
        raise WalletState('Wallet {} is closed'.format(self._wallet.name))

    self._handle = await non_secrets.open_wallet_search(
        self._wallet.handle,
        self._type,
        self._query_json,
        StorageRecordSearch.OPTIONS_JSON)

    LOGGER.debug('StorageRecordSearch.open <<<')
python
{ "resource": "" }
q11004
StorageRecordSearch.fetch
train
async def fetch(self, limit: int = None) -> Sequence[StorageRecord]:
    """
    Fetch next batch of search results.

    Raise BadSearch if search is closed, WalletState if wallet is closed.

    :param limit: maximum number of records to return (default value Wallet.DEFAULT_CHUNK)
    :return: next batch of records found
    """

    LOGGER.debug('StorageRecordSearch.fetch >>> limit: %s', limit)

    if not self.opened:
        LOGGER.debug('StorageRecordSearch.fetch <!< Storage record search is closed')
        raise BadSearch('Storage record search is closed')

    if not self._wallet.opened:
        LOGGER.debug('StorageRecordSearch.fetch <!< Wallet %s is closed', self._wallet.name)
        raise WalletState('Wallet {} is closed'.format(self._wallet.name))

    records = json.loads(await non_secrets.fetch_wallet_search_next_records(
        self._wallet.handle,
        self.handle,
        limit or Wallet.DEFAULT_CHUNK))['records'] or []  # at exhaustion, fetched 'records' value is None

    rv = [StorageRecord(typ=rec['type'], value=rec['value'], tags=rec['tags'], ident=rec['id']) for rec in records]
    LOGGER.debug('StorageRecordSearch.fetch <<< %s', rv)
    return rv
python
{ "resource": "" }
q11005
StorageRecordSearch.close
train
async def close(self) -> None:
    """
    Close search.
    """

    LOGGER.debug('StorageRecordSearch.close >>>')

    if self._handle:
        await non_secrets.close_wallet_search(self.handle)
        self._handle = None

    LOGGER.debug('StorageRecordSearch.close <<<')
python
{ "resource": "" }
q11006
NodePool.cache_id
train
def cache_id(self) -> str:
    """
    Return identifier for archivable caches, computing it first and retaining it if need be.

    Raise AbsentPool if ledger configuration is not yet available.

    :return: archivable cache identifier
    """

    if self._cache_id:
        return self._cache_id

    with open(join(expanduser('~'), '.indy_client', 'pool', self.name, '{}.txn'.format(self.name))) as fh_genesis:
        genesis = [json.loads(line) for line in fh_genesis.readlines() if line]

    hps = []
    for gen_txn in genesis:
        hps.append(self.protocol.genesis_host_port(gen_txn))
    hps.sort()  # canonicalize to make order irrelevant

    self._cache_id = ':'.join('{}:{}'.format(hp[0], hp[1]) for hp in hps)
    return self._cache_id
python
{ "resource": "" }
q11007
NodePool.close
train
async def close(self) -> None:
    """
    Explicit exit. Closes pool. For use when keeping pool open across multiple calls.
    """

    LOGGER.debug('NodePool.close >>>')

    if not self.handle:
        LOGGER.warning('Abstaining from closing pool %s: already closed', self.name)
    else:
        await pool.close_pool_ledger(self.handle)
        self._handle = None

    LOGGER.debug('NodePool.close <<<')
python
{ "resource": "" }
q11008
NodePool.refresh
train
async def refresh(self) -> None:
    """
    Refresh local copy of pool ledger and update node pool connections.
    """

    LOGGER.debug('NodePool.refresh >>>')

    await pool.refresh_pool_ledger(self.handle)

    LOGGER.debug('NodePool.refresh <<<')
python
{ "resource": "" }
q11009
BaseAnchor.get_nym_role
train
async def get_nym_role(self, target_did: str = None) -> Role:
    """
    Return the cryptonym role for input did from the ledger - note that this
    may exceed the role of least privilege for the class.

    Raise AbsentNym if current anchor has no cryptonym on the ledger,
    or WalletState if current DID unavailable.

    :param target_did: DID of cryptonym role to fetch (default own DID)
    :return: identifier for current cryptonym role on ledger
    """

    LOGGER.debug('BaseAnchor.get_nym_role >>> target_did: %s', target_did)

    nym = json.loads(await self.get_nym(target_did))
    if not nym:
        LOGGER.debug('BaseAnchor.get_nym_role <!< Ledger has no cryptonym for anchor %s', self.name)
        raise AbsentNym('Ledger has no cryptonym for anchor {}'.format(self.name))

    rv = Role.get(nym['role'])
    LOGGER.debug('BaseAnchor.get_nym_role <<< %s', rv)
    return rv
python
{ "resource": "" }
q11010
BaseAnchor.get_did_endpoint
train
async def get_did_endpoint(self, remote_did: str) -> EndpointInfo:
    """
    Return endpoint info for remote DID.

    Raise BadIdentifier for bad remote DID. Raise WalletState if bypassing cache
    but wallet is closed. Raise AbsentRecord for no such endpoint.

    :param remote_did: pairwise remote DID
    :return: endpoint and (transport) verification key as EndpointInfo
    """

    LOGGER.debug('BaseAnchor.get_did_endpoint >>> remote_did: %s', remote_did)

    if not ok_did(remote_did):
        LOGGER.debug('BaseAnchor.get_did_endpoint <!< Bad DID %s', remote_did)
        raise BadIdentifier('Bad DID {}'.format(remote_did))
    if not self.wallet.handle:
        LOGGER.debug('BaseAnchor.get_did_endpoint <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    pairwise_info = (await self.wallet.get_pairwise(remote_did)).get(remote_did, None)
    if not (pairwise_info and 'did_endpoint' in pairwise_info.metadata):
        LOGGER.debug('BaseAnchor.get_did_endpoint <!< No endpoint for remote DID %s', remote_did)
        raise AbsentRecord('No endpoint for remote DID {}'.format(remote_did))

    rv = EndpointInfo(pairwise_info.metadata['did_endpoint'], pairwise_info.their_verkey)
    LOGGER.debug('BaseAnchor.get_did_endpoint <<< %s', rv)
    return rv
python
{ "resource": "" }
q11011
BaseAnchor._verkey_for
train
async def _verkey_for(self, target: str) -> str:
    """
    Given a DID, retrieve its verification key, looking in wallet, then pool.
    Given a verification key or None, return input.

    Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
    raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
    If no such verification key is on the ledger, raise AbsentNym.

    :param target: verification key, or DID to resolve to such
    :return: verification key
    """

    LOGGER.debug('BaseAnchor._verkey_for >>> target: %s', target)

    rv = target
    if rv is None or not ok_did(rv):  # it's None or already a verification key
        LOGGER.debug('BaseAnchor._verkey_for <<< %s', rv)
        return rv

    if self.wallet.handle:
        try:
            rv = await did.key_for_local_did(self.wallet.handle, target)
            LOGGER.info('Anchor %s got verkey for DID %s from wallet', self.name, target)
            LOGGER.debug('BaseAnchor._verkey_for <<< %s', rv)
            return rv
        except IndyError as x_indy:
            if x_indy.error_code != ErrorCode.WalletItemNotFound:  # on not found, try the pool
                LOGGER.debug(
                    'BaseAnchor._verkey_for <!< key lookup for local DID %s raised indy error code %s',
                    target,
                    x_indy.error_code)
                raise

    nym = json.loads(await self.get_nym(target))
    if not nym:
        LOGGER.debug(
            'BaseAnchor._verkey_for <!< Wallet %s closed and ledger has no cryptonym for DID %s',
            self.name,
            target)
        raise AbsentNym('Wallet {} closed, and ledger has no cryptonym for DID {}'.format(self.name, target))

    rv = nym['verkey']  # reuse the nym already fetched rather than querying the ledger again
    LOGGER.info('Anchor %s got verkey for DID %s from pool %s', self.name, target, self.pool.name)
    LOGGER.debug('BaseAnchor._verkey_for <<< %s', rv)
    return rv
python
{ "resource": "" }
q11012
BaseAnchor.encrypt
train
async def encrypt(self, message: bytes, authn: bool = False, recip: str = None) -> bytes:
    """
    Encrypt plaintext for owner of DID or verification key, anonymously or via
    authenticated encryption scheme. If given DID, first check wallet and then pool
    for corresponding verification key.

    Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
    raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.

    :param message: plaintext, as bytes
    :param authn: whether to use authenticated encryption scheme
    :param recip: DID or verification key of recipient, None for anchor's own
    :return: ciphertext, as bytes
    """

    LOGGER.debug('BaseAnchor.encrypt >>> message: %s, authn: %s, recip: %s', message, authn, recip)

    if not self.wallet.handle:
        LOGGER.debug('BaseAnchor.encrypt <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    rv = await self.wallet.encrypt(message, authn, await self._verkey_for(recip))
    LOGGER.debug('BaseAnchor.encrypt <<< %s', rv)
    return rv
python
{ "resource": "" }
q11013
BaseAnchor.sign
train
async def sign(self, message: bytes) -> bytes:
    """
    Sign message; return signature. Raise WalletState if wallet is closed.

    :param message: Content to sign, as bytes
    :return: signature, as bytes
    """

    LOGGER.debug('BaseAnchor.sign >>> message: %s', message)

    if not self.wallet.handle:
        LOGGER.debug('BaseAnchor.sign <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    rv = await self.wallet.sign(message)
    LOGGER.debug('BaseAnchor.sign <<< %s', rv)
    return rv
python
{ "resource": "" }
q11014
Service.to_dict
train
def to_dict(self):
    """
    Return dict representation of service to embed in DID document.
    """

    rv = {
        'id': self.id,
        'type': self.type,
        'priority': self.priority
    }
    if self.recip_keys:
        rv['recipientKeys'] = [canon_ref(k.did, k.id, '#') for k in self.recip_keys]
    if self.routing_keys:
        rv['routingKeys'] = [canon_ref(k.did, k.id, '#') for k in self.routing_keys]
    rv['serviceEndpoint'] = self.endpoint

    return rv
python
{ "resource": "" }
q11015
HolderProver._assert_link_secret
train
async def _assert_link_secret(self, action: str) -> str:
    """
    Return current wallet link secret label. Raise AbsentLinkSecret if link secret is not set.

    :param action: action requiring link secret
    """

    rv = await self.wallet.get_link_secret_label()
    if rv is None:
        LOGGER.debug('HolderProver._assert_link_secret: action %s requires link secret but it is not set', action)
        raise AbsentLinkSecret('Action {} requires link secret but it is not set'.format(action))

    return rv
python
{ "resource": "" }
q11016
HolderProver.load_cache_for_proof
train
async def load_cache_for_proof(self, archive: bool = False) -> int:
    """
    Load schema, cred def, revocation caches; optionally archive enough to go offline
    and be able to generate proof on all credentials in wallet.

    Return timestamp (epoch seconds) of cache load event, also used as subdirectory
    for cache archives.

    :param archive: True to archive now or False to demur (subclasses may still
        need to augment archivable caches further)
    :return: cache load event timestamp (epoch seconds)
    """

    LOGGER.debug('HolderProver.load_cache_for_proof >>> archive: %s', archive)

    rv = int(time())
    box_ids = json.loads(await self.get_box_ids_held())
    for s_id in box_ids['schema_id']:
        with SCHEMA_CACHE.lock:
            await self.get_schema(s_id)
    for cd_id in box_ids['cred_def_id']:
        with CRED_DEF_CACHE.lock:
            await self.get_cred_def(cd_id)
    for rr_id in box_ids['rev_reg_id']:
        await self.get_rev_reg_def(rr_id)
        with REVO_CACHE.lock:
            revo_cache_entry = REVO_CACHE.get(rr_id, None)
            if revo_cache_entry:
                try:
                    await revo_cache_entry.get_delta_json(self._build_rr_delta_json, rv, rv)
                except ClosedPool:
                    LOGGER.warning(
                        'HolderProver %s is offline from pool %s, cannot update revo cache reg delta for %s to %s',
                        self.name,
                        self.pool.name,
                        rr_id,
                        rv)
                except AbsentPool:
                    LOGGER.warning(
                        'HolderProver %s has no pool, cannot update revo cache reg delta for %s to %s',
                        self.name,
                        rr_id,
                        rv)

    if archive:
        ArchivableCaches.archive(self.dir_cache)
    LOGGER.debug('HolderProver.load_cache_for_proof <<< %s', rv)
    return rv
python
{ "resource": "" }
q11017
HolderProver.get_cred_info_by_id
train
async def get_cred_info_by_id(self, cred_id: str) -> str:
    """
    Return cred-info json from wallet by wallet credential identifier.

    Raise AbsentCred for no such credential. Raise WalletState if the wallet is closed.

    :param cred_id: credential identifier of interest
    :return: cred-info json for input credential identifier; i.e.,

    ::

        {
            "referent": string,  # credential identifier in the wallet
            "attrs": {
                "attr1" : {"raw": "value1", "encoded": "value1_as_int" },
                "attr2" : {"raw": "value2", "encoded": "value2_as_int" },
                ...
            }
            "schema_id": string,
            "cred_def_id": string,
            "rev_reg_id": Optional<string>,
            "cred_rev_id": Optional<string>
        }
    """

    LOGGER.debug('HolderProver.get_cred_info_by_id >>> cred_id: %s', cred_id)

    if not self.wallet.handle:
        LOGGER.debug('HolderProver.get_cred_info_by_id <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    try:
        rv_json = await anoncreds.prover_get_credential(self.wallet.handle, cred_id)
    except IndyError as x_indy:  # no such cred
        if x_indy.error_code == ErrorCode.WalletItemNotFound:
            LOGGER.debug(
                'HolderProver.get_cred_info_by_id <!< no cred in wallet %s for cred id %s',
                self.name,
                cred_id)
            raise AbsentCred('No cred in wallet for {}'.format(cred_id))
        LOGGER.debug(
            'HolderProver.get_cred_info_by_id <!< wallet %s, cred id %s: indy error code %s',
            self.name,
            cred_id,
            x_indy.error_code)
        raise

    LOGGER.debug('HolderProver.get_cred_info_by_id <<< %s', rv_json)
    return rv_json
python
{ "resource": "" }
q11018
HolderProver.get_cred_briefs_by_proof_req_q
train
async def get_cred_briefs_by_proof_req_q(self, proof_req_json: str, x_queries_json: str = None) -> str:
    """
    A cred-brief aggregates a cred-info and a non-revocation interval. A cred-brief-dict
    maps wallet cred-ids to their corresponding cred-briefs.

    Return json (cred-brief-dict) object mapping wallet credential identifiers to cred-briefs
    by proof request and WQL queries by proof request referent. Return empty dict on no WQL
    query and empty requested predicates specification within proof request. Utility
    util.proof_req2wql_all() builds WQL to retrieve all cred-briefs for (some or all)
    cred-def-ids in a proof request.

    For each WQL query on an item referent, indy-sdk takes the WQL and the attribute name
    and restrictions (e.g., cred def id, schema id, etc.) from its referent. Note that
    util.proof_req_attr_referents() maps cred defs and attr names to proof req item referents,
    bridging the gap between attribute names and their corresponding item referents.

    Raise WalletState if the wallet is closed.

    :param proof_req_json: proof request as per Verifier.build_proof_req_json(); e.g.,

    ::

        {
            "nonce": "1532429687",
            "name": "proof_req",
            "version": "0.0",
            "requested_predicates": {},
            "requested_attributes": {
                "17_name_uuid": {
                    "restrictions": [
                        {
                            "cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag"
                        }
                    ],
                    "name": "name"
                },
                "17_thing_uuid": {
                    "restrictions": [
                        {
                            "cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag"
                        }
                    ],
                    "name": "thing"
                }
            }
        }

    :param x_queries_json: json list of extra queries to apply to proof request attribute and
        predicate referents; e.g.,

    ::

        {
            "17_thing_uuid": {  # require attr presence on name 'thing', cred def id from proof req above
                "$or": [
                    {
                        "attr::name::value": "J.R. 'Bob' Dobbs"
                    },
                    {
                        "attr::thing::value": "slack"
                    },
                ]
            },
        }

    :return: json (cred-brief-dict) object mapping wallet cred ids to cred briefs; e.g.,

    ::

        {
            "b42ce5bc-b690-43cd-9493-6fe86ad25e85": {
                "interval": null,
                "cred_info": {
                    "schema_id": "LjgpST2rjsoxYegQDRm7EL:2:non-revo:1.0",
                    "rev_reg_id": null,
                    "attrs": {
                        "name": "J.R. \"Bob\" Dobbs",
                        "thing": "slack"
                    },
                    "cred_rev_id": null,
                    "referent": "b42ce5bc-b690-43cd-9493-6fe86ad25e85",
                    "cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag"
                }
            },
            "d773434a-0080-4e3e-a03b-f2033eae7d75": {
                "interval": null,
                "cred_info": {
                    "schema_id": "LjgpST2rjsoxYegQDRm7EL:2:non-revo:1.0",
                    "rev_reg_id": null,
                    "attrs": {
                        "name": "Chicken Hawk",
                        "thing": "chicken"
                    },
                    "cred_rev_id": null,
                    "referent": "d773434a-0080-4e3e-a03b-f2033eae7d75",
                    "cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag"
                }
            }
        }
    """

    LOGGER.debug(
        'HolderProver.get_cred_briefs_by_proof_req_q >>> proof_req_json: %s, x_queries_json: %s',
        proof_req_json,
        x_queries_json)

    if not self.wallet.handle:
        LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    def _pred_filter(brief):
        nonlocal pred_refts
        for attr, preds in pred_refts.get(brief['cred_info']['cred_def_id'], {}).items():
            if any(Predicate.get(p[0]).value.no(brief['cred_info']['attrs'][attr], p[1]) for p in preds.values()):
                return False
        return True

    rv = {}
    item_refts = set()
    x_queries = json.loads(x_queries_json or '{}')
    for k in x_queries:
        x_queries[k] = canon_cred_wql(x_queries[k])  # indy-sdk requires attr name canonicalization
        item_refts.add(k)

    proof_req = json.loads(proof_req_json)
    item_refts.update(uuid for uuid in proof_req['requested_predicates'])
    if not x_queries:
        item_refts.update(uuid for uuid in proof_req['requested_attributes'])  # get all req attrs if no extra wql
    handle = await anoncreds.prover_search_credentials_for_proof_req(
        self.wallet.handle,
        proof_req_json,
        json.dumps(x_queries) if x_queries else None)
    pred_refts = proof_req_pred_referents(proof_req)

    try:
        for item_referent in item_refts:
            count = Wallet.DEFAULT_CHUNK
            while count == Wallet.DEFAULT_CHUNK:
                fetched = json.loads(await anoncreds.prover_fetch_credentials_for_proof_req(
                    handle,
                    item_referent,
                    Wallet.DEFAULT_CHUNK))
                count = len(fetched)
                for brief in fetched:  # apply predicates from proof req here
                    if brief['cred_info']['referent'] not in rv and _pred_filter(brief):
                        rv[brief['cred_info']['referent']] = brief
    finally:
        await anoncreds.prover_close_credentials_search_for_proof_req(handle)

    rv_json = json.dumps(rv)
    LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <<< %s', rv_json)
    return rv_json
python
{ "resource": "" }
q11019
SchemaCache.index
train
def index(self) -> dict:
    """
    Return dict mapping content sequence numbers to schema keys.

    :return: dict mapping sequence numbers to schema keys
    """

    LOGGER.debug('SchemaCache.index >>>')

    rv = self._seq_no2schema_key
    LOGGER.debug('SchemaCache.index <<< %s', rv)
    return rv
python
{ "resource": "" }
q11020
SchemaCache.schema_key_for
train
def schema_key_for(self, seq_no: int) -> SchemaKey:
    """
    Get schema key for schema by sequence number if known, None for no such schema in cache.

    :param seq_no: sequence number
    :return: corresponding schema key or None
    """

    LOGGER.debug('SchemaCache.schema_key_for >>> seq_no: %s', seq_no)

    rv = self._seq_no2schema_key.get(seq_no, None)
    LOGGER.debug('SchemaCache.schema_key_for <<< %s', rv)
    return rv
python
{ "resource": "" }
q11021
SchemaCache.schemata
train
def schemata(self) -> list:
    """
    Return list with schemata in cache.

    :return: list of schemata
    """

    LOGGER.debug('SchemaCache.schemata >>>')

    rv = [self._schema_key2schema[s_key] for s_key in self._schema_key2schema]  # keys are schema keys
    LOGGER.debug('SchemaCache.schemata <<<')
    return rv
python
{ "resource": "" }
q11022
RevoCacheEntry.get_delta_json
train
async def get_delta_json(
        self,
        rr_delta_builder: Callable[['HolderProver', str, int, int, dict], Awaitable[Tuple[str, int]]],
        fro: int,
        to: int) -> (str, int):
    """
    Get rev reg delta json, and its timestamp on the distributed ledger, from cached
    rev reg delta frames list or distributed ledger, updating cache as necessary.

    Raise BadRevStateTime if caller asks for a delta to the future.

    On return of any previously existing rev reg delta frame, always update its query
    time beforehand.

    :param rr_delta_builder: callback to build rev reg delta if need be (specify
        anchor instance's _build_rr_delta())
    :param fro: least time (epoch seconds) of interest; lower-bounds 'to' on frame housing return data
    :param to: greatest time (epoch seconds) of interest; upper-bounds returned revocation delta timestamp
    :return: rev reg delta json and ledger timestamp (epoch seconds)
    """

    LOGGER.debug(
        'RevoCacheEntry.get_delta_json >>> rr_delta_builder: %s, fro: %s, to: %s',
        rr_delta_builder.__name__,
        fro,
        to)

    rv = await self._get_update(rr_delta_builder, fro, to, True)
    LOGGER.debug('RevoCacheEntry.get_delta_json <<< %s', rv)
    return rv
python
{ "resource": "" }
q11023
RevoCacheEntry.get_state_json
train
async def get_state_json(
        self,
        rr_state_builder: Callable[['Verifier', str, int], Awaitable[Tuple[str, int]]],
        fro: int,
        to: int) -> (str, int):
    """
    Get rev reg state json, and its timestamp on the distributed ledger, from cached
    rev reg state frames list or distributed ledger, updating cache as necessary.

    Raise BadRevStateTime if caller asks for a state in the future.

    On return of any previously existing rev reg state frame, always update its query
    time beforehand.

    :param rr_state_builder: callback to build rev reg state if need be (specify
        anchor instance's _build_rr_state())
    :param fro: least time (epoch seconds) of interest; lower-bounds 'to' on frame housing return data
    :param to: greatest time (epoch seconds) of interest; upper-bounds returned revocation state timestamp
    :return: rev reg state json and ledger timestamp (epoch seconds)
    """

    LOGGER.debug(
        'RevoCacheEntry.get_state_json >>> rr_state_builder: %s, fro: %s, to: %s',
        rr_state_builder.__name__,
        fro,
        to)

    rv = await self._get_update(rr_state_builder, fro, to, False)
    LOGGER.debug('RevoCacheEntry.get_state_json <<< %s', rv)
    return rv
python
{ "resource": "" }
q11024
ArchivableCaches.clear
train
def clear() -> None:
    """
    Clear all archivable caches in memory.
    """

    LOGGER.debug('clear >>>')

    with SCHEMA_CACHE.lock:
        SCHEMA_CACHE.clear()
    with CRED_DEF_CACHE.lock:
        CRED_DEF_CACHE.clear()
    with REVO_CACHE.lock:
        REVO_CACHE.clear()

    LOGGER.debug('clear <<<')
python
{ "resource": "" }
q11025
ArchivableCaches.archive
train
def archive(base_dir: str) -> int:
    """
    Archive schema, cred def, revocation caches to disk as json.

    :param base_dir: archive base directory
    :return: timestamp (epoch seconds) used as subdirectory
    """

    LOGGER.debug('archive >>> base_dir: %s', base_dir)

    rv = int(time())
    timestamp_dir = join(base_dir, str(rv))
    makedirs(timestamp_dir, exist_ok=True)

    with SCHEMA_CACHE.lock:
        with open(join(timestamp_dir, 'schema'), 'w') as archive:
            print(json.dumps(SCHEMA_CACHE.schemata()), file=archive)

    with CRED_DEF_CACHE.lock:
        with open(join(timestamp_dir, 'cred_def'), 'w') as archive:
            print(json.dumps(CRED_DEF_CACHE), file=archive)

    with REVO_CACHE.lock:
        with open(join(timestamp_dir, 'revocation'), 'w') as archive:
            revo_cache_dict = {}
            for rr_id in REVO_CACHE:
                revo_cache_dict[rr_id] = {
                    'rev_reg_def': REVO_CACHE[rr_id].rev_reg_def,
                    'rr_delta_frames': [vars(f) for f in REVO_CACHE[rr_id].rr_delta_frames],
                    'rr_state_frames': [vars(f) for f in REVO_CACHE[rr_id].rr_state_frames]
                }
            print(json.dumps(revo_cache_dict), file=archive)

    LOGGER.debug('archive <<< %s', rv)
    return rv
python
{ "resource": "" }
q11026
storage_record2pairwise_info
train
def storage_record2pairwise_info(storec: StorageRecord) -> PairwiseInfo:
    """
    Given indy-sdk non_secrets implementation of pairwise storage record dict,
    return corresponding PairwiseInfo.

    :param storec: (non-secret) storage record to convert to PairwiseInfo
    :return: PairwiseInfo on record DIDs, verkeys, metadata
    """

    return PairwiseInfo(
        storec.id,     # = their did
        storec.value,  # = their verkey
        storec.tags['~my_did'],
        storec.tags['~my_verkey'],
        {
            tag[tag.startswith('~'):]: storec.tags[tag] for tag in (storec.tags or {})  # strip any leading '~'
        })
python
{ "resource": "" }
q11027
WalletManager._config2indy
train
def _config2indy(self, config: dict) -> dict:
    """
    Given a configuration dict with indy and possibly more configuration values,
    return the corresponding indy wallet configuration dict from current default
    and input values.

    :param config: input configuration
    :return: configuration dict for indy wallet
    """

    assert {'name', 'id'} & {k for k in config}
    return {
        'id': config.get('name', config.get('id')),
        'storage_type': config.get('storage_type', self.default_storage_type),
        'freshness_time': config.get('freshness_time', self.default_freshness_time)
    }
python
{ "resource": "" }
q11028
WalletManager._config2von
train
def _config2von(self, config: dict, access: str = None) -> dict:
    """
    Given a configuration dict with indy and possibly more configuration values,
    return the corresponding VON wallet configuration dict from current default
    and input values.

    :param config: input configuration
    :param access: access credentials value
    :return: configuration dict for VON wallet with VON-specific entries
    """

    rv = {k: config.get(k, self._defaults[k]) for k in ('auto_create', 'auto_remove')}
    rv['access'] = access or self.default_access
    for key in ('seed', 'did', 'link_secret_label'):
        if key in config:
            rv[key] = config[key]
    return rv
python
{ "resource": "" }
q11029
WalletManager.create
train
async def create(self, config: dict = None, access: str = None, replace: bool = False) -> Wallet:
    """
    Create wallet on input name with given configuration and access credential value.

    Raise ExtantWallet if wallet on input name exists already and replace parameter is False.
    Raise BadAccess on replacement for bad access credentials value.

    FAIR WARNING: specifying replace=True attempts to remove any matching wallet before
    proceeding; to succeed, the existing wallet must use the same access credentials
    that the input configuration has.

    :param config: configuration data for both indy-sdk and VON anchor wallet:

        - 'name' or 'id': wallet name
        - 'storage_type': storage type
        - 'freshness_time': freshness time
        - 'did': (optional) DID to use
        - 'seed': (optional) seed to use
        - 'auto_create': whether to create the wallet on first open
          (persists past close, can work with auto_remove)
        - 'auto_remove': whether to remove the wallet on next close
        - 'link_secret_label': (optional) link secret label to use to create link secret

    :param access: indy wallet access credential ('key') value, if different than default
    :param replace: whether to replace old wallet if it exists
    :return: wallet created
    """

    LOGGER.debug('WalletManager.create >>> config %s, access %s, replace %s', config, access, replace)

    assert {'name', 'id'} & {k for k in config}
    wallet_name = config.get('name', config.get('id'))
    if replace:
        von_wallet = self.get(config, access)
        if not await von_wallet.remove():
            LOGGER.debug('WalletManager.create <!< Failed to remove wallet %s for replacement', wallet_name)
            raise ExtantWallet('Failed to remove wallet {} for replacement'.format(wallet_name))

    indy_config = self._config2indy(config)
    von_config = self._config2von(config, access)
    rv = Wallet(indy_config, von_config)
    await rv.create()
    LOGGER.debug('WalletManager.create <<< %s', rv)
    return rv
python
{ "resource": "" }
q11030
WalletManager.get
train
def get(self, config: dict, access: str = None) -> Wallet:
    """
    Instantiate and return VON anchor wallet object on given configuration,
    respecting wallet manager default configuration values.

    :param config: configuration data for both indy-sdk and VON anchor wallet:

        - 'name' or 'id': wallet name
        - 'storage_type': storage type
        - 'freshness_time': freshness time
        - 'did': (optional) DID to use
        - 'seed': (optional) seed to use
        - 'auto_create': whether to create the wallet on first open
          (persists past close, can work with auto_remove)
        - 'auto_remove': whether to remove the wallet on next close
        - 'link_secret_label': (optional) link secret label to use to create link secret

    :param access: indy access credentials value
    :return: VON anchor wallet
    """

    LOGGER.debug('WalletManager.get >>> config %s, access %s', config, access)

    rv = Wallet(
        self._config2indy(config),
        self._config2von(config, access))

    LOGGER.debug('WalletManager.get <<< %s', rv)
    return rv
python
{ "resource": "" }
q11031
WalletManager.export_wallet
train
async def export_wallet(self, von_wallet: Wallet, path: str) -> None:
    """
    Export an existing VON anchor wallet. Raise WalletState if wallet is closed.

    :param von_wallet: open wallet
    :param path: path to which to export wallet
    """

    LOGGER.debug('WalletManager.export_wallet >>> von_wallet %s, path %s', von_wallet, path)

    if not von_wallet.handle:
        LOGGER.debug('WalletManager.export_wallet <!< Wallet %s is closed', von_wallet.name)
        raise WalletState('Wallet {} is closed'.format(von_wallet.name))

    await wallet.export_wallet(
        von_wallet.handle,
        json.dumps({
            'path': path,
            **von_wallet.access_creds
        }))

    LOGGER.debug('WalletManager.export_wallet <<<')
python
{ "resource": "" }
q11032
WalletManager.import_wallet
train
async def import_wallet(self, indy_config: dict, path: str, access: str = None) -> None:
    """
    Import a VON anchor wallet. Raise BadAccess on bad access credential value.

    :param indy_config: indy wallet configuration to use, with:

        - 'id'
        - 'storage_type' (optional)
        - 'storage_config' (optional)

    :param path: path from which to import wallet file
    :param access: indy access credentials value (default value from wallet manager)
    """

    LOGGER.debug('WalletManager.import_wallet >>> indy_config %s, path: %s', indy_config, path)

    try:
        await wallet.import_wallet(
            json.dumps(indy_config),
            json.dumps({'key': access or self.default_access}),
            json.dumps({'path': path, 'key': access or self.default_access}))
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.CommonInvalidStructure:  # indy-sdk raises on bad access
            LOGGER.debug(
                'WalletManager.import_wallet <!< bad access credential value for wallet %s',
                indy_config.get('id', '(no id)'))
            raise BadAccess('Bad access credential value for wallet {}'.format(indy_config.get('id', '(no id)')))
        LOGGER.debug(
            'WalletManager.import_wallet <!< indy error code %s on wallet %s import',
            x_indy.error_code,
            indy_config.get('id', '(no id)'))
        raise

    LOGGER.debug('WalletManager.import_wallet <<<')
python
{ "resource": "" }
q11033
WalletManager.remove
train
async def remove(self, von_wallet: Wallet) -> None:
    """
    Remove serialized wallet if it exists. Raise WalletState if wallet is open.

    :param von_wallet: (closed) wallet to remove
    """

    LOGGER.debug('WalletManager.remove >>> wallet %s', von_wallet)

    await von_wallet.remove()

    LOGGER.debug('WalletManager.remove <<<')
python
{ "resource": "" }
q11034
WalletManager.register_storage_library
train
async def register_storage_library(storage_type: str, c_library: str, entry_point: str) -> None:
    """
    Load a wallet storage plug-in.

    An indy-sdk wallet storage plug-in is a shared library; relying parties must
    explicitly load it before creating or opening a wallet with the plug-in. The
    implementation loads the dynamic library and calls its entry point; internally,
    the plug-in registers itself with the indy-sdk wallet API (cf. indy-sdk's
    register_wallet_storage_library(storage_type: str, c_library: str, fn_pfx: str)).

    :param storage_type: wallet storage type
    :param c_library: plug-in library
    :param entry_point: function to initialize the library
    """

    LOGGER.debug(
        'WalletManager.register_storage_library >>> storage_type %s, c_library %s, entry_point %s',
        storage_type,
        c_library,
        entry_point)

    try:
        stg_lib = CDLL(c_library)
        result = stg_lib[entry_point]()
        if result:
            LOGGER.debug(
                'WalletManager.register_storage_library <!< indy error code %s on storage library entry at %s',
                result,
                entry_point)
            raise IndyError(result)
        LOGGER.info('Loaded storage library type %s (%s)', storage_type, c_library)
    except IndyError as x_indy:
        LOGGER.debug(
            'WalletManager.register_storage_library <!< indy error code %s on load of storage library %s %s',
            x_indy.error_code,
            storage_type,
            c_library)
        raise

    LOGGER.debug('WalletManager.register_storage_library <<<')
python
{ "resource": "" }
q11035
Wallet.create_signing_key
train
async def create_signing_key(self, seed: str = None, metadata: dict = None) -> KeyInfo:
    """
    Create a new signing key pair.

    Raise WalletState if wallet is closed, ExtantRecord if verification key already exists.

    :param seed: optional seed allowing deterministic key creation
    :param metadata: optional metadata to store with key pair
    :return: KeyInfo for new key pair
    """

    LOGGER.debug('Wallet.create_signing_key >>> seed: [SEED], metadata: %s', metadata)

    if not self.handle:
        LOGGER.debug('Wallet.create_signing_key <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    try:
        verkey = await crypto.create_key(self.handle, json.dumps({'seed': seed} if seed else {}))
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletItemAlreadyExists:
            LOGGER.debug('Wallet.create_signing_key <!< Verification key already present in wallet %s', self.name)
            raise ExtantRecord('Verification key already present in wallet {}'.format(self.name))
        LOGGER.debug('Wallet.create_signing_key <!< indy-sdk raised error %s', x_indy.error_code)
        raise

    await crypto.set_key_metadata(self.handle, verkey, json.dumps(metadata or {}))  # coerce None to empty

    rv = KeyInfo(verkey, metadata or {})
    LOGGER.debug('Wallet.create_signing_key <<< %s', rv)
    return rv
python
{ "resource": "" }
q11036
Wallet.get_signing_key
train
async def get_signing_key(self, verkey: str) -> KeyInfo:
    """
    Get signing key pair for input verification key.

    Raise WalletState if wallet is closed, AbsentRecord for no such key pair.

    :param verkey: verification key of key pair
    :return: KeyInfo for key pair
    """

    LOGGER.debug('Wallet.get_signing_key >>> verkey: %s', verkey)

    if not self.handle:
        LOGGER.debug('Wallet.get_signing_key <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    try:
        metadata = await crypto.get_key_metadata(self.handle, verkey)
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletItemNotFound:
            LOGGER.debug('Wallet.get_signing_key <!< Verification key %s not in wallet %s', verkey, self.name)
            raise AbsentRecord('Verification key not in wallet {}'.format(self.name))
        LOGGER.debug('Wallet.get_signing_key <!< indy-sdk raised error %s', x_indy.error_code)
        raise

    rv = KeyInfo(verkey, json.loads(metadata) if metadata else {})
    LOGGER.debug('Wallet.get_signing_key <<< %s', rv)
    return rv
python
{ "resource": "" }
q11037
Wallet.create_local_did
train
async def create_local_did(self, seed: str = None, loc_did: str = None, metadata: dict = None) -> DIDInfo:
    """
    Create and store a new local DID for use in pairwise DID relations.

    :param seed: seed from which to create (default random)
    :param loc_did: local DID value (default None to let indy-sdk generate)
    :param metadata: metadata to associate with the local DID
        (operation always sets 'since', 'modified' epoch timestamps)
    :return: DIDInfo for new local DID
    """

    LOGGER.debug('Wallet.create_local_did >>> seed: [SEED] loc_did: %s metadata: %s', loc_did, metadata)

    cfg = {}
    if seed:
        cfg['seed'] = seed
    if loc_did:
        cfg['did'] = loc_did

    if not self.handle:
        LOGGER.debug('Wallet.create_local_did <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    try:
        (created_did, verkey) = await did.create_and_store_my_did(self.handle, json.dumps(cfg))
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.DidAlreadyExistsError:
            LOGGER.debug('Wallet.create_local_did <!< DID %s already present in wallet %s', loc_did, self.name)
            raise ExtantRecord('Local DID {} already present in wallet {}'.format(loc_did, self.name))
        LOGGER.debug('Wallet.create_local_did <!< indy-sdk raised error %s', x_indy.error_code)
        raise

    now = int(time())
    loc_did_metadata = {**(metadata or {}), 'since': now, 'modified': now}
    await did.set_did_metadata(self.handle, created_did, json.dumps(loc_did_metadata))

    rv = DIDInfo(created_did, verkey, loc_did_metadata)
    LOGGER.debug('Wallet.create_local_did <<< %s', rv)
    return rv
python
{ "resource": "" }
q11038
Wallet.replace_local_did_metadata
train
async def replace_local_did_metadata(self, loc_did: str, metadata: dict) -> DIDInfo:
    """
    Replace the metadata associated with a local DID.

    Raise WalletState if wallet is closed, AbsentRecord for no such local DID.

    :param loc_did: local DID of interest
    :param metadata: new metadata to store
    :return: DIDInfo for local DID after write
    """

    LOGGER.debug('Wallet.replace_local_did_metadata >>> loc_did: %s, metadata: %s', loc_did, metadata)

    old = await self.get_local_did(loc_did)  # raises exceptions if applicable
    now = int(time())
    loc_did_metadata = {**(metadata or {}), 'since': (old.metadata or {}).get('since', now), 'modified': now}
    try:
        await did.set_did_metadata(self.handle, loc_did, json.dumps(loc_did_metadata))
    except IndyError as x_indy:
        LOGGER.debug('Wallet.replace_local_did_metadata <!< indy-sdk raised error %s', x_indy.error_code)
        raise

    rv = await self.get_local_did(loc_did)
    LOGGER.debug('Wallet.replace_local_did_metadata <<< %s', rv)
    return rv
python
{ "resource": "" }
q11039
Wallet.get_local_dids
train
async def get_local_dids(self) -> Sequence[DIDInfo]:
    """
    Get list of DIDInfos for local DIDs.

    :return: list of local DIDInfos
    """

    LOGGER.debug('Wallet.get_local_dids >>>')

    dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle))  # list

    rv = []
    for did_with_meta in dids_with_meta:
        meta = json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {}
        if meta.get('anchor', False):
            continue  # exclude anchor DIDs past and present
        rv.append(DIDInfo(did_with_meta['did'], did_with_meta['verkey'], meta))

    LOGGER.debug('Wallet.get_local_dids <<< %s', rv)
    return rv
python
{ "resource": "" }
q11040
Wallet.get_local_did
train
async def get_local_did(self, loc: str) -> DIDInfo:
    """
    Get local DID info by local DID or verification key. Raise AbsentRecord for no such local DID.

    :param loc: DID or verification key of interest
    :return: DIDInfo for local DID
    """

    LOGGER.debug('Wallet.get_local_did >>> loc: %s', loc)

    if not self.handle:
        LOGGER.debug('Wallet.get_local_did <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    if ok_did(loc):  # it's a DID
        try:
            did_with_meta = json.loads(await did.get_my_did_with_meta(self.handle, loc))
            rv = DIDInfo(
                did_with_meta['did'],
                did_with_meta['verkey'],
                json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {})  # nudge None to empty
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.WalletItemNotFound:
                LOGGER.debug('Wallet.get_local_did <!< DID %s not present in wallet %s', loc, self.name)
                raise AbsentRecord('Local DID {} not present in wallet {}'.format(loc, self.name))
            LOGGER.debug('Wallet.get_local_did <!< indy-sdk raised error %s', x_indy.error_code)
            raise
    else:  # it's a verkey
        dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle))  # list
        for did_with_meta in dids_with_meta:
            if did_with_meta['verkey'] == loc:
                rv = DIDInfo(
                    did_with_meta['did'],
                    did_with_meta['verkey'],
                    json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {})
                break
        else:
            LOGGER.debug('Wallet.get_local_did <!< Wallet %s has no local DID for verkey %s', self.name, loc)
            raise AbsentRecord('Wallet {} has no local DID for verkey {}'.format(self.name, loc))

    LOGGER.debug('Wallet.get_local_did <<< %s', rv)
    return rv
python
{ "resource": "" }
q11041
Wallet.get_anchor_did
train
async def get_anchor_did(self) -> str:
    """
    Get current anchor DID by metadata, None for not yet set.

    :return: DID
    """

    LOGGER.debug('Wallet.get_anchor_did >>>')

    if not self.handle:
        LOGGER.debug('Wallet.get_anchor_did <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    rv = None
    dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle))  # list

    latest = 0
    for did_with_meta in dids_with_meta:
        try:
            meta = json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {}
            if not (isinstance(meta, dict) and meta.get('anchor', False)):
                continue
            if meta.get('since', -1) > latest:
                latest = meta['since']  # track the latest anchor DID seen so far
                rv = did_with_meta.get('did')
        except json.decoder.JSONDecodeError:
            continue  # it's not an anchor DID, carry on

    LOGGER.debug('Wallet.get_anchor_did <<< %s', rv)
    return rv
python
{ "resource": "" }
q11042
Wallet._write_link_secret_label
train
async def _write_link_secret_label(self, label) -> None:
    """
    Update non-secret storage record with link secret label.

    :param label: link secret label
    """

    LOGGER.debug('Wallet._write_link_secret_label >>> %s', label)

    if await self.get_link_secret_label() == label:
        LOGGER.info('Wallet._write_link_secret_label abstaining - already current')
    else:
        await self.write_non_secret(StorageRecord(
            TYPE_LINK_SECRET_LABEL,
            label,
            tags=None,
            ident=str(int(time()))))  # indy requires str

    LOGGER.debug('Wallet._write_link_secret_label <<<')
python
{ "resource": "" }
q11043
Wallet.get_link_secret_label
train
async def get_link_secret_label(self) -> str:
    """
    Get current link secret label from non-secret storage records; return None for no match.

    :return: latest non-secret storage record for link secret label
    """

    LOGGER.debug('Wallet.get_link_secret_label >>>')

    if not self.handle:
        LOGGER.debug('Wallet.get_link_secret_label <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    rv = None
    records = await self.get_non_secret(TYPE_LINK_SECRET_LABEL)
    if records:
        rv = records[str(max(int(k) for k in records))].value  # str to int, max, and back again

    LOGGER.debug('Wallet.get_link_secret_label <<< %s', rv)
    return rv
python
{ "resource": "" }
q11044
Wallet.create
train
async def create(self) -> None:
    """
    Persist the wallet. Raise ExtantWallet if it already exists.

    Actuators should prefer WalletManager.create() to calling this method directly -
    the wallet manager filters wallet configuration through preset defaults.
    """

    LOGGER.debug('Wallet.create >>>')

    try:
        await wallet.create_wallet(
            config=json.dumps(self.config),
            credentials=json.dumps(self.access_creds))
        LOGGER.info('Created wallet %s', self.name)
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletAlreadyExistsError:
            LOGGER.debug('Wallet.create <!< Wallet %s already exists', self.name)
            raise ExtantWallet('Wallet {} already exists'.format(self.name))
        LOGGER.debug(
            'Wallet.create <!< indy error code %s on creation of wallet %s',
            x_indy.error_code,
            self.name)
        raise

    auto_remove = self.auto_remove
    self.auto_remove = False  # defer past this creation process
    async with self:
        did_info = await self.create_local_did(
            self._von_config.get('seed', None),
            self._von_config.get('did', None),
            {'anchor': True})
        self.did = did_info.did
        self.verkey = did_info.verkey
        if 'link_secret_label' in self._von_config:
            await self.create_link_secret(self._von_config['link_secret_label'])
    self.auto_remove = auto_remove

    LOGGER.debug('Wallet.create <<<')
python
{ "resource": "" }
q11045
Wallet.write_pairwise
train
async def write_pairwise(
        self,
        their_did: str,
        their_verkey: str = None,
        my_did: str = None,
        metadata: dict = None,
        replace_meta: bool = False) -> PairwiseInfo:
    """
    Store a pairwise DID for a secure connection. Use verification key for local DID
    in wallet if supplied; otherwise, create one first. If local DID specified but not
    present, raise AbsentRecord.

    With supplied metadata, replace or augment and overwrite any existing metadata for
    the pairwise relation if one already exists in the wallet. Always include local and
    remote DIDs and keys in metadata to allow for WQL search.

    Raise AbsentRecord on call to update a non-existent record. Raise BadRecord if
    metadata does not coerce into non-secrets API tags specification {str:str}.

    :param their_did: remote DID
    :param their_verkey: remote verification key (default None is OK if updating an
        existing pairwise DID)
    :param my_did: local DID
    :param metadata: metadata for pairwise connection
    :param replace_meta: whether to (True) replace or (False) augment and overwrite existing metadata
    :return: resulting PairwiseInfo
    """

    LOGGER.debug(
        'Wallet.write_pairwise >>> their_did: %s, their_verkey: %s, my_did: %s, metadata: %s, replace_meta: %s',
        their_did,
        their_verkey,
        my_did,
        metadata,
        replace_meta)

    if their_verkey is None:
        match = await self.get_pairwise(their_did)
        if not match:
            LOGGER.debug(
                'Wallet.write_pairwise <!< Wallet %s has no pairwise DID on %s to update',
                self.name,
                their_did)
            raise AbsentRecord('Wallet {} has no pairwise DID on {} to update'.format(self.name, their_did))
        their_verkey = [pwise for pwise in match.values()][0].their_verkey

    try:
        await did.store_their_did(self.handle, json.dumps({'did': their_did, 'verkey': their_verkey}))
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletItemAlreadyExists:
            pass  # exists already, carry on
        else:
            LOGGER.debug(
                'Wallet.write_pairwise <!< Wallet %s write of their_did %s raised indy error code %s',
                self.name,
                their_did,
                x_indy.error_code)
            raise

    if my_did:
        my_did_info = await self.get_local_did(my_did)  # raises AbsentRecord if no such local did
    else:
        my_did_info = await self.create_local_did(None, None, {'pairwise_for': their_did})

    pairwise = PairwiseInfo(their_did, their_verkey, my_did_info.did, my_did_info.verkey, metadata)
    try:
        storec = await self.write_non_secret(
            StorageRecord(TYPE_PAIRWISE, their_verkey, tags=pairwise_info2tags(pairwise), ident=their_did),
            replace_meta)
    except BadRecord:
        LOGGER.debug(
            'Wallet.write_pairwise <!< Pairwise metadata %s does not coerce into flat {str:str} tags dict',
            pairwise.metadata)
        raise

    rv = storage_record2pairwise_info(storec)
    LOGGER.debug('Wallet.write_pairwise <<< %s', rv)
    return rv
python
{ "resource": "" }
q11046
Wallet.delete_pairwise
train
async def delete_pairwise(self, their_did: str) -> None:
    """
    Remove a pairwise DID record by its remote DID. Silently return if no such record
    is present. Raise WalletState for closed wallet, or BadIdentifier for invalid
    pairwise DID.

    :param their_did: remote DID marking pairwise DID to remove
    """

    LOGGER.debug('Wallet.delete_pairwise >>> their_did: %s', their_did)

    if not ok_did(their_did):
        LOGGER.debug('Wallet.delete_pairwise <!< Bad DID %s', their_did)
        raise BadIdentifier('Bad DID {}'.format(their_did))

    await self.delete_non_secret(TYPE_PAIRWISE, their_did)

    LOGGER.debug('Wallet.delete_pairwise <<<')
python
{ "resource": "" }
q11047
Wallet.get_pairwise
train
async def get_pairwise(self, pairwise_filt: str = None) -> dict:
    """
    Return dict mapping each pairwise DID of interest in wallet to its pairwise info,
    or, for no filter specified, mapping them all. If wallet has no such item, return
    empty dict.

    :param pairwise_filt: remote DID of interest, or WQL json (default all)
    :return: dict mapping remote DIDs to PairwiseInfo
    """

    LOGGER.debug('Wallet.get_pairwise >>> pairwise_filt: %s', pairwise_filt)

    if not self.handle:
        LOGGER.debug('Wallet.get_pairwise <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    storecs = await self.get_non_secret(
        TYPE_PAIRWISE,
        pairwise_filt if ok_did(pairwise_filt) or not pairwise_filt else json.loads(pairwise_filt),
        canon_pairwise_wql)
    rv = {k: storage_record2pairwise_info(storecs[k]) for k in storecs}  # touch up tags, mute leading ~
    LOGGER.debug('Wallet.get_pairwise <<< %s', rv)
    return rv
python
{ "resource": "" }
q11048
Wallet.write_non_secret
train
async def write_non_secret(self, storec: StorageRecord, replace_meta: bool = False) -> StorageRecord:
    """
    Add or update non-secret storage record to the wallet; return resulting
    wallet non-secret record.

    :param storec: non-secret storage record
    :param replace_meta: whether to replace any existing metadata on matching record or to augment it
    :return: non-secret storage record as it appears in the wallet after write
    """

    LOGGER.debug('Wallet.write_non_secret >>> storec: %s, replace_meta: %s', storec, replace_meta)

    if not self.handle:
        LOGGER.debug('Wallet.write_non_secret <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    if not StorageRecord.ok_tags(storec.tags):
        LOGGER.debug('Wallet.write_non_secret <!< bad storage record tags %s; use flat {str: str} dict', storec)
        raise BadRecord('Bad storage record tags {}; use flat {{str:str}} dict'.format(storec))

    try:
        record = json.loads(await non_secrets.get_wallet_record(
            self.handle,
            storec.type,
            storec.id,
            json.dumps({
                'retrieveType': False,
                'retrieveValue': True,
                'retrieveTags': True
            })))
        if record['value'] != storec.value:
            await non_secrets.update_wallet_record_value(
                self.handle,
                storec.type,
                storec.id,
                storec.value)
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletItemNotFound:
            await non_secrets.add_wallet_record(
                self.handle,
                storec.type,
                storec.id,
                storec.value,
                json.dumps(storec.tags) if storec.tags else None)
        else:
            LOGGER.debug(
                'Wallet.write_non_secret <!< Wallet lookup raised indy error code %s',
                x_indy.error_code)
            raise
    else:
        if (record['tags'] or None) != storec.tags:  # record maps no tags to {}, not None
            tags = (storec.tags or {}) if replace_meta else {**record['tags'], **(storec.tags or {})}
            await non_secrets.update_wallet_record_tags(
                self.handle,
                storec.type,
                storec.id,
                json.dumps(tags))  # indy-sdk takes '{}' instead of None for null tags

    record = json.loads(await non_secrets.get_wallet_record(
        self.handle,
        storec.type,
        storec.id,
        json.dumps({
            'retrieveType': False,
            'retrieveValue': True,
            'retrieveTags': True
        })))

    rv = StorageRecord(storec.type, record['value'], tags=record.get('tags', None), ident=record['id'])
    LOGGER.debug('Wallet.write_non_secret <<< %s', rv)
    return rv
python
{ "resource": "" }
q11049
Wallet.delete_non_secret
train
async def delete_non_secret(self, typ: str, ident: str) -> None:
    """
    Remove a non-secret record by its type and identifier. Silently return if no such
    record is present. Raise WalletState for closed wallet.

    :param typ: non-secret storage record type
    :param ident: non-secret storage record identifier
    """

    LOGGER.debug('Wallet.delete_non_secret >>> typ: %s, ident: %s', typ, ident)

    if not self.handle:
        LOGGER.debug('Wallet.delete_non_secret <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    try:
        await non_secrets.delete_wallet_record(self.handle, typ, ident)
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletItemNotFound:
            LOGGER.info('Wallet.delete_non_secret <!< no record for type %s on identifier %s', typ, ident)
        else:
            LOGGER.debug(
                'Wallet.delete_non_secret <!< deletion of %s record on identifier %s raised indy error code %s',
                typ,
                ident,
                x_indy.error_code)
            raise

    LOGGER.debug('Wallet.delete_non_secret <<<')
python
{ "resource": "" }
q11050
Wallet.get_non_secret
train
async def get_non_secret(
        self,
        typ: str,
        filt: Union[dict, str] = None,
        canon_wql: Callable[[dict], dict] = None,
        limit: int = None) -> dict:
    """
    Return dict mapping each non-secret storage record of interest by identifier or,
    for no filter specified, mapping them all. If wallet has no such item, return
    empty dict.

    :param typ: non-secret storage record type
    :param filt: non-secret storage record identifier or WQL json (default all)
    :param canon_wql: WQL canonicalization function (default von_anchor.canon.canon_non_secret_wql())
    :param limit: maximum number of results to return (default no limit)
    :return: dict mapping identifiers to non-secret storage records
    """

    LOGGER.debug('Wallet.get_non_secret >>> typ: %s, filt: %s, canon_wql: %s', typ, filt, canon_wql)

    if not self.handle:
        LOGGER.debug('Wallet.get_non_secret <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    records = []
    if isinstance(filt, str):  # ordinary lookup by value
        try:
            records = [json.loads(await non_secrets.get_wallet_record(
                self.handle,
                typ,
                filt,
                json.dumps({
                    'retrieveType': False,
                    'retrieveValue': True,
                    'retrieveTags': True
                })))]
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.WalletItemNotFound:
                pass
            else:
                LOGGER.debug(
                    'Wallet.get_non_secret <!< Wallet %s lookup raised indy exception %s',
                    self.name,
                    x_indy.error_code)
                raise
    else:
        canon = canon_wql or canon_non_secret_wql
        s_handle = await non_secrets.open_wallet_search(
            self.handle,
            typ,
            json.dumps(canon(filt or {})),
            json.dumps({
                'retrieveRecords': True,
                'retrieveTotalCount': True,
                'retrieveType': False,
                'retrieveValue': True,
                'retrieveTags': True
            }))
        records = []
        cardinality = int(json.loads(
            await non_secrets.fetch_wallet_search_next_records(self.handle, s_handle, 0))['totalCount'])
        chunk = min(cardinality, limit or cardinality, Wallet.DEFAULT_CHUNK)
        if limit:
            cardinality = min(limit, cardinality)
        try:
            while len(records) != cardinality:
                batch = json.loads(
                    await non_secrets.fetch_wallet_search_next_records(self.handle, s_handle, chunk))['records']
                records.extend(batch)
                if len(batch) < chunk:
                    break
            if len(records) != cardinality:
                LOGGER.warning(
                    'Non-secret search/limit indicated %s results but fetched %s',
                    cardinality,
                    len(records))
        finally:
            await non_secrets.close_wallet_search(s_handle)

    rv = {record['id']: StorageRecord(typ, record['value'], record['tags'], record['id']) for record in records}
    LOGGER.debug('Wallet.get_non_secret <<< %s', rv)
    return rv
python
{ "resource": "" }
q11051
Wallet.encrypt
train
async def encrypt(
        self,
        message: bytes,
        authn: bool = False,
        to_verkey: str = None,
        from_verkey: str = None) -> bytes:
    """
    Encrypt plaintext for owner of DID, anonymously or via authenticated encryption scheme.

    Raise AbsentMessage for missing message, or WalletState if wallet is closed.

    :param message: plaintext, as bytes
    :param authn: whether to use authenticated encryption scheme
    :param to_verkey: verification key of recipient, None for anchor's own
    :param from_verkey: verification key of sender for authenticated encryption, None for anchor's own
    :return: ciphertext, as bytes
    """

    LOGGER.debug(
        'Wallet.encrypt >>> message: %s, authn: %s, to_verkey: %s, from_verkey: %s',
        message,
        authn,
        to_verkey,
        from_verkey)

    if not message:
        LOGGER.debug('Wallet.encrypt <!< No message to encrypt')
        raise AbsentMessage('No message to encrypt')
    if not self.handle:
        LOGGER.debug('Wallet.encrypt <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    if authn:
        rv = await crypto.auth_crypt(self.handle, from_verkey or self.verkey, to_verkey or self.verkey, message)
    else:
        rv = await crypto.anon_crypt(to_verkey or self.verkey, message)

    LOGGER.debug('Wallet.encrypt <<< %s', rv)
    return rv
python
{ "resource": "" }
q11052
Wallet.sign
train
async def sign(self, message: bytes, verkey: str = None) -> bytes:
    """
    Derive signing key and sign message; return signature.

    Raise AbsentMessage for missing message, or WalletState if wallet is closed.

    :param message: Content to sign, as bytes
    :param verkey: verification key corresponding to private signing key (default anchor's own)
    :return: signature, as bytes
    """

    LOGGER.debug('Wallet.sign >>> message: %s, verkey: %s', message, verkey)

    if not message:
        LOGGER.debug('Wallet.sign <!< No message to sign')
        raise AbsentMessage('No message to sign')
    if not self.handle:
        LOGGER.debug('Wallet.sign <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    rv = await crypto.crypto_sign(self.handle, verkey or self.verkey, message)
    LOGGER.debug('Wallet.sign <<< %s', rv)
    return rv
python
{ "resource": "" }
q11053
Wallet.unpack
train
async def unpack(self, ciphertext: bytes) -> (str, str, str):
    """
    Unpack a message. Return triple with cleartext, sender verification key, and
    recipient verification key.

    Raise AbsentMessage for missing ciphertext, or WalletState if wallet is closed.
    Raise AbsentRecord if wallet has no key to unpack ciphertext.

    :param ciphertext: JWE-like formatted message as pack() produces
    :return: cleartext, sender verification key, recipient verification key
    """

    LOGGER.debug('Wallet.unpack >>> ciphertext: %s', ciphertext)

    if not ciphertext:
        LOGGER.debug('Wallet.unpack <!< No ciphertext to unpack')
        raise AbsentMessage('No ciphertext to unpack')

    try:
        unpacked = json.loads(await crypto.unpack_message(self.handle, ciphertext))
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletItemNotFound:
            LOGGER.debug('Wallet.unpack <!< Wallet %s has no local key to unpack ciphertext', self.name)
            raise AbsentRecord('Wallet {} has no local key to unpack ciphertext'.format(self.name))
        LOGGER.debug('Wallet.unpack <!< Wallet %s unpack() raised indy error code %s', self.name, x_indy.error_code)
        raise

    rv = (unpacked['message'], unpacked.get('sender_verkey', None), unpacked.get('recipient_verkey', None))
    LOGGER.debug('Wallet.unpack <<< %s', rv)
    return rv
python
{ "resource": "" }
q11054
Wallet.reseed_apply
train
async def reseed_apply(self) -> DIDInfo:
    """
    Replace verification key with new verification key from reseed operation.

    Raise WalletState if wallet is closed.

    :return: DIDInfo with new verification key and metadata for DID
    """

    LOGGER.debug('Wallet.reseed_apply >>>')

    if not self.handle:
        LOGGER.debug('Wallet.reseed_apply <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    await did.replace_keys_apply(self.handle, self.did)
    self.verkey = await did.key_for_local_did(self.handle, self.did)

    now = int(time())
    rv = DIDInfo(self.did, self.verkey, {'anchor': True, 'since': now, 'modified': now})
    await did.set_did_metadata(self.handle, self.did, json.dumps(rv.metadata))
    LOGGER.info('Wallet %s set metadata on reseed for DID %s', self.name, self.did)

    LOGGER.debug('Wallet.reseed_apply <<< %s', rv)
    return rv
python
{ "resource": "" }
q11055
Origin.send_schema
train
async def send_schema(self, schema_data_json: str) -> str:
    """
    Send schema to ledger, then retrieve it as written to the ledger and return it.
    Raise BadLedgerTxn on failure. Raise BadAttribute for attribute name with a leading or
    trailing hyphen or space, a character outside [-_a-zA-Z0-9 ], or the name 'hash'
    (reserved for indy-sdk).

    If schema already exists on ledger, log error and return schema.

    :param schema_data_json: schema data json with name, version, attribute names; e.g.,

    ::

        {
            'name': 'my-schema',
            'version': '1.234',
            'attr_names': ['favourite_drink', 'height', 'last_visit_date']
        }

    :return: schema json as written to ledger (or existed a priori)
    """

    LOGGER.debug('Origin.send_schema >>> schema_data_json: %s', schema_data_json)

    schema_data = json.loads(schema_data_json)
    for attr in schema_data['attr_names']:
        if not (re.match(r'(?=[^- ])[-_a-zA-Z0-9 ]+(?<=[^- ])$', attr)) or attr.strip().lower() == 'hash':
            LOGGER.debug('Origin.send_schema <!< Bad attribute name [%s]', attr)
            raise BadAttribute('Bad attribute name [{}]'.format(attr))

    s_id = schema_id(self.did, schema_data['name'], schema_data['version'])
    s_key = schema_key(s_id)

    rv_json = None
    with SCHEMA_CACHE.lock:
        try:
            rv_json = await self.get_schema(s_key)
            LOGGER.error(
                'Schema %s version %s already exists on ledger for origin-did %s: not sending',
                schema_data['name'],
                schema_data['version'],
                self.did)
        except AbsentSchema:  # OK - about to create and send it
            (_, schema_json) = await anoncreds.issuer_create_schema(
                self.did,
                schema_data['name'],
                schema_data['version'],
                json.dumps(schema_data['attr_names']))
            req_json = await ledger.build_schema_request(self.did, schema_json)
            await self._sign_submit(req_json)

            for _ in range(16):  # reasonable timeout
                try:
                    rv_json = await self.get_schema(s_key)  # adds to cache
                    break
                except AbsentSchema:
                    await sleep(1)
                    LOGGER.info('Sent schema %s to ledger, waiting 1s for its appearance', s_id)

            if not rv_json:
                LOGGER.debug('Origin.send_schema <!< timed out waiting on sent schema %s', s_id)
                raise BadLedgerTxn('Timed out waiting on sent schema {}'.format(s_id))

    LOGGER.debug('Origin.send_schema <<< %s', rv_json)
    return rv_json
python
{ "resource": "" }
q11056
NominalAnchor.least_role
train
def least_role() -> Role: """ Return the indy-sdk null role for a tails sync anchor, which does not need write access. :return: USER role """ LOGGER.debug('NominalAnchor.least_role >>>') rv = Role.USER LOGGER.debug('NominalAnchor.least_role <<< %s', rv) return rv
python
{ "resource": "" }
q11057
parse_requirements
train
def parse_requirements(filename): """ Load requirements from a pip requirements file. :param filename: file name with requirements to parse """ try: with open(filename) as fh_req: return [line.strip() for line in fh_req if line.strip() and not line.startswith('#')] except FileNotFoundError: print('File not found: {}'.format(realpath(filename)), file=stderr) raise
python
{ "resource": "" }
q11058
NodePoolManager.list
train
async def list(self) -> List[str]: """ Return list of pool names configured, empty list for none. :return: list of pool names. """ LOGGER.debug('NodePoolManager.list >>>') rv = [p['pool'] for p in await pool.list_pools()] LOGGER.debug('NodePoolManager.list <<< %s', rv) return rv
python
{ "resource": "" }
q11059
NodePoolManager.get
train
def get(self, name: str, config: dict = None) -> NodePool:
    """
    Return node pool on input name and optional configuration.

    :param name: name of configured pool
    :param config: pool configuration with optional 'timeout' int, 'extended_timeout' int,
        'preordered_nodes' array of strings
    :return: node pool
    """

    LOGGER.debug('NodePoolManager.get >>> name: %s, config: %s', name, config)

    rv = NodePool(name, self.protocol, config)

    LOGGER.debug('NodePoolManager.get <<< %s', rv)
    return rv
python
{ "resource": "" }
q11060
NodePoolManager.remove
train
async def remove(self, name: str) -> None:
    """
    Remove serialized pool info if it exists. Abstain from removing open node pool.

    :param name: name of configured pool to remove
    """

    LOGGER.debug('NodePoolManager.remove >>> name: %s', name)

    try:
        await pool.delete_pool_ledger_config(name)
    except IndyError as x_indy:
        LOGGER.info('Abstaining from node pool removal; indy-sdk error code %s', x_indy.error_code)

    LOGGER.debug('NodePoolManager.remove <<<')
python
{ "resource": "" }
q11061
DIDDoc.authnkey
train
def authnkey(self) -> dict: """ Accessor for public keys marked as authentication keys, by identifier. """ return {k: self._pubkey[k] for k in self._pubkey if self._pubkey[k].authn}
python
{ "resource": "" }
q11062
DIDDoc.set
train
def set(self, item: Union[Service, PublicKey]) -> 'DIDDoc':
    """
    Add or replace service or public key; return current DIDDoc.

    Raise BadDIDDocItem if input item is neither service nor public key.

    :param item: service or public key to set
    :return: current DIDDoc
    """

    if isinstance(item, Service):
        self.service[item.id] = item
    elif isinstance(item, PublicKey):
        self.pubkey[item.id] = item
    else:
        raise BadDIDDocItem('Cannot add item {} to DIDDoc on DID {}'.format(item, self.did))
    return self
python
{ "resource": "" }
q11063
DIDDoc.serialize
train
def serialize(self) -> dict:
    """
    Dump current object to a JSON-compatible dictionary.

    :return: dict representation of current DIDDoc
    """

    return {
        '@context': DIDDoc.CONTEXT,
        'id': canon_ref(self.did, self.did),
        'publicKey': [pubkey.to_dict() for pubkey in self.pubkey.values()],
        'authentication': [{
            'type': pubkey.type.authn_type,
            'publicKey': canon_ref(self.did, pubkey.id)
        } for pubkey in self.pubkey.values() if pubkey.authn],
        'service': [service.to_dict() for service in self.service.values()]
    }
python
{ "resource": "" }
q11064
DIDDoc.add_service_pubkeys
train
def add_service_pubkeys(self, service: dict, tags: Union[Sequence[str], str]) -> List[PublicKey]: """ Add public keys specified in service. Return public keys so discovered. Raise AbsentDIDDocItem for public key reference not present in DID document. :param service: service from DID document :param tags: potential tags marking public keys of type of interest - the standard is still coalescing :return: list of public keys that service specification in DID document identifies. """ rv = [] for tag in [tags] if isinstance(tags, str) else list(tags): for svc_key in service.get(tag, {}): canon_key = canon_ref(self.did, svc_key) pubkey = None if '#' in svc_key: if canon_key in self.pubkey: pubkey = self.pubkey[canon_key] else: # service key refers to another DID doc LOGGER.debug( 'DIDDoc.add_service_pubkeys <!< DID document %s has no public key %s', self.did, svc_key) raise AbsentDIDDocItem('DID document {} has no public key {}'.format(self.did, svc_key)) else: for existing_pubkey in self.pubkey.values(): if existing_pubkey.value == svc_key: pubkey = existing_pubkey break else: pubkey = PublicKey( self.did, ident=svc_key[-9:-1], # industrial-grade uniqueness value=svc_key) self._pubkey[pubkey.id] = pubkey if pubkey and pubkey not in rv: # perverse case: could specify same key multiple ways; append once rv.append(pubkey) return rv
python
{ "resource": "" }
q11065
DIDDoc.deserialize
train
def deserialize(cls, did_doc: dict) -> 'DIDDoc':
    """
    Construct DIDDoc object from dict representation.

    Raise BadIdentifier for bad DID, or AbsentDIDDocItem if the dict carries no identifier
    and no public key from which to derive one.

    :param did_doc: DIDDoc dict representation
    :return: DIDDoc from input dict
    """

    rv = None
    if 'id' in did_doc:
        rv = DIDDoc(did_doc['id'])
    else:  # get DID to serve as DID document identifier from first public key
        if 'publicKey' not in did_doc:
            LOGGER.debug('DIDDoc.deserialize <!< no identifier in DID document')
            raise AbsentDIDDocItem('No identifier in DID document')
        for pubkey in did_doc['publicKey']:
            pubkey_did = canon_did(resource(pubkey['id']))
            if ok_did(pubkey_did):
                rv = DIDDoc(pubkey_did)
                break
        else:
            LOGGER.debug('DIDDoc.deserialize <!< no identifier in DID document')
            raise AbsentDIDDocItem('No identifier in DID document')

    for pubkey in did_doc.get('publicKey', []):  # include public keys and authentication keys by reference
        pubkey_type = PublicKeyType.get(pubkey['type'])
        authn = any(
            canon_ref(rv.did, ak.get('publicKey', '')) == canon_ref(rv.did, pubkey['id'])
            for ak in did_doc.get('authentication', []) if isinstance(ak.get('publicKey', None), str))
        key = PublicKey(  # initialization canonicalizes id
            rv.did,
            pubkey['id'],
            pubkey[pubkey_type.specifier],
            pubkey_type,
            canon_did(pubkey['controller']),
            authn)
        rv.pubkey[key.id] = key

    for akey in did_doc.get('authentication', []):  # include embedded authentication keys
        pk_ref = akey.get('publicKey', None)
        if not pk_ref:  # reference-style authentication keys got included with the public keys above
            pubkey_type = PublicKeyType.get(akey['type'])
            key = PublicKey(  # initialization canonicalizes id
                rv.did,
                akey['id'],
                akey[pubkey_type.specifier],
                pubkey_type,
                canon_did(akey['controller']),
                True)
            rv.pubkey[key.id] = key

    for service in did_doc.get('service', []):
        endpoint = service['serviceEndpoint']
        svc = Service(  # initialization canonicalizes id
            rv.did,
            service.get('id', canon_ref(rv.did, 'assigned-service-{}'.format(len(rv.service)), ';')),
            service['type'],
            rv.add_service_pubkeys(service, 'recipientKeys'),
            rv.add_service_pubkeys(service, ['mediatorKeys', 'routingKeys']),
            canon_ref(rv.did, endpoint, ';') if ';' in endpoint else endpoint,
            service.get('priority', None))
        rv.service[svc.id] = svc

    return rv
python
{ "resource": "" }
q11066
Protocol.txn_data2schema_key
train
def txn_data2schema_key(self, txn: dict) -> SchemaKey: """ Return schema key from ledger transaction data. :param txn: get-schema transaction (by sequence number) :return: schema key identified """ rv = None if self == Protocol.V_13: rv = SchemaKey(txn['identifier'], txn['data']['name'], txn['data']['version']) else: txn_txn = txn.get('txn', None) or txn # may have already run this txn through txn2data() below rv = SchemaKey( txn_txn['metadata']['from'], txn_txn['data']['data']['name'], txn_txn['data']['data']['version']) return rv
python
{ "resource": "" }
q11067
Protocol.txn2data
train
def txn2data(self, txn: dict) -> str: """ Given ledger transaction, return its data json. :param txn: transaction as dict :return: transaction data json """ rv_json = json.dumps({}) if self == Protocol.V_13: rv_json = json.dumps(txn['result'].get('data', {})) else: rv_json = json.dumps((txn['result'].get('data', {}) or {}).get('txn', {})) # "data": null for no such txn return rv_json
python
{ "resource": "" }
q11068
Protocol.txn2epoch
train
def txn2epoch(self, txn: dict) -> int: """ Given ledger transaction, return its epoch time. :param txn: transaction as dict :return: transaction time """ rv = None if self == Protocol.V_13: rv = txn['result']['txnTime'] else: rv = txn['result']['txnMetadata']['txnTime'] return rv
python
{ "resource": "" }
q11069
Protocol.genesis_host_port
train
def genesis_host_port(self, genesis_txn: dict) -> tuple: """ Given a genesis transaction, return its node host and port. :param genesis_txn: genesis transaction as dict :return: node host and port """ txn_data = genesis_txn['data'] if self == Protocol.V_13 else genesis_txn['txn']['data']['data'] return (txn_data['node_ip'], txn_data['node_port'])
python
{ "resource": "" }
q11070
Tails.open
train
async def open(self) -> 'Tails': """ Open reader handle and return current object. :return: current object """ LOGGER.debug('Tails.open >>>') self._reader_handle = await blob_storage.open_reader('default', self._tails_config_json) LOGGER.debug('Tails.open <<<') return self
python
{ "resource": "" }
q11071
Tails.ok_hash
train
def ok_hash(token: str) -> bool: """ Whether input token looks like a valid tails hash. :param token: candidate string :return: whether input token looks like a valid tails hash """ LOGGER.debug('Tails.ok_hash >>> token: %s', token) rv = re.match('[{}]{{42,44}}$'.format(B58), token) is not None LOGGER.debug('Tails.ok_hash <<< %s', rv) return rv
python
{ "resource": "" }
q11072
Predicate.get
train
def get(relation: str) -> 'Predicate':
    """
    Return enum instance corresponding to input relation string, in Fortran, WQL,
    or mathematical spelling; None for no match.

    :param relation: relation string to match, case-insensitively
    :return: enum instance corresponding to input relation string, or None
    """

    for pred in Predicate:
        if relation.upper() in (pred.value.fortran, pred.value.wql.upper(), pred.value.math):
            return pred
    return None
python
{ "resource": "" }
q11073
Predicate.to_int
train
def to_int(value: Any) -> int: """ Cast a value as its equivalent int for indy predicate argument. Raise ValueError for any input but int, stringified int, or boolean. :param value: value to coerce. """ if isinstance(value, (bool, int)): return int(value) return int(str(value))
python
{ "resource": "" }
q11074
Role.get
train
def get(token: Union[str, int] = None) -> 'Role': """ Return enum instance corresponding to input token. :param token: token identifying role to indy-sdk: 'STEWARD', 'TRUSTEE', 'TRUST_ANCHOR', '' or None :return: enum instance corresponding to input token """ if token is None: return Role.USER for role in Role: if role == Role.ROLE_REMOVE: continue # ROLE_REMOVE is not a sensible role to parse from any configuration if isinstance(token, int) and token in role.value: return role if str(token).upper() == role.name or token in (str(v) for v in role.value): # could be numeric string return role return None
python
{ "resource": "" }
q11075
Role.token
train
def token(self) -> str: """ Return token identifying role to indy-sdk. :return: token: 'STEWARD', 'TRUSTEE', 'TRUST_ANCHOR', or None (for USER) """ return self.value[0] if self in (Role.USER, Role.ROLE_REMOVE) else self.name
python
{ "resource": "" }
q11076
cached_get
train
def cached_get(timeout, *params):
    """Decorator applied specifically to a view's get method"""

    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(view_or_request, *args, **kwargs):

            # The type of the request gets muddled when using a function based
            # decorator. We must use a function based decorator so it can be
            # used in urls.py.
            request = getattr(view_or_request, "request", view_or_request)
            if not hasattr(_thread_locals, "ultracache_request"):
                setattr(_thread_locals, "ultracache_request", request)

            # If request not GET or HEAD never cache
            if request.method.lower() not in ("get", "head"):
                return view_func(view_or_request, *args, **kwargs)

            # If request contains messages never cache
            msg_count = 0
            try:
                msg_count = len(request._messages)
            except (AttributeError, TypeError):
                pass
            if msg_count:
                return view_func(view_or_request, *args, **kwargs)

            # Compute a cache key
            li = [str(view_or_request.__class__), view_func.__name__]

            # request.get_full_path is implicitly added if no other request
            # path is provided. get_full_path includes the querystring and is
            # the more conservative approach but makes it trivially easy for a
            # request to bust through the cache.
            if not set(params).intersection(set((
                "request.get_full_path()", "request.path", "request.path_info"
            ))):
                li.append(request.get_full_path())

            if "django.contrib.sites" in settings.INSTALLED_APPS:
                li.append(get_current_site_pk(request))

            # Pre-sort kwargs
            keys = list(kwargs.keys())
            keys.sort()
            for key in keys:
                li.append("%s,%s" % (key, kwargs[key]))

            # Extend cache key with custom variables
            for param in params:
                if not isinstance(param, str):
                    param = str(param)
                li.append(eval(param))

            s = ":".join([str(item) for item in li])
            hashed = hashlib.md5(s.encode("utf-8")).hexdigest()
            cache_key = "ucache-get-%s" % hashed
            cached = cache.get(cache_key, None)
            if cached is None:
                # The get view as outermost caller may bluntly set _ultracache
                request._ultracache = []
                response = view_func(view_or_request, *args, **kwargs)
                content = None
                if isinstance(response, TemplateResponse):
                    content = response.render().rendered_content
                elif isinstance(response, HttpResponse):
                    content = response.content
                if content is not None:
                    headers = getattr(response, "_headers", {})
                    cache.set(
                        cache_key,
                        {"content": content, "headers": headers},
                        timeout
                    )
                    cache_meta(request, cache_key)
            else:
                response = HttpResponse(cached["content"])
                # Headers have a non-obvious format
                for k, v in cached["headers"].items():
                    response[v[0]] = v[1]

            return response

        return _wrapped_view

    return decorator
python
{ "resource": "" }
q11077
ultracache
train
def ultracache(timeout, *params): """Decorator applied to a view class. The get method is decorated implicitly.""" def decorator(cls): class WrappedClass(cls): def __init__(self, *args, **kwargs): super(WrappedClass, self).__init__(*args, **kwargs) @cached_get(timeout, *params) def get(self, *args, **kwargs): return super(WrappedClass, self).get(*args, **kwargs) return WrappedClass return decorator
python
{ "resource": "" }
q11078
Issuer._send_rev_reg_def
train
async def _send_rev_reg_def(self, rr_id: str) -> None: """ Move tails file from hopper; deserialize revocation registry definition and initial entry; send to ledger and cache revocation registry definition. Operation serializes to subdirectory within tails hopper directory; symbolic link presence signals completion. Raise AbsentRevReg if revocation registry is not ready in hopper, or AbsentTails if tails file is not yet linked by its revocation registry identifier. :param rr_id: revocation registry identifier """ LOGGER.debug('Issuer._send_rev_reg_def >>> rr_id: %s', rr_id) dir_tails_rr_id = self.rrb.dir_tails_top(rr_id) dir_target = self.rrb.dir_tails_target(rr_id) if not Tails.linked(dir_tails_rr_id, rr_id): LOGGER.debug( 'Issuer._send_rev_reg_def <!< Tails file for rev reg %s not ready in dir %s', rr_id, dir_target) raise AbsentRevReg('Tails file for rev reg {} not ready in dir {}'.format(rr_id, dir_target)) file_rr_def = join(dir_target, 'rr_def.json') if not isfile(file_rr_def): LOGGER.debug('Issuer._send_rev_reg_def <!< Rev reg def file %s not present', file_rr_def) raise AbsentRevReg('Rev reg def file {} not present'.format(file_rr_def)) with open(file_rr_def, 'r') as fh_rr_def: rr_def_json = fh_rr_def.read() file_rr_ent = join(dir_target, 'rr_ent.json') if not isfile(file_rr_ent): LOGGER.debug('Issuer._send_rev_reg_def <!< Rev reg entry file %s not present', file_rr_ent) raise AbsentRevReg('Rev reg entry file {} not present'.format(file_rr_ent)) with open(file_rr_ent, 'r') as fh_rr_ent: rr_ent_json = fh_rr_ent.read() file_tails = Tails.linked(dir_tails_rr_id, rr_id) if not file_tails: LOGGER.debug('Issuer._send_rev_reg_def <!< Tails link %s not present in dir %s', rr_id, dir_target) raise AbsentTails('Tails link {} not present in dir {}'.format(rr_id, dir_target)) if self.rrbx: dir_cd_id = join(self.dir_tails, rev_reg_id2cred_def_id(rr_id)) makedirs(dir_cd_id, exist_ok=True) rename(file_tails, join(dir_cd_id, basename(file_tails))) with REVO_CACHE.lock: rr_def_req_json = await ledger.build_revoc_reg_def_request(self.did, rr_def_json) await self._sign_submit(rr_def_req_json) await self.get_rev_reg_def(rr_id) # add to cache en passant rr_ent_req_json = await ledger.build_revoc_reg_entry_request(self.did, rr_id, 'CL_ACCUM', rr_ent_json) await self._sign_submit(rr_ent_req_json) if self.rrbx: Tails.associate(self.dir_tails, rr_id, basename(file_tails)) rmtree(dir_tails_rr_id) else: remove(file_rr_def) remove(file_rr_ent) LOGGER.debug('Issuer._send_rev_reg_def <<<')
python
{ "resource": "" }
q11079
Issuer._set_rev_reg
train
async def _set_rev_reg(self, rr_id: str, rr_size: int) -> None: """ Move precomputed revocation registry data from hopper into place within tails directory. :param rr_id: revocation registry identifier :param rr_size: revocation registry size, in case creation required """ LOGGER.debug('Issuer._set_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size) assert self.rrbx dir_hopper_rr_id = join(self.rrb.dir_tails_hopper, rr_id) while Tails.linked(dir_hopper_rr_id, rr_id) is None: await asyncio.sleep(1) await self._send_rev_reg_def(rr_id) cd_id = rev_reg_id2cred_def_id(rr_id) (next_tag, rr_size_suggested) = Tails.next_tag(self.dir_tails, cd_id) rr_id = rev_reg_id(cd_id, next_tag) self.rrb.mark_in_progress(rr_id, rr_size or rr_size_suggested) LOGGER.debug('Issuer._set_rev_reg <<<')
python
{ "resource": "" }
q11080
Issuer._sync_revoc_for_issue
train
async def _sync_revoc_for_issue(self, rr_id: str, rr_size: int = None) -> None: """ Create revocation registry if need be for input revocation registry identifier; open and cache tails file reader. :param rr_id: revocation registry identifier :param rr_size: if new revocation registry necessary, its size (default as per RevRegBuilder.create_rev_reg()) """ LOGGER.debug('Issuer._sync_revoc_for_issue >>> rr_id: %s, rr_size: %s', rr_id, rr_size) if not ok_rev_reg_id(rr_id): LOGGER.debug('Issuer._sync_revoc_for_issue <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) (cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id) try: await self.get_cred_def(cd_id) except AbsentCredDef: LOGGER.debug( 'Issuer._sync_revoc_for_issue <!< tails tree %s may be for another ledger; no cred def found on %s', self.dir_tails, cd_id) raise AbsentCredDef('Tails tree {} may be for another ledger; no cred def found on {}'.format( self.dir_tails, cd_id)) with REVO_CACHE.lock: revo_cache_entry = REVO_CACHE.get(rr_id, None) tails = None if revo_cache_entry is None else revo_cache_entry.tails if tails is None: # it's a new revocation registry, or not yet set in cache try: tails = await Tails(self.dir_tails, cd_id, tag).open() except AbsentTails: # it's a new revocation registry if self.rrbx: await self._set_rev_reg(rr_id, rr_size) else: await self.rrb.create_rev_reg(rr_id, rr_size) await self._send_rev_reg_def(rr_id) tails = await Tails(self.dir_tails, cd_id, tag).open() # symlink should exist now if revo_cache_entry is None: REVO_CACHE[rr_id] = RevoCacheEntry(None, tails) else: REVO_CACHE[rr_id].tails = tails LOGGER.debug('Issuer._sync_revoc_for_issue <<<')
python
{ "resource": "" }
q11081
canon_ref
train
def canon_ref(did: str, ref: str, delimiter: str = None) -> str:
    """
    Given a reference in a DID document, return it in its canonical form of a URI.

    Raise BadIdentifier for a bad DID, or for a did:sov URI that does not embed a valid DID.

    :param did: DID acting as the identifier of the DID document
    :param ref: reference to canonicalize, either a DID or a fragment pointing to a location in the DID doc
    :param delimiter: delimiter character marking fragment (default '#') or introducing identifier (';')
        against DID resource
    :return: canonical URI form of the reference
    """

    if not ok_did(did):
        raise BadIdentifier('Bad DID {} cannot act as DID document identifier'.format(did))

    if ok_did(ref):  # e.g., LjgpST2rjsoxYegQDRm7EL
        return 'did:sov:{}'.format(did)

    if ok_did(resource(ref, delimiter)):  # e.g., LjgpST2rjsoxYegQDRm7EL#keys-1
        return 'did:sov:{}'.format(ref)

    if ref.startswith('did:sov:'):  # e.g., did:sov:LjgpST2rjsoxYegQDRm7EL, did:sov:LjgpST2rjsoxYegQDRm7EL#3
        rv = ref[8:]
        if ok_did(resource(rv, delimiter)):
            return ref
        raise BadIdentifier('Bad URI {} does not correspond to a sovrin DID'.format(ref))

    if urlparse(ref).scheme:  # e.g., https://example.com/messages/8377464
        return ref

    return 'did:sov:{}{}{}'.format(did, delimiter if delimiter else '#', ref)
python
{ "resource": "" }
q11082
_get_pwned
train
def _get_pwned(prefix): """ Fetches a dict of all hash suffixes from Pwned Passwords for a given SHA-1 prefix. """ try: response = requests.get( url=API_ENDPOINT.format(prefix), headers={'User-Agent': USER_AGENT}, timeout=getattr( settings, 'PWNED_PASSWORDS_API_TIMEOUT', REQUEST_TIMEOUT, ), ) response.raise_for_status() except requests.RequestException as e: # Gracefully handle timeouts and HTTP error response codes. log.warning( 'Skipped Pwned Passwords check due to error: %r', e ) return None results = {} for line in response.text.splitlines(): line_suffix, _, times = line.partition(':') results[line_suffix] = int(times) return results
python
{ "resource": "" }
q11083
pwned_password
train
def pwned_password(password): """ Checks a password against the Pwned Passwords database. """ if not isinstance(password, text_type): raise TypeError('Password values to check must be Unicode strings.') password_hash = hashlib.sha1(password.encode('utf-8')).hexdigest().upper() prefix, suffix = password_hash[:5], password_hash[5:] results = _get_pwned(prefix) if results is None: # Gracefully handle timeouts and HTTP error response codes. return None return results.get(suffix, 0)
python
{ "resource": "" }
q11084
Optimizer.add_regularizer
train
def add_regularizer(self, proxfun, **kwargs): """ Add a regularizer from the operators module to the list of objectives Parameters ---------- proxfun : string or function If a string, then it must be the name of a corresponding function in the `operators` module. If a function, then it must apply a proximal update given an initial point x0, momentum parameter rho, and optional arguments given in `**kwargs`. \\*\\*kwargs : keyword arguments Any optional arguments required for the given function """ # if proxfun is a string, grab the corresponding function from operators.py if isinstance(proxfun, str): try: proxfun_name = proxfun.split(None, 1)[0] # Ignore everything after white space op = getattr(operators, proxfun_name) self.objectives.append(lambda theta, rho: op(theta.copy(), float(rho), **kwargs)) except AttributeError as e: print(str(e) + '\n' + 'Could not find the function ' + proxfun + ' in the operators module!') # if proxfun is a function, add it as its own proximal operator elif hasattr(proxfun, '__call__'): self.objectives.append(lambda theta, rho: proxfun(theta.copy(), float(rho))) # type of proxfun must be a string or a function else: raise TypeError('The argument "proxfun" must be a string or a function!')
python
{ "resource": "" }
q11085
Optimizer.set_regularizers
train
def set_regularizers(self, regularizers, clear=True):
    """
    Adds a set of regularizers

    Parameters
    ----------
    regularizers : dict
        Each key is the name of a corresponding proximal operator, and the value
        associated with that key is a set of keyword arguments

    clear : boolean, optional
        Whether or not to clear the existing regularizers. (Default: True)
    """

    # clear existing operators
    if clear:
        self.clear()

    # add new regularizers
    for proxfun in regularizers:
        self.add_regularizer(proxfun, **regularizers[proxfun])
python
{ "resource": "" }
q11086
Optimizer.minimize
train
def minimize(self, theta_init, max_iter=50, callback=None, disp=0, tau=(10., 2., 2.), tol=1e-3):
    """
    Minimize a list of objectives using a proximal consensus algorithm

    Parameters
    ----------
    theta_init : ndarray
        Initial parameter vector (numpy array)

    max_iter : int, optional
        Maximum number of iterations to run (default: 50)

    callback : function, optional
        A function that gets called on each iteration with the following arguments:
        the current parameter value (ndarray), and a dictionary containing information
        about the status of the algorithm

    disp : int, optional
        Determines how much information to display when running. Ranges from
        0 (nothing) to 3 (lots of information)

    Returns
    -------
    theta : ndarray
        The parameters found after running the optimization procedure

    Other Parameters
    ----------------
    tau : (float, float, float), optional
        Initial, increment and decrement parameters for the momentum scheduler
        (default: (10, 2, 2))

    tol : float, optional
        Residual tolerance for assessing convergence. If both the primal and dual
        residuals are less than this value, then the algorithm has converged
        (default: 1e-3)
    """

    # get list of objectives for this parameter
    num_obj = len(self.objectives)
    assert num_obj >= 1, "There must be at least one objective!"

    # initialize lists of primal and dual variable copies, one for each objective
    orig_shape = theta_init.shape
    primals = [theta_init.flatten() for _ in range(num_obj)]
    duals = [np.zeros(theta_init.size) for _ in range(num_obj)]
    theta_avg = np.mean(primals, axis=0).ravel()

    # initialize penalty parameter
    tau = namedtuple('tau', ('init', 'inc', 'dec'))(*tau)
    rho = tau.init

    # store cumulative runtimes of each iteration, starting now
    tstart = time.time()

    # clear metadata
    self.metadata = defaultdict(list)

    # run ADMM iterations
    self.converged = False
    for cur_iter in range(max_iter):

        # store the parameters from the previous iteration
        theta_prev = theta_avg

        # update each primal variable copy by taking a proximal step via each objective
        for varidx, dual in enumerate(duals):
            primals[varidx] = self.objectives[varidx]((theta_prev - dual).reshape(orig_shape), rho).ravel()

        # average primal copies
        theta_avg = np.mean(primals, axis=0)

        # update the dual variables (after primal update has finished)
        for varidx, primal in enumerate(primals):
            duals[varidx] += primal - theta_avg

        # compute primal and dual residuals
        primal_resid = float(np.sum([np.linalg.norm(primal - theta_avg) for primal in primals]))
        dual_resid = num_obj * rho ** 2 * np.linalg.norm(theta_avg - theta_prev)

        # update penalty parameter according to primal and dual residuals
        # (see sect. 3.4.1 of the Boyd and Parikh ADMM paper)
        if primal_resid > tau.init * dual_resid:
            rho *= float(tau.inc)
        elif dual_resid > tau.init * primal_resid:
            rho /= float(tau.dec)

        # update metadata for this iteration
        self.metadata['Primal resid'].append(primal_resid)
        self.metadata['Dual resid'].append(dual_resid)
        self.metadata['Time (s)'].append(time.time() - tstart)
        self.metadata['rho'].append(rho)

        # invoke the callback function with the current parameters and history
        if callback is not None:
            # get the metadata from this iteration
            data = valmap(last, self.metadata)
            callback(theta_avg.reshape(orig_shape), data)

        # update the display
        self.update_display(cur_iter + 1, disp)

        # check for convergence
        if (primal_resid <= tol) & (dual_resid <= tol):
            self.converged = True
            break

    # clean up display
    self.update_display(-1, disp)

    # store and return final parameters
    self.theta = theta_avg.reshape(orig_shape)
    return self.theta
python
{ "resource": "" }
q11087
Optimizer.update_display
train
def update_display(self, iteration, disp_level, col_width=12):  # pragma: no cover
    """
    Prints information about the optimization procedure to standard output

    Parameters
    ----------
    iteration : int
        The current iteration. Must be either a positive integer or -1, which
        indicates the end of the algorithm

    disp_level : int
        An integer which controls how much information to display, ranging from
        0 (nothing) to 3 (lots of stuff)

    col_width : int
        The width of each column in the data table, used if disp_level > 1
    """

    # exit and print nothing if disp_level is zero
    if disp_level == 0:
        return

    else:

        # simple update, no table
        if disp_level == 1 and iteration >= 0:
            print('[Iteration %i]' % iteration)

        # fancy table updates
        if disp_level > 1:

            # get the metadata from this iteration
            data = valmap(last, self.metadata)

            # choose what keys to use
            keys = ['Time (s)', 'Primal resid', 'Dual resid', 'rho']

            # initial update. print out table headers
            if iteration == 1:
                print(tableprint.header(keys, width=col_width))

            # print data
            print(tableprint.row([data[k] for k in keys], width=col_width, format_spec='4g'))

            if iteration == -1:
                print(tableprint.bottom(len(keys), width=col_width) + '\n')

        # print convergence statement
        if iteration == -1 and self.converged:
            print('Converged after %i iterations!' % len(self.metadata['Primal resid']))
python
{ "resource": "" }
q11088
susvd
train
def susvd(x, x_obs, rho, penalties):
    """
    Sequential unfolding SVD

    Parameters
    ----------
    x : Tensor
        Initial estimate of the low-rank tensor

    x_obs : array_like
        Observed tensor data to approximate

    rho : float
        Momentum parameter for the proximal steps

    penalties : array_like
        Penalty for each unfolding of the input tensor

    Yields
    ------
    x : Tensor
        The updated estimate after each round of proximal updates
    """

    assert type(x) == Tensor, "Input array must be a Tensor"

    while True:

        # proximal operator for the Frobenius-norm data-fidelity term
        x = squared_error(x, rho, x_obs)

        # sequential singular value thresholding on each unfolding
        for ix, penalty in enumerate(penalties):
            x = x.unfold(ix).svt(penalty / rho).fold()

        yield x
python
{ "resource": "" }
q11089
Construct.build
train
def build(self, obj, context=None) -> bytes: """ Build bytes from the python object. :param obj: Python object to build bytes from. :param context: Optional context dictionary. """ stream = BytesIO() self.build_stream(obj, stream, context) return stream.getvalue()
python
{ "resource": "" }
q11090
Construct.parse
train
def parse(self, data: bytes, context=None): """ Parse some python object from the data. :param data: Data to be parsed. :param context: Optional context dictionary. """ stream = BytesIO(data) return self.parse_stream(stream, context)
python
{ "resource": "" }
q11091
Construct.build_stream
train
def build_stream(self, obj, stream: BytesIO, context=None) -> None: """ Build bytes from the python object into the stream. :param obj: Python object to build bytes from. :param stream: A ``io.BytesIO`` instance to write bytes into. :param context: Optional context dictionary. """ if context is None: context = Context() if not isinstance(context, Context): context = Context(context) try: self._build_stream(obj, stream, context) except Error: raise except Exception as exc: raise BuildingError(str(exc))
python
{ "resource": "" }
q11092
Construct.parse_stream
train
def parse_stream(self, stream: BytesIO, context=None): """ Parse some python object from the stream. :param stream: Stream from which the data is read and parsed. :param context: Optional context dictionary. """ if context is None: context = Context() if not isinstance(context, Context): context = Context(context) try: return self._parse_stream(stream, context) except Error: raise except Exception as exc: raise ParsingError(str(exc))
python
{ "resource": "" }
q11093
Construct.sizeof
train
def sizeof(self, context=None) -> int: """ Return the size of the construct in bytes. :param context: Optional context dictionary. """ if context is None: context = Context() if not isinstance(context, Context): context = Context(context) try: return self._sizeof(context) except Error: raise except Exception as exc: raise SizeofError(str(exc))
python
{ "resource": "" }
q11094
poissreg
train
def poissreg(x0, rho, x, y):
    """
    Proximal operator for Poisson regression

    Computes the proximal operator of the negative log-likelihood loss
    assuming a Poisson noise distribution.

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)

    x : (n, k) array_like
        A design matrix consisting of n examples of k-dimensional features (or input).

    y : (n,) array_like
        A vector containing the responses (output) to the n features given in x.

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step

    """

    # objective and gradient
    n = float(x.shape[0])

    def f_df(w):
        obj = np.mean(np.exp(x.dot(w)) - y * x.dot(w))
        grad = (x.T.dot(np.exp(x.dot(w))) - x.T.dot(y)) / n
        return obj, grad

    # minimize via BFGS (bfgs expects a single function returning
    # both the objective and its gradient)
    return bfgs(x0, rho, f_df)
python
{ "resource": "" }
q11095
bfgs
train
def bfgs(x0, rho, f_df, maxiter=50, method='BFGS'): """ Proximal operator for minimizing an arbitrary function using BFGS Uses the BFGS algorithm to find the proximal update for an arbitrary function, `f`, whose gradient is known. Parameters ---------- x0 : array_like The starting or initial point used in the proximal update step rho : float Momentum parameter for the proximal step (larger value -> stays closer to x0) f_df : function The objective function and gradient maxiter : int, optional Maximum number of iterations to take (default: 50) method : str, optional Which scipy.optimize algorithm to use (default: 'BFGS') Returns ------- theta : array_like The parameter vector found after running the proximal update step """ # keep track of the original shape orig_shape = x0.shape # specify the objective function and gradient for the proximal operator def f_df_augmented(x): xk = x.reshape(orig_shape) obj, grad = f_df(xk) g = obj + (rho / 2.) * np.sum((xk - x0) ** 2) dg = (grad + rho * (xk - x0)).ravel() return g, dg # minimize via BFGS options = {'maxiter': maxiter, 'disp': False} return opt.minimize(f_df_augmented, x0.ravel(), method=method, jac=True, options=options).x.reshape(orig_shape)
python
{ "resource": "" }
q11096
smooth
train
def smooth(x0, rho, gamma, axis=0):
    """
    Proximal operator for a smoothing function enforced via the discrete laplacian operator

    Notes
    -----
    Currently only works with matrices (2-D arrays) as input

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)

    gamma : float
        A constant that weights how strongly to enforce the constraint

    axis : int, optional
        Axis along which to apply the smoothing penalty (default: 0)

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step
    """

    # Apply Laplacian smoothing
    n = x0.shape[axis]
    lap_op = spdiags([(2 + rho / gamma) * np.ones(n), -1 * np.ones(n), -1 * np.ones(n)], [0, -1, 1], n, n,
                     format='csc')
    x_out = np.rollaxis(spsolve(gamma * lap_op, rho * np.rollaxis(x0, axis, 0)), axis, 0)
    return x_out
python
{ "resource": "" }
q11097
tvd
train
def tvd(x0, rho, gamma): """ Proximal operator for the total variation denoising penalty Requires scikit-image be installed Parameters ---------- x0 : array_like The starting or initial point used in the proximal update step rho : float Momentum parameter for the proximal step (larger value -> stays closer to x0) gamma : float A constant that weights how strongly to enforce the constraint Returns ------- theta : array_like The parameter vector found after running the proximal update step Raises ------ ImportError If scikit-image fails to be imported """ try: from skimage.restoration import denoise_tv_bregman except ImportError: print('Error: scikit-image not found. TVD will not work.') return x0 return denoise_tv_bregman(x0, rho / gamma)
python
{ "resource": "" }
q11098
linsys
train
def linsys(x0, rho, P, q): """ Proximal operator for the linear approximation Ax = b Minimizes the function: .. math:: f(x) = (1/2)||Ax-b||_2^2 = (1/2)x^TA^TAx - (b^TA)x + b^Tb Parameters ---------- x0 : array_like The starting or initial point used in the proximal update step rho : float Momentum parameter for the proximal step (larger value -> stays closer to x0) P : array_like The symmetric matrix A^TA, where we are trying to approximate Ax=b q : array_like The vector A^Tb, where we are trying to approximate Ax=b Returns ------- theta : array_like The parameter vector found after running the proximal update step """ return np.linalg.solve(rho * np.eye(q.shape[0]) + P, rho * x0.copy() + q)
python
{ "resource": "" }
q11099
StatusCodeAssertionsMixin.assert_redirect
train
def assert_redirect(self, response, expected_url=None):
    """
    assertRedirects from Django TestCase follows the redirect chain;
    this assertion does not - which is more like real unit testing
    """
    self.assertIn(
        response.status_code,
        self.redirect_codes,
        self._get_redirect_assertion_message(response),
    )
    if expected_url:
        location_header = response._headers.get('location', None)
        self.assertIsNotNone(
            location_header,
            'Response should redirect to {0}, but sends no Location header'.format(expected_url),
        )
        self.assertEqual(
            location_header,
            ('Location', str(expected_url)),
            'Response should redirect to {0}, but it redirects to {1} instead'.format(
                expected_url,
                location_header[1],
            )
        )
python
{ "resource": "" }