| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
self.stream_files_lock.acquire()
try:
full_path = os.path.normpath(self.temp_path + '/store-blob/' + self.job_id + '/' + path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
if hasattr(data, 'encode'):
data = data.encode("utf-8", 'replace')
already_set = path in self.store_files and self.store_files[path] == data
if is_debug3():
sys.__stderr__.write('git:store_file(%s, %s, %s), already_set=%s\n'
% (str(path), str(data)[0:180], str(fast_lane), str(already_set)))
if already_set:
return
open(full_path, 'wb').write(data)
self.store_files[path] = data
if self.client.online is not False:
self.client.send({'type': 'store-blob', 'path': path, 'data': data}, channel='' if fast_lane else 'files')
finally:
self.stream_files_lock.release()
|
def store_file(self, path, data, fast_lane=True)
|
Store the file in the temp folder and stream it to the server if online.
This makes sure the server always has the newest content of this file.
This method always overwrites the content at path. If you want to continually append content,
use Git.stream_file() instead.
At the end of the job, the content the server received is stored as a git blob on the server. It is then committed
locally and pushed. Git detects that the server already has that version (through the continuous streaming)
and won't push it again.
| 3.271437
| 3.134875
| 1.043562
|
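A hypothetical usage sketch for store_file() above; `git` stands in for an instance of the surrounding Git class with an active job, and the path and payload are illustrative only.

# Hypothetical call: overwrites the temp copy of the file and, if the client
# is online, streams the new content to the server.
git.store_file('aetros/job/status.json', '{"status": "STARTING"}')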
# create temp file
# open temp file
# register stream file and write locally
# on end() git_commit that file locally
# create socket connection to server
# stream file to server
# on end() send the server an end signal, so it can store the content in git as a blob as well.
# A git push would detect that both sides have the same content already,
# except when server connection broke between start() and end().
# Result -> already transmitted logs/channel data (probably many MBs) won't be transferred twice
# when doing a git-push.
# return handler to write to this file
full_path = os.path.normpath(self.temp_path + '/stream-blob/' + self.job_id + '/' + path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
handle = open(full_path, 'wb')
self.streamed_files[path] = handle
class Stream():
def __init__(self, git):
self.git = git
def write(self, data):
if path not in self.git.streamed_files:
# already committed to server
return
if hasattr(data, 'encode'):
data = data.encode("utf-8", 'replace')
try:
self.git.stream_files_lock.acquire()
if not handle.closed:
handle.write(data)
handle.flush()
except IOError as e:
handle.close()
if 'No space left' in e.__str__():
sys.stderr.write(traceback.format_exc() + '\n')
self.git.logger.error(e.__str__())
finally:
self.git.stream_files_lock.release()
if self.git.client.online is not False:
self.git.client.send({'type': 'stream-blob', 'path': path, 'data': data}, channel='' if fast_lane else 'files')
return Stream(self)
|
def stream_file(self, path, fast_lane=True)
|
Create a temp file, stream it to the server if online, and append content to it using the write() method.
This makes sure the server always has the newest content of this file.
At the end of the job, the content the server received is stored as a git blob on the server. It is then committed
locally and pushed. Git detects that the server already has that version (through the continuous streaming)
and won't push it again. Very handy for rather large files that grow over time (like channel data and logs).
Example:
self.log_stream = git.stream_file('log.txt')
self.log_stream.write("new line\n");
self.log_stream.write("another line\n");
| 6.318229
| 5.793599
| 1.090553
|
self.command_exec(['update-index', '--add', '--cacheinfo', mode, blob_id, path])
|
def add_index(self, mode, blob_id, path)
|
Add a new entry to the current index.
:param mode: file mode (e.g. '100644')
:param blob_id: blob sha
:param path: path for the index entry
:return:
| 14.078933
| 17.14378
| 0.821227
|
blob_id = self.write_blob(content)
self.add_index('100644', blob_id, git_path)
|
def add_file(self, git_path, content)
|
Add a new file as blob in the storage and add its tree entry into the index.
:param git_path: str
:param content: str
| 5.311971
| 6.268772
| 0.84737
|
args = ['--work-tree', work_tree, 'add', '-f']
if verbose:
args.append('--verbose')
args.append(path)
self.command_exec(args, show_output=verbose)
|
def add_file_path_in_work_tree(self, path, work_tree, verbose=True)
|
Add the file at the given path to the index of the given work tree (git add -f).
| 3.469519
| 2.961688
| 1.171467
|
if self.git_batch_commit:
self.add_file(path, content)
self.git_batch_commit_messages.append(message)
else:
with self.lock_write():
if self.job_id:
self.read_tree(self.ref_head)
self.add_file(path, content)
return self.commit_index(message)
|
def commit_file(self, message, path, content)
|
Add a new file as blob in the storage, add its tree entry into the index and commit the index.
:param message: str
:param path: str
:param content: str
:return:
| 4.992706
| 5.284237
| 0.94483
|
tree_id = self.write_tree()
args = ['commit-tree', tree_id, '-p', self.ref_head]
# todo, this can end in a race-condition with other processes adding commits
commit = self.command_exec(args, message)[0].decode('utf-8').strip()
self.command_exec(['update-ref', self.ref_head, commit])
return commit
|
def commit_index(self, message)
|
Commit the current index.
:param message: str
:return: str the generated commit sha
| 6.118577
| 6.788196
| 0.901355
|
try:
out, code, err = self.command_exec(['cat-file', '-p', self.ref_head+':'+path])
if not code:
return out.decode('utf-8')
except Exception:
pass
return None
|
def contents(self, path)
|
Reads the given path of current ref_head and returns its content as utf-8
| 6.789996
| 5.234303
| 1.297211
|
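A hypothetical sketch tying commit_file() and contents() above together; `git` again stands in for an instance of the surrounding Git class with an open job and no batch commit in progress.

# Commit a small file onto ref_head, then read it back from the new head.
sha = git.commit_file('update meta', 'aetros/job/meta.json', '{"epoch": 1}')
print(sha)                                   # sha of the commit created on ref_head
print(git.contents('aetros/job/meta.json'))  # '{"epoch": 1}'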
job_backend.set_status('STARTING')
job_model = job_backend.get_job_model()
model_provider = job_model.get_model_provider()
job_backend.set_status('LOAD DATA')
datasets = job_model.get_datasets(trainer)
print('trainer.input_shape = %s\n' % (simplejson.dumps(trainer.input_shape, default=invalid_json_values),))
print('trainer.classes = %s\n' % (simplejson.dumps(trainer.classes, default=invalid_json_values),))
multiple_inputs = len(datasets) > 1
insights_x = [] if multiple_inputs else None
for dataset_name in job_model.get_input_dataset_names():
dataset = datasets[dataset_name]
if is_generator(dataset['X_train']):
batch_x, batch_y = dataset['X_train'].next()
if multiple_inputs:
insights_x.append(batch_x[0])
else:
insights_x = batch_x[0]
else:
if multiple_inputs:
insights_x.append(dataset['X_train'][0])
else:
insights_x = dataset['X_train'][0]
keras_callback.insights_x = insights_x
print('Insights sample shape', keras_callback.insights_x.shape)
keras_callback.write("Possible data keys '%s'\n" % "','".join(list(datasets.keys())))
data_train = model_provider.get_training_data(trainer, datasets)
data_validation = model_provider.get_validation_data(trainer, datasets)
keras_callback.set_validation_data(data_validation, trainer.nb_val_samples)
trainer.set_status('CONSTRUCT')
model = model_provider.get_model(trainer)
trainer.set_model(model)
trainer.set_status('COMPILING')
loss = model_provider.get_loss(trainer)
optimizer = model_provider.get_optimizer(trainer)
model_provider.compile(trainer, model, loss, optimizer)
model.summary()
trainer.callbacks.append(keras_callback)
model_provider.train(trainer, model, data_train, data_validation)
|
def job_start(job_backend, trainer, keras_callback)
|
Starts the training of a job. Needs job_prepare() first.
:type job_backend: JobBackend
:type trainer: Trainer
:return:
| 2.765798
| 2.806132
| 0.985627
|
concurrent = psutil.cpu_count()
dataset_config = dataset['config']
controller = {'running': True}
q = Queue(concurrent)
result = {
'X_train': [],
'Y_train': [],
'X_test': [],
'Y_test': []
}
images = []
max = 0
path = job_model.get_dataset_downloads_dir(dataset)
if 'path' in dataset['config']:
path = dataset['config']['path']
classes_count = 0
category_map = {}
classes = []
trainer.set_status('LOAD IMAGES INTO MEMORY')
try:
for i in range(concurrent):
t = ImageReadWorker(q, job_model, node, path, images, controller)
t.daemon = True
t.start()
for validation_or_training in ['validation', 'training']:
if os.path.isdir(os.path.normpath(path + '/' + validation_or_training)):
for category_name in os.listdir(os.path.normpath(path + '/' + validation_or_training)):
if os.path.isdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
if category_name not in category_map:
category_map[category_name] = classes_count
if 'classes' in dataset_config and 'category_' in category_name:
category_idx = int(category_name.replace('category_', ''))
category_map[category_name] = category_idx
target_category = dataset_config['classes'][category_idx]
classes.append(target_category['title'] or 'Class %s' % (category_idx, ))
else:
classes.append(category_name)
classes_count += 1
for id in os.listdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
file_path = os.path.join(path, validation_or_training, category_name, id)
q.put([file_path, validation_or_training == 'validation', category_name])
max += 1
q.join()
controller['running'] = False
train_images = []
test_images = []
for v in images:
image, validation, category_dir = v
if validation is True:
test_images.append([image, category_map[category_dir]])
else:
train_images.append([image, category_map[category_dir]])
train_datagen = None
augmentation = bool(get_option(dataset_config, 'augmentation', False))
if augmentation:
train_datagen = get_image_data_augmentor_from_dataset(dataset)
train = InMemoryDataGenerator(train_datagen, train_images, classes_count, job_model.job['config']['batchSize'])
test = InMemoryDataGenerator(None, test_images, classes_count, job_model.job['config']['batchSize'])
nb_sample = len(train_images)
trainer.set_info('Dataset size', {'training': nb_sample, 'validation': len(test_images)})
trainer.set_generator_training_nb(nb_sample)
trainer.set_generator_validation_nb(len(test_images))
trainer.logger.info(("Found %d classes, %d images (%d in training [%saugmented], %d in validation). Read all images into memory from %s" %
(classes_count, max, len(train_images), 'not ' if augmentation is False else '', len(test_images), path)))
if classes_count == 0:
trainer.logger.warning("Could not find any classes. Does the directory contain images?")
sys.exit(1)
trainer.output_size = classes_count
trainer.set_info('classes', classes)
trainer.classes = classes
result['X_train'] = train
result['Y_train'] = train
result['X_test'] = test
result['Y_test'] = test
return result
except KeyboardInterrupt:
controller['running'] = False
sys.exit(1)
|
def read_images_in_memory(job_model, dataset, node, trainer)
|
Reads all images into memory and applies augmentation if enabled
| 2.875242
| 2.834873
| 1.01424
|
libcudart = get_libcudart()
devices = {}
for i in range(0, get_installed_devices()):
gpu = get_device_properties(i)
pciBusId = ctypes.create_string_buffer(64)
libcudart.cudaDeviceGetPCIBusId(ctypes.byref(pciBusId), 64, i)
full_id = pciBusId.value.decode('utf-8')
gpu['fullId'] = full_id
devices[full_id] = gpu
ordered = []
i = 0
for key in sorted(devices):
devices[key]['id'] = i
ordered.append(devices[key])
i += 1
del libcudart
return ordered
|
def get_ordered_devices()
|
Default CUDA_DEVICE_ORDER is not compatible with nvidia-docker.
Nvidia-Docker is using CUDA_DEVICE_ORDER=PCI_BUS_ID.
https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolation
| 3.252327
| 3.119856
| 1.042461
|
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
|
def sorted_nicely(l)
|
Sort the given iterable in the way that humans expect.
http://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
| 1.511049
| 1.462981
| 1.032856
|
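A minimal, self-contained check of the natural sort above (standard library only; the one-liner is repeated so the snippet runs on its own):

import re

def sorted_nicely(l):
    convert = lambda text: int(text) if text.isdigit() else text
    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
    return sorted(l, key=alphanum_key)

# numeric runs compare as integers, so 'file10' sorts after 'file2'
print(sorted_nicely(['file10', 'file2', 'file1']))  # ['file1', 'file2', 'file10']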
string = str(self)
if '.' not in string:
return string
# take care of the case when there is no scaling unit
if not string[-1].isalpha():
if sub_letter is not None:
return string.replace('.', sub_letter)
return string
letter = string[-1]
return string.replace('.', letter)[:-1]
|
def to_pn(self, sub_letter=None)
|
Returns the part number equivalent. For instance,
a '1k' would still be '1k', but a
'1.2k' would, instead, be a '1k2'
:return:
| 5.237278
| 5.097866
| 1.027347
|
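A hypothetical standalone adaptation of to_pn() above, operating on a plain engineering-notation string instead of on self, to show the intended transformation:

def to_pn(string, sub_letter=None):
    if '.' not in string:
        return string
    if not string[-1].isalpha():  # no scaling unit
        return string.replace('.', sub_letter) if sub_letter is not None else string
    letter = string[-1]
    return string.replace('.', letter)[:-1]

print(to_pn('1k'))        # '1k'  (unchanged)
print(to_pn('1.2k'))      # '1k2'
print(to_pn('3.3', 'V'))  # '3V3' (sub_letter fills in when there is no unit)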
# sys.getsizeof is nearly useless. All our data is stringable so rather
# use that as a measure of size.
size = len(repr(li))
keep = li
toss = []
n = len(li)
decrement_by = max(n // 10, 10)  # integer division: n must stay an int for slicing
while (size >= MAX_SIZE) and (n > 0):
n -= decrement_by
toss = li[:-n]
keep = li[-n:]
size = len(repr(keep))
return keep, toss
|
def reduce_list_size(li)
|
Return two lists
- the last N items of li whose total size is less than MAX_SIZE
- the rest of the original list li
| 6.949893
| 5.904063
| 1.177137
|
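A self-contained illustration of reduce_list_size() above; MAX_SIZE is assumed here (the real constant lives elsewhere in the module) and the body is repeated, with integer division, so the snippet runs on its own under Python 3:

MAX_SIZE = 200  # assumed value for illustration

def reduce_list_size(li):
    size = len(repr(li))
    keep, toss = li, []
    n = len(li)
    decrement_by = max(n // 10, 10)
    while (size >= MAX_SIZE) and (n > 0):
        n -= decrement_by
        toss = li[:-n]
        keep = li[-n:]
        size = len(repr(keep))
    return keep, toss

keep, toss = reduce_list_size(['x' * 10] * 50)
print(len(keep), len(toss))        # 10 40
print(len(repr(keep)) < MAX_SIZE)  # True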
LOGGER.debug('AnchorSmith.least_role >>>')
rv = Role.TRUSTEE.token()
LOGGER.debug('AnchorSmith.least_role <<< %s', rv)
return rv
|
def least_role() -> Role
|
Return the TRUSTEE indy-sdk role for an anchor acting in an AnchorSmith capacity.
:return: TRUSTEE role
| 11.895416
| 5.319106
| 2.236356
|
LOGGER.debug(
'AnchorSmith.send_nym >>> did: %s, verkey: %s, alias: %s, role: %s', did, verkey, alias, role)
if not ok_did(did):
LOGGER.debug('AnchorSmith.send_nym <!< Bad DID %s', did)
raise BadIdentifier('Bad DID {}'.format(did))
req_json = await ledger.build_nym_request(self.did, did, verkey, alias, (role or Role.USER).token())
await self._sign_submit(req_json)
LOGGER.debug('AnchorSmith.send_nym <<<')
|
async def send_nym(self, did: str, verkey: str = None, alias: str = None, role: Role = None) -> None
|
Send input anchor's cryptonym (including DID, verification key, plus optional alias and role)
to the distributed ledger.
Raise BadLedgerTxn on failure, BadIdentifier for bad DID, or BadRole for bad role.
:param did: anchor DID to send to ledger
:param verkey: optional anchor verification key
:param alias: optional alias
:param role: anchor role on the ledger (default value of USER)
| 3.209972
| 2.982746
| 1.07618
|
print()
print('Usage: setnym.py <config-ini>')
print()
print('where <config-ini> represents the path to the configuration file.')
print()
print('The operation submits a nym to a trustee anchor to send to the ledger,')
print('if the ledger does not have it already as configured.')
print()
print('The configuration file has sections and entries as follows:')
print(' * section [Node Pool]:')
print(' - name: the name of the node pool to which the operation applies')
print(' - genesis.txn.path: the path to the genesis transaction file')
print(' for the node pool (may omit if node pool already exists)')
print(' * section [Trustee Anchor]:')
print(" - name: the trustee anchor's (wallet) name")
print(" - wallet.type: (default blank) the trustee anchor's wallet type")
print(" - wallet.access: (default blank) the trustee anchor's")
print(' wallet access credential (password) value')
print(' * section [VON Anchor]:')
print(' - role: the role to request in the send-nym transaction; specify:')
print(' - (default) empty value for user with no additional write privileges')
print(' - TRUST_ANCHOR for VON anchor with write privileges for indy artifacts')
print(' - TRUSTEE for VON anchor sending further cryptonyms to the ledger')
print(" - name: the VON anchor's (wallet) name")
print(" - seed: the VON anchor's seed (optional, for wallet creation only)")
print(" - did: the VON anchor's DID (optional, for wallet creation only)")
print(' - wallet.create: whether to create the wallet if it does not yet exist')
print(' (value True/False, 1/0, or Yes/No)')
print(" - wallet.type: (default blank) the VON anchor's wallet type")
print(" - wallet.access: (default blank) the VON anchor's")
print(' wallet access credential (password) value.')
print()
|
def usage() -> None
|
Print usage advice.
| 4.811228
| 4.720824
| 1.01915
|
w_mgr = WalletManager()
rv = {}
for profile in an_data:
w_cfg = {'id': an_data[profile].name}
if an_data[profile].wallet_type:
w_cfg['storage_type'] = an_data[profile].wallet_type
if an_data[profile].seed:
w_cfg['seed'] = an_data[profile].seed
if an_data[profile].did:
w_cfg['did'] = an_data[profile].did
if an_data[profile].wallet_create:
try:
await w_mgr.create(w_cfg, access=an_data[profile].wallet_access)
except ExtantWallet:
pass
rv[profile] = w_mgr.get(w_cfg, access=an_data[profile].wallet_access)
return rv
|
async def _set_wallets(an_data: dict) -> dict
|
Set wallets as configured for setnym operation.
:param an_data: dict mapping profiles to anchor data
:return: dict mapping anchor names to wallet objects
| 2.756587
| 2.48366
| 1.109889
|
config = inis2dict(ini_path)
if config['Trustee Anchor']['name'] == config['VON Anchor']['name']:
raise ExtantWallet('Wallet names must differ between VON Anchor and Trustee Anchor')
cfg_van_role = config['VON Anchor'].get('role', None) or None # nudge empty value from '' to None
if not ok_role(cfg_van_role):
raise BadRole('Configured role {} is not valid'.format(cfg_van_role))
pool_data = NodePoolData(
config['Node Pool']['name'],
config['Node Pool'].get('genesis.txn.path', None) or None)
an_data = {
'tan': AnchorData(
Role.TRUSTEE,
config['Trustee Anchor']['name'],
config['Trustee Anchor'].get('seed', None) or None,
config['Trustee Anchor'].get('did', None) or None,
config['Trustee Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['Trustee Anchor'].get('wallet.type', None) or None,
config['Trustee Anchor'].get('wallet.access', None) or None),
'van': AnchorData(
Role.get(cfg_van_role),
config['VON Anchor']['name'],
config['VON Anchor'].get('seed', None) or None,
config['VON Anchor'].get('did', None) or None,
config['VON Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['VON Anchor'].get('wallet.type', None) or None,
config['VON Anchor'].get('wallet.access', None) or None)
}
an_wallet = await _set_wallets(an_data)
p_mgr = NodePoolManager()
if pool_data.name not in await p_mgr.list():
if pool_data.genesis_txn_path:
await p_mgr.add_config(pool_data.name, pool_data.genesis_txn_path)
else:
raise AbsentPool('Node pool {} has no ledger configuration, but {} specifies no genesis txn path'.format(
pool_data.name,
ini_path))
async with an_wallet['tan'] as w_tan, (
an_wallet['van']) as w_van, (
p_mgr.get(pool_data.name)) as pool, (
TrusteeAnchor(w_tan, pool)) as tan, (
NominalAnchor(w_van, pool)) as van:
send_verkey = van.verkey
try:
nym_role = await tan.get_nym_role(van.did)
if an_data['van'].role == nym_role:
return 0 # ledger is as per configuration
send_verkey = None # only owner can touch verkey
if nym_role != Role.USER: # only remove role when it is not already None on the ledger
await tan.send_nym(van.did, send_verkey, van.wallet.name, Role.ROLE_REMOVE)
except AbsentNym:
pass # cryptonym not there yet, fall through
await tan.send_nym(van.did, send_verkey, van.wallet.name, an_data['van'].role)
return 0
|
async def setnym(ini_path: str) -> int
|
Set configuration. Open pool, trustee anchor, and wallet of anchor whose nym to send.
Register exit hooks to close pool and trustee anchor.
Engage trustee anchor to send nym for VON anchor, if it differs on the ledger from configuration.
:param ini_path: path to configuration file
:return: 0 for OK, 1 for failure
| 3.291236
| 3.094948
| 1.063422
|
logging.basicConfig(
level=logging.INFO,
format='%(asctime)-15s | %(levelname)-8s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger('von_anchor').setLevel(logging.WARNING)
logging.getLogger('indy').setLevel(logging.ERROR)
if args is None:
args = sys.argv[1:]
if len(sys.argv) == 2:
try:
return do_wait(setnym(sys.argv[1]))
except VonAnchorError as vax:
print(str(vax))
return 1
else:
usage()
return 1
|
def main(args: Sequence[str] = None) -> int
|
Main line for script: check arguments and dispatch operation to set nym.
:param args: command-line arguments
:return: 0 for OK, 1 for failure
| 3.048308
| 2.683457
| 1.135963
|
return '{}:2:{}:{}'.format(origin_did, name, version)
|
def schema_id(origin_did: str, name: str, version: str) -> str
|
Return schema identifier for input origin DID, schema name, and schema version.
:param origin_did: DID of schema originator
:param name: schema name
:param version: schema version
:return: schema identifier
| 8.943666
| 8.412571
| 1.063131
|
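A format illustration for schema_id() above; the origin DID is the sample one used in docstrings further down:

def schema_id(origin_did, name, version):
    return '{}:2:{}:{}'.format(origin_did, name, version)

print(schema_id('WgWxqztrNooG92RXvxSTWv', 'green', '1.0'))
# WgWxqztrNooG92RXvxSTWv:2:green:1.0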
return bool(re.match('[{}]{{21,22}}$'.format(B58), token or ''))
|
def ok_did(token: str) -> bool
|
Whether input token looks like a valid distributed identifier.
:param token: candidate string
:return: whether input token looks like a valid distributed identifier
| 19.509113
| 17.256947
| 1.130508
|
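A sketch of ok_did() above; B58 is not shown in this excerpt, so it is assumed here to be the usual base58 character class (digits and letters excluding 0, O, I and l):

import re

B58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'  # assumed

def ok_did(token):
    return bool(re.match('[{}]{{21,22}}$'.format(B58), token or ''))

print(ok_did('WgWxqztrNooG92RXvxSTWv'))  # True: 22 base58 characters
print(ok_did('not-a-did'))               # False
print(ok_did(None))                      # False: falls back to ''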
return bool(re.match('[{}]{{21,22}}:2:.+:[0-9.]+$'.format(B58), token or ''))
|
def ok_schema_id(token: str) -> bool
|
Whether input token looks like a valid schema identifier;
i.e., <issuer-did>:2:<name>:<version>.
:param token: candidate string
:return: whether input token looks like a valid schema identifier
| 16.348175
| 11.643745
| 1.404031
|
s_key = s_id.split(':')
s_key.pop(1) # take out indy-sdk schema marker: 2 marks indy-sdk schema id
return SchemaKey(*s_key)
|
def schema_key(s_id: str) -> SchemaKey
|
Return schema key (namedtuple) convenience for schema identifier components.
:param s_id: schema identifier
:return: schema key (namedtuple) object
| 10.713346
| 9.9976
| 1.071592
|
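A minimal sketch of schema_key() above; SchemaKey is not shown in this excerpt, so its fields are assumed to be (origin_did, name, version):

from collections import namedtuple

SchemaKey = namedtuple('SchemaKey', 'origin_did name version')  # assumed field names

def schema_key(s_id):
    s_key = s_id.split(':')
    s_key.pop(1)  # drop the '2' schema marker
    return SchemaKey(*s_key)

print(schema_key('WgWxqztrNooG92RXvxSTWv:2:green:1.0'))
# SchemaKey(origin_did='WgWxqztrNooG92RXvxSTWv', name='green', version='1.0')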
return (protocol or Protocol.DEFAULT).cred_def_id(issuer_did, schema_seq_no)
|
def cred_def_id(issuer_did: str, schema_seq_no: int, protocol: Protocol = None) -> str
|
Return credential definition identifier for input issuer DID and schema sequence number.
Implementation passes to NodePool Protocol.
:param issuer_did: DID of credential definition issuer
:param schema_seq_no: schema sequence number
:param protocol: indy protocol version
:return: credential definition identifier
| 5.017966
| 5.928267
| 0.846447
|
cd_id_m = re.match('([{}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?$'.format(B58), token or '')
return bool(cd_id_m) and ((not issuer_did) or cd_id_m.group(1) == issuer_did)
|
def ok_cred_def_id(token: str, issuer_did: str = None) -> bool
|
Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier
| 5.337448
| 4.894128
| 1.090582
|
if ok_cred_def_id(cd_id):
return int(cd_id.split(':')[3]) # sequence number is token at 0-based position 3
raise BadIdentifier('Bad credential definition identifier {}'.format(cd_id))
|
def cred_def_id2seq_no(cd_id: str) -> int
|
Given a credential definition identifier, return its schema sequence number.
Raise BadIdentifier on input that is not a credential definition identifier.
:param cd_id: credential definition identifier
:return: sequence number
| 7.055903
| 5.498284
| 1.283292
|
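A worked example of the token split inside cred_def_id2seq_no() above, using the sample cred def id that appears in later docstrings:

cd_id = 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag'
print(int(cd_id.split(':')[3]))  # 17 - schema sequence number at 0-based position 3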
return '{}:4:{}:CL_ACCUM:{}'.format(cd_id.split(":", 1)[0], cd_id, tag)
|
def rev_reg_id(cd_id: str, tag: Union[str, int]) -> str
|
Given a credential definition identifier and a tag, return the corresponding
revocation registry identifier, repeating the issuer DID from the
input identifier.
:param cd_id: credential definition identifier
:param tag: tag to use
:return: revocation registry identifier
| 9.481825
| 8.593278
| 1.1034
|
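A format illustration for rev_reg_id() above; the result matches the sample rev reg id used in later docstrings:

def rev_reg_id(cd_id, tag):
    return '{}:4:{}:CL_ACCUM:{}'.format(cd_id.split(':', 1)[0], cd_id, tag)

print(rev_reg_id('WgWxqztrNooG92RXvxSTWv:3:CL:17:tag', 0))
# WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0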
rr_id_m = re.match(
'([{0}]{{21,22}}):4:([{0}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?:CL_ACCUM:.+$'.format(B58),
token or '')
return bool(rr_id_m) and ((not issuer_did) or (rr_id_m.group(1) == issuer_did and rr_id_m.group(2) == issuer_did))
|
def ok_rev_reg_id(token: str, issuer_did: str = None) -> bool
|
Whether input token looks like a valid revocation registry identifier from input issuer DID (default any); i.e.,
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag>:CL_ACCUM:<rev-reg-id-tag> for protocol >= 1.4, or
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:CL_ACCUM:<rev-reg-id-tag> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid revocation registry identifier
| 4.391204
| 3.423315
| 1.282734
|
if ok_rev_reg_id(rr_id):
return ':'.join(rr_id.split(':')[2:-2]) # rev reg id comprises (prefixes):<cred_def_id>:(suffixes)
raise BadIdentifier('Bad revocation registry identifier {}'.format(rr_id))
|
def rev_reg_id2cred_def_id(rr_id: str) -> str
|
Given a revocation registry identifier, return its corresponding credential definition identifier.
Raise BadIdentifier if input is not a revocation registry identifier.
:param rr_id: revocation registry identifier
:return: credential definition identifier
| 8.229055
| 7.162324
| 1.148936
|
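A worked example of the slice inside rev_reg_id2cred_def_id() above, on a rev reg id of the form rev_reg_id() produces:

rr_id = 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0'
print(':'.join(rr_id.split(':')[2:-2]))  # WgWxqztrNooG92RXvxSTWv:3:CL:17:tag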
if ok_rev_reg_id(rr_id):
return (
':'.join(rr_id.split(':')[2:-2]), # rev reg id comprises (prefixes):<cred_def_id>:(suffixes)
str(rr_id.split(':')[-1]) # tag is last token
)
raise BadIdentifier('Bad revocation registry identifier {}'.format(rr_id))
|
def rev_reg_id2cred_def_id_tag(rr_id: str) -> (str, str)
|
Given a revocation registry identifier, return its corresponding credential definition identifier and
(stringified int) tag. Raise BadIdentifier if input is not a revocation registry identifier.
:param rr_id: revocation registry identifier
:return: credential definition identifier and tag
| 5.916657
| 5.58742
| 1.058925
|
if isinstance(briefs, dict):
if all(ok_wallet_reft(k) for k in briefs):
return tuple(briefs.values())
return (briefs,)
return tuple(briefs)
|
def iter_briefs(briefs: Union[dict, Sequence[dict]]) -> tuple
|
Given a cred-brief/cred-info, a sequence thereof, or cred-brief-dict
(as HolderProver.get_cred_briefs_by_proof_req_q() returns), return tuple with
all contained cred-briefs.
:param briefs: cred-brief/cred-info, sequence thereof, or cred-brief-dict
:return: tuple of cred-briefs
| 7.282805
| 6.508822
| 1.118913
|
rv = {}
for brief in iter_briefs(briefs):
cred_info = brief.get('cred_info', {}) or brief # briefs could be cred-infos or cred-briefs
cred_id = cred_info['referent']
if ((cred_id not in rv) and (not cred_ids or cred_id in [cred_ids, [cred_ids]][isinstance(cred_ids, str)])):
rv[cred_id] = {
'schema_id': cred_info['schema_id'],
'cred_def_id': cred_info['cred_def_id'],
'rev_reg_id': cred_info['rev_reg_id']
}
return rv
|
def box_ids(briefs: Union[dict, Sequence[dict]], cred_ids: Union[Sequence[str], str] = None) -> dict
|
Given one or more cred-briefs/cred-infos, and an optional sequence of credential identifiers
(aka wallet cred ids, referents; specify None to include all), return dict mapping each
credential identifier to a box ids structure (i.e., a dict specifying its corresponding
schema identifier, credential definition identifier, and revocation registry identifier,
the latter being None if cred def does not support revocation).
:param briefs: cred-brief/cred-info, sequence thereof, or cred-brief-dict
:param cred_ids: credential identifier or sequence thereof for which to find corresponding
schema identifiers, None for all
:return: dict mapping each credential identifier to its corresponding box ids (empty dict if
no matching credential identifiers present)
| 3.231514
| 2.540356
| 1.272071
|
rv = deepcopy(creds)
for key in ('attrs', 'predicates'):
for attr_uuid, creds_by_uuid in rv[key].items():
rv[key][attr_uuid] = [cred for cred in creds_by_uuid if cred['cred_info']['referent'] in cred_ids]
empties = [attr_uuid for attr_uuid in rv[key] if not rv[key][attr_uuid]]
for attr_uuid in empties:
del rv[key][attr_uuid]
return json.dumps(rv)
|
def prune_creds_json(creds: dict, cred_ids: set) -> str
|
Strip all creds out of the input json structure that do not match any of the input credential identifiers.
:param creds: indy-sdk creds structure
:param cred_ids: the set of credential identifiers of interest
:return: the reduced creds json
| 2.986479
| 2.465142
| 1.211484
|
rv = []
refts = proof_req_attr_referents(proof_req)
for info in iter_briefs(infos):
if info['cred_def_id'] not in refts:
continue
brief = {
'cred_info': info,
'interval': {}
}
fro = None
to = None
for uuid in refts[info['cred_def_id']].values():
interval = proof_req['requested_attributes'][uuid].get('non_revoked', {})
if 'from' in interval:
fro = min(fro or interval['from'], interval['from'])
if 'to' in interval:
to = max(to or interval['to'], interval['to'])
if to:
brief['interval']['to'] = to
if fro:
brief['interval']['from'] = fro
if not brief['interval']:
brief['interval'] = None
rv.append(brief)
return rv
|
def proof_req_infos2briefs(proof_req: dict, infos: Union[dict, Sequence[dict]]) -> list
|
Given a proof request and corresponding cred-info(s), return a list of cred-briefs
(i.e., cred-info plus interval).
The proof request must have cred def id restrictions on all requested attribute specifications.
:param proof_req: proof request
:param infos: cred-info or sequence thereof; e.g.,
::
[
{
'attrs': {
'auditDate': '2018-07-30',
'greenLevel': 'Silver',
'legalName': 'Tart City'
},
'cred_rev_id': '48',
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag',
'referent': 'c15674a9-7321-440d-bbed-e1ac9273abd5',
'rev_reg_id': 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0',
'schema_id': 'WgWxqztrNooG92RXvxSTWv:2:green:1.0'
},
...
]
:return: list of cred-briefs
| 2.701763
| 2.499095
| 1.081097
|
rv = {}
attr_refts = proof_req_attr_referents(proof_req)
for cd_id in [k for k in attr_refts if k not in ([x_cd_ids] if isinstance(x_cd_ids, str) else x_cd_ids or [])]:
rv[set(attr_refts[cd_id].values()).pop()] = {"cred_def_id": cd_id}
return rv
|
def proof_req2wql_all(proof_req: dict, x_cd_ids: Union[str, Sequence[str]] = None) -> dict
|
Given a proof request and a list of cred def ids to omit, return an extra WQL query dict
that will find all corresponding credentials in search.
The proof request must have cred def id restrictions on all requested attribute specifications.
At present, the utility does not support predicates.
:param proof_req: proof request
:param x_cd_ids: cred def identifier or sequence thereof to omit
:return: extra WQL dict to fetch all corresponding credentials in search.
| 3.623859
| 3.137127
| 1.155152
|
rv = {}
for uuid, spec in proof_req['requested_attributes'].items():
cd_id = None
for restriction in spec.get('restrictions', []):
cd_id = restriction.get('cred_def_id', None)
if cd_id:
break
if not cd_id:
continue
if cd_id not in rv: # cd_id of None is not OK
rv[cd_id] = {}
rv[cd_id][spec['name']] = uuid
return rv
|
def proof_req_attr_referents(proof_req: dict) -> dict
|
Given a proof request with all requested attributes having cred def id restrictions,
return its attribute referents by cred def id and attribute.
The returned structure can be useful in populating the extra WQL query parameter
in the credential search API.
:param proof_req: proof request with all requested attribute specifications having cred def id restriction; e.g.,
::
{
'name": 'proof_req',
'version': '0.0',
'requested_attributes': {
'18_greenLevel_uuid': {
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'
}
],
'name': 'greenLevel',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
},
'18_legalName_uuid': {
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'
}
],
'name': 'legalName',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
},
'15_id_uuid': { # this specification will not show up in response: no cred def id restriction :-(
'name': 'id',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
}
},
'requested_predicates': {
}
}
:return: nested dict mapping cred def id to name to proof request referent; e.g.,
::
{
'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag': {
'legalName': '18_legalName_uuid'
'greenLevel': '18_greenLevel_uuid'
}
}
| 3.219794
| 2.53094
| 1.272173
|
rv = {}
for brief in iter_briefs(briefs):
cred_info = brief.get('cred_info', {}) or brief # briefs could be cred-infos or cred-briefs
(rr_id, cr_id) = (cred_info['rev_reg_id'], cred_info['cred_rev_id'])
if (rr_id, cr_id) in rv or rr_id is None or cr_id is None:
continue
if not filt:
rv[(rr_id, cr_id)] = cred_info['attrs']
continue
if ({attr: str(filt[attr]) for attr in filt}.items() <= cred_info['attrs'].items()):
rv[(rr_id, cr_id)] = cred_info['attrs']
return rv
|
def revoc_info(briefs: Union[dict, Sequence[dict]], filt: dict = None) -> dict
|
Given a cred-brief, cred-info or sequence of either, return a dict mapping pairs
(revocation registry identifier, credential revocation identifier)
to attribute name: (raw) value dicts.
If the caller includes a filter of attribute:value pairs, retain only matching attributes.
:param briefs: cred-brief/cred-info, or sequence thereof
:param filt: dict mapping attributes to values of interest; e.g.,
::
{
'legalName': 'Flan Nebula',
'effectiveDate': '2018-01-01',
'endDate': None
}
:return: dict mapping (rev_reg_id, cred_rev_id) pairs to (raw) attributes; e.g.,
::
{
('LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1', '2'): {
'busId': '11121398',
'effectiveDate': '2010-10-10',
'endDate': '',
'id': '1',
'jurisdictionId': '1',
'legalName': 'The Original House of Pies',
'orgTypeId': '2'},
('LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1', '3'): {
'busId': '11133333',
'effectiveDate': '2011-10-01',
'endDate': '',
'id': '2',
'jurisdictionId': '1',
'legalName': 'Planet Cake',
'orgTypeId': '1'}
}
| 3.223594
| 2.609209
| 1.235468
|
rv = {}
for sub_index in range(len(proof['identifiers'])):
cd_id = proof['identifiers'][sub_index]['cred_def_id']
rv[cd_id] = ({ # uses von_anchor convention for uuid (referent) construction: will break on foreign anchor's
'_'.join(uuid.split('_')[1:-1]): proof['requested_proof']['revealed_attrs'][uuid]['raw']
for uuid in proof['requested_proof']['revealed_attrs']
if proof['requested_proof']['revealed_attrs'][uuid]['sub_proof_index'] == sub_index})
return rv
|
def revealed_attrs(proof: dict) -> dict
|
Fetch revealed attributes from input proof and return dict mapping credential definition identifiers
to dicts, each dict mapping attribute names to (raw) values, for processing in further creds downstream.
:param proof: indy-sdk proof as dict
:return: dict mapping cred-ids to dicts, each mapping revealed attribute names to (raw) values
| 7.155907
| 6.151875
| 1.163208
|
self._config = value or {}
validate_config('verifier', self._config)
|
def config(self, value: dict) -> None
|
Set configuration dict
:param value: configuration dict
| 27.219109
| 21.537014
| 1.263829
|
LOGGER.debug('_Verifier._build_rr_state_json >>> rr_id: %s, timestamp: %s', rr_id, timestamp)
if not ok_rev_reg_id(rr_id):
LOGGER.debug('Verifier._build_rr_state_json <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_json = None
ledger_timestamp = None
get_rr_req_json = await ledger.build_get_revoc_reg_request(self.did, rr_id, timestamp)
resp_json = await self._submit(get_rr_req_json)
resp = json.loads(resp_json)
if resp.get('result', {}).get('data', None) and resp['result']['data'].get('value', None):
# timestamp at or beyond rev reg creation, carry on
try:
(_, rr_json, ledger_timestamp) = await ledger.parse_get_revoc_reg_response(resp_json)
except IndyError: # ledger replied, but there is no such rev reg available
LOGGER.debug('Verifier._build_rr_state_json <!< no rev reg exists on %s', rr_id)
raise AbsentRevReg('No rev reg exists on {}'.format(rr_id))
else:
LOGGER.debug(
'_Verifier._build_rr_state_json <!< Rev reg %s created after asked-for time %s',
rr_id,
timestamp)
raise BadRevStateTime('Rev reg {} created after asked-for time {}'.format(rr_id, timestamp))
rv = (rr_json, ledger_timestamp)
LOGGER.debug('_Verifier._build_rr_state_json <<< %s', rv)
return rv
|
async def _build_rr_state_json(self, rr_id: str, timestamp: int) -> (str, int)
|
Build rev reg state json at a given requested timestamp.
Return rev reg state json and its transaction time on the distributed ledger,
with upper bound at input timestamp of interest.
Raise AbsentRevReg if no revocation registry exists on input rev reg id,
or BadRevStateTime if requested timestamp predates revocation registry creation.
:param rr_id: rev reg id
:param timestamp: timestamp of interest (epoch seconds)
:return: rev reg state json and ledger timestamp (epoch seconds)
| 2.819784
| 2.410491
| 1.169796
|
LOGGER.debug('Verifier.load_cache_for_verification >>> archive: %s', archive)
rv = int(time())
for s_id in self.config.get('archive-verifier-caches-on-close', {}).get('schema_id', {}):
if ok_schema_id(s_id):
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
else:
LOGGER.info('Not archiving schema for specified bad id %s', s_id)
for cd_id in self.config.get('archive-verifier-caches-on-close', {}).get('cred_def_id', {}):
if ok_cred_def_id(cd_id):
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
else:
LOGGER.info('Not archiving cred def for specified bad id %s', cd_id)
for rr_id in self.config.get('archive-verifier-caches-on-close', {}).get('rev_reg_id', {}):
if ok_rev_reg_id(rr_id):
await self.get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_state_json(self._build_rr_state_json, rv, rv)
except ClosedPool:
LOGGER.warning(
'Verifier %s is offline from pool %s, cannot update revo cache reg state for %s to %s',
self.name,
self.pool.name,
rr_id,
rv)
except AbsentPool:
LOGGER.warning(
'Verifier %s has no pool, cannot update revo cache reg state for %s to %s',
self.name,
rr_id,
rv)
else:
LOGGER.info('Not archiving rev reg for specified bad id %s', rr_id)
if archive:
ArchivableCaches.archive(self.dir_cache)
LOGGER.debug('Verifier.load_cache_for_verification <<< %s', rv)
return rv
|
async def load_cache_for_verification(self, archive: bool = False) -> int
|
Load schema, cred def, revocation caches; optionally archive enough to go
offline and be able to verify proof on content marked of interest in configuration.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:param archive: True to archive now or False to demur (subclasses may still
need to augment archivable caches further)
:return: cache load event timestamp (epoch seconds)
| 2.771846
| 2.540216
| 1.091185
|
LOGGER.debug('Verifier.open >>>')
await super().open()
if self.config.get('parse-caches-on-open', False):
ArchivableCaches.parse(self.dir_cache)
LOGGER.debug('Verifier.open <<<')
return self
|
async def open(self) -> 'Verifier'
|
Explicit entry. Perform ancestor opening operations,
then parse cache from archive if so configured, and
synchronize revocation registry to tails tree content.
:return: current object
| 8.635029
| 7.636155
| 1.130809
|
LOGGER.debug('Verifier.close >>>')
if self.config.get('archive-verifier-caches-on-close', {}):
await self.load_cache_for_verification(True)
ArchivableCaches.purge_archives(self.dir_cache, True)
await BaseAnchor.close(self)
LOGGER.debug('Verifier.close <<<')
|
async def close(self) -> None
|
Explicit exit. If so configured, populate cache to prove for any creds on schemata,
cred defs, and rev regs marked of interest in configuration at initialization,
archive cache, and purge prior cache archives.
:return: current object
| 12.779502
| 9.443688
| 1.353232
|
LOGGER.debug('Verifier.check_encoding >>> proof_req: %s, proof: %s', proof_req, proof)
cd_id2proof_id = {} # invert proof['identifiers'] per cd_id
p_preds = {} # cd_id and attr to bound
for idx in range(len(proof['identifiers'])):
cd_id = proof['identifiers'][idx]['cred_def_id']
cd_id2proof_id[cd_id] = idx # since at most 1 cred per cred def
p_preds[cd_id] = {
ge_proof['predicate']['attr_name']: ge_proof['predicate']['value']
for ge_proof in proof['proof']['proofs'][idx]['primary_proof']['ge_proofs']
}
for (uuid, req_attr) in proof_req['requested_attributes'].items(): # proof req xref proof per revealed attr
canon_attr = canon(req_attr['name'])
proof_ident_idx = cd_id2proof_id[req_attr['restrictions'][0]['cred_def_id']]
enco = proof['proof']['proofs'][proof_ident_idx]['primary_proof']['eq_proof']['revealed_attrs'].get(
canon_attr)
if not enco:
continue # requested but declined from revelation in proof: must appear in a predicate
if enco != proof['requested_proof']['revealed_attrs'][uuid]['encoded']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
if enco != encode(proof['requested_proof']['revealed_attrs'][uuid]['raw']):
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
for (uuid, req_pred) in proof_req['requested_predicates'].items(): # proof req xref proof per pred
canon_attr = canon(req_pred['name'])
if p_preds[req_pred['restrictions'][0]['cred_def_id']].get(canon_attr) != req_pred['p_value']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
LOGGER.debug('Verifier.check_proof_encoding <<< True')
return True
|
def check_encoding(proof_req: dict, proof: dict) -> bool
|
Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
:param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch
| 3.372679
| 3.233106
| 1.04317
|
for pktype in PublicKeyType:
if value in (pktype.ver_type, pktype.authn_type):
return pktype
return None
|
def get(value: str) -> 'Protocol'
|
Return enum instance corresponding to input key type value ('RsaVerificationKey2018' etc.)
| 14.045477
| 9.72917
| 1.443646
|
return {
'id': self.id,
'type': str(self.type.ver_type),
'controller': canon_ref(self.did, self.controller),
**self.type.specification(self.value)
}
|
def to_dict(self)
|
Return dict representation of public key to embed in DID document.
| 11.736194
| 6.527452
| 1.797975
|
logging.basicConfig(level=logging.WARN, format='%(levelname)-8s | %(name)-12s | %(message)s')
logging.getLogger('indy').setLevel(logging.ERROR)
path_start = join(RevRegBuilder.dir_tails_sentinel(wallet_name), '.start')
with open(path_start, 'r') as fh_start:
start_data = json.loads(fh_start.read())
remove(path_start)
logging.getLogger(__name__).setLevel(start_data['logging']['level'])
for path_log in start_data['logging']['paths']:
logging.getLogger(__name__).addHandler(logging.FileHandler(path_log))
wallet = WalletManager().get(
{
'id': wallet_name,
'storage_type': start_data['wallet']['storage_type'],
**start_data['wallet']['config'],
},
access=start_data['wallet']['access_creds'].get('key', None))
async with wallet, RevRegBuilder(wallet, rrbx=True) as rrban:
await rrban.serve()
|
async def main(wallet_name: str) -> None
|
Main line for revocation registry builder operating in external process on behalf of issuer agent.
:param wallet_name: wallet name - must match that of issuer with existing wallet
| 4.43446
| 4.115571
| 1.077483
|
rv = {
'logging': {
'paths': []
},
'wallet': {
}
}
logger = LOGGER
while not logger.level:
logger = logger.parent
if logger is None:
break
rv['logging']['level'] = logger.level
logger = LOGGER
log_paths = [realpath(h.baseFilename) for h in logger.handlers if hasattr(h, 'baseFilename')]
while not log_paths:
logger = logger.parent
if logger is None:
break
log_paths = [realpath(h.baseFilename) for h in logger.handlers if hasattr(h, 'baseFilename')]
for log_path in log_paths:
rv['logging']['paths'].append(log_path)
rv['wallet']['storage_type'] = self.wallet.storage_type
rv['wallet']['config'] = self.wallet.config
rv['wallet']['access_creds'] = self.wallet.access_creds
return json.dumps(rv)
|
def _start_data_json(self) -> str
|
Output json with start data to write for external revocation registry builder process pickup.
:return: logging and wallet init data json
| 2.743318
| 2.541827
| 1.07927
|
dir_sentinel = RevRegBuilder.dir_tails_sentinel(wallet_name)
file_pid = join(dir_sentinel, '.pid')
file_start = join(dir_sentinel, '.start')
file_stop = join(dir_sentinel, '.stop')
if isfile(file_stop):
return _STATE.STOPPING
if isfile(file_start) or isfile(file_pid):
return _STATE.RUNNING
return _STATE.ABSENT
|
def _get_state(wallet_name: str) -> _STATE
|
Return current state of revocation registry builder process.
:param wallet_name: name of wallet for corresponding Issuer
:return: current process state as _STATE enum
| 4.841468
| 3.914337
| 1.236855
|
return join(self.dir_tails_hopper, rr_id) if self.external else self.dir_tails
|
def dir_tails_top(self, rr_id) -> str
|
Return top of tails tree for input rev reg id.
:param rr_id: revocation registry identifier
:return: top of tails tree
| 11.672411
| 11.401006
| 1.023805
|
return join(self.dir_tails_top(rr_id), rev_reg_id2cred_def_id(rr_id))
|
def dir_tails_target(self, rr_id) -> str
|
Return target directory for revocation registry and tails file production.
:param rr_id: revocation registry identifier
:return: tails target directory
| 11.14429
| 9.280419
| 1.200839
|
try:
makedirs(join(self._dir_tails_sentinel, rr_id), exist_ok=False)
except FileExistsError:
LOGGER.warning('Rev reg %s construction already in progress', rr_id)
else:
open(join(self._dir_tails_sentinel, rr_id, '.{}'.format(rr_size)), 'w').close()
|
def mark_in_progress(self, rr_id: str, rr_size: int) -> None
|
Prepare sentinel directory for revocation registry construction.
:param rr_id: revocation registry identifier
:param rr_size: size of revocation registry to build
| 5.368577
| 4.063992
| 1.321011
|
LOGGER.debug('RevRegBuilder.serve >>>')
assert self.external
file_pid = join(self._dir_tails_sentinel, '.pid')
if isfile(file_pid):
with open(file_pid, 'r') as fh_pid:
pid = int(fh_pid.read())
try:
kill(pid, 0)
except ProcessLookupError:
remove(file_pid)
LOGGER.info('RevRegBuilder removed derelict .pid file')
except PermissionError:
LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)
LOGGER.debug('RevRegBuilder.serve <<<')
return
else:
LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)
LOGGER.debug('RevRegBuilder.serve <<<')
return
pid = getpid()
with open(file_pid, 'w') as pid_fh:
print(str(pid), file=pid_fh)
file_stop = join(self._dir_tails_sentinel, '.stop')
while True:
if isfile(file_stop): # stop now, pick up any pending tasks next invocation
remove(file_stop)
remove(file_pid)
break
p_pending = [join(self._dir_tails_sentinel, d) for d in listdir(self._dir_tails_sentinel)
if isdir(join(self._dir_tails_sentinel, d))]
p_pending = [p for p in p_pending if [s for s in listdir(p) if s.startswith('.')]] # size marker
if p_pending:
pdir = basename(p_pending[0])
rr_id = pdir
rr_size = int([s for s in listdir(p_pending[0]) if s.startswith('.')][0][1:])
open(join(p_pending[0], '.in-progress'), 'w').close()
await self.create_rev_reg(rr_id, rr_size or None)
rmtree(p_pending[0])
await asyncio.sleep(1)
LOGGER.debug('RevRegBuilder.serve <<<')
|
async def serve(self) -> None
|
Write pidfile to sentinel directory if need be, and wait for sentinels
to shut down or build revocation registry and tails file.
| 3.471516
| 3.217746
| 1.078866
|
LOGGER.debug('RevRegBuilder.stop >>>')
dir_sentinel = join(RevRegBuilder.dir_tails_sentinel(wallet_name))
if isdir(dir_sentinel):
open(join(dir_sentinel, '.stop'), 'w').close() # touch
while any(isfile(join(dir_sentinel, d, '.in-progress')) for d in listdir(dir_sentinel)):
await asyncio.sleep(1)
LOGGER.debug('RevRegBuilder.stop <<<')
|
async def stop(wallet_name: str) -> None
|
Gracefully stop an external revocation registry builder, waiting for its current tasks to complete.
The indy-sdk toolkit uses a temporary directory for tails file generation,
and shutting down the toolkit removes the directory, crashing the external
tails file write. This method allows a graceful stop to wait for completion
of such tasks already in progress.
:param wallet_name: name of the external revocation registry builder to check
| 6.403008
| 4.486384
| 1.427209
|
LOGGER.debug('RevRegBuilder.create_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size)
if not self.wallet.handle:
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
if not ok_rev_reg_id(rr_id):
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_size = rr_size or 64
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
dir_tails = self.dir_tails_top(rr_id)
dir_target = self.dir_tails_target(rr_id)
if self.external:
try:
makedirs(dir_target, exist_ok=False)
except FileExistsError:
LOGGER.warning(
'RevRegBuilder.create_rev_reg found dir %s, but task not in progress: rebuilding rev reg %s',
dir_target,
rr_id)
rmtree(dir_target)
makedirs(dir_target, exist_ok=False)
LOGGER.info('Creating revocation registry (capacity %s) for rev reg id %s', rr_size, rr_id)
tails_writer_handle = await blob_storage.open_writer(
'default',
json.dumps({
'base_dir': dir_target,
'uri_pattern': ''
}))
(created_rr_id, rr_def_json, rr_ent_json) = await anoncreds.issuer_create_and_store_revoc_reg(
self.wallet.handle,
self.did,
'CL_ACCUM',
tag,
cd_id,
json.dumps({
'max_cred_num': rr_size,
'issuance_type': 'ISSUANCE_BY_DEFAULT'
}),
tails_writer_handle)
tails_hash = basename(Tails.unlinked(dir_target).pop())
with open(join(dir_target, 'rr_def.json'), 'w') as rr_def_fh:
print(rr_def_json, file=rr_def_fh)
with open(join(dir_target, 'rr_ent.json'), 'w') as rr_ent_fh:
print(rr_ent_json, file=rr_ent_fh)
Tails.associate(dir_tails, created_rr_id, tails_hash) # associate last: symlink signals completion
LOGGER.debug('RevRegBuilder.create_rev_reg <<<')
|
async def create_rev_reg(self, rr_id: str, rr_size: int = None) -> None
|
Create revocation registry artifacts and new tails file (with association to
corresponding revocation registry identifier via symbolic link name)
for input revocation registry identifier. Symbolic link presence signals completion.
If revocation registry builder operates in a process external to its Issuer's,
target directory is hopper directory.
Raise WalletState for closed wallet.
:param rr_id: revocation registry identifier
:param rr_size: revocation registry size (defaults to 64)
| 2.941674
| 2.702183
| 1.088629
|
nodelist = parser.parse(("endultracache",))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
return UltraCacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(token) for token in tokens[3:]])
|
def do_ultracache(parser, token)
|
Based on Django's default cache template tag
| 2.921975
| 2.857742
| 1.022477
|
if elide_to is not None:
elide_to = max(elide_to, 3) # make room for ellipses '...'
try:
rv = json.dumps(json.loads(dumpit) if isinstance(dumpit, str) else dumpit, indent=4)
except TypeError:
rv = '{}'.format(pformat(dumpit, indent=4, width=120))
return rv if elide_to is None or len(rv) <= elide_to else '{}...'.format(rv[0 : elide_to - 3])
|
def ppjson(dumpit: Any, elide_to: int = None) -> str
|
JSON pretty printer, whether already json-encoded or not
:param dumpit: object to pretty-print
:param elide_to: optional maximum length including ellipses ('...')
:return: json pretty-print
| 2.928441
| 2.646636
| 1.106477
|
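A small usage sketch for ppjson() above (the function body is repeated so the snippet runs on its own):

import json
from pprint import pformat

def ppjson(dumpit, elide_to=None):
    if elide_to is not None:
        elide_to = max(elide_to, 3)  # make room for ellipses '...'
    try:
        rv = json.dumps(json.loads(dumpit) if isinstance(dumpit, str) else dumpit, indent=4)
    except TypeError:
        rv = '{}'.format(pformat(dumpit, indent=4, width=120))
    return rv if elide_to is None or len(rv) <= elide_to else '{}...'.format(rv[0: elide_to - 3])

print(ppjson({'name': 'proof_req', 'version': '0.0'}))  # pretty-printed dict
print(ppjson('{"a": 1}', elide_to=12))                  # already-encoded json, elided to 12 chars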
event_loop = None
try:
event_loop = asyncio.get_event_loop()
except RuntimeError:
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
return event_loop.run_until_complete(coro)
|
def do_wait(coro: Callable) -> Any
|
Perform asynchronous operation; await then return the result.
:param coro: coroutine to await
:return: coroutine result
| 1.903814
| 1.795124
| 1.060548
|
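A minimal usage sketch for do_wait() above (the function body is repeated so the snippet runs on its own; the coroutine is hypothetical):

import asyncio

def do_wait(coro):
    try:
        event_loop = asyncio.get_event_loop()
    except RuntimeError:
        event_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(event_loop)
    return event_loop.run_until_complete(coro)

async def add(a, b):
    return a + b

print(do_wait(add(2, 3)))  # 5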
var_dflt = r'\${(.*?):-(.*?)}'
def _interpolate(content):
rv = expandvars(content)
while True:
match = re.search(var_dflt, rv)
if match is None:
break
bash_var = '${{{}}}'.format(match.group(1))
value = expandvars(bash_var)
rv = re.sub(var_dflt, match.group(2) if value == bash_var else value, rv, count=1)
return rv
parser = ConfigParser()
for ini in [ini_paths] if isinstance(ini_paths, str) else ini_paths:
if not isfile(ini):
raise FileNotFoundError('No such file: {}'.format(ini))
with open(ini, 'r') as ini_fh:
ini_text = _interpolate(ini_fh.read())
parser.read_string(ini_text)
return {s: dict(parser[s].items()) for s in parser.sections()}
|
def inis2dict(ini_paths: Union[str, Sequence[str]]) -> dict
|
Take one or more ini files and return a dict with configuration from all,
interpolating bash-style variables ${VAR} or ${VAR:-DEFAULT}.
:param ini_paths: path or paths to .ini files
| 3.006112
| 2.904697
| 1.034914
|
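A usage sketch for inis2dict() above, assuming it is in scope; the file name, section, and environment variables here are hypothetical:

import os

os.environ['POOL_NAME'] = 'sandbox'
with open('demo.ini', 'w') as ini_fh:
    ini_fh.write('[Node Pool]\nname = ${POOL_NAME}\ngenesis.txn.path = ${GENESIS:-/tmp/genesis.txn}\n')

print(inis2dict('demo.ini'))
# {'Node Pool': {'name': 'sandbox', 'genesis.txn.path': '/tmp/genesis.txn'}}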
self._mark[:] = [self._mark[1], time()]
rv = self._mark[1] - self._mark[0]
if digits is not None and digits > 0:
rv = round(rv, digits)
elif digits == 0 or self._digits == 0:
rv = int(rv)
elif self._digits is not None and self._digits > 0:
rv = round(rv, self._digits)
return rv
|
def mark(self, digits: int = None) -> float
|
Return time in seconds since last mark, reset, or construction.
:param digits: number of fractional decimal digits to retain (default as constructed)
| 3.364184
| 3.117268
| 1.079209
|
LOGGER.debug('StorageRecordSearch.open >>>')
if self.opened:
LOGGER.debug('StorageRecordSearch.open <!< Search is already opened')
raise BadSearch('Search is already opened')
if not self._wallet.opened:
LOGGER.debug('StorageRecordSearch.open <!< Wallet %s is closed', self._wallet.name)
raise WalletState('Wallet {} is closed'.format(self._wallet.name))
self._handle = await non_secrets.open_wallet_search(
self._wallet.handle,
self._type,
self._query_json,
StorageRecordSearch.OPTIONS_JSON)
LOGGER.debug('StorageRecordSearch.open <<<')
|
async def open(self) -> None
|
Begin the search operation.
| 5.091667
| 4.613303
| 1.103692
|
LOGGER.debug('StorageRecordSearch.fetch >>> limit: %s', limit)
if not self.opened:
LOGGER.debug('StorageRecordSearch.fetch <!< Storage record search is closed')
raise BadSearch('Storage record search is closed')
if not self._wallet.opened:
LOGGER.debug('StorageRecordSearch.fetch <!< Wallet %s is closed', self._wallet.name)
raise WalletState('Wallet {} is closed'.format(self._wallet.name))
records = json.loads(await non_secrets.fetch_wallet_search_next_records(
self._wallet.handle,
self.handle,
limit or Wallet.DEFAULT_CHUNK))['records'] or [] # at exhaustion results['records'] = None
rv = [StorageRecord(typ=rec['type'], value=rec['value'], tags=rec['tags'], ident=rec['id']) for rec in records]
LOGGER.debug('StorageRecordSearch.fetch <<< %s', rv)
return rv
|
async def fetch(self, limit: int = None) -> Sequence[StorageRecord]
|
Fetch next batch of search results.
Raise BadSearch if search is closed, WalletState if wallet is closed.
:param limit: maximum number of records to return (default value Wallet.DEFAULT_CHUNK)
:return: next batch of records found
| 5.066691
| 3.600633
| 1.407167
|
LOGGER.debug('StorageRecordSearch.close >>>')
if self._handle:
await non_secrets.close_wallet_search(self.handle)
self._handle = None
LOGGER.debug('StorageRecordSearch.close <<<')
|
async def close(self) -> None
|
Close search.
| 12.079213
| 9.525003
| 1.268158
|
if self._cache_id:
return self._cache_id
with open(join(expanduser('~'), '.indy_client', 'pool', self.name, '{}.txn'.format(self.name))) as fh_genesis:
genesis = [json.loads(line) for line in fh_genesis.readlines() if line]
hps = []
for gen_txn in genesis:
hps.append(self.protocol.genesis_host_port(gen_txn))
hps.sort() # canonicalize to make order irrelevant
self._cache_id = ':'.join('{}:{}'.format(hp[0], hp[1]) for hp in hps)
return self._cache_id
|
def cache_id(self) -> str
|
Return identifier for archivable caches, computing it first and retaining it if need be.
Raise AbsentPool if ledger configuration is not yet available.
:return: archivable cache identifier
| 4.369932
| 4.044396
| 1.080491
|
LOGGER.debug('NodePool.open >>>')
await pool.set_protocol_version(self.protocol.indy())
LOGGER.info('Pool ledger %s set protocol %s', self.name, self.protocol)
try:
self._handle = await pool.open_pool_ledger(self.name, json.dumps(self.config))
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.PoolLedgerNotCreatedError:
LOGGER.debug('NodePool.open <!< Absent node pool %s ledger configuration', self.name)
raise AbsentPool('Absent node pool {} ledger configuration'.format(self.name))
LOGGER.debug(
'NodePool.open <!< cannot open node pool %s: indy error code %s',
self.name,
x_indy.error_code)
raise
LOGGER.debug('NodePool.open <<<')
return self
|
async def open(self) -> 'NodePool'
|
Explicit entry. Opens pool as configured, for later closure via close().
Creates pool if it does not yet exist, using configured genesis transaction file.
For use when keeping pool open across multiple calls.
Raise AbsentPool if node pool ledger configuration is not available.
:return: current object
| 3.436645
| 3.208381
| 1.071146
|
LOGGER.debug('NodePool.close >>>')
if not self.handle:
LOGGER.warning('Abstaining from closing pool %s: already closed', self.name)
else:
await pool.close_pool_ledger(self.handle)
self._handle = None
LOGGER.debug('NodePool.close <<<')
|
async def close(self) -> None
|
Explicit exit. Closes pool. For use when keeping pool open across multiple calls.
| 7.008117
| 5.925852
| 1.182634
|
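A sketch of the explicit entry/exit pattern that NodePool.open and NodePool.close describe, assuming `pool` is an already-configured NodePool and `work` is any coroutine function taking the open pool:

async def with_open_pool(pool, work):
    # Open the node pool, run an async callable against it, and always close it afterward.
    await pool.open()  # raises AbsentPool if the ledger configuration is absent
    try:
        return await work(pool)
    finally:
        await pool.close()  # harmless if already closed: close() only logs a warning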
LOGGER.debug('NodePool.refresh >>>')
await pool.refresh_pool_ledger(self.handle)
LOGGER.debug('NodePool.refresh <<<')
|
async def refresh(self) -> None
|
Refresh local copy of pool ledger and update node pool connections.
| 14.176077
| 6.341352
| 2.235497
|
LOGGER.debug('BaseAnchor.reseed >>> seed: [SEED]')
verkey = await self.wallet.reseed_init(seed)
req_json = await ledger.build_nym_request(
self.did,
self.did,
verkey,
self.name,
(await self.get_nym_role()).token())
await self._sign_submit(req_json)
await self.wallet.reseed_apply()
LOGGER.debug('BaseAnchor.reseed <<<')
|
async def reseed(self, seed: str = None) -> None
|
Rotate key for VON anchor: generate new key, submit to ledger, update wallet.
Raise WalletState if wallet is currently closed.
:param seed: new seed for ed25519 key pair (default random)
| 6.410273
| 5.361929
| 1.195516
|
LOGGER.debug('BaseAnchor.get_nym >>> target_did: %s', target_did)
if target_did and not ok_did(target_did):
LOGGER.debug('BaseAnchor.get_nym <!< Bad DID %s', target_did)
raise BadIdentifier('Bad DID {}'.format(target_did))
if not (target_did or self.did):
LOGGER.debug('BaseAnchor.get_nym <!< Bad wallet state: DID for %s unavailable', self.name)
raise WalletState('Bad wallet state: DID for {} unavailable'.format(self.name))
rv = json.dumps({})
get_nym_req = await ledger.build_get_nym_request(self.did, target_did or self.did)
resp_json = await self._submit(get_nym_req)
data_json = (json.loads(resp_json))['result']['data'] # it's double-encoded on the ledger
if data_json:
rv = data_json
LOGGER.debug('BaseAnchor.get_nym <<< %s', rv)
return rv
|
async def get_nym(self, target_did: str = None) -> str
|
Get json cryptonym (including current verification key) for input (anchor) DID from ledger.
Return empty production {} if the ledger has no such cryptonym.
Raise BadLedgerTxn on failure. Raise WalletState if target DID is default (own DID) value but
wallet does not have it (neither created nor opened since initialization).
:param target_did: DID of cryptonym to fetch (default own DID)
:return: cryptonym json
| 2.846352
| 2.620557
| 1.086163
|
LOGGER.debug('BaseAnchor.get_nym_role >>> target_did: %s', target_did)
nym = json.loads(await self.get_nym(target_did))
if not nym:
LOGGER.debug('BaseAnchor.get_nym_role <!< Ledger has no cryptonym for anchor %s', self.name)
raise AbsentNym('Ledger has no cryptonym for anchor {}'.format(self.name))
rv = Role.get(nym['role'])
LOGGER.debug('BaseAnchor.get_nym_role <<< %s', rv)
return rv
|
async def get_nym_role(self, target_did: str = None) -> Role
|
Return the cryptonym role for input DID from the ledger - note that this may exceed
the role of least privilege for the class.
Raise AbsentNym if current anchor has no cryptonym on the ledger, or WalletState if current DID unavailable.
:param target_did: DID of cryptonym role to fetch (default own DID)
:return: identifier for current cryptonym role on ledger
| 3.057786
| 2.487502
| 1.22926
|
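A hedged sketch combining get_nym and get_nym_role above, assuming `anchor` is an open BaseAnchor attached to a pool; an empty cryptonym result is reported rather than raised:

import json

async def describe_nym(anchor, target_did: str = None) -> str:
    # Fetch the cryptonym json for a DID and summarize its verkey and role.
    nym = json.loads(await anchor.get_nym(target_did))
    if not nym:
        return 'No cryptonym on ledger for {}'.format(target_did or anchor.did)
    role = await anchor.get_nym_role(target_did)
    return 'DID {} has verkey {} and role {}'.format(target_did or anchor.did, nym['verkey'], role)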
LOGGER.debug('BaseAnchor.least_role >>>')
rv = Role.TRUST_ANCHOR
LOGGER.debug('BaseAnchor.least_role <<< %s', rv)
return rv
|
def least_role() -> Role
|
Return the indy-sdk role of least privilege for an anchor (class) in building
its cryptonym for the trust anchor to send to the ledger.
:return: role of least privilege by anchor class
| 7.606794
| 4.454543
| 1.707649
|
LOGGER.debug('BaseAnchor.set_did_endpoint >>> remote_did: %s, did_endpoint: %s', remote_did, did_endpoint)
if not ok_did(remote_did):
LOGGER.debug('BaseAnchor.set_did_endpoint <!< Bad DID %s', remote_did)
raise BadIdentifier('Bad DID {}'.format(remote_did))
pairwise_info = (await self.wallet.get_pairwise(remote_did)).get(remote_did, None)
if not pairwise_info:
LOGGER.debug(
'BaseAnchor.set_did_endpoint <!< Anchor %s has no pairwise relation for remote DID %s',
self.name,
remote_did)
raise AbsentRecord('Anchor {} has no pairwise relation for remote DID {}'.format(
self.name,
remote_did))
await self.wallet.write_pairwise(
pairwise_info.their_did,
pairwise_info.their_verkey,
pairwise_info.my_did,
{'did_endpoint': did_endpoint})
rv = EndpointInfo(did_endpoint, pairwise_info.their_verkey)
LOGGER.debug('BaseAnchor.set_did_endpoint <<< %s', rv)
return rv
|
async def set_did_endpoint(self, remote_did: str, did_endpoint: str) -> EndpointInfo
|
Set endpoint as metadata for pairwise remote DID in wallet. Pick up (transport)
verification key from pairwise relation and return with endpoint in EndpointInfo.
Raise BadIdentifier on bad DID. Raise WalletState if wallet is closed.
Raise AbsentRecord if pairwise relation not present in wallet.
:param remote_did: pairwise remote DID
:param did_endpoint: value to set as endpoint in pairwise DID metadata in wallet
:return: endpoint and (transport) verification key
| 2.33408
| 2.070624
| 1.127235
|
LOGGER.debug('BaseAnchor.get_did_endpoint >>> remote_did: %s', remote_did)
if not ok_did(remote_did):
LOGGER.debug('BaseAnchor.get_did_endpoint <!< Bad DID %s', remote_did)
raise BadIdentifier('Bad DID {}'.format(remote_did))
if not self.wallet.handle:
LOGGER.debug('BaseAnchor.get_did_endpoint <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
pairwise_info = (await self.wallet.get_pairwise(remote_did)).get(remote_did, None)
if not (pairwise_info and 'did_endpoint' in pairwise_info.metadata):
LOGGER.debug('BaseAnchor.get_did_endpoint <!< No endpoint for remote DID %s', remote_did)
raise AbsentRecord('No endpoint for remote DID {}'.format(remote_did))
rv = EndpointInfo(pairwise_info.metadata['did_endpoint'], pairwise_info.their_verkey)
LOGGER.debug('BaseAnchor.get_did_endpoint <<< %s', rv)
return rv
|
async def get_did_endpoint(self, remote_did: str) -> EndpointInfo
|
Return endpoint info for remote DID.
Raise BadIdentifier for bad remote DID. Raise WalletState if bypassing cache but wallet is closed.
Raise AbsentRecord for no such endpoint.
:param remote_did: pairwise remote DID
:return: endpoint and (transport) verification key as EndpointInfo
| 2.410407
| 2.077856
| 1.160046
|
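A round-trip sketch over set_did_endpoint and get_did_endpoint above, assuming `anchor` is an open anchor that already holds a pairwise relation for `remote_did` in its wallet:

async def update_did_endpoint(anchor, remote_did: str, url: str):
    # Write a DID endpoint into pairwise metadata, then read it back as EndpointInfo.
    await anchor.set_did_endpoint(remote_did, url)    # raises AbsentRecord if no pairwise relation
    return await anchor.get_did_endpoint(remote_did)  # EndpointInfo: endpoint plus transport verkey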
LOGGER.debug('BaseAnchor.send_endpoint >>> endpoint: %s', endpoint)
ledger_endpoint = await self.get_endpoint()
if ledger_endpoint == endpoint:
LOGGER.info('%s endpoint already set as %s', self.name, endpoint)
    LOGGER.debug('BaseAnchor.send_endpoint <<< (%s already set for %s)', endpoint, self.name)
return
attr_json = json.dumps({
'endpoint': {
'endpoint': endpoint # indy-sdk likes 'ha' here but won't map 'ha' to a URL, only ip:port
}
})
req_json = await ledger.build_attrib_request(self.did, self.did, None, attr_json, None)
await self._sign_submit(req_json)
for _ in range(16): # reasonable timeout
if await self.get_endpoint(None, False) == endpoint:
break
await asyncio.sleep(1)
LOGGER.info('Sent endpoint %s to ledger, waiting 1s for its confirmation', endpoint)
else:
LOGGER.debug('BaseAnchor.send_endpoint <!< timed out waiting on send endpoint %s', endpoint)
raise BadLedgerTxn('Timed out waiting on sent endpoint {}'.format(endpoint))
LOGGER.debug('BaseAnchor.send_endpoint <<<')
|
async def send_endpoint(self, endpoint: str) -> None
|
Send anchor's own endpoint attribute to ledger (and endpoint cache),
if ledger does not yet have input value. Specify None to clear.
Raise BadIdentifier on endpoint not formatted as '<ip-address>:<port>',
BadLedgerTxn on failure, WalletState if wallet is closed.
:param endpoint: value to set as endpoint attribute on ledger and cache:
specify URL or None to clear.
| 5.406131
| 4.487263
| 1.204773
|
LOGGER.debug('BaseAnchor.get_endpoint >>> target_did: %s, from_cache: %s', target_did, from_cache)
rv = None
if not (target_did or self.did):
LOGGER.debug('BaseAnchor.get_endpoint <!< Bad wallet state: DID for %s unavailable', self.name)
raise WalletState('Bad wallet state: DID for {} unavailable'.format(self.name))
target_did = target_did or self.did
if not ok_did(target_did):
LOGGER.debug('BaseAnchor.get_endpoint <!< Bad DID %s', target_did)
raise BadIdentifier('Bad DID {}'.format(target_did))
if from_cache:
with ENDPOINT_CACHE.lock:
if target_did in ENDPOINT_CACHE:
LOGGER.info('BaseAnchor.get_endpoint: got endpoint for %s from cache', target_did)
rv = ENDPOINT_CACHE[target_did]
LOGGER.debug('BaseAnchor.get_endpoint <<< %s', rv)
return rv
req_json = await ledger.build_get_attrib_request(
self.did,
target_did,
'endpoint',
None,
None)
resp_json = await self._submit(req_json)
data_json = (json.loads(resp_json))['result']['data'] # it's double-encoded on the ledger
if data_json:
rv = json.loads(data_json)['endpoint'].get('endpoint', None)
else:
    LOGGER.info('BaseAnchor.get_endpoint: ledger query returned response with no data')
with ENDPOINT_CACHE.lock:
if rv:
ENDPOINT_CACHE[target_did] = rv
else:
ENDPOINT_CACHE.pop(target_did, None)
assert target_did not in ENDPOINT_CACHE
LOGGER.debug('BaseAnchor.get_endpoint <<< %s', rv)
return rv
|
async def get_endpoint(self, target_did: str = None, from_cache: bool = True) -> str
|
Get endpoint attribute for anchor having input DID (default own DID).
Raise WalletState if target DID is default (own DID) value but wallet does not have it
(neither created nor opened since initialization).
:param target_did: DID of anchor for which to find endpoint attribute on ledger
:param from_cache: check endpoint cache first before visiting ledger; always update cache with ledger value
:return: endpoint attribute value, or None for no such value
| 2.708592
| 2.519419
| 1.075086
|
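A sketch pairing send_endpoint and get_endpoint above, assuming `anchor` is an open anchor attached to an open pool; the read-back bypasses the endpoint cache to confirm the value now on the ledger:

async def publish_endpoint(anchor, endpoint: str) -> str:
    # Send own endpoint attribute to the ledger (no-op if already set), then confirm from the ledger.
    await anchor.send_endpoint(endpoint)           # raises BadLedgerTxn if confirmation times out
    return await anchor.get_endpoint(None, False)  # from_cache=False: query the ledger directly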
LOGGER.debug('BaseAnchor._submit >>> req_json: %s', req_json)
if not self.pool:
LOGGER.debug('BaseAnchor._submit <!< absent pool')
raise AbsentPool('Cannot submit request: absent pool')
if not self.pool.handle:
LOGGER.debug('BaseAnchor._submit <!< closed pool %s', self.pool.name)
raise ClosedPool('Cannot submit request to closed pool {}'.format(self.pool.name))
rv_json = await ledger.submit_request(self.pool.handle, req_json)
await asyncio.sleep(0)
resp = json.loads(rv_json)
if resp.get('op', '') in ('REQNACK', 'REJECT'):
LOGGER.debug('BaseAnchor._submit <!< ledger rejected request: %s', resp['reason'])
raise BadLedgerTxn('Ledger rejected transaction request: {}'.format(resp['reason']))
LOGGER.debug('BaseAnchor._submit <<< %s', rv_json)
return rv_json
|
async def _submit(self, req_json: str) -> str
|
Submit (json) request to ledger; return (json) result.
Raise AbsentPool for no pool, ClosedPool if pool is not yet open, or BadLedgerTxn on failure.
:param req_json: json of request to sign and submit
:return: json response
| 3.047966
| 2.427999
| 1.25534
|
LOGGER.debug('BaseAnchor._verkey_for >>> target: %s', target)
rv = target
if rv is None or not ok_did(rv): # it's None or already a verification key
LOGGER.debug('BaseAnchor._verkey_for <<< %s', rv)
return rv
if self.wallet.handle:
try:
rv = await did.key_for_local_did(self.wallet.handle, target)
LOGGER.info('Anchor %s got verkey for DID %s from wallet', self.name, target)
LOGGER.debug('BaseAnchor._verkey_for <<< %s', rv)
return rv
except IndyError as x_indy:
if x_indy.error_code != ErrorCode.WalletItemNotFound: # on not found, try the pool
LOGGER.debug(
'BaseAnchor._verkey_for <!< key lookup for local DID %s raised indy error code %s',
target,
x_indy.error_code)
raise
nym = json.loads(await self.get_nym(target))
if not nym:
LOGGER.debug(
'BaseAnchor._verkey_for <!< Wallet %s closed and ledger has no cryptonym for DID %s',
self.name,
target)
raise AbsentNym('Wallet {} closed, and ledger has no cryptonym for DID {}'.format(self.name, target))
rv = json.loads(await self.get_nym(target))['verkey']
LOGGER.info('Anchor %s got verkey for DID %s from pool %s', self.name, target, self.pool.name)
LOGGER.debug('BaseAnchor._verkey_for <<< %s', rv)
return rv
|
async def _verkey_for(self, target: str) -> str
|
Given a DID, retrieve its verification key, looking in wallet, then pool.
Given a verification key or None, return input.
Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
If no such verification key is on the ledger, raise AbsentNym.
:param target: verification key, or DID to resolve to such
:return: verification key
| 2.808217
| 2.582981
| 1.0872
|
LOGGER.debug('BaseAnchor.encrypt >>> message: %s, authn: %s, recip: %s', message, authn, recip)
if not self.wallet.handle:
LOGGER.debug('BaseAnchor.encrypt <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await self.wallet.encrypt(message, authn, await self._verkey_for(recip))
LOGGER.debug('BaseAnchor.encrypt <<< %s', rv)
return rv
|
async def encrypt(self, message: bytes, authn: bool = False, recip: str = None) -> bytes
|
Encrypt plaintext for owner of DID or verification key, anonymously or via
authenticated encryption scheme. If given DID, first check wallet and then pool
for corresponding verification key.
Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
:param message: plaintext, as bytes
:param authn: whether to use authenticated encryption scheme
:param recip: DID or verification key of recipient, None for anchor's own
:return: ciphertext, as bytes
| 4.294186
| 3.150273
| 1.363116
|
LOGGER.debug('BaseAnchor.decrypt >>> ciphertext: %s, sender: %s', ciphertext, sender)
if not self.wallet.handle:
LOGGER.debug('BaseAnchor.decrypt <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
from_verkey = None
if sender:
from_verkey = await self._verkey_for(sender)
rv = await self.wallet.decrypt(
ciphertext,
True if from_verkey else None,
to_verkey=None,
from_verkey=from_verkey)
LOGGER.debug('BaseAnchor.decrypt <<< %s', rv)
return rv
|
async def decrypt(self, ciphertext: bytes, sender: str = None) -> (bytes, str)
|
Decrypt ciphertext and optionally authenticate sender.
Raise BadKey if authentication operation reveals sender key distinct from current
verification key of owner of input DID. Raise WalletState if wallet is closed.
:param ciphertext: ciphertext, as bytes
:param sender: DID or verification key of sender, None for anonymously encrypted ciphertext
:return: decrypted bytes and sender verification key (None for anonymous decryption)
| 3.569638
| 2.957271
| 1.207071
|
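A round-trip sketch over the encrypt and decrypt methods above, assuming `alice` and `bob` are open anchors that can resolve each other's DIDs to verkeys (via wallet or pool):

async def authcrypt_round_trip(alice, bob, bob_did: str, alice_did: str, message: bytes):
    # Alice authenticated-encrypts for Bob; Bob decrypts and recovers Alice's sender verkey.
    ciphertext = await alice.encrypt(message, authn=True, recip=bob_did)
    plaintext, sender_verkey = await bob.decrypt(ciphertext, sender=alice_did)  # raises BadKey on key mismatch
    assert plaintext == message
    return sender_verkey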
LOGGER.debug('BaseAnchor.sign >>> message: %s', message)
if not self.wallet.handle:
LOGGER.debug('BaseAnchor.sign <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await self.wallet.sign(message)
LOGGER.debug('BaseAnchor.sign <<< %s', rv)
return rv
|
async def sign(self, message: bytes) -> bytes
|
Sign message; return signature. Raise WalletState if wallet is closed.
:param message: Content to sign, as bytes
:return: signature, as bytes
| 4.478707
| 3.486209
| 1.284693
|
LOGGER.debug('BaseAnchor.verify >>> signer: %s, message: %s, signature: %s', signer, message, signature)
if not self.wallet.handle:
LOGGER.debug('BaseAnchor.verify <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
verkey = None
if signer:
verkey = await self._verkey_for(signer)
rv = await self.wallet.verify(message, signature, verkey)
LOGGER.debug('BaseAnchor.verify <<< %s', rv)
return rv
|
async def verify(self, message: bytes, signature: bytes, signer: str = None) -> bool
|
Verify signature with input signer verification key (via lookup by DID first if need be).
Raise WalletState if wallet is closed.
:param message: content that was signed, as bytes
:param signature: signature, as bytes
:param signer: signer DID or verification key; omit for anchor's own
:return: whether signature is valid
| 3.782896
| 2.751343
| 1.374927
|
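A sketch pairing sign and verify above, assuming `anchor` and `peer` are open anchors and `peer` can resolve `anchor_did` to its current verification key:

async def sign_and_check(anchor, peer, anchor_did: str, message: bytes) -> bool:
    # Sign with one anchor's current key; verify on another, resolving the verkey by DID.
    signature = await anchor.sign(message)
    return await peer.verify(message, signature, signer=anchor_did)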
LOGGER.debug('BaseAnchor.get_txn >>> seq_no: %s', seq_no)
rv_json = json.dumps({})
req_json = await ledger.build_get_txn_request(self.did, None, seq_no)
resp = json.loads(await self._submit(req_json))
rv_json = self.pool.protocol.txn2data(resp)
LOGGER.debug('BaseAnchor.get_txn <<< %s', rv_json)
return rv_json
|
async def get_txn(self, seq_no: int) -> str
|
Find a transaction on the distributed ledger by its sequence number.
:param seq_no: transaction number
:return: json transaction data for transaction at input sequence number, or empty json object for no match
| 4.067361
| 3.73188
| 1.089896
|
rv = {
'id': self.id,
'type': self.type,
'priority': self.priority
}
if self.recip_keys:
    rv['recipientKeys'] = [canon_ref(k.did, k.id, '#') for k in self.recip_keys]
if self.routing_keys:
rv['routingKeys'] = [canon_ref(k.did, k.id, '#') for k in self.routing_keys]
rv['serviceEndpoint'] = self.endpoint
return rv
|
def to_dict(self)
|
Return dict representation of service to embed in DID document.
| 4.430158
| 3.239119
| 1.367705
|
rv = await self.wallet.get_link_secret_label()
if rv is None:
LOGGER.debug('HolderProver._assert_link_secret: action %s requires link secret but it is not set', action)
raise AbsentLinkSecret('Action {} requires link secret but it is not set'.format(action))
return rv
|
async def _assert_link_secret(self, action: str) -> str
|
Return current wallet link secret label. Raise AbsentLinkSecret if link secret is not set.
:param action: action requiring link secret
| 5.884002
| 3.997338
| 1.47198
|
LOGGER.debug('HolderProver._sync_revoc_for_proof >>> rr_id: %s', rr_id)
if not ok_rev_reg_id(rr_id):
LOGGER.debug('HolderProver._sync_revoc_for_proof <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
try:
json.loads(await self.get_cred_def(cd_id))
except AbsentCredDef:
LOGGER.debug(
'HolderProver._sync_revoc_for_proof <!< corrupt tails tree %s may be for another ledger',
self._dir_tails)
raise AbsentCredDef('Corrupt tails tree {} may be for another ledger'.format(self._dir_tails))
except ClosedPool:
pass # carry on, may be OK from cache only
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
tails = revo_cache_entry.tails if revo_cache_entry else None
if tails is None: # it's not yet set in cache
try:
tails = await Tails(self._dir_tails, cd_id, tag).open()
except AbsentTails: # get hash from ledger and check for tails file
rr_def = json.loads(await self.get_rev_reg_def(rr_id))
tails_hash = rr_def['value']['tailsHash']
path_tails = join(Tails.dir(self._dir_tails, rr_id), tails_hash)
if not isfile(path_tails):
LOGGER.debug('HolderProver._sync_revoc_for_proof <!< No tails file present at %s', path_tails)
raise AbsentTails('No tails file present at {}'.format(path_tails))
Tails.associate(self._dir_tails, rr_id, tails_hash)
tails = await Tails(self._dir_tails, cd_id, tag).open() # OK now since tails file present
if revo_cache_entry is None:
REVO_CACHE[rr_id] = RevoCacheEntry(None, tails)
else:
REVO_CACHE[rr_id].tails = tails
LOGGER.debug('HolderProver._sync_revoc_for_proof <<<')
|
async def _sync_revoc_for_proof(self, rr_id: str) -> None
|
Pick up tails file reader handle for input revocation registry identifier. If no symbolic
link is present, get the revocation registry definition to retrieve its tails file hash,
then find the tails file and link it.
Raise AbsentTails for missing corresponding tails file.
:param rr_id: revocation registry identifier
| 2.661902
| 2.520662
| 1.056033
|
LOGGER.debug('HolderProver.dir_tails >>>')
if not ok_rev_reg_id(rr_id):
LOGGER.debug('HolderProver.dir_tails <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rv = Tails.dir(self._dir_tails, rr_id)
LOGGER.debug('HolderProver.dir_tails <<< %s', rv)
return rv
|
def dir_tails(self, rr_id: str) -> str
|
Return path to the correct directory for the tails file on input revocation registry identifier.
:param rr_id: revocation registry identifier of interest
:return: path to tails dir for input revocation registry identifier
| 3.664958
| 3.133497
| 1.169606
|
LOGGER.debug('HolderProver.open >>>')
await super().open()
if self.config.get('parse-caches-on-open', False):
ArchivableCaches.parse(self.dir_cache)
for path_rr_id in Tails.links(self._dir_tails):
await self._sync_revoc_for_proof(basename(path_rr_id))
LOGGER.debug('HolderProver.open <<<')
return self
|
async def open(self) -> 'HolderProver'
|
Explicit entry. Perform ancestor opening operations,
then parse cache from archive if so configured, and
synchronize revocation registry to tails tree content.
:return: current object
| 8.846891
| 7.260338
| 1.218523
|
LOGGER.debug('HolderProver.close >>>')
if self.config.get('archive-holder-prover-caches-on-close', False):
await self.load_cache_for_proof(True)
ArchivableCaches.purge_archives(self.dir_cache, True)
await BaseAnchor.close(self)
for path_rr_id in Tails.links(self._dir_tails):
rr_id = basename(path_rr_id)
try:
await self._sync_revoc_for_proof(rr_id)
except ClosedPool:
LOGGER.warning('HolderProver sync-revoc on close required ledger for %s but pool was closed', rr_id)
LOGGER.debug('HolderProver.close <<<')
|
async def close(self) -> None
|
Explicit exit. If so configured, populate cache to prove all creds in
wallet offline if need be, archive cache, and purge prior cache archives.
| 9.035809
| 7.666643
| 1.178587
|
LOGGER.debug('HolderProver.rev_regs >>>')
for path_rr_id in Tails.links(self._dir_tails):
await self._sync_revoc_for_proof(basename(path_rr_id))
rv = [basename(f) for f in Tails.links(self._dir_tails)]
LOGGER.debug('HolderProver.rev_regs <<< %s', rv)
return rv
|
async def rev_regs(self) -> list
|
Return list of revocation registry identifiers for which HolderProver has associated tails files.
The operation creates associations for any (newly copied, via service wrapper API) tails files that lack them.
:return: list of revocation registry identifiers for which HolderProver has associated tails files
| 7.785888
| 5.456877
| 1.426803
|
LOGGER.debug('HolderProver.offline_intervals >>> cd_ids: %s', cd_ids)
rv = {}
for cd_id in [cd_ids] if isinstance(cd_ids, str) else cd_ids:
if not ok_cred_def_id(cd_id):
LOGGER.debug('HolderProver.offline_intervals <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
try:
cred_def = json.loads(await self.get_cred_def(cd_id))
except ClosedPool:
LOGGER.debug('HolderProver.offline_intervals <!< no such cred def %s in cred def cache', cd_id)
        raise CacheIndex('No such cred def {} in cred def cache'.format(cd_id))
rv[cd_id] = {}
if 'revocation' in cred_def['value']:
with REVO_CACHE.lock:
(fro, to) = REVO_CACHE.dflt_interval(cd_id)
if not (fro and to):
LOGGER.debug(
'HolderProver.offline_intervals <!< no cached delta for non-revoc interval on %s',
cd_id)
raise CacheIndex('No cached delta for non-revoc interval on {}'.format(cd_id))
rv[cd_id]['interval'] = to if fro == to else (fro, to)
LOGGER.debug('HolderProver.offline_intervals <<< %s', rv)
return rv
|
async def offline_intervals(self, cd_ids: Union[str, Sequence[str]]) -> dict
|
Return default non-revocation intervals for input cred def ids, based on content of revocation cache,
for augmentation into specification for Verifier.build_proof_req_json. Note that the close() call
to set the anchor off-line extends all revocation cache registry delta entries to its time of execution:
in this case, the intervals will all be single timestamps rather than (fro, to) pairs.
Raise CacheIndex if proof request cites credential definition without corresponding
content in cred def cache or revocation cache.
:param cd_ids: credential definition identifier or sequence thereof
:return: dict mapping revocable cred def ids to interval specifications to augment into cd_id2spec
parameter for Verifier.build_proof_req_json(), and non-revocable cred def ids to empty dict; e.g.,
::
{
'Vx4E82R17q...:3:CL:16:tag': {
'interval': (1528111730, 1528115832)
},
'R17v42T4pk...:3:CL:19:tag': {},
'Z9ccax812j...:3:CL:27:tag': {
'interval': (1528112408, 1528116008)
},
'9cHbp54C8n...:3:CL:37:tag': {
'interval': 1528116426
},
'6caBcmLi33...:tag:CL:41:tag': {},
...
}
| 3.019686
| 2.501169
| 1.20731
|
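A hedged sketch of the intended use per the docstring above, assuming `holder` is a HolderProver with populated caches and `verifier` exposes build_proof_req_json(cd_id2spec) as the docstring describes; callers may merge attribute specifications into the returned dict before passing it on:

import json

async def offline_proof_request(holder, verifier, cd_ids):
    # Build a proof request whose non-revocation intervals come from the holder's revocation cache.
    cd_id2spec = await holder.offline_intervals(cd_ids)  # raises CacheIndex if cache lacks deltas
    return json.loads(await verifier.build_proof_req_json(cd_id2spec))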
LOGGER.debug('HolderProver.create_link_secret >>> label: %s', label)
await self.wallet.create_link_secret(label)
LOGGER.debug('HolderProver.create_link_secret <<<')
|
async def create_link_secret(self, label: str) -> None
|
Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the
current link secret does not already correspond to the input link secret label.
Raise WalletState if wallet is closed, or any other IndyError causing failure
to set link secret in wallet.
:param label: label for link secret; indy-sdk uses label to generate link secret
| 4.620384
| 3.142764
| 1.470166
|
LOGGER.debug(
'HolderProver.store_cred >>> cred_json: %s, cred_req_metadata_json: %s',
cred_json,
cred_req_metadata_json)
if not self.wallet.handle:
LOGGER.debug('HolderProver.store_cred <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
cred = json.loads(cred_json)
cred_def_json = await self.get_cred_def(cred['cred_def_id'])
rr_id = cred['rev_reg_id']
rr_def_json = None
if rr_id:
await self._sync_revoc_for_proof(rr_id)
rr_def_json = await self.get_rev_reg_def(rr_id)
rv = await anoncreds.prover_store_credential(
self.wallet.handle,
None, # cred_id, let indy-sdk generate random uuid
cred_req_metadata_json,
cred_json,
cred_def_json,
rr_def_json)
LOGGER.debug('HolderProver.store_cred <<< %s', rv)
return rv
|
async def store_cred(self, cred_json: str, cred_req_metadata_json: str) -> str
|
Store cred in wallet as HolderProver, return its credential identifier as created in wallet.
Raise AbsentTails if tails file not available for revocation registry for input credential.
Raise WalletState if wallet is closed.
:param cred_json: credential json as HolderProver created
:param cred_req_metadata_json: credential request metadata json as HolderProver created via create_cred_req()
:return: credential identifier within wallet
| 2.784511
| 2.446868
| 1.13799
|
LOGGER.debug('HolderProver.load_cache_for_proof >>> archive: %s', archive)
rv = int(time())
box_ids = json.loads(await self.get_box_ids_held())
for s_id in box_ids['schema_id']:
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
for cd_id in box_ids['cred_def_id']:
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
for rr_id in box_ids['rev_reg_id']:
await self.get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_delta_json(self._build_rr_delta_json, rv, rv)
except ClosedPool:
LOGGER.warning(
'HolderProver %s is offline from pool %s, cannot update revo cache reg delta for %s to %s',
self.name,
self.pool.name,
rr_id,
rv)
except AbsentPool:
LOGGER.warning(
'HolderProver %s has no pool, cannot update revo cache reg delta for %s to %s',
self.name,
rr_id,
rv)
if archive:
ArchivableCaches.archive(self.dir_cache)
LOGGER.debug('HolderProver.load_cache_for_proof <<< %s', rv)
return rv
|
async def load_cache_for_proof(self, archive: bool = False) -> int
|
Load schema, cred def, revocation caches; optionally archive enough to go
offline and be able to generate proof on all credentials in wallet.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:param archive: True to archive now or False to demur (subclasses may still
need to augment archivable caches further)
:return: cache load event timestamp (epoch seconds)
| 3.353127
| 2.930413
| 1.144251
|
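A sketch of the archive-then-go-offline flow that load_cache_for_proof supports, assuming `holder` is an open HolderProver; the returned timestamp names the archive subdirectory, and a holder configured with 'parse-caches-on-open' (see HolderProver.open above) can reload it later:

async def archive_caches_for_offline_proof(holder) -> int:
    # Prime schema, cred def and revocation caches from the ledger, then archive them for offline use.
    return await holder.load_cache_for_proof(archive=True)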
LOGGER.debug('HolderProver.get_box_ids_held >>>')
if not self.wallet.handle:
LOGGER.debug('HolderProver.get_box_ids_held <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rr_ids = {basename(link) for link in Tails.links(self._dir_tails)}
un_rr_ids = set()
for rr_id in rr_ids:
if not json.loads(await self.get_cred_infos_by_q(json.dumps({'rev_reg_id': rr_id}), 1)):
un_rr_ids.add(rr_id)
rr_ids -= un_rr_ids
cd_ids = {cd_id for cd_id in listdir(self._dir_tails)
if isdir(join(self._dir_tails, cd_id)) and ok_cred_def_id(cd_id)}
s_ids = set()
for cd_id in cd_ids:
s_ids.add(json.loads(await self.get_schema(cred_def_id2seq_no(cd_id)))['id'])
un_cd_ids = set()
for cd_id in cd_ids:
if not json.loads(await self.get_cred_infos_by_q(json.dumps({'cred_def_id': cd_id}), 1)):
un_cd_ids.add(cd_id)
cd_ids -= un_cd_ids
un_s_ids = set()
for s_id in s_ids:
if not json.loads(await self.get_cred_infos_by_q(json.dumps({'schema_id': s_id}), 1)):
un_s_ids.add(s_id)
s_ids -= un_s_ids
rv = json.dumps({
'schema_id': list(s_ids),
'cred_def_id': list(cd_ids),
'rev_reg_id': list(rr_ids)
})
LOGGER.debug('HolderProver.get_box_ids_held <<< %s', rv)
return rv
|
async def get_box_ids_held(self) -> str
|
Return json object on lists of all unique box identifiers for credentials in wallet, as
evidenced by tails directory content:
* schema identifiers
* credential definition identifiers
* revocation registry identifiers.
E.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
"9cHbp54C8n...:2:business:2.0",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:tag",
"9cHbp54C8n...:3:CL:37:tag",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:tag:CL_ACCUM:0",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:tag:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:tag:CL_ACCUM:2",
...
]
}
Raise WalletState if wallet is closed.
:return: json object with lists of schema identifiers, credential definition identifiers, and revocation registry identifiers
| 2.325567
| 2.031806
| 1.144581
|
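A small sketch that consumes the json structure documented above, assuming `holder` is an open HolderProver:

import json

async def count_box_ids(holder) -> dict:
    # Summarize how many schemata, cred defs and rev regs back credentials currently held in the wallet.
    box_ids = json.loads(await holder.get_box_ids_held())
    return {key: len(values) for (key, values) in box_ids.items()}  # e.g. {'schema_id': 2, ...}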
LOGGER.debug('HolderProver.get_cred_infos_by_q >>> query_json: %s, limit: %s', query_json, limit)
if not self.wallet.handle:
LOGGER.debug('HolderProver.get_cred_infos_by_q <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
infos = []
if limit and limit < 0:
limit = None
(handle, cardinality) = await anoncreds.prover_search_credentials(
self.wallet.handle,
json.dumps(canon_cred_wql(json.loads(query_json)))) # indy-sdk requires attr name canonicalization
chunk = min(cardinality, limit or cardinality, Wallet.DEFAULT_CHUNK)
if limit:
cardinality = min(limit, cardinality)
try:
while len(infos) != cardinality:
batch = json.loads(await anoncreds.prover_fetch_credentials(handle, chunk))
infos.extend(batch)
if len(batch) < chunk:
break
if len(infos) != cardinality:
LOGGER.warning('Credential search/limit indicated %s results but fetched %s', cardinality, len(infos))
finally:
await anoncreds.prover_close_credentials_search(handle)
rv_json = json.dumps(infos)
LOGGER.debug('HolderProver.get_cred_infos_by_q <<< %s', rv_json)
return rv_json
|
async def get_cred_infos_by_q(self, query_json: str, limit: int = None) -> str
|
A cred-info aggregates:
* its wallet cred-id (aka wallet referent)
* its attribute names and values
* its schema identifier
* its credential definition identifier
* its revocation registry identifier
* its credential revocation identifier.
Return list of cred-infos from wallet by input WQL query;
return cred-infos for all credentials in wallet for no query.
Raise WalletState if the wallet is closed.
The operation supports a subset of WQL; i.e.,
::
query = {subquery}
subquery = {subquery, ..., subquery} - WHERE subquery AND ... AND subquery
subquery = $or: [{subquery},..., {subquery}] - WHERE subquery OR ... OR subquery
subquery = $not: {subquery} - WHERE NOT (subquery)
subquery = "tagName": tagValue - WHERE tagName == tagValue
subquery = "tagName": {$in: [tagValue, ..., tagValue]} - WHERE tagName IN (tagValue, ..., tagValue)
subquery = "tagName": {$neq: tagValue} - WHERE tagName != tagValue
but not
::
subquery = "tagName": {$gt: tagValue} - WHERE tagName > tagValue
subquery = "tagName": {$gte: tagValue} - WHERE tagName >= tagValue
subquery = "tagName": {$lt: tagValue} - WHERE tagName < tagValue
subquery = "tagName": {$lte: tagValue} - WHERE tagName <= tagValue
subquery = "tagName": {$like: tagValue} - WHERE tagName LIKE tagValue
:param query_json: WQL query json
:param limit: maximum number of results to return
:return: cred-infos as json list; i.e.,
::
[
{
"referent": string, # credential identifier in the wallet
"attrs": {
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value2", "encoded": "value2_as_int" },
...
}
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
},
...
]
| 3.546616
| 3.312107
| 1.070803
|
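A hedged WQL example against get_cred_infos_by_q above, assuming `holder` is an open HolderProver; it uses the $or operator from the supported subset and the rev_reg_id/cred_def_id tags that the wallet maintains on credentials:

import json

async def creds_on_rev_reg_or_cred_def(holder, rr_id: str, cd_id: str, limit: int = 50) -> list:
    # Query wallet cred-infos with a WQL $or over two tag-equality subqueries.
    query = {'$or': [{'rev_reg_id': rr_id}, {'cred_def_id': cd_id}]}
    return json.loads(await holder.get_cred_infos_by_q(json.dumps(query), limit))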
LOGGER.debug('HolderProver.get_cred_infos_by_filter >>> filt: %s', filt)
if not self.wallet.handle:
LOGGER.debug('HolderProver.get_cred_infos_by_filter <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv_json = await anoncreds.prover_get_credentials(self.wallet.handle, json.dumps(filt or {}))
LOGGER.debug('HolderProver.get_cred_infos_by_filter <<< %s', rv_json)
return rv_json
|
async def get_cred_infos_by_filter(self, filt: dict = None) -> str
|
Return cred-info (json list) from wallet by input filter for
schema identifier and/or credential definition identifier components;
return info of all credentials for no filter.
Raise WalletState if the wallet is closed.
:param filt: indy-sdk filter for credentials; i.e.,
::
{
"schema_id": string, # optional
"schema_issuer_did": string, # optional
"schema_name": string, # optional
"schema_version": string, # optional
"issuer_did": string, # optional
"cred_def_id": string # optional
}
:return: credential infos as json list; i.e.,
::
[
{
"referent": string, # credential identifier in the wallet
"attrs": {
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value2", "encoded": "value2_as_int" },
...
}
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
},
...
]
| 2.967026
| 2.427804
| 1.222103
|
LOGGER.debug('HolderProver.get_cred_info_by_id >>> cred_id: %s', cred_id)
if not self.wallet.handle:
LOGGER.debug('HolderProver.get_cred_info_by_id <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
try:
rv_json = await anoncreds.prover_get_credential(self.wallet.handle, cred_id)
except IndyError as x_indy: # no such cred
if x_indy.error_code == ErrorCode.WalletItemNotFound:
LOGGER.debug(
'HolderProver.get_cred_info_by_id <!< no cred in wallet %s for cred id %s',
self.name,
cred_id)
raise AbsentCred('No cred in wallet for {}'.format(cred_id))
LOGGER.debug(
'HolderProver.get_cred_info_by_id <!< wallet %s, cred id %s: indy error code %s',
self.name,
cred_id,
x_indy.error_code)
raise
LOGGER.debug('HolderProver.get_cred_info_by_id <<< %s', rv_json)
return rv_json
|
async def get_cred_info_by_id(self, cred_id: str) -> str
|
Return cred-info json from wallet by wallet credential identifier.
Raise AbsentCred for no such credential. Raise WalletState if the wallet is closed.
:param cred_id: credential identifier of interest
:return: cred-info json; i.e.,
::
{
"referent": string, # credential identifier in the wallet
"attrs": {
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value2", "encoded": "value2_as_int" },
...
}
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}
| 2.363979
| 1.984821
| 1.191029
|
LOGGER.debug('HolderProver.reset_wallet >>>')
self.wallet = await WalletManager().reset(self.wallet, seed)
rv = self.wallet
LOGGER.debug('HolderProver.reset_wallet <<< %s', rv)
return rv
|
async def reset_wallet(self, seed: str = None) -> Wallet
|
Close and delete HolderProver wallet, then create and open a replacement on prior link secret.
Note that this operation effectively destroys private keys for credential definitions. Its
intended use is primarily for testing and demonstration.
Raise AbsentLinkSecret if link secret not set. Raise WalletState if the wallet is closed.
:param seed: seed to use for new wallet (default random)
:return: replacement wallet
| 6.262543
| 5.320464
| 1.177067
|
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
# Don"t return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError) as e:
# Reraise an AttributeError raised by a @property
if (isinstance(e, AttributeError) and
not isinstance(current, BaseContext) and bit in dir(current)):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, "do_not_call_in_templates", False):
pass
elif getattr(current, "alters_data", False):
try:
current = context.template.engine.string_if_invalid
except AttributeError:
current = settings.TEMPLATE_STRING_IF_INVALID
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
try:
inspect.getcallargs(current)
except TypeError: # arguments *were* required
current = context.template.engine.string_if_invalid # invalid method call
else:
raise
elif isinstance(current, Model):
if ("request" in context) and hasattr(context["request"], "_ultracache"):
# get_for_model itself is cached
ct = ContentType.objects.get_for_model(current.__class__)
context["request"]._ultracache.append((ct.id, current.pk))
except Exception as e:
template_name = getattr(context, "template_name", None) or "unknown"
if logger is not None:
logger.debug(
"Exception while resolving variable \"%s\" in template \"%s\".",
bit,
template_name,
exc_info=True,
)
if getattr(e, "silent_variable_failure", False):
current = context.template.engine.string_if_invalid
else:
raise
return current
|
def my_resolve_lookup(self, context)
|
Performs resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
| 4.561264
| 4.523516
| 1.008345
|
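A hedged sketch of how such an override is typically installed, assuming this body is meant to replace django.template.base.Variable._resolve_lookup (the docstring above directs external callers to Variable.resolve()); this is ordinary monkey-patching, not a documented Django extension point:

from django.template import base as template_base

def install_resolve_lookup_override(resolve_lookup):
    # Monkey-patch Variable._resolve_lookup with an override such as my_resolve_lookup above.
    template_base.Variable._resolve_lookup = resolve_lookup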