sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def keypair():
    '''
    Show the server-side information of the currently configured access key.
    '''
    # (display label, API field name) pairs, in display order.
    fields = [
        ('User ID', 'user_id'),
        ('Access Key', 'access_key'),
        ('Secret Key', 'secret_key'),
        ('Active?', 'is_active'),
        ('Admin?', 'is_admin'),
        ('Created At', 'created_at'),
        ('Last Used', 'last_used'),
        ('Res.Policy', 'resource_policy'),
        ('Rate Limit', 'rate_limit'),
        ('Concur.Limit', 'concurrency_limit'),
        ('Concur.Used', 'concurrency_used'),
    ]
    with Session() as session:
        try:
            kp = session.KeyPair(session.config.access_key)
            info = kp.info(fields=(field for _, field in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        # Pair each label with the fetched value and render as a table.
        rows = [(label, info[field]) for label, field in fields]
        print(tabulate(rows, headers=('Field', 'Value')))
|
Show the server-side information of the currently configured access key.
|
entailment
|
def keypairs(ctx, user_id, is_active):
    '''
    List and manage keypairs.
    To show all keypairs or other user's, your access key must have the admin
    privilege.
    (admin privilege required)
    '''
    # Only run the listing when no subcommand was invoked.
    if ctx.invoked_subcommand is not None:
        return
    # (display label, API field name) pairs, in display order.
    fields = [
        ('User ID', 'user_id'),
        ('Access Key', 'access_key'),
        ('Secret Key', 'secret_key'),
        ('Active?', 'is_active'),
        ('Admin?', 'is_admin'),
        ('Created At', 'created_at'),
        ('Last Used', 'last_used'),
        ('Res.Policy', 'resource_policy'),
        ('Rate Limit', 'rate_limit'),
        ('Concur.Limit', 'concurrency_limit'),
        ('Concur.Used', 'concurrency_used'),
    ]
    try:
        user_id = int(user_id)
    except (TypeError, ValueError):
        pass  # string-based user ID for Backend.AI v1.4+
    with Session() as session:
        try:
            items = session.KeyPair.list(
                user_id, is_active,
                fields=(field for _, field in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if not items:
            print('There are no matching keypairs associated '
                  'with the user ID {0}'.format(user_id))
            return
        print(tabulate((item.values() for item in items),
                       headers=(label for label, _ in fields)))
|
List and manage keypairs.
To show all keypairs or other user's, your access key must have the admin
privilege.
(admin privilege required)
|
entailment
|
def add(user_id, resource_policy, admin, inactive, rate_limit):
    '''
    Add a new keypair.
    USER_ID: User ID of a new key pair.
    RESOURCE_POLICY: resource policy for new key pair.
    '''
    try:
        user_id = int(user_id)
    except ValueError:
        pass  # string-based user ID for Backend.AI v1.4+
    with Session() as session:
        try:
            data = session.KeyPair.create(
                user_id,
                is_active=not inactive,
                is_admin=admin,
                resource_policy=resource_policy,
                rate_limit=rate_limit)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if not data['ok']:
            print_fail('KeyPair creation has failed: {0}'.format(data['msg']))
            sys.exit(1)
        # Show the freshly issued credentials to the user.
        new_keypair = data['keypair']
        print('Access Key: {0}'.format(new_keypair['access_key']))
        print('Secret Key: {0}'.format(new_keypair['secret_key']))
|
Add a new keypair.
USER_ID: User ID of a new key pair.
RESOURCE_POLICY: resource policy for new key pair.
|
entailment
|
def update(access_key, resource_policy, is_admin, is_active, rate_limit):
    '''
    Update an existing keypair.
    ACCESS_KEY: Access key of an existing key pair.
    '''
    with Session() as session:
        try:
            data = session.KeyPair.update(
                access_key,
                is_active=is_active,
                is_admin=is_admin,
                resource_policy=resource_policy,
                rate_limit=rate_limit)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if not data['ok']:
            # Bug fix: the message previously said "creation" (copy-pasted
            # from the `add` command); this is the update path.
            print_fail('KeyPair update has failed: {0}'.format(data['msg']))
            sys.exit(1)
        print('Key pair is updated: ' + access_key + '.')
|
Update an existing keypair.
ACCESS_KEY: Access key of an existing key pair.
|
entailment
|
def delete(access_key):
    """
    Delete an existing keypair.
    ACCESSKEY: ACCESSKEY for a keypair to delete.
    """
    with Session() as session:
        try:
            result = session.KeyPair.delete(access_key)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        # The server reports success/failure in the 'ok' flag.
        if not result['ok']:
            print_fail('KeyPair deletion has failed: {0}'.format(result['msg']))
            sys.exit(1)
        print('Key pair is deleted: ' + access_key + '.')
|
Delete an existing keypair.
ACCESSKEY: ACCESSKEY for a keypair to delete.
|
entailment
|
def login(self):
    """ perform API auth test returning user and team """
    log.debug('performing auth test')
    result = self._get(urls['test'])
    # Wrap the authenticated identity in a User object.
    user = User({'name': result['user'], 'id': result['user_id']})
    self._refresh()
    return result['team'], user
|
perform API auth test returning user and team
|
entailment
|
def user(self, match):
    """ Return User object for a given Slack ID or name """
    # Slack user IDs are 9 characters and start with 'U'; anything else
    # is treated as a user name.
    is_slack_id = len(match) == 9 and match.startswith('U')
    attr = 'id' if is_slack_id else 'name'
    return self._lookup(User, attr, match)
|
Return User object for a given Slack ID or name
|
entailment
|
def channel(self, match):
    """ Return Channel object for a given Slack ID or name """
    # Slack channel IDs are 9 characters and start with 'C' (public),
    # 'G' (group), or 'D' (direct message).
    is_slack_id = len(match) == 9 and match[0] in 'CGD'
    attr = 'id' if is_slack_id else 'name'
    return self._lookup(Channel, attr, match)
|
Return Channel object for a given Slack ID or name
|
entailment
|
def _refresh(self):
    """ refresh internal directory cache """
    log.debug('refreshing directory cache')
    # Materialize the generators before updating the caches.
    users = list(self._user_gen())
    channels = list(self._channel_gen())
    self._users.update(users)
    self._channels.update(channels)
|
refresh internal directory cache
|
entailment
|
def match(self, attr, val):
    """ lookup object in directory with attribute matching value """
    # Hold the lock for the whole scan; returns None when nothing matches.
    with self._lock:
        for entry in self:
            if getattr(entry, attr) == val:
                return entry
|
lookup object in directory with attribute matching value
|
entailment
|
def _groups_of_size(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks."""
# _groups_of_size('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
|
Collect data into fixed-length chunks or blocks.
|
entailment
|
def slice_repr(slice_instance):
    """
    Turn things like `slice(None, 2, -1)` into `:2:-1`.

    Raises TypeError if given anything other than a slice.
    """
    if not isinstance(slice_instance, slice):
        raise TypeError('Unhandled type {}'.format(type(slice_instance)))
    # Bug fix: use explicit None checks instead of `x or ''` so that
    # legitimate zero bounds survive (e.g. slice(None, 0) -> ':0', which
    # the old truthiness test rendered as just ':').
    start = '' if slice_instance.start is None else slice_instance.start
    stop = '' if slice_instance.stop is None else slice_instance.stop
    step = '' if slice_instance.step is None else slice_instance.step
    msg = '{}:'.format(start)
    if stop != '':
        msg += '{}'.format(stop)
    if step != '':
        # The second colon only appears when a step is present.
        msg += ':{}'.format(step)
    return msg
|
Turn things like `slice(None, 2, -1)` into `:2:-1`.
|
entailment
|
def biggus_chunk(chunk_key, biggus_array, masked):
    """
    A function that lazily evaluates a biggus.Chunk. This is useful for
    passing through as a dask task so that we don't have to compute the
    chunk in order to compute the graph.
    """
    # Pick the realisation method, then evaluate it only now (lazily).
    realise = biggus_array.masked_array if masked else biggus_array.ndarray
    return biggus._init.Chunk(chunk_key, realise())
|
A function that lazily evaluates a biggus.Chunk. This is useful for
passing through as a dask task so that we don't have to compute the
chunk in order to compute the graph.
|
entailment
|
def _make_stream_handler_nodes(self, dsk_graph, array, iteration_order,
                               masked):
    """
    Produce task graph entries for an array that comes from a biggus
    StreamsHandler.
    This is essentially every type of array that isn't already a thing on
    disk/in-memory. StreamsHandler arrays include all aggregations and
    elementwise operations.
    """
    nodes = {}
    handler = array.streams_handler(masked)
    input_iteration_order = handler.input_iteration_order(iteration_order)

    def input_keys_transform(input_array, keys):
        # Ask the input array's handler (if any) how it re-maps keys on
        # the way out, and apply that mapping.
        if hasattr(input_array, 'streams_handler'):
            handler = input_array.streams_handler(masked)
            # Get the transformer of the input array, and apply it to the
            # keys.
            input_transformer = getattr(handler,
                                        'output_keys', None)
            if input_transformer is not None:
                keys = input_transformer(keys)
        return keys

    sources_keys = []
    sources_chunks = []
    for input_array in array.sources:
        # Bring together all chunks that influence the same part of this
        # (resultant) array.
        source_chunks_by_key = {}
        sources_chunks.append(source_chunks_by_key)
        source_keys = []
        sources_keys.append(source_keys)
        # Make nodes for the source arrays (if they don't already exist)
        # before we do anything else.
        input_nodes = self._make_nodes(dsk_graph, input_array,
                                       input_iteration_order, masked)
        for chunk_id, task in input_nodes.items():
            # By convention, task[1] holds the keys used to index the
            # source of the input array.
            chunk_keys = task[1]
            t_keys = chunk_keys
            t_keys = input_keys_transform(array, t_keys)
            source_keys.append(t_keys)
            this_key = str(t_keys)
            source_chunks_by_key.setdefault(this_key,
                                            []).append([chunk_id, task])
    sources_keys_grouped = key_grouper.group_keys(array.shape,
                                                  *sources_keys)
    for slice_group, sources_keys_group in sources_keys_grouped.items():
        # Each group is entirely independent and can have its own task
        # without knowledge of results from items in other groups.
        t_keys = tuple(slice(*slice_tuple) for slice_tuple in slice_group)
        all_chunks = []
        for source_keys, source_chunks_by_key in zip(sources_keys_group,
                                                     sources_chunks):
            dependencies = tuple(
                the_id
                for keys in source_keys
                for the_id, task in source_chunks_by_key[str(keys)])
            # Uniquify source_keys, but keep the order.
            dependencies = tuple(_unique_everseen(dependencies))

            def normalize_keys(keys, shape):
                result = []
                for key, dim_length in zip(keys, shape):
                    result.append(key_grouper.normalize_slice(key,
                                                              dim_length))
                return tuple(result)

            # If we don't have the same chunks for all inputs then we
            # should combine them before passing them on to the handler.
            # TODO: Fix slice equality to deal with 0 and None etc.
            if not all(t_keys == normalize_keys(keys, array.shape)
                       for keys in source_keys):
                combined = self.collect(array[t_keys], masked, chunk=True)
                new_task = (combined, ) + dependencies
                # Bug fix: uuid.uuid() does not exist (AttributeError);
                # use uuid.uuid4() as in the chunk_id construction below.
                new_id = ('chunk shape: {}\n\n{}'
                          ''.format(array[t_keys].shape, uuid.uuid4()))
                dsk_graph[new_id] = new_task
                dependencies = (new_id, )
            all_chunks.append(dependencies)
        pivoted = all_chunks
        sub_array = array[t_keys]
        handler = sub_array.streams_handler(masked)
        name = getattr(handler, 'nice_name', handler.__class__.__name__)
        if hasattr(handler, 'axis'):
            name += '\n(axis={})'.format(handler.axis)
        # For ElementwiseStreams handlers, use the function that they wrap
        # (e.g "add")
        if hasattr(handler, 'operator'):
            name = handler.operator.__name__
        n_sources = len(array.sources)
        handler_of_chunks_fn = self.create_chunks_handler_fn(handler,
                                                             n_sources,
                                                             name)
        shape = sub_array.shape
        if all(key == slice(None) for key in t_keys):
            subset = ''
        else:
            pretty_index = ', '.join(map(slice_repr, t_keys))
            subset = 'target subset [{}]\n'.format(pretty_index)
        # Flatten out the pivot so that dask can dereferences the IDs
        source_chunks = [item for sublist in pivoted for item in sublist]
        task = tuple([handler_of_chunks_fn, t_keys] + source_chunks)
        shape_repr = ', '.join(map(str, shape))
        chunk_id = 'chunk shape: ({})\n\n{}{}'.format(shape_repr,
                                                      subset,
                                                      uuid.uuid4())
        assert chunk_id not in dsk_graph
        dsk_graph[chunk_id] = task
        nodes[chunk_id] = task
    return nodes
|
Produce task graph entries for an array that comes from a biggus
StreamsHandler.
This is essentially every type of array that isn't already a thing on
disk/in-memory. StreamsHandler arrays include all aggregations and
elementwise operations.
|
entailment
|
def lazy_chunk_creator(name):
    """
    Create a lazy chunk creating function with a nice name that is suitable
    for representation in a dask graph.
    """
    # TODO: Could this become a LazyChunk class?
    def biggus_chunk(chunk_key, biggus_array, masked):
        """
        A function that lazily evaluates a biggus.Chunk. This is useful for
        passing through as a dask task so that we don't have to compute the
        chunk in order to compute the graph.
        """
        realise = biggus_array.masked_array if masked else biggus_array.ndarray
        return biggus._init.Chunk(chunk_key, realise())

    # Give the closure the requested display name for the dask graph.
    biggus_chunk.__name__ = name
    return biggus_chunk
|
Create a lazy chunk creating function with a nice name that is suitable
for representation in a dask graph.
|
entailment
|
def _make_nodes(self, dsk_graph, array, iteration_order, masked,
                top=False):
    """
    Recursive function that returns the dask items for the given array.
    NOTE: Currently assuming that all tasks are a tuple, with the second
    item being the keys used to index the source of the respective input
    array.
    """
    # Cache on (array, iteration order, masked) so the same array is only
    # expanded into graph nodes once per configuration.
    cache_key = _array_id(array, iteration_order, masked)
    # By the end of this function Nodes will be a dictionary with one item
    # per chunk to be processed for this array.
    nodes = self._node_cache.get(cache_key, None)
    if nodes is None:
        if hasattr(array, 'streams_handler'):
            # Aggregations/elementwise arrays: delegate to the stream
            # handler node builder (recurses back into _make_nodes).
            nodes = self._make_stream_handler_nodes(dsk_graph, array,
                                                    iteration_order,
                                                    masked)
        else:
            # Concrete (on-disk / in-memory) array: one lazy-chunk task
            # per chunk index.
            nodes = {}
            chunks = []
            name = '{}\n{}'.format(array.__class__.__name__, array.shape)
            biggus_chunk_func = self.lazy_chunk_creator(name)
            chunk_index_gen = biggus._init.ProducerNode.chunk_index_gen
            # NOTE(review): iteration_order is reversed here — presumably
            # chunk_index_gen expects fastest-varying dimension first;
            # confirm against biggus internals.
            for chunk_key in chunk_index_gen(array.shape,
                                             iteration_order[::-1]):
                biggus_array = array[chunk_key]
                pretty_key = ', '.join(map(slice_repr, chunk_key))
                # Human-readable, uuid-suffixed ID so graph nodes are
                # unique yet descriptive when visualised.
                chunk_id = ('chunk shape: {}\nsource key: [{}]\n\n{}'
                            ''.format(biggus_array.shape, pretty_key,
                                      uuid.uuid4()))
                task = (biggus_chunk_func, chunk_key, biggus_array, masked)
                chunks.append(task)
                assert chunk_id not in dsk_graph
                dsk_graph[chunk_id] = task
                nodes[chunk_id] = task
        self._node_cache[cache_key] = nodes
    return nodes
|
Recursive function that returns the dask items for the given array.
NOTE: Currently assuming that all tasks are a tuple, with the second
item being the keys used to index the source of the respective input
array.
|
entailment
|
def group_keys(shape, *inputs_keys):
    """
    Usecase: Two sets of chunks, one spans the whole of a dimension, the other
    chunked it up. We need to know that we need to collect together the
    chunked form, so that we can work with both sets at the same time.
    Conceptually we have multiple source inputs, each with multiple key sets
    for indexing.
    NOTE: We treat the grouping independently per dimension. In practice this
    means we may be grouping more than is strictly necessary if we were being
    smart about multi-dimensional grouping. Anecdotally, that optimisation is
    currently not worth the implementation effort.
    """
    # Store the result as a slice mapping to a subset of the inputs_keys. We
    # start with the assumption that there will be only one group, and
    # subdivide when we find this not to be the case.
    ndim = len(inputs_keys[0][0])
    # Initial single group: (None, None, None) per dimension == "everything".
    grouped_inputs_keys = {tuple((None, None, None)
                                 for _ in range(ndim)): inputs_keys}
    for dim, dim_len in enumerate(shape):
        # Compute the groups for this dimension.
        # Iterate over a copy since groups may be removed/added in-place.
        for group_keys, group_inputs_keys in grouped_inputs_keys.copy(
                ).items():
            # Extract just this dimension's key from every keys-tuple of
            # every input in the current group.
            group_inputs_key_for_dim = [[keys[dim] for keys in input_keys]
                                        for input_keys in group_inputs_keys]
            grouped_inputs_key = dimension_group_to_lowest_common(
                dim_len, group_inputs_key_for_dim).items()
            # If this group hasn't sub-divided, continue on to next group.
            if len(grouped_inputs_key) == 1:
                continue
            else:
                # Drop the bigger group from the result dictionary and in its
                # place, add all of the subgroups.
                grouped_inputs_keys.pop(group_keys)
                # Make the group keys mutable so that we can inject our
                # subgroups.
                group_keys = list(group_keys)
                group_inputs_keys = list(group_inputs_keys)
                for subgroup_key, subgroup_inputs_key in grouped_inputs_key:
                    group_keys[dim] = subgroup_key
                    # Start with an empty list, one for each input.
                    subgroup_inputs_keys = [[] for _ in subgroup_inputs_key]
                    per_input = zip(group_inputs_keys, subgroup_inputs_key,
                                    subgroup_inputs_keys)
                    for (input_keys, subgroup_input_key,
                            new_input_keys) in per_input:
                        # Move each keys-tuple that belongs in this
                        # subgroup; iterate a copy because we remove
                        # items from input_keys as we go.
                        for keys in input_keys[:]:
                            norm_key = normalize_slice(keys[dim], dim_len)
                            if norm_key in subgroup_input_key:
                                input_keys.remove(keys)
                                new_input_keys.append(keys)
                    subgroup_inputs_keys = tuple(subgroup_inputs_keys)
                    grouped_inputs_keys[tuple(
                        group_keys)] = subgroup_inputs_keys
    return grouped_inputs_keys
|
Usecase: Two sets of chunks, one spans the whole of a dimension, the other
chunked it up. We need to know that we need to collect together the
chunked form, so that we can work with both sets at the same time.
Conceptually we have multiple source inputs, each with multiple key sets
for indexing.
NOTE: We treat the grouping independently per dimension. In practice this
means we may be grouping more than is strictly necessary if we were being
smart about multi-dimensional grouping. Anecdotally, that optimisation is
currently not worth the implementation effort.
|
entailment
|
def get_password(prompt='Password: ', confirm=False):
    """
    <Purpose>
      Return the password entered by the user. If 'confirm' is True, the user is
      asked to enter the previously entered password once again. If they match,
      the password is returned to the caller.
    <Arguments>
      prompt:
        The text of the password prompt that is displayed to the user.
      confirm:
        Boolean indicating whether the user should be prompted for the password
        a second time. The two entered password must match, otherwise the
        user is again prompted for a password.
    <Exceptions>
      None.
    <Side Effects>
      None.
    <Returns>
      The password entered by the user.
    """
    # Are the arguments the expected type?
    # If not, raise 'securesystemslib.exceptions.FormatError'.
    securesystemslib.formats.TEXT_SCHEMA.check_match(prompt)
    securesystemslib.formats.BOOLEAN_SCHEMA.check_match(confirm)
    while True:
        # getpass() prompts the user for a password without echoing
        # the user input.
        password = getpass.getpass(prompt, sys.stderr)
        if not confirm:
            return password
        # Prompt again and accept only a matching re-entry.
        if getpass.getpass('Confirm: ', sys.stderr) == password:
            return password
        print('Mismatch; try again.')
|
<Purpose>
Return the password entered by the user. If 'confirm' is True, the user is
asked to enter the previously entered password once again. If they match,
the password is returned to the caller.
<Arguments>
prompt:
The text of the password prompt that is displayed to the user.
confirm:
Boolean indicating whether the user should be prompted for the password
a second time. The two entered password must match, otherwise the
user is again prompted for a password.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The password entered by the user.
|
entailment
|
def generate_and_write_rsa_keypair(filepath=None, bits=DEFAULT_RSA_KEY_BITS,
                                   password=None):
    """
    <Purpose>
      Generate an RSA key pair. The public portion of the generated RSA key is
      saved to <'filepath'>.pub, whereas the private key portion is saved to
      <'filepath'>. If no password is given, the user is prompted for one. If
      the 'password' is an empty string, the private key is saved unencrypted to
      <'filepath'>. If the filepath is not given, the KEYID is used as the
      filename and the keypair saved to the current working directory.
      The best available form of encryption, for a given key's backend, is used
      with pyca/cryptography. According to their documentation, "it is a curated
      encryption choice and the algorithm may change over time."
    <Arguments>
      filepath:
        The public and private key files are saved to <filepath>.pub and
        <filepath>, respectively. If the filepath is not given, the public and
        private keys are saved to the current working directory as <KEYID>.pub
        and <KEYID>. KEYID is the generated key's KEYID.
      bits:
        The number of bits of the generated RSA key.
      password:
        The password to encrypt 'filepath'. If None, the user is prompted for a
        password. If an empty string is given, the private key is written to
        disk unencrypted.
    <Exceptions>
      securesystemslib.exceptions.FormatError, if the arguments are improperly
      formatted.
    <Side Effects>
      Writes key files to '<filepath>' and '<filepath>.pub'.
    <Returns>
      The 'filepath' of the written key.
    """
    # Does 'bits' have the correct format?
    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
    securesystemslib.formats.RSAKEYBITS_SCHEMA.check_match(bits)
    # Generate the public and private RSA keys.
    rsa_key = securesystemslib.keys.generate_rsa_key(bits)
    public = rsa_key['keyval']['public']
    private = rsa_key['keyval']['private']
    # Default the filepath to <cwd>/<KEYID> when none was given.
    if not filepath:
        filepath = os.path.join(os.getcwd(), rsa_key['keyid'])
    else:
        logger.debug('The filepath has been specified. Not using the key\'s'
                     ' KEYID as the default filepath.')
    # Does 'filepath' have the correct format?
    securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
    # If the caller does not provide a password argument, prompt for one.
    if password is None:  # pragma: no cover
        # It is safe to specify the full path of 'filepath' in the prompt and not
        # worry about leaking sensitive information about the key's location.
        # However, care should be taken when including the full path in exceptions
        # and log files.
        password = get_password('Enter a password for the encrypted RSA'
                                ' key (' + Fore.RED + filepath + Fore.RESET + '): ',
                                confirm=True)
    else:
        logger.debug('The password has been specified. Not prompting for one')
    # Does 'password' have the correct format?
    securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
    # Encrypt the private key if 'password' is set (an empty string means
    # "store unencrypted" by design — see docstring).
    if len(password):
        private = securesystemslib.keys.create_rsa_encrypted_pem(private, password)
    else:
        logger.debug('An empty password was given. Not encrypting the private key.')
    # If the parent directory of filepath does not exist,
    # create it (and all its parent directories, if necessary).
    securesystemslib.util.ensure_parent_dir(filepath)
    # Write the public key (i.e., 'public', which is in PEM format) to
    # '<filepath>.pub'. (1) Create a temporary file, (2) write the contents of
    # the public key, and (3) move to final destination.
    file_object = securesystemslib.util.TempFile()
    file_object.write(public.encode('utf-8'))
    # The temporary file is closed after the final move.
    file_object.move(filepath + '.pub')
    # Write the private key in encrypted PEM format to '<filepath>'.
    # Unlike the public key file, the private key does not have a file
    # extension.
    file_object = securesystemslib.util.TempFile()
    file_object.write(private.encode('utf-8'))
    file_object.move(filepath)
    return filepath
|
<Purpose>
Generate an RSA key pair. The public portion of the generated RSA key is
saved to <'filepath'>.pub, whereas the private key portion is saved to
<'filepath'>. If no password is given, the user is prompted for one. If
the 'password' is an empty string, the private key is saved unencrypted to
<'filepath'>. If the filepath is not given, the KEYID is used as the
filename and the keypair saved to the current working directory.
The best available form of encryption, for a given key's backend, is used
with pyca/cryptography. According to their documentation, "it is a curated
encryption choice and the algorithm may change over time."
<Arguments>
filepath:
The public and private key files are saved to <filepath>.pub and
<filepath>, respectively. If the filepath is not given, the public and
private keys are saved to the current working directory as <KEYID>.pub
and <KEYID>. KEYID is the generated key's KEYID.
bits:
The number of bits of the generated RSA key.
password:
The password to encrypt 'filepath'. If None, the user is prompted for a
password. If an empty string is given, the private key is written to
disk unencrypted.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
<Side Effects>
Writes key files to '<filepath>' and '<filepath>.pub'.
<Returns>
The 'filepath' of the written key.
|
entailment
|
def import_rsa_privatekey_from_file(filepath, password=None,
                                    scheme='rsassa-pss-sha256', prompt=False):
    """
    <Purpose>
      Import the PEM file in 'filepath' containing the private key.
      If password is passed use passed password for decryption.
      If prompt is True use entered password for decryption.
      If no password is passed and either prompt is False or if the password
      entered at the prompt is an empty string, omit decryption, treating the
      key as if it is not encrypted.
      If password is passed and prompt is True, an error is raised. (See below.)
      The returned key is an object in the
      'securesystemslib.formats.RSAKEY_SCHEMA' format.
    <Arguments>
      filepath:
        <filepath> file, an RSA encrypted PEM file. Unlike the public RSA PEM
        key file, 'filepath' does not have an extension.
      password:
        The passphrase to decrypt 'filepath'.
      scheme:
        The signature scheme used by the imported key.
      prompt:
        If True the user is prompted for a passphrase to decrypt 'filepath'.
        Default is False.
    <Exceptions>
      ValueError, if 'password' is passed and 'prompt' is True.
      ValueError, if 'password' is passed and it is an empty string.
      securesystemslib.exceptions.FormatError, if the arguments are improperly
      formatted.
      securesystemslib.exceptions.FormatError, if the entered password is
      improperly formatted.
      IOError, if 'filepath' can't be loaded.
      securesystemslib.exceptions.CryptoError, if a password is available
      and 'filepath' is not a valid key file encrypted using that password.
      securesystemslib.exceptions.CryptoError, if no password is available
      and 'filepath' is not a valid non-encrypted key file.
    <Side Effects>
      The contents of 'filepath' are read, optionally decrypted, and returned.
    <Returns>
      An RSA key object, conformant to 'securesystemslib.formats.RSAKEY_SCHEMA'.
    """
    # Does 'filepath' have the correct format?
    # Ensure the arguments have the appropriate number of objects and object
    # types, and that all dict keys are properly named.
    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
    securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
    # Is 'scheme' properly formatted?
    securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(scheme)
    # 'password' and 'prompt' are mutually exclusive ways of supplying a
    # passphrase; accepting both would be ambiguous.
    if password and prompt:
        raise ValueError("Passing 'password' and 'prompt' True is not allowed.")
    # If 'password' was passed check format and that it is not empty.
    if password is not None:
        securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
        # TODO: PASSWORD_SCHEMA should be securesystemslib.schema.AnyString(min=1)
        if not len(password):
            raise ValueError('Password must be 1 or more characters')
    elif prompt:
        # Password confirmation disabled here, which should ideally happen only
        # when creating encrypted key files (i.e., improve usability).
        # It is safe to specify the full path of 'filepath' in the prompt and not
        # worry about leaking sensitive information about the key's location.
        # However, care should be taken when including the full path in exceptions
        # and log files.
        # NOTE: A user who gets prompted for a password, can only signal that the
        # key is not encrypted by entering no password in the prompt, as opposed
        # to a programmer who can call the function with or without a 'password'.
        # Hence, we treat an empty password here, as if no 'password' was passed.
        password = get_password('Enter a password for an encrypted RSA'
                                ' file \'' + Fore.RED + filepath + Fore.RESET + '\': ',
                                confirm=False) or None
    if password is not None:
        # This check will not fail, because a mal-formatted passed password fails
        # above and an entered password will always be a string (see get_password)
        # However, we include it in case PASSWORD_SCHEMA or get_password changes.
        securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
    else:
        logger.debug('No password was given. Attempting to import an'
                     ' unencrypted file.')
    # Read the contents of 'filepath' that should be a PEM formatted private key.
    with open(filepath, 'rb') as file_object:
        pem_key = file_object.read().decode('utf-8')
    # Convert 'pem_key' to 'securesystemslib.formats.RSAKEY_SCHEMA' format.
    # Raise 'securesystemslib.exceptions.CryptoError' if 'pem_key' is invalid.
    # If 'password' is None decryption will be omitted.
    rsa_key = securesystemslib.keys.import_rsakey_from_private_pem(pem_key,
                                                                   scheme, password)
    return rsa_key
|
<Purpose>
Import the PEM file in 'filepath' containing the private key.
If password is passed use passed password for decryption.
If prompt is True use entered password for decryption.
If no password is passed and either prompt is False or if the password
entered at the prompt is an empty string, omit decryption, treating the
key as if it is not encrypted.
If password is passed and prompt is True, an error is raised. (See below.)
The returned key is an object in the
'securesystemslib.formats.RSAKEY_SCHEMA' format.
<Arguments>
filepath:
<filepath> file, an RSA encrypted PEM file. Unlike the public RSA PEM
key file, 'filepath' does not have an extension.
password:
The passphrase to decrypt 'filepath'.
scheme:
The signature scheme used by the imported key.
prompt:
If True the user is prompted for a passphrase to decrypt 'filepath'.
Default is False.
<Exceptions>
ValueError, if 'password' is passed and 'prompt' is True.
ValueError, if 'password' is passed and it is an empty string.
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.FormatError, if the entered password is
improperly formatted.
IOError, if 'filepath' can't be loaded.
securesystemslib.exceptions.CryptoError, if a password is available
and 'filepath' is not a valid key file encrypted using that password.
securesystemslib.exceptions.CryptoError, if no password is available
and 'filepath' is not a valid non-encrypted key file.
<Side Effects>
The contents of 'filepath' are read, optionally decrypted, and returned.
<Returns>
An RSA key object, conformant to 'securesystemslib.formats.RSAKEY_SCHEMA'.
|
entailment
|
def import_rsa_publickey_from_file(filepath):
    """
    <Purpose>
      Import the RSA key stored in 'filepath'. The key object returned is in the
      format 'securesystemslib.formats.RSAKEY_SCHEMA'. If the RSA PEM in
      'filepath' contains a private key, it is discarded.
    <Arguments>
      filepath:
        <filepath>.pub file, an RSA PEM file.
    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'filepath' is improperly
      formatted.
      securesystemslib.exceptions.Error, if a valid RSA key object cannot be
      generated. This may be caused by an improperly formatted PEM file.
    <Side Effects>
      'filepath' is read and its contents extracted.
    <Returns>
      An RSA key object conformant to 'securesystemslib.formats.RSAKEY_SCHEMA'.
    """
    # Validate the argument type/format first; raises
    # 'securesystemslib.exceptions.FormatError' on mismatch.
    securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
    # Load the PEM-encoded public portion of the RSA key from disk.
    with open(filepath, 'rb') as file_object:
        pem = file_object.read().decode('utf-8')
    # Convert the PEM into 'securesystemslib.formats.RSAKEY_SCHEMA' format,
    # re-raising format problems as a generic import error.
    try:
        return securesystemslib.keys.import_rsakey_from_public_pem(pem)
    except securesystemslib.exceptions.FormatError as e:
        raise securesystemslib.exceptions.Error('Cannot import improperly formatted'
                                                ' PEM file.' + repr(str(e)))
|
<Purpose>
Import the RSA key stored in 'filepath'. The key object returned is in the
format 'securesystemslib.formats.RSAKEY_SCHEMA'. If the RSA PEM in
'filepath' contains a private key, it is discarded.
<Arguments>
filepath:
<filepath>.pub file, an RSA PEM file.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'filepath' is improperly
formatted.
securesystemslib.exceptions.Error, if a valid RSA key object cannot be
generated. This may be caused by an improperly formatted PEM file.
<Side Effects>
'filepath' is read and its contents extracted.
<Returns>
An RSA key object conformant to 'securesystemslib.formats.RSAKEY_SCHEMA'.
|
entailment
|
def generate_and_write_ed25519_keypair(filepath=None, password=None):
  """
  <Purpose>
    Generate an Ed25519 keypair, where the encrypted key (using 'password' as
    the passphrase) is saved to <'filepath'>.  The public key portion of the
    generated Ed25519 key is saved to <'filepath'>.pub.  If the filepath is
    not given, the KEYID is used as the filename and the keypair saved to the
    current working directory.

    The private key is encrypted according to 'cryptography's approach:
    "Encrypt using the best available encryption for a given key's backend.
    This is a curated encryption choice and the algorithm may change over
    time."

    NOTE: if 'password' is an empty string the private key is written to
    disk UNENCRYPTED (see the branch near the end of this function).

  <Arguments>
    filepath:
      The public and private key files are saved to <filepath>.pub and
      <filepath>, respectively.  If the filepath is not given, the public and
      private keys are saved to the current working directory as <KEYID>.pub
      and <KEYID>.  KEYID is the generated key's KEYID.

    password:
      The password, or passphrase, to encrypt the private portion of the
      generated Ed25519 key.  A symmetric encryption key is derived from
      'password', so it is not directly used.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted.

    securesystemslib.exceptions.CryptoError, if 'filepath' cannot be
    encrypted.

  <Side Effects>
    Writes key files to '<filepath>' and '<filepath>.pub'.

  <Returns>
    The 'filepath' of the written key.
  """

  # Generate a new Ed25519 key object.
  ed25519_key = securesystemslib.keys.generate_ed25519_key()

  # If no filepath was given, default to the key's KEYID in the current
  # working directory.
  if not filepath:
    filepath = os.path.join(os.getcwd(), ed25519_key['keyid'])

  else:
    logger.debug('The filepath has been specified. Not using the key\'s'
        ' KEYID as the default filepath.')

  # Does 'filepath' have the correct format?
  # Ensure the arguments have the appropriate number of objects and object
  # types, and that all dict keys are properly named.
  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  # If the caller does not provide a password argument, prompt for one.
  if password is None:  # pragma: no cover

    # It is safe to specify the full path of 'filepath' in the prompt and not
    # worry about leaking sensitive information about the key's location.
    # However, care should be taken when including the full path in exceptions
    # and log files.
    password = get_password('Enter a password for the Ed25519'
        ' key (' + Fore.RED + filepath + Fore.RESET + '): ',
        confirm=True)

  else:
    logger.debug('The password has been specified. Not prompting for one.')

  # Does 'password' have the correct format?
  securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)

  # If the parent directory of filepath does not exist,
  # create it (and all its parent directories, if necessary).
  securesystemslib.util.ensure_parent_dir(filepath)

  # Create a temporary file, write the contents of the public key, and move
  # to final destination.
  file_object = securesystemslib.util.TempFile()

  # Generate the ed25519 public key file contents in metadata format (i.e.,
  # does not include the keyid portion).
  keytype = ed25519_key['keytype']
  keyval = ed25519_key['keyval']
  scheme = ed25519_key['scheme']
  ed25519key_metadata_format = securesystemslib.keys.format_keyval_to_metadata(
      keytype, scheme, keyval, private=False)

  file_object.write(json.dumps(ed25519key_metadata_format).encode('utf-8'))

  # Write the public key to '<filepath>.pub'.  Note: the public portion is
  # serialized as JSON metadata (see json.dumps() above), not PEM.
  # (1) Create a temporary file, (2) write the contents of the public key,
  # and (3) move to final destination.
  # The temporary file is closed after the final move.
  file_object.move(filepath + '.pub')

  # Write the encrypted key string, conformant to
  # 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA', to '<filepath>'.
  file_object = securesystemslib.util.TempFile()

  # Encrypt the private key only if 'password' is non-empty; an empty
  # password means the key is stored unencrypted as plain JSON.
  if len(password):
    ed25519_key = securesystemslib.keys.encrypt_key(ed25519_key, password)

  else:
    logger.debug('An empty password was given. '
        'Not encrypting the private key.')
    ed25519_key = json.dumps(ed25519_key)

  # Raise 'securesystemslib.exceptions.CryptoError' if 'ed25519_key' cannot be
  # encrypted.
  file_object.write(ed25519_key.encode('utf-8'))
  file_object.move(filepath)

  return filepath
|
<Purpose>
Generate an Ed25519 keypair, where the encrypted key (using 'password' as
the passphrase) is saved to <'filepath'>. The public key portion of the
generated Ed25519 key is saved to <'filepath'>.pub. If the filepath is not
given, the KEYID is used as the filename and the keypair saved to the
current working directory.
The private key is encrypted according to 'cryptography's approach:
"Encrypt using the best available encryption for a given key's backend.
This is a curated encryption choice and the algorithm may change over
time."
<Arguments>
filepath:
The public and private key files are saved to <filepath>.pub and
<filepath>, respectively. If the filepath is not given, the public and
private keys are saved to the current working directory as <KEYID>.pub
and <KEYID>. KEYID is the generated key's KEYID.
password:
The password, or passphrase, to encrypt the private portion of the
generated Ed25519 key. A symmetric encryption key is derived from
'password', so it is not directly used.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be encrypted.
<Side Effects>
Writes key files to '<filepath>' and '<filepath>.pub'.
<Returns>
The 'filepath' of the written key.
|
entailment
|
def import_ed25519_publickey_from_file(filepath):
  """
  <Purpose>
    Load the ED25519 public key object (conformant to
    'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath', and return
    it in securesystemslib.formats.ED25519KEY_SCHEMA format.  Any private
    key material found in 'filepath' is discarded.

  <Arguments>
    filepath:
      <filepath>.pub file, a public key file.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if 'filepath' is improperly
    formatted or is an unexpected key type.

  <Side Effects>
    The contents of 'filepath' is read and saved.

  <Returns>
    An ED25519 key object conformant to
    'securesystemslib.formats.ED25519KEY_SCHEMA'.
  """

  # Validate the 'filepath' argument; raises
  # 'securesystemslib.exceptions.FormatError' on mismatch.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  # Public ed25519 keys are stored on disk as JSON metadata.  Parse the file
  # and convert it to a key object that also includes the keyid.
  key_metadata = securesystemslib.util.load_json_file(filepath)
  public_key, _ = securesystemslib.keys.format_metadata_to_key(key_metadata)

  # Defensive, redundant check of 'keytype':
  # 'securesystemslib.keys.format_metadata_to_key()' should have fully
  # validated the loaded metadata already.
  if public_key['keytype'] != 'ed25519':  # pragma: no cover
    message = 'Invalid key type loaded: ' + repr(public_key['keytype'])
    raise securesystemslib.exceptions.FormatError(message)

  return public_key
|
<Purpose>
Load the ED25519 public key object (conformant to
'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath'. Return
'filepath' in securesystemslib.formats.ED25519KEY_SCHEMA format.
If the key object in 'filepath' contains a private key, it is discarded.
<Arguments>
filepath:
<filepath>.pub file, a public key file.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'filepath' is improperly
formatted or is an unexpected key type.
<Side Effects>
The contents of 'filepath' is read and saved.
<Returns>
An ED25519 key object conformant to
'securesystemslib.formats.ED25519KEY_SCHEMA'.
|
entailment
|
def import_ed25519_privatekey_from_file(filepath, password=None, prompt=False):
  """
  <Purpose>
    Import the encrypted ed25519 key file in 'filepath', decrypt it, and
    return the key object in 'securesystemslib.formats.ED25519KEY_SCHEMA'
    format.

    The private key (may also contain the public part) is encrypted with AES
    256 and CTR the mode of operation.  The password is strengthened with
    PBKDF2-HMAC-SHA256.

  <Arguments>
    filepath:
      <filepath> file, an encrypted ed25519 key file.

    password:
      The password, or passphrase, to import the private key (i.e., the
      encrypted key file 'filepath' must be decrypted before the ed25519 key
      object can be returned.

    prompt:
      If True the user is prompted for a passphrase to decrypt 'filepath'.
      Default is False.

  <Exceptions>
    ValueError, if both 'password' and 'prompt' are passed, or if 'password'
    is an empty string.

    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted or the imported key object contains an invalid key type (i.e.,
    not 'ed25519').

    securesystemslib.exceptions.CryptoError, if 'filepath' cannot be
    decrypted.

  <Side Effects>
    'password' is used to decrypt the 'filepath' key file.

  <Returns>
    An ed25519 key object of the form:
    'securesystemslib.formats.ED25519KEY_SCHEMA'.
  """
  # Does 'filepath' have the correct format?
  # Ensure the arguments have the appropriate number of objects and object
  # types, and that all dict keys are properly named.
  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  if password and prompt:
    raise ValueError("Passing 'password' and 'prompt' True is not allowed.")

  # If 'password' was passed check format and that it is not empty.
  if password is not None:
    securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)

    # TODO: PASSWORD_SCHEMA should be securesystemslib.schema.AnyString(min=1)
    if not len(password):
      raise ValueError('Password must be 1 or more characters')

  elif prompt:
    # Password confirmation disabled here, which should ideally happen only
    # when creating encrypted key files (i.e., improve usability).
    # It is safe to specify the full path of 'filepath' in the prompt and not
    # worry about leaking sensitive information about the key's location.
    # However, care should be taken when including the full path in exceptions
    # and log files.
    # NOTE: A user who gets prompted for a password, can only signal that the
    # key is not encrypted by entering no password in the prompt, as opposed
    # to a programmer who can call the function with or without a 'password'.
    # Hence, we treat an empty password here, as if no 'password' was passed.
    # BUG FIX: the prompt previously said "RSA"; this function imports
    # ed25519 keys.
    password = get_password('Enter a password for an encrypted ed25519'
        ' file \'' + Fore.RED + filepath + Fore.RESET + '\': ',
        confirm=False)

    # If user sets an empty string for the password, explicitly set the
    # password to None, because some functions may expect this later.
    if len(password) == 0:  # pragma: no cover
      password = None

  # Finally, regardless of password, try decrypting the key, if necessary.
  # Otherwise, load it straight from the disk.
  with open(filepath, 'rb') as file_object:
    json_str = file_object.read()

  return securesystemslib.keys.\
      import_ed25519key_from_private_json(json_str, password=password)
|
<Purpose>
Import the encrypted ed25519 key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ED25519KEY_SCHEMA' format.
The private key (may also contain the public part) is encrypted with AES
256 and CTR the mode of operation. The password is strengthened with
PBKDF2-HMAC-SHA256.
<Arguments>
filepath:
<filepath> file, an encrypted ed25519 key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ed25519 key
object can be returned.
prompt:
If True the user is prompted for a passphrase to decrypt 'filepath'.
Default is False.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ed25519').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ed25519 key object of the form:
'securesystemslib.formats.ED25519KEY_SCHEMA'.
|
entailment
|
def generate_and_write_ecdsa_keypair(filepath=None, password=None):
  """
  <Purpose>
    Generate an ECDSA keypair, where the encrypted key (using 'password' as
    the passphrase) is saved to <'filepath'>.  The public key portion of the
    generated ECDSA key is saved to <'filepath'>.pub.  If the filepath is not
    given, the KEYID is used as the filename and the keypair saved to the
    current working directory.

    The 'cryptography' library is currently supported.  The private key is
    encrypted according to 'cryptography's approach: "Encrypt using the best
    available encryption for a given key's backend.  This is a curated
    encryption choice and the algorithm may change over time."

  <Arguments>
    filepath:
      The public and private key files are saved to <filepath>.pub and
      <filepath>, respectively.  If the filepath is not given, the public and
      private keys are saved to the current working directory as <KEYID>.pub
      and <KEYID>.  KEYID is the generated key's KEYID.

    password:
      The password, or passphrase, to encrypt the private portion of the
      generated ECDSA key.  A symmetric encryption key is derived from
      'password', so it is not directly used.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted.

    securesystemslib.exceptions.CryptoError, if 'filepath' cannot be
    encrypted.

  <Side Effects>
    Writes key files to '<filepath>' and '<filepath>.pub'.

  <Returns>
    The 'filepath' of the written key.
  """

  # Generate a new ECDSA key object.  The 'cryptography' library is currently
  # supported and performs the actual cryptographic operations.
  ecdsa_key = securesystemslib.keys.generate_ecdsa_key()

  # If no filepath was given, default to the key's KEYID in the current
  # working directory.
  if not filepath:
    filepath = os.path.join(os.getcwd(), ecdsa_key['keyid'])

  else:
    logger.debug('The filepath has been specified. Not using the key\'s'
        ' KEYID as the default filepath.')

  # Does 'filepath' have the correct format?
  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  # If the caller does not provide a password argument, prompt for one.
  if password is None:  # pragma: no cover

    # It is safe to specify the full path of 'filepath' in the prompt and not
    # worry about leaking sensitive information about the key's location.
    # However, care should be taken when including the full path in exceptions
    # and log files.
    password = get_password('Enter a password for the ECDSA'
        ' key (' + Fore.RED + filepath + Fore.RESET + '): ',
        confirm=True)

  else:
    logger.debug('The password has been specified. Not prompting for one')

  # Does 'password' have the correct format?
  securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)

  # If the parent directory of filepath does not exist,
  # create it (and all its parent directories, if necessary).
  securesystemslib.util.ensure_parent_dir(filepath)

  # Create a temporary file, write the contents of the public key, and move
  # to final destination.
  file_object = securesystemslib.util.TempFile()

  # Generate the ECDSA public key file contents in metadata format (i.e., does
  # not include the keyid portion).
  keytype = ecdsa_key['keytype']
  keyval = ecdsa_key['keyval']
  scheme = ecdsa_key['scheme']
  ecdsakey_metadata_format = securesystemslib.keys.format_keyval_to_metadata(
      keytype, scheme, keyval, private=False)

  file_object.write(json.dumps(ecdsakey_metadata_format).encode('utf-8'))

  # Write the public key to '<filepath>.pub'.  Note: the file content is the
  # JSON metadata serialized above.  (1) Create a temporary file, (2) write
  # the contents of the public key, and (3) move to final destination.
  file_object.move(filepath + '.pub')

  # Write the encrypted key string, conformant to
  # 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA', to '<filepath>'.
  file_object = securesystemslib.util.TempFile()

  # Raise 'securesystemslib.exceptions.CryptoError' if 'ecdsa_key' cannot be
  # encrypted.
  # NOTE(review): unlike generate_and_write_ed25519_keypair(), this always
  # calls encrypt_key(), even for an empty password -- confirm intended.
  encrypted_key = securesystemslib.keys.encrypt_key(ecdsa_key, password)
  file_object.write(encrypted_key.encode('utf-8'))
  file_object.move(filepath)

  return filepath
|
<Purpose>
Generate an ECDSA keypair, where the encrypted key (using 'password' as the
passphrase) is saved to <'filepath'>. The public key portion of the
generated ECDSA key is saved to <'filepath'>.pub. If the filepath is not
given, the KEYID is used as the filename and the keypair saved to the
current working directory.
The 'cryptography' library is currently supported. The private key is
encrypted according to 'cryptography's approach: "Encrypt using the best
available encryption for a given key's backend. This is a curated
encryption choice and the algorithm may change over time."
<Arguments>
filepath:
The public and private key files are saved to <filepath>.pub and
<filepath>, respectively. If the filepath is not given, the public and
private keys are saved to the current working directory as <KEYID>.pub
and <KEYID>. KEYID is the generated key's KEYID.
password:
The password, or passphrase, to encrypt the private portion of the
generated ECDSA key. A symmetric encryption key is derived from
'password', so it is not directly used.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be encrypted.
<Side Effects>
Writes key files to '<filepath>' and '<filepath>.pub'.
<Returns>
The 'filepath' of the written key.
|
entailment
|
def import_ecdsa_publickey_from_file(filepath):
  """
  <Purpose>
    Load the ECDSA public key object (conformant to
    'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath', and return
    it in securesystemslib.formats.ECDSAKEY_SCHEMA format.  Any private key
    material found in 'filepath' is discarded.

  <Arguments>
    filepath:
      <filepath>.pub file, a public key file.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if 'filepath' is improperly
    formatted or is an unexpected key type.

  <Side Effects>
    The contents of 'filepath' is read and saved.

  <Returns>
    An ECDSA key object conformant to
    'securesystemslib.formats.ECDSAKEY_SCHEMA'.
  """

  # Validate the 'filepath' argument; raises
  # 'securesystemslib.exceptions.FormatError' on mismatch.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  # Public ECDSA keys are stored on disk as JSON metadata.  Parse the file
  # and convert it to a key object that also includes the keyid.
  key_metadata = securesystemslib.util.load_json_file(filepath)
  public_key, _ = securesystemslib.keys.format_metadata_to_key(key_metadata)

  # Defensive, redundant check of 'keytype':
  # 'securesystemslib.keys.format_metadata_to_key()' should have fully
  # validated the loaded metadata already.
  if public_key['keytype'] != 'ecdsa-sha2-nistp256':  # pragma: no cover
    message = 'Invalid key type loaded: ' + repr(public_key['keytype'])
    raise securesystemslib.exceptions.FormatError(message)

  return public_key
|
<Purpose>
Load the ECDSA public key object (conformant to
'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath'. Return
'filepath' in securesystemslib.formats.ECDSAKEY_SCHEMA format.
If the key object in 'filepath' contains a private key, it is discarded.
<Arguments>
filepath:
<filepath>.pub file, a public key file.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'filepath' is improperly
formatted or is an unexpected key type.
<Side Effects>
The contents of 'filepath' is read and saved.
<Returns>
An ECDSA key object conformant to
'securesystemslib.formats.ECDSAKEY_SCHEMA'.
|
entailment
|
def import_ecdsa_privatekey_from_file(filepath, password=None):
  """
  <Purpose>
    Import the encrypted ECDSA key file in 'filepath', decrypt it, and return
    the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format.

    The 'cryptography' library is currently supported and performs the actual
    cryptographic routine.

  <Arguments>
    filepath:
      <filepath> file, an ECDSA encrypted key file.

    password:
      The password, or passphrase, to import the private key (i.e., the
      encrypted key file 'filepath' must be decrypted before the ECDSA key
      object can be returned.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted or the imported key object contains an invalid key type (i.e.,
    not 'ecdsa-sha2-nistp256').

    securesystemslib.exceptions.CryptoError, if 'filepath' cannot be
    decrypted.

  <Side Effects>
    'password' is used to decrypt the 'filepath' key file.

  <Returns>
    An ECDSA key object of the form:
    'securesystemslib.formats.ECDSAKEY_SCHEMA'.
  """

  # Validate the 'filepath' argument; raises
  # 'securesystemslib.exceptions.FormatError' on mismatch.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  if password is None:  # pragma: no cover
    # No password was supplied, so prompt the user for one.  Confirmation is
    # disabled because it is only useful when creating encrypted key files.
    # The full path is safe to show in the prompt, but take care when
    # including it in exceptions and log files.
    password = get_password('Enter a password for the encrypted ECDSA'
        ' key (' + Fore.RED + filepath + Fore.RESET + '): ',
        confirm=False)

  # Does 'password' have the correct format?
  securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)

  # Load the encrypted contents of 'filepath' before handing them to the
  # decryption routine.
  with open(filepath, 'rb') as key_file:
    encrypted_contents = key_file.read()

  # Derive the encryption key from 'password' (via the 'cryptography'
  # library) and decrypt.  Raises
  # 'securesystemslib.exceptions.CryptoError' if decryption fails.
  private_key = securesystemslib.keys.decrypt_key(
      encrypted_contents.decode('utf-8'), password)

  # Reject any key that is not of the expected ECDSA type.
  if private_key['keytype'] != 'ecdsa-sha2-nistp256':
    message = 'Invalid key type loaded: ' + repr(private_key['keytype'])
    raise securesystemslib.exceptions.FormatError(message)

  # Record the supported keyid hash algorithms so that equal ecdsa keys with
  # different keyids can be associated.
  private_key['keyid_hash_algorithms'] = \
      securesystemslib.settings.HASH_ALGORITHMS

  return private_key
|
<Purpose>
Import the encrypted ECDSA key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format.
The 'cryptography' library is currently supported and performs the actual
cryptographic routine.
<Arguments>
filepath:
<filepath> file, an ECDSA encrypted key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ECDSA key
object can be returned.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ecdsa-sha2-nistp256').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ECDSA key object of the form: 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
|
entailment
|
def wait_until_element_has_focus(self, locator, timeout=None):
    """Waits until the element identified by `locator` gains focus.
    You might rather want to use `Element Focus Should Be Set`
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |"""
    message = "Waiting for focus on '%s'" % (locator)
    self._info(message)
    self._wait_until_no_error(timeout, self._check_element_focus_exp, True,
                              locator, timeout)
|
Waits until the element identified by `locator` has focus.
You might rather want to use `Element Focus Should Be Set`
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |
|
entailment
|
def wait_until_element_does_not_have_focus(self, locator, timeout=None):
    """Waits until the element identified by `locator` loses focus.
    You might rather want to use `Element Focus Should Not Be Set`
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |"""
    message = "Waiting until '%s' does not have focus" % (locator)
    self._info(message)
    self._wait_until_no_error(timeout, self._check_element_focus_exp, False,
                              locator, timeout)
|
Waits until the element identified by `locator` doesn't have focus.
You might rather want to use `Element Focus Should Not Be Set`
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |
|
entailment
|
def wait_until_element_value_is(self, locator, expected, strip=False, timeout=None):
    """Waits until the value of the element identified by `locator` equals
    the expected value exactly. You might want to use `Element Value Should Be` instead.
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected value | My Name Is Slim Shady |
    | strip | boolean, determines whether it should strip the value of the field before comparison | ${True} / ${False} |
    | timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |"""
    message = "Waiting for '%s' value to be '%s'" % (locator, expected)
    self._info(message)
    self._wait_until_no_error(timeout, self._check_element_value_exp, False,
                              locator, expected, strip, timeout)
|
Waits until the element identified by `locator` value is exactly the
expected value. You might want to use `Element Value Should Be` instead.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | boolean, determines whether it should strip the value of the field before comparison | ${True} / ${False} |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |
|
entailment
|
def wait_until_element_value_contains(self, locator, expected, timeout=None):
    """Waits until the value of the element identified by `locator` contains
    the expected value. You might want to use `Element Value Should Contain` instead.
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected value | Slim Shady |
    | timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |"""
    message = "Waiting for '%s' value to contain '%s'" % (locator, expected)
    self._info(message)
    self._wait_until_no_error(timeout, self._check_element_value_exp, True,
                              locator, expected, False, timeout)
|
Waits until the element identified by `locator` contains
the expected value. You might want to use `Element Value Should Contain` instead.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | Slim Shady |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |
|
entailment
|
def set_element_focus(self, locator):
    """Sets focus on the element identified by `locator`. Intended for
    elements meant to receive focus, such as text fields. This keyword also
    waits for the focus to become active by calling the
    `Wait Until Element Has Focus` logic.
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |"""
    message = "Setting focus on element '%s'" % (locator)
    self._info(message)
    target = self._element_find(locator, True, True)
    # Sending the NULL key gives the element focus without typing anything.
    target.send_keys(Keys.NULL)
    self._wait_until_no_error(None, self._check_element_focus, True, locator)
|
Sets focus on the element identified by `locator`. Should
be used with elements meant to have focus only, such as
text fields. This keywords also waits for the focus to be
active by calling the `Wait Until Element Has Focus` keyword.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
|
entailment
|
def clear_input_field(self, locator, method=0):
    """Clears the text field identified by `locator`
    The element.clear() method doesn't seem to work properly on
    all browsers, so this keyword was created to offer alternatives.
    The `method` argument defines the method it should use in order
    to clear the target field.
    0 = Uses the selenium method by doing element.clear \n
    1 = Sets focus on the field and presses CTRL + A, and then DELETE \n
    2 = Repeatedly presses BACKSPACE until the field is empty
    This keyword, when using a method other than '2' does not validate it
    successfully cleared the field, you should handle this verification by yourself.
    When using the method '2', it presses delete until the field's value is empty.
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | method | the clearing method that should be used | no example provided |"""
    element = self._element_find(locator, True, True)
    # Convert once up front instead of on every comparison; still raises
    # ValueError for non-numeric input, exactly as before.
    clearing_method = int(method)
    if clearing_method == 0:
        self._info("Clearing input on element '%s'" % (locator))
        element.clear()
    elif clearing_method == 1:
        self._info("Clearing input on element '%s' by pressing 'CTRL + A + DELETE'" % (locator))
        element.send_keys(Keys.CONTROL + 'a')
        element.send_keys(Keys.DELETE)
    elif clearing_method == 2:
        self._info("Clearing input on element '%s' by repeatedly pressing BACKSPACE" % (locator))
        while len(element.get_attribute('value')) != 0:
            element.send_keys(Keys.BACKSPACE)
    else:
        # Unknown numeric method: fall back to the plain selenium clear(),
        # same as method 0 but without the log message (original behavior).
        element.clear()
|
Clears the text field identified by `locator`
The element.clear() method doesn't seem to work properly on
all browsers, so this keyword was created to offer alternatives.
The `method` argument defines the method it should use in order
to clear the target field.
0 = Uses the selenium method by doing element.clear \n
1 = Sets focus on the field and presses CTRL + A, and then DELETE \n
2 = Repeatedly presses BACKSPACE until the field is empty
This keyword, when using a method other than '2' does not validate it
successfully cleared the field, you should handle this verification by yourself.
When using the method '2', it presses delete until the field's value is empty.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| method | the clearing method that should be used | no example provided |
|
entailment
|
def element_text_color_should_be(self, locator, expected):
    """Verifies that the element identified by `locator` has the expected
    text color (checks the CSS attribute 'color'). The color must be given
    in RGBA format.
    Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA)
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected color | rgba(0, 128, 0, 1) |"""
    message = "Verifying element '%s' has text color '%s'" % (locator, expected)
    self._info(message)
    self._check_element_css_value(locator, 'color', expected)
|
Verifies the element identified by `locator` has the expected
text color (it verifies the CSS attribute color). Color should be in
RGBA format.
Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA)
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected color | rgba(0, 128, 0, 1) |
|
entailment
|
def element_background_color_should_be(self, locator, expected):
    """Verifies that the element identified by `locator` has the expected
    background color (checks the CSS attribute 'background-color'). The
    color must be given in RGBA format.
    Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA)
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected color | rgba(0, 128, 0, 1) |"""
    message = "Verifying element '%s' has background color '%s'" % (locator, expected)
    self._info(message)
    self._check_element_css_value(locator, 'background-color', expected)
|
Verifies the element identified by `locator` has the expected
background color (it verifies the CSS attribute background-color). Color should
be in RGBA format.
Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA)
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected color | rgba(0, 128, 0, 1) |
|
entailment
|
def element_width_should_be(self, locator, expected):
    """Assert that the element matched by `locator` has the given width,
    expressed in pixels.

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected width | 800 |
    """
    message = "Verifying element '%s' width is '%s'" % (locator, expected)
    self._info(message)
    self._check_element_size(locator, 'width', expected)
|
Verifies the element identified by `locator` has the expected
width. Expected width should be in pixels.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected width | 800 |
|
entailment
|
def element_height_should_be(self, locator, expected):
    """Assert that the element matched by `locator` has the given height,
    expressed in pixels.

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected height | 600 |
    """
    message = "Verifying element '%s' height is '%s'" % (locator, expected)
    self._info(message)
    self._check_element_size(locator, 'height', expected)
|
Verifies the element identified by `locator` has the expected
height. Expected height should be in pixels.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected height | 600 |
|
entailment
|
def element_value_should_be(self, locator, expected, strip=False):
    """Assert that the ``value`` attribute of the element matched by
    `locator` equals `expected` (compared as strings).

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected value | My Name Is Slim Shady |
    | strip | Boolean, determines whether it should strip the field's value before comparison or not | ${True} / ${False} |
    """
    self._info("Verifying element '%s' value is '%s'" % (locator, expected))
    element = self._element_find(locator, True, True)
    current = element.get_attribute('value')
    if strip:
        current = current.strip()
    if str(current) != expected:
        raise AssertionError("Element '%s' value was not '%s', it was '%s'" % (locator, expected, current))
|
Verifies the element identified by `locator` has the expected value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | Boolean, determines whether it should strip the field's value before comparison or not | ${True} / ${False} |
|
entailment
|
def element_value_should_not_be(self, locator, value, strip=False):
    """Assert that the ``value`` attribute of the element matched by
    `locator` differs from `value` (compared as strings).

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | value | value it should not be | My Name Is Slim Shady |
    | strip | Boolean, determines whether it should strip the field's value before comparison or not | ${True} / ${False} |
    """
    self._info("Verifying element '%s' value is not '%s'" % (locator, value))
    element = self._element_find(locator, True, True)
    current = str(element.get_attribute('value'))
    if strip:
        current = current.strip()
    if current == value:
        raise AssertionError("Value was '%s' for element '%s' while it shouldn't have" % (current, locator))
|
Verifies the element identified by `locator` is not the specified value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| value | value it should not be | My Name Is Slim Shady |
| strip | Boolean, determines whether it should strip the field's value before comparison or not | ${True} / ${False} |
|
entailment
|
def element_value_should_contain(self, locator, expected):
    """Assert that `expected` is a substring of the ``value`` attribute of
    the element matched by `locator`.

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected value | Slim Shady |
    """
    self._info("Verifying element '%s' value contains '%s'" % (locator, expected))
    element = self._element_find(locator, True, True)
    current = str(element.get_attribute('value'))
    if expected not in current:
        raise AssertionError("Value '%s' did not appear in element '%s'. It's value was '%s'" % (expected, locator, current))
|
Verifies the element identified by `locator` contains the expected value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | Slim Shady |
|
entailment
|
def element_value_should_not_contain(self, locator, value):
    """Assert that `value` is NOT a substring of the ``value`` attribute of
    the element matched by `locator`.

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | value | value it should not contain | Slim Shady |
    """
    self._info("Verifying element '%s' value does not contain '%s'" % (locator, value))
    element = self._element_find(locator, True, True)
    current = str(element.get_attribute('value'))
    if value in current:
        raise AssertionError("Value '%s' was found in element '%s' while it shouldn't have" % (value, locator))
|
Verifies the element identified by `locator` does not contain the specified value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| value | value it should not contain | Slim Shady |
|
entailment
|
def element_focus_should_be_set(self, locator):
    """Assert that the element matched by `locator` currently has focus.

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    """
    message = "Verifying element '%s' focus is set" % locator
    self._info(message)
    self._check_element_focus(True, locator)
|
Verifies the element identified by `locator` has focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
|
entailment
|
def element_focus_should_not_be_set(self, locator):
    """Assert that the element matched by `locator` does NOT have focus.

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    """
    message = "Verifying element '%s' focus is not set" % locator
    self._info(message)
    self._check_element_focus(False, locator)
|
Verifies the element identified by `locator` does not have focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
|
entailment
|
def element_css_attribute_should_be(self, locator, prop, expected):
    """Assert that the CSS attribute `prop` of the element matched by
    `locator` has the value `expected`.

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | prop | targeted css attribute | background-color |
    | expected | expected value | rgba(0, 128, 0, 1) |
    """
    message = "Verifying element '%s' has css attribute '%s' with a value of '%s'" % (locator, prop, expected)
    self._info(message)
    self._check_element_css_value(locator, prop, expected)
|
Verifies the element identified by `locator` has the expected
value for the targeted `prop`.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| prop | targeted css attribute | background-color |
| expected | expected value | rgba(0, 128, 0, 1) |
|
entailment
|
def wait_until_page_contains_elements(self, timeout, *locators):
    """Like `Wait Until Page Contains Element`, but accepts several
    locators and waits until every one of them is present on the page.

    | *Argument* | *Description* | *Example* |
    | timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
    | *locators | Selenium 2 element locator(s) | id=MyId |
    """
    self._wait_until_no_error(timeout, self._wait_for_elements, locators)
|
This is a copy of `Wait Until Page Contains Element` but it allows
multiple arguments in order to wait for more than one element.
| *Argument* | *Description* | *Example* |
| timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
| *locators | Selenium 2 element locator(s) | id=MyId |
|
entailment
|
def wait_until_page_contains_one_of_these_elements(self, timeout, *locators):
    """Wait until at least one of the given locators matches an element on
    the page.

    | *Argument* | *Description* | *Example* |
    | timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
    | *locators | Selenium 2 element locator(s) | id=MyId |
    """
    self._wait_until_no_error(timeout, self._wait_for_at_least_one_element, locators)
|
Waits until at least one of the specified elements is found.
| *Argument* | *Description* | *Example* |
| timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
| *locators | Selenium 2 element locator(s) | id=MyId |
|
entailment
|
def wait_until_page_does_not_contain_these_elements(self, timeout, *locators):
    """Wait until none of the given locators matches any element on the
    page.

    | *Argument* | *Description* | *Example* |
    | timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
    | *locators | Selenium 2 element locator(s) | id=MyId |
    """
    self._wait_until_no_error(timeout, self._wait_for_elements_to_go_away, locators)
|
Waits until all of the specified elements are not found on the page.
| *Argument* | *Description* | *Example* |
| timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
| *locators | Selenium 2 element locator(s) | id=MyId |
|
entailment
|
def tap_key(self, key, complementKey=None):
    """Press `key`, optionally while holding `complementKey` down. For
    example, pass ${VK_TAB} as `key` and ${VK_SHIFT} as `complementKey`
    to press Shift + Tab (back tab).

    | =Argument= | =Description= | =Example= |
    | key | the key to press | ${VK_F4} |
    | complementKey | the key to hold while pressing the key passed in previous argument | ${VK_ALT} |
    """
    driver = self._current_browser()
    actions = ActionChains(driver)
    if complementKey is None:
        actions.send_keys(key).perform()
    else:
        actions.key_down(complementKey).send_keys(key).key_up(complementKey).perform()
|
Presses the specified `key`. The `complementKey` defines the key to hold
when pressing the specified `key`. For example, you could use ${VK_TAB} as `key` and
use ${VK_SHIFT} as `complementKey` in order to press Shift + Tab (back tab)
| =Argument= | =Description= | =Example= |
| key | the key to press | ${VK_F4} |
| complementKey | the key to hold while pressing the key passed in previous argument | ${VK_ALT} |
|
entailment
|
def wait_until_element_is_clickable(self, locator, timeout=None):
    """Retry clicking the element matched by `locator` until the click
    succeeds (or `timeout` expires). Useful for buttons generated in
    real time whose click handlers are not immediately attached; avoids
    unclickable-element exceptions.

    | =Argument= | =Description= | =Example= |
    | locator | Selenium 2 element locator(s) | id=MyId |
    | timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
    """
    self._wait_until_no_error(timeout, self._wait_for_click_to_succeed, locator)
|
Clicks the element specified by `locator` until the operation succeeds. This should be
used with buttons that are generated in real-time and that don't have their click handling available
immediately. This keyword avoids unclickable element exceptions.
| =Argument= | =Description= | =Example= |
| locator | Selenium 2 element locator(s) | id=MyId |
| timeout | maximum time to wait, if set to ${None} it will use Selenium's default timeout | 5s |
|
entailment
|
def _visitor_impl(self, arg):
    """Dispatch `arg` to the visitor method registered for its type.

    Looks up (visitor class qualname, arg type) in the module-level
    ``_methods`` registry; on a miss, walks up the first-base-class chain
    of ``type(arg)`` (stopping before ``object``). Raises
    ``VisitorException`` when no handler is registered.
    """
    owner = _qualname(type(self))
    handler = _methods.get((owner, type(arg)))
    if handler is not None:
        return handler(self, arg)
    # No exact match: search the (single-inheritance) ancestor chain.
    base = arg.__class__.__bases__[0]
    while base != object:
        handler = _methods.get((owner, base))
        if handler is not None:
            return handler(self, arg)
        base = base.__bases__[0]
    raise VisitorException('No visitor found for class ' + str(type(arg)))
|
Actual visitor method implementation.
|
entailment
|
def visitor(arg_type):
    """Decorator that registers the decorated function as the visitor
    method for `arg_type` and replaces it with the generic dispatcher
    ``_visitor_impl``.
    """
    def decorator(fn):
        # Register under (declaring class, argument type), then swap the
        # decorated method for the shared dispatch implementation.
        _methods[(_declaring_class(fn), arg_type)] = fn
        return _visitor_impl
    return decorator
|
Decorator that creates a visitor method.
|
entailment
|
def absolute(parser, token):
    '''
    Return a full absolute URL based on the request host.

    This template tag takes exactly the same parameters as the ``url``
    template tag.
    '''
    url_node = url(parser, token)
    return AbsoluteUrlNode(view_name=url_node.view_name,
                           args=url_node.args,
                           kwargs=url_node.kwargs,
                           asvar=url_node.asvar)
|
Returns a full absolute URL based on the request host.
This template tag takes exactly the same parameters as the url template tag.
|
entailment
|
def site(parser, token):
    '''
    Return a full absolute URL based on the current site.

    This template tag takes exactly the same parameters as the ``url``
    template tag.
    '''
    url_node = url(parser, token)
    return SiteUrlNode(view_name=url_node.view_name,
                       args=url_node.args,
                       kwargs=url_node.kwargs,
                       asvar=url_node.asvar)
|
Returns a full absolute URL based on the current site.
This template tag takes exactly the same parameters as the url template tag.
|
entailment
|
def _wait_until_exp(self, timeout, error, function, *args):
    """This replaces the method from Selenium2Library to fix the major logic error in it"""
    # Substitute the human-readable timeout into the error template once,
    # then poll `function` until it returns truthy or the deadline passes.
    message = error.replace('<TIMEOUT>', self._format_timeout(timeout))
    def wait_func():
        # Success -> None; failure -> the message to raise on timeout.
        return None if function(*args) else message
    self._wait_until_no_error_exp(timeout, wait_func)
|
This replaces the method from Selenium2Library to fix the major logic error in it
|
entailment
|
def _wait_until_no_error_exp(self, timeout, wait_func, *args):
    """This replaces the method from Selenium2Library to fix the major logic error in it"""
    # Resolve the timeout: a Robot time string (e.g. '5s') is converted to
    # seconds; None falls back to the library-wide default.
    timeout = robot.utils.timestr_to_secs(timeout) if timeout is not None else self._timeout_in_secs
    maxtime = time.time() + timeout
    while True:
        try:
            # wait_func returns a falsy value on success, or the error
            # message to raise once the deadline has passed.
            timeout_error = wait_func(*args)
            if not timeout_error: return
            if time.time() > maxtime: raise AssertionError(timeout_error)
            # Poll at a fixed 200 ms interval.
            time.sleep(0.2)
        except AssertionError:
            # Deadline reached: propagate the timeout assertion unchanged.
            raise
        except:
            # Any other exception means "not ready yet": retry until the
            # deadline, then re-raise the last failure.
            # NOTE(review): bare except also swallows SystemExit /
            # KeyboardInterrupt during the wait window — presumably a
            # deliberate retry-on-anything; confirm before narrowing.
            if time.time() > maxtime: raise
            continue
|
This replaces the method from Selenium2Library to fix the major logic error in it
|
entailment
|
def rst(filename):
    '''
    Load an reStructuredText file and sanitize it for PyPI.

    Removes GitHub-only directives that PyPI's renderer does not support:
    - the ``code-block`` directive (replaced with a plain literal block ``::``)

    :param filename: path of the .rst file to read
    :return: the file contents with every code-block directive replaced
    '''
    # Use a context manager so the file handle is always closed (the
    # original leaked it by never calling close()).
    with open(filename) as rst_file:
        content = rst_file.read()
    # NOTE(review): the pattern intentionally allows an optional extra
    # whitespace between '..' and 'code-block' (r'\s? ' = optional blank
    # plus one literal space) — kept as-is to preserve behavior.
    return re.sub(r'\.\.\s? code-block::\s*(\w|\+)+', '::', content)
|
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
|
entailment
|
def get_callable_method_dict(obj):
    """Return a dict mapping public method names of `obj` to their bound callables.
    @param obj: ZOS API Python COM object
    @return: a dictionary of callable methods
    Notes:
        only attributes listed by the dir() function are considered;
        non-callable attributes (properties) and names starting with an
        underscore are skipped.
    """
    callables = {}
    for name in dir(obj):
        candidate = getattr(obj, name, 'none')
        if not name.startswith('_') and callable(candidate):
            callables[name] = candidate
    return callables
|
Returns a dictionary of callable methods of object `obj`.
@param obj: ZOS API Python COM object
@return: a dictionary of callable methods
Notes:
the function only returns the callable attributes that are listed by dir()
function. Properties are not returned.
|
entailment
|
def replicate_methods(srcObj, dstObj):
    """Replicate callable methods from a `srcObj` to `dstObj` (generally a wrapper object).
    @param srcObj: source object
    @param dstObj: destination object of the same type.
    @return : none
    Implementer notes:
    1. Once the methods are mapped from the `srcObj` to the `dstObj`, the method calls will
    not get "routed" through `__getattr__` method (if implemented) in `type(dstObj)` class.
    2. An example of what a 'key' and 'value' look like:
    key: MakeSequential
    value: <bound method IOpticalSystem.MakeSequential of
    <win32com.gen_py.ZOSAPI_Interfaces.IOpticalSystem instance at 0x77183968>>
    """
    # prevent methods that we intend to specialize from being mapped. The specialized
    # (overridden) methods are methods with the same name as the corresponding method in
    # the source ZOS API COM object written for each ZOS API COM object in an associated
    # python script such as i_analyses_methods.py for I_Analyses
    overridden_methods = get_callable_method_dict(type(dstObj)).keys()
    #overridden_attrs = [each for each in type(dstObj).__dict__.keys() if not each.startswith('_')]
    #
    def zos_wrapper_deco(func):
        # Proxy the bound COM method so its return value is wrapped too
        # (wrapped_zos_object leaves non-COM values untouched).
        def wrapper(*args, **kwargs):
            return wrapped_zos_object(func(*args, **kwargs))
        # NOTE(review): im_func / func_code / func_name are Python 2-only
        # attributes; on Python 3 these would be __func__ / __code__ /
        # __name__. This function as written requires Python 2.
        varnames = func.im_func.func_code.co_varnames # alternative is to use inspect.getargspec
        params = [par for par in varnames if par not in ('self', 'ret')] # removes 'self' and 'ret'
        # Synthesize a signature-style docstring, e.g. "MakeSequential(mode)".
        wrapper.__doc__ = func.im_func.func_name + '(' + ', '.join(params) + ')'
        return wrapper
    #
    # Attach a wrapping proxy for every public callable of srcObj that is
    # not deliberately overridden on the wrapper class.
    for key, value in get_callable_method_dict(srcObj).items():
        if key not in overridden_methods:
            setattr(dstObj, key, zos_wrapper_deco(value))
|
Replicate callable methods from a `srcObj` to `dstObj` (generally a wrapper object).
@param srcObj: source object
@param dstObj: destination object of the same type.
@return : none
Implementer notes:
1. Once the methods are mapped from the `srcObj` to the `dstObj`, the method calls will
not get "routed" through `__getattr__` method (if implemented) in `type(dstObj)` class.
2. An example of what a 'key' and 'value' look like:
key: MakeSequential
value: <bound method IOpticalSystem.MakeSequential of
<win32com.gen_py.ZOSAPI_Interfaces.IOpticalSystem instance at 0x77183968>>
|
entailment
|
def get_properties(zos_obj):
    """Return the property names bound to the COM object `zos_obj`, split
    into read-only and read-write groups.
    @param zos_obj: ZOS API Python COM object
    @return prop_get: list of properties that are only getters
    @return prop_set: list of properties that are both getters and setters
    """
    getters = set(zos_obj._prop_map_get_.keys())
    setters = set(zos_obj._prop_map_put_.keys())
    # Every writable property is expected to be readable as well; anything
    # else violates the wrapper's assumption and is an error.
    if not setters.issubset(getters):
        raise NotImplementedError('Assumption all getters are also setters is incorrect!')
    return list(getters - setters), list(setters)
|
Returns a lists of properties bound to the object `zos_obj`
@param zos_obj: ZOS API Python COM object
@return prop_get: list of properties that are only getters
@return prop_set: list of properties that are both getters and setters
|
entailment
|
def managed_wrapper_class_factory(zos_obj):
    """Creates and returns a wrapper class of a ZOS object, exposing the ZOS objects
    methods and properties, and patching custom specialized attributes
    @param zos_obj: ZOS API Python COM object
    """
    # Derive the wrapper class name from the COM object's repr, e.g.
    # '<win32com...IOpticalSystem instance ...>' -> 'IOpticalSystem'.
    cls_name = repr(zos_obj).split()[0].split('.')[-1]
    dispatch_attr = '_' + cls_name.lower() # protocol to be followed to store the ZOS COM object
    cdict = {} # class dictionary
    # patch the properties of the base objects
    base_cls_list = inheritance_dict.get(cls_name, None)
    if base_cls_list:
        for base_cls_name in base_cls_list:
            getters, setters = get_properties(_CastTo(zos_obj, base_cls_name))
            # Each property is exposed with a 'p' prefix via a ZOSPropMapper
            # descriptor; exec is used to inject it into the class dict.
            for each in getters:
                exec("p{} = ZOSPropMapper('{}', '{}', cast_to='{}')".format(each, dispatch_attr, each, base_cls_name), globals(), cdict)
            for each in setters:
                exec("p{} = ZOSPropMapper('{}', '{}', setter=True, cast_to='{}')".format(each, dispatch_attr, each, base_cls_name), globals(), cdict)
    # patch the property attributes of the given ZOS object
    getters, setters = get_properties(zos_obj)
    for each in getters:
        exec("p{} = ZOSPropMapper('{}', '{}')".format(each, dispatch_attr, each), globals(), cdict)
    for each in setters:
        exec("p{} = ZOSPropMapper('{}', '{}', setter=True)".format(each, dispatch_attr, each), globals(), cdict)
    def __init__(self, zos_obj):
        # dispatcher attribute
        cls_name = repr(zos_obj).split()[0].split('.')[-1]
        dispatch_attr = '_' + cls_name.lower() # protocol to be followed to store the ZOS COM object
        # Bypass __setattr__/descriptors by writing into __dict__ directly.
        self.__dict__[dispatch_attr] = zos_obj
        self._dispatch_attr_value = dispatch_attr # used in __getattr__
        # Store base class object
        self._base_cls_list = inheritance_dict.get(cls_name, None)
        # patch the methods of the base class(s) of the given ZOS object
        if self._base_cls_list:
            for base_cls_name in self._base_cls_list:
                replicate_methods(_CastTo(zos_obj, base_cls_name), self)
        # patch the methods of given ZOS object
        replicate_methods(zos_obj, self)
        # mark object as wrapped to prevent it from being wrapped subsequently
        self._wrapped = True
    # Provide a way to make property calls without the prefix p
    def __getattr__(self, attrname):
        # Fall through to the underlying COM object for anything not found
        # on the wrapper, wrapping the result on the way out.
        return wrapped_zos_object(getattr(self.__dict__[self._dispatch_attr_value], attrname))
    def __repr__(self):
        if type(self).__name__ == 'IZOSAPI_Application':
            repr_str = "{.__name__}(NumberOfOpticalSystems = {})".format(type(self), self.pNumberOfOpticalSystems)
        else:
            repr_str = "{.__name__}".format(type(self))
        return repr_str
    cdict['__init__'] = __init__
    cdict['__getattr__'] = __getattr__
    cdict['__repr__'] = __repr__
    # patch custom methods from python files imported as modules
    # (module content must be flush-left for exec to accept it)
    module_import_str = """
try:
    from pyzos.zos_obj_override.{module:} import *
except ImportError:
    pass
""".format(module=cls_name.lower() + '_methods')
    exec(module_import_str, globals(), cdict)
    # Drop __future__ names the star-import may have dragged in.
    _ = cdict.pop('print_function', None)
    _ = cdict.pop('division', None)
    return type(cls_name, (), cdict)
|
Creates and returns a wrapper class of a ZOS object, exposing the ZOS objects
methods and properties, and patching custom specialized attributes
@param zos_obj: ZOS API Python COM object
|
entailment
|
def wrapped_zos_object(zos_obj):
    """Helper function to wrap ZOS API COM objects.
    @param zos_obj : ZOS API Python COM object
    @return: instance of the wrapped ZOS API class. If the input object is
             not a ZOS-API COM object, or if it is already wrapped, the
             object is returned untouched.
    Notes:
        A wrapper class with all methods, properties, and custom overrides
        monkey-patched is created dynamically and instantiated here.
    """
    already_wrapped = hasattr(zos_obj, '_wrapped')
    is_com_object = 'CLSID' in dir(zos_obj)
    if already_wrapped or not is_com_object:
        return zos_obj
    wrapper_cls = managed_wrapper_class_factory(zos_obj)
    return wrapper_cls(zos_obj)
|
Helper function to wrap ZOS API COM objects.
@param zos_obj : ZOS API Python COM object
@return: instance of the wrapped ZOS API class. If the input object is not a ZOS-API
COM object or if it is already wrapped, then the object is returned without
wrapping.
Notes:
The function dynamically creates a wrapped class with all the provided methods,
properties, and custom methods monkey patched; and returns an instance of it.
|
entailment
|
def generate_public_and_private(scheme='ecdsa-sha2-nistp256'):
    """
    <Purpose>
    Generate a pair of ECDSA public and private keys with one of the supported,
    external cryptography libraries. The public and private keys returned
    conform to 'securesystemslib.formats.PEMECDSA_SCHEMA' and
    'securesystemslib.formats.PEMECDSA_SCHEMA', respectively.
    The public ECDSA public key has the PEM format:
    TODO: should we encrypt the private keys returned here? Should the
    create_signature() accept encrypted keys?
    '-----BEGIN PUBLIC KEY-----
    ...
    '-----END PUBLIC KEY-----'
    The private ECDSA private key has the PEM format:
    '-----BEGIN EC PRIVATE KEY-----
    ...
    -----END EC PRIVATE KEY-----'
    >>> public, private = generate_public_and_private()
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
    True
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
    True
    <Arguments>
    scheme:
    A string indicating which algorithm to use for the generation of the
    public and private ECDSA keys. 'ecdsa-sha2-nistp256' is the only
    currently supported ECDSA algorithm, which is supported by OpenSSH and
    specified in RFC 5656 (https://tools.ietf.org/html/rfc5656).
    <Exceptions>
    securesystemslib.exceptions.FormatError, if 'algorithm' is improperly
    formatted.
    securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is an
    unsupported algorithm.
    <Side Effects>
    None.
    <Returns>
    A (public, private) tuple that conform to
    'securesystemslib.formats.PEMECDSA_SCHEMA' and
    'securesystemslib.formats.PEMECDSA_SCHEMA', respectively.
    """
    # Does 'scheme' have the correct format?
    # Verify that 'scheme' is of the correct type, and that it's one of the
    # supported ECDSA . It must conform to
    # 'securesystemslib.formats.ECDSA_SCHEME_SCHEMA'. Raise
    # 'securesystemslib.exceptions.FormatError' if the check fails.
    securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
    public_key = None
    private_key = None
    # An if-clause is strictly not needed, since 'ecdsa_sha2-nistp256' is the
    # only currently supported ECDSA signature scheme. Nevertheless, include the
    # conditional statement to accommodate any schemes that might be added.
    if scheme == 'ecdsa-sha2-nistp256':
        # NOTE(review): the curve is passed as the class ec.SECP256R1 rather
        # than an instance ec.SECP256R1(); newer versions of `cryptography`
        # expect an instance — confirm against the pinned library version.
        private_key = ec.generate_private_key(ec.SECP256R1, default_backend())
        public_key = private_key.public_key()
    # The ECDSA_SCHEME_SCHEMA.check_match() above should have detected any
    # invalid 'scheme'. This is a defensive check.
    else: #pragma: no cover
        raise securesystemslib.exceptions.UnsupportedAlgorithmError('An unsupported'
            ' scheme specified: ' + repr(scheme) + '.\n Supported'
            ' algorithms: ' + repr(_SUPPORTED_ECDSA_SCHEMES))
    # Serialize both halves to PEM: the private key unencrypted in the
    # traditional OpenSSL format, the public key as SubjectPublicKeyInfo.
    private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption())
    public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo)
    return public_pem.decode('utf-8'), private_pem.decode('utf-8')
|
<Purpose>
Generate a pair of ECDSA public and private keys with one of the supported,
external cryptography libraries. The public and private keys returned
conform to 'securesystemslib.formats.PEMECDSA_SCHEMA' and
'securesystemslib.formats.PEMECDSA_SCHEMA', respectively.
The public ECDSA public key has the PEM format:
TODO: should we encrypt the private keys returned here? Should the
create_signature() accept encrypted keys?
'-----BEGIN PUBLIC KEY-----
...
'-----END PUBLIC KEY-----'
The private ECDSA private key has the PEM format:
'-----BEGIN EC PRIVATE KEY-----
...
-----END EC PRIVATE KEY-----'
>>> public, private = generate_public_and_private()
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
<Arguments>
scheme:
A string indicating which algorithm to use for the generation of the
public and private ECDSA keys. 'ecdsa-sha2-nistp256' is the only
currently supported ECDSA algorithm, which is supported by OpenSSH and
specified in RFC 5656 (https://tools.ietf.org/html/rfc5656).
<Exceptions>
securesystemslib.exceptions.FormatError, if 'algorithm' is improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is an
unsupported algorithm.
<Side Effects>
None.
<Returns>
A (public, private) tuple that conform to
'securesystemslib.formats.PEMECDSA_SCHEMA' and
'securesystemslib.formats.PEMECDSA_SCHEMA', respectively.
|
entailment
|
def create_signature(public_key, private_key, data, scheme='ecdsa-sha2-nistp256'):
    """
    <Purpose>
    Return a (signature, scheme) tuple.
    >>> requested_scheme = 'ecdsa-sha2-nistp256'
    >>> public, private = generate_public_and_private(requested_scheme)
    >>> data = b'The quick brown fox jumps over the lazy dog'
    >>> signature, scheme = create_signature(public, private, data, requested_scheme)
    >>> securesystemslib.formats.ECDSASIGNATURE_SCHEMA.matches(signature)
    True
    >>> requested_scheme == scheme
    True
    <Arguments>
    public:
    The ECDSA public key in PEM format.
    private:
    The ECDSA private key in PEM format.
    data:
    Byte data used by create_signature() to generate the signature returned.
    scheme:
    The signature scheme used to generate the signature. For example:
    'ecdsa-sha2-nistp256'.
    <Exceptions>
    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted.
    securesystemslib.exceptions.CryptoError, if a signature cannot be created.
    securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is not
    one of the supported signature schemes.
    <Side Effects>
    None.
    <Returns>
    A signature dictionary conformant to
    'securesystemslib.format.SIGNATURE_SCHEMA'. ECDSA signatures are XX bytes,
    however, the hexlified signature is stored in the dictionary returned.
    """
    # Do 'public_key' and 'private_key' have the correct format?
    # This check will ensure that the arguments conform to
    # 'securesystemslib.formats.PEMECDSA_SCHEMA'. Raise
    # 'securesystemslib.exceptions.FormatError' if the check fails.
    securesystemslib.formats.PEMECDSA_SCHEMA.check_match(public_key)
    # Is 'private_key' properly formatted?
    securesystemslib.formats.PEMECDSA_SCHEMA.check_match(private_key)
    # Is 'scheme' properly formatted?
    securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
    # 'ecdsa-sha2-nistp256' is the only currently supported ECDSA scheme, so this
    # if-clause isn't strictly needed. Nevertheless, the conditional statement
    # is included to accommodate multiple schemes that can potentially be added
    # in the future.
    if scheme == 'ecdsa-sha2-nistp256':
        try:
            # Deserialize the unencrypted PEM private key, then sign the
            # raw data with ECDSA over SHA-256.
            private_key = load_pem_private_key(private_key.encode('utf-8'),
                password=None, backend=default_backend())
            signature = private_key.sign(data, ec.ECDSA(hashes.SHA256()))
        except TypeError as e:
            raise securesystemslib.exceptions.CryptoError('Could not create'
                ' signature: ' + str(e))
    # A defensive check for an invalid 'scheme'. The
    # ECDSA_SCHEME_SCHEMA.check_match() above should have already validated it.
    else: #pragma: no cover
        raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
            ' signature scheme is specified: ' + repr(scheme))
    # NOTE(review): despite the <Returns> section above, this returns the
    # raw (signature bytes, scheme) tuple, not a signature dictionary.
    return signature, scheme
|
<Purpose>
Return a (signature, scheme) tuple.
>>> requested_scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(requested_scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, requested_scheme)
>>> securesystemslib.formats.ECDSASIGNATURE_SCHEMA.matches(signature)
True
>>> requested_scheme == scheme
True
<Arguments>
public:
The ECDSA public key in PEM format.
private:
The ECDSA private key in PEM format.
data:
Byte data used by create_signature() to generate the signature returned.
scheme:
The signature scheme used to generate the signature. For example:
'ecdsa-sha2-nistp256'.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a signature cannot be created.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is not
one of the supported signature schemes.
<Side Effects>
None.
<Returns>
A signature dictionary conformant to
'securesystemslib.format.SIGNATURE_SCHEMA'. ECDSA signatures are XX bytes,
however, the hexlified signature is stored in the dictionary returned.
|
entailment
|
def verify_signature(public_key, scheme, signature, data):
  """
  <Purpose>
    Verify that 'signature' over 'data' was produced by the private key
    paired with 'public_key'.

    >>> scheme = 'ecdsa-sha2-nistp256'
    >>> public, private = generate_public_and_private(scheme)
    >>> data = b'The quick brown fox jumps over the lazy dog'
    >>> signature, scheme = create_signature(public, private, data, scheme)
    >>> verify_signature(public, scheme, signature, data)
    True
    >>> verify_signature(public, scheme, signature, b'bad data')
    False

  <Arguments>
    public_key:
      The ECDSA public key in PEM format, used to verify 'signature'.

    scheme:
      The signature scheme used to generate 'signature' (e.g.
      'ecdsa-sha2-nistp256').

    signature:
      The signature (bytes) expected to have been generated by the private
      key associated with 'public_key'.

    data:
      The byte data over which 'signature' was generated.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if any argument is improperly
    formatted or 'public_key' is not a valid ECDSA key.

  <Side Effects>
    None.

  <Returns>
    Boolean: True if 'signature' is valid for 'data', False otherwise.
  """

  # Validate the argument formats; each check_match() raises
  # securesystemslib.exceptions.FormatError on a mismatch.
  securesystemslib.formats.PEMECDSA_SCHEMA.check_match(public_key)
  securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
  securesystemslib.formats.ECDSASIGNATURE_SCHEMA.check_match(signature)

  ecdsa_key = load_pem_public_key(public_key.encode('utf-8'),
      backend=default_backend())

  # The PEM must actually describe an elliptic-curve public key.
  if not isinstance(ecdsa_key, ec.EllipticCurvePublicKey):
    raise securesystemslib.exceptions.FormatError('Invalid ECDSA public'
      ' key: ' + repr(public_key))

  logger.debug('Loaded a valid ECDSA public key.')

  # verify() raises 'InvalidSignature' when 'signature' does not match;
  # translate both that and a malformed-argument TypeError into False.
  try:
    ecdsa_key.verify(signature, data, ec.ECDSA(hashes.SHA256()))
    return True

  except (TypeError, cryptography.exceptions.InvalidSignature):
    return False
|
<Purpose>
Verify that 'signature' was produced by the private key associated with
'public_key'.
>>> scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, scheme)
>>> verify_signature(public, scheme, signature, data)
True
>>> verify_signature(public, scheme, signature, b'bad data')
False
<Arguments>
public_key:
The ECDSA public key in PEM format. The public key is needed to verify
'signature'.
scheme:
The signature scheme used to generate 'signature'. For example:
'ecdsa-sha2-nistp256'.
signature:
The signature to be verified, which should have been generated by
the private key associated with 'public_key'. 'data'.
data:
Byte data that was used by create_signature() to generate 'signature'.
<Exceptions>
securesystemslib.exceptions.FormatError, if any of the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is
not one of the supported signature schemes.
<Side Effects>
None.
<Returns>
Boolean, indicating whether the 'signature' of data was generated by
the private key associated with 'public_key'.
|
entailment
|
def create_ecdsa_public_and_private_from_pem(pem, password=None):
  """
  <Purpose>
    Create public and private ECDSA keys from a private 'pem'.  The public and
    private keys are strings in PEM format:

    public: '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----',
    private: '-----BEGIN EC PRIVATE KEY----- ... -----END EC PRIVATE KEY-----'}}

    >>> junk, private = generate_public_and_private()
    >>> public, private = create_ecdsa_public_and_private_from_pem(private)
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
    True
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
    True
    >>> passphrase = 'secret'
    >>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
    >>> public, private = create_ecdsa_public_and_private_from_pem(encrypted_pem, passphrase)
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
    True
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
    True

  <Arguments>
    pem:
      A string in PEM format.  The private key is extracted from it and the
      matching public key is derived.

    password: (optional)
      The password, or passphrase, to decrypt the private part of the ECDSA
      key if it is encrypted.  'password' is not used directly as the
      encryption key, a stronger encryption key is derived from it.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted.

    securesystemslib.exceptions.CryptoError, if the private PEM could not be
    imported (e.g. wrong password or unsupported algorithm).

  <Side Effects>
    None.

  <Returns>
    A (public, private) tuple, where both elements are PEM-formatted key
    strings.  (The original docstring claimed a dictionary conforming to
    ECDSAKEY_SCHEMA was returned, which was incorrect.)
  """

  # Does 'pem' have the correct format?
  # This check will ensure 'pem' conforms to
  # 'securesystemslib.formats.PEMECDSA_SCHEMA'.
  securesystemslib.formats.PEMECDSA_SCHEMA.check_match(pem)

  if password is not None:
    securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
    # pyca/cryptography expects the decryption password as bytes.
    password = password.encode('utf-8')

  else:
    logger.debug('The password/passphrase is unset.  The PEM is expected'
      ' to be unencrypted.')

  # Import the private key; the pyca/cryptography library performs the
  # actual parsing and (optional) decryption.
  try:
    private = load_pem_private_key(pem.encode('utf-8'), password=password,
        backend=default_backend())

  except (ValueError, cryptography.exceptions.UnsupportedAlgorithm) as e:
    raise securesystemslib.exceptions.CryptoError('Could not import private'
      ' PEM.\n' + str(e))

  # Derive the public key from the imported private key.
  public = private.public_key()

  # Serialize public and private keys to PEM format.  The private key is
  # re-serialized unencrypted.
  private = private.private_bytes(encoding=serialization.Encoding.PEM,
      format=serialization.PrivateFormat.TraditionalOpenSSL,
      encryption_algorithm=serialization.NoEncryption())
  public = public.public_bytes(encoding=serialization.Encoding.PEM,
      format=serialization.PublicFormat.SubjectPublicKeyInfo)

  return public.decode('utf-8'), private.decode('utf-8')
|
<Purpose>
Create public and private ECDSA keys from a private 'pem'. The public and
private keys are strings in PEM format:
public: '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----',
private: '-----BEGIN EC PRIVATE KEY----- ... -----END EC PRIVATE KEY-----'}}
>>> junk, private = generate_public_and_private()
>>> public, private = create_ecdsa_public_and_private_from_pem(private)
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
>>> passphrase = 'secret'
>>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
>>> public, private = create_ecdsa_public_and_private_from_pem(encrypted_pem, passphrase)
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
True
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
True
<Arguments>
pem:
A string in PEM format. The private key is extracted and returned in
an ecdsakey object.
password: (optional)
The password, or passphrase, to decrypt the private part of the ECDSA key
if it is encrypted. 'password' is not used directly as the encryption
key, a stronger encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if the ECDSA key
pair could not be extracted, possibly due to an unsupported algorithm.
<Side Effects>
None.
<Returns>
A dictionary containing the ECDSA keys and other identifying information.
Conforms to 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
|
entailment
|
def create_ecdsa_encrypted_pem(private_pem, passphrase):
  """
  <Purpose>
    Return the private part of an ECDSA key in encrypted PEM format.  The
    private part is encrypted as done by pyca/cryptography: "Encrypt using
    the best available encryption for a given key's backend.  This is a
    curated encryption choice and the algorithm may change over time."

    >>> junk, private = generate_public_and_private()
    >>> passphrase = 'secret'
    >>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(encrypted_pem)
    True

  <Arguments>
    private_pem:
      The private ECDSA key string in PEM format.

    passphrase:
      The passphrase, or password, to encrypt the private part of the ECDSA
      key.  'passphrase' is not used directly as the encryption key, a
      stronger encryption key is derived from it.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted.

    securesystemslib.exceptions.CryptoError, if an ECDSA key in encrypted PEM
    format cannot be created.

  <Side Effects>
    None.

  <Returns>
    The encrypted private ECDSA key in PEM format, as returned by
    pyca/cryptography's private_bytes() (a bytes object).
  """

  # Does 'private_pem' have the correct format?
  # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
  # Fixed: the original validated the ECDSA PEM against PEMRSA_SCHEMA.
  securesystemslib.formats.PEMECDSA_SCHEMA.check_match(private_pem)

  # Does 'passphrase' have the correct format?
  securesystemslib.formats.PASSWORD_SCHEMA.check_match(passphrase)

  private = load_pem_private_key(private_pem.encode('utf-8'), password=None,
      backend=default_backend())

  encrypted_private_pem = \
    private.private_bytes(encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.BestAvailableEncryption(passphrase.encode('utf-8')))

  return encrypted_private_pem
|
<Purpose>
Return a string in PEM format, where the private part of the ECDSA key is
encrypted. The private part of the ECDSA key is encrypted as done by
pyca/cryptography: "Encrypt using the best available encryption for a given
key's backend. This is a curated encryption choice and the algorithm may
change over time."
>>> junk, private = generate_public_and_private()
>>> passphrase = 'secret'
>>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
>>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(encrypted_pem)
True
<Arguments>
private_pem:
The private ECDSA key string in PEM format.
passphrase:
The passphrase, or password, to encrypt the private part of the ECDSA
key. 'passphrase' is not used directly as the encryption key, a stronger
encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if an ECDSA key in encrypted PEM
format cannot be created.
<Side Effects>
None.
<Returns>
A string in PEM format, where the private RSA portion is encrypted.
Conforms to 'securesystemslib.formats.PEMECDSA_SCHEMA'.
|
entailment
|
def reset(self):
    """Discard all parsing state and prepare this object for reuse."""
    # Per-document output containers.
    self.tables = []
    self.data = [{}]
    self.additional_data = {}
    self.lines = []
    self.set_of_energies = set()
    # Current parsing position / mode.
    self.current_table = None
    self.current_file = None
    self.set_state('document')
|
Clean any processing data, and prepare object for reuse
|
entailment
|
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
|
Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
|
entailment
|
def _set_table(self, data):
    """Enter the 'table' parsing state and open a fresh table.

    A new HEPTable (1-based index) becomes the current table; it is
    appended to ``self.tables`` and its metadata dict to ``self.data``.
    """
    self.set_state('table')
    table = HEPTable(index=len(self.tables) + 1)
    self.current_table = table
    self.tables.append(table)
    self.data.append(table.metadata)
|
Set current parsing state to 'table',
create new table object and add it to tables collection
|
entailment
|
def _parse_table_data(self, data):
    """Parse dataset data of the original HEPData format

    Reads data lines from ``self.current_file`` (until EOF or the next
    '*' keyword line) and fills ``self.current_table.data`` with
    'independent_variables' ('x' columns) and 'dependent_variables'
    ('y' columns), including parsed errors.

    :param data: header of the table to be parsed
    :raise ValueError: if an error value cannot be interpreted
    """
    # The *data line lists the column kinds, e.g. "x : y : y".
    header = data.split(':')
    self.current_table.data_header = header
    for i, h in enumerate(header):
        header[i] = h.strip()

    x_count = header.count('x')
    y_count = header.count('y')

    # *xheader and *yheader lines must precede *data.
    if not self.current_table.xheaders:
        raise BadFormat("*xheader line needs to appear before *data: %s" % data)
    if not self.current_table.yheaders:
        raise BadFormat("*yheader line needs to appear before *data: %s" % data)

    # use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
    # TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
    # If fewer headers/qualifiers were given than there are columns, the
    # last one is reused (deep-copied) for the remaining columns.
    self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
                                                          'values': []} for i in range(x_count)],
                               'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
                                                        'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
                                                        'values': []} for i in range(y_count)]}

    # xy_mapping[i] is the index of column i within its own kind ('x' or
    # 'y'), so column i can be routed to the right variable entry.
    xy_mapping = []

    current_x_count = 0
    current_y_count = 0
    for h in header:
        if h == 'x':
            xy_mapping.append(current_x_count)
            current_x_count += 1
        if h == 'y':
            xy_mapping.append(current_y_count)
            current_y_count += 1

    last_index = self.current_file.tell()
    line = self._strip_comments(self.current_file.readline())

    # Consume data lines until EOF or the next '*' keyword line.
    while line and not line.startswith('*'):
        data_entry_elements = line.split(';')[:-1]  # split and also strip newline character at the end

        if len(data_entry_elements) == len(header):
            # this is kind of a big stretch... I assume that x is always first
            for i, h in enumerate(header):
                single_element = data_entry_elements[i].strip()

                # number patterns copied from old subs.pl parsing script
                pmnum1 = '[-+]?[\d]+\.?[\d]*'
                pmnum2 = '[-+]?\.[\d]+'
                pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
                pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'

                # implement same regular expression matching as in old subs.pl parsing script
                if h == 'x':  # independent variables
                    r = re.search('^(?P<value>' + pmnum + ')$', single_element)
                    if r:  # "value"
                        single_element = {'value': r.group('value')}
                    else:
                        r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
                                      ')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
                        if r:  # "value (BIN=low TO high)"
                            single_element = {'value': float(r.group('value')),
                                              'low': float(r.group('low')), 'high': float(r.group('high'))}
                        else:
                            r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
                                          single_element)
                            if r:  # "low TO high"
                                single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
                            else:  # everything else: don't try to convert to float
                                single_element = {'value': single_element}
                    # TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
                    # "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
                    # Probably not: unsupported formats will just be written as a text string.
                    self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)

                    # extract energy if SQRT(S) is one of the 'x' variables
                    xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
                    # NOTE(review): ('gev') is a plain string, not a tuple, so
                    # 'in' performs a substring test; ('gev',) was probably
                    # intended.  'lower' is also the Python 2 string-module
                    # function — confirm it is imported for Python 3.
                    if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
                        for energy in single_element.values():
                            try:
                                energy = float(energy)
                                self.set_of_energies.add(energy)
                            except:  # non-numeric values (e.g. ranges) are skipped
                                pass

                elif h == 'y':  # dependent variable
                    pmnum_pct = pmnum + '(\s*PCT)?'  # errors can possibly be given as percentages
                    r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
                                  pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
                    element = {'errors': []}
                    if r:  # asymmetric first error
                        element['value'] = r.group('value').strip()
                        err_p = r.group('err_p').strip().lstrip('+')
                        if err_p == '-': err_p = ''  # represent missing error as '-' in oldhepdata format
                        err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
                        err_m = r.group('err_m').strip().lstrip('+')
                        if err_m == '-': err_m = ''  # represent missing error as '-' in oldhepdata format
                        err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
                        # If only the minus error is a percentage, treat the
                        # plus error as one too.
                        if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
                            err_p = err_p + '%'
                        if not err_p and not err_m:
                            raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
                        # Label the first error 'stat' only when a DSYS
                        # systematic component is also present.
                        if r.group('err_sys'):
                            element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
                        else:
                            element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
                    else:
                        r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
                                      pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
                        if r:  # symmetric first error
                            element['value'] = r.group('value').strip()
                            if r.group('error'):
                                error = r.group('error').strip().lstrip('+')
                                error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
                                if r.group('err_sys'):
                                    element['errors'] += [{'label': 'stat', 'symerror': error}]
                                else:
                                    element['errors'] += [{'symerror': error}]
                        else:  # everything else
                            element['value'] = single_element

                    # Collect per-point DSYS systematics plus any table-wide
                    # *dserror entries.
                    err_sys = []
                    if r and r.group('err_sys'):
                        err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')

                    for err in err_sys + self.current_table.dserrors:
                        err = err.strip(' \t,')
                        if not err:
                            continue
                        error = {}
                        label = 'sys'
                        r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
                        if r:  # symmetric systematic error
                            if r.group('label'):
                                label += ',' + r.group('label')
                            error = r.group('error').strip().lstrip('+')
                            error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
                            error = {'symerror': error}
                        else:
                            r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
                                          pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
                            if r:  # asymmetric systematic error
                                if r.group('label'):
                                    label += ',' + r.group('label')
                                err_p = r.group('err_p').strip().lstrip('+')
                                if err_p == '-': err_p = ''  # represent missing error as '-' in oldhepdata format
                                err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
                                err_m = r.group('err_m').strip().lstrip('+')
                                if err_m == '-': err_m = ''  # represent missing error as '-' in oldhepdata format
                                err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
                                if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
                                    err_p = err_p + '%'
                                if not err_p and not err_m:
                                    raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
                                error = {'asymerror': {'plus': err_p, 'minus': err_m}}
                        if not r:
                            # error happened
                            raise ValueError("Error while parsing data line: %s" % line)
                        error['label'] = label
                        # Only attach errors when the value itself was parsed
                        # as a number (i.e. not the "everything else" case).
                        if element['value'] != single_element:
                            element['errors'].append(error)

                    self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)

        elif data_entry_elements:
            raise BadFormat("%s data entry elements but %s expected: %s" %
                            (len(data_entry_elements), len(header), line))

        last_index = self.current_file.tell()
        l = self.current_file.readline()
        line = self._strip_comments(l)

    # Rewind so the next keyword line is re-read by the main loop.
    self.current_file.seek(last_index)

    # extract minimum and maximum from set of energies
    if self.set_of_energies:
        energy_min = min(self.set_of_energies)
        energy_max = max(self.set_of_energies)
        if energy_max > energy_min:
            energy = str(energy_min) + '-' + str(energy_max)
        else:
            energy = energy_min
        self._parse_energies(energy)

    # Heuristic: tables described as matrices get reshaped into the
    # two-independent-variable form.
    if self.current_table.description:
        if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
            reformatted = self._reformat_matrix()
|
Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
|
entailment
|
def _reformat_matrix(self):
    """Transform a square matrix into a format with two independent variables and one dependent variable.

    :return: True if the table was reformatted; False when the table does
        not look like a square matrix (exactly one x-axis and as many
        y-axes as data points, with at least 2 points).
    :rtype: bool
    """
    nxax = len(self.current_table.data['independent_variables'])
    nyax = len(self.current_table.data['dependent_variables'])
    npts = len(self.current_table.data['dependent_variables'][0]['values'])

    # check if 1 x-axis, and npts (>=2) equals number of y-axes
    if nxax != 1 or nyax != npts or npts < 2:
        return False

    # add second independent variable with each value duplicated npts times
    if len(self.current_table.xheaders) == 2:
        xheader = self.current_table.xheaders[1]
    else:
        # No explicit second xheader: reuse a copy of the first one.
        xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
    self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
    for value in self.current_table.data['independent_variables'][0]['values']:
        self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])

    # duplicate values of first independent variable npts times
    self.current_table.data['independent_variables'][0]['values'] \
        = [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]

    # suppress header if different for second y-axis
    if self.current_table.data['dependent_variables'][0]['header'] != \
            self.current_table.data['dependent_variables'][1]['header']:
        self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}

    # remove qualifier if different for second y-axis
    iqdel = []  # list of qualifier indices to be deleted
    for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
        if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
            iqdel.append(iq)
    for iq in iqdel[::-1]:  # need to delete in reverse order
        del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]

    # append values of second and subsequent y-axes to first dependent variable
    for iy in range(1, nyax):
        for value in self.current_table.data['dependent_variables'][iy]['values']:
            self.current_table.data['dependent_variables'][0]['values'].append(value)

    # finally, delete the second and subsequent y-axes in reverse order
    for iy in range(nyax-1, 0, -1):
        del self.current_table.data['dependent_variables'][iy]

    return True
|
Transform a square matrix into a format with two independent variables and one dependent variable.
|
entailment
|
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
|
Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
|
entailment
|
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
|
Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
|
entailment
|
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
|
Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
|
entailment
|
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
|
Reads multiline symbols (usually comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
|
entailment
|
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
|
Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
|
entailment
|
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
|
Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
|
entailment
|
def error_value_processor(value, error):
    """
    If an error is a percentage, we convert to a float, then
    calculate the percentage of the supplied value.

    :param value: base value, e.g. 10
    :param error: e.g. "20.0%"
    :return: the absolute error, e.g. 2.0 for the above case; plain
        numeric strings are converted to float, "" becomes 0.0, and
        non-numeric strings (or non-string values) are returned unchanged
    """
    # Fixed: the original tested isinstance(error, (str, unicode)), which
    # raises NameError on Python 3 where 'unicode' no longer exists.
    if isinstance(error, str):
        try:
            if "%" in error:
                error_float = float(error.replace("%", ""))
                return (value / 100) * error_float
            elif error == "":
                error = 0.0
            else:
                error = float(error)
        except ValueError:
            # Not a number (e.g. free text): return the string unchanged.
            pass
    return error
|
If an error is a percentage, we convert to a float, then
calculate the percentage of the supplied value.
:param value: base value, e.g. 10
:param error: e.g. 20.0%
:return: the absolute error, e.g. 12 for the above case.
|
entailment
|
def get_file_details(filepath, hash_algorithms=['sha256']):
  """
  <Purpose>
    To get file's length and hash information.  One hash is computed for
    each algorithm in 'hash_algorithms' (sha256 by default).  This function
    is used in the signerlib.py and updater.py modules.

  <Arguments>
    filepath:
      Absolute file path of a file.

    hash_algorithms:
      A list of hash algorithm names (e.g. ['sha256', 'sha512']) conforming
      to securesystemslib.formats.HASHALGORITHMS_SCHEMA.

      NOTE: the mutable default list is shared across calls; it is safe
      only because this function never mutates it.

  <Exceptions>
    securesystemslib.exceptions.FormatError: If the arguments are improperly
    formatted, or if the computed hashes do not match HASHDICT_SCHEMA.

    securesystemslib.exceptions.Error: If 'filepath' does not exist.

  <Returns>
    A tuple (length, hashes) describing 'filepath'.
  """

  # Making sure that the format of 'filepath' is a path string.
  # 'securesystemslib.exceptions.FormatError' is raised on incorrect format.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
  securesystemslib.formats.HASHALGORITHMS_SCHEMA.check_match(hash_algorithms)

  # The returned file hashes of 'filepath'.
  file_hashes = {}

  # Does the path exist?
  # Fixed: the error message previously read "doest not exist".
  if not os.path.exists(filepath):
    raise securesystemslib.exceptions.Error('Path ' + repr(filepath) + ' does'
      ' not exist.')

  filepath = os.path.abspath(filepath)

  # Obtaining length of the file.
  file_length = os.path.getsize(filepath)

  # Obtaining hash of the file, one digest per requested algorithm.
  for algorithm in hash_algorithms:
    digest_object = securesystemslib.hash.digest_filename(filepath, algorithm)
    file_hashes.update({algorithm: digest_object.hexdigest()})

  # Performing a format check to ensure 'file_hash' corresponds HASHDICT_SCHEMA.
  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
  securesystemslib.formats.HASHDICT_SCHEMA.check_match(file_hashes)

  return file_length, file_hashes
|
<Purpose>
To get file's length and hash information. The hash is computed using the
sha256 algorithm. This function is used in the signerlib.py and updater.py
modules.
<Arguments>
filepath:
Absolute file path of a file.
hash_algorithms:
<Exceptions>
securesystemslib.exceptions.FormatError: If hash of the file does not match
HASHDICT_SCHEMA.
securesystemslib.exceptions.Error: If 'filepath' does not exist.
<Returns>
A tuple (length, hashes) describing 'filepath'.
|
entailment
|
def ensure_parent_dir(filename):
  """
  <Purpose>
    To ensure existence of the parent directory of 'filename'.  If the
    parent directory of 'filename' does not exist, create it.

    Example: If 'filename' is '/a/b/c/d.txt', and only the directory '/a/b/'
    exists, then directory '/a/b/c/' will be created.

  <Arguments>
    filename:
      A path string.

  <Exceptions>
    securesystemslib.exceptions.FormatError: If 'filename' is improperly
    formatted.

  <Side Effects>
    A directory is created whenever the parent directory of 'filename' does
    not exist.

  <Return>
    None.
  """
  # Ensure 'filename' corresponds to 'PATH_SCHEMA'.
  # Raise 'securesystemslib.exceptions.FormatError' on a mismatch.
  securesystemslib.formats.PATH_SCHEMA.check_match(filename)

  # Split 'filename' into head and tail, check if head exists.
  directory = os.path.split(filename)[0]

  if directory and not os.path.exists(directory):
    # mode = 'rwx------'.  The octal literal 0o700 is the same permission
    # bits as the decimal 448 used previously, but matches how Unix modes
    # are conventionally written.
    os.makedirs(directory, 0o700)
|
<Purpose>
To ensure existence of the parent directory of 'filename'. If the parent
directory of 'name' does not exist, create it.
Example: If 'filename' is '/a/b/c/d.txt', and only the directory '/a/b/'
exists, then directory '/a/b/c/' will be created.
<Arguments>
filename:
A path string.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'filename' is improperly
formatted.
<Side Effects>
A directory is created whenever the parent directory of 'filename' does not
exist.
<Return>
None.
|
entailment
|
def file_in_confined_directories(filepath, confined_directories):
  """
  <Purpose>
    Check whether the directory that contains 'filepath' appears in the
    list/tuple 'confined_directories'.

  <Arguments>
    filepath:
      A string representing the path of a file.  Example path strings viewed
      as files and not directories: 'a/b/c', 'a/b/c.txt'.

    confined_directories:
      A list, or a tuple, of directory strings.

  <Exceptions>
    securesystemslib.exceptions.FormatError: On incorrect format of the input.

  <Return>
    Boolean.  True if 'confined_directories' contains the empty string, or
    the directory of 'filepath'; False otherwise.
  """
  # Validate the arguments; a mismatch raises
  # 'securesystemslib.exceptions.FormatError'.
  securesystemslib.formats.RELPATH_SCHEMA.check_match(filepath)
  securesystemslib.formats.RELPATHS_SCHEMA.check_match(confined_directories)

  # Normalize once, outside the loop, to account for up-level references,
  # etc.  The directory of 'filepath' is what each confined directory is
  # compared against.
  filepath_directory = os.path.dirname(os.path.normpath(filepath))

  for confined_directory in confined_directories:
    # The empty string (arbitrarily chosen) signifies the client is confined
    # to all directories and subdirectories.  No comparison needed.
    if confined_directory == '':
      return True

    # A client may restrict itself to specific directories on the remote
    # repository.  Only the exact directory matches — subdirectories of a
    # confined directory do not.
    if filepath_directory == os.path.normpath(confined_directory):
      return True

  return False
|
<Purpose>
Check if the directory containing 'filepath' is in the list/tuple of
'confined_directories'.
<Arguments>
filepath:
A string representing the path of a file. The following example path
strings are viewed as files and not directories: 'a/b/c', 'a/b/c.txt'.
confined_directories:
A list, or a tuple, of directory strings.
<Exceptions>
securesystemslib.exceptions.FormatError: On incorrect format of the input.
<Return>
Boolean. True, if path is either the empty string
or in 'confined_paths'; False, otherwise.
|
entailment
|
def find_delegated_role(roles, delegated_role):
  """
  <Purpose>
    Find the index, if any, of a role with a given name in a list of roles.

  <Arguments>
    roles:
      The list of roles, each of which must have a 'name' attribute.

    delegated_role:
      The name of the role to be found in the list of roles.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if either argument is
    improperly formatted.

    securesystemslib.exceptions.RepositoryError, if the list of roles has
    invalid data: a role with no name, or two roles sharing the same name.

  <Side Effects>
    No known side effects.

  <Returns>
    The unique index, an integer, in the list of roles.  If 'delegated_role'
    does not exist, 'None' is returned.
  """
  # Do the arguments have the correct format?
  # Raise 'securesystemslib.exceptions.FormatError' if any are improperly
  # formatted.
  securesystemslib.formats.ROLELIST_SCHEMA.check_match(roles)
  securesystemslib.formats.ROLENAME_SCHEMA.check_match(delegated_role)

  # The index of a role, if any, with the same name.
  role_index = None

  # enumerate() replaces the index-based six.moves.xrange() loop.
  for index, role in enumerate(roles):
    name = role.get('name')

    # Every role must have a name.
    if name is None:
      raise securesystemslib.exceptions.RepositoryError('Role with no name.')

    elif name == delegated_role:
      # ...it is the only known role with the same name so far.
      if role_index is None:
        role_index = index

      # ...there are at least two roles with the same name: ambiguous.
      else:
        raise securesystemslib.exceptions.RepositoryError(
            'Duplicate role (' + str(delegated_role) + ').')

    else:
      # Log the name of the role actually being skipped; previously this
      # logged the searched-for 'delegated_role', making the message useless.
      logger.debug('Skipping delegated role: ' + repr(name))

  return role_index
|
<Purpose>
Find the index, if any, of a role with a given name in a list of roles.
<Arguments>
roles:
The list of roles, each of which must have a 'name' attribute.
delegated_role:
The name of the role to be found in the list of roles.
<Exceptions>
securesystemslib.exceptions.RepositoryError, if the list of roles has
invalid data.
<Side Effects>
No known side effects.
<Returns>
The unique index, an integer, in the list of roles. If 'delegated_role'
does not exist, 'None' is returned.
|
entailment
|
def ensure_all_targets_allowed(rolename, list_of_targets, parent_delegations):
  """
  <Purpose>
    Ensure that the list of targets specified by 'rolename' are allowed; this
    is determined by inspecting the 'delegations' field of the parent role of
    'rolename'.  If a target specified by 'rolename' is not found in the
    delegations field of 'metadata_object_of_parent', raise an exception.  The
    top-level role 'targets' is allowed to list any target file, so this
    function does not raise an exception if 'rolename' is 'targets'.

    Targets allowed are either explicitly listed under the 'paths' field, or
    implicitly exist under a subdirectory of a parent directory listed under
    'paths'.  A parent role may delegate trust to all files under a particular
    directory, including files in subdirectories, by simply listing the
    directory (e.g., '/packages/source/Django/', the equivalent of
    '/packages/source/Django/*').  Targets listed in hashed bins are also
    validated (i.e., its calculated path hash prefix must be delegated by the
    parent role).

    TODO: Should the TUF spec restrict the repository to one particular
    algorithm when calculating path hash prefixes (currently restricted to
    SHA256)?  Should we allow the repository to specify in the role dictionary
    the algorithm used for these generated hashed paths?

  <Arguments>
    rolename:
      The name of the role whose targets must be verified.  This is a
      role name and should not end in '.json'.  Examples: 'root', 'targets',
      'targets/linux/x86'.

    list_of_targets:
      The targets of 'rolename', as listed in targets field of the 'rolename'
      metadata.  'list_of_targets' are target paths relative to the targets
      directory of the repository.  The delegations of the parent role are
      checked to verify that the targets of 'list_of_targets' are valid.

    parent_delegations:
      The parent delegations of 'rolename'.  The metadata object stores
      the allowed paths and path hash prefixes of child delegations in its
      'delegations' attribute.

  <Exceptions>
    securesystemslib.exceptions.FormatError:
      If any of the arguments are improperly formatted.

    securesystemslib.exceptions.ForbiddenTargetError:
      If the targets of 'metadata_role' are not allowed according to
      the parent's metadata file.  The 'paths' and 'path_hash_prefixes'
      attributes are verified.

    securesystemslib.exceptions.RepositoryError:
      If the parent of 'rolename' has not made a delegation to 'rolename'.

  <Side Effects>
    None.

  <Returns>
    None.
  """
  # Do the arguments have the correct format?
  # Ensure the arguments have the appropriate number of objects and object
  # types, and that all dict keys are properly named.  Raise
  # 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
  securesystemslib.formats.ROLENAME_SCHEMA.check_match(rolename)
  securesystemslib.formats.RELPATHS_SCHEMA.check_match(list_of_targets)
  securesystemslib.formats.DELEGATIONS_SCHEMA.check_match(parent_delegations)

  # Return if 'rolename' is 'targets'.  'targets' is not a delegated role.
  # Any target file listed in 'targets' is allowed.
  if rolename == 'targets':
    return

  # The allowed targets of delegated roles are stored in the parent's
  # metadata file.  Iterate 'list_of_targets' and confirm they are trusted,
  # or their root parent directory exists in the role delegated paths, or
  # path hash prefixes, of the parent role.  First, locate 'rolename' in the
  # 'roles' attribute of 'parent_delegations'.
  roles = parent_delegations['roles']
  role_index = find_delegated_role(roles, rolename)

  # Ensure the delegated role exists prior to extracting trusted paths from
  # the parent's 'paths', or trusted path hash prefixes from the parent's
  # 'path_hash_prefixes'.
  if role_index is not None:
    role = roles[role_index]
    allowed_child_paths = role.get('paths')
    allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')
    actual_child_targets = list_of_targets

    if allowed_child_path_hash_prefixes is not None:
      consistent = paths_are_consistent_with_hash_prefixes

      # 'actual_child_targets' (i.e., 'list_of_targets') should have length
      # greater than zero due to the format check above.
      if not consistent(actual_child_targets,
                        allowed_child_path_hash_prefixes):
        message = repr(rolename) + ' specifies a target that does not' + \
            ' have a path hash prefix listed in its parent role.'
        raise securesystemslib.exceptions.ForbiddenTargetError(message)

    elif allowed_child_paths is not None:
      # Check that each delegated target is either explicitly listed or a
      # parent directory is found under role['paths'], otherwise raise an
      # exception.  If the parent role explicitly lists target file paths in
      # 'paths', this loop will run in O(n^2), the worst-case.  The
      # repository maintainer will likely delegate entire directories, and
      # opt for explicit file paths if the targets in a directory are
      # delegated to different roles/developers.
      # NOTE(review): matching uses fnmatch, so entries in 'paths' behave as
      # shell-style glob patterns (e.g., 'packages/*'), not only as literal
      # file paths or directory prefixes.
      for child_target in actual_child_targets:
        for allowed_child_path in allowed_child_paths:
          if fnmatch.fnmatch(child_target, allowed_child_path):
            break

        # The for/else clause: no allowed path matched 'child_target'.
        else:
          raise securesystemslib.exceptions.ForbiddenTargetError(
              'Role ' + repr(rolename) + ' specifies'
              ' target' + repr(child_target) + ',' + ' which is not an allowed'
              ' path according to the delegations set by its parent role.')

    else:
      # 'role' should have been validated when it was downloaded.
      # The 'paths' or 'path_hash_prefixes' attributes should not be missing,
      # so raise an error in case this clause is reached.
      raise securesystemslib.exceptions.FormatError(repr(role) + ' did not'
          ' contain one of the required fields ("paths" or'
          ' "path_hash_prefixes").')

  # Raise an exception if the parent has not delegated to the specified
  # 'rolename' child role.
  else:
    raise securesystemslib.exceptions.RepositoryError('The parent role has'
        ' not delegated to ' + repr(rolename) + '.')
|
<Purpose>
Ensure that the list of targets specified by 'rolename' are allowed; this
is determined by inspecting the 'delegations' field of the parent role of
'rolename'. If a target specified by 'rolename' is not found in the
delegations field of 'metadata_object_of_parent', raise an exception. The
top-level role 'targets' is allowed to list any target file, so this
function does not raise an exception if 'rolename' is 'targets'.
Targets allowed are either explicitly listed under the 'paths' field, or
implicitly exist under a subdirectory of a parent directory listed under
'paths'. A parent role may delegate trust to all files under a particular
directory, including files in subdirectories, by simply listing the
directory (e.g., '/packages/source/Django/', the equivalent of
'/packages/source/Django/*'). Targets listed in hashed bins are also
validated (i.e., its calculated path hash prefix must be delegated by the
parent role).
TODO: Should the TUF spec restrict the repository to one particular
algorithm when calculating path hash prefixes (currently restricted to
SHA256)? Should we allow the repository to specify in the role dictionary
the algorithm used for these generated hashed paths?
<Arguments>
rolename:
The name of the role whose targets must be verified. This is a
role name and should not end in '.json'. Examples: 'root', 'targets',
'targets/linux/x86'.
list_of_targets:
The targets of 'rolename', as listed in targets field of the 'rolename'
metadata. 'list_of_targets' are target paths relative to the targets
directory of the repository. The delegations of the parent role are
checked to verify that the targets of 'list_of_targets' are valid.
parent_delegations:
The parent delegations of 'rolename'. The metadata object stores
the allowed paths and path hash prefixes of child delegations in its
'delegations' attribute.
<Exceptions>
securesystemslib.exceptions.FormatError:
If any of the arguments are improperly formatted.
securesystemslib.exceptions.ForbiddenTargetError:
If the targets of 'metadata_role' are not allowed according to
the parent's metadata file. The 'paths' and 'path_hash_prefixes'
attributes are verified.
securesystemslib.exceptions.RepositoryError:
If the parent of 'rolename' has not made a delegation to 'rolename'.
<Side Effects>
None.
<Returns>
None.
|
entailment
|
def paths_are_consistent_with_hash_prefixes(paths, path_hash_prefixes):
  """
  <Purpose>
    Determine whether a list of paths are consistent with their alleged path
    hash prefixes.  By default, the SHA256 hash function is used.

  <Arguments>
    paths:
      A list of paths for which their hashes will be checked.

    path_hash_prefixes:
      The list of path hash prefixes with which to check the list of paths.

  <Exceptions>
    securesystemslib.exceptions.FormatError:
      If the arguments are improperly formatted.

  <Side Effects>
    No known side effects.

  <Returns>
    A Boolean indicating whether or not the paths are consistent with the
    hash prefixes.
  """
  # Validate the arguments; 'securesystemslib.exceptions.FormatError' is
  # raised on a mismatch.  The schemas also guarantee both lists are
  # non-empty.
  securesystemslib.formats.RELPATHS_SCHEMA.check_match(paths)
  securesystemslib.formats.PATH_HASH_PREFIXES_SCHEMA.check_match(path_hash_prefixes)

  # Every path's hash must begin with at least one allowed prefix; stop at
  # the first path without a matching prefix.
  consistent = False

  for path in paths:
    path_hash = get_target_hash(path)
    consistent = any(
        path_hash.startswith(prefix) for prefix in path_hash_prefixes)

    if not consistent:
      break

  return consistent
|
<Purpose>
Determine whether a list of paths are consistent with their alleged path
hash prefixes. By default, the SHA256 hash function is used.
<Arguments>
paths:
A list of paths for which their hashes will be checked.
path_hash_prefixes:
The list of path hash prefixes with which to check the list of paths.
<Exceptions>
securesystemslib.exceptions.FormatError:
If the arguments are improperly formatted.
<Side Effects>
No known side effects.
<Returns>
A Boolean indicating whether or not the paths are consistent with the
hash prefix.
|
entailment
|
def get_target_hash(target_filepath):
  """
  <Purpose>
    Compute the hash of 'target_filepath'.  This is useful in conjunction
    with the "path_hash_prefixes" attribute in a delegated targets role,
    which tells us which paths it is implicitly responsible for.

    The repository may optionally organize targets into hashed bins to ease
    target delegations and role metadata management.  The use of consistent
    hashing allows for a uniform distribution of targets into bins.

  <Arguments>
    target_filepath:
      The path to the target file on the repository.  This will be relative
      to the 'targets' (or equivalent) directory on a given mirror.

  <Exceptions>
    securesystemslib.exceptions.FormatError: if 'target_filepath' is
    improperly formatted.

  <Side Effects>
    None.

  <Returns>
    The hex digest (a string) of 'target_filepath'.
  """
  # Raise 'securesystemslib.exceptions.FormatError' on a malformed path.
  securesystemslib.formats.RELPATH_SCHEMA.check_match(target_filepath)

  # Hash the UTF-8 encoding of the path using the module-wide HASH_FUNCTION;
  # clients are assumed to use the same algorithm and encoding when deciding
  # which bin holds a target.
  digest_object = securesystemslib.hash.digest(HASH_FUNCTION)
  digest_object.update(target_filepath.encode('utf-8'))

  return digest_object.hexdigest()
|
<Purpose>
Compute the hash of 'target_filepath'. This is useful in conjunction with
the "path_hash_prefixes" attribute in a delegated targets role, which tells
us which paths it is implicitly responsible for.
The repository may optionally organize targets into hashed bins to ease
target delegations and role metadata management. The use of consistent
hashing allows for a uniform distribution of targets into bins.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The hash of 'target_filepath'.
|
entailment
|
def import_json():
  """
  <Purpose>
    Tries to import the json module.  We used to fall back to the simplejson
    module, but support for that module has been dropped.  This interface is
    kept intact for backwards compatibility.

  <Arguments>
    None.

  <Exceptions>
    ImportError: on failure to import the json module.

  <Side Effects>
    Caches the imported module in the module-level '_json_module'.

  <Return>
    json module
  """
  global _json_module

  # Import at most once; subsequent calls return the cached module.
  if _json_module is None:
    try:
      _json_module = __import__('json')

    # The 'json' module ships with every supported Python version, so this
    # clause should be unreachable in practice.
    except ImportError: #pragma: no cover
      raise ImportError('Could not import the json module')

  return _json_module
|
<Purpose>
Tries to import json module. We used to fall back to the simplejson module,
but we have dropped support for that module. We are keeping this interface
intact for backwards compatibility.
<Arguments>
None.
<Exceptions>
ImportError: on failure to import the json module.
<Side Effects>
None.
<Return>
json module
|
entailment
|
def load_json_string(data):
  """
  <Purpose>
    Deserialize 'data' (JSON string) to a Python object.

  <Arguments>
    data:
      A JSON string.

  <Exceptions>
    securesystemslib.exceptions.Error, if 'data' cannot be deserialized to a
    Python object.

  <Side Effects>
    None.

  <Returns>
    Deserialized object.  For example, a dictionary.
  """
  try:
    return json.loads(data)

  # The argument was not a string at all.
  except TypeError:
    raise securesystemslib.exceptions.Error(
        'Invalid JSON string: ' + repr(data))

  # The string was not well-formed JSON.
  except ValueError:
    raise securesystemslib.exceptions.Error(
        'Cannot deserialize to a Python object: ' + repr(data))
|
<Purpose>
Deserialize 'data' (JSON string) to a Python object.
<Arguments>
data:
A JSON string.
<Exceptions>
securesystemslib.exceptions.Error, if 'data' cannot be deserialized to a
Python object.
<Side Effects>
None.
<Returns>
Deserialized object. For example, a dictionary.
|
entailment
|
def load_json_file(filepath):
  """
  <Purpose>
    Deserialize a JSON object from a file containing the object.

  <Arguments>
    filepath:
      Absolute path of JSON file.  Files ending in '.gz' are treated as
      gzip-compressed JSON.

  <Exceptions>
    securesystemslib.exceptions.FormatError: If 'filepath' is improperly
    formatted.

    securesystemslib.exceptions.Error: If 'filepath' cannot be deserialized
    to a Python object.

    IOError in case of runtime IO exceptions.

  <Side Effects>
    None.

  <Return>
    Deserialized object.  For example, a dictionary.
  """
  # Making sure that the format of 'filepath' is a path string.
  # securesystemslib.exceptions.FormatError is raised on incorrect format.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  # The file is most likely gzipped.
  if filepath.endswith('.gz'):
    logger.debug('gzip.open(' + str(filepath) + ')')
    # Use a context manager so the gzip handle is closed rather than leaked;
    # its decompressed contents live on in the StringIO object.
    with gzip.open(filepath) as gzip_object:
      fileobject = six.StringIO(gzip_object.read().decode('utf-8'))

  else:
    logger.debug('open(' + str(filepath) + ')')
    fileobject = open(filepath)

  try:
    return json.load(fileobject)

  except (ValueError, TypeError):
    raise securesystemslib.exceptions.Error('Cannot deserialize to a'
        ' Python object: ' + repr(filepath))

  finally:
    # 'finally' runs on both the success and failure paths, so the file is
    # closed exactly once (the original closed it twice on success).
    fileobject.close()
|
<Purpose>
Deserialize a JSON object from a file containing the object.
<Arguments>
filepath:
Absolute path of JSON file.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'filepath' is improperly
formatted.
securesystemslib.exceptions.Error: If 'filepath' cannot be deserialized to
a Python object.
IOError in case of runtime IO exceptions.
<Side Effects>
None.
<Return>
Deserialized object. For example, a dictionary.
|
entailment
|
def digests_are_equal(digest1, digest2):
  """
  <Purpose>
    While protecting against timing attacks, compare the hexadecimal
    arguments and determine if they are equal.

  <Arguments>
    digest1:
      The first hexadecimal string value to compare.

    digest2:
      The second hexadecimal string value to compare.

  <Exceptions>
    securesystemslib.exceptions.FormatError: If the arguments are improperly
    formatted.

  <Side Effects>
    None.

  <Return>
    Return True if 'digest1' is equal to 'digest2', False otherwise.
  """
  # Python permits a function-scope import; 'hmac' may not be imported at
  # the top of this module, so bring it into scope here.
  import hmac

  # Ensure the arguments are hexadecimal strings.
  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
  securesystemslib.formats.HEX_SCHEMA.check_match(digest1)
  securesystemslib.formats.HEX_SCHEMA.check_match(digest2)

  # hmac.compare_digest() is the standard library's vetted timing-attack-
  # resistant comparison; it replaces the hand-rolled character loop, whose
  # early return on mismatched lengths was unvetted.  The HEX_SCHEMA checks
  # above guarantee both arguments are ASCII strings, which compare_digest()
  # requires for str inputs.
  return hmac.compare_digest(digest1, digest2)
|
<Purpose>
While protecting against timing attacks, compare the hexadecimal arguments
and determine if they are equal.
<Arguments>
digest1:
The first hexadecimal string value to compare.
digest2:
The second hexadecimal string value to compare.
<Exceptions>
securesystemslib.exceptions.FormatError: If the arguments are improperly
formatted.
<Side Effects>
None.
<Return>
Return True if 'digest1' is equal to 'digest2', False otherwise.
|
entailment
|
def _default_temporary_directory(self, prefix):
  """__init__ helper.

  Create 'self.temporary_file' as a tempfile.NamedTemporaryFile whose name
  begins with 'prefix'.  Raises securesystemslib.exceptions.Error if the
  file cannot be created.
  """
  try:
    self.temporary_file = tempfile.NamedTemporaryFile(prefix=prefix)
  except OSError as err: # pragma: no cover
    # NOTE(review): the message says "directory" although the failure is in
    # creating a temporary *file* — presumably because the usual cause is an
    # unusable system temp directory.  Confirm wording is intentional.
    logger.critical('Cannot create a system temporary directory: '+repr(err))
    raise securesystemslib.exceptions.Error(err)
|
__init__ helper.
|
entailment
|
def read(self, size=None):
  """
  <Purpose>
    Read the specified number of bytes.  If 'size' is not given, the whole
    file is read and the file pointer is left at the beginning of the file.

  <Arguments>
    size:
      Number of bytes to be read, or None to read the entire file.

  <Exceptions>
    securesystemslib.exceptions.FormatError: if 'size' is invalid (not a
    positive integer).

  <Return>
    String of data.
  """
  if size is not None:
    # Only a positive integer is a valid read size.
    if not (isinstance(size, int) and size > 0):
      raise securesystemslib.exceptions.FormatError

    return self.temporary_file.read(size)

  # No size given: rewind, read everything, and rewind again so the caller
  # observes the file pointer at the beginning of the file.
  self.temporary_file.seek(0)
  whole_file = self.temporary_file.read()
  self.temporary_file.seek(0)

  return whole_file
|
<Purpose>
Read specified number of bytes. If size is not specified then the whole
file is read and the file pointer is placed at the beginning of the file.
<Arguments>
size:
Number of bytes to be read.
<Exceptions>
securesystemslib.exceptions.FormatError: if 'size' is invalid.
<Return>
String of data.
|
entailment
|
def write(self, data, auto_flush=True):
  """
  <Purpose>
    Writes a data string to the file.

  <Arguments>
    data:
      A string containing some data.

    auto_flush:
      Boolean argument, if set to 'True', all data will be flushed from
      internal buffer.

  <Exceptions>
    None.

  <Return>
    None.
  """
  self.temporary_file.write(data)
  # Flush Python's internal buffer unless the caller opted out.
  if auto_flush:
    self.flush()
|
<Purpose>
Writes a data string to the file.
<Arguments>
data:
A string containing some data.
auto_flush:
Boolean argument, if set to 'True', all data will be flushed from
internal buffer.
<Exceptions>
None.
<Return>
None.
|
entailment
|
def move(self, destination_path):
  """
  <Purpose>
    Copies 'self.temporary_file' to a non-temp file at 'destination_path'
    and closes 'self.temporary_file' so that it is removed.

  <Arguments>
    destination_path:
      Path to store the file in.

  <Exceptions>
    None.

  <Return>
    None.
  """
  self.flush()
  self.seek(0)

  # The 'with' statement guarantees the destination file is closed even if
  # the copy or fsync below raises (the original leaked the handle on error).
  with open(destination_path, 'wb') as destination_file:
    shutil.copyfileobj(self.temporary_file, destination_file)
    # Force the destination file to be written to disk from Python's
    # internal buffers and the operating system's buffers.
    # os.fsync() should follow flush().
    destination_file.flush()
    os.fsync(destination_file.fileno())

  # Closing the temporary file destroys it.
  self.close_temp_file()
|
<Purpose>
Copies 'self.temporary_file' to a non-temp file at 'destination_path' and
closes 'self.temporary_file' so that it is removed.
<Arguments>
destination_path:
Path to store the file in.
<Exceptions>
None.
<Return>
None.
|
entailment
|
def decompress_temp_file_object(self, compression):
  """
  <Purpose>
    To decompress a compressed temp file object.  Decompression is performed
    on a temp file object that is compressed, this occurs after downloading
    a compressed file.  For instance if a compressed version of some meta
    file in the repository is downloaded, the temp file containing the
    compressed meta file will be decompressed using this function.
    Note that after calling this method, write() can no longer be called.

                            meta.json.gz
                               |...[download]
                        temporary_file (containing meta.json.gz)
                        /             \
               temporary_file          _orig_file
          containing meta.json          containing meta.json.gz
          (decompressed data)

  <Arguments>
    compression:
      A string indicating the type of compression that was used to compress
      a file.  Only gzip is allowed.

  <Exceptions>
    securesystemslib.exceptions.FormatError: If 'compression' is improperly formatted.

    securesystemslib.exceptions.Error: If an invalid compression is given.

    securesystemslib.exceptions.DecompressionError: If the compression failed for any reason.

  <Side Effects>
    'self._orig_file' is used to store the original data of 'temporary_file'.

  <Return>
    None.
  """
  # Does 'compression' have the correct format?
  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
  securesystemslib.formats.NAME_SCHEMA.check_match(compression)

  # Decompression may be applied at most once per TempFile; a non-None
  # '_orig_file' means it has already happened.
  if self._orig_file is not None:
    raise securesystemslib.exceptions.Error('Can only set compression on a'
      ' TempFile once.')

  if compression != 'gzip':
    raise securesystemslib.exceptions.Error('Only gzip compression is'
      ' supported.')

  # Rewind so GzipFile reads the compressed stream from the beginning, then
  # stash the compressed original so close_temp_file() can close it later.
  self.seek(0)
  self._compression = compression
  self._orig_file = self.temporary_file

  try:
    # Decompress fully into memory, then replace 'temporary_file' with a
    # fresh temp file holding the decompressed bytes.
    gzip_file_object = gzip.GzipFile(fileobj=self.temporary_file, mode='rb')
    uncompressed_content = gzip_file_object.read()
    self.temporary_file = tempfile.NamedTemporaryFile()
    self.temporary_file.write(uncompressed_content)
    self.flush()

  except Exception as exception:
    # NOTE(review): on failure '_orig_file' and '_compression' remain set,
    # so the "only once" guard above makes a retry impossible — confirm
    # this is the intended behavior.
    raise securesystemslib.exceptions.DecompressionError(exception)
|
<Purpose>
To decompress a compressed temp file object. Decompression is performed
on a temp file object that is compressed, this occurs after downloading
a compressed file. For instance if a compressed version of some meta
file in the repository is downloaded, the temp file containing the
compressed meta file will be decompressed using this function.
Note that after calling this method, write() can no longer be called.
meta.json.gz
|...[download]
temporary_file (containing meta.json.gz)
/ \
temporary_file _orig_file
containing meta.json containing meta.json.gz
(decompressed data)
<Arguments>
compression:
A string indicating the type of compression that was used to compress
a file. Only gzip is allowed.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'compression' is improperly formatted.
securesystemslib.exceptions.Error: If an invalid compression is given.
securesystemslib.exceptions.DecompressionError: If the compression failed for any reason.
<Side Effects>
'self._orig_file' is used to store the original data of 'temporary_file'.
<Return>
None.
|
entailment
|
def close_temp_file(self):
  """
  <Purpose>
    Closes the temporary file object.  'close_temp_file' mimics usual
    file.close(), however the temporary file destroys itself when
    'close_temp_file' is called.  Further, if compression was set, the
    second temporary file instance 'self._orig_file' is also closed so
    that no open temporary files are left behind.

  <Arguments>
    None.

  <Exceptions>
    None.

  <Side Effects>
    Closes 'self._orig_file'.

  <Return>
    None.
  """
  self.temporary_file.close()
  # If compression has been set (see decompress_temp_file_object()), we need
  # to explicitly close the original compressed file object too.
  if self._orig_file is not None:
    self._orig_file.close()
|
<Purpose>
Closes the temporary file object. 'close_temp_file' mimics usual
file.close(), however temporary file destroys itself when
'close_temp_file' is called. Further if compression is set, second
temporary file instance 'self._orig_file' is also closed so that no open
temporary files are left open.
<Arguments>
None.
<Exceptions>
None.
<Side Effects>
Closes 'self._orig_file'.
<Return>
None.
|
entailment
|
def matches(self, object):
  """
  <Purpose>
    Return True if 'object' matches this schema, False if it doesn't.
    Callers wishing to signal an error on a failed match should call
    check_match() instead, which raises 'exceptions.FormatError'.
  """
  try:
    self.check_match(object)
    return True

  except securesystemslib.exceptions.FormatError:
    return False
|
<Purpose>
Return True if 'object' matches this schema, False if it doesn't.
If the caller wishes to signal an error on a failed match, check_match()
should be called, which will raise a 'exceptions.FormatError' exception.
|
entailment
|
def get_event(self, *etypes, timeout=None):
  """
  Return a single event object or block until an event is
  received and return it.

  params:
   - etypes(str): If defined, Slack event type(s) not matching
      the filter will be ignored. See https://api.slack.com/events for
      a listing of valid event types.
   - timeout(int): Max time, in seconds, to block waiting for new event

  Raises any exception object found on the event queue, and Queue.Empty
  if 'timeout' expires before a matching event arrives.
  """
  self._validate_etypes(*etypes)

  # Iterate instead of recursing: the original re-called itself for every
  # filtered event, which could exhaust the stack under a flood of
  # non-matching events.
  while True:
    start = time.time()
    e = self._eventq.get(timeout=timeout)

    # Worker threads enqueue exceptions to propagate them to the consumer.
    if isinstance(e, Exception):
      raise e

    # NOTE: the 'events_recieved' spelling is kept as-is; it is an
    # externally visible stats key.
    self._stats['events_recieved'] += 1

    # No filter, or the event passes the filter: hand it to the caller.
    if not etypes or e.type in etypes:
      return e

    # Filtered out: drop the event and keep waiting with the remaining time.
    log.debug('ignoring filtered event: {}'.format(e.json))
    self._stats['events_dropped'] += 1

    if timeout:
      # Clamp at zero: a negative remaining timeout would make Queue.get()
      # raise ValueError instead of the expected Queue.Empty.
      timeout = max(0, timeout - (time.time() - start))
|
Return a single event object or block until an event is
received and return it.
- etypes(str): If defined, Slack event type(s) not matching
the filter will be ignored. See https://api.slack.com/events for
a listing of valid event types.
- timeout(int): Max time, in seconds, to block waiting for new event
|
entailment
|
def events(self, *etypes, idle_timeout=None):
    """
    returns a blocking generator yielding Slack event objects
    params:
    - etypes(str): If defined, Slack event type(s) not matching
      the filter will be ignored. See https://api.slack.com/events for
      a listing of valid event types.
    - idle_timeout(int): optional maximum amount of time (in seconds)
      to wait between events before returning
    """
    # Keep yielding until the client transitions to the stopped state,
    # or until no event arrives within the idle window.
    while True:
        if self._state == STATE_STOPPED:
            return
        try:
            event = self.get_event(*etypes, timeout=idle_timeout)
        except Queue.Empty:
            log.info('idle timeout reached for events()')
            return
        yield event
|
returns a blocking generator yielding Slack event objects
params:
- etypes(str): If defined, Slack event type(s) not matching
the filter will be ignored. See https://api.slack.com/events for
a listing of valid event types.
- idle_timeout(int): optional maximum amount of time (in seconds)
to wait between events before returning
|
entailment
|
def send_msg(self, text, channel, confirm=True):
    """
    Send a message to a channel or group via Slack RTM socket, returning
    the resulting message object
    params:
    - text(str): Message text to send
    - channel(Channel): Target channel
    - confirm(bool): If True, wait for a reply-to confirmation before returning.
    """
    self._send_id += 1
    msg = SlackMsg(self._send_id, channel.id, text)
    self.ws.send(msg.json)
    self._stats['messages_sent'] += 1
    if not confirm:
        return msg
    # Block until the server acknowledges our message id via a
    # reply_to event, then stamp the message as sent.
    for event in self.events():
        if event.get('reply_to') == self._send_id:
            msg.sent = True
            msg.ts = event.ts
            return msg
|
Send a message to a channel or group via Slack RTM socket, returning
the resulting message object
params:
- text(str): Message text to send
- channel(Channel): Target channel
- confirm(bool): If True, wait for a reply-to confirmation before returning.
|
entailment
|
def _process_event(self, event):
""" Extend event object with User and Channel objects """
if event.get('user'):
event.user = self.lookup_user(event.get('user'))
if event.get('channel'):
event.channel = self.lookup_channel(event.get('channel'))
if self.user.id in event.mentions:
event.mentions_me = True
event.mentions = [ self.lookup_user(uid) for uid in event.mentions ]
return event
|
Extend event object with User and Channel objects
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.