sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def diff_args(subparsers):
"""Add command line options for the diff operation"""
diff_parser = subparsers.add_parser('diff')
secretfile_args(diff_parser)
vars_args(diff_parser)
base_args(diff_parser)
thaw_from_args(diff_parser) | Add command line options for the diff operation | entailment |
def seed_args(subparsers):
"""Add command line options for the seed operation"""
seed_parser = subparsers.add_parser('seed')
secretfile_args(seed_parser)
vars_args(seed_parser)
seed_parser.add_argument('--mount-only',
dest='mount_only',
help='Only mount paths if needed',
default=False,
action='store_true')
thaw_from_args(seed_parser)
seed_parser.add_argument('--remove-unknown',
dest='remove_unknown',
action='store_true',
help='Remove mountpoints that are not '
'defined in the Secretfile')
base_args(seed_parser) | Add command line options for the seed operation | entailment |
def thaw_from_args(parser):
"""Adds command line options for things related to inline thawing
of icefiles"""
parser.add_argument('--thaw-from',
dest='thaw_from',
help='Thaw an ICE file containing secrets')
parser.add_argument('--gpg-password-path',
dest='gpg_pass_path',
help='Vault path of GPG passphrase location') | Adds command line options for things related to inline thawing
of icefiles | entailment |
def thaw_args(subparsers):
"""Add command line options for the thaw operation"""
thaw_parser = subparsers.add_parser('thaw')
thaw_parser.add_argument('--gpg-password-path',
dest='gpg_pass_path',
help='Vault path of GPG passphrase location')
thaw_parser.add_argument('--ignore-missing',
dest='ignore_missing',
help='Warn when secrets are missing from icefiles'
'instead of exiting',
action='store_true',
default=False)
secretfile_args(thaw_parser)
archive_args(thaw_parser)
vars_args(thaw_parser)
base_args(thaw_parser) | Add command line options for the thaw operation | entailment |
def freeze_args(subparsers):
"""Add command line options for the freeze operation"""
freeze_parser = subparsers.add_parser('freeze')
freeze_parser.add_argument('--icefile-prefix',
dest='icefile_prefix',
help='Prefix of icefilename')
secretfile_args(freeze_parser)
archive_args(freeze_parser)
vars_args(freeze_parser)
base_args(freeze_parser) | Add command line options for the freeze operation | entailment |
def password_args(subparsers):
"""Add command line options for the set_password operation"""
password_parser = subparsers.add_parser('set_password')
password_parser.add_argument('vault_path',
help='Path which contains password'
'secret to be udpated')
base_args(password_parser) | Add command line options for the set_password operation | entailment |
def vars_args(parser):
"""Add various command line options for external vars"""
parser.add_argument('--extra-vars',
dest='extra_vars',
help='Extra template variables',
default=[],
type=str,
action='append')
parser.add_argument('--extra-vars-file',
dest='extra_vars_file',
help='YAML files full of variables',
default=[],
type=str,
action='append') | Add various command line options for external vars | entailment |
def parser_factory(fake_args=None):
"""Return a proper contextual OptionParser"""
parser = ArgumentParser(description='aomi')
subparsers = parser.add_subparsers(dest='operation',
help='Specify the data '
' or extraction operation')
extract_file_args(subparsers)
environment_args(subparsers)
aws_env_args(subparsers)
seed_args(subparsers)
render_args(subparsers)
diff_args(subparsers)
freeze_args(subparsers)
thaw_args(subparsers)
template_args(subparsers)
password_args(subparsers)
token_args(subparsers)
help_args(subparsers)
export_args(subparsers)
if fake_args is None:
return parser, parser.parse_args()
return parser, parser.parse_args(fake_args) | Return a proper contextual OptionParser | entailment |
def template_runner(client, parser, args):
"""Executes template related operations"""
if args.builtin_list:
aomi.template.builtin_list()
elif args.builtin_info:
aomi.template.builtin_info(args.builtin_info)
elif args.template and args.destination and args.vault_paths:
aomi.render.template(client, args.template,
args.destination,
args.vault_paths,
args)
else:
parser.print_usage()
sys.exit(2)
sys.exit(0) | Executes template related operations | entailment |
def ux_actions(parser, args):
"""Handle some human triggers actions"""
# cryptorito uses native logging (as aomi should tbh)
normal_fmt = '%(message)s'
if hasattr(args, 'verbose') and args.verbose and args.verbose >= 2:
logging.basicConfig(level=logging.DEBUG)
elif hasattr(args, 'verbose') and args.verbose >= 1:
logging.basicConfig(level=logging.INFO, format=normal_fmt)
else:
logging.basicConfig(level=logging.WARN, format=normal_fmt)
if args.operation == 'help':
help_me(parser, args) | Handle some human triggers actions | entailment |
def do_thaw(client, args):
"""Execute the thaw operation, pulling in an actual Vault
client if neccesary"""
vault_client = None
if args.gpg_pass_path:
vault_client = client.connect(args)
aomi.filez.thaw(vault_client, args.icefile, args)
sys.exit(0) | Execute the thaw operation, pulling in an actual Vault
client if neccesary | entailment |
def action_runner(parser, args):
"""Run appropriate action, or throw help"""
ux_actions(parser, args)
client = aomi.vault.Client(args)
if args.operation == 'extract_file':
aomi.render.raw_file(client.connect(args),
args.vault_path, args.destination, args)
sys.exit(0)
elif args.operation == 'environment':
aomi.render.env(client.connect(args),
args.vault_paths, args)
sys.exit(0)
elif args.operation == 'aws_environment':
aomi.render.aws(client.connect(args),
args.vault_path, args)
sys.exit(0)
elif args.operation == 'seed':
aomi.validation.gitignore(args)
aomi.seed_action.seed(client.connect(args), args)
sys.exit(0)
elif args.operation == 'render':
aomi.seed_action.render(args.directory, args)
sys.exit(0)
elif args.operation == 'export':
aomi.seed_action.export(client.connect(args), args)
sys.exit(0)
elif args.operation == 'diff':
aomi.seed_action.diff(client.connect(args), args)
sys.exit(0)
elif args.operation == 'template':
template_runner(client.connect(args), parser, args)
elif args.operation == 'token':
print(client.connect(args).token)
sys.exit(0)
elif args.operation == 'set_password':
aomi.util.password(client.connect(args), args.vault_path)
sys.exit(0)
elif args.operation == 'freeze':
aomi.filez.freeze(args.icefile, args)
sys.exit(0)
elif args.operation == 'thaw':
do_thaw(client, args)
parser.print_usage()
sys.exit(2) | Run appropriate action, or throw help | entailment |
def main():
"""Entrypoint, sweet Entrypoint"""
parser, args = parser_factory()
try:
action_runner(parser, args)
# this is our uncaught handler so yes we want to actually
# catch every error. the format may vary based on the error handler tho
except Exception as uncaught: # pylint: disable=broad-except
unhandled(uncaught, args)
sys.exit(1) | Entrypoint, sweet Entrypoint | entailment |
def grok_ttl(secret):
"""Parses the TTL information"""
ttl_obj = {}
lease_msg = ''
if 'lease' in secret:
ttl_obj['lease'] = secret['lease']
lease_msg = "lease:%s" % (ttl_obj['lease'])
if 'lease_max' in secret:
ttl_obj['lease_max'] = secret['lease_max']
elif 'lease' in ttl_obj:
ttl_obj['lease_max'] = ttl_obj['lease']
if 'lease_max' in ttl_obj:
lease_msg = "%s lease_max:%s" % (lease_msg, ttl_obj['lease_max'])
return ttl_obj, lease_msg | Parses the TTL information | entailment |
def my_version():
"""Return the version, checking both packaged and development locations"""
if os.path.exists(resource_filename(__name__, 'version')):
return resource_string(__name__, 'version')
return open(os.path.join(os.path.dirname(__file__),
"..", "version")).read() | Return the version, checking both packaged and development locations | entailment |
def abspath(raw):
"""Return what is hopefully a OS independent path."""
path_bits = []
if raw.find('/') != -1:
path_bits = raw.split('/')
elif raw.find('\\') != -1:
path_bits = raw.split('\\')
else:
path_bits = [raw]
return os.path.abspath(os.sep.join(path_bits)) | Return what is hopefully a OS independent path. | entailment |
def hard_path(path, prefix_dir):
"""Returns an absolute path to either the relative or absolute file."""
relative = abspath("%s/%s" % (prefix_dir, path))
a_path = abspath(path)
if os.path.exists(relative):
LOG.debug("using relative path %s (%s)", relative, path)
return relative
LOG.debug("using absolute path %s", a_path)
return a_path | Returns an absolute path to either the relative or absolute file. | entailment |
def is_tagged(required_tags, has_tags):
"""Checks if tags match"""
if not required_tags and not has_tags:
return True
elif not required_tags:
return False
found_tags = []
for tag in required_tags:
if tag in has_tags:
found_tags.append(tag)
return len(found_tags) == len(required_tags) | Checks if tags match | entailment |
def cli_hash(list_of_kv):
"""Parse out a hash from a list of key=value strings"""
ev_obj = {}
for extra_var in list_of_kv:
ev_list = extra_var.split('=')
key = ev_list[0]
val = '='.join(ev_list[1:]) # b64 and other side effects
ev_obj[key] = val
return ev_obj | Parse out a hash from a list of key=value strings | entailment |
def merge_dicts(dict_a, dict_b):
"""Deep merge of two dicts"""
obj = {}
for key, value in iteritems(dict_a):
if key in dict_b:
if isinstance(dict_b[key], dict):
obj[key] = merge_dicts(value, dict_b.pop(key))
else:
obj[key] = value
for key, value in iteritems(dict_b):
obj[key] = value
return obj | Deep merge of two dicts | entailment |
def get_tty_password(confirm):
"""When returning a password from a TTY we assume a user
is entering it on a keyboard so we ask for confirmation."""
LOG.debug("Reading password from TTY")
new_password = getpass('Enter Password: ', stream=sys.stderr)
if not new_password:
raise aomi.exceptions.AomiCommand("Must specify a password")
if not confirm:
return new_password
confirm_password = getpass('Again, Please: ', stream=sys.stderr)
if confirm_password != new_password:
raise aomi.exceptions.AomiCommand("Passwords do not match")
return new_password | When returning a password from a TTY we assume a user
is entering it on a keyboard so we ask for confirmation. | entailment |
def path_pieces(vault_path):
"""Will return a two part tuple comprising of the vault path
and the key with in the stored object"""
path_bits = vault_path.split('/')
path = '/'.join(path_bits[0:len(path_bits) - 1])
key = path_bits[len(path_bits) - 1]
return path, key | Will return a two part tuple comprising of the vault path
and the key with in the stored object | entailment |
def mount_for_path(path, client):
"""Returns the mountpoint for this path"""
backend_data = client.list_secret_backends()['data']
backends = [mnt for mnt in backend_data.keys()]
path_bits = path.split('/')
if len(path_bits) == 1:
vault_path = "%s/" % path
if vault_path in backends:
return vault_path[0:len(vault_path) - 1]
else:
for i in range(1, len(path_bits) + 1):
vault_path = "%s/" % '/'.join(path_bits[0:i])
if vault_path in backends:
return vault_path[0:len(vault_path) - 1]
return None | Returns the mountpoint for this path | entailment |
def backend_type(path, client):
"""Returns the type of backend at the given mountpoint"""
backends = client.list_secret_backends()['data']
vault_path = "%s/" % path
return backends[vault_path]['type'] | Returns the type of backend at the given mountpoint | entailment |
def load_word_file(filename):
"""Loads a words file as a list of lines"""
words_file = resource_filename(__name__, "words/%s" % filename)
handle = open(words_file, 'r')
words = handle.readlines()
handle.close()
return words | Loads a words file as a list of lines | entailment |
def choose_one(things):
"""Returns a random entry from a list of things"""
choice = SystemRandom().randint(0, len(things) - 1)
return things[choice].strip() | Returns a random entry from a list of things | entailment |
def subdir_path(directory, relative):
"""Returns a file path relative to another path."""
item_bits = directory.split(os.sep)
relative_bits = relative.split(os.sep)
for i, _item in enumerate(item_bits):
if i == len(relative_bits) - 1:
return os.sep.join(item_bits[i:])
else:
if item_bits[i] != relative_bits[i]:
return None
return None | Returns a file path relative to another path. | entailment |
def open_maybe_binary(filename):
"""Opens something that might be binary but also
might be "plain text"."""
if sys.version_info >= (3, 0):
data = open(filename, 'rb').read()
try:
return data.decode('utf-8')
except UnicodeDecodeError:
return data
return open(filename, 'r').read() | Opens something that might be binary but also
might be "plain text". | entailment |
def ensure_dir(path):
"""Ensures a directory exists"""
if not (os.path.exists(path) and
os.path.isdir(path)):
os.mkdir(path) | Ensures a directory exists | entailment |
def clean_tmpdir(path):
"""Invoked atexit, this removes our tmpdir"""
if os.path.exists(path) and \
os.path.isdir(path):
rmtree(path) | Invoked atexit, this removes our tmpdir | entailment |
def dict_unicodeize(some_dict):
"""Ensure that every string in a dict is properly represented
by unicode strings"""
# some python 2/3 compat
if isinstance(some_dict, ("".__class__, u"".__class__)):
if sys.version_info >= (3, 0):
return some_dict
return some_dict.decode('utf-8')
elif isinstance(some_dict, collections.Mapping):
return dict(map(dict_unicodeize, iteritems(some_dict)))
elif isinstance(some_dict, collections.Iterable):
return type(some_dict)(map(dict_unicodeize, some_dict))
return some_dict | Ensure that every string in a dict is properly represented
by unicode strings | entailment |
def diff_dict(dict1, dict2, ignore_missing=False):
"""Performs a base type comparison between two dicts"""
unidict1 = dict_unicodeize(dict1)
unidict2 = dict_unicodeize(dict2)
if ((not ignore_missing) and (len(unidict1) != len(unidict2))) or \
(ignore_missing and (len(unidict1) >= len(unidict2))):
return True
for comp_k, comp_v in iteritems(unidict1):
if comp_k not in unidict2:
return True
else:
if comp_v != unidict2[comp_k]:
return True
return False | Performs a base type comparison between two dicts | entailment |
def map_val(dest, src, key, default=None, src_key=None):
"""Will ensure a dict has values sourced from either
another dict or based on the provided default"""
if not src_key:
src_key = key
if src_key in src:
dest[key] = src[src_key]
else:
if default is not None:
dest[key] = default | Will ensure a dict has values sourced from either
another dict or based on the provided default | entailment |
def filtered_context(context):
"""Filters a context
This will return a new context with only the resources that
are actually available for use. Uses tags and command line
options to make determination."""
ctx = Context(context.opt)
for resource in context.resources():
if resource.child:
continue
if resource.filtered():
ctx.add(resource)
return ctx | Filters a context
This will return a new context with only the resources that
are actually available for use. Uses tags and command line
options to make determination. | entailment |
def ensure_backend(resource, backend, backends, opt, managed=True):
"""Ensure the backend for a resource is properly in context"""
existing_mount = find_backend(resource.mount, backends)
if not existing_mount:
new_mount = backend(resource, opt, managed=managed)
backends.append(new_mount)
return new_mount
return existing_mount | Ensure the backend for a resource is properly in context | entailment |
def find_model(config, obj, mods):
"""Given a list of mods (as returned by py_resources) attempts to
determine if a given Python obj fits one of the models"""
for mod in mods:
if mod[0] != config:
continue
if len(mod) == 2:
return mod[1]
if len(mod) == 3 and mod[1] in obj:
return mod[2]
return None | Given a list of mods (as returned by py_resources) attempts to
determine if a given Python obj fits one of the models | entailment |
def py_resources():
"""Discovers all aomi Vault resource models. This includes
anything extending aomi.model.Mount or aomi.model.Resource."""
aomi_mods = [m for
m, _v in iteritems(sys.modules)
if m.startswith('aomi.model')]
mod_list = []
mod_map = []
for amod in [sys.modules[m] for m in aomi_mods]:
for _mod_bit, model in inspect.getmembers(amod):
if str(model) in mod_list:
continue
if model == Mount:
mod_list.append(str(model))
mod_map.append((model.config_key, model))
elif (inspect.isclass(model) and
issubclass(model, Resource) and
model.config_key):
mod_list.append(str(model))
if model.resource_key:
mod_map.append((model.config_key,
model.resource_key,
model))
elif model.config_key != 'secrets':
mod_map.append((model.config_key, model))
return mod_map | Discovers all aomi Vault resource models. This includes
anything extending aomi.model.Mount or aomi.model.Resource. | entailment |
def load(config, opt):
"""Loads and returns a full context object based on the Secretfile"""
ctx = Context(opt)
seed_map = py_resources()
seed_keys = sorted(set([m[0] for m in seed_map]), key=resource_sort)
for config_key in seed_keys:
if config_key not in config:
continue
for resource_config in config[config_key]:
mod = find_model(config_key, resource_config, seed_map)
if not mod:
LOG.warning("unable to find mod for %s", resource_config)
continue
ctx.add(mod(resource_config, opt))
for config_key in config.keys():
if config_key != 'pgp_keys' and \
config_key not in seed_keys:
LOG.warning("missing model for %s", config_key)
return filtered_context(ctx) | Loads and returns a full context object based on the Secretfile | entailment |
def thaw(self, tmp_dir):
"""Will thaw every secret into an appropriate temporary location"""
for resource in self.resources():
if resource.present:
resource.thaw(tmp_dir) | Will thaw every secret into an appropriate temporary location | entailment |
def freeze(self, dest_dir):
"""Freezes every resource within a context"""
for resource in self.resources():
if resource.present:
resource.freeze(dest_dir) | Freezes every resource within a context | entailment |
def resources(self):
"""Vault resources within context"""
res = []
for resource in self._resources:
res = res + resource.resources()
return res | Vault resources within context | entailment |
def add(self, resource):
"""Add a resource to the context"""
if isinstance(resource, Resource):
if isinstance(resource, Secret) and \
resource.mount != 'cubbyhole':
ensure_backend(resource,
SecretBackend,
self._mounts,
self.opt,
False)
elif isinstance(resource, Mount):
ensure_backend(resource, SecretBackend, self._mounts, self.opt)
elif isinstance(resource, Auth):
ensure_backend(resource, AuthBackend, self._auths, self.opt)
elif isinstance(resource, AuditLog):
ensure_backend(resource, LogBackend, self._logs, self.opt)
self._resources.append(resource)
else:
msg = "Unknown resource %s being " \
"added to context" % resource.__class__
raise aomi_excep.AomiError(msg) | Add a resource to the context | entailment |
def remove(self, resource):
"""Removes a resource from the context"""
if isinstance(resource, Resource):
self._resources.remove(resource) | Removes a resource from the context | entailment |
def sync_policies(self, vault_client):
"""Synchronizes policies only"""
p_resources = [x for x in self.resources()
if isinstance(x, Policy)]
for resource in p_resources:
resource.sync(vault_client)
return [x for x in self.resources()
if not isinstance(x, Policy)] | Synchronizes policies only | entailment |
def sync_auth(self, vault_client, resources):
"""Synchronizes auth mount wrappers. These happen
early in the cycle, to ensure that user backends
are proper. They may also be used to set mount
tuning"""
for auth in self.auths():
auth.sync(vault_client)
auth_resources = [x for x in resources
if isinstance(x, (LDAP, UserPass))]
for resource in auth_resources:
resource.sync(vault_client)
return [x for x in resources
if not isinstance(x, (LDAP, UserPass, AuditLog))] | Synchronizes auth mount wrappers. These happen
early in the cycle, to ensure that user backends
are proper. They may also be used to set mount
tuning | entailment |
def actually_mount(self, vault_client, resource, active_mounts):
"""Handle the actual (potential) mounting of a secret backend.
This is called in multiple contexts, but the action will always
be the same. If we were not aware of the mountpoint at the start
and it has not already been mounted, then mount it."""
a_mounts = list(active_mounts)
if isinstance(resource, Secret) and resource.mount == 'cubbyhole':
return a_mounts
active_mount = find_backend(resource.mount, active_mounts)
if not active_mount:
actual_mount = find_backend(resource.mount, self._mounts)
a_mounts.append(actual_mount)
actual_mount.sync(vault_client)
return a_mounts | Handle the actual (potential) mounting of a secret backend.
This is called in multiple contexts, but the action will always
be the same. If we were not aware of the mountpoint at the start
and it has not already been mounted, then mount it. | entailment |
def sync_mounts(self, active_mounts, resources, vault_client):
"""Synchronizes mount points. Removes things before
adding new."""
# Create a resource set that is only explicit mounts
# and sort so removals are first
mounts = [x for x in resources
if isinstance(x, (Mount, AWS))]
s_resources = sorted(mounts, key=absent_sort)
# Iterate over explicit mounts only
for resource in s_resources:
active_mounts = self.actually_mount(vault_client,
resource,
active_mounts)
# OK Now iterate over everything but make sure it is clear
# that ad-hoc mountpoints are deprecated as per
# https://github.com/Autodesk/aomi/issues/110
for resource in [x for x in resources
if isinstance(x, Secret)]:
n_mounts = self.actually_mount(vault_client,
resource,
active_mounts)
if len(n_mounts) != len(active_mounts):
LOG.warning("Ad-Hoc mount with %s. Please specify"
" explicit mountpoints.", resource)
active_mounts = n_mounts
return active_mounts, [x for x in resources
if not isinstance(x, (Mount))] | Synchronizes mount points. Removes things before
adding new. | entailment |
def sync(self, vault_client, opt):
"""Synchronizes the context to the Vault server. This
has the effect of updating every resource which is
in the context and has changes pending."""
active_mounts = []
for audit_log in self.logs():
audit_log.sync(vault_client)
# Handle policies only on the first pass. This allows us
# to ensure that ACL's are in place prior to actually
# making any changes.
not_policies = self.sync_policies(vault_client)
# Handle auth wrapper resources on the next path. The resources
# may update a path on their own. They may also provide mount
# tuning information.
not_auth = self.sync_auth(vault_client, not_policies)
# Handle mounts only on the next pass. This allows us to
# ensure that everything is in order prior to actually
# provisioning secrets. Note we handle removals before
# anything else, allowing us to address mount conflicts.
active_mounts, not_mounts = self.sync_mounts(active_mounts,
not_auth,
vault_client)
# Now handle everything else. If "best practices" are being
# adhered to then every generic mountpoint should exist by now.
# We handle "child" resources after the first batch
sorted_resources = sorted(not_mounts, key=childless_first)
for resource in sorted_resources:
resource.sync(vault_client)
for mount in self.mounts():
if not find_backend(mount.path, active_mounts):
mount.unmount(vault_client)
if opt.remove_unknown:
self.prune(vault_client) | Synchronizes the context to the Vault server. This
has the effect of updating every resource which is
in the context and has changes pending. | entailment |
def prune(self, vault_client):
"""Will remove any mount point which is not actually defined
in this context. """
existing = getattr(vault_client,
SecretBackend.list_fun)()['data'].items()
for mount_name, _values in existing:
# ignore system paths and cubbyhole
mount_path = normalize_vault_path(mount_name)
if mount_path.startswith('sys') or mount_path == 'cubbyhole':
continue
exists = [resource.path
for resource in self.mounts()
if normalize_vault_path(resource.path) == mount_path]
if not exists:
LOG.info("removed unknown mount %s", mount_path)
getattr(vault_client, SecretBackend.unmount_fun)(mount_path) | Will remove any mount point which is not actually defined
in this context. | entailment |
def fetch(self, vault_client):
"""Updates the context based on the contents of the Vault
server. Note that some resources can not be read after
they have been written to and it is up to those classes
to handle that case properly."""
backends = [(self.mounts, SecretBackend),
(self.auths, AuthBackend),
(self.logs, LogBackend)]
for b_list, b_class in backends:
backend_list = b_list()
if backend_list:
existing = getattr(vault_client, b_class.list_fun)()
for backend in backend_list:
backend.fetch(vault_client, existing)
for rsc in self.resources():
if issubclass(type(rsc), Secret):
nc_exists = (rsc.mount != 'cubbyhole' and
find_backend(rsc.mount, self._mounts).existing)
if nc_exists or rsc.mount == 'cubbyhole':
rsc.fetch(vault_client)
elif issubclass(type(rsc), Auth):
if find_backend(rsc.mount, self._auths).existing:
rsc.fetch(vault_client)
elif issubclass(type(rsc), Mount):
rsc.existing = find_backend(rsc.mount,
self._mounts).existing
else:
rsc.fetch(vault_client)
return self | Updates the context based on the contents of the Vault
server. Note that some resources can not be read after
they have been written to and it is up to those classes
to handle that case properly. | entailment |
def presets(self, presets, opt):
"""Will create representational objects for any preset (push)
based AppRole Secrets."""
for preset in presets:
secret_obj = dict(preset)
secret_obj['role_name'] = self.app_name
self.secret_ids.append(AppRoleSecret(secret_obj, opt)) | Will create representational objects for any preset (push)
based AppRole Secrets. | entailment |
def secret_key_name(path, key, opt):
"""Renders a Secret key name appropriately"""
value = key
if opt.merge_path:
norm_path = [x for x in path.split('/') if x]
value = "%s_%s" % ('_'.join(norm_path), key)
if opt.add_prefix:
value = "%s%s" % (opt.add_prefix, value)
if opt.add_suffix:
value = "%s%s" % (value, opt.add_suffix)
return value | Renders a Secret key name appropriately | entailment |
def grok_template_file(src):
"""Determine the real deal template file"""
if not src.startswith('builtin:'):
return abspath(src)
builtin = src.split(':')[1]
builtin = "templates/%s.j2" % builtin
return resource_filename(__name__, builtin) | Determine the real deal template file | entailment |
def blend_vars(secrets, opt):
"""Blends secret and static variables together"""
base_obj = load_vars(opt)
merged = merge_dicts(base_obj, secrets)
template_obj = dict((k, v) for k, v in iteritems(merged) if v)
# give templates something to iterate over
template_obj['aomi_items'] = template_obj.copy()
return template_obj | Blends secret and static variables together | entailment |
def template(client, src, dest, paths, opt):
"""Writes a template using variables from a vault path"""
key_map = cli_hash(opt.key_map)
obj = {}
for path in paths:
response = client.read(path)
if not response:
raise aomi.exceptions.VaultData("Unable to retrieve %s" % path)
if is_aws(response['data']) and 'sts' not in path:
renew_secret(client, response, opt)
for s_k, s_v in response['data'].items():
o_key = s_k
if s_k in key_map:
o_key = key_map[s_k]
k_name = secret_key_name(path, o_key, opt) \
.lower() \
.replace('-', '_')
obj[k_name] = s_v
template_obj = blend_vars(obj, opt)
output = render(grok_template_file(src),
template_obj)
write_raw_file(output, abspath(dest)) | Writes a template using variables from a vault path | entailment |
def write_raw_file(secret, dest):
"""Writes an actual secret out to a file"""
secret_file = None
secret_filename = abspath(dest)
if sys.version_info >= (3, 0):
if not isinstance(secret, str):
secret_file = open(secret_filename, 'wb')
if not secret_file:
secret_file = open(secret_filename, 'w')
secret_file.write(secret)
secret_file.close()
os.chmod(secret_filename, 0o600) | Writes an actual secret out to a file | entailment |
def raw_file(client, src, dest, opt):
"""Write the contents of a vault path/key to a file. Is
smart enough to attempt and handle binary files that are
base64 encoded."""
path, key = path_pieces(src)
resp = client.read(path)
if not resp:
client.revoke_self_token()
raise aomi.exceptions.VaultData("Unable to retrieve %s" % path)
else:
if 'data' in resp and key in resp['data']:
secret = resp['data'][key]
if is_base64(secret):
LOG.debug('decoding base64 entry')
secret = portable_b64decode(secret)
if is_aws(resp['data']) and 'sts' not in path:
renew_secret(client, resp, opt)
write_raw_file(secret, dest)
else:
client.revoke_self_token()
e_msg = "Key %s not found in %s" % (key, path)
raise aomi.exceptions.VaultData(e_msg) | Write the contents of a vault path/key to a file. Is
smart enough to attempt and handle binary files that are
base64 encoded. | entailment |
def env(client, paths, opt):
"""Renders a shell snippet based on paths in a Secretfile"""
old_prefix = False
old_prefix = opt.prefix and not (opt.add_prefix or
opt.add_suffix or
not opt.merge_path)
if old_prefix:
LOG.warning("the prefix option is deprecated "
"please use"
"--no-merge-path --add-prefix $OLDPREFIX_ instead")
elif opt.prefix:
LOG.warning("the prefix option is deprecated"
"please use"
"--no-merge-path --add-prefix $OLDPREFIX_ instead")
key_map = cli_hash(opt.key_map)
for path in paths:
secrets = client.read(path)
if secrets and 'data' in secrets:
if is_aws(secrets['data']) and 'sts' not in path:
renew_secret(client, secrets, opt)
for s_key, s_val in secrets['data'].items():
o_key = s_key
if s_key in key_map:
o_key = key_map[s_key]
# see https://github.com/Autodesk/aomi/issues/40
env_name = None
if old_prefix:
env_name = ("%s_%s" % (opt.prefix, o_key)).upper()
else:
env_name = secret_key_name(path, o_key, opt).upper()
print("%s=\"%s\"" % (env_name, s_val))
if opt.export:
print("export %s" % env_name) | Renders a shell snippet based on paths in a Secretfile | entailment |
def aws(client, path, opt):
"""Renders a shell environment snippet with AWS information"""
try:
creds = client.read(path)
except (hvac.exceptions.InternalServerError) as vault_exception:
# this is how old vault behaves
if vault_exception.errors[0].find('unsupported path') > 0:
emsg = "Invalid AWS path. Did you forget the" \
" credential type and role?"
raise aomi.exceptions.AomiFile(emsg)
else:
raise
# this is how new vault behaves
if not creds:
emsg = "Invalid AWS path. Did you forget the" \
" credential type and role?"
raise aomi.exceptions.AomiFile(emsg)
renew_secret(client, creds, opt)
if creds and 'data' in creds:
print("AWS_ACCESS_KEY_ID=\"%s\"" % creds['data']['access_key'])
print("AWS_SECRET_ACCESS_KEY=\"%s\"" % creds['data']['secret_key'])
if 'security_token' in creds['data'] \
and creds['data']['security_token']:
token = creds['data']['security_token']
print("AWS_SECURITY_TOKEN=\"%s\"" % token)
else:
client.revoke_self_token()
e_msg = "Unable to generate AWS credentials from %s" % path
raise aomi.exceptions.VaultData(e_msg)
if opt.export:
print("export AWS_ACCESS_KEY_ID")
print("export AWS_SECRET_ACCESS_KEY")
if 'security_token' in creds['data'] \
and creds['data']['security_token']:
print("export AWS_SECURITY_TOKEN") | Renders a shell environment snippet with AWS information | entailment |
def generated_key(key):
"""Create the proper generated key value"""
key_name = key['name']
if key['method'] == 'uuid':
LOG.debug("Setting %s to a uuid", key_name)
return str(uuid4())
elif key['method'] == 'words':
LOG.debug("Setting %s to random words", key_name)
return random_word()
elif key['method'] == 'static':
if 'value' not in key.keys():
raise aomi.exceptions.AomiData("Missing static value")
LOG.debug("Setting %s to a static value", key_name)
return key['value']
else:
raise aomi.exceptions.AomiData("Unexpected generated secret method %s"
% key['method']) | Create the proper generated key value | entailment |
def generate_obj(self):
"""Generates the secret object, respecting existing information
and user specified options"""
secret_obj = {}
if self.existing:
secret_obj = deepcopy(self.existing)
for key in self.keys:
key_name = key['name']
if self.existing and \
key_name in self.existing and \
not key.get('overwrite'):
LOG.debug("Not overwriting %s/%s", self.path, key_name)
continue
else:
secret_obj[key_name] = generated_key(key)
return secret_obj | Generates the secret object, respecting existing information
and user specified options | entailment |
def unhandled(exception, opt):
""" Handle uncaught/unexpected errors and be polite about it"""
exmod = type(exception).__module__
name = "%s.%s" % (exmod, type(exception).__name__)
# this is a Vault error
if exmod == 'aomi.exceptions' or exmod == 'cryptorito':
# This may be set for Validation or similar errors
if hasattr(exception, 'source'):
output(exception.message, opt, extra=exception.source)
else:
output(exception.message, opt)
else:
output("Unexpected error: %s" % name, opt)
sys.exit(1) | Handle uncaught/unexpected errors and be polite about it | entailment |
def output(message, opt, extra=None):
""" Politely display an unexpected error"""
print(message, file=sys.stderr)
if opt.verbose:
if extra:
print(extra)
traceback.print_exc(sys.stderr) | Politely display an unexpected error | entailment |
def grok_seconds(lease):
"""Ensures that we are returning just seconds"""
if lease.endswith('s'):
return int(lease[0:-1])
elif lease.endswith('m'):
return int(lease[0:-1]) * 60
elif lease.endswith('h'):
return int(lease[0:-1]) * 3600
return None | Ensures that we are returning just seconds | entailment |
def renew_secret(client, creds, opt):
"""Renews a secret. This will occur unless the user has
specified on the command line that it is not neccesary"""
if opt.reuse_token:
return
seconds = grok_seconds(opt.lease)
if not seconds:
raise aomi.exceptions.AomiCommand("invalid lease %s" % opt.lease)
renew = None
if client.version:
v_bits = client.version.split('.')
if int(v_bits[0]) == 0 and \
int(v_bits[1]) <= 8 and \
int(v_bits[2]) <= 0:
r_obj = {
'increment': seconds
}
r_path = "v1/sys/renew/{0}".format(creds['lease_id'])
# Pending discussion on https://github.com/ianunruh/hvac/issues/148
# pylint: disable=protected-access
renew = client._post(r_path, json=r_obj).json()
if not renew:
renew = client.renew_secret(creds['lease_id'], seconds)
# sometimes it takes a bit for vault to respond
# if we are within 5s then we are fine
if not renew or (seconds - renew['lease_duration'] >= 5):
client.revoke_self_token()
e_msg = 'Unable to renew with desired lease'
raise aomi.exceptions.VaultConstraint(e_msg) | Renews a secret. This will occur unless the user has
specified on the command line that it is not neccesary | entailment |
def approle_token(vault_client, role_id, secret_id):
"""Returns a vault token based on the role and seret id"""
resp = vault_client.auth_approle(role_id, secret_id)
if 'auth' in resp and 'client_token' in resp['auth']:
return resp['auth']['client_token']
else:
raise aomi.exceptions.AomiCredentials('invalid approle') | Returns a vault token based on the role and seret id | entailment |
def app_token(vault_client, app_id, user_id):
"""Returns a vault token based on the app and user id."""
resp = vault_client.auth_app_id(app_id, user_id)
if 'auth' in resp and 'client_token' in resp['auth']:
return resp['auth']['client_token']
else:
raise aomi.exceptions.AomiCredentials('invalid apptoken') | Returns a vault token based on the app and user id. | entailment |
def token_meta(opt):
"""Generates metadata for a token"""
meta = {
'via': 'aomi',
'operation': opt.operation,
'hostname': socket.gethostname()
}
if 'USER' in os.environ:
meta['unix_user'] = os.environ['USER']
if opt.metadata:
meta_bits = opt.metadata.split(',')
for meta_bit in meta_bits:
key, value = meta_bit.split('=')
if key not in meta:
meta[key] = value
for key, value in meta.items():
LOG.debug("Token metadata %s %s", key, value)
return meta | Generates metadata for a token | entailment |
def get_backend(backend, path, backends):
"""Returns mountpoint details for a backend"""
m_norm = normalize_vault_path(path)
for mount_name, values in backends.items():
b_norm = normalize_vault_path(mount_name)
if (m_norm == b_norm) and values['type'] == backend:
return values
return None | Returns mountpoint details for a backend | entailment |
def wrap_hvac(msg):
"""Error catching Vault API wrapper
This decorator wraps API interactions with Vault. It will
catch and return appropriate error output on common
problems. Do we even need this now that we extend the
hvac class?"""
# pylint: disable=missing-docstring
def wrap_call(func):
# pylint: disable=missing-docstring
def func_wrapper(self, vault_client):
try:
return func(self, vault_client)
except (hvac.exceptions.InvalidRequest,
hvac.exceptions.Forbidden) as vault_exception:
if vault_exception.errors[0] == 'permission denied':
emsg = "Permission denied %s from %s" % (msg, self.path)
raise aomi.exceptions.AomiCredentials(emsg)
else:
raise
return func_wrapper
return wrap_call | Error catching Vault API wrapper
This decorator wraps API interactions with Vault. It will
catch and return appropriate error output on common
problems. Do we even need this now that we extend the
hvac class? | entailment |
def server_version(self):
"""Attempts to determine the version of Vault that a
server is running. Some actions will change on older
Vault deployments."""
health_url = "%s/v1/sys/health" % self.vault_addr
resp = self.session.request('get', health_url, **self._kwargs)
if resp.status_code == 200 or resp.status_code == 429:
blob = resp.json()
if 'version' in blob:
return blob['version']
else:
raise aomi.exceptions.VaultProblem('Health check failed')
return None | Attempts to determine the version of Vault that a
server is running. Some actions will change on older
Vault deployments. | entailment |
def connect(self, opt):
"""This sets up the tokens we expect to see in a way
that hvac also expects."""
if not self._kwargs['verify']:
LOG.warning('Skipping SSL Validation!')
self.version = self.server_version()
self.token = self.init_token()
my_token = self.lookup_token()
if not my_token or 'data' not in my_token:
raise aomi.exceptions.AomiCredentials('initial token')
display_name = my_token['data']['display_name']
vsn_string = ""
if self.version:
vsn_string = ", v%s" % self.version
else:
LOG.warning("Unable to deterine Vault version. Not all "
"functionality is supported")
LOG.info("Connected to %s as %s%s",
self._url,
display_name,
vsn_string)
if opt.reuse_token:
LOG.debug("Not creating operational token")
self.initial_token = self.token
self.operational_token = self.token
else:
self.initial_token = self.token
self.operational_token = self.op_token(display_name, opt)
if not self.is_authenticated():
raise aomi.exceptions.AomiCredentials('operational token')
self.token = self.operational_token
return self | This sets up the tokens we expect to see in a way
that hvac also expects. | entailment |
def init_token(self):
"""Generate our first token based on workstation configuration"""
app_filename = appid_file()
token_filename = token_file()
approle_filename = approle_file()
token = None
if 'VAULT_ROLE_ID' in os.environ and \
'VAULT_SECRET_ID' in os.environ and \
os.environ['VAULT_ROLE_ID'] and os.environ['VAULT_SECRET_ID']:
token = approle_token(self,
os.environ['VAULT_ROLE_ID'],
os.environ['VAULT_SECRET_ID'])
LOG.debug("Token derived from VAULT_ROLE_ID and VAULT_SECRET_ID")
elif 'VAULT_TOKEN' in os.environ and os.environ['VAULT_TOKEN']:
LOG.debug('Token derived from VAULT_TOKEN environment variable')
token = os.environ['VAULT_TOKEN'].strip()
elif 'VAULT_USER_ID' in os.environ and \
'VAULT_APP_ID' in os.environ and \
os.environ['VAULT_USER_ID'] and os.environ['VAULT_APP_ID']:
LOG.debug("Token derived from VAULT_APP_ID and VAULT_USER_ID")
token = app_token(self,
os.environ['VAULT_APP_ID'].strip(),
os.environ['VAULT_USER_ID'].strip())
elif approle_filename:
creds = yaml.safe_load(open(approle_filename).read().strip())
if 'role_id' in creds and 'secret_id' in creds:
LOG.debug("Token derived from approle file")
token = approle_token(self,
creds['role_id'],
creds['secret_id'])
elif token_filename:
LOG.debug("Token derived from %s", token_filename)
try:
token = open(token_filename, 'r').read().strip()
except IOError as os_exception:
if os_exception.errno == 21:
raise aomi.exceptions.AomiFile('Bad Vault token file')
raise
elif app_filename:
token = yaml.safe_load(open(app_filename).read().strip())
if 'app_id' in token and 'user_id' in token:
LOG.debug("Token derived from %s", app_filename)
token = app_token(self,
token['app_id'],
token['user_id'])
else:
raise aomi.exceptions.AomiCredentials('unknown method')
return token | Generate our first token based on workstation configuration | entailment |
def op_token(self, display_name, opt):
"""Return a properly annotated token for our use. This
token will be revoked at the end of the session. The token
will have some decent amounts of metadata tho."""
args = {
'lease': opt.lease,
'display_name': display_name,
'meta': token_meta(opt)
}
try:
token = self.create_token(**args)
except (hvac.exceptions.InvalidRequest,
hvac.exceptions.Forbidden) as vault_exception:
if vault_exception.errors[0] == 'permission denied':
emsg = "Permission denied creating operational token"
raise aomi.exceptions.AomiCredentials(emsg)
else:
raise
LOG.debug("Created operational token with lease of %s", opt.lease)
return token['auth']['client_token'] | Return a properly annotated token for our use. This
token will be revoked at the end of the session. The token
will have some decent amounts of metadata tho. | entailment |
def read(self, path, wrap_ttl=None):
"""Wrap the hvac read call, using the right token for
cubbyhole interactions."""
path = sanitize_mount(path)
if path.startswith('cubbyhole'):
self.token = self.initial_token
val = super(Client, self).read(path, wrap_ttl)
self.token = self.operational_token
return val
return super(Client, self).read(path, wrap_ttl) | Wrap the hvac read call, using the right token for
cubbyhole interactions. | entailment |
def write(self, path, wrap_ttl=None, **kwargs):
"""Wrap the hvac write call, using the right token for
cubbyhole interactions."""
path = sanitize_mount(path)
val = None
if path.startswith('cubbyhole'):
self.token = self.initial_token
val = super(Client, self).write(path, wrap_ttl=wrap_ttl, **kwargs)
self.token = self.operational_token
else:
super(Client, self).write(path, wrap_ttl=wrap_ttl, **kwargs)
return val | Wrap the hvac write call, using the right token for
cubbyhole interactions. | entailment |
def delete(self, path):
"""Wrap the hvac delete call, using the right token for
cubbyhole interactions."""
path = sanitize_mount(path)
val = None
if path.startswith('cubbyhole'):
self.token = self.initial_token
val = super(Client, self).delete(path)
self.token = self.operational_token
else:
super(Client, self).delete(path)
return val | Wrap the hvac delete call, using the right token for
cubbyhole interactions. | entailment |
def from_keybase(username):
"""Will attempt to retrieve a GPG public key from
Keybase, importing if neccesary"""
public_key = key_from_keybase(username)
fingerprint = public_key['fingerprint'][-8:].upper().encode('ascii')
key = public_key['bundle'].encode('ascii')
if not has_gpg_key(fingerprint):
LOG.debug("Importing gpg key for %s", username)
if not import_gpg_key(key):
raise aomi.exceptions.KeybaseAPI("import key for %s" % username)
return fingerprint | Will attempt to retrieve a GPG public key from
Keybase, importing if neccesary | entailment |
def grok_keys(config):
"""Will retrieve a GPG key from either Keybase or GPG directly"""
key_ids = []
for key in config['pgp_keys']:
if key.startswith('keybase:'):
key_id = from_keybase(key[8:])
LOG.debug("Encrypting for keybase user %s", key[8:])
else:
if not has_gpg_key(key):
raise aomi.exceptions.GPG("Do not actually have key %s" % key)
LOG.debug("Encrypting for gpg id %s", key)
key_id = key
validate_gpg_fingerprint(key_id)
key_ids.append(key_id)
return key_ids | Will retrieve a GPG key from either Keybase or GPG directly | entailment |
def freeze_archive(tmp_dir, dest_prefix):
"""Generates a ZIP file of secrets"""
zip_filename = "%s/aomi-blah.zip" % tmp_dir
archive = zipfile.ZipFile(zip_filename, 'w')
for root, _dirnames, filenames in os.walk(dest_prefix):
for filename in filenames:
relative_path = subdir_path(root, dest_prefix).split(os.sep)[1:]
relative_path = os.sep.join(relative_path)
archive.write("%s/%s" % (root, filename),
"%s/%s" % (relative_path, filename))
archive.close()
return zip_filename | Generates a ZIP file of secrets | entailment |
def freeze_encrypt(dest_dir, zip_filename, config, opt):
"""Encrypts the zip file"""
pgp_keys = grok_keys(config)
icefile_prefix = "aomi-%s" % \
os.path.basename(os.path.dirname(opt.secretfile))
if opt.icefile_prefix:
icefile_prefix = opt.icefile_prefix
timestamp = time.strftime("%H%M%S-%m-%d-%Y",
datetime.datetime.now().timetuple())
ice_file = "%s/%s-%s.ice" % (dest_dir, icefile_prefix, timestamp)
if not encrypt(zip_filename, ice_file, pgp_keys):
raise aomi.exceptions.GPG("Unable to encrypt zipfile")
return ice_file | Encrypts the zip file | entailment |
def freeze(dest_dir, opt):
"""Iterates over the Secretfile looking for secrets to freeze"""
tmp_dir = ensure_tmpdir()
dest_prefix = "%s/dest" % tmp_dir
ensure_dir(dest_dir)
ensure_dir(dest_prefix)
config = get_secretfile(opt)
Context.load(config, opt) \
.freeze(dest_prefix)
zip_filename = freeze_archive(tmp_dir, dest_prefix)
ice_file = freeze_encrypt(dest_dir, zip_filename, config, opt)
shutil.rmtree(tmp_dir)
LOG.debug("Generated file is %s", ice_file) | Iterates over the Secretfile looking for secrets to freeze | entailment |
def thaw_decrypt(vault_client, src_file, tmp_dir, opt):
"""Decrypts the encrypted ice file"""
if not os.path.isdir(opt.secrets):
LOG.info("Creating secret directory %s", opt.secrets)
os.mkdir(opt.secrets)
zip_file = "%s/aomi.zip" % tmp_dir
if opt.gpg_pass_path:
gpg_path_bits = opt.gpg_pass_path.split('/')
gpg_path = '/'.join(gpg_path_bits[0:len(gpg_path_bits) - 1])
gpg_field = gpg_path_bits[len(gpg_path_bits) - 1]
resp = vault_client.read(gpg_path)
gpg_pass = None
if resp and 'data' in resp and gpg_field in resp['data']:
gpg_pass = resp['data'][gpg_field]
if not gpg_pass:
raise aomi.exceptions.GPG("Unable to retrieve GPG password")
LOG.debug("Retrieved GPG password from Vault")
if not decrypt(src_file, zip_file, passphrase=gpg_pass):
raise aomi.exceptions.GPG("Unable to gpg")
else:
raise aomi.exceptions.VaultData("Unable to retrieve GPG password")
else:
if not decrypt(src_file, zip_file):
raise aomi.exceptions.GPG("Unable to gpg")
return zip_file | Decrypts the encrypted ice file | entailment |
def thaw(vault_client, src_file, opt):
"""Given the combination of a Secretfile and the output of
a freeze operation, will restore secrets to usable locations"""
if not os.path.exists(src_file):
raise aomi.exceptions.AomiFile("%s does not exist" % src_file)
tmp_dir = ensure_tmpdir()
zip_file = thaw_decrypt(vault_client, src_file, tmp_dir, opt)
archive = zipfile.ZipFile(zip_file, 'r')
for archive_file in archive.namelist():
archive.extract(archive_file, tmp_dir)
os.chmod("%s/%s" % (tmp_dir, archive_file), 0o640)
LOG.debug("Extracted %s from archive", archive_file)
LOG.info("Thawing secrets into %s", opt.secrets)
config = get_secretfile(opt)
Context.load(config, opt) \
.thaw(tmp_dir) | Given the combination of a Secretfile and the output of
a freeze operation, will restore secrets to usable locations | entailment |
def diff(self):
"""Determines if changes are needed for the Vault backend"""
if not self.present:
if self.existing:
return DEL
return NOOP
is_diff = NOOP
if self.present and self.existing:
a_obj = self.config.copy()
if self.config and diff_dict(a_obj, self.existing, True):
is_diff = CHANGED
if self.description != self.existing.get('description'):
is_diff = CONFLICT
elif self.present and not self.existing:
is_diff = ADD
return is_diff | Determines if changes are needed for the Vault backend | entailment |
def sync(self, vault_client):
"""Synchronizes the local and remote Vault resources. Has the net
effect of adding backend if needed"""
if self.present:
if not self.existing:
LOG.info("Mounting %s backend on %s",
self.backend, self.path)
self.actually_mount(vault_client)
else:
LOG.info("%s backend already mounted on %s",
self.backend, self.path)
else:
if self.existing:
LOG.info("Unmounting %s backend on %s",
self.backend, self.path)
self.unmount(vault_client)
else:
LOG.info("%s backend already unmounted on %s",
self.backend, self.path)
if self.present and vault_client.version:
self.sync_tunables(vault_client) | Synchronizes the local and remote Vault resources. Has the net
effect of adding backend if needed | entailment |
def sync_tunables(self, vault_client):
"""Synchtonizes any tunables we have set"""
if not self.config:
return
a_prefix = self.tune_prefix
if self.tune_prefix:
a_prefix = "%s/" % self.tune_prefix
v_path = "sys/mounts/%s%s/tune" % (a_prefix, self.path)
a_obj = self.config.copy()
if 'description' in a_obj:
del a_obj['description']
t_resp = vault_client.write(v_path, **a_obj)
if t_resp and 'errors' in t_resp and t_resp['errors']:
e_msg = "Unable to update tuning info for %s" % self
raise aomi_excep.VaultData(e_msg) | Synchtonizes any tunables we have set | entailment |
def fetch(self, vault_client, backends):
"""Updates local resource with context on whether this
backend is actually mounted and available"""
if not is_mounted(self.backend, self.path, backends) or \
self.tune_prefix is None:
return
backend_details = get_backend(self.backend, self.path, backends)
self.existing = backend_details['config']
if backend_details['description']:
self.existing['description'] = backend_details['description']
if vault_client.version is None:
return
if not self.managed:
return
a_prefix = self.tune_prefix
if self.tune_prefix:
a_prefix = "%s/" % self.tune_prefix
v_path = "sys/mounts/%s%s/tune" % (a_prefix, self.path)
t_resp = vault_client.read(v_path)
if 'data' not in t_resp:
e_msg = "Unable to retrieve tuning info for %s" % self
raise aomi_excep.VaultData(e_msg)
e_obj = t_resp['data']
e_obj['description'] = None
n_path = normalize_vault_path(self.path)
if n_path in backends:
a_mount = backends[n_path]
if 'description' in a_mount and a_mount['description']:
e_obj['description'] = a_mount['description']
self.existing = e_obj | Updates local resource with context on whether this
backend is actually mounted and available | entailment |
def unmount(self, client):
"""Unmounts a backend within Vault"""
getattr(client, self.unmount_fun)(mount_point=self.path) | Unmounts a backend within Vault | entailment |
def actually_mount(self, client):
"""Actually mount something in Vault"""
a_obj = self.config.copy()
if 'description' in a_obj:
del a_obj['description']
try:
m_fun = getattr(client, self.mount_fun)
if self.description and a_obj:
m_fun(self.backend,
mount_point=self.path,
description=self.description,
config=a_obj)
elif self.description:
m_fun(self.backend,
mount_point=self.path,
description=self.description)
elif a_obj:
m_fun(self.backend,
mount_point=self.path,
config=a_obj)
else:
m_fun(self.backend,
mount_point=self.path)
except hvac.exceptions.InvalidRequest as exception:
match = re.match('existing mount at (?P<path>.+)', str(exception))
if match:
e_msg = "%s has a mountpoint conflict with %s" % \
(self.path, match.group('path'))
raise aomi_excep.VaultConstraint(e_msg)
else:
raise | Actually mount something in Vault | entailment |
def thaw(self, tmp_dir):
"""Will perform some validation and copy a
decrypted secret to it's final location"""
for sfile in self.secrets():
src_file = "%s/%s" % (tmp_dir, sfile)
err_msg = "%s secret missing from icefile" % (self)
if not os.path.exists(src_file):
if hasattr(self.opt, 'ignore_missing') and \
self.opt.ignore_missing:
LOG.warning(err_msg)
continue
else:
raise aomi_excep.IceFile(err_msg)
dest_file = "%s/%s" % (self.opt.secrets, sfile)
dest_dir = os.path.dirname(dest_file)
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
shutil.copy(src_file, dest_file)
LOG.debug("Thawed %s %s", self, sfile) | Will perform some validation and copy a
decrypted secret to it's final location | entailment |
def tunable(self, obj):
"""A tunable resource maps against a backend..."""
self.tune = dict()
if 'tune' in obj:
for tunable in MOUNT_TUNABLES:
tunable_key = tunable[0]
map_val(self.tune, obj['tune'], tunable_key)
if tunable_key in self.tune and \
is_vault_time(self.tune[tunable_key]):
vault_time_s = vault_time_to_s(self.tune[tunable_key])
self.tune[tunable_key] = vault_time_s
if 'description'in obj:
self.tune['description'] = obj['description'] | A tunable resource maps against a backend... | entailment |
def export_handle(self, directory):
"""Get a filehandle for exporting"""
filename = getattr(self, 'filename')
dest_file = "%s/%s" % (directory, filename)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir, 0o700)
return open(dest_file, 'w') | Get a filehandle for exporting | entailment |
def export(self, directory):
"""Export exportable resources decoding as needed"""
if not self.existing or not hasattr(self, 'filename'):
return
secret_h = self.export_handle(directory)
obj = self.existing
if isinstance(obj, str):
secret_h.write(obj)
elif isinstance(obj, dict):
secret_h.write(yaml.safe_dump(obj)) | Export exportable resources decoding as needed | entailment |
def freeze(self, tmp_dir):
"""Copies a secret into a particular location"""
for sfile in self.secrets():
src_file = hard_path(sfile, self.opt.secrets)
if not os.path.exists(src_file):
raise aomi_excep.IceFile("%s secret not found at %s" %
(self, src_file))
dest_file = "%s/%s" % (tmp_dir, sfile)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir, 0o700)
shutil.copy(src_file, dest_file)
LOG.debug("Froze %s %s", self, sfile) | Copies a secret into a particular location | entailment |
def grok_state(self, obj):
"""Determine the desired state of this
resource based on data present"""
if 'state' in obj:
my_state = obj['state'].lower()
if my_state != 'absent' and my_state != 'present':
raise aomi_excep \
.Validation('state must be either "absent" or "present"')
self.present = obj.get('state', 'present').lower() == 'present' | Determine the desired state of this
resource based on data present | entailment |
def validate(self, obj):
"""Base validation method. Will inspect class attributes
to dermine just what should be present"""
if 'tags' in obj and not isinstance(obj['tags'], list):
raise aomi_excep.Validation('tags must be a list')
if self.present:
check_obj(self.required_fields, self.name(), obj) | Base validation method. Will inspect class attributes
to dermine just what should be present | entailment |
def diff(self, obj=None):
"""Determine if something has changed or not"""
if self.no_resource:
return NOOP
if not self.present:
if self.existing:
return DEL
return NOOP
if not obj:
obj = self.obj()
is_diff = NOOP
if self.present and self.existing:
if isinstance(self.existing, dict):
current = dict(self.existing)
if 'refresh_interval' in current:
del current['refresh_interval']
if diff_dict(current, obj):
is_diff = CHANGED
elif is_unicode(self.existing):
if self.existing != obj:
is_diff = CHANGED
elif self.present and not self.existing:
is_diff = ADD
return is_diff | Determine if something has changed or not | entailment |
def fetch(self, vault_client):
"""Populate internal representation of remote
Vault resource contents"""
result = self.read(vault_client)
if result:
if isinstance(result, dict) and 'data' in result:
self.existing = result['data']
else:
self.existing = result
else:
self.existing = None | Populate internal representation of remote
Vault resource contents | entailment |
def sync(self, vault_client):
"""Update remove Vault resource contents if needed"""
if self.present and not self.existing:
LOG.info("Writing new %s to %s",
self.secret_format, self)
self.write(vault_client)
elif self.present and self.existing:
if self.diff() == CHANGED or self.diff() == OVERWRITE:
LOG.info("Updating %s in %s",
self.secret_format, self)
self.write(vault_client)
elif not self.present and not self.existing:
LOG.info("No %s to remove from %s",
self.secret_format, self)
elif not self.present and self.existing:
LOG.info("Removing %s from %s",
self.secret_format, self)
self.delete(vault_client) | Update remove Vault resource contents if needed | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.