sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def precheck_ami_id(context):
    """Verify the AMI running in service matches the AMI marked current in the version records.

    This tool won't update records unless the world state is coherent.

    Args:
        context: a populated EFVersionContext object
    Returns:
        True if ok to proceed
    Raises:
        RuntimeError if not ok to proceed
    """
    # get the current AMI
    key = "{}/{}".format(context.env, context.service_name)
    print_if_verbose("precheck_ami_id with key: {}".format(key))
    current_ami = context.versionresolver.lookup("ami-id,{}".format(key))
    print_if_verbose("ami found: {}".format(current_ami))
    # If bootstrapping (this will be the first entry in the version history)
    # then we can't check it vs. running version
    if current_ami is None:
        print_if_verbose("precheck passed without check because current AMI is None")
        return True
    # Otherwise perform a consistency check
    # 1. Get IDs of instances running the AMI - will find instances in all environments.
    # FIX: flatten every instance of each reservation; the previous code read only
    # Instances[0], silently ignoring additional instances in multi-instance reservations.
    reservations = context.aws_client("ec2").describe_instances(
        Filters=[{
            'Name': 'image-id',
            'Values': [current_ami]
        }]
    )["Reservations"]
    instances_running_ami = [
        instance["InstanceId"] for resv in reservations for instance in resv["Instances"]
    ]
    print_if_verbose("instances running ami {}:\n{}".format(current_ami, repr(instances_running_ami)))
    # 2. Get IDs of instances running as <context.env>-<context.service_name>
    env_service = "{}-{}".format(context.env, context.service_name)
    reservations = context.aws_client("ec2").describe_instances(
        Filters=[{
            'Name': 'iam-instance-profile.arn',
            'Values': ["arn:aws:iam::*:instance-profile/{}-{}".format(context.env, context.service_name)]
        }]
    )["Reservations"]
    instances_running_as_env_service = [
        instance["InstanceId"] for resv in reservations for instance in resv["Instances"]
    ]
    print_if_verbose("instances running as {}".format(env_service))
    print_if_verbose(repr(instances_running_as_env_service))
    # 3. Instances running as env-service should be a subset of instances running the AMI
    for instance_id in instances_running_as_env_service:
        if instance_id not in instances_running_ami:
            raise RuntimeError("Instance: {} not running expected ami: {}".format(instance_id, current_ami))
    # Check passed - all is well
    return True
Is the AMI in service the same as the AMI marked current in the version records? This tool won't update records unless the world state is coherent. Args: context: a populated EFVersionContext object Returns: True if ok to proceed Raises: RuntimeError if not ok to proceed
entailment
def precheck_dist_hash(context):
    """Verify the dist in service matches the dist marked current in the version records.

    This tool won't update records unless the world state is coherent.

    Args:
        context: a populated EFVersionContext object
    Returns:
        True if ok to proceed
    Raises:
        RuntimeError if not ok to proceed
    """
    # get the current dist-hash
    key = "{}/{}/dist-hash".format(context.service_name, context.env)
    print_if_verbose("precheck_dist_hash with key: {}".format(key))
    try:
        current_dist_hash = Version(context.aws_client("s3").get_object(
            Bucket=EFConfig.S3_VERSION_BUCKET,
            Key=key
        ))
        print_if_verbose("dist-hash found: {}".format(current_dist_hash.value))
    except ClientError as error:
        if error.response["Error"]["Code"] == "NoSuchKey":
            # If bootstrapping (this will be the first entry in the version history)
            # then we can't check it vs. current version, thus we cannot get the key
            print_if_verbose("precheck passed without check because current dist-hash is None")
            return True
        else:
            fail("Exception while prechecking dist_hash for {} {}: {}".format(context.service_name, context.env, error))
    # Otherwise perform a consistency check
    # 1. get dist version in service for environment
    try:
        response = urllib2.urlopen(current_dist_hash.location, None, 5)
        try:
            if response.getcode() != 200:
                raise IOError("Non-200 response " + str(response.getcode()) + " reading " + current_dist_hash.location)
            dist_hash_in_service = response.read().strip()
        finally:
            # FIX: close the HTTP response; it was previously leaked
            response.close()
    except urllib2.URLError as error:
        # FIX: the message previously named the wrong function ("http_get_dist_version")
        raise IOError("URLError in precheck_dist_hash: " + repr(error))
    # 2. dist version in service should be the same as "current" dist version
    if dist_hash_in_service != current_dist_hash.value:
        raise RuntimeError("{} dist-hash in service: {} but expected dist-hash: {}"
                           .format(key, dist_hash_in_service, current_dist_hash.value))
    # Check passed - all is well
    return True
Is the dist in service the same as the dist marked current in the version records? This tool won't update records unless the world state is coherent. Args: context: a populated EFVersionContext object Returns: True if ok to proceed Raises: RuntimeError if not ok to proceed
entailment
def precheck(context):
    """Dispatch to a key-specific precheck function, if one exists.

    Calls a function named "precheck_<key>" where <key> is context.key with '-'
    changed to '_' (e.g. "precheck_ami_id"). The checking function returns True
    if OK, or raises RuntimeError with a message if not.

    Args:
        context: a populated EFVersionContext object
    Returns:
        True if the precheck passed, or if there was no precheck function for context.key
    Raises:
        RuntimeError if precheck failed, with explanatory message
    """
    if context.noprecheck:
        return True
    checker = globals().get("precheck_" + context.key.replace("-", "_"))
    # No checker defined for this key means there's nothing to verify
    if isfunction(checker):
        return checker(context)
    return True
calls a function named "precheck_<key>" where <key> is context_key with '-' changed to '_' (e.g. "precheck_ami_id") Checking function should return True if OK, or raise RuntimeError w/ message if not Args: context: a populated EFVersionContext object Returns: True if the precheck passed, or if there was no precheck function for context.key Raises: RuntimeError if precheck failed, with explanatory message
entailment
def get_versions(context, return_stable=False):
    """Fetch version records for a key.

    Args:
        context: a populated EFVersionContext object
        return_stable: (default: False) if True, stop fetching as soon as a
            'stable' version is found and return only that version
    Returns:
        list of Version objects sorted in reverse by last_modified (newest first);
        [<stable version>] when return_stable is True and one was found;
        [] when nothing matched. Each Version carries:
        value, last_modified (ISO8601), modified_by (arn), version_id, status
        (see EFConfig.S3_VERSION_STATUS_* for possible status values)
    """
    s3 = context.aws_client("s3")
    s3_key = "{}/{}/{}".format(context.service_name, context.env, context.key)
    listing = s3.list_object_versions(
        Bucket=EFConfig.S3_VERSION_BUCKET,
        Delimiter='/',
        MaxKeys=context.limit,
        Prefix=s3_key
    )
    if "Versions" not in listing:
        return []
    collected = []
    for entry in listing["Versions"]:
        record = Version(s3.get_object(
            Bucket=EFConfig.S3_VERSION_BUCKET,
            Key=s3_key,
            VersionId=entry["VersionId"]
        ))
        # Short-circuit: caller only wants the most recent 'stable' version
        if return_stable and record.status == EFConfig.S3_VERSION_STATUS_STABLE:
            return [record]
        collected.append(record)
    # Caller wanted a 'stable' version and we never saw one
    if return_stable:
        return []
    return sorted(collected, key=lambda v: v.last_modified, reverse=True)
Get all versions of a key Args: context: a populated EFVersionContext object return_stable: (default:False) If True, stop fetching if 'stable' version is found; return only that version Returns: json list of object data sorted in reverse by last_modified (newest version is first). Each item is a dict: { 'value': <value>, 'last_modified": <YYYY-MM-DDThh:mm:ssZ>, (ISO8601 date time string) 'modified_by': '<arn:aws:...>', 'version_id': '<version_id>', 'status': See EF_Config.S3_VERSION_STATUS_* for possible values }
entailment
def get_version_by_value(context, value):
    """Return the latest version whose value matches `value`.

    Args:
        context: a populated EFVersionContext object
        value: the value of the version to look for
    """
    match = next((v for v in get_versions(context) if v.value == value), None)
    if match is not None:
        return match
    fail("Didn't find a matching version for: "
         "{}:{} in env/service: {}/{}".format(
             context.key, value, context.env, context.service_name))
Get the latest version that matches the provided ami-id Args: context: a populated EFVersionContext object value: the value of the version to look for
entailment
def cmd_rollback(context):
    """Roll back by re-putting the most recent "stable" tagged version, so that
    it becomes the new "current" version.

    Args:
        context: a populated EFVersionContext object
    """
    stable_versions = get_versions(context, return_stable=True)
    if len(stable_versions) != 1:
        fail("Didn't find a version marked stable for key: {} in env/service: {}/{}".format(
            context.key, context.env, context.service_name))
    stable = stable_versions[0]
    context.value = stable.value
    context.commit_hash = stable.commit_hash
    context.build_number = stable.build_number
    context.location = stable.location
    context.stable = True
    cmd_set(context)
Roll back by finding the most recent "stable" tagged version, and putting it again, so that it's the new "current" version. Args: context: a populated EFVersionContext object
entailment
def cmd_rollback_to(context):
    """Roll back to a specific version from the service's history, putting it
    as the new current version.

    Args:
        context: a populated EFVersionContext object
    """
    target = get_version_by_value(context, context.rollback_to)
    # Copy the historical record's fields into the context before re-setting it
    for attr in ("value", "commit_hash", "build_number", "location"):
        setattr(context, attr, getattr(target, attr))
    context.stable = True
    cmd_set(context)
Roll back by finding a specific version in the history of the service and putting it as the new current version. Args: context: a populated EFVersionContext object
entailment
def cmd_set(context):
    """Set the new "current" value for a key.

    If the existing current version and the new version have identical value
    and status, nothing is written, to avoid stacking up redundant entries in
    the version table.

    Args:
        context: a populated EFVersionContext object
    """
    # If key value is a special symbol, see if this env allows it
    if context.value in EFConfig.SPECIAL_VERSIONS and context.env_short not in EFConfig.SPECIAL_VERSION_ENVS:
        fail("special version: {} not allowed in env: {}".format(context.value, context.env_short))
    # If key value is a special symbol, the record cannot be marked "stable"
    if context.value in EFConfig.SPECIAL_VERSIONS and context.stable:
        fail("special versions such as: {} cannot be marked 'stable'".format(context.value))
    # Resolve any references
    if context.value == "=prod":
        context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "prod", context.service_name))
    elif context.value == "=staging":
        context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "staging", context.service_name))
    elif context.value == "=latest":
        if not EFConfig.VERSION_KEYS[context.key]["allow_latest"]:
            fail("=latest cannot be used with key: {}".format(context.key))
        func_name = "_getlatest_" + context.key.replace("-", "_")
        if func_name in globals() and isfunction(globals()[func_name]):
            context.value = globals()[func_name](context)
        else:
            raise RuntimeError("{} version for {}/{} is '=latest' but can't look up because method not found: {}".format(
                context.key, context.env, context.service_name, func_name))
    # precheck to confirm coherent world state before attempting set - whatever that means for the current key type
    try:
        precheck(context)
    except Exception as e:
        # FIX: str(e) instead of e.message - Exception.message does not exist in Python 3
        fail("Precheck failed: {}".format(str(e)))
    s3_key = "{}/{}/{}".format(context.service_name, context.env, context.key)
    s3_version_status = EFConfig.S3_VERSION_STATUS_STABLE if context.stable else EFConfig.S3_VERSION_STATUS_UNDEFINED
    # If the set would put a value and status that are the same as the existing
    # 'current' value/status, don't do it
    context.limit = 1
    current_version = get_versions(context)
    # If there is no 'current version' it's ok, just means the set will write the first entry
    if len(current_version) == 1 and current_version[0].status == s3_version_status and \
            current_version[0].value == context.value:
        print("Version not written because current version and new version have identical value and status: {} {}"
              .format(current_version[0].value, current_version[0].status))
        return
    if not context.commit:
        print("=== DRY RUN ===\nUse --commit to set value\n=== DRY RUN ===")
        print("would set key: {} with value: {} {} {} {} {}".format(
            s3_key, context.value, context.build_number, context.commit_hash, context.location, s3_version_status))
    else:
        context.aws_client("s3").put_object(
            ACL='bucket-owner-full-control',
            Body=context.value,
            Bucket=EFConfig.S3_VERSION_BUCKET,
            ContentEncoding=EFConfig.S3_VERSION_CONTENT_ENCODING,
            Key=s3_key,
            Metadata={
                EFConfig.S3_VERSION_BUILDNUMBER_KEY: context.build_number,
                EFConfig.S3_VERSION_COMMITHASH_KEY: context.commit_hash,
                EFConfig.S3_VERSION_LOCATION_KEY: context.location,
                EFConfig.S3_VERSION_MODIFIEDBY_KEY: context.aws_client("sts").get_caller_identity()["Arn"],
                EFConfig.S3_VERSION_STATUS_KEY: s3_version_status
            },
            StorageClass='STANDARD'
        )
        print("set key: {} with value: {} {} {} {} {}".format(
            s3_key, context.value, context.build_number, context.commit_hash, context.location, s3_version_status))
Set the new "current" value for a key. If the existing current version and the new version have identical /value/ and /status, then nothing is written, to avoid stacking up redundant entreis in the version table. Args: context: a populated EFVersionContext object
entailment
def to_json(self):
    """Serialize this Version for json.dumps(); invoked by VersionEncoder.default().

    Note: the json materializes in reverse order from the order used here.
    """
    fields = ("build_number", "commit_hash", "last_modified", "location",
              "modified_by", "status", "value", "version_id")
    # Each public field is backed by a same-named private attribute
    return {name: getattr(self, "_" + name) for name in fields}
called by VersionEncoder.default() when doing json.dumps() on the object the json materializes in reverse order from the order used here
entailment
def learn(self, steps=1, **kwargs):
    """Train the model by repeatedly stepping the agent in its environment.

    Note that the model might be shared between multiple agents (which most
    probably are of the same type) at the same time.

    :param steps: The number of steps to train for.
    """
    # TODO add some housekeeping
    remaining = steps
    while remaining > 0:
        self.step(**kwargs)
        remaining -= 1
Train the model using the environment and the agent. Note that the model might be shared between multiple agents (which most probably are of the same type) at the same time. :param steps: The number of steps to train for.
entailment
def get_value(self, symbol):
    """Hierarchically resolve 'symbol' in the parameters blob, if one was loaded.

    Lookup order is: default, <env_short>, <env>; later sections override
    earlier ones.

    Args:
        symbol: the key to resolve
    Returns:
        Hierarchically resolved value for 'symbol' in the environment set by the
        constructor, or None if a match is not found or there are no parameters
    """
    if not self.parameters:
        return None
    result = None
    # When env_short == env the final lookup is redundant, but it's also cheap
    for section in ("default", self.env_short, self.env):
        entries = self.parameters.get(section)
        if entries is not None and symbol in entries:
            result = entries[symbol]
    # Finally, convert any list of items into a single \n-delimited string
    if isinstance(result, list):
        result = "\n".join(result)
    return result
Hierarchically searches for 'symbol' in the parameters blob if there is one (would have been retrieved by 'load()'). Order is: default, <env_short>, <env> Args: symbol: the key to resolve Returns: Hierarchically resolved value for 'symbol' in the environment set by the constructor, or None if a match is not found or there are no parameters
entailment
def overlay_classification_on_image(classification, rgb_image, scale=1):
    """Overlay a 1-, 2- or 3-channel classification on an input image.

    :param classification: The classification tensor of shape [batch_size, v, u, 1],
        [batch_size, v, u, 2] or [batch_size, v, u, 3]. The value range of the
        classification tensor is supposed to be 0 to 1.
    :param rgb_image: The input image of shape [batch_size, h, w, 3]. The input image
        value range is 0-255. And channel order is RGB. If you have BGR you can use
        image[..., ::-1] to make it RGB.
    :param scale: The scale with which to multiply the size of the classification
        to achieve the normal size.
    :return: The merged image tensor.
    """
    with tf.variable_scope("debug_overlay"):
        channels = classification.get_shape()[3]
        if channels not in [1, 2, 3]:
            raise RuntimeError(
                "The classification can either be of 1, 2 or 3 dimensions as last dimension, but shape is {}".format(
                    classification.get_shape().as_list()))
        size = rgb_image.get_shape()[1:3]
        # Zero-pad missing channels so the overlay is always 3-channel
        if channels == 1:
            classification = tf.pad(classification, [[0, 0], [0, 0], [0, 0], [0, 2]], "CONSTANT")
        elif channels == 2:
            classification = tf.pad(classification, [[0, 0], [0, 0], [0, 0], [0, 1]], "CONSTANT")
        as_float = tf.cast(classification, dtype=tf.float32)
        target_size = (int(classification.get_shape()[1] * scale),
                       int(classification.get_shape()[2] * scale))
        upscaled = tf.image.resize_images(as_float, size=target_size,
                                          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        # Crop back to the image's spatial size in case scaling overshot it
        cropped = tf.image.crop_to_bounding_box(upscaled, 0, 0, size[0], size[1])
        # 50/50 alpha blend; classification is rescaled from [0, 1] to [0, 255]
        return 0.5 * rgb_image + 0.5 * 255 * cropped
Overlay a classification either 1 channel or 3 channels on an input image. :param classification: The classification tensor of shape [bach_size, v, u, 1] or [batch_size, v, u, 3]. The value range of the classification tensor is supposed to be 0 to 1. :param rgb_image: The input image of shape [batch_size, h, w, 3]. The input image value range is 0-255. And channel order is RGB. If you have BGR you can use image[..., ::-1] to make it RGB. :param scale: The scale with which to multiply the size of the image to achieve the normal size. :return: The merged image tensor.
entailment
def inflate_to_one_hot(tensor, classes):
    """Convert a tensor of class indices into one-hot encoding.

    :param tensor: A tensor of shape [batch, h, w, 1]
    :param classes: The number of classes that exist. (length of one hot encoding)
    :return: A tensor of shape [batch, h, w, classes].
    """
    # one_hot appends an axis: [batch, h, w, 1] -> [batch, h, w, 1, classes];
    # the reshape squeezes out the singleton index dimension.
    encoded = tf.one_hot(tensor, classes)
    dims = encoded.get_shape().as_list()
    return tf.reshape(encoded, shape=[-1, dims[1], dims[2], dims[4]])
Converts a tensor with index form to a one hot tensor. :param tensor: A tensor of shape [batch, h, w, 1] :param classes: The number of classes that exist. (length of one hot encoding) :return: A tensor of shape [batch, h, w, classes].
entailment
def accountaliasofenv(self, lookup, default=None):
    """Look up the account alias that hosts an env.

    Args:
        lookup: ENV_SHORT name of an env, such as: 'prod' or 'proto'
        default: the optional value to return if lookup failed; returns None if not set
    Returns:
        The account alias of the account that hosts the env named in lookup,
        or default/None if no match found
    """
    if lookup in EFConfig.ENV_ACCOUNT_MAP:
        return EFConfig.ENV_ACCOUNT_MAP[lookup]
    # FIX: previously returned None unconditionally, ignoring the documented
    # 'default' parameter
    return default
Args: lookup: ENV_SHORT name of an env, such as: 'prod' or 'proto' default: the optional value to return if lookup failed; returns None if not set Returns: The account alias of the account that hosts the env named in lookupor default/None if no match found
entailment
def customdata(self, lookup, default=None):
    """Fetch an entry from the custom data blob.

    Args:
        lookup: the custom data file
        default: the optional value to return if lookup failed; returns None if not set
    Returns:
        The custom data returned from the file 'lookup' or default/None if no match found
    """
    try:
        # .get() covers the missing-key case; AttributeError is still possible
        # when EFConfig has no CUSTOM_DATA attribute at all
        return EFConfig.CUSTOM_DATA.get(lookup, default)
    except AttributeError:
        return default
Args: lookup: the custom data file default: the optional value to return if lookup failed; returns None if not set Returns: The custom data returned from the file 'lookup' or default/None if no match found
entailment
def fail(message, exception_data=None):
    """Print a failure message (to stderr) and exit nonzero.

    Args:
        message: text written to stderr
        exception_data: optional; its repr() is printed to stdout when truthy
    """
    print(message, file=sys.stderr)
    if exception_data:
        print(repr(exception_data))
    sys.exit(1)
Print a failure message and exit nonzero
entailment
def http_get_metadata(metadata_path, timeout=__HTTP_DEFAULT_TIMEOUT_SEC):
    """Fetch AWS metadata from http://169.254.169.254/latest/meta-data/<metadata_path>

    Args:
        metadata_path: the optional path and required key to the EC2 metadata (e.g. "instance-id")
        timeout: seconds to wait for the metadata endpoint
    Returns:
        response content on success
    Raises:
        IOError if there was a problem reading metadata
        (FIX: docstring previously claimed URLError, but URLError is caught
        and re-raised as IOError below)
    """
    metadata_path = __METADATA_PREFIX + metadata_path
    try:
        response = urllib2.urlopen(metadata_path, None, timeout)
        try:
            if response.getcode() != 200:
                raise IOError("Non-200 response " + str(response.getcode()) + " reading " + metadata_path)
            return response.read()
        finally:
            # FIX: close the HTTP response; it was previously leaked
            response.close()
    except urllib2.URLError as error:
        raise IOError("URLError in http_get_metadata: " + repr(error))
Fetch AWS metadata from http://169.254.169.254/latest/meta-data/<metadata_path> ARGS: metadata_path - the optional path and required key to the EC2 metadata (e.g. "instance-id") RETURN: response content on success RAISE: URLError if there was a problem reading metadata
entailment
def is_in_virtualbox():
    """Report whether the current environment is a VirtualBox instance.

    Returns a boolean.
    Raises IOError if the necessary tooling isn't available or fails to run.
    """
    # virt-what must exist and be executable before we can ask it anything
    if not (isfile(__VIRT_WHAT) and access(__VIRT_WHAT, X_OK)):
        raise IOError("virt-what not available")
    try:
        output = subprocess.check_output(["sudo", "-n", __VIRT_WHAT])
    except subprocess.CalledProcessError as e:
        raise IOError("virt-what failed execution with {}".format(e))
    # the first two output lines identify virtualbox-with-kvm
    return output.split('\n')[0:2] == __VIRT_WHAT_VIRTUALBOX_WITH_KVM
Is the current environment a virtualbox instance? Returns a boolean Raises IOError if the necessary tooling isn't available
entailment
def whereami():
    """Determine if this is an ec2 instance or "running locally"

    Returns:
        "ec2" - this is an ec2 instance
        "virtualbox-kvm" - kernel VM (virtualbox with vagrant)
        "local" - running locally and not in a known VM
        "unknown" - I have no idea where I am
    """
    # If the metadata endpoint responds, this is an EC2 instance
    # If it doesn't, we can safely say this isn't EC2 and try the other options
    try:
        response = http_get_metadata("instance-id", 1)
        if response[:2] == "i-":
            return "ec2"
    # FIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the best-effort probe without
    # masking interpreter exits
    except Exception:
        pass
    # Virtualbox?
    try:
        if is_in_virtualbox():
            return "virtualbox-kvm"
    except Exception:
        pass
    # Outside virtualbox/vagrant but not in aws; hostname is "<name>.local"
    if gethostname().endswith(".local"):
        return "local"
    # we have no idea where we are
    return "unknown"
Determine if this is an ec2 instance or "running locally" Returns: "ec2" - this is an ec2 instance "virtualbox-kvm" - kernel VM (virtualbox with vagrant) "local" - running locally and not in a known VM "unknown" - I have no idea where I am
entailment
def http_get_instance_env():
    """Return just the env this ec2 instance is in.

    Doesn't require API access like get_instance_aws_context does.
    Example return value: "staging"
    """
    try:
        info = json.loads(http_get_metadata('iam/info'))
    except Exception as error:
        raise IOError("Error looking up metadata:iam/info: " + repr(error))
    # ARN field 5 is "instance-profile/<env>-<service>"; env is the text
    # before the first '-' of the profile name
    profile = info["InstanceProfileArn"].split(":")[5]
    role_name = profile.split("/")[1]
    return role_name.split("-", 1)[0]
Returns: just the env this ec2 instance is in. Doesn't require API access like get_instance_aws_context does Example return value: "staging"
entailment
def get_instance_aws_context(ec2_client):
    """Collect the AWS context of the instance we are running on.

    Returns:
        a dictionary of aws context with these entries:
        region, instance_id, account, role, env, env_short, service
    Raises:
        IOError if couldn't read metadata or lookup attempt failed
    """
    result = {}
    try:
        # the availability zone is the region plus a one-letter suffix; strip it
        availability_zone = http_get_metadata("placement/availability-zone/")
        result["region"] = availability_zone[:-1]
        result["instance_id"] = http_get_metadata('instance-id')
    except IOError as error:
        raise IOError("Error looking up metadata:availability-zone or instance-id: " + repr(error))
    try:
        instance_desc = ec2_client.describe_instances(InstanceIds=[result["instance_id"]])
    except Exception as error:
        raise IOError("Error calling describe_instances: " + repr(error))
    reservation = instance_desc["Reservations"][0]
    result["account"] = reservation["OwnerId"]
    arn = reservation["Instances"][0]["IamInstanceProfile"]["Arn"]
    result["role"] = arn.split(":")[5].split("/")[1]
    env_match = re.search("^(" + EFConfig.VALID_ENV_REGEX + ")-", result["role"])
    if not env_match:
        raise IOError("Did not find environment in role name: " + result["role"])
    result["env"] = env_match.group(1)
    result["env_short"] = result["env"].strip(".0123456789")
    result["service"] = "-".join(result["role"].split("-")[1:])
    return result
Returns: a dictionary of aws context dictionary will contain these entries: region, instance_id, account, role, env, env_short, service Raises: IOError if couldn't read metadata or lookup attempt failed
entailment
def pull_repo():
    """Pull the latest EF_REPO_BRANCH from EF_REPO (as set in ef_config.py).

    Only proceeds if the client is in EF_REPO and on branch EF_REPO_BRANCH.

    Raises:
        RuntimeError with message if not in the correct repo on the correct branch
    """
    try:
        remotes = subprocess.check_output(["git", "remote", "-v", "show"])
    except subprocess.CalledProcessError as error:
        raise RuntimeError("Exception checking current repo", error)
    # Extract the host/path portion of the remote URL; ':' -> '/' normalizes ssh-style remotes
    current_repo = re.findall("(https://|@)(.*?)(.git|[ ])", remotes)[0][1].replace(":", "/")
    if current_repo != EFConfig.EF_REPO:
        raise RuntimeError("Must be in " + EFConfig.EF_REPO + " repo. Current repo is: " + current_repo)
    try:
        current_branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).rstrip()
    except subprocess.CalledProcessError as error:
        raise RuntimeError("Exception checking current branch: " + repr(error))
    if current_branch != EFConfig.EF_REPO_BRANCH:
        raise RuntimeError("Must be on branch: " + EFConfig.EF_REPO_BRANCH + ". Current branch is: " + current_branch)
    try:
        subprocess.check_call(["git", "pull", "-q", "origin", EFConfig.EF_REPO_BRANCH])
    except subprocess.CalledProcessError as error:
        raise RuntimeError("Exception running 'git pull': " + repr(error))
Pulls latest version of EF_REPO_BRANCH from EF_REPO (as set in ef_config.py) if client is in EF_REPO and on the branch EF_REPO_BRANCH Raises: RuntimeError with message if not in the correct repo on the correct branch
entailment
def create_aws_clients(region, profile, *clients):
    """Create boto3 clients for one or more AWS services.

    These are the services used within the libs:
    cloudformation, cloudfront, ec2, iam, lambda, route53, waf

    Args:
        region: the region in which to create clients that are region-specific (all but IAM)
        profile: Name of profile (in .aws/credentials). Pass the value None if
            using instance credentials on EC2 or Lambda
        clients: names of the clients to create (lowercase, must match what boto3 expects)
    Returns:
        A dictionary of <key>,<value> pairs for several AWS services, using the
        labels above as keys, e.g.: { "cloudfront": <cloudfront_client>, ... }
        Dictionary contains an extra record, "SESSION" - pointing to the session
        that created the clients
    """
    profile = profile or None  # normalize falsy values (e.g. "") to None for boto3
    cache_key = (region, profile)
    cached = client_cache.get(cache_key, {})
    missing = set(clients) - set(cached)
    if not missing:
        return cached
    try:
        session = cached.get("SESSION")
        if not session:
            session = boto3.Session(region_name=region, profile_name=profile)
            # keep the session itself in case it's needed by the client code -
            # can't get it from the clients themselves
            cached["SESSION"] = session
        # build the clients that aren't in the cache yet
        cached.update({name: session.client(name) for name in missing})
        client_cache[cache_key] = cached
        return cached
    except ClientError as error:
        raise RuntimeError("Exception logging in with Session() and creating clients", error)
Create boto3 clients for one or more AWS services. These are the services used within the libs: cloudformation, cloudfront, ec2, iam, lambda, route53, waf Args: region: the region in which to create clients that are region-specific (all but IAM) profile: Name of profile (in .aws/credentials). Pass the value None if using instance credentials on EC2 or Lambda clients: names of the clients to create (lowercase, must match what boto3 expects) Returns: A dictionary of <key>,<value> pairs for several AWS services, using the labels above as keys, e.g.: { "cloudfront": <cloudfront_client>, ... } Dictionary contains an extra record, "SESSION" - pointing to the session that created the clients
entailment
def get_account_alias(env):
    """Given an env, return <account_alias> if env is valid.

    Args:
        env: an environment, such as "prod", "staging", "proto<N>", "mgmt.<account_alias>"
    Returns:
        the alias of the AWS account that holds the env
    Raises:
        ValueError if env is misformatted or doesn't name a known environment
    """
    env_valid(env)
    # Global envs of the form "env.<account_alias>" (e.g. "mgmt.<account_alias>")
    # carry the alias directly after the dot
    if "." in env:
        _, alias = env.split(".")
        return alias
    # Ordinary env, possibly a proto env ending with a digit that is stripped to look up the alias
    env_short = env.strip(".0123456789")
    if env_short not in EFConfig.ENV_ACCOUNT_MAP:
        raise ValueError("generic env: {} has no entry in ENV_ACCOUNT_MAP of ef_site_config.py".format(env_short))
    return EFConfig.ENV_ACCOUNT_MAP[env_short]
Given an env, return <account_alias> if env is valid Args: env: an environment, such as "prod", "staging", "proto<N>", "mgmt.<account_alias>" Returns: the alias of the AWS account that holds the env Raises: ValueError if env is misformatted or doesn't name a known environment
entailment
def get_env_short(env):
    """Given an env, return <env_short> if env is valid.

    Args:
        env: an environment, such as "prod", "staging", "proto<N>", "mgmt.<account_alias>"
    Returns:
        the shortname of the env, such as "prod", "staging", "proto", "mgmt"
    Raises:
        ValueError if env is misformatted or doesn't name a known environment
    """
    env_valid(env)
    # Global env "<short>.<account_alias>": the part before the dot is the shortname
    if "." in env:
        short, _ = env.split(".")
        return short
    # Proto envs carry a numeric suffix that is not part of the shortname
    return env.strip(".0123456789")
Given an env, return <env_short> if env is valid Args: env: an environment, such as "prod", "staging", "proto<N>", "mgmt.<account_alias>" Returns: the shortname of the env, such as "prod", "staging", "proto", "mgmt" Raises: ValueError if env is misformatted or doesn't name a known environment
entailment
def env_valid(env):
    """Check whether an env is known.

    Args:
        env: the env to check
    Returns:
        True if the env is valid
    Raises:
        ValueError with message if the env is not valid
    """
    if env in EFConfig.ENV_LIST:
        return True
    raise ValueError("unknown env: {}; env must be one of: ".format(env) + ", ".join(EFConfig.ENV_LIST))
Given an env, determine if it's valid Args: env: the env to check Returns: True if the env is valid Raises: ValueError with message if the env is not valid
entailment
def global_env_valid(env):
    """Check whether an env is a valid "global" or "mgmt" env as listed in EFConfig.

    Args:
        env: the env to check
    Returns:
        True if the env is a valid global env in EFConfig
    Raises:
        ValueError with message if the env is not valid
    """
    if env in EFConfig.ACCOUNT_SCOPED_ENVS:
        return True
    raise ValueError("Invalid global env: {}; global envs are: {}".format(env, EFConfig.ACCOUNT_SCOPED_ENVS))
Given an env, determine if it's a valid "global" or "mgmt" env as listed in EFConfig Args: env: the env to check Returns: True if the env is a valid global env in EFConfig Raises: ValueError with message if the env is not valid
entailment
def kms_encrypt(kms_client, service, env, secret): """ Encrypt string for use by a given service/environment Args: kms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients. service (string): name of the service that the secret is being encrypted for. env (string): environment that the secret is being encrypted for. secret (string): value to be encrypted Returns: a populated EFPWContext object Raises: SystemExit(1): If there is an error with the boto3 encryption call (ex. missing kms key) """ # Converting all periods to underscores because they are invalid in KMS alias names key_alias = '{}-{}'.format(env, service.replace('.', '_')) try: response = kms_client.encrypt( KeyId='alias/{}'.format(key_alias), Plaintext=secret.encode() ) except ClientError as error: if error.response['Error']['Code'] == "NotFoundException": fail("Key '{}' not found. You may need to run ef-generate for this environment.".format(key_alias), error) else: fail("boto3 exception occurred while performing kms encrypt operation.", error) encrypted_secret = base64.b64encode(response['CiphertextBlob']) return encrypted_secret
Encrypt string for use by a given service/environment Args: kms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients. service (string): name of the service that the secret is being encrypted for. env (string): environment that the secret is being encrypted for. secret (string): value to be encrypted Returns: a populated EFPWContext object Raises: SystemExit(1): If there is an error with the boto3 encryption call (ex. missing kms key)
entailment
def kms_decrypt(kms_client, secret): """ Decrypt kms-encrypted string Args: kms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients. secret (string): base64 encoded value to be decrypted Returns: a populated EFPWContext object Raises: SystemExit(1): If there is an error with the boto3 decryption call (ex. malformed secret) """ try: decrypted_secret = kms_client.decrypt(CiphertextBlob=base64.b64decode(secret))['Plaintext'] except TypeError: fail("Malformed base64 string data") except ClientError as error: if error.response["Error"]["Code"] == "InvalidCiphertextException": fail("The decrypt request was rejected because the specified ciphertext \ has been corrupted or is otherwise invalid.", error) elif error.response["Error"]["Code"] == "NotFoundException": fail("The decrypt request was rejected because the specified entity or resource could not be found.", error) else: fail("boto3 exception occurred while performing kms decrypt operation.", error) return decrypted_secret
Decrypt kms-encrypted string Args: kms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients. secret (string): base64 encoded value to be decrypted Returns: a populated EFPWContext object Raises: SystemExit(1): If there is an error with the boto3 decryption call (ex. malformed secret)
entailment
def kms_key_arn(kms_client, alias): """ Obtain the full key arn based on the key alias provided Args: kms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients. alias (string): alias of key, example alias/proto0-evs-drm. Returns: string of the full key arn """ try: response = kms_client.describe_key(KeyId=alias) key_arn = response["KeyMetadata"]["Arn"] except ClientError as error: raise RuntimeError("Failed to obtain key arn for alias {}, error: {}".format(alias, error.response["Error"]["Message"])) return key_arn
Obtain the full key arn based on the key alias provided Args: kms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients. alias (string): alias of key, example alias/proto0-evs-drm. Returns: string of the full key arn
entailment
def get_template_parameters_file(template_full_path): """ Checks for existance of parameters file against supported suffixes and returns parameters file path if found Args: template_full_path: full filepath for template file Returns: filename of parameters file if it exists """ for suffix in EFConfig.PARAMETER_FILE_SUFFIXES: parameters_file = template_full_path.replace("/templates", "/parameters") + suffix if exists(parameters_file): return parameters_file else: continue return None
Checks for existance of parameters file against supported suffixes and returns parameters file path if found Args: template_full_path: full filepath for template file Returns: filename of parameters file if it exists
entailment
def get_template_parameters_s3(template_key, s3_resource): """ Checks for existance of parameters object in S3 against supported suffixes and returns parameters file key if found Args: template_key: S3 key for template file. omit bucket. s3_resource: a boto3 s3 resource Returns: filename of parameters file if it exists """ for suffix in EFConfig.PARAMETER_FILE_SUFFIXES: parameters_key = template_key.replace("/templates", "/parameters") + suffix try: obj = s3_resource.Object(EFConfig.S3_CONFIG_BUCKET, parameters_key) obj.get() return parameters_key except ClientError: continue return None
Checks for existance of parameters object in S3 against supported suffixes and returns parameters file key if found Args: template_key: S3 key for template file. omit bucket. s3_resource: a boto3 s3 resource Returns: filename of parameters file if it exists
entailment
def get_autoscaling_group_properties(asg_client, env, service): """ Gets the autoscaling group properties based on the service name that is provided. This function will attempt the find the autoscaling group base on the following logic: 1. If the service name provided matches the autoscaling group name 2. If the service name provided matches the Name tag of the autoscaling group 3. If the service name provided does not match the above, return None Args: clients: Instantiated boto3 autoscaling client env: Name of the environment to search for the autoscaling group service: Name of the service Returns: JSON object of the autoscaling group properties if it exists """ try: # See if {{ENV}}-{{SERVICE}} matches ASG name response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=["{}-{}".format(env, service)]) if len(response["AutoScalingGroups"]) == 0: # See if {{ENV}}-{{SERVICE}} matches ASG tag name response = asg_client.describe_tags(Filters=[{ "Name": "Key", "Values": ["Name"] }, { "Name": "Value", "Values": ["{}-{}".format(env, service)]}]) if len(response["Tags"]) == 0: # Query does not match either of the above, return None return None else: asg_name = response["Tags"][0]["ResourceId"] response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name]) return response["AutoScalingGroups"] else: return response["AutoScalingGroups"] except ClientError as error: raise RuntimeError("Error in finding autoscaling group {} {}".format(env, service), error)
Gets the autoscaling group properties based on the service name that is provided. This function will attempt the find the autoscaling group base on the following logic: 1. If the service name provided matches the autoscaling group name 2. If the service name provided matches the Name tag of the autoscaling group 3. If the service name provided does not match the above, return None Args: clients: Instantiated boto3 autoscaling client env: Name of the environment to search for the autoscaling group service: Name of the service Returns: JSON object of the autoscaling group properties if it exists
entailment
def get_default_config(gpu_memory_usage=0.75, allow_growth=False): """ A helper to create sessions easily. :param gpu_memory_usage: How much of the gpu should be used for your project. :param allow_growth: If you want to have a fixed gpus size or if it should grow and use just as much as it needs. :return: A configuration you can pass to your session when creating it. """ config = tf.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_usage config.gpu_options.allow_growth = allow_growth return config
A helper to create sessions easily. :param gpu_memory_usage: How much of the gpu should be used for your project. :param allow_growth: If you want to have a fixed gpus size or if it should grow and use just as much as it needs. :return: A configuration you can pass to your session when creating it.
entailment
def export_graph(checkpoint_path, output_nodes): """ Export a graph stored in a checkpoint as a *.pb file. :param checkpoint_path: The checkpoint path which should be frozen. :param output_nodes: The output nodes you care about as a list of strings (their names). :return: """ if not tf.gfile.Exists(checkpoint_path): raise AssertionError( "Export directory doesn't exists. Please specify an export " "directory: %s" % checkpoint_path) if not output_nodes: print("You need to supply the name of a node to --output_node_names.") return -1 # We retrieve our checkpoint fullpath checkpoint = tf.train.get_checkpoint_state(checkpoint_path) input_checkpoint = checkpoint.model_checkpoint_path # We precise the file fullname of our freezed graph output_graph = checkpoint_path + "/frozen_model.pb" # We clear devices to allow TensorFlow to control on which device it will load operations clear_devices = True # We start a session using a temporary fresh Graph with tf.Session(graph=tf.Graph()) as sess: # We import the meta graph in the current default Graph saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices) # We restore the weights saver.restore(sess, input_checkpoint) # We use a built-in TF helper to export variables to constants output_graph_def = tf.graph_util.convert_variables_to_constants( sess, # The session is used to retrieve the weights tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes output_nodes # The output node names are used to select the useful nodes ) # Finally we serialize and dump the output graph to the filesystem with tf.gfile.GFile(output_graph, "wb") as f: f.write(output_graph_def.SerializeToString()) print("%d ops in the final graph." % len(output_graph_def.node)) return output_graph_def
Export a graph stored in a checkpoint as a *.pb file. :param checkpoint_path: The checkpoint path which should be frozen. :param output_nodes: The output nodes you care about as a list of strings (their names). :return:
entailment
def load_graph(frozen_graph_filename, namespace_prefix="", placeholders=None): """ Loads a frozen graph from a *.pb file. :param frozen_graph_filename: The file which graph to load. :param namespace_prefix: A namespace for your graph to live in. This is useful when having multiple models. :param placeholders: A dict containing the new placeholders that replace the old inputs. :return: The graph that can now be passed to a session when creating it. """ # Load graph def from protobuff and import the definition with tf.gfile.GFile(frozen_graph_filename, "rb") as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) if placeholders is None: with tf.Graph().as_default() as graph: tf.import_graph_def(graph_def, name=namespace_prefix) else: with tf.Graph().as_default() as graph: tf.import_graph_def(graph_def, input_map=placeholders, name=namespace_prefix) return graph
Loads a frozen graph from a *.pb file. :param frozen_graph_filename: The file which graph to load. :param namespace_prefix: A namespace for your graph to live in. This is useful when having multiple models. :param placeholders: A dict containing the new placeholders that replace the old inputs. :return: The graph that can now be passed to a session when creating it.
entailment
def _elbv2_load_balancer(self, lookup): """ Args: lookup: the friendly name of the V2 elb to look up Returns: A dict with the load balancer description Raises: botocore.exceptions.ClientError: no such load-balancer """ client = EFAwsResolver.__CLIENTS['elbv2'] elbs = client.describe_load_balancers(Names=[lookup]) # getting the first one, since we requested only one lb elb = elbs['LoadBalancers'][0] return elb
Args: lookup: the friendly name of the V2 elb to look up Returns: A dict with the load balancer description Raises: botocore.exceptions.ClientError: no such load-balancer
entailment
def acm_certificate_arn(self, lookup, default=None): """ Args: lookup: region/domain on the certificate to be looked up default: the optional value to return if lookup failed; returns None if not set Returns: ARN of a certificate with status "Issued" for the region/domain, if found, or default/None if no match If more than one "Issued" certificate matches the region/domain: - if any matching cert was issued by Amazon, returns ARN of certificate with most recent IssuedAt timestamp - if no certs were issued by Amazon, returns ARN of an arbitrary matching certificate - certificates issued by Amazon take precedence over certificates not issued by Amazon """ # @todo: Only searches the first 100 certificates in the account try: # This a region-specific client, so we'll make a new client in the right place using existing SESSION region_name, domain_name = lookup.split("/") acm_client = EFAwsResolver.__CLIENTS["SESSION"].client(service_name="acm", region_name=region_name) response = acm_client.list_certificates( CertificateStatuses=['ISSUED'], MaxItems=100 ) except Exception: return default # No certificates if len(response["CertificateSummaryList"]) < 1: return default # One or more certificates - find cert with latest IssuedAt date or an arbitrary cert if none are dated best_match_cert = None for cert_handle in response["CertificateSummaryList"]: if cert_handle["DomainName"] == domain_name: cert = acm_client.describe_certificate(CertificateArn=cert_handle["CertificateArn"])["Certificate"] # Patch up cert if there is no IssuedAt (i.e. cert was not issued by Amazon) if not cert.has_key("IssuedAt"): cert[u"IssuedAt"] = datetime.datetime(1970, 1, 1, 0, 0) if best_match_cert is None: best_match_cert = cert elif cert["IssuedAt"] > best_match_cert["IssuedAt"]: best_match_cert = cert if best_match_cert is not None: return best_match_cert["CertificateArn"] return default
Args: lookup: region/domain on the certificate to be looked up default: the optional value to return if lookup failed; returns None if not set Returns: ARN of a certificate with status "Issued" for the region/domain, if found, or default/None if no match If more than one "Issued" certificate matches the region/domain: - if any matching cert was issued by Amazon, returns ARN of certificate with most recent IssuedAt timestamp - if no certs were issued by Amazon, returns ARN of an arbitrary matching certificate - certificates issued by Amazon take precedence over certificates not issued by Amazon
entailment
def ec2_elasticip_elasticip_id(self, lookup, default=None): """ Args: lookup: the CloudFormation resource name of the Elastic IP ID to look up default: the optional value to return if lookup failed; returns None if not set Returns: The ID of the first Elastic IP found with a description matching 'lookup' or default/None if no match found """ public_ip = self.ec2_elasticip_elasticip_ipaddress(lookup) if public_ip is None: return default try: eips = EFAwsResolver.__CLIENTS["ec2"].describe_addresses( PublicIps=[public_ip] ) # Public IP not found except ClientError: return default eip_id = eips["Addresses"][0]["AllocationId"] return eip_id
Args: lookup: the CloudFormation resource name of the Elastic IP ID to look up default: the optional value to return if lookup failed; returns None if not set Returns: The ID of the first Elastic IP found with a description matching 'lookup' or default/None if no match found
entailment
def ec2_elasticip_elasticip_ipaddress(self, lookup, default=None): """ Args: lookup: the CloudFormation resource name of the Elastic IP address to look up default: the optional value to return if lookup failed; returns None if not set Returns: The IP address of the first Elastic IP found with a description matching 'lookup' or default/None if no match """ # Extract environment from resource ID to build stack name m = re.search('ElasticIp([A-Z]?[a-z]+[0-9]?)\w+', lookup) # The lookup string was not a valid ElasticIp resource label if m is None: return default env = m.group(1) stackname = "{}-elasticip".format(env.lower()) # Convert env substring to title in case {{ENV}} substitution is being used lookup = lookup.replace(env, env.title()) # Look up the EIP resource in the stack to get the IP address assigned to the EIP try: eip_stack = EFAwsResolver.__CLIENTS["cloudformation"].describe_stack_resources( StackName=stackname, LogicalResourceId=lookup ) except ClientError: return default stack_resources = eip_stack["StackResources"] # Resource does not exist in stack if len(stack_resources) < 1: return default eip_publicip = stack_resources[0]["PhysicalResourceId"] return eip_publicip
Args: lookup: the CloudFormation resource name of the Elastic IP address to look up default: the optional value to return if lookup failed; returns None if not set Returns: The IP address of the first Elastic IP found with a description matching 'lookup' or default/None if no match
entailment
def ec2_eni_eni_id(self, lookup, default=None): """ Args: lookup: the description of the Elastic Network Interface (ENI) to look up default: the optional value to return if lookup failed; returns None if not set Returns: The ID of the first ENI found with a description matching 'lookup' or default/None if no match found """ enis = EFAwsResolver.__CLIENTS["ec2"].describe_network_interfaces(Filters=[{ 'Name': 'description', 'Values': [lookup] }]) if len(enis.get("NetworkInterfaces")) > 0: return enis["NetworkInterfaces"][0]["NetworkInterfaceId"] else: return default
Args: lookup: the description of the Elastic Network Interface (ENI) to look up default: the optional value to return if lookup failed; returns None if not set Returns: The ID of the first ENI found with a description matching 'lookup' or default/None if no match found
entailment
def ec2_network_network_acl_id(self, lookup, default=None): """ Args: lookup: the friendly name of the network ACL we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the network ACL, or None if no match found """ network_acl_id = EFAwsResolver.__CLIENTS["ec2"].describe_network_acls(Filters=[{ 'Name': 'tag:Name', 'Values': [lookup] }]) if len(network_acl_id["NetworkAcls"]) > 0: return network_acl_id["NetworkAcls"][0]["NetworkAclId"] else: return default
Args: lookup: the friendly name of the network ACL we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the network ACL, or None if no match found
entailment
def ec2_security_group_security_group_id(self, lookup, default=None): """ Args: lookup: the friendly name of a security group to look up default: the optional value to return if lookup failed; returns None if not set Returns: Security group ID if target found or default/None if no match """ try: response = EFAwsResolver.__CLIENTS["ec2"].describe_security_groups(Filters=[{ 'Name':'group-name', 'Values':[lookup] }]) except: return default if len(response["SecurityGroups"]) > 0: return response["SecurityGroups"][0]["GroupId"] else: return default
Args: lookup: the friendly name of a security group to look up default: the optional value to return if lookup failed; returns None if not set Returns: Security group ID if target found or default/None if no match
entailment
def ec2_subnet_subnet_id(self, lookup, default=None): """ Return: the ID of a single subnet or default/None if no match Args: lookup: the friendly name of the subnet to look up (subnet-<env>-a or subnet-<env>-b) default: the optional value to return if lookup failed; returns None if not set """ subnets = EFAwsResolver.__CLIENTS["ec2"].describe_subnets(Filters=[{ 'Name': 'tag:Name', 'Values': [lookup] }]) if len(subnets["Subnets"]) > 0: return subnets["Subnets"][0]["SubnetId"] else: return default
Return: the ID of a single subnet or default/None if no match Args: lookup: the friendly name of the subnet to look up (subnet-<env>-a or subnet-<env>-b) default: the optional value to return if lookup failed; returns None if not set
entailment
def ec2_vpc_availabilityzones(self, lookup, default=None): """ Args: lookup: the friendly name of a VPC to look up default: the optional value to return if lookup failed; returns None if not set Returns: A comma-separated list of availability zones in use in the named VPC or default/None if no match """ vpc_id = self.ec2_vpc_vpc_id(lookup) if vpc_id is None: return default subnets = EFAwsResolver.__CLIENTS["ec2"].describe_subnets(Filters=[{ 'Name': 'vpc-id', 'Values': [vpc_id] }]) if len(subnets["Subnets"]) > 0: # Strip the metadata section (subnets["Subnets"]) az_list = [s["AvailabilityZone"] for s in subnets["Subnets"]] # Add internal ", " only. This is called literally from: "{{aws...}}" - CF template needs the outer quotes return "\", \"".join(az_list) else: return default
Args: lookup: the friendly name of a VPC to look up default: the optional value to return if lookup failed; returns None if not set Returns: A comma-separated list of availability zones in use in the named VPC or default/None if no match
entailment
def ec2_vpc_subnets(self, lookup, default=None): """ Args: lookup - the friendly name of the VPC whose subnets we want Returns: A comma-separated list of all subnets in use in the named VPC or default/None if no match found """ vpc_id = self.ec2_vpc_vpc_id(lookup) if vpc_id is None: return default subnets = EFAwsResolver.__CLIENTS["ec2"].describe_subnets(Filters=[{ 'Name': 'vpc-id', 'Values': [vpc_id] }]) if len(subnets["Subnets"]) > 0: # Strip the metadata section (subnets["Subnets"]) subnet_list = [s["SubnetId"] for s in subnets["Subnets"]] # Add internal ", " only. This is called literally from: "{{aws...}}" - reuses the outer quotes return "\", \"".join(subnet_list) else: return default
Args: lookup - the friendly name of the VPC whose subnets we want Returns: A comma-separated list of all subnets in use in the named VPC or default/None if no match found
entailment
def ec2_vpc_cidrblock(self, lookup, default=None): """ Args: lookup - the friendly name of the VPC whose CIDR block we want Returns: The CIDR block of the named VPC, or default/None if no match found """ vpcs = EFAwsResolver.__CLIENTS["ec2"].describe_vpcs(Filters=[{ 'Name': 'tag:Name', 'Values': [lookup] }]) if len(vpcs.get("Vpcs")) > 0: return vpcs["Vpcs"][0]["CidrBlock"] else: return default
Args: lookup - the friendly name of the VPC whose CIDR block we want Returns: The CIDR block of the named VPC, or default/None if no match found
entailment
def elbv2_load_balancer_hosted_zone(self, lookup, default=None): """ Args: lookup: the friendly name of the V2 elb to look up default: value to return in case of no match Returns: The hosted zone ID of the ELB found with a name matching 'lookup'. """ try: elb = self._elbv2_load_balancer(lookup) return elb['CanonicalHostedZoneId'] except ClientError: return default
Args: lookup: the friendly name of the V2 elb to look up default: value to return in case of no match Returns: The hosted zone ID of the ELB found with a name matching 'lookup'.
entailment
def elbv2_load_balancer_dns_name(self, lookup, default=None): """ Args: lookup: the friendly name of the V2 elb to look up default: value to return in case of no match Returns: The hosted zone ID of the ELB found with a name matching 'lookup'. """ try: elb = self._elbv2_load_balancer(lookup) return elb['DNSName'] except ClientError: return default
Args: lookup: the friendly name of the V2 elb to look up default: value to return in case of no match Returns: The hosted zone ID of the ELB found with a name matching 'lookup'.
entailment
def elbv2_load_balancer_arn_suffix(self, lookup, default=None): """ Args: lookup: the friendly name of the v2 elb to look up default: value to return in case of no match Returns: The shorthand fragment of the ALB's ARN, of the form `app/*/*` """ try: elb = self._elbv2_load_balancer(lookup) m = re.search(r'.+?(app\/[^\/]+\/[^\/]+)$', elb['LoadBalancerArn']) return m.group(1) except ClientError: return default
Args: lookup: the friendly name of the v2 elb to look up default: value to return in case of no match Returns: The shorthand fragment of the ALB's ARN, of the form `app/*/*`
entailment
def elbv2_target_group_arn_suffix(self, lookup, default=None): """ Args: lookup: the friendly name of the v2 elb target group default: value to return in case of no match Returns: The shorthand fragment of the target group's ARN, of the form `targetgroup/*/*` """ try: client = EFAwsResolver.__CLIENTS['elbv2'] elbs = client.describe_target_groups(Names=[lookup]) elb = elbs['TargetGroups'][0] m = re.search(r'.+?(targetgroup\/[^\/]+\/[^\/]+)$', elb['TargetGroupArn']) return m.group(1) except ClientError: return default
Args: lookup: the friendly name of the v2 elb target group default: value to return in case of no match Returns: The shorthand fragment of the target group's ARN, of the form `targetgroup/*/*`
entailment
def waf_rule_id(self, lookup, default=None): """ Args: lookup: the friendly name of a WAF rule default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the WAF rule whose name matches 'lookup' or default/None if no match found """ # list_rules returns at most 100 rules per request list_limit = 100 rules = EFAwsResolver.__CLIENTS["waf"].list_rules(Limit=list_limit) while True: for rule in rules["Rules"]: if rule["Name"] == lookup: return rule["RuleId"] if rules.has_key("NextMarker"): rules = EFAwsResolver.__CLIENTS["waf"].list_rules(Limit=list_limit, NextMarker=rules["NextMarker"]) else: return default
Args: lookup: the friendly name of a WAF rule default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the WAF rule whose name matches 'lookup' or default/None if no match found
entailment
def waf_web_acl_id(self, lookup, default=None): """ Args: lookup: the friendly name of a Web ACL default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the WAF Web ACL whose name matches rule_name or default/None if no match found """ # list_rules returns at most 100 rules per request list_limit = 100 acls = EFAwsResolver.__CLIENTS["waf"].list_web_acls(Limit=list_limit) while True: for acl in acls["WebACLs"]: if acl["Name"] == lookup: return acl["WebACLId"] if acls.has_key("NextMarker"): acls = EFAwsResolver.__CLIENTS["waf"].list_web_acls(Limit=list_limit, NextMarker=acls["NextMarker"]) else: return default
Args: lookup: the friendly name of a Web ACL default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the WAF Web ACL whose name matches rule_name or default/None if no match found
entailment
def route53_public_hosted_zone_id(self, lookup, default=None): """ Args: lookup: The zone name to look up. Must end with "." default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the public hosted zone for the 'lookup' domain, or default/None if no match found """ list_limit = "100" # enforce terminal '.' in name, otherwise we could get a partial match of the incorrect zones if lookup[-1] != '.': return default hosted_zones = EFAwsResolver.__CLIENTS["route53"].list_hosted_zones_by_name(DNSName=lookup, MaxItems=list_limit) # Return if the account has no HostedZones if not hosted_zones.has_key("HostedZones"): return default while True: for hosted_zone in hosted_zones["HostedZones"]: if lookup == hosted_zone["Name"] and not hosted_zone["Config"]["PrivateZone"]: return hosted_zone["Id"].split("/")[2] if hosted_zones["IsTruncated"]: hosted_zones = EFAwsResolver.__CLIENTS["route53"].list_hosted_zones_by_name( DNSName=hosted_zones["NextDNSName"], HostedZoneId=hosted_zones["NextHostedZoneId"], MaxItems=list_limit) else: return default
Args: lookup: The zone name to look up. Must end with "." default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the public hosted zone for the 'lookup' domain, or default/None if no match found
entailment
def ec2_route_table_main_route_table_id(self, lookup, default=None): """ Args: lookup: the friendly name of the VPC whose main route table we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the main route table of the named VPC, or default if no match/multiple matches found """ vpc_id = self.ec2_vpc_vpc_id(lookup) if vpc_id is None: return default route_table = EFAwsResolver.__CLIENTS["ec2"].describe_route_tables(Filters=[ {'Name': 'vpc-id', 'Values': [vpc_id]}, {'Name': 'association.main', 'Values': ['true']} ]) if len(route_table["RouteTables"]) is not 1: return default return route_table["RouteTables"][0]["RouteTableId"]
Args: lookup: the friendly name of the VPC whose main route table we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the main route table of the named VPC, or default if no match/multiple matches found
entailment
def ec2_route_table_tagged_route_table_id(self, lookup, default=None): """ Args: lookup: the tagged route table name, should be unique default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the route table, or default if no match/multiple matches found """ route_table = EFAwsResolver.__CLIENTS["ec2"].describe_route_tables(Filters=[ {'Name': 'tag-key', 'Values': ['Name']}, {'Name': 'tag-value', 'Values': [lookup]} ]) if len(route_table["RouteTables"]) is not 1: return default return route_table["RouteTables"][0]["RouteTableId"]
Args: lookup: the tagged route table name, should be unique default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the route table, or default if no match/multiple matches found
entailment
def cloudfront_domain_name(self, lookup, default=None): """ Args: lookup: any CNAME on the Cloudfront distribution default: the optional value to return if lookup failed; returns None if not set Returns: The domain name (FQDN) of the Cloudfront distrinbution, or default/None if no match """ # list_distributions returns at most 100 distributions per request list_limit = "100" distributions = EFAwsResolver.__CLIENTS["cloudfront"].list_distributions(MaxItems=list_limit)["DistributionList"] # Return if the account has no Distributions if not distributions.has_key("Items"): return default while True: for distribution in distributions["Items"]: if lookup in distribution["Aliases"]["Items"]: return distribution["DomainName"] if distributions["IsTruncated"]: distributions = EFAwsResolver.__CLIENTS["cloudfront"].list_distributions( MaxItems=list_limit, Marker=distributions["NextMarker"])["DistributionList"] else: return default
Args: lookup: any CNAME on the Cloudfront distribution default: the optional value to return if lookup failed; returns None if not set Returns: The domain name (FQDN) of the Cloudfront distrinbution, or default/None if no match
entailment
def cloudfront_origin_access_identity_oai_canonical_user_id(self, lookup, default=None): """ Args: lookup: the FQDN of the Origin Access Identity (from its comments) default: the optional value to return if lookup failed; returns None if not set Returns: the S3 Canonical User ID of the OAI associated with the named FQDN in 'lookup', or default/None if no match """ # list_cloud_front_origin_access_identities returns at most 100 oai's per request list_limit = "100" oais = EFAwsResolver.__CLIENTS["cloudfront"].list_cloud_front_origin_access_identities( MaxItems=list_limit)["CloudFrontOriginAccessIdentityList"] # Return if the account has no OriginAccessIdentities if not oais.has_key("Items"): return default while True: for oai in oais["Items"]: if oai["Comment"] == lookup: return oai["S3CanonicalUserId"] if oais["IsTruncated"]: oais = EFAwsResolver.__CLIENTS["cloudfront"].list_cloud_front_origin_access_identities( MaxItems=list_limit, Marker=oais["NextMarker"])["CloudFrontOriginAccessIdentityList"] else: return default
Args: lookup: the FQDN of the Origin Access Identity (from its comments) default: the optional value to return if lookup failed; returns None if not set Returns: the S3 Canonical User ID of the OAI associated with the named FQDN in 'lookup', or default/None if no match
entailment
def cognito_identity_identity_pool_arn(self, lookup, default=None): """ Args: lookup: Cognito Federated Identity name, proto0-cms-identity-pool default: the optional value to return if lookup failed; returns None if not set Returns: the constructed ARN for the cognito identity pool, else default/None """ identity_pool_id = self.cognito_identity_identity_pool_id(lookup, default) if identity_pool_id == default: return default # The ARN has to be constructed because there is no boto3 call that returns the full ARN for a cognito identity pool return "arn:aws:cognito-identity:{{{{REGION}}}}:{{{{ACCOUNT}}}}:identitypool/{}".format(identity_pool_id)
Args: lookup: Cognito Federated Identity name, proto0-cms-identity-pool default: the optional value to return if lookup failed; returns None if not set Returns: the constructed ARN for the cognito identity pool, else default/None
entailment
def cognito_identity_identity_pool_id(self, lookup, default=None): """ Args: lookup: Cognito Federated Identity name, proto0-cms-identity-pool default: the optional value to return if lookup failed; returns None if not set Returns: the Cognito Identity Pool ID corresponding to the given lookup, else default/None """ # List size cannot be greater than 60 list_limit = 60 client = EFAwsResolver.__CLIENTS["cognito-identity"] response = client.list_identity_pools(MaxResults=list_limit) while "IdentityPools" in response: # Loop through all the identity pools for pool in response["IdentityPools"]: if pool["IdentityPoolName"] == lookup: return pool["IdentityPoolId"] # No match found on this page, but there are more pages if response.has_key("NextToken"): response = client.list_identity_pools(MaxResults=list_limit, NextToken=response["NextToken"]) else: break return default
Args: lookup: Cognito Federated Identity name, proto0-cms-identity-pool default: the optional value to return if lookup failed; returns None if not set Returns: the Cognito Identity Pool ID corresponding to the given lookup, else default/None
entailment
def cognito_idp_user_pool_arn(self, lookup, default=None): """ Args: lookup: Cognito User Pool name, proto0-cms-user-pool default: the optional value to return if lookup failed; returns None if not set Returns: the User Pool ARN corresponding to the given lookup, else default/None """ client = EFAwsResolver.__CLIENTS["cognito-idp"] user_pool_id = self.cognito_idp_user_pool_id(lookup, default) if user_pool_id == default: return default response = client.describe_user_pool(UserPoolId=user_pool_id) if not response.has_key("UserPool"): return default return response["UserPool"]["Arn"]
Args: lookup: Cognito User Pool name, proto0-cms-user-pool default: the optional value to return if lookup failed; returns None if not set Returns: the User Pool ARN corresponding to the given lookup, else default/None
entailment
def kms_decrypt_value(self, lookup): """ Args: lookup: the encrypted value to be decrypted by KMS; base64 encoded Returns: The decrypted lookup value """ decrypted_lookup = ef_utils.kms_decrypt(EFAwsResolver.__CLIENTS["kms"], lookup) return decrypted_lookup
Args: lookup: the encrypted value to be decrypted by KMS; base64 encoded Returns: The decrypted lookup value
entailment
def kms_key_arn(self, lookup): """ Args: lookup: The key alias, EX: alias/proto0-evs-drm Returns: The full key arn """ key_arn = ef_utils.kms_key_arn(EFAwsResolver.__CLIENTS["kms"], lookup) return key_arn
Args: lookup: The key alias, EX: alias/proto0-evs-drm Returns: The full key arn
entailment
def handle_args_and_set_context(args): """ Args: args: the command line args, probably passed from main() as sys.argv[1:] Returns: a populated EFContext object Raises: IOError: if service registry file can't be found or can't be opened RuntimeError: if repo or branch isn't as spec'd in ef_config.EF_REPO and ef_config.EF_REPO_BRANCH CalledProcessError: if 'git rev-parse' command to find repo root could not be run """ parser = argparse.ArgumentParser() parser.add_argument("env", help=", ".join(EFConfig.ENV_LIST)) parser.add_argument("--sr", help="optional /path/to/service_registry_file.json", default=None) parser.add_argument("--commit", help="Make changes in AWS (dry run if omitted)", action="store_true", default=False) parser.add_argument("--verbose", help="Print additional info", action="store_true", default=False) parser.add_argument("--devel", help="Allow running from branch; don't refresh from origin", action="store_true", default=False) parsed_args = vars(parser.parse_args(args)) context = EFContext() context.commit = parsed_args["commit"] context.devel = parsed_args["devel"] try: context.env = parsed_args["env"] except ValueError as e: fail("Error in env: {}".format(e.message)) # Set up service registry and policy template path which depends on it context.service_registry = EFServiceRegistry(parsed_args["sr"]) context.policy_template_path = normpath(dirname(context.service_registry.filespec)) + EFConfig.POLICY_TEMPLATE_PATH_SUFFIX context.verbose = parsed_args["verbose"] return context
Args: args: the command line args, probably passed from main() as sys.argv[1:] Returns: a populated EFContext object Raises: IOError: if service registry file can't be found or can't be opened RuntimeError: if repo or branch isn't as spec'd in ef_config.EF_REPO and ef_config.EF_REPO_BRANCH CalledProcessError: if 'git rev-parse' command to find repo root could not be run
entailment
def conditionally_create_security_groups(env, service_name, service_type): """ Create security groups as needed; name and number created depend on service_type Args: env: the environment the SG will be created in service_name: name of the service in service registry service_type: service registry service type: 'aws_ec2', 'aws_lambda', 'aws_security_group', or 'http_service' """ if service_type not in SG_SERVICE_TYPES: print_if_verbose("not eligible for security group(s); service type: {}".format(service_type)) return target_name = "{}-{}".format(env, service_name) if service_type == "aws_ec2": sg_names = ["{}-ec2".format(target_name)] elif service_type == "aws_lambda": sg_names = ["{}-lambda".format(target_name)] elif service_type == "http_service": sg_names = [ "{}-ec2".format(target_name), "{}-elb".format(target_name) ] elif service_type == "aws_security_group": sg_names = [target_name] else: fail("Unexpected service_type: {} when creating security group for: {}".format(service_type, target_name)) for sg_name in sg_names: if not AWS_RESOLVER.ec2_security_group_security_group_id(sg_name): vpc_name = "vpc-{}".format(env) print("Create security group: {} in vpc: {}".format(sg_name, vpc_name)) vpc = AWS_RESOLVER.ec2_vpc_vpc_id(vpc_name) if not vpc: fail("Error: could not get VPC by name: {}".format(vpc_name)) # create security group if CONTEXT.commit: try: new_sg = CLIENTS["ec2"].create_security_group(GroupName=sg_name, VpcId=vpc, Description=sg_name) except: fail("Exception creating security group named: {} in VpcId: {}".format(sg_name, vpc_name), sys.exc_info()) print(new_sg["GroupId"]) else: print_if_verbose("security group already exists: {}".format(sg_name))
Create security groups as needed; name and number created depend on service_type Args: env: the environment the SG will be created in service_name: name of the service in service registry service_type: service registry service type: 'aws_ec2', 'aws_lambda', 'aws_security_group', or 'http_service'
entailment
def conditionally_create_role(role_name, sr_entry): """ Create role_name if a role by that name does not already exist; attach a custom list of Principals to its AssumeRolePolicy Args: role_name: the name for the role to create sr_entry: service registry entry Example of a (complex) AssumeRole policy document comprised of two IAM entities and a service: { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "ec2.amazonaws.com", "AWS": [ "arn:aws:iam::978969509086:root", "arn:aws:iam::978969509086:role/mgmt-jenkins" ] }, "Action": "sts:AssumeRole" } ] } """ service_type = sr_entry['type'] if service_type not in SERVICE_TYPE_ROLE: print_if_verbose("not eligible for role (and possibly instance profile); service type: {}".format(service_type)) return if sr_entry.has_key("assume_role_policy"): # Explicitly defined AssumeRole policy assume_role_policy_document = resolve_policy_document(sr_entry["assume_role_policy"]) else: # Create Service:AssumeRole policy using the service type in the SERVICE_TYPE_ROLE dict # which must list a service type to use this capacity (most do) if SERVICE_TYPE_ROLE[service_type] is None: fail("service_type: {} does not have a default service-type AssumeRole policy".format(service_type)) formatted_principals = '"Service": "{}"'.format(SERVICE_TYPE_ROLE[service_type]) assume_role_policy_document = '''{ "Version" : "2012-10-17", "Statement": [{ "Effect": "Allow", "Principal": { ''' + formatted_principals + ''' }, "Action": [ "sts:AssumeRole" ] }] }''' if not get_role_id(role_name): print("Create role: {}".format(role_name)) print_if_verbose("AssumeRole policy document:\n{}".format(assume_role_policy_document)) if CONTEXT.commit: try: new_role = CLIENTS["iam"].create_role( RoleName=role_name, AssumeRolePolicyDocument=assume_role_policy_document ) except ClientError as error: fail("Exception creating new role named: {} {}".format(role_name, sys.exc_info(), error)) print(new_role["Role"]["RoleId"]) else: 
print_if_verbose("role already exists: {}".format(role_name))
Create role_name if a role by that name does not already exist; attach a custom list of Principals to its AssumeRolePolicy Args: role_name: the name for the role to create sr_entry: service registry entry Example of a (complex) AssumeRole policy document comprised of two IAM entities and a service: { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "ec2.amazonaws.com", "AWS": [ "arn:aws:iam::978969509086:root", "arn:aws:iam::978969509086:role/mgmt-jenkins" ] }, "Action": "sts:AssumeRole" } ] }
entailment
def conditionally_create_profile(role_name, service_type): """ Check that there is a 1:1 correspondence with an InstanceProfile having the same name as the role, and that the role is contained in it. Create InstanceProfile and attach to role if needed. """ # make instance profile if this service_type gets an instance profile if service_type not in INSTANCE_PROFILE_SERVICE_TYPES: print_if_verbose("service type: {} not eligible for instance profile".format(service_type)) return instance_profile = get_instance_profile(role_name) if not instance_profile: print("Create instance profile: {}".format(role_name)) if CONTEXT.commit: try: instance_profile = CLIENTS["iam"].create_instance_profile(InstanceProfileName=role_name) except ClientError as error: fail("Exception creating instance profile named: {} {}".format(role_name, sys.exc_info(), error)) else: print_if_verbose("instance profile already exists: {}".format(role_name)) # attach instance profile to role; test 'if instance_profile' because we drop through to here in a dry run if instance_profile and not instance_profile_contains_role(instance_profile, role_name): print("Add role: {} to instance profile: {}".format(role_name, role_name)) if CONTEXT.commit: try: CLIENTS["iam"].add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name) except ClientError as error: fail("Exception adding role to instance profile: {} {}".format(role_name, sys.exc_info(), error)) else: print_if_verbose("instance profile already contains role: {}".format(role_name))
Check that there is a 1:1 correspondence with an InstanceProfile having the same name as the role, and that the role is contained in it. Create InstanceProfile and attach to role if needed.
entailment
def conditionally_attach_managed_policies(role_name, sr_entry): """ If 'aws_managed_policies' key lists the names of AWS managed policies to bind to the role, attach them to the role Args: role_name: name of the role to attach the policies to sr_entry: service registry entry """ service_type = sr_entry['type'] if not (service_type in SERVICE_TYPE_ROLE and "aws_managed_policies" in sr_entry): print_if_verbose("not eligible for policies; service_type: {} is not valid for policies " "or no 'aws_managed_policies' key in service registry for this role".format(service_type)) return for policy_name in sr_entry['aws_managed_policies']: print_if_verbose("loading policy: {} for role: {}".format(policy_name, role_name)) if CONTEXT.commit: try: CLIENTS["iam"].attach_role_policy(RoleName=role_name, PolicyArn='arn:aws:iam::aws:policy/' + policy_name) except: fail("Exception putting policy: {} onto role: {}".format(policy_name, role_name), sys.exc_info())
If 'aws_managed_policies' key lists the names of AWS managed policies to bind to the role, attach them to the role Args: role_name: name of the role to attach the policies to sr_entry: service registry entry
entailment
def conditionally_inline_policies(role_name, sr_entry): """ If 'policies' key lists the filename prefixes of policies to bind to the role, load them from the expected path and inline them onto the role Args: role_name: name of the role to attach the policies to sr_entry: service registry entry """ service_type = sr_entry['type'] if not (service_type in SERVICE_TYPE_ROLE and "policies" in sr_entry): print_if_verbose("not eligible for policies; service_type: {} is not valid for policies " "or no 'policies' key in service registry for this role".format(service_type)) return for policy_name in sr_entry['policies']: print_if_verbose("loading policy: {} for role: {}".format(policy_name, role_name)) try: policy_document = resolve_policy_document(policy_name) except: fail("Exception loading policy: {} for role: {}".format(policy_name, role_name), sys.exc_info()) # inline the policy onto the role if CONTEXT.commit: try: CLIENTS["iam"].put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy_document) except: fail("Exception putting policy: {} onto role: {}".format(policy_name, role_name), sys.exc_info())
If 'policies' key lists the filename prefixes of policies to bind to the role, load them from the expected path and inline them onto the role Args: role_name: name of the role to attach the policies to sr_entry: service registry entry
entailment
def conditionally_create_kms_key(role_name, service_type): """ Create KMS Master Key for encryption/decryption of sensitive values in cf templates and latebind configs Args: role_name: name of the role that kms key is being created for; it will be given decrypt privileges. service_type: service registry service type: 'aws_ec2', 'aws_fixture', 'aws_lambda', or 'http_service' """ if service_type not in KMS_SERVICE_TYPES: print_if_verbose("not eligible for kms; service_type: {} is not valid for kms".format(service_type)) return # Converting all periods to underscores because they are invalid in KMS alias names key_alias = role_name.replace('.', '_') try: kms_key = CLIENTS["kms"].describe_key(KeyId='alias/{}'.format(key_alias)) except ClientError as error: if error.response['Error']['Code'] == 'NotFoundException': kms_key = None else: fail("Exception describing KMS key: {} {}".format(role_name, error)) if service_type == "aws_fixture": kms_key_policy = '''{ "Version": "2012-10-17", "Statement": [ { "Sid": "Enable IAM User Permissions", "Effect": "Allow", "Principal": { "AWS": "arn:aws:iam::''' + CONTEXT.account_id + ''':root" }, "Action": "kms:*", "Resource": "*" } ] }''' else: formatted_principal = '"AWS": "arn:aws:iam::{}:role/{}"'.format(CONTEXT.account_id, role_name) kms_key_policy = '''{ "Version": "2012-10-17", "Statement": [ { "Sid": "Enable IAM User Permissions", "Effect": "Allow", "Principal": { "AWS": "arn:aws:iam::''' + CONTEXT.account_id + ''':root" }, "Action": "kms:*", "Resource": "*" }, { "Sid": "Allow Service Role Decrypt Privileges", "Effect": "Allow", "Principal": { ''' + formatted_principal + ''' }, "Action": "kms:Decrypt", "Resource": "*" }, { "Sid": "Allow use of the key for default autoscaling group service role", "Effect": "Allow", "Principal": { "AWS": "arn:aws:iam::''' + CONTEXT.account_id + ''':role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling" }, "Action": [ "kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", 
"kms:GenerateDataKey*", "kms:DescribeKey" ], "Resource": "*" }, { "Sid": "Allow attachment of persistent resourcesfor default autoscaling group service role", "Effect": "Allow", "Principal": { "AWS": "arn:aws:iam::''' + CONTEXT.account_id + ''':role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling" }, "Action": [ "kms:CreateGrant" ], "Resource": "*", "Condition": { "Bool": { "kms:GrantIsForAWSResource": true } } } ] }''' if not kms_key: print("Create KMS key: {}".format(key_alias)) if CONTEXT.commit: # Create KMS Master Key. Due to AWS eventual consistency a newly created IAM role may not be # immediately visible to KMS. Retrying up to 5 times (25 seconds) to account for this behavior. create_key_failures = 0 while create_key_failures <= 5: try: new_kms_key = CLIENTS["kms"].create_key( Policy=kms_key_policy, Description='Master Key for {}'.format(role_name) ) break except ClientError as error: if error.response['Error']['Code'] == 'MalformedPolicyDocumentException': if create_key_failures == 5: fail("Exception creating kms key: {} {}".format(role_name, error)) else: create_key_failures += 1 time.sleep(5) else: fail("Exception creating kms key: {} {}".format(role_name, error)) # Assign key an alias. This is used for all future references to it (rather than the key ARN) try: CLIENTS["kms"].create_alias( AliasName='alias/{}'.format(key_alias), TargetKeyId=new_kms_key['KeyMetadata']['KeyId'] ) except ClientError as error: fail("Exception creating alias for kms key: {} {}".format(role_name, error)) else: print_if_verbose("KMS key already exists: {}".format(key_alias))
Create KMS Master Key for encryption/decryption of sensitive values in cf templates and latebind configs Args: role_name: name of the role that kms key is being created for; it will be given decrypt privileges. service_type: service registry service type: 'aws_ec2', 'aws_fixture', 'aws_lambda', or 'http_service'
entailment
def sum_abs_distance(labels, preds): """ Compute the sum of abs distances. :param labels: A float tensor of shape [batch_size, ..., X] representing the labels. :param preds: A float tensor of shape [batch_size, ..., X] representing the predictions. :return: A float tensor of shape [batch_size, ...] representing the summed absolute distance. """ with tf.variable_scope("sum_abs_distance"): return tf.reduce_sum(tf.abs(preds - labels), axis=-1)
Compute the sum of abs distances. :param labels: A float tensor of shape [batch_size, ..., X] representing the labels. :param preds: A float tensor of shape [batch_size, ..., X] representing the predictions. :return: A float tensor of shape [batch_size, ...] representing the summed absolute distance.
entailment
def l1_distance(labels, preds): """ Compute the l1_distance. :param labels: A float tensor of shape [batch_size, ..., X] representing the labels. :param preds: A float tensor of shape [batch_size, ..., X] representing the predictions. :return: A float tensor of shape [batch_size, ...] representing the l1 distance. """ with tf.variable_scope("l1_distance"): return tf.norm(preds - labels, ord=1)
Compute the l1_distance. :param labels: A float tensor of shape [batch_size, ..., X] representing the labels. :param preds: A float tensor of shape [batch_size, ..., X] representing the predictions. :return: A float tensor of shape [batch_size, ...] representing the l1 distance.
entailment
def smooth_l1_distance(labels, preds, delta=1.0): """ Compute the smooth l1_distance. :param labels: A float tensor of shape [batch_size, ..., X] representing the labels. :param preds: A float tensor of shape [batch_size, ..., X] representing the predictions. :param delta: `float`, the point where the huber loss function changes from a quadratic to linear. :return: A float tensor of shape [batch_size, ...] representing the smooth l1 distance. """ with tf.variable_scope("smooth_l1"): return tf.reduce_sum(tf.losses.huber_loss( labels=labels, predictions=preds, delta=delta, loss_collection=None, reduction=tf.losses.Reduction.NONE ), axis=-1)
Compute the smooth l1_distance. :param labels: A float tensor of shape [batch_size, ..., X] representing the labels. :param preds: A float tensor of shape [batch_size, ..., X] representing the predictions. :param delta: `float`, the point where the huber loss function changes from a quadratic to linear. :return: A float tensor of shape [batch_size, ...] representing the smooth l1 distance.
entailment
def l2_distance(labels, preds): """ Compute the l2_distance. :param labels: A float tensor of shape [batch_size, ..., X] representing the labels. :param preds: A float tensor of shape [batch_size, ..., X] representing the predictions. :return: A float tensor of shape [batch_size, ...] representing the l2 distance. """ with tf.variable_scope("l2_distance"): return tf.norm(preds - labels, ord=2)
Compute the l2_distance. :param labels: A float tensor of shape [batch_size, ..., X] representing the labels. :param preds: A float tensor of shape [batch_size, ..., X] representing the predictions. :return: A float tensor of shape [batch_size, ...] representing the l2 distance.
entailment
def cross_entropy(labels, logits): """ Calculate the cross_entropy. :param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities. :param logits: A float tensor of shape [batch_size, ..., num_classes] representing the logits. :return: A tensor representing the cross entropy. """ with tf.variable_scope("cross_entropy"): return tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
Calculate the cross_entropy. :param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities. :param logits: A float tensor of shape [batch_size, ..., num_classes] representing the logits. :return: A tensor representing the cross entropy.
entailment
def handle_args_and_set_context(args): """ Args: args: the command line args, probably passed from main() as sys.argv[1:] Returns: a populated EFCFContext object (extends EFContext) Raises: IOError: if service registry file can't be found or can't be opened RuntimeError: if repo or branch isn't as spec'd in ef_config.EF_REPO and ef_config.EF_REPO_BRANCH CalledProcessError: if 'git rev-parse' command to find repo root could not be run """ parser = argparse.ArgumentParser() parser.add_argument("template_file", help="/path/to/template_file.json") parser.add_argument("env", help=", ".join(EFConfig.ENV_LIST)) parser.add_argument("--sr", help="optional /path/to/service_registry_file.json", default=None) parser.add_argument("--verbose", help="Print additional info + resolved template", action="store_true", default=False) parser.add_argument("--devel", help="Allow running from branch; don't refresh from origin", action="store_true", default=False) group = parser.add_mutually_exclusive_group() group.add_argument("--changeset", help="create a changeset; cannot be combined with --commit", action="store_true", default=False) group.add_argument("--commit", help="Make changes in AWS (dry run if omitted); cannot be combined with --changeset", action="store_true", default=False) group.add_argument("--lint", help="Execute cfn-lint on the rendered template", action="store_true", default=False) parser.add_argument("--percent", help="Specifies an override to the percentage of instances in an Auto Scaling rolling update (e.g. 
10 for 10%%)", type=int, default=False) parser.add_argument("--poll", help="Poll Cloudformation to check status of stack creation/updates", action="store_true", default=False) parsed_args = vars(parser.parse_args(args)) context = EFCFContext() try: context.env = parsed_args["env"] context.template_file = parsed_args["template_file"] except ValueError as e: fail("Error in argument: {}".format(e.message)) context.changeset = parsed_args["changeset"] context.commit = parsed_args["commit"] context.devel = parsed_args["devel"] context.lint = parsed_args["lint"] context.percent = parsed_args["percent"] context.poll_status = parsed_args["poll"] context.verbose = parsed_args["verbose"] # Set up service registry and policy template path which depends on it context.service_registry = EFServiceRegistry(parsed_args["sr"]) return context
Args: args: the command line args, probably passed from main() as sys.argv[1:] Returns: a populated EFCFContext object (extends EFContext) Raises: IOError: if service registry file can't be found or can't be opened RuntimeError: if repo or branch isn't as spec'd in ef_config.EF_REPO and ef_config.EF_REPO_BRANCH CalledProcessError: if 'git rev-parse' command to find repo root could not be run
entailment
def merge_files(service, skip_on_user_group_error=False): """ Given a prefix, find all templates below; merge with parameters; write to "dest" Args: service: "<service>", "all", or "ssh" skip_on_user_group_error: True or False For S3, full path becomes: s3://ellation-cx-global-configs/<service>/templates/<filename> s3://ellation-cx-global-configs/<service>/parameters/<filename>.parameters.<yaml|yml|json> For filesystem, full path becomes: /vagrant/configs/<service>/templates/<filename> /vagrant/configs/<service>/parameters/<filename>.parameters.<yaml|yml|json> """ if WHERE == "ec2": config_reader = EFInstanceinitConfigReader("s3", service, log_info, RESOURCES["s3"]) resolver = EFTemplateResolver() elif WHERE == "virtualbox-kvm": config_path = "{}/{}".format(VIRTUALBOX_CONFIG_ROOT, service) config_reader = EFInstanceinitConfigReader("file", config_path, log_info) environment = EFConfig.VAGRANT_ENV resolver = EFTemplateResolver(env=environment, profile=get_account_alias(environment), region=EFConfig.DEFAULT_REGION, service=service) while config_reader.next(): log_info("checking: {}".format(config_reader.current_key)) # if 'dest' for the current object contains an 'environments' list, check it dest = config_reader.dest if "environments" in dest: if not resolver.resolved["ENV_SHORT"] in dest["environments"]: log_info("Environment: {} not enabled for {}".format( resolver.resolved["ENV_SHORT"], config_reader.current_key) ) continue # If 'dest' for the current object contains a user_group that hasn't been created in the environment yet and the # flag is set to True to skip, log the error and move onto the next config file without blowing up. if skip_on_user_group_error: user, group = get_user_group(dest) try: getpwnam(user).pw_uid except KeyError: log_info("File specifies user {} that doesn't exist in environment. Skipping config file.".format(user)) continue try: getgrnam(group).gr_gid except KeyError: log_info("File specifies group {} that doesn't exist in environment. 
Skipping config file.".format(group)) continue # Process the template_body - apply context + parameters log_info("Resolving template") resolver.load(config_reader.template, config_reader.parameters) rendered_body = resolver.render() if not resolver.resolved_ok(): critical("Couldn't resolve all symbols; template has leftover {{ or }}: {}".format(resolver.unresolved_symbols())) # Write the rendered file dir_path = normpath(dirname(dest["path"])) # Resolved OK. try to write the template log_info("make directories: {} {}".format(dir_path, dest["dir_perm"])) try: makedirs(dir_path, int(dest["dir_perm"], 8)) except OSError as error: if error.errno != 17: critical("Error making directories {}".format(repr(error))) log_info("open: " + dest["path"] + ",w+") try: outfile = open(dest["path"], 'w+') log_info("write") outfile.write(rendered_body) log_info("close") outfile.close() log_info("chmod file to: " + dest["file_perm"]) chmod(dest["path"], int(dest["file_perm"], 8)) user, group = get_user_group(dest) uid = getpwnam(user).pw_uid gid = getgrnam(group).gr_gid log_info("chown last directory in path to: " + dest["user_group"]) chown(dir_path, uid, gid) log_info("chown file to: " + dest["user_group"]) chown(dest["path"], uid, gid) except Exception as error: critical("Error writing file: " + dest["path"] + ": " + repr(error))
Given a prefix, find all templates below; merge with parameters; write to "dest" Args: service: "<service>", "all", or "ssh" skip_on_user_group_error: True or False For S3, full path becomes: s3://ellation-cx-global-configs/<service>/templates/<filename> s3://ellation-cx-global-configs/<service>/parameters/<filename>.parameters.<yaml|yml|json> For filesystem, full path becomes: /vagrant/configs/<service>/templates/<filename> /vagrant/configs/<service>/parameters/<filename>.parameters.<yaml|yml|json>
entailment
def get_metadata_or_fail(metadata_key): """ Call get_metadata; halt with fail() if it raises an exception """ try: return http_get_metadata(metadata_key) except IOError as error: fail("Exception in http_get_metadata {} {}".format(metadata_key, repr(error)))
Call get_metadata; halt with fail() if it raises an exception
entailment
def load(self, template, parameters=None):
  """
  Load template text and optional parameters into the resolver.

  Args:
    template: a 'string' or an open 'file'; template text contains {{TOKEN}}
      symbols to be replaced
    parameters: None, a 'string', an open 'file', or a 'dictionary'. Whether
      from a string or file, or already in a dictionary, parameters must
      follow the logical format documented in the class docstring. If
      omitted, template resolution will proceed with AWS, credential, and
      version lookups only.

  Halts via fail() on unreadable input, unknown argument types, a missing
  'params' section, or illegal characters in parameter keys.
  """
  # load template
  if isinstance(template, str):
    self.template = template
  elif isinstance(template, file):
    try:
      self.template = template.read()
      template.close()
    except IOError as error:
      fail("Exception loading template from file: ", error)
  else:
    # str(type(...)): concatenating the type object itself raised TypeError
    # instead of producing the intended fail() message
    fail("Unknown type loading template; expected string or file: " + str(type(template)))

  # load parameters, if any
  if parameters:
    if isinstance(parameters, str):
      try:
        self.parameters = yaml.safe_load(parameters)
      except ValueError as error:
        fail("Exception loading parameters from string: ", error)
    elif isinstance(parameters, file):
      try:
        self.parameters = yaml.safe_load(parameters)
        parameters.close()
      except ValueError as error:
        fail("Exception loading parameters from file: {}".format(error), sys.exc_info())
    elif isinstance(parameters, dict):
      self.parameters = parameters
    else:
      # str(type(...)): same TypeError fix as for the template branch above
      fail("Unknown type loading parameters; expected string, file, or dict: " + str(type(parameters)))

    # sanity check the loaded parameters
    if "params" not in self.parameters:
      fail("'params' field not found in parameters")
    # just the params, please
    self.parameters = self.parameters["params"]
    # are all the keys valid (must have legal characters)
    for k in set().union(*(self.parameters[d].keys() for d in self.parameters.keys())):
      invalid_char = re.search(ILLEGAL_PARAMETER_CHARS, k)
      if invalid_char:
        fail("illegal character: '" + invalid_char.group(0) + "' in parameter key: " + k)
'template' Loads template text from a 'string' or 'file' type Template text contains {{TOKEN}} symbols to be replaced 'parameters' parameters contains environment-specific sections as discussed in the class documentation. the 'parameters' arg can be None, a 'string', 'file', or 'dictionary' Whether from a string or file, or already in a dictionary, parameters must follow the logical format documented in the class docstring. if 'parameters' is omitted, template resolution will proceed with AWS, credential, and version lookups.
entailment
def search_parameters(self, symbol):
  """
  Hierarchically search for 'symbol' in the parameters blob, if one was
  retrieved by 'load()'.

  Precedence, lowest to highest: default, <env_short>, <env>.

  Returns:
    the most specific value found for 'symbol', or None if no section
    defines it or there are no parameters at all
  """
  if not self.parameters:
    return None
  # walk sections from least to most specific so a later hit overrides an
  # earlier one; when env_short == env the second lookup is redundant but cheap
  result = None
  for section in ("default", self.resolved["ENV_SHORT"], self.resolved["ENV"]):
    if section in self.parameters and symbol in self.parameters[section]:
      result = self.parameters[section][symbol]
  return result
Hierarchically searches for 'symbol' in the parameters blob if there is one (would have been retrieved by 'load()'). Order is: default, <env_short>, <env> Returns Hierarchically resolved value for 'symbol', or None if a match is not found or there are no parameters
entailment
def render(self):
  """
  Find {{}} tokens; resolve then replace them as described elsewhere.
  Resolution is multi-pass: tokens may be nested to form parts of other tokens.
  Token search steps when resolving symbols -
  1. lookups of AWS resource identifiers, such as a security group ID
  2. secure credentials
  3. version registry for versioned content such as an AMI ID
  4. built-in context (such as "ENV")
  5. parameters from a parameter file / dictionary of params
  """
  # Start from a clean symbol set so leftovers from a previously rendered
  # template don't leak into this run
  self.symbols = set()
  # Repeat passes until one pass resolves nothing new
  made_progress = True
  while made_progress:
    made_progress = False
    # Gather all resolvable symbols present in the current template text
    pass_symbols = set(re.findall(SYMBOL_PATTERN, self.template))
    self.symbols.update(pass_symbols)  # accumulate this pass's symbols into the full set
    # resolve and replace symbols
    for symbol in pass_symbols:
      value = None
      if symbol[:4] == "aws:" and EFTemplateResolver.__AWSR:
        # Lookups in AWS, only if we have an EFAwsResolver
        value = EFTemplateResolver.__AWSR.lookup(symbol[4:])
      elif symbol[:12] == "credentials:":
        pass  # TODO
      elif symbol[:9] == "efconfig:":
        value = EFTemplateResolver.__EFCR.lookup(symbol[9:])
      elif symbol[:8] == "version:":
        value = EFTemplateResolver.__VR.lookup(symbol[8:])
        if not value:
          print("WARNING: Lookup failed for {{%s}} - placeholder value of 'NONE' used in rendered template" % symbol)
          value = "NONE"
      else:
        # 1. context - these are already in the resolved table; an entry may
        # legitimately hold None, so membership (not truthiness) decides
        if symbol in self.resolved:
          value = self.resolved[symbol]
        # 2. parameters
        if not value:
          value = self.search_parameters(symbol)
      # if the symbol was resolved, replace it everywhere it appears
      if value is not None:
        if isinstance(value, list):
          self.template = self.template.replace("{{" + symbol + "}}", "\n".join(value))
        else:
          self.template = self.template.replace("{{" + symbol + "}}", value)
        made_progress = True
  return self.template
Find {{}} tokens; resolve then replace them as described elsewhere Resolution is multi-pass: tokens may be nested to form parts of other tokens. Token search steps when resolving symbols - 1. lookups of AWS resource identifiers, such as a security group ID - 2. secure credentials - 3. version registry for versioned content such as an AMI ID - 4. built-in context (such as "ENV") - 5. parameters from a parameter file / dictionary of params
entailment
def count_braces(self):
  """
  Count occurrences of "{{" and "}}" in the template.
  Useful after resolve() has run: any remaining braces imply the template
  still has an error, since none should survive resolution.

  Returns:
    (N_left_braces, N_right_braces)
  """
  return self.template.count("{{"), self.template.count("}}")
returns a count of "{{" and "}}" in the template, as (N_left_braces, N_right_braces) Useful to check after resolve() has run, to infer that template has an error since no {{ or }} should be present in the template after resolve()
entailment
def resolved_ok(self):
  """
  Convenience check combining unresolved_symbols() and count_braces().

  Returns:
    True when there are no unresolved symbols and no {{ or }} braces remain
    in the template; False otherwise
  """
  n_left, n_right = self.count_braces()
  return not self.unresolved_symbols() and n_left == 0 and n_right == 0
Shortcut to testing unresolved_symbols and count_braces separately. Returns false if there are unresolved symbols or {{ or }} braces remaining, true otherwise
entailment
def handle_args_and_set_context(args):
  """
  Parse command-line arguments and build the tool's working context.

  Args:
    args: the command line args, probably passed from main() as sys.argv[1:]
  Returns:
    a populated Context object based on CLI args
  """
  parser = argparse.ArgumentParser()
  parser.add_argument("env", help="environment")
  parser.add_argument("path_to_template", help="path to the config template to process")
  parser.add_argument("--no_params", help="disable loading values from params file", action="store_true", default=False)
  parser.add_argument("--verbose", help="Output extra info", action="store_true", default=False)
  parser.add_argument("--lint", help="Test configs for valid JSON/YAML syntax", action="store_true", default=False)
  parser.add_argument("--silent", help="Suppress output of rendered template", action="store_true", default=False)
  parsed = vars(parser.parse_args(args))
  template_path = abspath(parsed["path_to_template"])
  # the service name is the directory two levels above the template file
  service = template_path.split('/')[-3]
  return Context(
    get_account_alias(parsed["env"]),
    EFConfig.DEFAULT_REGION,
    parsed["env"],
    service,
    template_path,
    parsed["no_params"],
    parsed["verbose"],
    parsed["lint"],
    parsed["silent"]
  )
Args: args: the command line args, probably passed from main() as sys.argv[1:] Returns: a populated Context object based on CLI args
entailment
def merge_files(context):
  """
  Given a context containing path to template, env, and service: merge config
  into template and output the result to stdout.

  Args:
    context: a populated context object
  Raises:
    IOError: if the template or parameter file can't be read
    RuntimeError: if symbols were left unresolved in the rendered template
  """
  resolver = EFTemplateResolver(
    profile=context.profile,
    region=context.region,
    env=context.env,
    service=context.service
  )

  try:
    # 'with' closes the file on exit; the explicit close() calls the original
    # carried inside these blocks were redundant dead code
    with open(context.template_path, 'r') as f:
      template_body = f.read()
  except IOError as error:
    raise IOError("Error loading template file: {} {}".format(context.template_path, repr(error)))

  if context.no_params is False:
    try:
      with open(context.param_path, 'r') as f:
        param_body = f.read()
    except IOError as error:
      raise IOError("Error loading param file: {} {}".format(context.param_path, repr(error)))

    dest = yaml.safe_load(param_body)["dest"]

    # if 'dest' for the current object contains an 'environments' list, check it
    if "environments" in dest:
      if not resolver.resolved["ENV_SHORT"] in dest["environments"]:
        print("Environment: {} not enabled for {}".format(resolver.resolved["ENV_SHORT"], context.template_path))
        return

    # Process the template_body - apply context + parameters
    resolver.load(template_body, param_body)
  else:
    resolver.load(template_body)
  rendered_body = resolver.render()

  if not resolver.resolved_ok():
    raise RuntimeError("Couldn't resolve all symbols; template has leftover {{ or }}: {}".format(resolver.unresolved_symbols()))

  if context.lint:
    if context.template_path.endswith(".json"):
      try:
        json.loads(rendered_body, strict=False)
        print("JSON passed linting process.")
      except ValueError as e:
        fail("JSON failed linting process.", e)
    elif context.template_path.endswith((".yml", ".yaml")):
      conf = yamllint_config.YamlLintConfig(content='extends: relaxed')
      lint_output = yamllinter.run(rendered_body, conf)
      lint_level = 'error'
      lint_errors = [issue for issue in lint_output if issue.level == lint_level]
      if lint_errors:
        split_body = rendered_body.splitlines()
        for error in lint_errors:
          print(error)
          # printing line - 1 because lists start at 0, but files at 1
          print("\t", split_body[error.line - 1])
        fail("YAML failed linting process.")

  if context.verbose:
    print(context)
    if context.no_params:
      print('no_params flag set to true!')
      print('Inline template resolution based on external symbol lookup only and no destination for file write.\n')
    else:
      dir_path = normpath(dirname(dest["path"]))
      print("make directories: {} {}".format(dir_path, dest["dir_perm"]))
      print("chmod file to: " + dest["file_perm"])
      user, group = dest["user_group"].split(":")
      print("chown last directory in path to user: {}, group: {}".format(user, group))
      print("chown file to user: {}, group: {}\n".format(user, group))
    print("template body:\n{}\nrendered body:\n{}\n".format(template_body, rendered_body))
  elif context.silent:
    print("Config template rendered successfully.")
  else:
    print(rendered_body)
Given a context containing path to template, env, and service: merge config into template and output the result to stdout Args: context: a populated context object
entailment
def handle_args(args):
  """
  Parse command line arguments.

  Args:
    args: the command-line argument list (e.g. sys.argv[1:])
  Returns:
    dict of parsed args, with an added "configdir" key holding the directory
    to check (the configpath itself if it is a directory, otherwise the
    directory containing it)
  Raises:
    Exception if the config path wasn't explicitly stated and dead
    reckoning based on script location fails
  """
  parser = argparse.ArgumentParser()
  parser.add_argument("configpath", default=None, nargs="?",
                      help="/path/to/configs (always a directory; if omitted, all /configs are checked")
  parser.add_argument("--verbose", action="store_true", default=False)
  parsed_args = vars(parser.parse_args(args))
  if parsed_args["configpath"] is None:
    # No path supplied: dead-reckon relative to this script's own location
    script_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
    parsed_args["configpath"] = os.path.normpath("{}/{}".format(script_dir, CONFIGS_RELATIVE_PATH_FROM_SCRIPT_DIR))
  else:
    parsed_args["configpath"] = os.path.normpath(parsed_args["configpath"])
  # A directory is used as-is; a file path is replaced by its containing directory
  path_is_dir = os.path.isdir(parsed_args["configpath"])
  parsed_args["configdir"] = parsed_args["configpath"] if path_is_dir else os.path.dirname(parsed_args["configpath"])
  return parsed_args
Handle command line arguments Raises: Exception if the config path wasn't explicitly state and dead reckoning based on script location fails
entailment
def load_json(json_filespec):
  """
  Load JSON from a config file.

  Args:
    json_filespec: path/to/file.json
  Returns:
    a dict made from the JSON read, if successful
  Raises:
    IOError if the file could not be opened
    ValueError if the JSON could not be read successfully
  """
  # 'with' guarantees the handle is closed even if json.load raises,
  # which the original open/load/close sequence did not
  with open(json_filespec) as json_fh:
    return json.load(json_fh)
Loads JSON from a config file Args: json_filespec: path/to/file.json Returns: a dict made from the JSON read, if successful Raises: IOError if the file could not be opened ValueError if the JSON could not be read successfully RuntimeError if something else went wrong
entailment
def lookup(self, token):
  """
  Return key version if found, None otherwise.

  Token pattern: <key>,<env>/<service>
  Example: ami-id,staging/core

  Returns:
    the value from _s3_get, or None if the token has no "," separator
  Raises:
    RuntimeError: if the <env>/<service> part can't be split on "/"
  """
  # split the search key from "<env>/<service>"
  try:
    key, envservice = token.split(",")
  except ValueError:
    return None
  # split "<env>/<service>" into its parts
  try:
    env, service = envservice.split("/")
  except ValueError as e:
    # format the exception itself: e.message does not exist on Python 3
    # exceptions and would raise AttributeError there
    raise RuntimeError("Request:{} can't resolve to env, service. {}".format(envservice, e))
  return self._s3_get(env, service, key)
Return key version if found, None otherwise Lookup should look like this: pattern: <key>,<env>/<service> example: ami-id,staging/core
entailment
def alert_policy_exists(self, policy_name):
  """Check to see if an alert policy exists in NewRelic.
  Returns True if so, False if not. (The original fell through and returned
  None for the missing case despite documenting False.)
  """
  return any(policy['name'] == policy_name for policy in self.all_alerts)
Check to see if an alert policy exists in NewRelic. Return True if so, False if not
entailment
def create_alert_policy(self, policy_name):
  """Create an alert policy in NewRelic and return its id."""
  payload = {
    'policy': {
      'incident_preference': 'PER_POLICY',
      'name': policy_name
    }
  }
  response = requests.post(
    'https://api.newrelic.com/v2/alerts_policies.json',
    headers=self.auth_header,
    data=json.dumps(payload))
  response.raise_for_status()
  new_policy_id = response.json()['policy']['id']
  # keep the cached policy list in sync with what was just created
  self.refresh_all_alerts()
  return new_policy_id
Creates an alert policy in NewRelic
entailment
def element_href(name):
    """
    Get specified element href by element name

    :param name: name of element
    :return: string href location of object, else None
    """
    if not name:
        return None
    result = fetch_meta_by_name(name)
    if result.href:
        return result.href
Get specified element href by element name :param name: name of element :return: string href location of object, else None
entailment
def element_as_json(name):
    """
    Get specified element json data by name

    :param name: name of element
    :return: json data representing element, else None
    """
    if not name:
        return None
    result = fetch_json_by_name(name)
    return result.json if result.json else None
Get specified element json data by name :param name: name of element :return: json data representing element, else None
entailment
def element_as_json_with_filter(name, _filter):
    """
    Get specified element json data by name with filter. Filter can be any
    valid element type, e.g. host, network, tcp_service, network_elements,
    services, services_and_applications.

    :param name: name of element
    :param _filter: element filter
    :return: json data representing element, else None
    """
    if not name:
        return None
    href = element_href_use_filter(name, _filter)
    return element_by_href_as_json(href) if href else None
Get specified element json data by name with filter. Filter can be any valid element type. :param name: name of element :param _filter: element filter, host, network, tcp_service, network_elements, services, services_and_applications, etc :return: json data representing element, else None
entailment
def element_info_as_json(name):
    """
    Get specified element META data based on search query.
    This is the base level search returning basic object info with these
    attributes:

    * href: link to element
    * name: name of element
    * type: type of element

    :param str name: name of element
    :return: list dict with meta (href, name, type) if found, otherwise None
    """
    if not name:
        return None
    result = fetch_meta_by_name(name)
    if result.json:
        return result.json
Get specified element META data based on search query This is the base level search that returns basic object info with the following attributes: * href: link to element * name: name of element * type: type of element :param str name: name of element :return: list dict with meta (href, name, type) if found, otherwise None
entailment
def element_info_as_json_with_filter(name, _filter):
    """
    Top level json meta data (href, name, type) for element, constrained by
    an entry-point filter.

    :param str name: name of element
    :param str _filter: filter of entry point
    :return: list dict with metadata, otherwise None
    """
    if not (name and _filter):
        return None
    result = fetch_meta_by_name(name, filter_context=_filter)
    if result.json:
        return result.json
Top level json meta data (href, name, type) for element :param str name: name of element :param str _filter: filter of entry point :return: list dict with metadata, otherwise None
entailment
def element_href_use_wildcard(name):
    """
    Get element href using a wildcard (non-exact) match on the name field,
    which will likely return multiple results.

    :param name: name of element
    :return: list of matched elements
    """
    if name:
        return fetch_meta_by_name(name, exact_match=False).json
Get element href using a wildcard rather than matching only on the name field. This will likely return multiple results. :param name: name of element :return: list of matched elements
entailment
def element_href_use_filter(name, _filter):
    """
    Get element href using a filter. The filter should be a valid entry
    point value, i.e. host, router, network, single_fw, etc; an unknown
    filter results in no matches.

    :param name: name of element
    :param _filter: filter type
    :return: element href (if found), else None
    """
    if not (name and _filter):
        return None
    result = fetch_meta_by_name(name, filter_context=_filter)
    if result.json:
        return result.json.pop().get('href')
Get element href using filter Filter should be a valid entry point value, ie host, router, network, single_fw, etc :param name: name of element :param _filter: filter type, unknown filter will result in no matches :return: element href (if found), else None
entailment
def element_by_href_as_json(href, params=None):
    """
    Get specified element by href

    :param href: link to object
    :param params: optional search query parameters
    :return: json data representing element, else None
    """
    if not href:
        return None
    result = fetch_json_by_href(href, params=params)
    return result.json if result else None
Get specified element by href :param href: link to object :param params: optional search query parameters :return: json data representing element, else None
entailment
def element_name_by_href(href):
    """
    Resolve an element's name from a known href, e.g. a reference found in
    another element's json.

    :param str href: href of element
    :return: str name of element, or None
    """
    if not href:
        return None
    result = fetch_json_by_href(href)
    if result.json:
        return result.json.get('name')
The element href is known, possibly from a reference in an elements json. You want to retrieve the name of this element. :param str href: href of element :return: str name of element, or None
entailment
def element_name_and_type_by_href(href):
    """
    Retrieve an element's name and type based on its href. Useful when a
    href embedded in another element's reference needs more information.

    :param str href: href of element
    :return: tuple (name, type), or None if not resolvable
    """
    if not href:
        return None
    result = fetch_json_by_href(href)
    if not result.json:
        return None
    # the 'self' link entry carries the element's type
    for link in result.json.get('link'):
        if link.get('rel') == 'self':
            return (result.json.get('name'), link.get('type'))
Retrieve the element name and type of element based on the href. You may have a href that is within another element reference and want more information on that reference. :param str href: href of element :return: tuple (name, type)
entailment