Search is not available for this dataset
text
stringlengths
75
104k
def normalized_distance(self, image):
    """Compute the distance of a given image to the original image.

    Parameters
    ----------
    image : `numpy.ndarray`
        The image that should be compared to the original image.

    Returns
    -------
    :class:`Distance`
        The distance between the given image and the original image.
    """
    reference = self.__original_image_for_distance
    return self.__distance(reference, image, bounds=self.bounds())
def __is_adversarial(self, image, predictions, in_bounds):
    """Interface to criterion.is_adversarial that calls
    __new_adversarial if necessary.

    Parameters
    ----------
    image : `numpy.ndarray`
        The image that produced the given predictions.
    predictions : :class:`numpy.ndarray`
        A vector with the pre-softmax predictions for the image.
    in_bounds : bool
        Whether the image lies within the valid pixel bounds.

    Returns
    -------
    tuple
        (is_adversarial, is_best, distance); is_best and distance are
        only meaningful when is_adversarial is True.
    """
    is_adversarial = self.__criterion.is_adversarial(
        predictions, self.__original_class)
    # criteria must return a plain (numpy) bool, not e.g. an array
    assert isinstance(is_adversarial, bool) or \
        isinstance(is_adversarial, np.bool_)
    if is_adversarial:
        # records the candidate and reports whether it is the best so far
        is_best, distance = self.__new_adversarial(
            image, predictions, in_bounds)
    else:
        is_best = False
        distance = None
    return is_adversarial, is_best, distance
def channel_axis(self, batch):
    """Interface to model.channel_axis for attacks.

    Parameters
    ----------
    batch : bool
        If True, return the channel axis index for a batch of images
        (4 dimensions); otherwise for a single image (3 dimensions),
        which is one less.
    """
    if batch:
        return self.__model.channel_axis()
    return self.__model.channel_axis() - 1
def has_gradient(self):
    """Return True if _backward and _forward_backward can be called
    by an attack, False otherwise.
    """
    try:
        # touching the attributes raises AttributeError when absent
        model = self.__model
        model.gradient
        model.predictions_and_gradient
    except AttributeError:
        return False
    return True
def predictions(self, image, strict=True, return_details=False):
    """Interface to model.predictions for attacks.

    Parameters
    ----------
    image : `numpy.ndarray`
        Single input with shape as expected by the model
        (without the batch dimension).
    strict : bool
        Controls if the bounds for the pixel values should be checked.
    return_details : bool
        If True, additionally return whether this is the best
        adversarial so far and its distance.

    Returns
    -------
    tuple
        (predictions, is_adversarial) or, with return_details,
        (predictions, is_adversarial, is_best, distance).
    """
    in_bounds = self.in_bounds(image)
    assert not strict or in_bounds
    # count every forward pass for attack budgeting/statistics
    self._total_prediction_calls += 1
    predictions = self.__model.predictions(image)
    is_adversarial, is_best, distance = self.__is_adversarial(
        image, predictions, in_bounds)
    assert predictions.ndim == 1
    if return_details:
        return predictions, is_adversarial, is_best, distance
    else:
        return predictions, is_adversarial
def batch_predictions(
        self, images, greedy=False, strict=True, return_details=False):
    """Interface to model.batch_predictions for attacks.

    Parameters
    ----------
    images : `numpy.ndarray`
        Batch of inputs with shape as expected by the model.
    greedy : bool
        Whether the first adversarial should be returned.
    strict : bool
        Controls if the bounds for the pixel values should be checked.
    return_details : bool
        If True (requires greedy), additionally return whether the
        found adversarial is the best so far and its distance.

    Returns
    -------
    tuple
        Without greedy: (predictions, is_adversarial array).
        With greedy: (predictions, is_adversarial, index[, is_best,
        distance]); index is None if no adversarial was found.
    """
    if strict:
        in_bounds = self.in_bounds(images)
        assert in_bounds
    # one forward pass per image in the batch
    self._total_prediction_calls += len(images)
    predictions = self.__model.batch_predictions(images)
    assert predictions.ndim == 2
    assert predictions.shape[0] == images.shape[0]
    if return_details:
        # details are only defined for the greedy (first-hit) mode
        assert greedy
    adversarials = []
    for i in range(len(predictions)):
        if strict:
            # whole batch was already verified above
            in_bounds_i = True
        else:
            in_bounds_i = self.in_bounds(images[i])
        is_adversarial, is_best, distance = self.__is_adversarial(
            images[i], predictions[i], in_bounds_i)
        if is_adversarial and greedy:
            if return_details:
                return predictions, is_adversarial, i, is_best, distance
            else:
                return predictions, is_adversarial, i
        adversarials.append(is_adversarial)
    if greedy:  # pragma: no cover
        # no adversarial found
        if return_details:
            return predictions, False, None, False, None
        else:
            return predictions, False, None
    is_adversarial = np.array(adversarials)
    assert is_adversarial.ndim == 1
    assert is_adversarial.shape[0] == images.shape[0]
    return predictions, is_adversarial
def gradient(self, image=None, label=None, strict=True):
    """Interface to model.gradient for attacks.

    Parameters
    ----------
    image : `numpy.ndarray`
        Single input with shape as expected by the model
        (without the batch dimension).
        Defaults to the original image.
    label : int
        Label used to calculate the loss that is differentiated.
        Defaults to the original label.
    strict : bool
        Controls if the bounds for the pixel values should be checked.

    Returns
    -------
    `numpy.ndarray`
        Gradient with the same shape as the image.
    """
    assert self.has_gradient()
    image = self.__original_image if image is None else image
    label = self.__original_class if label is None else label
    assert not strict or self.in_bounds(image)
    # count every backward pass for attack budgeting/statistics
    self._total_gradient_calls += 1
    grad = self.__model.gradient(image, label)
    assert grad.shape == image.shape
    return grad
def predictions_and_gradient(
        self, image=None, label=None, strict=True, return_details=False):
    """Interface to model.predictions_and_gradient for attacks.

    Parameters
    ----------
    image : `numpy.ndarray`
        Single input with shape as expected by the model
        (without the batch dimension).
        Defaults to the original image.
    label : int
        Label used to calculate the loss that is differentiated.
        Defaults to the original label.
    strict : bool
        Controls if the bounds for the pixel values should be checked.
    return_details : bool
        If True, additionally return whether this is the best
        adversarial so far and its distance.

    Returns
    -------
    tuple
        (predictions, gradient, is_adversarial) or, with
        return_details, (predictions, gradient, is_adversarial,
        is_best, distance).
    """
    assert self.has_gradient()
    if image is None:
        image = self.__original_image
    if label is None:
        label = self.__original_class
    in_bounds = self.in_bounds(image)
    assert not strict or in_bounds
    # one combined forward and backward pass
    self._total_prediction_calls += 1
    self._total_gradient_calls += 1
    predictions, gradient = self.__model.predictions_and_gradient(image, label)  # noqa: E501
    is_adversarial, is_best, distance = self.__is_adversarial(
        image, predictions, in_bounds)
    assert predictions.ndim == 1
    assert gradient.shape == image.shape
    if return_details:
        return predictions, gradient, is_adversarial, is_best, distance
    else:
        return predictions, gradient, is_adversarial
def backward(self, gradient, image=None, strict=True):
    """Interface to model.backward for attacks.

    Parameters
    ----------
    gradient : `numpy.ndarray`
        Gradient of some loss w.r.t. the logits.
    image : `numpy.ndarray`
        Single input with shape as expected by the model
        (without the batch dimension).
    strict : bool
        Controls if the bounds for the pixel values should be checked.

    Returns
    -------
    gradient : `numpy.ndarray`
        The gradient w.r.t the image.

    See Also
    --------
    :meth:`gradient`
    """
    assert self.has_gradient()
    # the incoming gradient is w.r.t. the logits vector
    assert gradient.ndim == 1
    image = self.__original_image if image is None else image
    assert not strict or self.in_bounds(image)
    self._total_gradient_calls += 1
    result = self.__model.backward(gradient, image)
    assert result.shape == image.shape
    return result
def loss_function(cls, const, a, x, logits, reconstructed_original,
                  confidence, min_, max_):
    """Returns the loss and the gradient of the loss w.r.t. x,
    assuming that logits = model(x).

    The loss is the squared L2 distance to the reconstructed original
    (normalized by the value range) plus `const` times a hinge-style
    adversarial loss on the logit difference.

    Parameters
    ----------
    const : float
        Trade-off constant weighting the adversarial term.
    a : adversarial object
        Provides target_class / original_class and `backward`.
    x : `numpy.ndarray`
        Current input; gradient is taken w.r.t. this.
    logits : `numpy.ndarray`
        Model output for x.
    reconstructed_original : `numpy.ndarray`
        Reference image the distance is measured against.
    confidence : float
        Extra margin required before the adversarial loss is zero.
    min_, max_ : float
        Valid value range; used to normalize the distance.
    """
    targeted = a.target_class() is not None
    if targeted:
        # targeted: push target logit above the best other logit
        c_minimize = cls.best_other_class(logits, a.target_class())
        c_maximize = a.target_class()
    else:
        # untargeted: push any other logit above the original class
        c_minimize = a.original_class
        c_maximize = cls.best_other_class(logits, a.original_class)
    is_adv_loss = logits[c_minimize] - logits[c_maximize]
    # is_adv is True as soon as the is_adv_loss goes below 0
    # but sometimes we want additional confidence
    is_adv_loss += confidence
    is_adv_loss = max(0, is_adv_loss)
    s = max_ - min_
    squared_l2_distance = np.sum((x - reconstructed_original)**2) / s**2
    total_loss = squared_l2_distance + const * is_adv_loss
    # calculate the gradient of total_loss w.r.t. x
    logits_diff_grad = np.zeros_like(logits)
    logits_diff_grad[c_minimize] = 1
    logits_diff_grad[c_maximize] = -1
    is_adv_loss_grad = a.backward(logits_diff_grad, x)
    assert is_adv_loss >= 0
    if is_adv_loss == 0:
        # hinge is inactive; its gradient is zero
        is_adv_loss_grad = 0
    squared_l2_distance_grad = (2 / s**2) * (x - reconstructed_original)
    total_loss_grad = squared_l2_distance_grad + const * is_adv_loss_grad
    return total_loss, total_loss_grad
def best_other_class(logits, exclude):
    """Returns the index of the largest logit, ignoring the class that
    is passed as `exclude`."""
    # push the excluded class to -inf so argmax can never select it
    masked = logits - onehot_like(logits, exclude, value=np.inf)
    return np.argmax(masked)
def name(self):
    """Concatenates the names of the given criteria in alphabetical
    order.

    If a sub-criterion is itself a combined criterion, its own name()
    already reports its sub-sub criteria, so the order and the
    hierarchy of the criteria does not influence the result.

    Returns
    -------
    str
        The alphabetically sorted names of the sub-criteria
        concatenated using double underscores between them.
    """
    collected = [criterion.name() for criterion in self._criteria]
    return '__'.join(sorted(collected))
def _difference_map(image, color_axis): """Difference map of the image. Approximate derivatives of the function image[c, :, :] (e.g. PyTorch) or image[:, :, c] (e.g. Keras). dfdx, dfdy = difference_map(image) In: image: numpy.ndarray of shape C x h x w or h x w x C, with C = 1 or C = 3 (color channels), h, w >= 3, and [type] is 'Float' or 'Double'. Contains the values of functions f_b: R ^ 2 -> R ^ C, b = 1, ..., B, on the grid {0, ..., h - 1} x {0, ..., w - 1}. Out: dfdx: numpy.ndarray dfdy: numpy.ndarray of shape C x h x w or h x w x C contain the x and y derivatives of f at the points on the grid, approximated by central differences (except on boundaries): For c = 0, ... , C, i = 1, ..., h - 2, j = 1, ..., w - 2. e.g. for shape = c x h x w: dfdx[c, i, j] = (image[c, i, j + 1] - image[c, i, j - 1]) / 2 dfdx[c, i, j] = (image[c, i + 1, j] - image[c, i - 1, j]) / 2 positive x-direction is along rows from left to right. positive y-direction is along columns from above to below. """ if color_axis == 2: image = _transpose_image(image) # Derivative in x direction (rows from left to right) dfdx = np.zeros_like(image) # forward difference in first column dfdx[:, :, 0] = image[:, :, 1] - image[:, :, 0] # backwards difference in last column dfdx[:, :, -1] = image[:, :, -1] - image[:, :, -2] # central difference elsewhere dfdx[:, :, 1:-1] = 0.5 * (image[:, :, 2:] - image[:, :, :-2]) # Derivative in y direction (columns from above to below) dfdy = np.zeros_like(image) # forward difference in first row dfdy[:, 0, :] = image[:, 1, :] - image[:, 0, :] # backwards difference in last row dfdy[:, -1, :] = image[:, -1, :] - image[:, -2, :] # central difference elsewhere dfdy[:, 1:-1, :] = 0.5 * (image[:, 2:, :] - image[:, :-2, :]) return dfdx, dfdy
def _compose(image, vec_field, color_axis): """Calculate the composition of the function image with the vector field vec_field by interpolation. new_func = compose(image, vec_field) In: image: numpy.ndarray of shape C x h x w with C = 3 or C = 1 (color channels), h, w >= 2, and [type] = 'Float' or 'Double'. Contains the values of a function f: R ^ 2 -> R ^ C on the grid {0, ..., h - 1} x {0, ..., w - 1}. vec_field: numpy.array of shape (h, w, 2) vec_field[y, x, 0] is the x-coordinate of the vector vec_field[y, x] vec_field[y, x, 1] is the y-coordinate of the vector vec_field[y, x] positive x-direction is along rows from left to right positive y-direction is along columns from above to below """ if color_axis == 2: image = _transpose_image(image) c, h, w = image.shape # colors, height, width hrange = np.arange(h) wrange = np.arange(w) MGx, MGy = np.meshgrid(wrange, hrange) defMGx = (MGx + vec_field[:, :, 0]).clip(0, w - 1) defMGy = (MGy + vec_field[:, :, 1]).clip(0, h - 1) new_image = np.empty_like(image) for channel in range(c): # Get a linear interpolation for this color channel. interpolation = RectBivariateSpline(hrange, wrange, image[channel], kx=1, ky=1) # grid = False since the deformed grid is irregular new_image[channel] = interpolation(defMGy, defMGx, grid=False) if color_axis == 2: return _re_transpose_image(new_image) else: return new_image
def _create_vec_field(fval, gradf, d1x, d2x, color_axis, smooth=0): """Calculate the deformation vector field In: fval: float gradf: numpy.ndarray of shape C x h x w with C = 3 or C = 1 (color channels), h, w >= 1. d1x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. d2x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. smooth: float Width of the Gaussian kernel used for smoothing (default is 0 for no smoothing). Out: vec_field: numpy.ndarray of shape (2, h, w). """ if color_axis == 2: gradf = _transpose_image(gradf) c, h, w = gradf.shape # colors, height, width # Sum over color channels alpha1 = np.sum(gradf * d1x, axis=0) alpha2 = np.sum(gradf * d2x, axis=0) norm_squared_alpha = (alpha1 ** 2).sum() + (alpha2 ** 2).sum() # Smoothing if smooth > 0: alpha1 = gaussian_filter(alpha1, smooth) alpha2 = gaussian_filter(alpha2, smooth) norm_squared_alpha = (alpha1 ** 2).sum() + (alpha2 ** 2).sum() # In theory, we need to apply the filter a second time. alpha1 = gaussian_filter(alpha1, smooth) alpha2 = gaussian_filter(alpha2, smooth) vec_field = np.empty((h, w, 2)) vec_field[:, :, 0] = -fval * alpha1 / norm_squared_alpha vec_field[:, :, 1] = -fval * alpha2 / norm_squared_alpha return vec_field
def softmax(logits):
    """Transforms predictions into probability values.

    Parameters
    ----------
    logits : array_like
        The logits predicted by the model.

    Returns
    -------
    `numpy.ndarray`
        Probability values corresponding to the logits.
    """
    assert logits.ndim == 1
    # subtract the max logit for numerical stability; mathematically
    # the result is unchanged, but exp() can no longer overflow
    exps = np.exp(logits - np.max(logits))
    return exps / exps.sum()
def crossentropy(label, logits):
    """Calculates the cross-entropy.

    Parameters
    ----------
    logits : array_like
        The logits predicted by the model.
    label : int
        The label describing the target distribution.

    Returns
    -------
    float
        The cross-entropy between softmax(logits) and onehot(label).
    """
    assert logits.ndim == 1
    # log-sum-exp with the max subtracted for numerical stability
    shifted = logits - np.max(logits)
    return np.log(np.sum(np.exp(shifted))) - shifted[label]
def batch_crossentropy(label, logits):
    """Calculates the cross-entropy for a batch of logits.

    Parameters
    ----------
    logits : array_like
        The logits predicted by the model for a batch of inputs.
    label : int
        The label describing the target distribution.

    Returns
    -------
    np.ndarray
        The cross-entropy between softmax(logits[i]) and onehot(label)
        for all i.
    """
    assert logits.ndim == 2
    # row-wise log-sum-exp with the max subtracted for stability
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    return np.log(np.exp(shifted).sum(axis=1)) - shifted[:, label]
def binarize(x, values, threshold=None, included_in='upper'):
    """Binarizes the values of x.

    Parameters
    ----------
    values : tuple of two floats
        The lower and upper value to which the inputs are mapped.
    threshold : float
        The threshold; defaults to (values[0] + values[1]) / 2 if None.
    included_in : str
        Whether the threshold value itself belongs to the lower or
        upper interval.
    """
    lower, upper = values
    if threshold is None:
        threshold = (lower + upper) / 2.
    result = x.copy()
    # note: the two assignments are sequential on purpose; the second
    # comparison sees the already-rewritten values
    if included_in == 'upper':
        result[result < threshold] = lower
        result[result >= threshold] = upper
    elif included_in == 'lower':
        result[result <= threshold] = lower
        result[result > threshold] = upper
    else:
        raise ValueError('included_in must be "lower" or "upper"')
    return result
def imagenet_example(shape=(224, 224), data_format='channels_last'):
    """Returns an example image and its imagenet class label.

    Parameters
    ----------
    shape : list of integers
        The shape of the returned image.
    data_format : str
        "channels_first" or "channels_last"

    Returns
    -------
    image : array_like
        The example image.
    label : int
        The imagenet label associated with the image.

    NOTE: This function is deprecated and will be removed in the
    future.
    """
    assert len(shape) == 2
    assert data_format in ['channels_first', 'channels_last']
    from PIL import Image
    path = os.path.join(os.path.dirname(__file__), 'example.png')
    pil_image = Image.open(path).resize(shape)
    # drop a possible alpha channel, keep RGB only
    image = np.asarray(pil_image, dtype=np.float32)[:, :, :3]
    assert image.shape == shape + (3,)
    if data_format == 'channels_first':
        image = np.transpose(image, (2, 0, 1))
    return image, 282
def samples(dataset='imagenet', index=0, batchsize=1, shape=(224, 224),
            data_format='channels_last'):
    ''' Returns a batch of example images and the corresponding labels

    Parameters
    ----------
    dataset : string
        The data set to load (options: imagenet, mnist, cifar10,
        cifar100, fashionMNIST)
    index : int
        For each data set 20 example images exist. The returned batch
        contains the images with index
        [index, index + 1, index + 2, ...]
    batchsize : int
        Size of batch.
    shape : list of integers
        The shape of the returned image (only relevant for Imagenet).
    data_format : str
        "channels_first" or "channels_last"

    Returns
    -------
    images : array_like
        The batch of example images
    labels : array of int
        The labels associated with the images.
    '''
    from PIL import Image
    images, labels = [], []
    basepath = os.path.dirname(__file__)
    samplepath = os.path.join(basepath, 'data')
    files = os.listdir(samplepath)
    for idx in range(index, index + batchsize):
        # only 20 examples per dataset exist; wrap around
        i = idx % 20
        # get filename and label
        # filenames follow the pattern <dataset>_<index>_<label>.<ext>
        file = [n for n in files if '{}_{:02d}_'.format(dataset, i) in n][0]
        label = int(file.split('.')[0].split('_')[-1])
        # open file
        path = os.path.join(samplepath, file)
        image = Image.open(path)
        if dataset == 'imagenet':
            image = image.resize(shape)
        image = np.asarray(image, dtype=np.float32)
        # mnist images are grayscale (2-d) and have no channel axis
        if dataset != 'mnist' and data_format == 'channels_first':
            image = np.transpose(image, (2, 0, 1))
        images.append(image)
        labels.append(label)
    labels = np.array(labels)
    images = np.stack(images)
    return images, labels
def onehot_like(a, index, value=1):
    """Creates an array like a, with all values set to 0 except one.

    Parameters
    ----------
    a : array_like
        The returned one-hot array will have the same shape and dtype
        as this array
    index : int
        The index that should be set to `value`
    value : single value compatible with a.dtype
        The value to set at the given index

    Returns
    -------
    `numpy.ndarray`
        One-hot array with the given value at the given location and
        zeros everywhere else.
    """
    onehot = np.zeros_like(a)
    onehot[index] = value
    return onehot
def _get_output(self, a, image): """ Looks up the precomputed adversarial image for a given image. """ sd = np.square(self._input_images - image) mses = np.mean(sd, axis=tuple(range(1, sd.ndim))) index = np.argmin(mses) # if we run into numerical problems with this approach, we might # need to add a very tiny threshold here if mses[index] > 0: raise ValueError('No precomputed output image for this image') return self._output_images[index]
def _process_gradient(self, backward, dmdp): """ backward: `callable` callable that backpropagates the gradient of the model w.r.t to preprocessed input through the preprocessing to get the gradient of the model's output w.r.t. the input before preprocessing dmdp: gradient of model w.r.t. preprocessed input """ if backward is None: # pragma: no cover raise ValueError('Your preprocessing function does not provide' ' an (approximate) gradient') dmdx = backward(dmdp) assert dmdx.dtype == dmdp.dtype return dmdx
def predictions(self, image):
    """Convenience method that calculates predictions for a single
    image.

    Parameters
    ----------
    image : `numpy.ndarray`
        Single input with shape as expected by the model
        (without the batch dimension).

    Returns
    -------
    `numpy.ndarray`
        Vector of predictions (logits, i.e. before the softmax) with
        shape (number of classes,).

    See Also
    --------
    :meth:`batch_predictions`
    """
    batch = image[np.newaxis]
    return np.squeeze(self.batch_predictions(batch), axis=0)
def gradient(self, image, label):
    """Calculates the gradient of the cross-entropy loss w.r.t. the
    image.

    The default implementation calls predictions_and_gradient.
    Subclasses can provide more efficient implementations that only
    calculate the gradient.

    Parameters
    ----------
    image : `numpy.ndarray`
        Single input with shape as expected by the model
        (without the batch dimension).
    label : int
        Reference label used to calculate the gradient.

    Returns
    -------
    gradient : `numpy.ndarray`
        The gradient of the cross-entropy loss w.r.t. the image. Will
        have the same shape as the image.

    See Also
    --------
    :meth:`predictions_and_gradient`
    """
    # discard the predictions, keep only the gradient
    return self.predictions_and_gradient(image, label)[1]
def clone(git_uri):
    """ Clone a remote git repository to a local path.

    :param git_uri: the URI to the git repository to be cloned
    :return: the generated local path where the repository has been
        cloned to
    """
    # the local directory is derived from a hash of the URI, so the
    # same URI always maps to the same path
    local_path = home_directory_path(FOLDER, sha256_hash(git_uri))
    if path_exists(local_path):
        logging.info(  # pragma: no cover
            "Git repository already exists locally.")  # pragma: no cover
    else:
        _clone_repo(git_uri, local_path)
    return local_path
def run(command, parser, cl_args, unknown_args):
    ''' Run the deactivate command via the shared CLI helper.

    :param command: the command name being executed
    :param parser: the argument parser (unused here)
    :param cl_args: parsed command-line arguments
    :param unknown_args: extra, unrecognized arguments (unused here)
    :return: the result of cli_helper.run
    '''
    Log.debug("Deactivate Args: %s", cl_args)
    return cli_helper.run(command, cl_args, "deactivate topology")
def poll(self, timeout=0.0):
    """Modified version of poll() from asyncore module.

    Collects the readable/writable file descriptors from the
    registered socket map (plus the wakeup pipe), select()s on them,
    and dispatches the ready descriptors back to asyncore's
    read/write/_exception handlers.
    """
    if self.sock_map is None:
        Log.warning("Socket map is not registered to Gateway Looper")
    readable_lst = []
    writable_lst = []
    error_lst = []
    if self.sock_map is not None:
        for fd, obj in self.sock_map.items():
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                readable_lst.append(fd)
            # accepting sockets are never treated as writable
            if is_w and not obj.accepting:
                writable_lst.append(fd)
            if is_r or is_w:
                error_lst.append(fd)
    # Add wakeup fd
    readable_lst.append(self.pipe_r)
    Log.debug("Will select() with timeout: " + str(timeout) +
              ", with map: " + str(self.sock_map))
    try:
        readable_lst, writable_lst, error_lst = \
            select.select(readable_lst, writable_lst, error_lst, timeout)
    except select.error as err:
        Log.debug("Trivial error: " + str(err))
        # EINTR just means the call was interrupted; treat it as a
        # no-op poll. Anything else is re-raised.
        if err.args[0] != errno.EINTR:
            raise
        else:
            return
    Log.debug("Selected [r]: " + str(readable_lst) +
              " [w]: " + str(writable_lst) + " [e]: " + str(error_lst))
    if self.pipe_r in readable_lst:
        # drain the wakeup pipe; it only exists to interrupt select()
        Log.debug("Read from pipe")
        os.read(self.pipe_r, 1024)
        readable_lst.remove(self.pipe_r)
    if self.sock_map is not None:
        for fd in readable_lst:
            obj = self.sock_map.get(fd)
            if obj is None:
                continue
            asyncore.read(obj)
        for fd in writable_lst:
            obj = self.sock_map.get(fd)
            if obj is None:
                continue
            asyncore.write(obj)
        for fd in error_lst:
            obj = self.sock_map.get(fd)
            if obj is None:
                continue
            # pylint: disable=W0212
            asyncore._exception(obj)
def configure(level, logfile=None):
    """ Configure the global logger: level, format and an optional
    log file. """
    log_format = "%(asctime)s-%(levelname)s: %(message)s"
    date_format = '%a, %d %b %Y %H:%M:%S'
    logging.basicConfig(format=log_format, datefmt=date_format)
    Log.setLevel(level)
    if logfile is not None:
        # mirror all log records into the given file as well
        file_handler = logging.FileHandler(logfile)
        file_handler.setFormatter(logging.Formatter(log_format))
        Log.addHandler(file_handler)
def write_success_response(self, result):
    """ Write *result* back as a successful JSON response.

    Result may be a python dictionary, array or a primitive type
    that can be converted to JSON. The handler's execution time is
    attached to the response.
    """
    response = self.make_success_response(result)
    elapsed = time.time() - self.basehandler_starttime
    response[constants.RESPONSE_KEY_EXECUTION_TIME] = elapsed
    self.write_json_response(response)
def write_error_response(self, message):
    """ Write *message* as a failure response and set 404 status.

    The handler's execution time is attached to the response.
    """
    self.set_status(404)
    response = self.make_error_response(str(message))
    elapsed = time.time() - self.basehandler_starttime
    response[constants.RESPONSE_KEY_EXECUTION_TIME] = elapsed
    self.write_json_response(response)
def write_json_response(self, response):
    """ Serialize *response* to JSON, write it back, and set the JSON
    content type header. """
    self.write(tornado.escape.json_encode(response))
    self.set_header("Content-Type", "application/json")
def make_response(self, status):
    """ Makes the base dict for the response.

    The status is the string value for the key "status" of the
    response. This should be "success" or "failure".
    """
    return {
        constants.RESPONSE_KEY_STATUS: status,
        constants.RESPONSE_KEY_VERSION: constants.API_VERSION,
        constants.RESPONSE_KEY_EXECUTION_TIME: 0,
        constants.RESPONSE_KEY_MESSAGE: "",
    }
def make_success_response(self, result):
    """ Makes the python dict corresponding to the JSON that needs to
    be sent for a successful response. Result is the actual payload
    that gets sent.
    """
    response = self.make_response(constants.RESPONSE_STATUS_SUCCESS)
    response.update({constants.RESPONSE_KEY_RESULT: result})
    return response
def make_error_response(self, message):
    """ Makes the python dict corresponding to the JSON that needs to
    be sent for a failed response. Message is the message that is sent
    as the reason for failure.
    """
    response = self.make_response(constants.RESPONSE_STATUS_FAILURE)
    response.update({constants.RESPONSE_KEY_MESSAGE: message})
    return response
def get_argument_cluster(self):
    """ Return the 'cluster' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        cluster = self.get_argument(constants.PARAM_CLUSTER)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
    return cluster
def get_argument_role(self):
    """ Return the 'role' request argument (None if not supplied).

    Raises Exception if argument retrieval fails.
    """
    try:
        role = self.get_argument(constants.PARAM_ROLE, default=None)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
    return role
def get_argument_environ(self):
    """ Return the 'environ' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        environ = self.get_argument(constants.PARAM_ENVIRON)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
    return environ
def get_argument_topology(self):
    """ Return the 'topology' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        return self.get_argument(constants.PARAM_TOPOLOGY)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
def get_argument_component(self):
    """ Return the 'component' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        return self.get_argument(constants.PARAM_COMPONENT)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
def get_argument_instance(self):
    """ Return the 'instance' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        return self.get_argument(constants.PARAM_INSTANCE)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
def get_argument_starttime(self):
    """ Return the 'starttime' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        return self.get_argument(constants.PARAM_STARTTIME)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
def get_argument_endtime(self):
    """ Return the 'endtime' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        return self.get_argument(constants.PARAM_ENDTIME)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
def get_argument_query(self):
    """ Return the 'query' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        return self.get_argument(constants.PARAM_QUERY)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
def get_argument_offset(self):
    """ Return the 'offset' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        return self.get_argument(constants.PARAM_OFFSET)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
def get_argument_length(self):
    """ Return the 'length' request argument.

    Raises Exception if the argument is missing.
    """
    try:
        return self.get_argument(constants.PARAM_LENGTH)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
def get_required_arguments_metricnames(self):
    """ Return the repeated 'metricname' request arguments as a list.

    Uses the get_argument"s" variation, so the argument may be
    repeated. Raises Exception if no metricname argument is present.
    """
    try:
        names = self.get_arguments(constants.PARAM_METRICNAME)
        if not names:
            # an empty list means the argument was never supplied
            raise tornado.web.MissingArgumentError(constants.PARAM_METRICNAME)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
    return names
def validateInterval(self, startTime, endTime):
    """ Helper function to validate an interval.

    An interval is valid if starttime and endtime parse as integers
    and starttime does not exceed endtime. Raises exception if the
    interval is not valid.
    """
    if int(startTime) > int(endTime):
        raise Exception("starttime is greater than endtime.")
def start_connect(self):
    """Tries to connect to the Heron Server

    ``loop()`` method needs to be called after this.
    """
    Log.debug("In start_connect() of %s" % self._get_classname())
    # TODO: specify buffer size, exception handling
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    # when ready, handle_connect is called
    self._connecting = True
    self.connect(self.endpoint)
def register_on_message(self, msg_builder):
    """Registers protobuf message builders that this client wants to
    receive

    :param msg_builder: callable to create a protobuf message that
        this client wants to receive
    """
    # build a throwaway instance just to read the message's full name
    message = msg_builder()
    full_name = message.DESCRIPTOR.full_name
    Log.debug("In register_on_message(): %s" % full_name)
    self.registered_message_map[full_name] = msg_builder
def send_request(self, request, context, response_type, timeout_sec):
    """Sends a request message (REQID is non-zero)

    :param request: the request protobuf message to send
    :param context: opaque context handed back to on_response
    :param response_type: expected protobuf type of the response
    :param timeout_sec: timeout in seconds; <= 0 disables the timeout
    """
    # generates a unique request id
    reqid = REQID.generate()
    Log.debug("%s: In send_request() with REQID: %s" %
              (self._get_classname(), str(reqid)))
    # register response message type
    self.response_message_map[reqid] = response_type
    self.context_map[reqid] = context
    # Add timeout for this request if necessary
    if timeout_sec > 0:
        self.looper.register_timer_task_in_sec(
            lambda: self.handle_timeout(reqid), timeout_sec)
    self._send_packet(OutgoingPacket.create_packet(reqid, request))
def send_message(self, message):
    """Sends a message (REQID is zero)"""
    Log.debug("In send_message() of %s" % self._get_classname())
    # a zero REQID marks this as a one-way message, not a request
    packet = OutgoingPacket.create_packet(REQID.generate_zero(), message)
    self._send_packet(packet)
def handle_timeout(self, reqid):
    """Handles timeout of a pending request.

    If the request is still outstanding, its bookkeeping entries are
    removed and on_response is notified with TIMEOUT_ERROR.
    """
    if reqid not in self.context_map:
        # response already arrived; nothing to do
        return
    context = self.context_map.pop(reqid)
    self.response_message_map.pop(reqid)
    self.on_response(StatusCode.TIMEOUT_ERROR, context, None)
def create_tar(tar_filename, files, config_dir, config_files):
    ''' Create a gzipped tar file containing the given files plus the
    config directory and config files (placed under the sandbox conf
    directory). Raises Exception for any missing file or directory.
    '''
    with contextlib.closing(
            tarfile.open(tar_filename, 'w:gz', dereference=True)) as tar:
        for filename in files:
            if not os.path.isfile(filename):
                raise Exception("%s is not an existing file" % filename)
            tar.add(filename, arcname=os.path.basename(filename))
        if not os.path.isdir(config_dir):
            raise Exception("%s is not an existing directory" % config_dir)
        tar.add(config_dir, arcname=get_heron_sandbox_conf_dir())
        for filename in config_files:
            if not os.path.isfile(filename):
                raise Exception("%s is not an existing file" % filename)
            arcfile = os.path.join(get_heron_sandbox_conf_dir(),
                                   os.path.basename(filename))
            tar.add(filename, arcname=arcfile)
def get_subparser(parser, command):
    ''' Retrieve the subparser registered under *command* from
    *parser*, or None if there is no such subparser.
    '''
    # pylint: disable=protected-access
    # there will probably only be one _SubParsersAction,
    # but better safe than sorry
    for action in parser._actions:
        if not isinstance(action, argparse._SubParsersAction):
            continue
        for choice, subparser in action.choices.items():
            if choice == command:
                return subparser
    return None
def get_heron_dir():
    """ Extract the heron directory from the .pex file location.

    For example, when __file__ is
    '/Users/heron-user/bin/heron/heron/tools/common/src/python/utils/config.pyc',
    and its real path is
    '/Users/heron-user/.heron/bin/heron/tools/common/src/python/utils/config.pyc',
    the extracted path is '/Users/heron-user/.heron', which is the
    heron directory — hence stripping 9 trailing path components.

    :return: root location of the .pex file
    """
    go_above_dirs = 9
    real_path = os.path.realpath(__file__)
    heron_dir = "/".join(real_path.split('/')[:-go_above_dirs])
    return normalized_class_path(heron_dir)
def get_heron_libs(local_jars):
  """Get all the heron lib jars with the absolute paths"""
  lib_dir = get_heron_lib_dir()
  # prefix every jar name with the heron lib directory
  return [os.path.join(lib_dir, jar) for jar in local_jars]
def parse_cluster_role_env(cluster_role_env, config_path):
  """Parse cluster/[role]/[environ], supply default, if not provided, not required

  :param cluster_role_env: string of the form "cluster[/role[/env]]"
  :param config_path: cluster config directory that may contain a client.yaml
    with role/env requirement flags
  :return: (cluster, role, environ) tuple with defaults filled in
  :raises Exception: if config_path is invalid, or a required part is missing
  """
  parts = cluster_role_env.split('/')[:3]
  if not os.path.isdir(config_path):
    Log.error("Config path cluster directory does not exist: %s" % config_path)
    raise Exception("Invalid config path")

  # if cluster/role/env is not completely provided, check further
  if len(parts) < 3:

    cli_conf_file = os.path.join(config_path, CLIENT_YAML)

    # if client conf doesn't exist, use default value
    if not os.path.isfile(cli_conf_file):
      if len(parts) == 1:
        parts.append(getpass.getuser())
      if len(parts) == 2:
        parts.append(ENVIRON)
    else:
      cli_confs = {}
      with open(cli_conf_file, 'r') as conf_file:
        # safe_load: client.yaml holds plain scalars/maps only, and
        # yaml.load without an explicit Loader is unsafe/deprecated
        tmp_confs = yaml.safe_load(conf_file)

        # the return value of yaml.safe_load can be None for an empty file
        if tmp_confs is not None:
          cli_confs = tmp_confs
        else:
          print("Failed to read: %s due to it is empty" % (CLIENT_YAML))

      # if role is required but not provided, raise exception
      if len(parts) == 1:
        if (ROLE_REQUIRED in cli_confs) and (cli_confs[ROLE_REQUIRED] is True):
          raise Exception("role required but not provided (cluster/role/env = %s). See %s in %s"
                          % (cluster_role_env, ROLE_REQUIRED, cli_conf_file))
        else:
          parts.append(getpass.getuser())

      # if environ is required but not provided, raise exception
      if len(parts) == 2:
        if (ENV_REQUIRED in cli_confs) and (cli_confs[ENV_REQUIRED] is True):
          raise Exception("environ required but not provided (cluster/role/env = %s). See %s in %s"
                          % (cluster_role_env, ENV_REQUIRED, cli_conf_file))
        else:
          parts.append(ENVIRON)

  # if cluster or role or environ is empty, print
  if len(parts[0]) == 0 or len(parts[1]) == 0 or len(parts[2]) == 0:
    print("Failed to parse")
    sys.exit(1)

  return (parts[0], parts[1], parts[2])
def get_cluster_role_env(cluster_role_env):
  """Parse cluster/[role]/[environ], supply empty string, if not provided"""
  parts = cluster_role_env.split('/')[:3]
  # pad missing role/environ with empty strings so the result is always a 3-tuple
  while len(parts) < 3:
    parts.append("")
  return (parts[0], parts[1], parts[2])
def direct_mode_cluster_role_env(cluster_role_env, config_path):
  """Check cluster/[role]/[environ], if they are required

  :param cluster_role_env: (cluster, role, environ) tuple
  :param config_path: cluster config directory that may contain a client.yaml
  :return: True if all required parts are present (or no requirements exist)
  :raises Exception: if a required role/environ is missing
  """
  # otherwise, get the client.yaml file
  cli_conf_file = os.path.join(config_path, CLIENT_YAML)

  # if client conf doesn't exist, use default value
  if not os.path.isfile(cli_conf_file):
    return True

  client_confs = {}
  with open(cli_conf_file, 'r') as conf_file:
    # safe_load: the client config contains plain YAML only, and
    # yaml.load without an explicit Loader is unsafe/deprecated
    client_confs = yaml.safe_load(conf_file)

    # the return value of yaml.safe_load can be None for an empty file
    if not client_confs:
      return True

    # if role is required but not provided, raise exception
    role_present = len(cluster_role_env[1]) > 0
    if ROLE_REQUIRED in client_confs and client_confs[ROLE_REQUIRED] and not role_present:
      raise Exception("role required but not provided (cluster/role/env = %s). See %s in %s"
                      % (cluster_role_env, ROLE_REQUIRED, cli_conf_file))

    # if environ is required but not provided, raise exception
    environ_present = len(cluster_role_env[2]) > 0
    if ENV_REQUIRED in client_confs and client_confs[ENV_REQUIRED] and not environ_present:
      raise Exception("environ required but not provided (cluster/role/env = %s). See %s in %s"
                      % (cluster_role_env, ENV_REQUIRED, cli_conf_file))

  return True
def server_mode_cluster_role_env(cluster_role_env, config_map):
  """Check cluster/[role]/[environ], if they are required"""
  cmap = config_map[cluster_role_env[0]]

  # if role is required but not provided, raise exception
  missing_role = len(cluster_role_env[1]) == 0
  if ROLE_KEY in cmap and cmap[ROLE_KEY] and missing_role:
    raise Exception("role required but not provided (cluster/role/env = %s)."\
        % (cluster_role_env))

  # if environ is required but not provided, raise exception
  missing_environ = len(cluster_role_env[2]) == 0
  if ENVIRON_KEY in cmap and cmap[ENVIRON_KEY] and missing_environ:
    raise Exception("environ required but not provided (cluster/role/env = %s)."\
        % (cluster_role_env))

  return True
def defaults_cluster_role_env(cluster_role_env):
  """
  if role is not provided, supply userid
  if environ is not provided, supply 'default'
  """
  cluster = cluster_role_env[0]
  role = cluster_role_env[1]
  env = cluster_role_env[2]
  # only when both role and environ are missing do we substitute defaults
  if len(role) == 0 and len(env) == 0:
    return (cluster, getpass.getuser(), ENVIRON)
  return (cluster, role, env)
def parse_override_config_and_write_file(namespace):
  """
  Parse the command line for overriding the defaults and
  create an override file.
  """
  overrides = parse_override_config(namespace)
  try:
    # write the overrides into a fresh temp directory as YAML
    override_config_file = os.path.join(tempfile.mkdtemp(), OVERRIDE_YAML)
    with open(override_config_file, 'w') as f:
      f.write(yaml.dump(overrides))
    return override_config_file
  except Exception as e:
    raise Exception("Failed to parse override config: %s" % str(e))
def parse_override_config(namespace):
  """Parse the command line for overriding the defaults

  :param namespace: iterable of "key=value" strings
  :return: dict mapping key to value; 'true'/'false' (common casings)
    become booleans, everything else stays a string
  :raises Exception: if an entry does not contain '='
  """
  overrides = dict()
  for config in namespace:
    # split only on the first '=' so values may themselves contain '='
    kv = config.split("=", 1)
    if len(kv) != 2:
      raise Exception("Invalid config property format (%s) expected key=value" % config)
    if kv[1] in ['true', 'True', 'TRUE']:
      overrides[kv[0]] = True
    elif kv[1] in ['false', 'False', 'FALSE']:
      overrides[kv[0]] = False
    else:
      overrides[kv[0]] = kv[1]
  return overrides
def get_java_path():
  """Get the path of java executable"""
  # NOTE(review): assumes JAVA_HOME is set (os.path.join would fail on None);
  # callers such as check_java_home_set verify that first — confirm all callers do
  java_home = os.environ.get("JAVA_HOME")
  return os.path.join(java_home, BIN_DIR, "java")
def check_java_home_set():
  """Check if the java home set"""
  # check if environ variable is set
  if "JAVA_HOME" not in os.environ:
    Log.error("JAVA_HOME not set")
    return False

  # check if the value set is correct
  java_path = get_java_path()
  if not (os.path.isfile(java_path) and os.access(java_path, os.X_OK)):
    Log.error("JAVA_HOME/bin/java either does not exist or not an executable")
    return False
  return True
def check_release_file_exists():
  """Check if the release.yaml file exists"""
  release_file = get_heron_release_file()

  # success only when the path exists and is a regular file
  if os.path.isfile(release_file):
    return True
  Log.error("Required file not found: %s" % release_file)
  return False
def print_build_info(zipped_pex=False):
  """Print build_info from release.yaml

  :param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
  """
  if zipped_pex:
    release_file = get_zipped_heron_release_file()
  else:
    release_file = get_heron_release_file()
  with open(release_file) as release_info:
    # safe_load: release.yaml contains plain key/value pairs, and
    # yaml.load without an explicit Loader is unsafe/deprecated
    release_map = yaml.safe_load(release_info)
    # print the entries sorted by key for stable output
    release_items = sorted(release_map.items(), key=lambda tup: tup[0])
    for key, value in release_items:
      print("%s : %s" % (key, value))
def get_version_number(zipped_pex=False):
  """Get the version from release.yaml

  :param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
  :return: version string, or 'unknown' if no version entry was found
  """
  if zipped_pex:
    release_file = get_zipped_heron_release_file()
  else:
    release_file = get_heron_release_file()
  with open(release_file) as release_info:
    for line in release_info:
      # strip only a trailing newline: slicing line[:-1] would drop a real
      # character when the file's final line has no newline
      trunks = line.rstrip('\n').split(' ')
      if trunks[0] == 'heron.build.version':
        return trunks[-1].replace("'", "")
  return 'unknown'
def insert_bool(param, command_args):
  '''
  Insert the literal string 'True' right after `param` in command_args,
  stopping the search at a '--' separator.
  :param param: flag token to look for
  :param command_args: list of CLI tokens, modified in place
  :return: the (possibly modified) command_args list
  '''
  for index, token in enumerate(command_args):
    if token == '--':
      # everything after '--' is passed through untouched; stop looking
      break
    if token == param:
      command_args.insert(index + 1, 'True')
      break
  return command_args
def run(command, parser, args, unknown_args):
  """ run command """
  # get the command for detailed help
  command_help = args['help-command']

  # if no command is provided, just print main help
  if command_help == 'help':
    parser.print_help()
    return True

  # get the subparser for the specific command
  subparser = config.get_subparser(parser, command_help)
  if subparser is None:
    Log.error("Unknown subcommand \'%s\'" % command_help)
    return False
  print(subparser.format_help())
  return True
def get(self):
  """ Handle GET: fetch the exception summary for one component of a
  topology and write it back as the HTTP response.

  Reads cluster/environ/role/topology/component/instance from the request
  arguments; any failure (bad argument, unknown topology, tmaster error)
  is converted into an error response.
  """
  try:
    cluster = self.get_argument_cluster()
    environ = self.get_argument_environ()
    role = self.get_argument_role()
    topology_name = self.get_argument_topology()
    component = self.get_argument_component()
    topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
        cluster, role, environ, topology_name)
    instances = self.get_arguments(constants.PARAM_INSTANCE)
    # delegate to the coroutine that queries the tmaster stats endpoint
    exceptions_summary = yield tornado.gen.Task(self.getComponentExceptionSummary,
                                                topology.tmaster, component, instances)
    self.write_success_response(exceptions_summary)
  except Exception as e:
    # full traceback only at debug level; client just gets the error response
    Log.debug(traceback.format_exc())
    self.write_error_response(e)
def getComponentExceptionSummary(self, tmaster, component_name, instances=None, callback=None):
  """
  Get the summary of exceptions for component_name and list of instances.
  Empty instance list will fetch all exceptions.

  :param tmaster: tmaster location; must expose host and stats_port
  :param component_name: component whose exceptions are summarized
  :param instances: optional list of instance ids; None/empty fetches all
  :param callback: kept for tornado.gen.Task compatibility; not called directly
  """
  if not tmaster or not tmaster.host or not tmaster.stats_port:
    return
  exception_request = tmaster_pb2.ExceptionLogRequest()
  exception_request.component_name = component_name
  # avoid a mutable default argument; treat None the same as "all instances"
  instances = instances or []
  if len(instances) > 0:
    exception_request.instances.extend(instances)
  request_str = exception_request.SerializeToString()
  port = str(tmaster.stats_port)
  host = tmaster.host
  url = "http://{0}:{1}/exceptionsummary".format(host, port)
  Log.debug("Creating request object.")
  request = tornado.httpclient.HTTPRequest(url,
                                           body=request_str,
                                           method='POST',
                                           request_timeout=5)
  Log.debug('Making HTTP call to fetch exceptionsummary url: %s', url)
  try:
    client = tornado.httpclient.AsyncHTTPClient()
    result = yield client.fetch(request)
    Log.debug("HTTP call complete.")
  except tornado.httpclient.HTTPError as e:
    raise Exception(str(e))

  # Check the response code - error if it is in 400s or 500s
  responseCode = result.code
  if responseCode >= 400:
    # str() is required here: concatenating the int code directly raised TypeError
    message = "Error in getting exceptions from Tmaster, code: " + str(responseCode)
    Log.error(message)
    raise tornado.gen.Return({
        "message": message
    })

  # Parse the response from tmaster.
  exception_response = tmaster_pb2.ExceptionLogResponse()
  exception_response.ParseFromString(result.body)

  if exception_response.status.status == common_pb2.NOTOK:
    if exception_response.status.HasField("message"):
      raise tornado.gen.Return({
          "message": exception_response.status.message
      })

  # Send response
  ret = []
  for exception_log in exception_response.exceptions:
    # NOTE(review): 'class_name' is populated from the stacktrace field —
    # confirm this mapping is intentional
    ret.append({'class_name': exception_log.stacktrace,
                'lasttime': exception_log.lasttime,
                'firsttime': exception_log.firsttime,
                'count': str(exception_log.count)})
  raise tornado.gen.Return(ret)
def get(self, cluster, environ, topology):
  '''
  Render the config page for a single topology.
  :param cluster:
  :param environ:
  :param topology:
  :return:
  '''
  # pylint: disable=no-member
  options = {
      "cluster": cluster,
      "environ": environ,
      "topology": topology,
      "active": "topologies",
      "function": common.className,
      "baseUrl": self.baseUrl,
  }
  self.render("config.html", **options)
def get(self):
  '''
  Render the page listing all topologies across clusters.
  :return:
  '''
  clusters = yield access.get_clusters()

  # pylint: disable=no-member
  options = {
      "topologies": [],  # no topologies
      "clusters": [str(cluster) for cluster in clusters],
      "active": "topologies",  # active icon the nav bar
      "function": common.className,
      "baseUrl": self.baseUrl,
  }

  # send the all topologies page
  self.render("topologies.html", **options)
def get(self, cluster, environ, topology): ''' :param cluster: :param environ: :param topology: :return: ''' # fetch the execution of the topology asynchronously execution_state = yield access.get_execution_state(cluster, environ, topology) # fetch scheduler location of the topology scheduler_location = yield access.get_scheduler_location(cluster, environ, topology) job_page_link = scheduler_location["job_page_link"] # convert the topology launch time to display format launched_at = datetime.utcfromtimestamp(execution_state['submission_time']) launched_time = launched_at.strftime('%Y-%m-%d %H:%M:%S UTC') # pylint: disable=no-member options = dict( cluster=cluster, environ=environ, topology=topology, execution_state=execution_state, launched=launched_time, status="running" if random.randint(0, 1) else "errors", active="topologies", job_page_link=job_page_link, function=common.className, baseUrl=self.baseUrl ) # send the single topology page self.render("topology.html", **options)
def get(self, cluster, environ, topology, container):
  '''
  Render the file viewer page for a file inside a container.
  :param cluster:
  :param environ:
  :param topology:
  :param container:
  :return:
  '''
  file_path = self.get_argument("path")

  options = {
      "cluster": cluster,
      "environ": environ,
      "topology": topology,
      "container": container,
      "path": file_path,
      "baseUrl": self.baseUrl,
  }

  self.render("file.html", **options)
def get(self, cluster, environ, topology, container):
  '''
  Write a chunk of a container file back to the client.
  :param cluster:
  :param environ:
  :param topology:
  :param container:
  :return:
  '''
  file_offset = self.get_argument("offset")
  read_length = self.get_argument("length")
  file_path = self.get_argument("path")

  # fetch the requested byte range via the tracker access layer
  chunk = yield access.get_container_file_data(cluster, environ, topology, container,
                                               file_path, file_offset, read_length)

  self.write(chunk)
  self.finish()
def get(self, cluster, environ, topology, container):
  '''
  Render the directory-browsing page for a container.
  :param cluster:
  :param environ:
  :param topology:
  :param container:
  :return:
  '''
  # default to the container's working directory
  browse_path = self.get_argument("path", default=".")
  stats = yield access.get_filestats(cluster, environ, topology, container, browse_path)

  options = {
      "cluster": cluster,
      "environ": environ,
      "topology": topology,
      "container": container,
      "path": browse_path,
      "filestats": stats,
      "baseUrl": self.baseUrl,
  }
  self.render("browse.html", **options)
def get(self, cluster, environ, topology, container): ''' :param cluster: :param environ: :param topology: :param container: :return: ''' # If the file is large, we want to abandon downloading # if user cancels the requests. # pylint: disable=attribute-defined-outside-init self.connection_closed = False path = self.get_argument("path") filename = path.split("/")[-1] self.set_header("Content-Disposition", "attachment; filename=%s" % filename) # Download the files in chunks. We are downloading from Tracker, # which in turns downloads from heron-shell. This much indirection # means that if we use static file downloading, the whole files would # be cached in memory before it can be sent downstream. Hence, we reuse # the file data API to read in chunks until the EOF, or until the download # is cancelled by user. # 4 MB gives good enough chunk size giving good speed for small files. # If files are large, a single threaded download may not be enough. file_download_url = access.get_container_file_download_url(cluster, environ, topology, container, path) Log.debug("file download url: %s", str(file_download_url)) def streaming_callback(chunk): self.write(chunk) self.flush() http_client = tornado.httpclient.AsyncHTTPClient() yield http_client.fetch(file_download_url, streaming_callback=streaming_callback) self.finish()
def get(self, path):
  ''' Handle GET: return a JSON listing of the given (relative) directory.

  Each entry maps a file name to its formatted stat string, a directory
  flag, and a URL-escaped path usable for further navigation.
  '''
  path = tornado.escape.url_unescape(path)
  if not path:
    path = "."

  # User should not be able to access anything outside
  # of the dir that heron-shell is running in. This ensures
  # sandboxing. So we don't allow absolute paths and parent
  # accessing.
  if not utils.check_path(path):
    self.write("Only relative paths are allowed")
    self.set_status(403)
    self.finish()
    return

  listing = utils.get_listing(path)
  file_stats = {}
  for fn in listing:
    try:
      # stat each entry once and reuse the result (it was fetched twice before)
      fn_stat = utils.get_stat(path, fn)
      formatted_stat = utils.format_prefix(fn, fn_stat)
      is_dir = stat.S_ISDIR(fn_stat.st_mode)
      file_stats[fn] = {
          "formatted_stat": formatted_stat,
          "is_dir": is_dir,
          "path": tornado.escape.url_escape(os.path.join(path, fn)),
      }
      if fn == "..":
        path_fragments = path.split("/")
        if not path_fragments:
          file_stats[fn]["path"] = "."
        else:
          file_stats[fn]["path"] = tornado.escape.url_escape("/".join(path_fragments[:-1]))
    except Exception:
      # best-effort listing: skip entries that disappear or cannot be stat'ed,
      # but do not swallow SystemExit/KeyboardInterrupt like a bare except would
      continue

  self.write(json.dumps(file_stats))
  self.finish()
def register_watch(self, callback):
  """
  Returns the UUID with which the watch is registered. This UUID
  can be used to unregister the watch. Returns None if watch could
  not be registered.

  The argument 'callback' must be a function that takes exactly one
  argument, the topology on which the watch was triggered. Note
  that the watch will be unregistered in case it raises any
  Exception the first time. This callback is also called at the
  time of registration.
  """
  RETRY_COUNT = 5
  # Retry in case UID is previously
  # generated, just in case...
  for _ in range(RETRY_COUNT):
    uid = uuid.uuid4()
    if uid in self.watches:
      # collision (vanishingly unlikely): try another UUID
      continue
    Log.info("Registering a watch with uid: " + str(uid))
    try:
      callback(self)
    except Exception as e:
      Log.error("Caught exception while triggering callback: " + str(e))
      Log.debug(traceback.format_exc())
      return None
    self.watches[uid] = callback
    return uid
  return None
def unregister_watch(self, uid):
  """
  Unregister the watch with the given UUID.
  """
  Log.info("Unregister a watch with uid: " + str(uid))
  # silently ignore UUIDs that are not (or no longer) registered
  if uid in self.watches:
    del self.watches[uid]
def trigger_watches(self):
  """
  Call all the callbacks. If any callback raises an Exception,
  unregister the corresponding watch.
  """
  to_remove = []
  # iterate over a snapshot: a callback may register/unregister watches,
  # which would otherwise raise "dictionary changed size during iteration"
  for uid, callback in list(self.watches.items()):
    try:
      callback(self)
    except Exception as e:
      Log.error("Caught exception while triggering callback: " + str(e))
      Log.debug(traceback.format_exc())
      to_remove.append(uid)

  for uid in to_remove:
    self.unregister_watch(uid)
def set_physical_plan(self, physical_plan):
  """ set physical plan """
  if physical_plan:
    self.physical_plan = physical_plan
    self.id = physical_plan.topology.id
  else:
    # clearing the plan also clears the topology id
    self.physical_plan = None
    self.id = None
  self.trigger_watches()
def set_packing_plan(self, packing_plan):
  """ set packing plan """
  if packing_plan:
    self.packing_plan = packing_plan
    self.id = packing_plan.id
  else:
    # clearing the plan also clears the id
    self.packing_plan = None
    self.id = None
  self.trigger_watches()
def set_execution_state(self, execution_state):
  """ set execution state """
  if execution_state:
    self.execution_state = execution_state
    cluster, environ = self.get_execution_state_dc_environ(execution_state)
    self.cluster = cluster
    self.environ = environ
    self.zone = cluster
  else:
    # note: zone is intentionally left untouched when clearing, matching
    # the existing behavior
    self.execution_state = None
    self.cluster = None
    self.environ = None
  self.trigger_watches()
def num_instances(self):
  """ Number of spouts + bolts """
  total = 0
  # sum the parallelism config value over every spout and bolt
  for component in self.spouts() + self.bolts():
    for kvs in component.comp.config.kvs:
      if kvs.key == api_constants.TOPOLOGY_COMPONENT_PARALLELISM:
        total += int(kvs.value)
        break
  return total
def get_machines(self):
  """
  Get all the machines that this topology is running on.
  These are the hosts of all the stmgrs.

  :return: list of stmgr host names (empty list if there is no physical plan)
  """
  if self.physical_plan:
    # build a real list so both branches return the same type
    # (map() is a lazy, single-use iterator on Python 3)
    return [s.host_name for s in self.physical_plan.stmgrs]
  return []
def get_status(self):
  """
  Get the current state of this topology.
  The state values are from the topology.proto
  RUNNING = 1, PAUSED = 2, KILLED = 3
  if the state is None "Unknown" is returned.
  """
  status = None
  if self.physical_plan and self.physical_plan.topology:
    status = self.physical_plan.topology.state

  # map proto state values to display names; anything else is "Unknown"
  state_names = {1: "Running", 2: "Paused", 3: "Killed"}
  return state_names.get(status, "Unknown")
def convert_pb_kvs(kvs, include_non_primitives=True):
  """ converts pb kvs to dict """
  config = {}
  for kv in kvs:
    # plain string value wins over any serialized form
    if kv.value:
      config[kv.key] = kv.value
      continue
    if not kv.serialized_value:
      continue
    # add serialized_value support for python values (fixme)
    if kv.type == topology_pb2.JAVA_SERIALIZED_VALUE:
      # serialized java object: decode it (may be skipped for non-primitives)
      decoded = _convert_java_value(kv, include_non_primitives=include_non_primitives)
      if decoded is not None:
        config[kv.key] = decoded
    else:
      config[kv.key] = _raw_value(kv)
  return config
def synch_topologies(self):
  """
  Sync the topologies with the statemgrs.

  Starts every configured state manager and registers a watch that keeps
  the local topology cache in step with each manager's topology list.
  Exits the process if any state manager fails to start.
  """
  self.state_managers = statemanagerfactory.get_all_state_managers(self.config.statemgr_config)
  try:
    for state_manager in self.state_managers:
      state_manager.start()
  except Exception as ex:
    Log.error("Found exception while initializing state managers: %s. Bailing out..." % ex)
    traceback.print_exc()
    sys.exit(1)

  def on_topologies_watch(state_manager, topologies):
    """watch topologies"""
    Log.info("State watch triggered for topologies.")
    Log.debug("Topologies: " + str(topologies))
    existingTopologies = self.getTopologiesForStateLocation(state_manager.name)
    # materialize into a list: a lazy map() iterator would be exhausted by
    # the first membership test below, making later `in` checks silently wrong
    existingTopNames = [t.name for t in existingTopologies]
    Log.debug("Existing topologies: " + str(existingTopNames))
    # drop topologies that disappeared from the state manager
    for name in existingTopNames:
      if name not in topologies:
        Log.info("Removing topology: %s in rootpath: %s",
                 name, state_manager.rootpath)
        self.removeTopology(name, state_manager.name)
    # add topologies that are new to this state manager
    for name in topologies:
      if name not in existingTopNames:
        self.addNewTopology(state_manager, name)

  for state_manager in self.state_managers:
    # The callback function with the bound
    # state_manager as first variable.
    onTopologiesWatch = partial(on_topologies_watch, state_manager)
    state_manager.get_topologies(onTopologiesWatch)
def getTopologyByClusterRoleEnvironAndName(self, cluster, role, environ, topologyName):
  """
  Find and return the topology given its cluster, environ, topology name, and
  an optional role.
  Raises exception if topology is not found, or more than one are found.
  """
  def matches(t):
    """True when the topology matches all the given coordinates."""
    if t.name != topologyName or t.cluster != cluster or t.environ != environ:
      return False
    # role is optional: when not given, any role matches
    return (not role) or t.execution_state.role == role

  candidates = [t for t in self.topologies if matches(t)]

  if len(candidates) != 1:
    if role is not None:
      raise Exception("Topology not found for {0}, {1}, {2}, {3}".format(
          cluster, role, environ, topologyName))
    else:
      raise Exception("Topology not found for {0}, {1}, {2}".format(
          cluster, environ, topologyName))

  # There is only one topology which is returned.
  return candidates[0]
def getTopologiesForStateLocation(self, name):
  """
  Returns all the topologies for a given state manager.

  :param name: state manager name
  :return: list of matching topologies
  """
  # return a real list: a lazy filter() object can only be iterated once
  # on Python 3, which surprises callers that scan the result repeatedly
  return [t for t in self.topologies if t.state_manager_name == name]
def addNewTopology(self, state_manager, topologyName):
  """
  Adds a topology in the local cache, and sets a watch
  on any changes on the topology.

  :param state_manager: state manager that owns this topology
  :param topologyName: name of the topology to start tracking
  """
  topology = Topology(topologyName, state_manager.name)
  Log.info("Adding new topology: %s, state_manager: %s",
           topologyName, state_manager.name)
  # cache the topology before wiring up watches, so watch callbacks can find it
  self.topologies.append(topology)

  # Register a watch on topology and change
  # the topologyInfo on any new change.
  topology.register_watch(self.setTopologyInfo)

  def on_topology_pplan(data):
    """watch physical plan"""
    Log.info("Watch triggered for topology pplan: " + topologyName)
    topology.set_physical_plan(data)
    if not data:
      Log.debug("No data to be set")

  def on_topology_packing_plan(data):
    """watch packing plan"""
    Log.info("Watch triggered for topology packing plan: " + topologyName)
    topology.set_packing_plan(data)
    if not data:
      Log.debug("No data to be set")

  def on_topology_execution_state(data):
    """watch execution state"""
    Log.info("Watch triggered for topology execution state: " + topologyName)
    topology.set_execution_state(data)
    if not data:
      Log.debug("No data to be set")

  def on_topology_tmaster(data):
    """set tmaster"""
    Log.info("Watch triggered for topology tmaster: " + topologyName)
    topology.set_tmaster(data)
    if not data:
      Log.debug("No data to be set")

  def on_topology_scheduler_location(data):
    """set scheduler location"""
    Log.info("Watch triggered for topology scheduler location: " + topologyName)
    topology.set_scheduler_location(data)
    if not data:
      Log.debug("No data to be set")

  # Set watches on the pplan, execution_state, tmaster and scheduler_location.
  state_manager.get_pplan(topologyName, on_topology_pplan)
  state_manager.get_packing_plan(topologyName, on_topology_packing_plan)
  state_manager.get_execution_state(topologyName, on_topology_execution_state)
  state_manager.get_tmaster(topologyName, on_topology_tmaster)
  state_manager.get_scheduler_location(topologyName, on_topology_scheduler_location)
def removeTopology(self, topology_name, state_manager_name):
  """
  Removes the topology from the local cache.
  """
  remaining = []
  for topology in self.topologies:
    if (topology.name == topology_name
        and topology.state_manager_name == state_manager_name):
      # drop the cached TopologyInfo along with the topology itself
      self.topologyInfos.pop((topology_name, state_manager_name), None)
    else:
      remaining.append(topology)

  self.topologies = remaining
def extract_execution_state(self, topology):
  """
  Returns the representation of execution state that will be returned from Tracker.
  """
  execution_state = topology.execution_state
  release_state = execution_state.release_state

  executionState = {
      "cluster": execution_state.cluster,
      "environ": execution_state.environ,
      "role": execution_state.role,
      "jobname": topology.name,
      "submission_time": execution_state.submission_time,
      "submission_user": execution_state.submission_user,
      "release_username": release_state.release_username,
      "release_tag": release_state.release_tag,
      "release_version": release_state.release_version,
      "has_physical_plan": None,
      "has_tmaster_location": None,
      "has_scheduler_location": None,
      "extra_links": [],
  }

  # expand each configured extra link with its formatted URL for this topology
  for extra_link in self.config.extra_links:
    link = extra_link.copy()
    link["url"] = self.config.get_formatted_url(executionState,
                                                link[EXTRA_LINK_FORMATTER_KEY])
    executionState["extra_links"].append(link)

  return executionState
def extract_scheduler_location(self, topology):
  """
  Returns the representation of scheduler location that will be returned from Tracker.
  """
  schedulerLocation = {
      "name": None,
      "http_endpoint": None,
      "job_page_link": None,
  }

  location = topology.scheduler_location
  if location:
    schedulerLocation["name"] = location.topology_name
    schedulerLocation["http_endpoint"] = location.http_endpoint
    # job_page_link is a repeated field: use the first entry if any, else ""
    if len(location.job_page_link) > 0:
      schedulerLocation["job_page_link"] = location.job_page_link[0]
    else:
      schedulerLocation["job_page_link"] = ""

  return schedulerLocation
def extract_tmaster(self, topology):
  """
  Returns the representation of tmaster that will be returned from Tracker.
  """
  tmaster = topology.tmaster
  if not tmaster:
    # no tmaster known yet: every field is None
    return {
        "name": None,
        "id": None,
        "host": None,
        "controller_port": None,
        "master_port": None,
        "stats_port": None,
    }
  return {
      "name": tmaster.topology_name,
      "id": tmaster.topology_id,
      "host": tmaster.host,
      "controller_port": tmaster.controller_port,
      "master_port": tmaster.master_port,
      "stats_port": tmaster.stats_port,
  }