Dataset columns (type and value/length range):
  id                int32    0 to 252k
  repo              string   length 7 to 55
  path              string   length 4 to 127
  func_name         string   length 1 to 88
  original_string   string   length 75 to 19.8k
  language          string   1 distinct value
  code              string   length 75 to 19.8k
  code_tokens       list
  docstring         string   length 3 to 17.3k
  docstring_tokens  list
  sha               string   length 40
  url               string   length 87 to 242

Example rows (fields in order: id, repo, path, func_name, original_string, language, code, code_tokens, docstring, docstring_tokens, sha, url):
28,500
tensorflow/cleverhans
cleverhans/experimental/certification/optimization.py
Optimization.run_one_step
def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val, penalty_val, learning_rate_val): """Run one step of gradient descent for optimization. Args: eig_init_vec_val: Start value for eigen value computations eig_num_iter_val: Number of iterations to run for eigen computations smooth_val: Value of smoothness parameter penalty_val: Value of penalty for the current step learning_rate_val: Value of learning rate Returns: found_cert: True is negative certificate is found, False otherwise """ # Running step step_feed_dict = {self.eig_init_vec_placeholder: eig_init_vec_val, self.eig_num_iter_placeholder: eig_num_iter_val, self.smooth_placeholder: smooth_val, self.penalty_placeholder: penalty_val, self.learning_rate: learning_rate_val} if self.params['eig_type'] == 'SCIPY': current_eig_vector, self.current_eig_val_estimate = self.get_scipy_eig_vec() step_feed_dict.update({ self.eig_vec_estimate: current_eig_vector }) elif self.params['eig_type'] == 'LZS': step_feed_dict.update({ self.dual_object.m_min_vec_ph: self.dual_object.m_min_vec_estimate }) self.sess.run(self.train_step, feed_dict=step_feed_dict) [ _, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate ] = self.sess.run([ self.proj_step, self.eig_vec_estimate, self.eig_val_estimate ], feed_dict=step_feed_dict) if self.current_step % self.params['print_stats_steps'] == 0: [self.current_total_objective, self.current_unconstrained_objective, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate, self.current_nu] = self.sess.run( [self.total_objective, self.dual_object.unconstrained_objective, self.eig_vec_estimate, self.eig_val_estimate, self.dual_object.nu], feed_dict=step_feed_dict) stats = { 'total_objective': float(self.current_total_objective), 'unconstrained_objective': float(self.current_unconstrained_objective), 'min_eig_val_estimate': float(self.current_eig_val_estimate) } tf.logging.info('Current inner step: %d, optimization stats: %s', self.current_step, stats) if self.params['stats_folder'] is not None: stats = json.dumps(stats) filename = os.path.join(self.params['stats_folder'], str(self.current_step) + '.json') with tf.gfile.Open(filename) as file_f: file_f.write(stats) # Project onto feasible set of dual variables if self.current_step % self.params['projection_steps'] == 0 and self.current_unconstrained_objective < 0: nu = self.sess.run(self.dual_object.nu) dual_feed_dict = { self.dual_object.h_min_vec_ph: self.dual_object.h_min_vec_estimate } _, min_eig_val_h_lz = self.dual_object.get_lanczos_eig(compute_m=False, feed_dict=dual_feed_dict) projected_dual_feed_dict = { self.dual_object.projected_dual.nu: nu, self.dual_object.projected_dual.min_eig_val_h: min_eig_val_h_lz } if self.dual_object.projected_dual.compute_certificate(self.current_step, projected_dual_feed_dict): return True return False
python
def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val, penalty_val, learning_rate_val): """Run one step of gradient descent for optimization. Args: eig_init_vec_val: Start value for eigen value computations eig_num_iter_val: Number of iterations to run for eigen computations smooth_val: Value of smoothness parameter penalty_val: Value of penalty for the current step learning_rate_val: Value of learning rate Returns: found_cert: True is negative certificate is found, False otherwise """ # Running step step_feed_dict = {self.eig_init_vec_placeholder: eig_init_vec_val, self.eig_num_iter_placeholder: eig_num_iter_val, self.smooth_placeholder: smooth_val, self.penalty_placeholder: penalty_val, self.learning_rate: learning_rate_val} if self.params['eig_type'] == 'SCIPY': current_eig_vector, self.current_eig_val_estimate = self.get_scipy_eig_vec() step_feed_dict.update({ self.eig_vec_estimate: current_eig_vector }) elif self.params['eig_type'] == 'LZS': step_feed_dict.update({ self.dual_object.m_min_vec_ph: self.dual_object.m_min_vec_estimate }) self.sess.run(self.train_step, feed_dict=step_feed_dict) [ _, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate ] = self.sess.run([ self.proj_step, self.eig_vec_estimate, self.eig_val_estimate ], feed_dict=step_feed_dict) if self.current_step % self.params['print_stats_steps'] == 0: [self.current_total_objective, self.current_unconstrained_objective, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate, self.current_nu] = self.sess.run( [self.total_objective, self.dual_object.unconstrained_objective, self.eig_vec_estimate, self.eig_val_estimate, self.dual_object.nu], feed_dict=step_feed_dict) stats = { 'total_objective': float(self.current_total_objective), 'unconstrained_objective': float(self.current_unconstrained_objective), 'min_eig_val_estimate': float(self.current_eig_val_estimate) } tf.logging.info('Current inner step: %d, optimization stats: %s', self.current_step, stats) if self.params['stats_folder'] is not None: stats = json.dumps(stats) filename = os.path.join(self.params['stats_folder'], str(self.current_step) + '.json') with tf.gfile.Open(filename) as file_f: file_f.write(stats) # Project onto feasible set of dual variables if self.current_step % self.params['projection_steps'] == 0 and self.current_unconstrained_objective < 0: nu = self.sess.run(self.dual_object.nu) dual_feed_dict = { self.dual_object.h_min_vec_ph: self.dual_object.h_min_vec_estimate } _, min_eig_val_h_lz = self.dual_object.get_lanczos_eig(compute_m=False, feed_dict=dual_feed_dict) projected_dual_feed_dict = { self.dual_object.projected_dual.nu: nu, self.dual_object.projected_dual.min_eig_val_h: min_eig_val_h_lz } if self.dual_object.projected_dual.compute_certificate(self.current_step, projected_dual_feed_dict): return True return False
[ "def", "run_one_step", "(", "self", ",", "eig_init_vec_val", ",", "eig_num_iter_val", ",", "smooth_val", ",", "penalty_val", ",", "learning_rate_val", ")", ":", "# Running step", "step_feed_dict", "=", "{", "self", ".", "eig_init_vec_placeholder", ":", "eig_init_vec_val", ",", "self", ".", "eig_num_iter_placeholder", ":", "eig_num_iter_val", ",", "self", ".", "smooth_placeholder", ":", "smooth_val", ",", "self", ".", "penalty_placeholder", ":", "penalty_val", ",", "self", ".", "learning_rate", ":", "learning_rate_val", "}", "if", "self", ".", "params", "[", "'eig_type'", "]", "==", "'SCIPY'", ":", "current_eig_vector", ",", "self", ".", "current_eig_val_estimate", "=", "self", ".", "get_scipy_eig_vec", "(", ")", "step_feed_dict", ".", "update", "(", "{", "self", ".", "eig_vec_estimate", ":", "current_eig_vector", "}", ")", "elif", "self", ".", "params", "[", "'eig_type'", "]", "==", "'LZS'", ":", "step_feed_dict", ".", "update", "(", "{", "self", ".", "dual_object", ".", "m_min_vec_ph", ":", "self", ".", "dual_object", ".", "m_min_vec_estimate", "}", ")", "self", ".", "sess", ".", "run", "(", "self", ".", "train_step", ",", "feed_dict", "=", "step_feed_dict", ")", "[", "_", ",", "self", ".", "dual_object", ".", "m_min_vec_estimate", ",", "self", ".", "current_eig_val_estimate", "]", "=", "self", ".", "sess", ".", "run", "(", "[", "self", ".", "proj_step", ",", "self", ".", "eig_vec_estimate", ",", "self", ".", "eig_val_estimate", "]", ",", "feed_dict", "=", "step_feed_dict", ")", "if", "self", ".", "current_step", "%", "self", ".", "params", "[", "'print_stats_steps'", "]", "==", "0", ":", "[", "self", ".", "current_total_objective", ",", "self", ".", "current_unconstrained_objective", ",", "self", ".", "dual_object", ".", "m_min_vec_estimate", ",", "self", ".", "current_eig_val_estimate", ",", "self", ".", "current_nu", "]", "=", "self", ".", "sess", ".", "run", "(", "[", "self", ".", "total_objective", ",", "self", ".", "dual_object", ".", "unconstrained_objective", ",", "self", ".", "eig_vec_estimate", ",", "self", ".", "eig_val_estimate", ",", "self", ".", "dual_object", ".", "nu", "]", ",", "feed_dict", "=", "step_feed_dict", ")", "stats", "=", "{", "'total_objective'", ":", "float", "(", "self", ".", "current_total_objective", ")", ",", "'unconstrained_objective'", ":", "float", "(", "self", ".", "current_unconstrained_objective", ")", ",", "'min_eig_val_estimate'", ":", "float", "(", "self", ".", "current_eig_val_estimate", ")", "}", "tf", ".", "logging", ".", "info", "(", "'Current inner step: %d, optimization stats: %s'", ",", "self", ".", "current_step", ",", "stats", ")", "if", "self", ".", "params", "[", "'stats_folder'", "]", "is", "not", "None", ":", "stats", "=", "json", ".", "dumps", "(", "stats", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "params", "[", "'stats_folder'", "]", ",", "str", "(", "self", ".", "current_step", ")", "+", "'.json'", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "filename", ")", "as", "file_f", ":", "file_f", ".", "write", "(", "stats", ")", "# Project onto feasible set of dual variables", "if", "self", ".", "current_step", "%", "self", ".", "params", "[", "'projection_steps'", "]", "==", "0", "and", "self", ".", "current_unconstrained_objective", "<", "0", ":", "nu", "=", "self", ".", "sess", ".", "run", "(", "self", ".", "dual_object", ".", "nu", ")", "dual_feed_dict", "=", "{", "self", ".", "dual_object", ".", "h_min_vec_ph", ":", "self", ".", "dual_object", ".", "h_min_vec_estimate", "}", "_", ",", 
"min_eig_val_h_lz", "=", "self", ".", "dual_object", ".", "get_lanczos_eig", "(", "compute_m", "=", "False", ",", "feed_dict", "=", "dual_feed_dict", ")", "projected_dual_feed_dict", "=", "{", "self", ".", "dual_object", ".", "projected_dual", ".", "nu", ":", "nu", ",", "self", ".", "dual_object", ".", "projected_dual", ".", "min_eig_val_h", ":", "min_eig_val_h_lz", "}", "if", "self", ".", "dual_object", ".", "projected_dual", ".", "compute_certificate", "(", "self", ".", "current_step", ",", "projected_dual_feed_dict", ")", ":", "return", "True", "return", "False" ]
Run one step of gradient descent for optimization. Args: eig_init_vec_val: Start value for eigen value computations eig_num_iter_val: Number of iterations to run for eigen computations smooth_val: Value of smoothness parameter penalty_val: Value of penalty for the current step learning_rate_val: Value of learning rate Returns: found_cert: True is negative certificate is found, False otherwise
[ "Run", "one", "step", "of", "gradient", "descent", "for", "optimization", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/optimization.py#L214-L296
28,501
tensorflow/cleverhans
cleverhans/experimental/certification/optimization.py
Optimization.run_optimization
def run_optimization(self): """Run the optimization, call run_one_step with suitable placeholders. Returns: True if certificate is found False otherwise """ penalty_val = self.params['init_penalty'] # Don't use smoothing initially - very inaccurate for large dimension self.smooth_on = False smooth_val = 0 learning_rate_val = self.params['init_learning_rate'] self.current_outer_step = 1 while self.current_outer_step <= self.params['outer_num_steps']: tf.logging.info('Running outer step %d with penalty %f', self.current_outer_step, penalty_val) # Running inner loop of optimization with current_smooth_val, # current_penalty as smoothness parameters and penalty respectively self.current_step = 0 # Run first step with random eig initialization and large number of steps found_cert = self.run_one_step( self.dual_object.m_min_vec_estimate, self.params['large_eig_num_steps'], smooth_val, penalty_val, learning_rate_val) if found_cert: return True while self.current_step < self.params['inner_num_steps']: self.current_step = self.current_step + 1 found_cert = self.run_one_step(self.dual_object.m_min_vec_estimate, self.params['small_eig_num_steps'], smooth_val, penalty_val, learning_rate_val) if found_cert: return True # Update penalty only if it looks like current objective is optimizes if self.current_total_objective < UPDATE_PARAM_CONSTANT: penalty_val = penalty_val * self.params['beta'] learning_rate_val = learning_rate_val*self.params['learning_rate_decay'] else: # To get more accurate gradient estimate self.params['small_eig_num_steps'] = ( 1.5 * self.params['small_eig_num_steps']) # If eigen values seem small enough, turn on smoothing # useful only when performing full eigen decomposition if np.abs(self.current_eig_val_estimate) < 0.01: smooth_val = self.params['smoothness_parameter'] self.current_outer_step = self.current_outer_step + 1 return False
python
def run_optimization(self): """Run the optimization, call run_one_step with suitable placeholders. Returns: True if certificate is found False otherwise """ penalty_val = self.params['init_penalty'] # Don't use smoothing initially - very inaccurate for large dimension self.smooth_on = False smooth_val = 0 learning_rate_val = self.params['init_learning_rate'] self.current_outer_step = 1 while self.current_outer_step <= self.params['outer_num_steps']: tf.logging.info('Running outer step %d with penalty %f', self.current_outer_step, penalty_val) # Running inner loop of optimization with current_smooth_val, # current_penalty as smoothness parameters and penalty respectively self.current_step = 0 # Run first step with random eig initialization and large number of steps found_cert = self.run_one_step( self.dual_object.m_min_vec_estimate, self.params['large_eig_num_steps'], smooth_val, penalty_val, learning_rate_val) if found_cert: return True while self.current_step < self.params['inner_num_steps']: self.current_step = self.current_step + 1 found_cert = self.run_one_step(self.dual_object.m_min_vec_estimate, self.params['small_eig_num_steps'], smooth_val, penalty_val, learning_rate_val) if found_cert: return True # Update penalty only if it looks like current objective is optimizes if self.current_total_objective < UPDATE_PARAM_CONSTANT: penalty_val = penalty_val * self.params['beta'] learning_rate_val = learning_rate_val*self.params['learning_rate_decay'] else: # To get more accurate gradient estimate self.params['small_eig_num_steps'] = ( 1.5 * self.params['small_eig_num_steps']) # If eigen values seem small enough, turn on smoothing # useful only when performing full eigen decomposition if np.abs(self.current_eig_val_estimate) < 0.01: smooth_val = self.params['smoothness_parameter'] self.current_outer_step = self.current_outer_step + 1 return False
[ "def", "run_optimization", "(", "self", ")", ":", "penalty_val", "=", "self", ".", "params", "[", "'init_penalty'", "]", "# Don't use smoothing initially - very inaccurate for large dimension", "self", ".", "smooth_on", "=", "False", "smooth_val", "=", "0", "learning_rate_val", "=", "self", ".", "params", "[", "'init_learning_rate'", "]", "self", ".", "current_outer_step", "=", "1", "while", "self", ".", "current_outer_step", "<=", "self", ".", "params", "[", "'outer_num_steps'", "]", ":", "tf", ".", "logging", ".", "info", "(", "'Running outer step %d with penalty %f'", ",", "self", ".", "current_outer_step", ",", "penalty_val", ")", "# Running inner loop of optimization with current_smooth_val,", "# current_penalty as smoothness parameters and penalty respectively", "self", ".", "current_step", "=", "0", "# Run first step with random eig initialization and large number of steps", "found_cert", "=", "self", ".", "run_one_step", "(", "self", ".", "dual_object", ".", "m_min_vec_estimate", ",", "self", ".", "params", "[", "'large_eig_num_steps'", "]", ",", "smooth_val", ",", "penalty_val", ",", "learning_rate_val", ")", "if", "found_cert", ":", "return", "True", "while", "self", ".", "current_step", "<", "self", ".", "params", "[", "'inner_num_steps'", "]", ":", "self", ".", "current_step", "=", "self", ".", "current_step", "+", "1", "found_cert", "=", "self", ".", "run_one_step", "(", "self", ".", "dual_object", ".", "m_min_vec_estimate", ",", "self", ".", "params", "[", "'small_eig_num_steps'", "]", ",", "smooth_val", ",", "penalty_val", ",", "learning_rate_val", ")", "if", "found_cert", ":", "return", "True", "# Update penalty only if it looks like current objective is optimizes", "if", "self", ".", "current_total_objective", "<", "UPDATE_PARAM_CONSTANT", ":", "penalty_val", "=", "penalty_val", "*", "self", ".", "params", "[", "'beta'", "]", "learning_rate_val", "=", "learning_rate_val", "*", "self", ".", "params", "[", "'learning_rate_decay'", "]", "else", ":", "# To get more accurate gradient estimate", "self", ".", "params", "[", "'small_eig_num_steps'", "]", "=", "(", "1.5", "*", "self", ".", "params", "[", "'small_eig_num_steps'", "]", ")", "# If eigen values seem small enough, turn on smoothing", "# useful only when performing full eigen decomposition", "if", "np", ".", "abs", "(", "self", ".", "current_eig_val_estimate", ")", "<", "0.01", ":", "smooth_val", "=", "self", ".", "params", "[", "'smoothness_parameter'", "]", "self", ".", "current_outer_step", "=", "self", ".", "current_outer_step", "+", "1", "return", "False" ]
Run the optimization, call run_one_step with suitable placeholders. Returns: True if certificate is found False otherwise
[ "Run", "the", "optimization", "call", "run_one_step", "with", "suitable", "placeholders", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/optimization.py#L298-L347
28,502
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/sample_targeted_attacks/iter_target_class/attack_iter_target_class.py
load_target_class
def load_target_class(input_dir): """Loads target classes.""" with tf.gfile.Open(os.path.join(input_dir, 'target_class.csv')) as f: return {row[0]: int(row[1]) for row in csv.reader(f) if len(row) >= 2}
python
def load_target_class(input_dir): """Loads target classes.""" with tf.gfile.Open(os.path.join(input_dir, 'target_class.csv')) as f: return {row[0]: int(row[1]) for row in csv.reader(f) if len(row) >= 2}
[ "def", "load_target_class", "(", "input_dir", ")", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "os", ".", "path", ".", "join", "(", "input_dir", ",", "'target_class.csv'", ")", ")", "as", "f", ":", "return", "{", "row", "[", "0", "]", ":", "int", "(", "row", "[", "1", "]", ")", "for", "row", "in", "csv", ".", "reader", "(", "f", ")", "if", "len", "(", "row", ")", ">=", "2", "}" ]
Loads target classes.
[ "Loads", "target", "classes", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/sample_targeted_attacks/iter_target_class/attack_iter_target_class.py#L53-L56
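As an illustrative aside (not part of the dataset record above): `load_target_class` reads a two-column CSV mapping image filenames to integer target classes. A minimal usage sketch, where the directory path and CSV contents are assumptions for illustration only:

```python
# Hypothetical usage of load_target_class as defined in the record above.
# target_class.csv is assumed to hold rows like "image1.png,532".
target_class_by_image = load_target_class('/tmp/attack_input')
print(target_class_by_image.get('image1.png'))  # -> 532 if that row exists
```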
28,503
tensorflow/cleverhans
cleverhans/utils_pytorch.py
clip_eta
def clip_eta(eta, ord, eps): """ PyTorch implementation of the clip_eta in utils_tf. :param eta: Tensor :param ord: np.inf, 1, or 2 :param eps: float """ if ord not in [np.inf, 1, 2]: raise ValueError('ord must be np.inf, 1, or 2.') avoid_zero_div = torch.tensor(1e-12, dtype=eta.dtype, device=eta.device) reduc_ind = list(range(1, len(eta.size()))) if ord == np.inf: eta = torch.clamp(eta, -eps, eps) else: if ord == 1: # TODO # raise NotImplementedError("L1 clip is not implemented.") norm = torch.max( avoid_zero_div, torch.sum(torch.abs(eta), dim=reduc_ind, keepdim=True) ) elif ord == 2: norm = torch.sqrt(torch.max( avoid_zero_div, torch.sum(eta ** 2, dim=reduc_ind, keepdim=True) )) factor = torch.min( torch.tensor(1., dtype=eta.dtype, device=eta.device), eps / norm ) eta *= factor return eta
python
def clip_eta(eta, ord, eps): """ PyTorch implementation of the clip_eta in utils_tf. :param eta: Tensor :param ord: np.inf, 1, or 2 :param eps: float """ if ord not in [np.inf, 1, 2]: raise ValueError('ord must be np.inf, 1, or 2.') avoid_zero_div = torch.tensor(1e-12, dtype=eta.dtype, device=eta.device) reduc_ind = list(range(1, len(eta.size()))) if ord == np.inf: eta = torch.clamp(eta, -eps, eps) else: if ord == 1: # TODO # raise NotImplementedError("L1 clip is not implemented.") norm = torch.max( avoid_zero_div, torch.sum(torch.abs(eta), dim=reduc_ind, keepdim=True) ) elif ord == 2: norm = torch.sqrt(torch.max( avoid_zero_div, torch.sum(eta ** 2, dim=reduc_ind, keepdim=True) )) factor = torch.min( torch.tensor(1., dtype=eta.dtype, device=eta.device), eps / norm ) eta *= factor return eta
[ "def", "clip_eta", "(", "eta", ",", "ord", ",", "eps", ")", ":", "if", "ord", "not", "in", "[", "np", ".", "inf", ",", "1", ",", "2", "]", ":", "raise", "ValueError", "(", "'ord must be np.inf, 1, or 2.'", ")", "avoid_zero_div", "=", "torch", ".", "tensor", "(", "1e-12", ",", "dtype", "=", "eta", ".", "dtype", ",", "device", "=", "eta", ".", "device", ")", "reduc_ind", "=", "list", "(", "range", "(", "1", ",", "len", "(", "eta", ".", "size", "(", ")", ")", ")", ")", "if", "ord", "==", "np", ".", "inf", ":", "eta", "=", "torch", ".", "clamp", "(", "eta", ",", "-", "eps", ",", "eps", ")", "else", ":", "if", "ord", "==", "1", ":", "# TODO", "# raise NotImplementedError(\"L1 clip is not implemented.\")", "norm", "=", "torch", ".", "max", "(", "avoid_zero_div", ",", "torch", ".", "sum", "(", "torch", ".", "abs", "(", "eta", ")", ",", "dim", "=", "reduc_ind", ",", "keepdim", "=", "True", ")", ")", "elif", "ord", "==", "2", ":", "norm", "=", "torch", ".", "sqrt", "(", "torch", ".", "max", "(", "avoid_zero_div", ",", "torch", ".", "sum", "(", "eta", "**", "2", ",", "dim", "=", "reduc_ind", ",", "keepdim", "=", "True", ")", ")", ")", "factor", "=", "torch", ".", "min", "(", "torch", ".", "tensor", "(", "1.", ",", "dtype", "=", "eta", ".", "dtype", ",", "device", "=", "eta", ".", "device", ")", ",", "eps", "/", "norm", ")", "eta", "*=", "factor", "return", "eta" ]
PyTorch implementation of the clip_eta in utils_tf. :param eta: Tensor :param ord: np.inf, 1, or 2 :param eps: float
[ "PyTorch", "implementation", "of", "the", "clip_eta", "in", "utils_tf", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_pytorch.py#L97-L130
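For context, a minimal usage sketch of the `clip_eta` function shown in the record above, assuming PyTorch and NumPy are available; the tensor shape and epsilon values are illustrative only:

```python
import numpy as np
import torch

# Hypothetical example: project a batch of random perturbations onto an
# L-infinity ball of radius 0.1 and an L2 ball of radius 0.5.
eta = torch.randn(4, 3, 32, 32)
eta_inf = clip_eta(eta.clone(), np.inf, 0.1)  # element-wise clamp to [-0.1, 0.1]
eta_l2 = clip_eta(eta.clone(), 2, 0.5)        # per-sample L2 norm scaled to <= 0.5
print(eta_inf.abs().max(), eta_l2.view(4, -1).norm(dim=1).max())
```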
28,504
tensorflow/cleverhans
cleverhans/attacks/elastic_net_method.py
EAD.attack
def attack(self, imgs, targets): """ Perform the EAD attack on the given instance for the given targets. If self.targeted is true, then the targets represents the target labels If self.targeted is false, then targets are the original class labels """ batch_size = self.batch_size r = [] for i in range(0, len(imgs) // batch_size): _logger.debug( ("Running EAD attack on instance %s of %s", i * batch_size, len(imgs))) r.extend( self.attack_batch( imgs[i * batch_size:(i + 1) * batch_size], targets[i * batch_size:(i + 1) * batch_size])) if len(imgs) % batch_size != 0: last_elements = len(imgs) - (len(imgs) % batch_size) _logger.debug( ("Running EAD attack on instance %s of %s", last_elements, len(imgs))) temp_imgs = np.zeros((batch_size, ) + imgs.shape[2:]) temp_targets = np.zeros((batch_size, ) + targets.shape[2:]) temp_imgs[:(len(imgs) % batch_size)] = imgs[last_elements:] temp_targets[:(len(imgs) % batch_size)] = targets[last_elements:] temp_data = self.attack_batch(temp_imgs, temp_targets) r.extend(temp_data[:(len(imgs) % batch_size)], targets[last_elements:]) return np.array(r)
python
def attack(self, imgs, targets): """ Perform the EAD attack on the given instance for the given targets. If self.targeted is true, then the targets represents the target labels If self.targeted is false, then targets are the original class labels """ batch_size = self.batch_size r = [] for i in range(0, len(imgs) // batch_size): _logger.debug( ("Running EAD attack on instance %s of %s", i * batch_size, len(imgs))) r.extend( self.attack_batch( imgs[i * batch_size:(i + 1) * batch_size], targets[i * batch_size:(i + 1) * batch_size])) if len(imgs) % batch_size != 0: last_elements = len(imgs) - (len(imgs) % batch_size) _logger.debug( ("Running EAD attack on instance %s of %s", last_elements, len(imgs))) temp_imgs = np.zeros((batch_size, ) + imgs.shape[2:]) temp_targets = np.zeros((batch_size, ) + targets.shape[2:]) temp_imgs[:(len(imgs) % batch_size)] = imgs[last_elements:] temp_targets[:(len(imgs) % batch_size)] = targets[last_elements:] temp_data = self.attack_batch(temp_imgs, temp_targets) r.extend(temp_data[:(len(imgs) % batch_size)], targets[last_elements:]) return np.array(r)
[ "def", "attack", "(", "self", ",", "imgs", ",", "targets", ")", ":", "batch_size", "=", "self", ".", "batch_size", "r", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "imgs", ")", "//", "batch_size", ")", ":", "_logger", ".", "debug", "(", "(", "\"Running EAD attack on instance %s of %s\"", ",", "i", "*", "batch_size", ",", "len", "(", "imgs", ")", ")", ")", "r", ".", "extend", "(", "self", ".", "attack_batch", "(", "imgs", "[", "i", "*", "batch_size", ":", "(", "i", "+", "1", ")", "*", "batch_size", "]", ",", "targets", "[", "i", "*", "batch_size", ":", "(", "i", "+", "1", ")", "*", "batch_size", "]", ")", ")", "if", "len", "(", "imgs", ")", "%", "batch_size", "!=", "0", ":", "last_elements", "=", "len", "(", "imgs", ")", "-", "(", "len", "(", "imgs", ")", "%", "batch_size", ")", "_logger", ".", "debug", "(", "(", "\"Running EAD attack on instance %s of %s\"", ",", "last_elements", ",", "len", "(", "imgs", ")", ")", ")", "temp_imgs", "=", "np", ".", "zeros", "(", "(", "batch_size", ",", ")", "+", "imgs", ".", "shape", "[", "2", ":", "]", ")", "temp_targets", "=", "np", ".", "zeros", "(", "(", "batch_size", ",", ")", "+", "targets", ".", "shape", "[", "2", ":", "]", ")", "temp_imgs", "[", ":", "(", "len", "(", "imgs", ")", "%", "batch_size", ")", "]", "=", "imgs", "[", "last_elements", ":", "]", "temp_targets", "[", ":", "(", "len", "(", "imgs", ")", "%", "batch_size", ")", "]", "=", "targets", "[", "last_elements", ":", "]", "temp_data", "=", "self", ".", "attack_batch", "(", "temp_imgs", ",", "temp_targets", ")", "r", ".", "extend", "(", "temp_data", "[", ":", "(", "len", "(", "imgs", ")", "%", "batch_size", ")", "]", ",", "targets", "[", "last_elements", ":", "]", ")", "return", "np", ".", "array", "(", "r", ")" ]
Perform the EAD attack on the given instance for the given targets. If self.targeted is true, then the targets represents the target labels If self.targeted is false, then targets are the original class labels
[ "Perform", "the", "EAD", "attack", "on", "the", "given", "instance", "for", "the", "given", "targets", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/elastic_net_method.py#L374-L404
28,505
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/validation_tool/validate_submission.py
main
def main(args): """ Validates the submission. """ print_in_box('Validating submission ' + args.submission_filename) random.seed() temp_dir = args.temp_dir delete_temp_dir = False if not temp_dir: temp_dir = tempfile.mkdtemp() logging.info('Created temporary directory: %s', temp_dir) delete_temp_dir = True validator = submission_validator_lib.SubmissionValidator(temp_dir, args.use_gpu) if validator.validate_submission(args.submission_filename, args.submission_type): print_in_box('Submission is VALID!') else: print_in_box('Submission is INVALID, see log messages for details') if delete_temp_dir: logging.info('Deleting temporary directory: %s', temp_dir) subprocess.call(['rm', '-rf', temp_dir])
python
def main(args): """ Validates the submission. """ print_in_box('Validating submission ' + args.submission_filename) random.seed() temp_dir = args.temp_dir delete_temp_dir = False if not temp_dir: temp_dir = tempfile.mkdtemp() logging.info('Created temporary directory: %s', temp_dir) delete_temp_dir = True validator = submission_validator_lib.SubmissionValidator(temp_dir, args.use_gpu) if validator.validate_submission(args.submission_filename, args.submission_type): print_in_box('Submission is VALID!') else: print_in_box('Submission is INVALID, see log messages for details') if delete_temp_dir: logging.info('Deleting temporary directory: %s', temp_dir) subprocess.call(['rm', '-rf', temp_dir])
[ "def", "main", "(", "args", ")", ":", "print_in_box", "(", "'Validating submission '", "+", "args", ".", "submission_filename", ")", "random", ".", "seed", "(", ")", "temp_dir", "=", "args", ".", "temp_dir", "delete_temp_dir", "=", "False", "if", "not", "temp_dir", ":", "temp_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "logging", ".", "info", "(", "'Created temporary directory: %s'", ",", "temp_dir", ")", "delete_temp_dir", "=", "True", "validator", "=", "submission_validator_lib", ".", "SubmissionValidator", "(", "temp_dir", ",", "args", ".", "use_gpu", ")", "if", "validator", ".", "validate_submission", "(", "args", ".", "submission_filename", ",", "args", ".", "submission_type", ")", ":", "print_in_box", "(", "'Submission is VALID!'", ")", "else", ":", "print_in_box", "(", "'Submission is INVALID, see log messages for details'", ")", "if", "delete_temp_dir", ":", "logging", ".", "info", "(", "'Deleting temporary directory: %s'", ",", "temp_dir", ")", "subprocess", ".", "call", "(", "[", "'rm'", ",", "'-rf'", ",", "temp_dir", "]", ")" ]
Validates the submission.
[ "Validates", "the", "submission", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/validation_tool/validate_submission.py#L41-L62
28,506
tensorflow/cleverhans
examples/multigpu_advtrain/attacks_multigpu.py
MadryEtAlMultiGPU.attack
def attack(self, x, y_p, **kwargs): """ This method creates a symoblic graph of the MadryEtAl attack on multiple GPUs. The graph is created on the first n GPUs. Stop gradient is needed to get the speed-up. This prevents us from being able to back-prop through the attack. :param x: A tensor with the input image. :param y_p: Ground truth label or predicted label. :return: Two lists containing the input and output tensors of each GPU. """ inputs = [] outputs = [] # Create the initial random perturbation device_name = '/gpu:0' self.model.set_device(device_name) with tf.device(device_name): with tf.variable_scope('init_rand'): if self.rand_init: eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps) eta = clip_eta(eta, self.ord, self.eps) eta = tf.stop_gradient(eta) else: eta = tf.zeros_like(x) # TODO: Break the graph only nGPU times instead of nb_iter times. # The current implementation by the time an adversarial example is # used for training, the weights of the model have changed nb_iter # times. This can cause slower convergence compared to the single GPU # adversarial training. for i in range(self.nb_iter): # Create the graph for i'th step of attack inputs += [OrderedDict()] outputs += [OrderedDict()] device_name = x.device self.model.set_device(device_name) with tf.device(device_name): with tf.variable_scope('step%d' % i): if i > 0: # Clone the variables to separate the graph of 2 GPUs x = clone_variable('x', x) y_p = clone_variable('y_p', y_p) eta = clone_variable('eta', eta) inputs[i]['x'] = x inputs[i]['y_p'] = y_p outputs[i]['x'] = x outputs[i]['y_p'] = y_p inputs[i]['eta'] = eta eta = self.attack_single_step(x, eta, y_p) if i < self.nb_iter-1: outputs[i]['eta'] = eta else: # adv_x, not eta is the output of the last step adv_x = x + eta if (self.clip_min is not None and self.clip_max is not None): adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max) adv_x = tf.stop_gradient(adv_x, name='adv_x') outputs[i]['adv_x'] = adv_x return inputs, outputs
python
def attack(self, x, y_p, **kwargs): """ This method creates a symoblic graph of the MadryEtAl attack on multiple GPUs. The graph is created on the first n GPUs. Stop gradient is needed to get the speed-up. This prevents us from being able to back-prop through the attack. :param x: A tensor with the input image. :param y_p: Ground truth label or predicted label. :return: Two lists containing the input and output tensors of each GPU. """ inputs = [] outputs = [] # Create the initial random perturbation device_name = '/gpu:0' self.model.set_device(device_name) with tf.device(device_name): with tf.variable_scope('init_rand'): if self.rand_init: eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps) eta = clip_eta(eta, self.ord, self.eps) eta = tf.stop_gradient(eta) else: eta = tf.zeros_like(x) # TODO: Break the graph only nGPU times instead of nb_iter times. # The current implementation by the time an adversarial example is # used for training, the weights of the model have changed nb_iter # times. This can cause slower convergence compared to the single GPU # adversarial training. for i in range(self.nb_iter): # Create the graph for i'th step of attack inputs += [OrderedDict()] outputs += [OrderedDict()] device_name = x.device self.model.set_device(device_name) with tf.device(device_name): with tf.variable_scope('step%d' % i): if i > 0: # Clone the variables to separate the graph of 2 GPUs x = clone_variable('x', x) y_p = clone_variable('y_p', y_p) eta = clone_variable('eta', eta) inputs[i]['x'] = x inputs[i]['y_p'] = y_p outputs[i]['x'] = x outputs[i]['y_p'] = y_p inputs[i]['eta'] = eta eta = self.attack_single_step(x, eta, y_p) if i < self.nb_iter-1: outputs[i]['eta'] = eta else: # adv_x, not eta is the output of the last step adv_x = x + eta if (self.clip_min is not None and self.clip_max is not None): adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max) adv_x = tf.stop_gradient(adv_x, name='adv_x') outputs[i]['adv_x'] = adv_x return inputs, outputs
[ "def", "attack", "(", "self", ",", "x", ",", "y_p", ",", "*", "*", "kwargs", ")", ":", "inputs", "=", "[", "]", "outputs", "=", "[", "]", "# Create the initial random perturbation", "device_name", "=", "'/gpu:0'", "self", ".", "model", ".", "set_device", "(", "device_name", ")", "with", "tf", ".", "device", "(", "device_name", ")", ":", "with", "tf", ".", "variable_scope", "(", "'init_rand'", ")", ":", "if", "self", ".", "rand_init", ":", "eta", "=", "tf", ".", "random_uniform", "(", "tf", ".", "shape", "(", "x", ")", ",", "-", "self", ".", "eps", ",", "self", ".", "eps", ")", "eta", "=", "clip_eta", "(", "eta", ",", "self", ".", "ord", ",", "self", ".", "eps", ")", "eta", "=", "tf", ".", "stop_gradient", "(", "eta", ")", "else", ":", "eta", "=", "tf", ".", "zeros_like", "(", "x", ")", "# TODO: Break the graph only nGPU times instead of nb_iter times.", "# The current implementation by the time an adversarial example is", "# used for training, the weights of the model have changed nb_iter", "# times. This can cause slower convergence compared to the single GPU", "# adversarial training.", "for", "i", "in", "range", "(", "self", ".", "nb_iter", ")", ":", "# Create the graph for i'th step of attack", "inputs", "+=", "[", "OrderedDict", "(", ")", "]", "outputs", "+=", "[", "OrderedDict", "(", ")", "]", "device_name", "=", "x", ".", "device", "self", ".", "model", ".", "set_device", "(", "device_name", ")", "with", "tf", ".", "device", "(", "device_name", ")", ":", "with", "tf", ".", "variable_scope", "(", "'step%d'", "%", "i", ")", ":", "if", "i", ">", "0", ":", "# Clone the variables to separate the graph of 2 GPUs", "x", "=", "clone_variable", "(", "'x'", ",", "x", ")", "y_p", "=", "clone_variable", "(", "'y_p'", ",", "y_p", ")", "eta", "=", "clone_variable", "(", "'eta'", ",", "eta", ")", "inputs", "[", "i", "]", "[", "'x'", "]", "=", "x", "inputs", "[", "i", "]", "[", "'y_p'", "]", "=", "y_p", "outputs", "[", "i", "]", "[", "'x'", "]", "=", "x", "outputs", "[", "i", "]", "[", "'y_p'", "]", "=", "y_p", "inputs", "[", "i", "]", "[", "'eta'", "]", "=", "eta", "eta", "=", "self", ".", "attack_single_step", "(", "x", ",", "eta", ",", "y_p", ")", "if", "i", "<", "self", ".", "nb_iter", "-", "1", ":", "outputs", "[", "i", "]", "[", "'eta'", "]", "=", "eta", "else", ":", "# adv_x, not eta is the output of the last step", "adv_x", "=", "x", "+", "eta", "if", "(", "self", ".", "clip_min", "is", "not", "None", "and", "self", ".", "clip_max", "is", "not", "None", ")", ":", "adv_x", "=", "tf", ".", "clip_by_value", "(", "adv_x", ",", "self", ".", "clip_min", ",", "self", ".", "clip_max", ")", "adv_x", "=", "tf", ".", "stop_gradient", "(", "adv_x", ",", "name", "=", "'adv_x'", ")", "outputs", "[", "i", "]", "[", "'adv_x'", "]", "=", "adv_x", "return", "inputs", ",", "outputs" ]
This method creates a symoblic graph of the MadryEtAl attack on multiple GPUs. The graph is created on the first n GPUs. Stop gradient is needed to get the speed-up. This prevents us from being able to back-prop through the attack. :param x: A tensor with the input image. :param y_p: Ground truth label or predicted label. :return: Two lists containing the input and output tensors of each GPU.
[ "This", "method", "creates", "a", "symoblic", "graph", "of", "the", "MadryEtAl", "attack", "on", "multiple", "GPUs", ".", "The", "graph", "is", "created", "on", "the", "first", "n", "GPUs", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/attacks_multigpu.py#L42-L106
28,507
tensorflow/cleverhans
examples/multigpu_advtrain/attacks_multigpu.py
MadryEtAlMultiGPU.generate_np
def generate_np(self, x_val, **kwargs): """ Facilitates testing this attack. """ _, feedable, _feedable_types, hash_key = self.construct_variables(kwargs) if hash_key not in self.graphs: with tf.variable_scope(None, 'attack_%d' % len(self.graphs)): # x is a special placeholder we always want to have with tf.device('/gpu:0'): x = tf.placeholder(tf.float32, shape=x_val.shape, name='x') inputs, outputs = self.generate(x, **kwargs) from runner import RunnerMultiGPU runner = RunnerMultiGPU(inputs, outputs, sess=self.sess) self.graphs[hash_key] = runner runner = self.graphs[hash_key] feed_dict = {'x': x_val} for name in feedable: feed_dict[name] = feedable[name] fvals = runner.run(feed_dict) while not runner.is_finished(): fvals = runner.run() return fvals['adv_x']
python
def generate_np(self, x_val, **kwargs): """ Facilitates testing this attack. """ _, feedable, _feedable_types, hash_key = self.construct_variables(kwargs) if hash_key not in self.graphs: with tf.variable_scope(None, 'attack_%d' % len(self.graphs)): # x is a special placeholder we always want to have with tf.device('/gpu:0'): x = tf.placeholder(tf.float32, shape=x_val.shape, name='x') inputs, outputs = self.generate(x, **kwargs) from runner import RunnerMultiGPU runner = RunnerMultiGPU(inputs, outputs, sess=self.sess) self.graphs[hash_key] = runner runner = self.graphs[hash_key] feed_dict = {'x': x_val} for name in feedable: feed_dict[name] = feedable[name] fvals = runner.run(feed_dict) while not runner.is_finished(): fvals = runner.run() return fvals['adv_x']
[ "def", "generate_np", "(", "self", ",", "x_val", ",", "*", "*", "kwargs", ")", ":", "_", ",", "feedable", ",", "_feedable_types", ",", "hash_key", "=", "self", ".", "construct_variables", "(", "kwargs", ")", "if", "hash_key", "not", "in", "self", ".", "graphs", ":", "with", "tf", ".", "variable_scope", "(", "None", ",", "'attack_%d'", "%", "len", "(", "self", ".", "graphs", ")", ")", ":", "# x is a special placeholder we always want to have", "with", "tf", ".", "device", "(", "'/gpu:0'", ")", ":", "x", "=", "tf", ".", "placeholder", "(", "tf", ".", "float32", ",", "shape", "=", "x_val", ".", "shape", ",", "name", "=", "'x'", ")", "inputs", ",", "outputs", "=", "self", ".", "generate", "(", "x", ",", "*", "*", "kwargs", ")", "from", "runner", "import", "RunnerMultiGPU", "runner", "=", "RunnerMultiGPU", "(", "inputs", ",", "outputs", ",", "sess", "=", "self", ".", "sess", ")", "self", ".", "graphs", "[", "hash_key", "]", "=", "runner", "runner", "=", "self", ".", "graphs", "[", "hash_key", "]", "feed_dict", "=", "{", "'x'", ":", "x_val", "}", "for", "name", "in", "feedable", ":", "feed_dict", "[", "name", "]", "=", "feedable", "[", "name", "]", "fvals", "=", "runner", ".", "run", "(", "feed_dict", ")", "while", "not", "runner", ".", "is_finished", "(", ")", ":", "fvals", "=", "runner", ".", "run", "(", ")", "return", "fvals", "[", "'adv_x'", "]" ]
Facilitates testing this attack.
[ "Facilitates", "testing", "this", "attack", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/attacks_multigpu.py#L108-L134
28,508
tensorflow/cleverhans
cleverhans/evaluation.py
batch_eval
def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs, batch_size=None, feed=None, args=None): """ A helper function that computes a tensor on numpy inputs by batches. This version uses exactly the tensorflow graph constructed by the caller, so the caller can place specific ops on specific devices to implement model parallelism. Most users probably prefer `batch_eval_multi_worker` which maps a single-device expression to multiple devices in order to evaluate faster by parallelizing across data. :param sess: tf Session to use :param tf_inputs: list of tf Placeholders to feed from the dataset :param tf_outputs: list of tf tensors to calculate :param numpy_inputs: list of numpy arrays defining the dataset :param batch_size: int, batch size to use for evaluation If not specified, this function will try to guess the batch size, but might get an out of memory error or run the model with an unsupported batch size, etc. :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :param args: dict or argparse `Namespace` object. Deprecated and included only for backwards compatibility. Should contain `batch_size` """ if args is not None: warnings.warn("`args` is deprecated and will be removed on or " "after 2019-03-09. Pass `batch_size` directly.") if "batch_size" in args: assert batch_size is None batch_size = args["batch_size"] if batch_size is None: batch_size = DEFAULT_EXAMPLES_PER_DEVICE n = len(numpy_inputs) assert n > 0 assert n == len(tf_inputs) m = numpy_inputs[0].shape[0] for i in range(1, n): assert numpy_inputs[i].shape[0] == m out = [] for _ in tf_outputs: out.append([]) for start in range(0, m, batch_size): batch = start // batch_size if batch % 100 == 0 and batch > 0: _logger.debug("Batch " + str(batch)) # Compute batch start and end indices start = batch * batch_size end = start + batch_size numpy_input_batches = [numpy_input[start:end] for numpy_input in numpy_inputs] cur_batch_size = numpy_input_batches[0].shape[0] assert cur_batch_size <= batch_size for e in numpy_input_batches: assert e.shape[0] == cur_batch_size feed_dict = dict(zip(tf_inputs, numpy_input_batches)) if feed is not None: feed_dict.update(feed) numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict) for e in numpy_output_batches: assert e.shape[0] == cur_batch_size, e.shape for out_elem, numpy_output_batch in zip(out, numpy_output_batches): out_elem.append(numpy_output_batch) out = [np.concatenate(x, axis=0) for x in out] for e in out: assert e.shape[0] == m, e.shape return out
python
def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs, batch_size=None, feed=None, args=None): """ A helper function that computes a tensor on numpy inputs by batches. This version uses exactly the tensorflow graph constructed by the caller, so the caller can place specific ops on specific devices to implement model parallelism. Most users probably prefer `batch_eval_multi_worker` which maps a single-device expression to multiple devices in order to evaluate faster by parallelizing across data. :param sess: tf Session to use :param tf_inputs: list of tf Placeholders to feed from the dataset :param tf_outputs: list of tf tensors to calculate :param numpy_inputs: list of numpy arrays defining the dataset :param batch_size: int, batch size to use for evaluation If not specified, this function will try to guess the batch size, but might get an out of memory error or run the model with an unsupported batch size, etc. :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :param args: dict or argparse `Namespace` object. Deprecated and included only for backwards compatibility. Should contain `batch_size` """ if args is not None: warnings.warn("`args` is deprecated and will be removed on or " "after 2019-03-09. Pass `batch_size` directly.") if "batch_size" in args: assert batch_size is None batch_size = args["batch_size"] if batch_size is None: batch_size = DEFAULT_EXAMPLES_PER_DEVICE n = len(numpy_inputs) assert n > 0 assert n == len(tf_inputs) m = numpy_inputs[0].shape[0] for i in range(1, n): assert numpy_inputs[i].shape[0] == m out = [] for _ in tf_outputs: out.append([]) for start in range(0, m, batch_size): batch = start // batch_size if batch % 100 == 0 and batch > 0: _logger.debug("Batch " + str(batch)) # Compute batch start and end indices start = batch * batch_size end = start + batch_size numpy_input_batches = [numpy_input[start:end] for numpy_input in numpy_inputs] cur_batch_size = numpy_input_batches[0].shape[0] assert cur_batch_size <= batch_size for e in numpy_input_batches: assert e.shape[0] == cur_batch_size feed_dict = dict(zip(tf_inputs, numpy_input_batches)) if feed is not None: feed_dict.update(feed) numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict) for e in numpy_output_batches: assert e.shape[0] == cur_batch_size, e.shape for out_elem, numpy_output_batch in zip(out, numpy_output_batches): out_elem.append(numpy_output_batch) out = [np.concatenate(x, axis=0) for x in out] for e in out: assert e.shape[0] == m, e.shape return out
[ "def", "batch_eval", "(", "sess", ",", "tf_inputs", ",", "tf_outputs", ",", "numpy_inputs", ",", "batch_size", "=", "None", ",", "feed", "=", "None", ",", "args", "=", "None", ")", ":", "if", "args", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"`args` is deprecated and will be removed on or \"", "\"after 2019-03-09. Pass `batch_size` directly.\"", ")", "if", "\"batch_size\"", "in", "args", ":", "assert", "batch_size", "is", "None", "batch_size", "=", "args", "[", "\"batch_size\"", "]", "if", "batch_size", "is", "None", ":", "batch_size", "=", "DEFAULT_EXAMPLES_PER_DEVICE", "n", "=", "len", "(", "numpy_inputs", ")", "assert", "n", ">", "0", "assert", "n", "==", "len", "(", "tf_inputs", ")", "m", "=", "numpy_inputs", "[", "0", "]", ".", "shape", "[", "0", "]", "for", "i", "in", "range", "(", "1", ",", "n", ")", ":", "assert", "numpy_inputs", "[", "i", "]", ".", "shape", "[", "0", "]", "==", "m", "out", "=", "[", "]", "for", "_", "in", "tf_outputs", ":", "out", ".", "append", "(", "[", "]", ")", "for", "start", "in", "range", "(", "0", ",", "m", ",", "batch_size", ")", ":", "batch", "=", "start", "//", "batch_size", "if", "batch", "%", "100", "==", "0", "and", "batch", ">", "0", ":", "_logger", ".", "debug", "(", "\"Batch \"", "+", "str", "(", "batch", ")", ")", "# Compute batch start and end indices", "start", "=", "batch", "*", "batch_size", "end", "=", "start", "+", "batch_size", "numpy_input_batches", "=", "[", "numpy_input", "[", "start", ":", "end", "]", "for", "numpy_input", "in", "numpy_inputs", "]", "cur_batch_size", "=", "numpy_input_batches", "[", "0", "]", ".", "shape", "[", "0", "]", "assert", "cur_batch_size", "<=", "batch_size", "for", "e", "in", "numpy_input_batches", ":", "assert", "e", ".", "shape", "[", "0", "]", "==", "cur_batch_size", "feed_dict", "=", "dict", "(", "zip", "(", "tf_inputs", ",", "numpy_input_batches", ")", ")", "if", "feed", "is", "not", "None", ":", "feed_dict", ".", "update", "(", "feed", ")", "numpy_output_batches", "=", "sess", ".", "run", "(", "tf_outputs", ",", "feed_dict", "=", "feed_dict", ")", "for", "e", "in", "numpy_output_batches", ":", "assert", "e", ".", "shape", "[", "0", "]", "==", "cur_batch_size", ",", "e", ".", "shape", "for", "out_elem", ",", "numpy_output_batch", "in", "zip", "(", "out", ",", "numpy_output_batches", ")", ":", "out_elem", ".", "append", "(", "numpy_output_batch", ")", "out", "=", "[", "np", ".", "concatenate", "(", "x", ",", "axis", "=", "0", ")", "for", "x", "in", "out", "]", "for", "e", "in", "out", ":", "assert", "e", ".", "shape", "[", "0", "]", "==", "m", ",", "e", ".", "shape", "return", "out" ]
A helper function that computes a tensor on numpy inputs by batches. This version uses exactly the tensorflow graph constructed by the caller, so the caller can place specific ops on specific devices to implement model parallelism. Most users probably prefer `batch_eval_multi_worker` which maps a single-device expression to multiple devices in order to evaluate faster by parallelizing across data. :param sess: tf Session to use :param tf_inputs: list of tf Placeholders to feed from the dataset :param tf_outputs: list of tf tensors to calculate :param numpy_inputs: list of numpy arrays defining the dataset :param batch_size: int, batch size to use for evaluation If not specified, this function will try to guess the batch size, but might get an out of memory error or run the model with an unsupported batch size, etc. :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :param args: dict or argparse `Namespace` object. Deprecated and included only for backwards compatibility. Should contain `batch_size`
[ "A", "helper", "function", "that", "computes", "a", "tensor", "on", "numpy", "inputs", "by", "batches", ".", "This", "version", "uses", "exactly", "the", "tensorflow", "graph", "constructed", "by", "the", "caller", "so", "the", "caller", "can", "place", "specific", "ops", "on", "specific", "devices", "to", "implement", "model", "parallelism", ".", "Most", "users", "probably", "prefer", "batch_eval_multi_worker", "which", "maps", "a", "single", "-", "device", "expression", "to", "multiple", "devices", "in", "order", "to", "evaluate", "faster", "by", "parallelizing", "across", "data", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/evaluation.py#L414-L488
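A minimal usage sketch of `batch_eval` as documented in the record above; the placeholder shape, session, model, and test array (`sess`, `model`, `x_test`) are assumptions for illustration, not part of the record:

```python
# Hypothetical example: evaluate a logits tensor over a numpy test set in
# batches of 128. `model` is assumed to follow the cleverhans Model API.
x_ph = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name='x_eval')
logits = model.get_logits(x_ph)
logits_np, = batch_eval(sess, [x_ph], [logits], [x_test], batch_size=128)
assert logits_np.shape[0] == x_test.shape[0]
```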
28,509
tensorflow/cleverhans
cleverhans/evaluation.py
_check_y
def _check_y(y): """ Makes sure a `y` argument is a vliad numpy dataset. """ if not isinstance(y, np.ndarray): raise TypeError("y must be numpy array. Typically y contains " "the entire test set labels. Got " + str(y) + " of type " + str(type(y)))
python
def _check_y(y): """ Makes sure a `y` argument is a vliad numpy dataset. """ if not isinstance(y, np.ndarray): raise TypeError("y must be numpy array. Typically y contains " "the entire test set labels. Got " + str(y) + " of type " + str(type(y)))
[ "def", "_check_y", "(", "y", ")", ":", "if", "not", "isinstance", "(", "y", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"y must be numpy array. Typically y contains \"", "\"the entire test set labels. Got \"", "+", "str", "(", "y", ")", "+", "\" of type \"", "+", "str", "(", "type", "(", "y", ")", ")", ")" ]
Makes sure a `y` argument is a vliad numpy dataset.
[ "Makes", "sure", "a", "y", "argument", "is", "a", "vliad", "numpy", "dataset", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/evaluation.py#L726-L732
28,510
tensorflow/cleverhans
examples/multigpu_advtrain/utils.py
preprocess_batch
def preprocess_batch(images_batch, preproc_func=None): """ Creates a preprocessing graph for a batch given a function that processes a single image. :param images_batch: A tensor for an image batch. :param preproc_func: (optional function) A function that takes in a tensor and returns a preprocessed input. """ if preproc_func is None: return images_batch with tf.variable_scope('preprocess'): images_list = tf.split(images_batch, int(images_batch.shape[0])) result_list = [] for img in images_list: reshaped_img = tf.reshape(img, img.shape[1:]) processed_img = preproc_func(reshaped_img) result_list.append(tf.expand_dims(processed_img, axis=0)) result_images = tf.concat(result_list, axis=0) return result_images
python
def preprocess_batch(images_batch, preproc_func=None): """ Creates a preprocessing graph for a batch given a function that processes a single image. :param images_batch: A tensor for an image batch. :param preproc_func: (optional function) A function that takes in a tensor and returns a preprocessed input. """ if preproc_func is None: return images_batch with tf.variable_scope('preprocess'): images_list = tf.split(images_batch, int(images_batch.shape[0])) result_list = [] for img in images_list: reshaped_img = tf.reshape(img, img.shape[1:]) processed_img = preproc_func(reshaped_img) result_list.append(tf.expand_dims(processed_img, axis=0)) result_images = tf.concat(result_list, axis=0) return result_images
[ "def", "preprocess_batch", "(", "images_batch", ",", "preproc_func", "=", "None", ")", ":", "if", "preproc_func", "is", "None", ":", "return", "images_batch", "with", "tf", ".", "variable_scope", "(", "'preprocess'", ")", ":", "images_list", "=", "tf", ".", "split", "(", "images_batch", ",", "int", "(", "images_batch", ".", "shape", "[", "0", "]", ")", ")", "result_list", "=", "[", "]", "for", "img", "in", "images_list", ":", "reshaped_img", "=", "tf", ".", "reshape", "(", "img", ",", "img", ".", "shape", "[", "1", ":", "]", ")", "processed_img", "=", "preproc_func", "(", "reshaped_img", ")", "result_list", ".", "append", "(", "tf", ".", "expand_dims", "(", "processed_img", ",", "axis", "=", "0", ")", ")", "result_images", "=", "tf", ".", "concat", "(", "result_list", ",", "axis", "=", "0", ")", "return", "result_images" ]
Creates a preprocessing graph for a batch given a function that processes a single image. :param images_batch: A tensor for an image batch. :param preproc_func: (optional function) A function that takes in a tensor and returns a preprocessed input.
[ "Creates", "a", "preprocessing", "graph", "for", "a", "batch", "given", "a", "function", "that", "processes", "a", "single", "image", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/utils.py#L5-L25
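A short usage sketch of `preprocess_batch` from the record above, assuming a statically known batch dimension (required by the `tf.split` call); the batch shape and per-image function are illustrative assumptions:

```python
# Hypothetical example: standardize each image of a batch independently.
images = tf.placeholder(tf.float32, shape=(32, 224, 224, 3), name='images')
standardized = preprocess_batch(images, preproc_func=tf.image.per_image_standardization)
# `standardized` keeps the (32, 224, 224, 3) shape, with each image
# shifted to zero mean and scaled to unit variance.
```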
28,511
tensorflow/cleverhans
cleverhans/model.py
Model.make_params
def make_params(self): """ Create all Variables to be returned later by get_params. By default this is a no-op. Models that need their fprop to be called for their params to be created can set `needs_dummy_fprop=True` in the constructor. """ if self.needs_dummy_fprop: if hasattr(self, "_dummy_input"): return self._dummy_input = self.make_input_placeholder() self.fprop(self._dummy_input)
python
def make_params(self): """ Create all Variables to be returned later by get_params. By default this is a no-op. Models that need their fprop to be called for their params to be created can set `needs_dummy_fprop=True` in the constructor. """ if self.needs_dummy_fprop: if hasattr(self, "_dummy_input"): return self._dummy_input = self.make_input_placeholder() self.fprop(self._dummy_input)
[ "def", "make_params", "(", "self", ")", ":", "if", "self", ".", "needs_dummy_fprop", ":", "if", "hasattr", "(", "self", ",", "\"_dummy_input\"", ")", ":", "return", "self", ".", "_dummy_input", "=", "self", ".", "make_input_placeholder", "(", ")", "self", ".", "fprop", "(", "self", ".", "_dummy_input", ")" ]
Create all Variables to be returned later by get_params. By default this is a no-op. Models that need their fprop to be called for their params to be created can set `needs_dummy_fprop=True` in the constructor.
[ "Create", "all", "Variables", "to", "be", "returned", "later", "by", "get_params", ".", "By", "default", "this", "is", "a", "no", "-", "op", ".", "Models", "that", "need", "their", "fprop", "to", "be", "called", "for", "their", "params", "to", "be", "created", "can", "set", "needs_dummy_fprop", "=", "True", "in", "the", "constructor", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model.py#L152-L164
28,512
tensorflow/cleverhans
cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py
DkNNModel.init_lsh
def init_lsh(self): """ Initializes locality-sensitive hashing with FALCONN to find nearest neighbors in training data. """ self.query_objects = { } # contains the object that can be queried to find nearest neighbors at each layer. # mean of training data representation per layer (that needs to be substracted before LSH). self.centers = {} for layer in self.layers: assert self.nb_tables >= self.neighbors # Normalize all the lenghts, since we care about the cosine similarity. self.train_activations_lsh[layer] /= np.linalg.norm( self.train_activations_lsh[layer], axis=1).reshape(-1, 1) # Center the dataset and the queries: this improves the performance of LSH quite a bit. center = np.mean(self.train_activations_lsh[layer], axis=0) self.train_activations_lsh[layer] -= center self.centers[layer] = center # LSH parameters params_cp = falconn.LSHConstructionParameters() params_cp.dimension = len(self.train_activations_lsh[layer][1]) params_cp.lsh_family = falconn.LSHFamily.CrossPolytope params_cp.distance_function = falconn.DistanceFunction.EuclideanSquared params_cp.l = self.nb_tables params_cp.num_rotations = 2 # for dense set it to 1; for sparse data set it to 2 params_cp.seed = 5721840 # we want to use all the available threads to set up params_cp.num_setup_threads = 0 params_cp.storage_hash_table = falconn.StorageHashTable.BitPackedFlatHashTable # we build 18-bit hashes so that each table has # 2^18 bins; this is a good choice since 2^18 is of the same # order of magnitude as the number of data points falconn.compute_number_of_hash_functions(self.number_bits, params_cp) print('Constructing the LSH table') table = falconn.LSHIndex(params_cp) table.setup(self.train_activations_lsh[layer]) # Parse test feature vectors and find k nearest neighbors query_object = table.construct_query_object() query_object.set_num_probes(self.nb_tables) self.query_objects[layer] = query_object
python
def init_lsh(self): """ Initializes locality-sensitive hashing with FALCONN to find nearest neighbors in training data. """ self.query_objects = { } # contains the object that can be queried to find nearest neighbors at each layer. # mean of training data representation per layer (that needs to be substracted before LSH). self.centers = {} for layer in self.layers: assert self.nb_tables >= self.neighbors # Normalize all the lenghts, since we care about the cosine similarity. self.train_activations_lsh[layer] /= np.linalg.norm( self.train_activations_lsh[layer], axis=1).reshape(-1, 1) # Center the dataset and the queries: this improves the performance of LSH quite a bit. center = np.mean(self.train_activations_lsh[layer], axis=0) self.train_activations_lsh[layer] -= center self.centers[layer] = center # LSH parameters params_cp = falconn.LSHConstructionParameters() params_cp.dimension = len(self.train_activations_lsh[layer][1]) params_cp.lsh_family = falconn.LSHFamily.CrossPolytope params_cp.distance_function = falconn.DistanceFunction.EuclideanSquared params_cp.l = self.nb_tables params_cp.num_rotations = 2 # for dense set it to 1; for sparse data set it to 2 params_cp.seed = 5721840 # we want to use all the available threads to set up params_cp.num_setup_threads = 0 params_cp.storage_hash_table = falconn.StorageHashTable.BitPackedFlatHashTable # we build 18-bit hashes so that each table has # 2^18 bins; this is a good choice since 2^18 is of the same # order of magnitude as the number of data points falconn.compute_number_of_hash_functions(self.number_bits, params_cp) print('Constructing the LSH table') table = falconn.LSHIndex(params_cp) table.setup(self.train_activations_lsh[layer]) # Parse test feature vectors and find k nearest neighbors query_object = table.construct_query_object() query_object.set_num_probes(self.nb_tables) self.query_objects[layer] = query_object
[ "def", "init_lsh", "(", "self", ")", ":", "self", ".", "query_objects", "=", "{", "}", "# contains the object that can be queried to find nearest neighbors at each layer.", "# mean of training data representation per layer (that needs to be substracted before LSH).", "self", ".", "centers", "=", "{", "}", "for", "layer", "in", "self", ".", "layers", ":", "assert", "self", ".", "nb_tables", ">=", "self", ".", "neighbors", "# Normalize all the lenghts, since we care about the cosine similarity.", "self", ".", "train_activations_lsh", "[", "layer", "]", "/=", "np", ".", "linalg", ".", "norm", "(", "self", ".", "train_activations_lsh", "[", "layer", "]", ",", "axis", "=", "1", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "# Center the dataset and the queries: this improves the performance of LSH quite a bit.", "center", "=", "np", ".", "mean", "(", "self", ".", "train_activations_lsh", "[", "layer", "]", ",", "axis", "=", "0", ")", "self", ".", "train_activations_lsh", "[", "layer", "]", "-=", "center", "self", ".", "centers", "[", "layer", "]", "=", "center", "# LSH parameters", "params_cp", "=", "falconn", ".", "LSHConstructionParameters", "(", ")", "params_cp", ".", "dimension", "=", "len", "(", "self", ".", "train_activations_lsh", "[", "layer", "]", "[", "1", "]", ")", "params_cp", ".", "lsh_family", "=", "falconn", ".", "LSHFamily", ".", "CrossPolytope", "params_cp", ".", "distance_function", "=", "falconn", ".", "DistanceFunction", ".", "EuclideanSquared", "params_cp", ".", "l", "=", "self", ".", "nb_tables", "params_cp", ".", "num_rotations", "=", "2", "# for dense set it to 1; for sparse data set it to 2", "params_cp", ".", "seed", "=", "5721840", "# we want to use all the available threads to set up", "params_cp", ".", "num_setup_threads", "=", "0", "params_cp", ".", "storage_hash_table", "=", "falconn", ".", "StorageHashTable", ".", "BitPackedFlatHashTable", "# we build 18-bit hashes so that each table has", "# 2^18 bins; this is a good choice since 2^18 is of the same", "# order of magnitude as the number of data points", "falconn", ".", "compute_number_of_hash_functions", "(", "self", ".", "number_bits", ",", "params_cp", ")", "print", "(", "'Constructing the LSH table'", ")", "table", "=", "falconn", ".", "LSHIndex", "(", "params_cp", ")", "table", ".", "setup", "(", "self", ".", "train_activations_lsh", "[", "layer", "]", ")", "# Parse test feature vectors and find k nearest neighbors", "query_object", "=", "table", ".", "construct_query_object", "(", ")", "query_object", ".", "set_num_probes", "(", "self", ".", "nb_tables", ")", "self", ".", "query_objects", "[", "layer", "]", "=", "query_object" ]
Initializes locality-sensitive hashing with FALCONN to find nearest neighbors in training data.
[ "Initializes", "locality", "-", "sensitive", "hashing", "with", "FALCONN", "to", "find", "nearest", "neighbors", "in", "training", "data", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py#L88-L132
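The init_lsh routine above follows a standard FALCONN recipe: normalize, center, then build cross-polytope LSH tables. A self-contained sketch of the same recipe on random data; it assumes the falconn package is installed and uses only the calls shown in the record, with toy sizes.

    import numpy as np
    import falconn

    # Toy data: 1000 points in 32 dimensions, normalized and centered as above.
    train = np.random.randn(1000, 32).astype(np.float32)
    train /= np.linalg.norm(train, axis=1).reshape(-1, 1)
    center = np.mean(train, axis=0)
    train -= center

    params = falconn.LSHConstructionParameters()
    params.dimension = train.shape[1]
    params.lsh_family = falconn.LSHFamily.CrossPolytope
    params.distance_function = falconn.DistanceFunction.EuclideanSquared
    params.l = 200                      # number of hash tables
    params.num_rotations = 2            # 1 for dense data, 2 for sparse data
    params.seed = 5721840
    params.num_setup_threads = 0        # use all available threads
    params.storage_hash_table = falconn.StorageHashTable.BitPackedFlatHashTable
    falconn.compute_number_of_hash_functions(18, params)  # roughly 2^18 bins per table

    table = falconn.LSHIndex(params)
    table.setup(train)
    query = table.construct_query_object()
    query.set_num_probes(200)
    neighbors = query.find_k_nearest_neighbors(train[0], 5)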
28,513
tensorflow/cleverhans
cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py
DkNNModel.find_train_knns
def find_train_knns(self, data_activations): """ Given a data_activation dictionary that contains a np array with activations for each layer, find the knns in the training data. """ knns_ind = {} knns_labels = {} for layer in self.layers: # Pre-process representations of data to normalize and remove training data mean. data_activations_layer = copy.copy(data_activations[layer]) nb_data = data_activations_layer.shape[0] data_activations_layer /= np.linalg.norm( data_activations_layer, axis=1).reshape(-1, 1) data_activations_layer -= self.centers[layer] # Use FALCONN to find indices of nearest neighbors in training data. knns_ind[layer] = np.zeros( (data_activations_layer.shape[0], self.neighbors), dtype=np.int32) knn_errors = 0 for i in range(data_activations_layer.shape[0]): query_res = self.query_objects[layer].find_k_nearest_neighbors( data_activations_layer[i], self.neighbors) try: knns_ind[layer][i, :] = query_res except: # pylint: disable-msg=W0702 knns_ind[layer][i, :len(query_res)] = query_res knn_errors += knns_ind[layer].shape[1] - len(query_res) # Find labels of neighbors found in the training data. knns_labels[layer] = np.zeros((nb_data, self.neighbors), dtype=np.int32) for data_id in range(nb_data): knns_labels[layer][data_id, :] = self.train_labels[knns_ind[layer][data_id]] return knns_ind, knns_labels
python
def find_train_knns(self, data_activations): """ Given a data_activation dictionary that contains a np array with activations for each layer, find the knns in the training data. """ knns_ind = {} knns_labels = {} for layer in self.layers: # Pre-process representations of data to normalize and remove training data mean. data_activations_layer = copy.copy(data_activations[layer]) nb_data = data_activations_layer.shape[0] data_activations_layer /= np.linalg.norm( data_activations_layer, axis=1).reshape(-1, 1) data_activations_layer -= self.centers[layer] # Use FALCONN to find indices of nearest neighbors in training data. knns_ind[layer] = np.zeros( (data_activations_layer.shape[0], self.neighbors), dtype=np.int32) knn_errors = 0 for i in range(data_activations_layer.shape[0]): query_res = self.query_objects[layer].find_k_nearest_neighbors( data_activations_layer[i], self.neighbors) try: knns_ind[layer][i, :] = query_res except: # pylint: disable-msg=W0702 knns_ind[layer][i, :len(query_res)] = query_res knn_errors += knns_ind[layer].shape[1] - len(query_res) # Find labels of neighbors found in the training data. knns_labels[layer] = np.zeros((nb_data, self.neighbors), dtype=np.int32) for data_id in range(nb_data): knns_labels[layer][data_id, :] = self.train_labels[knns_ind[layer][data_id]] return knns_ind, knns_labels
[ "def", "find_train_knns", "(", "self", ",", "data_activations", ")", ":", "knns_ind", "=", "{", "}", "knns_labels", "=", "{", "}", "for", "layer", "in", "self", ".", "layers", ":", "# Pre-process representations of data to normalize and remove training data mean.", "data_activations_layer", "=", "copy", ".", "copy", "(", "data_activations", "[", "layer", "]", ")", "nb_data", "=", "data_activations_layer", ".", "shape", "[", "0", "]", "data_activations_layer", "/=", "np", ".", "linalg", ".", "norm", "(", "data_activations_layer", ",", "axis", "=", "1", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "data_activations_layer", "-=", "self", ".", "centers", "[", "layer", "]", "# Use FALCONN to find indices of nearest neighbors in training data.", "knns_ind", "[", "layer", "]", "=", "np", ".", "zeros", "(", "(", "data_activations_layer", ".", "shape", "[", "0", "]", ",", "self", ".", "neighbors", ")", ",", "dtype", "=", "np", ".", "int32", ")", "knn_errors", "=", "0", "for", "i", "in", "range", "(", "data_activations_layer", ".", "shape", "[", "0", "]", ")", ":", "query_res", "=", "self", ".", "query_objects", "[", "layer", "]", ".", "find_k_nearest_neighbors", "(", "data_activations_layer", "[", "i", "]", ",", "self", ".", "neighbors", ")", "try", ":", "knns_ind", "[", "layer", "]", "[", "i", ",", ":", "]", "=", "query_res", "except", ":", "# pylint: disable-msg=W0702", "knns_ind", "[", "layer", "]", "[", "i", ",", ":", "len", "(", "query_res", ")", "]", "=", "query_res", "knn_errors", "+=", "knns_ind", "[", "layer", "]", ".", "shape", "[", "1", "]", "-", "len", "(", "query_res", ")", "# Find labels of neighbors found in the training data.", "knns_labels", "[", "layer", "]", "=", "np", ".", "zeros", "(", "(", "nb_data", ",", "self", ".", "neighbors", ")", ",", "dtype", "=", "np", ".", "int32", ")", "for", "data_id", "in", "range", "(", "nb_data", ")", ":", "knns_labels", "[", "layer", "]", "[", "data_id", ",", ":", "]", "=", "self", ".", "train_labels", "[", "knns_ind", "[", "layer", "]", "[", "data_id", "]", "]", "return", "knns_ind", ",", "knns_labels" ]
Given a data_activation dictionary that contains a np array with activations for each layer, find the knns in the training data.
[ "Given", "a", "data_activation", "dictionary", "that", "contains", "a", "np", "array", "with", "activations", "for", "each", "layer", "find", "the", "knns", "in", "the", "training", "data", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py#L134-L168
28,514
tensorflow/cleverhans
cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py
DkNNModel.preds_conf_cred
def preds_conf_cred(self, knns_not_in_class): """ Given an array of nb_data x nb_classes dimensions, use conformal prediction to compute the DkNN's prediction, confidence and credibility. """ nb_data = knns_not_in_class.shape[0] preds_knn = np.zeros(nb_data, dtype=np.int32) confs = np.zeros((nb_data, self.nb_classes), dtype=np.float32) creds = np.zeros((nb_data, self.nb_classes), dtype=np.float32) for i in range(nb_data): # p-value of test input for each class p_value = np.zeros(self.nb_classes, dtype=np.float32) for class_id in range(self.nb_classes): # p-value of (test point, candidate label) p_value[class_id] = (float(self.nb_cali) - bisect_left( self.cali_nonconformity, knns_not_in_class[i, class_id])) / float(self.nb_cali) preds_knn[i] = np.argmax(p_value) confs[i, preds_knn[i]] = 1. - p_value[np.argsort(p_value)[-2]] creds[i, preds_knn[i]] = p_value[preds_knn[i]] return preds_knn, confs, creds
python
def preds_conf_cred(self, knns_not_in_class): """ Given an array of nb_data x nb_classes dimensions, use conformal prediction to compute the DkNN's prediction, confidence and credibility. """ nb_data = knns_not_in_class.shape[0] preds_knn = np.zeros(nb_data, dtype=np.int32) confs = np.zeros((nb_data, self.nb_classes), dtype=np.float32) creds = np.zeros((nb_data, self.nb_classes), dtype=np.float32) for i in range(nb_data): # p-value of test input for each class p_value = np.zeros(self.nb_classes, dtype=np.float32) for class_id in range(self.nb_classes): # p-value of (test point, candidate label) p_value[class_id] = (float(self.nb_cali) - bisect_left( self.cali_nonconformity, knns_not_in_class[i, class_id])) / float(self.nb_cali) preds_knn[i] = np.argmax(p_value) confs[i, preds_knn[i]] = 1. - p_value[np.argsort(p_value)[-2]] creds[i, preds_knn[i]] = p_value[preds_knn[i]] return preds_knn, confs, creds
[ "def", "preds_conf_cred", "(", "self", ",", "knns_not_in_class", ")", ":", "nb_data", "=", "knns_not_in_class", ".", "shape", "[", "0", "]", "preds_knn", "=", "np", ".", "zeros", "(", "nb_data", ",", "dtype", "=", "np", ".", "int32", ")", "confs", "=", "np", ".", "zeros", "(", "(", "nb_data", ",", "self", ".", "nb_classes", ")", ",", "dtype", "=", "np", ".", "float32", ")", "creds", "=", "np", ".", "zeros", "(", "(", "nb_data", ",", "self", ".", "nb_classes", ")", ",", "dtype", "=", "np", ".", "float32", ")", "for", "i", "in", "range", "(", "nb_data", ")", ":", "# p-value of test input for each class", "p_value", "=", "np", ".", "zeros", "(", "self", ".", "nb_classes", ",", "dtype", "=", "np", ".", "float32", ")", "for", "class_id", "in", "range", "(", "self", ".", "nb_classes", ")", ":", "# p-value of (test point, candidate label)", "p_value", "[", "class_id", "]", "=", "(", "float", "(", "self", ".", "nb_cali", ")", "-", "bisect_left", "(", "self", ".", "cali_nonconformity", ",", "knns_not_in_class", "[", "i", ",", "class_id", "]", ")", ")", "/", "float", "(", "self", ".", "nb_cali", ")", "preds_knn", "[", "i", "]", "=", "np", ".", "argmax", "(", "p_value", ")", "confs", "[", "i", ",", "preds_knn", "[", "i", "]", "]", "=", "1.", "-", "p_value", "[", "np", ".", "argsort", "(", "p_value", ")", "[", "-", "2", "]", "]", "creds", "[", "i", ",", "preds_knn", "[", "i", "]", "]", "=", "p_value", "[", "preds_knn", "[", "i", "]", "]", "return", "preds_knn", ",", "confs", ",", "creds" ]
Given an array of nb_data x nb_classes dimensions, use conformal prediction to compute the DkNN's prediction, confidence and credibility.
[ "Given", "an", "array", "of", "nb_data", "x", "nb_classes", "dimensions", "use", "conformal", "prediction", "to", "compute", "the", "DkNN", "s", "prediction", "confidence", "and", "credibility", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py#L192-L214
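The p-value computation above is ordinary split conformal prediction: for each candidate label, count the fraction of calibration nonconformity scores at least as large as the test score. A hedged numpy sketch with toy scores (not the DkNN pipeline):

    import numpy as np
    from bisect import bisect_left

    # Hypothetical calibration nonconformity scores, sorted ascending as the record assumes.
    cali_nonconformity = np.sort(np.array([3, 5, 5, 7, 9, 12], dtype=np.float32))
    nb_cali = len(cali_nonconformity)

    def p_value(test_score):
        # Fraction of calibration scores that are >= the test score.
        return (nb_cali - bisect_left(cali_nonconformity, test_score)) / float(nb_cali)

    scores_per_class = np.array([4.0, 10.0, 6.0])     # one test point, one score per class
    p = np.array([p_value(s) for s in scores_per_class])
    pred = int(np.argmax(p))                          # prediction: class with largest p-value
    cred = p[pred]                                    # credibility
    conf = 1.0 - np.sort(p)[-2]                       # confidence: 1 - second largest p-value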
28,515
tensorflow/cleverhans
cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py
DkNNModel.fprop_np
def fprop_np(self, data_np): """ Performs a forward pass through the DkNN on an numpy array of data. """ if not self.calibrated: raise ValueError( "DkNN needs to be calibrated by calling DkNNModel.calibrate method once before inferring.") data_activations = self.get_activations(data_np) _, knns_labels = self.find_train_knns(data_activations) knns_not_in_class = self.nonconformity(knns_labels) _, _, creds = self.preds_conf_cred(knns_not_in_class) return creds
python
def fprop_np(self, data_np): """ Performs a forward pass through the DkNN on an numpy array of data. """ if not self.calibrated: raise ValueError( "DkNN needs to be calibrated by calling DkNNModel.calibrate method once before inferring.") data_activations = self.get_activations(data_np) _, knns_labels = self.find_train_knns(data_activations) knns_not_in_class = self.nonconformity(knns_labels) _, _, creds = self.preds_conf_cred(knns_not_in_class) return creds
[ "def", "fprop_np", "(", "self", ",", "data_np", ")", ":", "if", "not", "self", ".", "calibrated", ":", "raise", "ValueError", "(", "\"DkNN needs to be calibrated by calling DkNNModel.calibrate method once before inferring.\"", ")", "data_activations", "=", "self", ".", "get_activations", "(", "data_np", ")", "_", ",", "knns_labels", "=", "self", ".", "find_train_knns", "(", "data_activations", ")", "knns_not_in_class", "=", "self", ".", "nonconformity", "(", "knns_labels", ")", "_", ",", "_", ",", "creds", "=", "self", ".", "preds_conf_cred", "(", "knns_not_in_class", ")", "return", "creds" ]
Performs a forward pass through the DkNN on a numpy array of data.
[ "Performs", "a", "forward", "pass", "through", "the", "DkNN", "on", "a", "numpy", "array", "of", "data", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py#L216-L227
28,516
tensorflow/cleverhans
cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py
DkNNModel.fprop
def fprop(self, x): """ Performs a forward pass through the DkNN on a TF tensor by wrapping the fprop_np method. """ logits = tf.py_func(self.fprop_np, [x], tf.float32) return {self.O_LOGITS: logits}
python
def fprop(self, x): """ Performs a forward pass through the DkNN on a TF tensor by wrapping the fprop_np method. """ logits = tf.py_func(self.fprop_np, [x], tf.float32) return {self.O_LOGITS: logits}
[ "def", "fprop", "(", "self", ",", "x", ")", ":", "logits", "=", "tf", ".", "py_func", "(", "self", ".", "fprop_np", ",", "[", "x", "]", ",", "tf", ".", "float32", ")", "return", "{", "self", ".", "O_LOGITS", ":", "logits", "}" ]
Performs a forward pass through the DkNN on a TF tensor by wrapping the fprop_np method.
[ "Performs", "a", "forward", "pass", "through", "the", "DkNN", "on", "a", "TF", "tensor", "by", "wrapping", "the", "fprop_np", "method", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py#L229-L235
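fprop simply wraps the numpy-side computation with tf.py_func so it can live inside a TF1 graph. A minimal sketch of the same wrapping around a toy numpy function:

    import numpy as np
    import tensorflow as tf  # TF1-style graph API

    def square_np(data_np):
        # Stand-in for fprop_np: any numpy -> numpy function works here.
        return (data_np ** 2).astype(np.float32)

    x = tf.placeholder(tf.float32, shape=(None, 3))
    y = tf.py_func(square_np, [x], tf.float32)

    with tf.Session() as sess:
        print(sess.run(y, feed_dict={x: np.ones((2, 3), dtype=np.float32)}))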
28,517
tensorflow/cleverhans
cleverhans/model_zoo/madry_lab_challenges/cifar10_model.py
_relu
def _relu(x, leakiness=0.0): """Relu, with optional leaky support.""" return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
python
def _relu(x, leakiness=0.0): """Relu, with optional leaky support.""" return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
[ "def", "_relu", "(", "x", ",", "leakiness", "=", "0.0", ")", ":", "return", "tf", ".", "where", "(", "tf", ".", "less", "(", "x", ",", "0.0", ")", ",", "leakiness", "*", "x", ",", "x", ",", "name", "=", "'leaky_relu'", ")" ]
Relu, with optional leaky support.
[ "Relu", "with", "optional", "leaky", "support", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model_zoo/madry_lab_challenges/cifar10_model.py#L292-L294
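The leaky ReLU above can be checked numerically; a small sketch comparing the tf.where formulation with the equivalent numpy expression:

    import numpy as np
    import tensorflow as tf  # TF1-style graph API

    x_np = np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32)
    leakiness = 0.1

    x = tf.constant(x_np)
    leaky = tf.where(tf.less(x, 0.0), leakiness * x, x)

    with tf.Session() as sess:
        print(sess.run(leaky))                            # [-0.2 -0.05 0.  1.5]
    print(np.where(x_np < 0.0, leakiness * x_np, x_np))   # same values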
28,518
tensorflow/cleverhans
cleverhans/experimental/certification/dual_formulation.py
DualFormulation.construct_lanczos_params
def construct_lanczos_params(self): """Computes matrices T and V using the Lanczos algorithm. Args: k: number of iterations and dimensionality of the tridiagonal matrix Returns: eig_vec: eigen vector corresponding to min eigenvalue """ # Using autograph to automatically handle # the control flow of minimum_eigen_vector self.min_eigen_vec = autograph.to_graph(utils.tf_lanczos_smallest_eigval) def _m_vector_prod_fn(x): return self.get_psd_product(x, dtype=self.lanczos_dtype) def _h_vector_prod_fn(x): return self.get_h_product(x, dtype=self.lanczos_dtype) # Construct nodes for computing eigenvalue of M self.m_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension, 1), dtype=np.float64) zeros_m = tf.zeros(shape=(self.matrix_m_dimension, 1), dtype=tf.float64) self.m_min_vec_ph = tf.placeholder_with_default(input=zeros_m, shape=(self.matrix_m_dimension, 1), name='m_min_vec_ph') self.m_min_eig, self.m_min_vec = self.min_eigen_vec(_m_vector_prod_fn, self.matrix_m_dimension, self.m_min_vec_ph, self.lzs_params['max_iter'], dtype=self.lanczos_dtype) self.m_min_eig = tf.cast(self.m_min_eig, self.nn_dtype) self.m_min_vec = tf.cast(self.m_min_vec, self.nn_dtype) self.h_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=np.float64) zeros_h = tf.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=tf.float64) self.h_min_vec_ph = tf.placeholder_with_default(input=zeros_h, shape=(self.matrix_m_dimension - 1, 1), name='h_min_vec_ph') self.h_min_eig, self.h_min_vec = self.min_eigen_vec(_h_vector_prod_fn, self.matrix_m_dimension-1, self.h_min_vec_ph, self.lzs_params['max_iter'], dtype=self.lanczos_dtype) self.h_min_eig = tf.cast(self.h_min_eig, self.nn_dtype) self.h_min_vec = tf.cast(self.h_min_vec, self.nn_dtype)
python
def construct_lanczos_params(self): """Computes matrices T and V using the Lanczos algorithm. Args: k: number of iterations and dimensionality of the tridiagonal matrix Returns: eig_vec: eigen vector corresponding to min eigenvalue """ # Using autograph to automatically handle # the control flow of minimum_eigen_vector self.min_eigen_vec = autograph.to_graph(utils.tf_lanczos_smallest_eigval) def _m_vector_prod_fn(x): return self.get_psd_product(x, dtype=self.lanczos_dtype) def _h_vector_prod_fn(x): return self.get_h_product(x, dtype=self.lanczos_dtype) # Construct nodes for computing eigenvalue of M self.m_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension, 1), dtype=np.float64) zeros_m = tf.zeros(shape=(self.matrix_m_dimension, 1), dtype=tf.float64) self.m_min_vec_ph = tf.placeholder_with_default(input=zeros_m, shape=(self.matrix_m_dimension, 1), name='m_min_vec_ph') self.m_min_eig, self.m_min_vec = self.min_eigen_vec(_m_vector_prod_fn, self.matrix_m_dimension, self.m_min_vec_ph, self.lzs_params['max_iter'], dtype=self.lanczos_dtype) self.m_min_eig = tf.cast(self.m_min_eig, self.nn_dtype) self.m_min_vec = tf.cast(self.m_min_vec, self.nn_dtype) self.h_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=np.float64) zeros_h = tf.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=tf.float64) self.h_min_vec_ph = tf.placeholder_with_default(input=zeros_h, shape=(self.matrix_m_dimension - 1, 1), name='h_min_vec_ph') self.h_min_eig, self.h_min_vec = self.min_eigen_vec(_h_vector_prod_fn, self.matrix_m_dimension-1, self.h_min_vec_ph, self.lzs_params['max_iter'], dtype=self.lanczos_dtype) self.h_min_eig = tf.cast(self.h_min_eig, self.nn_dtype) self.h_min_vec = tf.cast(self.h_min_vec, self.nn_dtype)
[ "def", "construct_lanczos_params", "(", "self", ")", ":", "# Using autograph to automatically handle", "# the control flow of minimum_eigen_vector", "self", ".", "min_eigen_vec", "=", "autograph", ".", "to_graph", "(", "utils", ".", "tf_lanczos_smallest_eigval", ")", "def", "_m_vector_prod_fn", "(", "x", ")", ":", "return", "self", ".", "get_psd_product", "(", "x", ",", "dtype", "=", "self", ".", "lanczos_dtype", ")", "def", "_h_vector_prod_fn", "(", "x", ")", ":", "return", "self", ".", "get_h_product", "(", "x", ",", "dtype", "=", "self", ".", "lanczos_dtype", ")", "# Construct nodes for computing eigenvalue of M", "self", ".", "m_min_vec_estimate", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "matrix_m_dimension", ",", "1", ")", ",", "dtype", "=", "np", ".", "float64", ")", "zeros_m", "=", "tf", ".", "zeros", "(", "shape", "=", "(", "self", ".", "matrix_m_dimension", ",", "1", ")", ",", "dtype", "=", "tf", ".", "float64", ")", "self", ".", "m_min_vec_ph", "=", "tf", ".", "placeholder_with_default", "(", "input", "=", "zeros_m", ",", "shape", "=", "(", "self", ".", "matrix_m_dimension", ",", "1", ")", ",", "name", "=", "'m_min_vec_ph'", ")", "self", ".", "m_min_eig", ",", "self", ".", "m_min_vec", "=", "self", ".", "min_eigen_vec", "(", "_m_vector_prod_fn", ",", "self", ".", "matrix_m_dimension", ",", "self", ".", "m_min_vec_ph", ",", "self", ".", "lzs_params", "[", "'max_iter'", "]", ",", "dtype", "=", "self", ".", "lanczos_dtype", ")", "self", ".", "m_min_eig", "=", "tf", ".", "cast", "(", "self", ".", "m_min_eig", ",", "self", ".", "nn_dtype", ")", "self", ".", "m_min_vec", "=", "tf", ".", "cast", "(", "self", ".", "m_min_vec", ",", "self", ".", "nn_dtype", ")", "self", ".", "h_min_vec_estimate", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "matrix_m_dimension", "-", "1", ",", "1", ")", ",", "dtype", "=", "np", ".", "float64", ")", "zeros_h", "=", "tf", ".", "zeros", "(", "shape", "=", "(", "self", ".", "matrix_m_dimension", "-", "1", ",", "1", ")", ",", "dtype", "=", "tf", ".", "float64", ")", "self", ".", "h_min_vec_ph", "=", "tf", ".", "placeholder_with_default", "(", "input", "=", "zeros_h", ",", "shape", "=", "(", "self", ".", "matrix_m_dimension", "-", "1", ",", "1", ")", ",", "name", "=", "'h_min_vec_ph'", ")", "self", ".", "h_min_eig", ",", "self", ".", "h_min_vec", "=", "self", ".", "min_eigen_vec", "(", "_h_vector_prod_fn", ",", "self", ".", "matrix_m_dimension", "-", "1", ",", "self", ".", "h_min_vec_ph", ",", "self", ".", "lzs_params", "[", "'max_iter'", "]", ",", "dtype", "=", "self", ".", "lanczos_dtype", ")", "self", ".", "h_min_eig", "=", "tf", ".", "cast", "(", "self", ".", "h_min_eig", ",", "self", ".", "nn_dtype", ")", "self", ".", "h_min_vec", "=", "tf", ".", "cast", "(", "self", ".", "h_min_vec", ",", "self", ".", "nn_dtype", ")" ]
Computes matrices T and V using the Lanczos algorithm. Args: k: number of iterations and dimensionality of the tridiagonal matrix Returns: eig_vec: eigen vector corresponding to min eigenvalue
[ "Computes", "matrices", "T", "and", "V", "using", "the", "Lanczos", "algorithm", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/dual_formulation.py#L205-L247
28,519
tensorflow/cleverhans
cleverhans/experimental/certification/dual_formulation.py
DualFormulation.set_differentiable_objective
def set_differentiable_objective(self): """Function that constructs minimization objective from dual variables.""" # Checking if graphs are already created if self.vector_g is not None: return # Computing the scalar term bias_sum = 0 for i in range(0, self.nn_params.num_hidden_layers): bias_sum = bias_sum + tf.reduce_sum( tf.multiply(self.nn_params.biases[i], self.lambda_pos[i + 1])) lu_sum = 0 for i in range(0, self.nn_params.num_hidden_layers + 1): lu_sum = lu_sum + tf.reduce_sum( tf.multiply(tf.multiply(self.lower[i], self.upper[i]), self.lambda_lu[i])) self.scalar_f = -bias_sum - lu_sum + self.final_constant # Computing the vector term g_rows = [] for i in range(0, self.nn_params.num_hidden_layers): if i > 0: current_row = (self.lambda_neg[i] + self.lambda_pos[i] - self.nn_params.forward_pass(self.lambda_pos[i+1], i, is_transpose=True) + tf.multiply(self.lower[i]+self.upper[i], self.lambda_lu[i]) + tf.multiply(self.lambda_quad[i], self.nn_params.biases[i-1])) else: current_row = (-self.nn_params.forward_pass(self.lambda_pos[i+1], i, is_transpose=True) + tf.multiply(self.lower[i]+self.upper[i], self.lambda_lu[i])) g_rows.append(current_row) # Term for final linear term g_rows.append((self.lambda_pos[self.nn_params.num_hidden_layers] + self.lambda_neg[self.nn_params.num_hidden_layers] + self.final_linear + tf.multiply((self.lower[self.nn_params.num_hidden_layers]+ self.upper[self.nn_params.num_hidden_layers]), self.lambda_lu[self.nn_params.num_hidden_layers]) + tf.multiply( self.lambda_quad[self.nn_params.num_hidden_layers], self.nn_params.biases[ self.nn_params.num_hidden_layers-1]))) self.vector_g = tf.concat(g_rows, axis=0) self.unconstrained_objective = self.scalar_f + 0.5 * self.nu
python
def set_differentiable_objective(self): """Function that constructs minimization objective from dual variables.""" # Checking if graphs are already created if self.vector_g is not None: return # Computing the scalar term bias_sum = 0 for i in range(0, self.nn_params.num_hidden_layers): bias_sum = bias_sum + tf.reduce_sum( tf.multiply(self.nn_params.biases[i], self.lambda_pos[i + 1])) lu_sum = 0 for i in range(0, self.nn_params.num_hidden_layers + 1): lu_sum = lu_sum + tf.reduce_sum( tf.multiply(tf.multiply(self.lower[i], self.upper[i]), self.lambda_lu[i])) self.scalar_f = -bias_sum - lu_sum + self.final_constant # Computing the vector term g_rows = [] for i in range(0, self.nn_params.num_hidden_layers): if i > 0: current_row = (self.lambda_neg[i] + self.lambda_pos[i] - self.nn_params.forward_pass(self.lambda_pos[i+1], i, is_transpose=True) + tf.multiply(self.lower[i]+self.upper[i], self.lambda_lu[i]) + tf.multiply(self.lambda_quad[i], self.nn_params.biases[i-1])) else: current_row = (-self.nn_params.forward_pass(self.lambda_pos[i+1], i, is_transpose=True) + tf.multiply(self.lower[i]+self.upper[i], self.lambda_lu[i])) g_rows.append(current_row) # Term for final linear term g_rows.append((self.lambda_pos[self.nn_params.num_hidden_layers] + self.lambda_neg[self.nn_params.num_hidden_layers] + self.final_linear + tf.multiply((self.lower[self.nn_params.num_hidden_layers]+ self.upper[self.nn_params.num_hidden_layers]), self.lambda_lu[self.nn_params.num_hidden_layers]) + tf.multiply( self.lambda_quad[self.nn_params.num_hidden_layers], self.nn_params.biases[ self.nn_params.num_hidden_layers-1]))) self.vector_g = tf.concat(g_rows, axis=0) self.unconstrained_objective = self.scalar_f + 0.5 * self.nu
[ "def", "set_differentiable_objective", "(", "self", ")", ":", "# Checking if graphs are already created", "if", "self", ".", "vector_g", "is", "not", "None", ":", "return", "# Computing the scalar term", "bias_sum", "=", "0", "for", "i", "in", "range", "(", "0", ",", "self", ".", "nn_params", ".", "num_hidden_layers", ")", ":", "bias_sum", "=", "bias_sum", "+", "tf", ".", "reduce_sum", "(", "tf", ".", "multiply", "(", "self", ".", "nn_params", ".", "biases", "[", "i", "]", ",", "self", ".", "lambda_pos", "[", "i", "+", "1", "]", ")", ")", "lu_sum", "=", "0", "for", "i", "in", "range", "(", "0", ",", "self", ".", "nn_params", ".", "num_hidden_layers", "+", "1", ")", ":", "lu_sum", "=", "lu_sum", "+", "tf", ".", "reduce_sum", "(", "tf", ".", "multiply", "(", "tf", ".", "multiply", "(", "self", ".", "lower", "[", "i", "]", ",", "self", ".", "upper", "[", "i", "]", ")", ",", "self", ".", "lambda_lu", "[", "i", "]", ")", ")", "self", ".", "scalar_f", "=", "-", "bias_sum", "-", "lu_sum", "+", "self", ".", "final_constant", "# Computing the vector term", "g_rows", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "nn_params", ".", "num_hidden_layers", ")", ":", "if", "i", ">", "0", ":", "current_row", "=", "(", "self", ".", "lambda_neg", "[", "i", "]", "+", "self", ".", "lambda_pos", "[", "i", "]", "-", "self", ".", "nn_params", ".", "forward_pass", "(", "self", ".", "lambda_pos", "[", "i", "+", "1", "]", ",", "i", ",", "is_transpose", "=", "True", ")", "+", "tf", ".", "multiply", "(", "self", ".", "lower", "[", "i", "]", "+", "self", ".", "upper", "[", "i", "]", ",", "self", ".", "lambda_lu", "[", "i", "]", ")", "+", "tf", ".", "multiply", "(", "self", ".", "lambda_quad", "[", "i", "]", ",", "self", ".", "nn_params", ".", "biases", "[", "i", "-", "1", "]", ")", ")", "else", ":", "current_row", "=", "(", "-", "self", ".", "nn_params", ".", "forward_pass", "(", "self", ".", "lambda_pos", "[", "i", "+", "1", "]", ",", "i", ",", "is_transpose", "=", "True", ")", "+", "tf", ".", "multiply", "(", "self", ".", "lower", "[", "i", "]", "+", "self", ".", "upper", "[", "i", "]", ",", "self", ".", "lambda_lu", "[", "i", "]", ")", ")", "g_rows", ".", "append", "(", "current_row", ")", "# Term for final linear term", "g_rows", ".", "append", "(", "(", "self", ".", "lambda_pos", "[", "self", ".", "nn_params", ".", "num_hidden_layers", "]", "+", "self", ".", "lambda_neg", "[", "self", ".", "nn_params", ".", "num_hidden_layers", "]", "+", "self", ".", "final_linear", "+", "tf", ".", "multiply", "(", "(", "self", ".", "lower", "[", "self", ".", "nn_params", ".", "num_hidden_layers", "]", "+", "self", ".", "upper", "[", "self", ".", "nn_params", ".", "num_hidden_layers", "]", ")", ",", "self", ".", "lambda_lu", "[", "self", ".", "nn_params", ".", "num_hidden_layers", "]", ")", "+", "tf", ".", "multiply", "(", "self", ".", "lambda_quad", "[", "self", ".", "nn_params", ".", "num_hidden_layers", "]", ",", "self", ".", "nn_params", ".", "biases", "[", "self", ".", "nn_params", ".", "num_hidden_layers", "-", "1", "]", ")", ")", ")", "self", ".", "vector_g", "=", "tf", ".", "concat", "(", "g_rows", ",", "axis", "=", "0", ")", "self", ".", "unconstrained_objective", "=", "self", ".", "scalar_f", "+", "0.5", "*", "self", ".", "nu" ]
Function that constructs minimization objective from dual variables.
[ "Function", "that", "constructs", "minimization", "objective", "from", "dual", "variables", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/dual_formulation.py#L249-L298
28,520
tensorflow/cleverhans
cleverhans/experimental/certification/dual_formulation.py
DualFormulation.get_full_psd_matrix
def get_full_psd_matrix(self): """Function that returns the tf graph corresponding to the entire matrix M. Returns: matrix_h: unrolled version of tf matrix corresponding to H matrix_m: unrolled tf matrix corresponding to M """ if self.matrix_m is not None: return self.matrix_h, self.matrix_m # Computing the matrix term h_columns = [] for i in range(self.nn_params.num_hidden_layers + 1): current_col_elems = [] for j in range(i): current_col_elems.append( tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]])) # For the first layer, there is no relu constraint if i == 0: current_col_elems.append(utils.diag(self.lambda_lu[i])) else: current_col_elems.append( utils.diag(self.lambda_lu[i] + self.lambda_quad[i])) if i < self.nn_params.num_hidden_layers: current_col_elems.append(tf.matmul( utils.diag(-1 * self.lambda_quad[i + 1]), self.nn_params.weights[i])) for j in range(i + 2, self.nn_params.num_hidden_layers + 1): current_col_elems.append( tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]])) current_column = tf.concat(current_col_elems, 0) h_columns.append(current_column) self.matrix_h = tf.concat(h_columns, 1) self.matrix_h = (self.matrix_h + tf.transpose(self.matrix_h)) self.matrix_m = tf.concat( [ tf.concat([tf.reshape(self.nu, (1, 1)), tf.transpose(self.vector_g)], axis=1), tf.concat([self.vector_g, self.matrix_h], axis=1) ], axis=0) return self.matrix_h, self.matrix_m
python
def get_full_psd_matrix(self): """Function that returns the tf graph corresponding to the entire matrix M. Returns: matrix_h: unrolled version of tf matrix corresponding to H matrix_m: unrolled tf matrix corresponding to M """ if self.matrix_m is not None: return self.matrix_h, self.matrix_m # Computing the matrix term h_columns = [] for i in range(self.nn_params.num_hidden_layers + 1): current_col_elems = [] for j in range(i): current_col_elems.append( tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]])) # For the first layer, there is no relu constraint if i == 0: current_col_elems.append(utils.diag(self.lambda_lu[i])) else: current_col_elems.append( utils.diag(self.lambda_lu[i] + self.lambda_quad[i])) if i < self.nn_params.num_hidden_layers: current_col_elems.append(tf.matmul( utils.diag(-1 * self.lambda_quad[i + 1]), self.nn_params.weights[i])) for j in range(i + 2, self.nn_params.num_hidden_layers + 1): current_col_elems.append( tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]])) current_column = tf.concat(current_col_elems, 0) h_columns.append(current_column) self.matrix_h = tf.concat(h_columns, 1) self.matrix_h = (self.matrix_h + tf.transpose(self.matrix_h)) self.matrix_m = tf.concat( [ tf.concat([tf.reshape(self.nu, (1, 1)), tf.transpose(self.vector_g)], axis=1), tf.concat([self.vector_g, self.matrix_h], axis=1) ], axis=0) return self.matrix_h, self.matrix_m
[ "def", "get_full_psd_matrix", "(", "self", ")", ":", "if", "self", ".", "matrix_m", "is", "not", "None", ":", "return", "self", ".", "matrix_h", ",", "self", ".", "matrix_m", "# Computing the matrix term", "h_columns", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "nn_params", ".", "num_hidden_layers", "+", "1", ")", ":", "current_col_elems", "=", "[", "]", "for", "j", "in", "range", "(", "i", ")", ":", "current_col_elems", ".", "append", "(", "tf", ".", "zeros", "(", "[", "self", ".", "nn_params", ".", "sizes", "[", "j", "]", ",", "self", ".", "nn_params", ".", "sizes", "[", "i", "]", "]", ")", ")", "# For the first layer, there is no relu constraint", "if", "i", "==", "0", ":", "current_col_elems", ".", "append", "(", "utils", ".", "diag", "(", "self", ".", "lambda_lu", "[", "i", "]", ")", ")", "else", ":", "current_col_elems", ".", "append", "(", "utils", ".", "diag", "(", "self", ".", "lambda_lu", "[", "i", "]", "+", "self", ".", "lambda_quad", "[", "i", "]", ")", ")", "if", "i", "<", "self", ".", "nn_params", ".", "num_hidden_layers", ":", "current_col_elems", ".", "append", "(", "tf", ".", "matmul", "(", "utils", ".", "diag", "(", "-", "1", "*", "self", ".", "lambda_quad", "[", "i", "+", "1", "]", ")", ",", "self", ".", "nn_params", ".", "weights", "[", "i", "]", ")", ")", "for", "j", "in", "range", "(", "i", "+", "2", ",", "self", ".", "nn_params", ".", "num_hidden_layers", "+", "1", ")", ":", "current_col_elems", ".", "append", "(", "tf", ".", "zeros", "(", "[", "self", ".", "nn_params", ".", "sizes", "[", "j", "]", ",", "self", ".", "nn_params", ".", "sizes", "[", "i", "]", "]", ")", ")", "current_column", "=", "tf", ".", "concat", "(", "current_col_elems", ",", "0", ")", "h_columns", ".", "append", "(", "current_column", ")", "self", ".", "matrix_h", "=", "tf", ".", "concat", "(", "h_columns", ",", "1", ")", "self", ".", "matrix_h", "=", "(", "self", ".", "matrix_h", "+", "tf", ".", "transpose", "(", "self", ".", "matrix_h", ")", ")", "self", ".", "matrix_m", "=", "tf", ".", "concat", "(", "[", "tf", ".", "concat", "(", "[", "tf", ".", "reshape", "(", "self", ".", "nu", ",", "(", "1", ",", "1", ")", ")", ",", "tf", ".", "transpose", "(", "self", ".", "vector_g", ")", "]", ",", "axis", "=", "1", ")", ",", "tf", ".", "concat", "(", "[", "self", ".", "vector_g", ",", "self", ".", "matrix_h", "]", ",", "axis", "=", "1", ")", "]", ",", "axis", "=", "0", ")", "return", "self", ".", "matrix_h", ",", "self", ".", "matrix_m" ]
Function that returns the tf graph corresponding to the entire matrix M. Returns: matrix_h: unrolled version of tf matrix corresponding to H matrix_m: unrolled tf matrix corresponding to M
[ "Function", "that", "returns", "the", "tf", "graph", "corresponding", "to", "the", "entire", "matrix", "M", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/dual_formulation.py#L380-L423
28,521
tensorflow/cleverhans
cleverhans/experimental/certification/dual_formulation.py
DualFormulation.compute_certificate
def compute_certificate(self, current_step, feed_dictionary): """ Function to compute the certificate based either current value or dual variables loaded from dual folder """ feed_dict = feed_dictionary.copy() nu = feed_dict[self.nu] second_term = self.make_m_psd(nu, feed_dict) tf.logging.info('Nu after modifying: ' + str(second_term)) feed_dict.update({self.nu: second_term}) computed_certificate = self.sess.run(self.unconstrained_objective, feed_dict=feed_dict) tf.logging.info('Inner step: %d, current value of certificate: %f', current_step, computed_certificate) # Sometimes due to either overflow or instability in inverses, # the returned certificate is large and negative -- keeping a check if LOWER_CERT_BOUND < computed_certificate < 0: _, min_eig_val_m = self.get_lanczos_eig(feed_dict=feed_dict) tf.logging.info('min eig val from lanczos: ' + str(min_eig_val_m)) input_vector_m = tf.placeholder(tf.float32, shape=(self.matrix_m_dimension, 1)) output_vector_m = self.get_psd_product(input_vector_m) def np_vector_prod_fn_m(np_vector): np_vector = np.reshape(np_vector, [-1, 1]) feed_dict.update({input_vector_m:np_vector}) output_np_vector = self.sess.run(output_vector_m, feed_dict=feed_dict) return output_np_vector linear_operator_m = LinearOperator((self.matrix_m_dimension, self.matrix_m_dimension), matvec=np_vector_prod_fn_m) # Performing shift invert scipy operation when eig val estimate is available min_eig_val_m_scipy, _ = eigs(linear_operator_m, k=1, which='SR', tol=TOL) tf.logging.info('min eig val m from scipy: ' + str(min_eig_val_m_scipy)) if min_eig_val_m - TOL > 0: tf.logging.info('Found certificate of robustness!') return True return False
python
def compute_certificate(self, current_step, feed_dictionary): """ Function to compute the certificate based either current value or dual variables loaded from dual folder """ feed_dict = feed_dictionary.copy() nu = feed_dict[self.nu] second_term = self.make_m_psd(nu, feed_dict) tf.logging.info('Nu after modifying: ' + str(second_term)) feed_dict.update({self.nu: second_term}) computed_certificate = self.sess.run(self.unconstrained_objective, feed_dict=feed_dict) tf.logging.info('Inner step: %d, current value of certificate: %f', current_step, computed_certificate) # Sometimes due to either overflow or instability in inverses, # the returned certificate is large and negative -- keeping a check if LOWER_CERT_BOUND < computed_certificate < 0: _, min_eig_val_m = self.get_lanczos_eig(feed_dict=feed_dict) tf.logging.info('min eig val from lanczos: ' + str(min_eig_val_m)) input_vector_m = tf.placeholder(tf.float32, shape=(self.matrix_m_dimension, 1)) output_vector_m = self.get_psd_product(input_vector_m) def np_vector_prod_fn_m(np_vector): np_vector = np.reshape(np_vector, [-1, 1]) feed_dict.update({input_vector_m:np_vector}) output_np_vector = self.sess.run(output_vector_m, feed_dict=feed_dict) return output_np_vector linear_operator_m = LinearOperator((self.matrix_m_dimension, self.matrix_m_dimension), matvec=np_vector_prod_fn_m) # Performing shift invert scipy operation when eig val estimate is available min_eig_val_m_scipy, _ = eigs(linear_operator_m, k=1, which='SR', tol=TOL) tf.logging.info('min eig val m from scipy: ' + str(min_eig_val_m_scipy)) if min_eig_val_m - TOL > 0: tf.logging.info('Found certificate of robustness!') return True return False
[ "def", "compute_certificate", "(", "self", ",", "current_step", ",", "feed_dictionary", ")", ":", "feed_dict", "=", "feed_dictionary", ".", "copy", "(", ")", "nu", "=", "feed_dict", "[", "self", ".", "nu", "]", "second_term", "=", "self", ".", "make_m_psd", "(", "nu", ",", "feed_dict", ")", "tf", ".", "logging", ".", "info", "(", "'Nu after modifying: '", "+", "str", "(", "second_term", ")", ")", "feed_dict", ".", "update", "(", "{", "self", ".", "nu", ":", "second_term", "}", ")", "computed_certificate", "=", "self", ".", "sess", ".", "run", "(", "self", ".", "unconstrained_objective", ",", "feed_dict", "=", "feed_dict", ")", "tf", ".", "logging", ".", "info", "(", "'Inner step: %d, current value of certificate: %f'", ",", "current_step", ",", "computed_certificate", ")", "# Sometimes due to either overflow or instability in inverses,", "# the returned certificate is large and negative -- keeping a check", "if", "LOWER_CERT_BOUND", "<", "computed_certificate", "<", "0", ":", "_", ",", "min_eig_val_m", "=", "self", ".", "get_lanczos_eig", "(", "feed_dict", "=", "feed_dict", ")", "tf", ".", "logging", ".", "info", "(", "'min eig val from lanczos: '", "+", "str", "(", "min_eig_val_m", ")", ")", "input_vector_m", "=", "tf", ".", "placeholder", "(", "tf", ".", "float32", ",", "shape", "=", "(", "self", ".", "matrix_m_dimension", ",", "1", ")", ")", "output_vector_m", "=", "self", ".", "get_psd_product", "(", "input_vector_m", ")", "def", "np_vector_prod_fn_m", "(", "np_vector", ")", ":", "np_vector", "=", "np", ".", "reshape", "(", "np_vector", ",", "[", "-", "1", ",", "1", "]", ")", "feed_dict", ".", "update", "(", "{", "input_vector_m", ":", "np_vector", "}", ")", "output_np_vector", "=", "self", ".", "sess", ".", "run", "(", "output_vector_m", ",", "feed_dict", "=", "feed_dict", ")", "return", "output_np_vector", "linear_operator_m", "=", "LinearOperator", "(", "(", "self", ".", "matrix_m_dimension", ",", "self", ".", "matrix_m_dimension", ")", ",", "matvec", "=", "np_vector_prod_fn_m", ")", "# Performing shift invert scipy operation when eig val estimate is available", "min_eig_val_m_scipy", ",", "_", "=", "eigs", "(", "linear_operator_m", ",", "k", "=", "1", ",", "which", "=", "'SR'", ",", "tol", "=", "TOL", ")", "tf", ".", "logging", ".", "info", "(", "'min eig val m from scipy: '", "+", "str", "(", "min_eig_val_m_scipy", ")", ")", "if", "min_eig_val_m", "-", "TOL", ">", "0", ":", "tf", ".", "logging", ".", "info", "(", "'Found certificate of robustness!'", ")", "return", "True", "return", "False" ]
Function to compute the certificate based on either the current value or dual variables loaded from the dual folder
[ "Function", "to", "compute", "the", "certificate", "based", "on", "either", "the", "current", "value", "or", "dual", "variables", "loaded", "from", "the", "dual", "folder" ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/dual_formulation.py#L483-L521
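The scipy cross-check in compute_certificate only needs a matrix-vector product. A self-contained sketch of that pattern: estimate the smallest (most negative real part) eigenvalue of an implicitly defined symmetric matrix through scipy's LinearOperator and eigs. Toy matrix here, not the certification M matrix.

    import numpy as np
    from scipy.sparse.linalg import LinearOperator, eigs

    dim = 50
    rng = np.random.RandomState(0)
    a = rng.randn(dim, dim)
    sym = (a + a.T) / 2.0            # matrix we only ever touch through matvec

    def matvec(v):
        return sym.dot(v)

    op = LinearOperator((dim, dim), matvec=matvec)
    min_eig, _ = eigs(op, k=1, which='SR', tol=1e-5)
    print(min_eig[0].real, np.linalg.eigvalsh(sym)[0])   # the two should roughly agree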
28,522
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py
get_extract_command_template
def get_extract_command_template(filename): """Returns extraction command based on the filename extension.""" for k, v in iteritems(EXTRACT_COMMAND): if filename.endswith(k): return v return None
python
def get_extract_command_template(filename): """Returns extraction command based on the filename extension.""" for k, v in iteritems(EXTRACT_COMMAND): if filename.endswith(k): return v return None
[ "def", "get_extract_command_template", "(", "filename", ")", ":", "for", "k", ",", "v", "in", "iteritems", "(", "EXTRACT_COMMAND", ")", ":", "if", "filename", ".", "endswith", "(", "k", ")", ":", "return", "v", "return", "None" ]
Returns extraction command based on the filename extension.
[ "Returns", "extraction", "command", "based", "on", "the", "filename", "extension", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py#L45-L50
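get_extract_command_template is a suffix lookup in the module-level EXTRACT_COMMAND mapping (defined elsewhere in validate_submission_lib.py). A hedged sketch with a made-up mapping to show the behavior; the real command templates may differ.

    from six import iteritems

    # Hypothetical mapping for illustration only.
    EXTRACT_COMMAND = {
        '.zip': ['unzip', '${src}', '-d', '${dst}'],
        '.tar': ['tar', 'xvf', '${src}', '-C', '${dst}'],
        '.tar.gz': ['tar', 'xvzf', '${src}', '-C', '${dst}'],
    }

    def get_extract_command_template(filename):
        for k, v in iteritems(EXTRACT_COMMAND):
            if filename.endswith(k):
                return v
        return None

    print(get_extract_command_template('submission.tar.gz'))   # tar.gz template
    print(get_extract_command_template('submission.7z'))       # None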
28,523
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py
shell_call
def shell_call(command, **kwargs): """Calls shell command with parameter substitution. Args: command: command to run as a list of tokens **kwargs: dirctionary with substitutions Returns: whether command was successful, i.e. returned 0 status code Example of usage: shell_call(['cp', '${A}', '${B}'], A='src_file', B='dst_file') will call shell command: cp src_file dst_file """ command = list(command) for i in range(len(command)): m = CMD_VARIABLE_RE.match(command[i]) if m: var_id = m.group(1) if var_id in kwargs: command[i] = kwargs[var_id] return subprocess.call(command) == 0
python
def shell_call(command, **kwargs): """Calls shell command with parameter substitution. Args: command: command to run as a list of tokens **kwargs: dirctionary with substitutions Returns: whether command was successful, i.e. returned 0 status code Example of usage: shell_call(['cp', '${A}', '${B}'], A='src_file', B='dst_file') will call shell command: cp src_file dst_file """ command = list(command) for i in range(len(command)): m = CMD_VARIABLE_RE.match(command[i]) if m: var_id = m.group(1) if var_id in kwargs: command[i] = kwargs[var_id] return subprocess.call(command) == 0
[ "def", "shell_call", "(", "command", ",", "*", "*", "kwargs", ")", ":", "command", "=", "list", "(", "command", ")", "for", "i", "in", "range", "(", "len", "(", "command", ")", ")", ":", "m", "=", "CMD_VARIABLE_RE", ".", "match", "(", "command", "[", "i", "]", ")", "if", "m", ":", "var_id", "=", "m", ".", "group", "(", "1", ")", "if", "var_id", "in", "kwargs", ":", "command", "[", "i", "]", "=", "kwargs", "[", "var_id", "]", "return", "subprocess", ".", "call", "(", "command", ")", "==", "0" ]
Calls shell command with parameter substitution. Args: command: command to run as a list of tokens **kwargs: dictionary with substitutions Returns: whether command was successful, i.e. returned 0 status code Example of usage: shell_call(['cp', '${A}', '${B}'], A='src_file', B='dst_file') will call shell command: cp src_file dst_file
[ "Calls", "shell", "command", "with", "parameter", "substitution", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py#L53-L75
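A quick usage sketch of the substitution behavior documented above. CMD_VARIABLE_RE is defined elsewhere in the module; the pattern below is an assumption chosen so that whole '${NAME}' tokens are matched, which is how the function is used.

    import re
    import subprocess

    # Assumed pattern; the real CMD_VARIABLE_RE lives in validate_submission_lib.py.
    CMD_VARIABLE_RE = re.compile(r'^\$\{(\w+)\}$')

    def shell_call(command, **kwargs):
        command = list(command)
        for i in range(len(command)):
            m = CMD_VARIABLE_RE.match(command[i])
            if m and m.group(1) in kwargs:
                command[i] = kwargs[m.group(1)]
        return subprocess.call(command) == 0

    # Equivalent to running: cp src_file dst_file (returns False if cp fails)
    ok = shell_call(['cp', '${A}', '${B}'], A='src_file', B='dst_file')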
28,524
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py
make_directory_writable
def make_directory_writable(dirname): """Makes directory readable and writable by everybody. Args: dirname: name of the directory Returns: True if operation was successfull If you run something inside Docker container and it writes files, then these files will be written as root user with restricted permissions. So to be able to read/modify these files outside of Docker you have to change permissions to be world readable and writable. """ retval = shell_call(['docker', 'run', '-v', '{0}:/output_dir'.format(dirname), 'busybox:1.27.2', 'chmod', '-R', 'a+rwx', '/output_dir']) if not retval: logging.error('Failed to change permissions on directory: %s', dirname) return retval
python
def make_directory_writable(dirname): """Makes directory readable and writable by everybody. Args: dirname: name of the directory Returns: True if operation was successfull If you run something inside Docker container and it writes files, then these files will be written as root user with restricted permissions. So to be able to read/modify these files outside of Docker you have to change permissions to be world readable and writable. """ retval = shell_call(['docker', 'run', '-v', '{0}:/output_dir'.format(dirname), 'busybox:1.27.2', 'chmod', '-R', 'a+rwx', '/output_dir']) if not retval: logging.error('Failed to change permissions on directory: %s', dirname) return retval
[ "def", "make_directory_writable", "(", "dirname", ")", ":", "retval", "=", "shell_call", "(", "[", "'docker'", ",", "'run'", ",", "'-v'", ",", "'{0}:/output_dir'", ".", "format", "(", "dirname", ")", ",", "'busybox:1.27.2'", ",", "'chmod'", ",", "'-R'", ",", "'a+rwx'", ",", "'/output_dir'", "]", ")", "if", "not", "retval", ":", "logging", ".", "error", "(", "'Failed to change permissions on directory: %s'", ",", "dirname", ")", "return", "retval" ]
Makes directory readable and writable by everybody. Args: dirname: name of the directory Returns: True if operation was successful If you run something inside Docker container and it writes files, then these files will be written as root user with restricted permissions. So to be able to read/modify these files outside of Docker you have to change permissions to be world readable and writable.
[ "Makes", "directory", "readable", "and", "writable", "by", "everybody", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py#L78-L98
28,525
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py
SubmissionValidator._extract_submission
def _extract_submission(self, filename): """Extracts submission and moves it into self._extracted_submission_dir.""" # verify filesize file_size = os.path.getsize(filename) if file_size > MAX_SUBMISSION_SIZE_ZIPPED: logging.error('Submission archive size %d is exceeding limit %d', file_size, MAX_SUBMISSION_SIZE_ZIPPED) return False # determime archive type exctract_command_tmpl = get_extract_command_template(filename) if not exctract_command_tmpl: logging.error('Input file has to be zip, tar or tar.gz archive; however ' 'found: %s', filename) return False # extract archive submission_dir = os.path.dirname(filename) submission_basename = os.path.basename(filename) logging.info('Extracting archive %s', filename) retval = shell_call( ['docker', 'run', '--network=none', '-v', '{0}:/input_dir'.format(submission_dir), '-v', '{0}:/output_dir'.format(self._tmp_extracted_dir), 'busybox:1.27.2'] + exctract_command_tmpl, src=os.path.join('/input_dir', submission_basename), dst='/output_dir') if not retval: logging.error('Failed to extract submission from file %s', filename) return False if not make_directory_writable(self._tmp_extracted_dir): return False # find submission root root_dir = self._tmp_extracted_dir root_dir_content = [d for d in os.listdir(root_dir) if d != '__MACOSX'] if (len(root_dir_content) == 1 and os.path.isdir(os.path.join(root_dir, root_dir_content[0]))): logging.info('Looks like submission root is in subdirectory "%s" of ' 'the archive', root_dir_content[0]) root_dir = os.path.join(root_dir, root_dir_content[0]) # Move files to self._extracted_submission_dir. # At this point self._extracted_submission_dir does not exist, # so following command will simply rename root_dir into # self._extracted_submission_dir if not shell_call(['mv', root_dir, self._extracted_submission_dir]): logging.error('Can''t move submission files from root directory') return False return True
python
def _extract_submission(self, filename): """Extracts submission and moves it into self._extracted_submission_dir.""" # verify filesize file_size = os.path.getsize(filename) if file_size > MAX_SUBMISSION_SIZE_ZIPPED: logging.error('Submission archive size %d is exceeding limit %d', file_size, MAX_SUBMISSION_SIZE_ZIPPED) return False # determime archive type exctract_command_tmpl = get_extract_command_template(filename) if not exctract_command_tmpl: logging.error('Input file has to be zip, tar or tar.gz archive; however ' 'found: %s', filename) return False # extract archive submission_dir = os.path.dirname(filename) submission_basename = os.path.basename(filename) logging.info('Extracting archive %s', filename) retval = shell_call( ['docker', 'run', '--network=none', '-v', '{0}:/input_dir'.format(submission_dir), '-v', '{0}:/output_dir'.format(self._tmp_extracted_dir), 'busybox:1.27.2'] + exctract_command_tmpl, src=os.path.join('/input_dir', submission_basename), dst='/output_dir') if not retval: logging.error('Failed to extract submission from file %s', filename) return False if not make_directory_writable(self._tmp_extracted_dir): return False # find submission root root_dir = self._tmp_extracted_dir root_dir_content = [d for d in os.listdir(root_dir) if d != '__MACOSX'] if (len(root_dir_content) == 1 and os.path.isdir(os.path.join(root_dir, root_dir_content[0]))): logging.info('Looks like submission root is in subdirectory "%s" of ' 'the archive', root_dir_content[0]) root_dir = os.path.join(root_dir, root_dir_content[0]) # Move files to self._extracted_submission_dir. # At this point self._extracted_submission_dir does not exist, # so following command will simply rename root_dir into # self._extracted_submission_dir if not shell_call(['mv', root_dir, self._extracted_submission_dir]): logging.error('Can''t move submission files from root directory') return False return True
[ "def", "_extract_submission", "(", "self", ",", "filename", ")", ":", "# verify filesize", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "filename", ")", "if", "file_size", ">", "MAX_SUBMISSION_SIZE_ZIPPED", ":", "logging", ".", "error", "(", "'Submission archive size %d is exceeding limit %d'", ",", "file_size", ",", "MAX_SUBMISSION_SIZE_ZIPPED", ")", "return", "False", "# determime archive type", "exctract_command_tmpl", "=", "get_extract_command_template", "(", "filename", ")", "if", "not", "exctract_command_tmpl", ":", "logging", ".", "error", "(", "'Input file has to be zip, tar or tar.gz archive; however '", "'found: %s'", ",", "filename", ")", "return", "False", "# extract archive", "submission_dir", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "submission_basename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "logging", ".", "info", "(", "'Extracting archive %s'", ",", "filename", ")", "retval", "=", "shell_call", "(", "[", "'docker'", ",", "'run'", ",", "'--network=none'", ",", "'-v'", ",", "'{0}:/input_dir'", ".", "format", "(", "submission_dir", ")", ",", "'-v'", ",", "'{0}:/output_dir'", ".", "format", "(", "self", ".", "_tmp_extracted_dir", ")", ",", "'busybox:1.27.2'", "]", "+", "exctract_command_tmpl", ",", "src", "=", "os", ".", "path", ".", "join", "(", "'/input_dir'", ",", "submission_basename", ")", ",", "dst", "=", "'/output_dir'", ")", "if", "not", "retval", ":", "logging", ".", "error", "(", "'Failed to extract submission from file %s'", ",", "filename", ")", "return", "False", "if", "not", "make_directory_writable", "(", "self", ".", "_tmp_extracted_dir", ")", ":", "return", "False", "# find submission root", "root_dir", "=", "self", ".", "_tmp_extracted_dir", "root_dir_content", "=", "[", "d", "for", "d", "in", "os", ".", "listdir", "(", "root_dir", ")", "if", "d", "!=", "'__MACOSX'", "]", "if", "(", "len", "(", "root_dir_content", ")", "==", "1", "and", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "root_dir_content", "[", "0", "]", ")", ")", ")", ":", "logging", ".", "info", "(", "'Looks like submission root is in subdirectory \"%s\" of '", "'the archive'", ",", "root_dir_content", "[", "0", "]", ")", "root_dir", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "root_dir_content", "[", "0", "]", ")", "# Move files to self._extracted_submission_dir.", "# At this point self._extracted_submission_dir does not exist,", "# so following command will simply rename root_dir into", "# self._extracted_submission_dir", "if", "not", "shell_call", "(", "[", "'mv'", ",", "root_dir", ",", "self", ".", "_extracted_submission_dir", "]", ")", ":", "logging", ".", "error", "(", "'Can'", "'t move submission files from root directory'", ")", "return", "False", "return", "True" ]
Extracts submission and moves it into self._extracted_submission_dir.
[ "Extracts", "submission", "and", "moves", "it", "into", "self", ".", "_extracted_submission_dir", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py#L148-L194
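The extraction step above delegates archive-type detection to get_extract_command_template. The sketch below is only a plausible illustration of such a helper, under the assumption that shell_call substitutes its src=... and dst=... keyword arguments into '{src}' and '{dst}' placeholders; it is not necessarily the implementation used in validate_submission_lib.py.

def get_extract_command_template(filename):
  """Illustrative only: maps the archive extension to an extraction command.

  The '{src}' and '{dst}' placeholders are assumed to be filled in by
  shell_call from its src=... and dst=... keyword arguments.
  """
  if filename.endswith('.zip'):
    return ['unzip', '{src}', '-d', '{dst}']
  if filename.endswith('.tar.gz') or filename.endswith('.tgz'):
    return ['tar', 'xvzf', '{src}', '-C', '{dst}']
  if filename.endswith('.tar'):
    return ['tar', 'xvf', '{src}', '-C', '{dst}']
  return None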
28,526
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py
SubmissionValidator._verify_docker_image_size
def _verify_docker_image_size(self, image_name): """Verifies size of Docker image. Args: image_name: name of the Docker image. Returns: True if image size is within the limits, False otherwise. """ shell_call(['docker', 'pull', image_name]) try: image_size = subprocess.check_output( ['docker', 'inspect', '--format={{.Size}}', image_name]).strip() image_size = int(image_size) except (ValueError, subprocess.CalledProcessError) as e: logging.error('Failed to determine docker image size: %s', e) return False logging.info('Size of docker image %s is %d', image_name, image_size) if image_size > MAX_DOCKER_IMAGE_SIZE: logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE) return image_size <= MAX_DOCKER_IMAGE_SIZE
python
def _verify_docker_image_size(self, image_name): """Verifies size of Docker image. Args: image_name: name of the Docker image. Returns: True if image size is within the limits, False otherwise. """ shell_call(['docker', 'pull', image_name]) try: image_size = subprocess.check_output( ['docker', 'inspect', '--format={{.Size}}', image_name]).strip() image_size = int(image_size) except (ValueError, subprocess.CalledProcessError) as e: logging.error('Failed to determine docker image size: %s', e) return False logging.info('Size of docker image %s is %d', image_name, image_size) if image_size > MAX_DOCKER_IMAGE_SIZE: logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE) return image_size <= MAX_DOCKER_IMAGE_SIZE
[ "def", "_verify_docker_image_size", "(", "self", ",", "image_name", ")", ":", "shell_call", "(", "[", "'docker'", ",", "'pull'", ",", "image_name", "]", ")", "try", ":", "image_size", "=", "subprocess", ".", "check_output", "(", "[", "'docker'", ",", "'inspect'", ",", "'--format={{.Size}}'", ",", "image_name", "]", ")", ".", "strip", "(", ")", "image_size", "=", "int", "(", "image_size", ")", "except", "(", "ValueError", ",", "subprocess", ".", "CalledProcessError", ")", "as", "e", ":", "logging", ".", "error", "(", "'Failed to determine docker image size: %s'", ",", "e", ")", "return", "False", "logging", ".", "info", "(", "'Size of docker image %s is %d'", ",", "image_name", ",", "image_size", ")", "if", "image_size", ">", "MAX_DOCKER_IMAGE_SIZE", ":", "logging", ".", "error", "(", "'Image size exceeds limit %d'", ",", "MAX_DOCKER_IMAGE_SIZE", ")", "return", "image_size", "<=", "MAX_DOCKER_IMAGE_SIZE" ]
Verifies size of Docker image. Args: image_name: name of the Docker image. Returns: True if image size is within the limits, False otherwise.
[ "Verifies", "size", "of", "Docker", "image", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py#L247-L267
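For reference, the same size check can be reproduced outside the validator class. The sketch below assumes only that the Docker CLI is installed; the size limit shown is a made-up placeholder, not the competition's actual MAX_DOCKER_IMAGE_SIZE.

import logging
import subprocess

# Hypothetical limit for illustration; the real constant lives in the validation tool.
MAX_DOCKER_IMAGE_SIZE = 8 * 1024 ** 3

def docker_image_within_limit(image_name):
  """Pulls the image and checks its reported size against the limit."""
  subprocess.call(['docker', 'pull', image_name])
  try:
    image_size = int(subprocess.check_output(
        ['docker', 'inspect', '--format={{.Size}}', image_name]).strip())
  except (ValueError, subprocess.CalledProcessError) as e:
    logging.error('Failed to determine docker image size: %s', e)
    return False
  logging.info('Size of docker image %s is %d', image_name, image_size)
  return image_size <= MAX_DOCKER_IMAGE_SIZE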
28,527
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py
SubmissionValidator._prepare_sample_data
def _prepare_sample_data(self, submission_type): """Prepares sample data for the submission. Args: submission_type: type of the submission. """ # write images images = np.random.randint(0, 256, size=[BATCH_SIZE, 299, 299, 3], dtype=np.uint8) for i in range(BATCH_SIZE): Image.fromarray(images[i, :, :, :]).save( os.path.join(self._sample_input_dir, IMAGE_NAME_PATTERN.format(i))) # write target class for targeted attacks if submission_type == 'targeted_attack': target_classes = np.random.randint(1, 1001, size=[BATCH_SIZE]) target_class_filename = os.path.join(self._sample_input_dir, 'target_class.csv') with open(target_class_filename, 'w') as f: for i in range(BATCH_SIZE): f.write((IMAGE_NAME_PATTERN + ',{1}\n').format(i, target_classes[i]))
python
def _prepare_sample_data(self, submission_type): """Prepares sample data for the submission. Args: submission_type: type of the submission. """ # write images images = np.random.randint(0, 256, size=[BATCH_SIZE, 299, 299, 3], dtype=np.uint8) for i in range(BATCH_SIZE): Image.fromarray(images[i, :, :, :]).save( os.path.join(self._sample_input_dir, IMAGE_NAME_PATTERN.format(i))) # write target class for targeted attacks if submission_type == 'targeted_attack': target_classes = np.random.randint(1, 1001, size=[BATCH_SIZE]) target_class_filename = os.path.join(self._sample_input_dir, 'target_class.csv') with open(target_class_filename, 'w') as f: for i in range(BATCH_SIZE): f.write((IMAGE_NAME_PATTERN + ',{1}\n').format(i, target_classes[i]))
[ "def", "_prepare_sample_data", "(", "self", ",", "submission_type", ")", ":", "# write images", "images", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "256", ",", "size", "=", "[", "BATCH_SIZE", ",", "299", ",", "299", ",", "3", "]", ",", "dtype", "=", "np", ".", "uint8", ")", "for", "i", "in", "range", "(", "BATCH_SIZE", ")", ":", "Image", ".", "fromarray", "(", "images", "[", "i", ",", ":", ",", ":", ",", ":", "]", ")", ".", "save", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_sample_input_dir", ",", "IMAGE_NAME_PATTERN", ".", "format", "(", "i", ")", ")", ")", "# write target class for targeted attacks", "if", "submission_type", "==", "'targeted_attack'", ":", "target_classes", "=", "np", ".", "random", ".", "randint", "(", "1", ",", "1001", ",", "size", "=", "[", "BATCH_SIZE", "]", ")", "target_class_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_sample_input_dir", ",", "'target_class.csv'", ")", "with", "open", "(", "target_class_filename", ",", "'w'", ")", "as", "f", ":", "for", "i", "in", "range", "(", "BATCH_SIZE", ")", ":", "f", ".", "write", "(", "(", "IMAGE_NAME_PATTERN", "+", "',{1}\\n'", ")", ".", "format", "(", "i", ",", "target_classes", "[", "i", "]", ")", ")" ]
Prepares sample data for the submission. Args: submission_type: type of the submission.
[ "Prepares", "sample", "data", "for", "the", "submission", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py#L269-L288
28,528
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py
SubmissionValidator._verify_output
def _verify_output(self, submission_type): """Verifies correctness of the submission output. Args: submission_type: type of the submission Returns: True if output looks valid """ result = True if submission_type == 'defense': try: image_classification = load_defense_output( os.path.join(self._sample_output_dir, 'result.csv')) expected_keys = [IMAGE_NAME_PATTERN.format(i) for i in range(BATCH_SIZE)] if set(image_classification.keys()) != set(expected_keys): logging.error('Classification results are not saved for all images') result = False except IOError as e: logging.error('Failed to read defense output file: %s', e) result = False else: for i in range(BATCH_SIZE): image_filename = os.path.join(self._sample_output_dir, IMAGE_NAME_PATTERN.format(i)) try: img = np.array(Image.open(image_filename).convert('RGB')) if list(img.shape) != [299, 299, 3]: logging.error('Invalid image size %s for image %s', str(img.shape), image_filename) result = False except IOError as e: logging.error('Failed to read image file %s: %s', image_filename, e) result = False return result
python
def _verify_output(self, submission_type): """Verifies correctness of the submission output. Args: submission_type: type of the submission Returns: True if output looks valid """ result = True if submission_type == 'defense': try: image_classification = load_defense_output( os.path.join(self._sample_output_dir, 'result.csv')) expected_keys = [IMAGE_NAME_PATTERN.format(i) for i in range(BATCH_SIZE)] if set(image_classification.keys()) != set(expected_keys): logging.error('Classification results are not saved for all images') result = False except IOError as e: logging.error('Failed to read defense output file: %s', e) result = False else: for i in range(BATCH_SIZE): image_filename = os.path.join(self._sample_output_dir, IMAGE_NAME_PATTERN.format(i)) try: img = np.array(Image.open(image_filename).convert('RGB')) if list(img.shape) != [299, 299, 3]: logging.error('Invalid image size %s for image %s', str(img.shape), image_filename) result = False except IOError as e: logging.error('Failed to read image file %s: %s', image_filename, e) result = False return result
[ "def", "_verify_output", "(", "self", ",", "submission_type", ")", ":", "result", "=", "True", "if", "submission_type", "==", "'defense'", ":", "try", ":", "image_classification", "=", "load_defense_output", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_sample_output_dir", ",", "'result.csv'", ")", ")", "expected_keys", "=", "[", "IMAGE_NAME_PATTERN", ".", "format", "(", "i", ")", "for", "i", "in", "range", "(", "BATCH_SIZE", ")", "]", "if", "set", "(", "image_classification", ".", "keys", "(", ")", ")", "!=", "set", "(", "expected_keys", ")", ":", "logging", ".", "error", "(", "'Classification results are not saved for all images'", ")", "result", "=", "False", "except", "IOError", "as", "e", ":", "logging", ".", "error", "(", "'Failed to read defense output file: %s'", ",", "e", ")", "result", "=", "False", "else", ":", "for", "i", "in", "range", "(", "BATCH_SIZE", ")", ":", "image_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_sample_output_dir", ",", "IMAGE_NAME_PATTERN", ".", "format", "(", "i", ")", ")", "try", ":", "img", "=", "np", ".", "array", "(", "Image", ".", "open", "(", "image_filename", ")", ".", "convert", "(", "'RGB'", ")", ")", "if", "list", "(", "img", ".", "shape", ")", "!=", "[", "299", ",", "299", ",", "3", "]", ":", "logging", ".", "error", "(", "'Invalid image size %s for image %s'", ",", "str", "(", "img", ".", "shape", ")", ",", "image_filename", ")", "result", "=", "False", "except", "IOError", "as", "e", ":", "result", "=", "False", "return", "result" ]
Verifies correctness of the submission output. Args: submission_type: type of the submission Returns: True if output looks valid
[ "Verifies", "correctness", "of", "the", "submission", "output", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py#L336-L370
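The defense branch above relies on load_defense_output to parse result.csv. A minimal sketch of what such a parser could look like is given below, assuming the file holds 'image_filename,label' rows; the real helper in the evaluation code may be stricter.

import csv

def load_defense_output(result_csv_path):
  """Illustrative parser: returns a dict mapping image filename -> predicted label."""
  predictions = {}
  with open(result_csv_path) as f:
    for row in csv.reader(f):
      if len(row) >= 2:
        predictions[row[0]] = int(row[1])
  return predictions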
28,529
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py
SubmissionValidator.validate_submission
def validate_submission(self, filename): """Validates submission. Args: filename: submission filename Returns: submission metadata or None if submission is invalid """ self._prepare_temp_dir() # Convert filename to be absolute path, relative path might cause problems # with mounting directory in Docker filename = os.path.abspath(filename) # extract submission if not self._extract_submission(filename): return None # verify submission size if not self._verify_submission_size(): return None # Load metadata metadata = self._load_and_verify_metadata() if not metadata: return None submission_type = metadata['type'] # verify docker container size if not self._verify_docker_image_size(metadata['container_gpu']): return None # Try to run submission on sample data self._prepare_sample_data(submission_type) if not self._run_submission(metadata): logging.error('Failure while running submission') return None if not self._verify_output(submission_type): logging.warning('Some of the outputs of your submission are invalid or ' 'missing. Your submission will still be evaluated, ' 'but you might get a lower score.') return metadata
python
def validate_submission(self, filename): """Validates submission. Args: filename: submission filename Returns: submission metadata or None if submission is invalid """ self._prepare_temp_dir() # Convert filename to be absolute path, relative path might cause problems # with mounting directory in Docker filename = os.path.abspath(filename) # extract submission if not self._extract_submission(filename): return None # verify submission size if not self._verify_submission_size(): return None # Load metadata metadata = self._load_and_verify_metadata() if not metadata: return None submission_type = metadata['type'] # verify docker container size if not self._verify_docker_image_size(metadata['container_gpu']): return None # Try to run submission on sample data self._prepare_sample_data(submission_type) if not self._run_submission(metadata): logging.error('Failure while running submission') return None if not self._verify_output(submission_type): logging.warning('Some of the outputs of your submission are invalid or ' 'missing. Your submission will still be evaluated, ' 'but you might get a lower score.') return metadata
[ "def", "validate_submission", "(", "self", ",", "filename", ")", ":", "self", ".", "_prepare_temp_dir", "(", ")", "# Convert filename to be absolute path, relative path might cause problems", "# with mounting directory in Docker", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "# extract submission", "if", "not", "self", ".", "_extract_submission", "(", "filename", ")", ":", "return", "None", "# verify submission size", "if", "not", "self", ".", "_verify_submission_size", "(", ")", ":", "return", "None", "# Load metadata", "metadata", "=", "self", ".", "_load_and_verify_metadata", "(", ")", "if", "not", "metadata", ":", "return", "None", "submission_type", "=", "metadata", "[", "'type'", "]", "# verify docker container size", "if", "not", "self", ".", "_verify_docker_image_size", "(", "metadata", "[", "'container_gpu'", "]", ")", ":", "return", "None", "# Try to run submission on sample data", "self", ".", "_prepare_sample_data", "(", "submission_type", ")", "if", "not", "self", ".", "_run_submission", "(", "metadata", ")", ":", "logging", ".", "error", "(", "'Failure while running submission'", ")", "return", "None", "if", "not", "self", ".", "_verify_output", "(", "submission_type", ")", ":", "logging", ".", "warning", "(", "'Some of the outputs of your submission are invalid or '", "'missing. You submission still will be evaluation '", "'but you might get lower score.'", ")", "return", "metadata" ]
Validates submission. Args: filename: submission filename Returns: submission metadata or None if submission is invalid
[ "Validates", "submission", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_submission_lib.py#L372-L408
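A hypothetical driver for the method above is sketched below. The import path follows this file's location, but the SubmissionValidator constructor arguments (temp_dir, use_gpu) are assumptions for illustration; the real validation tool has its own entry point.

import logging

from validate_submission_lib import SubmissionValidator  # assumes the module is on PYTHONPATH

def check_one_submission(archive_path):
  """Runs validation on a single submission archive and reports the outcome."""
  logging.basicConfig(level=logging.INFO)
  validator = SubmissionValidator(temp_dir='/tmp/submission_validation',  # assumed args
                                  use_gpu=False)
  metadata = validator.validate_submission(archive_path)
  if metadata is None:
    print('Submission is INVALID')
  else:
    print('Submission looks valid, type: {0}'.format(metadata['type']))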
28,530
tensorflow/cleverhans
cleverhans/loss.py
Loss.save
def save(self, path): """Save loss in json format """ with open(os.path.join(path, 'loss.json'), 'w') as f: json.dump(dict(loss=self.__class__.__name__, params=self.hparams), f)
python
def save(self, path): """Save loss in json format """ with open(os.path.join(path, 'loss.json'), 'w') as f: json.dump(dict(loss=self.__class__.__name__, params=self.hparams), f)
[ "def", "save", "(", "self", ",", "path", ")", ":", "json", ".", "dump", "(", "dict", "(", "loss", "=", "self", ".", "__class__", ".", "__name__", ",", "params", "=", "self", ".", "hparams", ")", ",", "open", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'loss.json'", ")", ",", "'wb'", ")", ")" ]
Save loss in json format
[ "Save", "loss", "in", "json", "format" ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/loss.py#L66-L71
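A minimal counterpart for reading the file written by Loss.save might look like the sketch below; this loader is illustrative only and is not necessarily the one cleverhans provides.

import json
import os

def load_loss_config(path):
  """Returns (loss_class_name, hparams) read from path/loss.json."""
  with open(os.path.join(path, 'loss.json')) as f:
    config = json.load(f)
  return config['loss'], config['params']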
28,531
tensorflow/cleverhans
cleverhans/attacks_tfe.py
Attack.generate_np
def generate_np(self, x_val, **kwargs): """ Generate adversarial examples and return them as a NumPy array. :param x_val: A NumPy array with the original inputs. :param **kwargs: optional parameters used by child classes. :return: A NumPy array holding the adversarial examples. """ tfe = tf.contrib.eager x = tfe.Variable(x_val) adv_x = self.generate(x, **kwargs) return adv_x.numpy()
python
def generate_np(self, x_val, **kwargs): """ Generate adversarial examples and return them as a NumPy array. :param x_val: A NumPy array with the original inputs. :param **kwargs: optional parameters used by child classes. :return: A NumPy array holding the adversarial examples. """ tfe = tf.contrib.eager x = tfe.Variable(x_val) adv_x = self.generate(x, **kwargs) return adv_x.numpy()
[ "def", "generate_np", "(", "self", ",", "x_val", ",", "*", "*", "kwargs", ")", ":", "tfe", "=", "tf", ".", "contrib", ".", "eager", "x", "=", "tfe", ".", "Variable", "(", "x_val", ")", "adv_x", "=", "self", ".", "generate", "(", "x", ",", "*", "*", "kwargs", ")", "return", "adv_x", ".", "numpy", "(", ")" ]
Generate adversarial examples and return them as a NumPy array. :param x_val: A NumPy array with the original inputs. :param **kwargs: optional parameters used by child classes. :return: A NumPy array holding the adversarial examples.
[ "Generate", "adversarial", "examples", "and", "return", "them", "as", "a", "NumPy", "array", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks_tfe.py#L57-L68
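Typical eager-mode usage of generate_np is sketched below; it assumes TensorFlow 1.x with eager execution enabled, an eager-compatible cleverhans Model instance, and an attack subclass such as FastGradientMethod from this module. The model and data lines are placeholders.

import numpy as np
import tensorflow as tf
from cleverhans.attacks_tfe import FastGradientMethod

tf.enable_eager_execution()  # TF 1.x API

# model = ...  # an eager-compatible cleverhans Model subclass (assumed)
# x_test = np.random.rand(4, 28, 28, 1).astype(np.float32)
# fgsm = FastGradientMethod(model)
# x_adv = fgsm.generate_np(x_test, eps=0.3, clip_min=0.0, clip_max=1.0)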
28,532
tensorflow/cleverhans
cleverhans/devtools/list_files.py
list_files
def list_files(suffix=""): """ Returns a list of all files in CleverHans with the given suffix. Parameters ---------- suffix : str Returns ------- file_list : list A list of all files in CleverHans whose filepath ends with `suffix`. """ cleverhans_path = os.path.abspath(cleverhans.__path__[0]) # In some environments cleverhans_path does not point to a real directory. # In such case return empty list. if not os.path.isdir(cleverhans_path): return [] repo_path = os.path.abspath(os.path.join(cleverhans_path, os.pardir)) file_list = _list_files(cleverhans_path, suffix) extra_dirs = ['cleverhans_tutorials', 'examples', 'scripts', 'tests_tf', 'tests_pytorch'] for extra_dir in extra_dirs: extra_path = os.path.join(repo_path, extra_dir) if os.path.isdir(extra_path): extra_files = _list_files(extra_path, suffix) extra_files = [os.path.join(os.pardir, path) for path in extra_files] file_list = file_list + extra_files return file_list
python
def list_files(suffix=""): """ Returns a list of all files in CleverHans with the given suffix. Parameters ---------- suffix : str Returns ------- file_list : list A list of all files in CleverHans whose filepath ends with `suffix`. """ cleverhans_path = os.path.abspath(cleverhans.__path__[0]) # In some environments cleverhans_path does not point to a real directory. # In such case return empty list. if not os.path.isdir(cleverhans_path): return [] repo_path = os.path.abspath(os.path.join(cleverhans_path, os.pardir)) file_list = _list_files(cleverhans_path, suffix) extra_dirs = ['cleverhans_tutorials', 'examples', 'scripts', 'tests_tf', 'tests_pytorch'] for extra_dir in extra_dirs: extra_path = os.path.join(repo_path, extra_dir) if os.path.isdir(extra_path): extra_files = _list_files(extra_path, suffix) extra_files = [os.path.join(os.pardir, path) for path in extra_files] file_list = file_list + extra_files return file_list
[ "def", "list_files", "(", "suffix", "=", "\"\"", ")", ":", "cleverhans_path", "=", "os", ".", "path", ".", "abspath", "(", "cleverhans", ".", "__path__", "[", "0", "]", ")", "# In some environments cleverhans_path does not point to a real directory.", "# In such case return empty list.", "if", "not", "os", ".", "path", ".", "isdir", "(", "cleverhans_path", ")", ":", "return", "[", "]", "repo_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "cleverhans_path", ",", "os", ".", "pardir", ")", ")", "file_list", "=", "_list_files", "(", "cleverhans_path", ",", "suffix", ")", "extra_dirs", "=", "[", "'cleverhans_tutorials'", ",", "'examples'", ",", "'scripts'", ",", "'tests_tf'", ",", "'tests_pytorch'", "]", "for", "extra_dir", "in", "extra_dirs", ":", "extra_path", "=", "os", ".", "path", ".", "join", "(", "repo_path", ",", "extra_dir", ")", "if", "os", ".", "path", ".", "isdir", "(", "extra_path", ")", ":", "extra_files", "=", "_list_files", "(", "extra_path", ",", "suffix", ")", "extra_files", "=", "[", "os", ".", "path", ".", "join", "(", "os", ".", "pardir", ",", "path", ")", "for", "path", "in", "extra_files", "]", "file_list", "=", "file_list", "+", "extra_files", "return", "file_list" ]
Returns a list of all files in CleverHans with the given suffix. Parameters ---------- suffix : str Returns ------- file_list : list A list of all files in CleverHans whose filepath ends with `suffix`.
[ "Returns", "a", "list", "of", "all", "files", "in", "CleverHans", "with", "the", "given", "suffix", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/devtools/list_files.py#L6-L38
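Example usage of list_files; the exact paths returned depend on how and where cleverhans is checked out or installed.

from cleverhans.devtools.list_files import list_files

py_files = list_files(suffix='.py')
print('Found {0} Python files'.format(len(py_files)))
for path in py_files[:5]:
  print(path)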
28,533
tensorflow/cleverhans
cleverhans/devtools/list_files.py
_list_files
def _list_files(path, suffix=""): """ Returns a list of all files ending in `suffix` contained within `path`. Parameters ---------- path : str a filepath suffix : str Returns ------- l : list A list of all files ending in `suffix` contained within `path`. (If `path` is a file rather than a directory, it is considered to "contain" itself) """ if os.path.isdir(path): incomplete = os.listdir(path) complete = [os.path.join(path, entry) for entry in incomplete] lists = [_list_files(subpath, suffix) for subpath in complete] flattened = [] for one_list in lists: for elem in one_list: flattened.append(elem) return flattened else: assert os.path.exists(path), "couldn't find file '%s'" % path if path.endswith(suffix): return [path] return []
python
def _list_files(path, suffix=""): """ Returns a list of all files ending in `suffix` contained within `path`. Parameters ---------- path : str a filepath suffix : str Returns ------- l : list A list of all files ending in `suffix` contained within `path`. (If `path` is a file rather than a directory, it is considered to "contain" itself) """ if os.path.isdir(path): incomplete = os.listdir(path) complete = [os.path.join(path, entry) for entry in incomplete] lists = [_list_files(subpath, suffix) for subpath in complete] flattened = [] for one_list in lists: for elem in one_list: flattened.append(elem) return flattened else: assert os.path.exists(path), "couldn't find file '%s'" % path if path.endswith(suffix): return [path] return []
[ "def", "_list_files", "(", "path", ",", "suffix", "=", "\"\"", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "incomplete", "=", "os", ".", "listdir", "(", "path", ")", "complete", "=", "[", "os", ".", "path", ".", "join", "(", "path", ",", "entry", ")", "for", "entry", "in", "incomplete", "]", "lists", "=", "[", "_list_files", "(", "subpath", ",", "suffix", ")", "for", "subpath", "in", "complete", "]", "flattened", "=", "[", "]", "for", "one_list", "in", "lists", ":", "for", "elem", "in", "one_list", ":", "flattened", ".", "append", "(", "elem", ")", "return", "flattened", "else", ":", "assert", "os", ".", "path", ".", "exists", "(", "path", ")", ",", "\"couldn't find file '%s'\"", "%", "path", "if", "path", ".", "endswith", "(", "suffix", ")", ":", "return", "[", "path", "]", "return", "[", "]" ]
Returns a list of all files ending in `suffix` contained within `path`. Parameters ---------- path : str a filepath suffix : str Returns ------- l : list A list of all files ending in `suffix` contained within `path`. (If `path` is a file rather than a directory, it is considered to "contain" itself)
[ "Returns", "a", "list", "of", "all", "files", "ending", "in", "suffix", "contained", "within", "path", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/devtools/list_files.py#L41-L71
28,534
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
save_dict_to_file
def save_dict_to_file(filename, dictionary): """Saves dictionary as CSV file.""" with open(filename, 'w') as f: writer = csv.writer(f) for k, v in iteritems(dictionary): writer.writerow([str(k), str(v)])
python
def save_dict_to_file(filename, dictionary): """Saves dictionary as CSV file.""" with open(filename, 'w') as f: writer = csv.writer(f) for k, v in iteritems(dictionary): writer.writerow([str(k), str(v)])
[ "def", "save_dict_to_file", "(", "filename", ",", "dictionary", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "writer", "=", "csv", ".", "writer", "(", "f", ")", "for", "k", ",", "v", "in", "iteritems", "(", "dictionary", ")", ":", "writer", ".", "writerow", "(", "[", "str", "(", "k", ")", ",", "str", "(", "v", ")", "]", ")" ]
Saves dictionary as CSV file.
[ "Saves", "dictionary", "as", "CSV", "file", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L49-L54
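A matching reader for the CSV written by save_dict_to_file could look like the sketch below; load_dict_from_file is a hypothetical helper added here only for illustration.

import csv

def load_dict_from_file(filename):
  """Hypothetical inverse of save_dict_to_file: reads a two-column CSV into a dict of strings."""
  result = {}
  with open(filename) as f:
    for row in csv.reader(f):
      if row:
        result[row[0]] = row[1] if len(row) > 1 else ''
  return result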
28,535
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
main
def main(args): """Main function which runs master.""" if args.blacklisted_submissions: logging.warning('BLACKLISTED SUBMISSIONS: %s', args.blacklisted_submissions) if args.limited_dataset: logging.info('Using limited dataset: 3 batches * 10 images') max_dataset_num_images = 30 batch_size = 10 else: logging.info('Using full dataset. Batch size: %d', DEFAULT_BATCH_SIZE) max_dataset_num_images = None batch_size = DEFAULT_BATCH_SIZE random.seed() print('\nRound: {0}\n'.format(args.round_name)) eval_master = EvaluationMaster( storage_client=eval_lib.CompetitionStorageClient( args.project_id, args.storage_bucket), datastore_client=eval_lib.CompetitionDatastoreClient( args.project_id, args.round_name), round_name=args.round_name, dataset_name=args.dataset_name, blacklisted_submissions=args.blacklisted_submissions, results_dir=args.results_dir, num_defense_shards=args.num_defense_shards, verbose=args.verbose, batch_size=batch_size, max_dataset_num_images=max_dataset_num_images) if args.command == 'attack': eval_master.prepare_attacks() elif args.command == 'defense': eval_master.prepare_defenses() elif args.command == 'cleanup_defenses': eval_master.cleanup_defenses() elif args.command == 'results': eval_master.compute_results() elif args.command == 'status': eval_master.show_status() elif args.command == 'cleanup_datastore': eval_master.cleanup_datastore() elif args.command == 'cleanup_failed_attacks': eval_master.cleanup_failed_attacks() elif args.command == 'cleanup_attacks_with_zero_images': eval_master.cleanup_attacks_with_zero_images() else: print('Invalid command: ', args.command) print('') print(USAGE)
python
def main(args): """Main function which runs master.""" if args.blacklisted_submissions: logging.warning('BLACKLISTED SUBMISSIONS: %s', args.blacklisted_submissions) if args.limited_dataset: logging.info('Using limited dataset: 3 batches * 10 images') max_dataset_num_images = 30 batch_size = 10 else: logging.info('Using full dataset. Batch size: %d', DEFAULT_BATCH_SIZE) max_dataset_num_images = None batch_size = DEFAULT_BATCH_SIZE random.seed() print('\nRound: {0}\n'.format(args.round_name)) eval_master = EvaluationMaster( storage_client=eval_lib.CompetitionStorageClient( args.project_id, args.storage_bucket), datastore_client=eval_lib.CompetitionDatastoreClient( args.project_id, args.round_name), round_name=args.round_name, dataset_name=args.dataset_name, blacklisted_submissions=args.blacklisted_submissions, results_dir=args.results_dir, num_defense_shards=args.num_defense_shards, verbose=args.verbose, batch_size=batch_size, max_dataset_num_images=max_dataset_num_images) if args.command == 'attack': eval_master.prepare_attacks() elif args.command == 'defense': eval_master.prepare_defenses() elif args.command == 'cleanup_defenses': eval_master.cleanup_defenses() elif args.command == 'results': eval_master.compute_results() elif args.command == 'status': eval_master.show_status() elif args.command == 'cleanup_datastore': eval_master.cleanup_datastore() elif args.command == 'cleanup_failed_attacks': eval_master.cleanup_failed_attacks() elif args.command == 'cleanup_attacks_with_zero_images': eval_master.cleanup_attacks_with_zero_images() else: print('Invalid command: ', args.command) print('') print(USAGE)
[ "def", "main", "(", "args", ")", ":", "if", "args", ".", "blacklisted_submissions", ":", "logging", ".", "warning", "(", "'BLACKLISTED SUBMISSIONS: %s'", ",", "args", ".", "blacklisted_submissions", ")", "if", "args", ".", "limited_dataset", ":", "logging", ".", "info", "(", "'Using limited dataset: 3 batches * 10 images'", ")", "max_dataset_num_images", "=", "30", "batch_size", "=", "10", "else", ":", "logging", ".", "info", "(", "'Using full dataset. Batch size: %d'", ",", "DEFAULT_BATCH_SIZE", ")", "max_dataset_num_images", "=", "None", "batch_size", "=", "DEFAULT_BATCH_SIZE", "random", ".", "seed", "(", ")", "print", "(", "'\\nRound: {0}\\n'", ".", "format", "(", "args", ".", "round_name", ")", ")", "eval_master", "=", "EvaluationMaster", "(", "storage_client", "=", "eval_lib", ".", "CompetitionStorageClient", "(", "args", ".", "project_id", ",", "args", ".", "storage_bucket", ")", ",", "datastore_client", "=", "eval_lib", ".", "CompetitionDatastoreClient", "(", "args", ".", "project_id", ",", "args", ".", "round_name", ")", ",", "round_name", "=", "args", ".", "round_name", ",", "dataset_name", "=", "args", ".", "dataset_name", ",", "blacklisted_submissions", "=", "args", ".", "blacklisted_submissions", ",", "results_dir", "=", "args", ".", "results_dir", ",", "num_defense_shards", "=", "args", ".", "num_defense_shards", ",", "verbose", "=", "args", ".", "verbose", ",", "batch_size", "=", "batch_size", ",", "max_dataset_num_images", "=", "max_dataset_num_images", ")", "if", "args", ".", "command", "==", "'attack'", ":", "eval_master", ".", "prepare_attacks", "(", ")", "elif", "args", ".", "command", "==", "'defense'", ":", "eval_master", ".", "prepare_defenses", "(", ")", "elif", "args", ".", "command", "==", "'cleanup_defenses'", ":", "eval_master", ".", "cleanup_defenses", "(", ")", "elif", "args", ".", "command", "==", "'results'", ":", "eval_master", ".", "compute_results", "(", ")", "elif", "args", ".", "command", "==", "'status'", ":", "eval_master", ".", "show_status", "(", ")", "elif", "args", ".", "command", "==", "'cleanup_datastore'", ":", "eval_master", ".", "cleanup_datastore", "(", ")", "elif", "args", ".", "command", "==", "'cleanup_failed_attacks'", ":", "eval_master", ".", "cleanup_failed_attacks", "(", ")", "elif", "args", ".", "command", "==", "'cleanup_attacks_with_zero_images'", ":", "eval_master", ".", "cleanup_attacks_with_zero_images", "(", ")", "else", ":", "print", "(", "'Invalid command: '", ",", "args", ".", "command", ")", "print", "(", "''", ")", "print", "(", "USAGE", ")" ]
Main function which runs master.
[ "Main", "function", "which", "runs", "master", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L688-L735
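The flag names below are inferred from the attributes that main(args) reads; the defaults, help strings, and required markers are assumptions, since the real master.py defines its own argument parser.

import argparse

def parse_args():
  """Illustrative argument parser compatible with the attributes main() expects."""
  parser = argparse.ArgumentParser(description='Competition evaluation master')
  parser.add_argument('command',
                      help='attack, defense, results, status, cleanup_* ...')
  parser.add_argument('--project_id', required=True)
  parser.add_argument('--storage_bucket', required=True)
  parser.add_argument('--round_name', required=True)
  parser.add_argument('--dataset_name', default='dev')
  parser.add_argument('--blacklisted_submissions', default='')
  parser.add_argument('--results_dir', default='results')
  parser.add_argument('--num_defense_shards', type=int, default=10)
  parser.add_argument('--limited_dataset', action='store_true')
  parser.add_argument('--verbose', action='store_true')
  return parser.parse_args()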
28,536
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster.ask_when_work_is_populated
def ask_when_work_is_populated(self, work): """When work is already populated asks whether we should continue. This method prints warning message that work is populated and asks whether user wants to continue or not. Args: work: instance of WorkPiecesBase Returns: True if we should continue and populate datastore, False if we should stop """ work.read_all_from_datastore() if work.work: print('Work is already written to datastore.\n' 'If you continue, this data will be overwritten and ' 'possibly corrupted.') inp = input_str('Do you want to continue? ' '(type "yes" without quotes to confirm): ') return inp == 'yes' else: return True
python
def ask_when_work_is_populated(self, work): """When work is already populated asks whether we should continue. This method prints warning message that work is populated and asks whether user wants to continue or not. Args: work: instance of WorkPiecesBase Returns: True if we should continue and populate datastore, False if we should stop """ work.read_all_from_datastore() if work.work: print('Work is already written to datastore.\n' 'If you continue, this data will be overwritten and ' 'possibly corrupted.') inp = input_str('Do you want to continue? ' '(type "yes" without quotes to confirm): ') return inp == 'yes' else: return True
[ "def", "ask_when_work_is_populated", "(", "self", ",", "work", ")", ":", "work", ".", "read_all_from_datastore", "(", ")", "if", "work", ".", "work", ":", "print", "(", "'Work is already written to datastore.\\n'", "'If you continue these data will be overwritten and '", "'possible corrupted.'", ")", "inp", "=", "input_str", "(", "'Do you want to continue? '", "'(type \"yes\" without quotes to confirm): '", ")", "return", "inp", "==", "'yes'", "else", ":", "return", "True" ]
When work is already populated asks whether we should continue. This method prints warning message that work is populated and asks whether user wants to continue or not. Args: work: instance of WorkPiecesBase Returns: True if we should continue and populate datastore, False if we should stop
[ "When", "work", "is", "already", "populated", "asks", "whether", "we", "should", "continue", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L116-L137
28,537
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster.prepare_attacks
def prepare_attacks(self): """Prepares all data needed for evaluation of attacks.""" print_header('PREPARING ATTACKS DATA') # verify that attacks data not written yet if not self.ask_when_work_is_populated(self.attack_work): return self.attack_work = eval_lib.AttackWorkPieces( datastore_client=self.datastore_client) # prepare submissions print_header('Initializing submissions') self.submissions.init_from_storage_write_to_datastore() if self.verbose: print(self.submissions) # prepare dataset batches print_header('Initializing dataset batches') self.dataset_batches.init_from_storage_write_to_datastore( batch_size=self.batch_size, allowed_epsilon=ALLOWED_EPS, skip_image_ids=[], max_num_images=self.max_dataset_num_images) if self.verbose: print(self.dataset_batches) # prepare adversarial batches print_header('Initializing adversarial batches') self.adv_batches.init_from_dataset_and_submissions_write_to_datastore( dataset_batches=self.dataset_batches, attack_submission_ids=self.submissions.get_all_attack_ids()) if self.verbose: print(self.adv_batches) # prepare work pieces print_header('Preparing attack work pieces') self.attack_work.init_from_adversarial_batches(self.adv_batches.data) self.attack_work.write_all_to_datastore() if self.verbose: print(self.attack_work)
python
def prepare_attacks(self): """Prepares all data needed for evaluation of attacks.""" print_header('PREPARING ATTACKS DATA') # verify that attacks data not written yet if not self.ask_when_work_is_populated(self.attack_work): return self.attack_work = eval_lib.AttackWorkPieces( datastore_client=self.datastore_client) # prepare submissions print_header('Initializing submissions') self.submissions.init_from_storage_write_to_datastore() if self.verbose: print(self.submissions) # prepare dataset batches print_header('Initializing dataset batches') self.dataset_batches.init_from_storage_write_to_datastore( batch_size=self.batch_size, allowed_epsilon=ALLOWED_EPS, skip_image_ids=[], max_num_images=self.max_dataset_num_images) if self.verbose: print(self.dataset_batches) # prepare adversarial batches print_header('Initializing adversarial batches') self.adv_batches.init_from_dataset_and_submissions_write_to_datastore( dataset_batches=self.dataset_batches, attack_submission_ids=self.submissions.get_all_attack_ids()) if self.verbose: print(self.adv_batches) # prepare work pieces print_header('Preparing attack work pieces') self.attack_work.init_from_adversarial_batches(self.adv_batches.data) self.attack_work.write_all_to_datastore() if self.verbose: print(self.attack_work)
[ "def", "prepare_attacks", "(", "self", ")", ":", "print_header", "(", "'PREPARING ATTACKS DATA'", ")", "# verify that attacks data not written yet", "if", "not", "self", ".", "ask_when_work_is_populated", "(", "self", ".", "attack_work", ")", ":", "return", "self", ".", "attack_work", "=", "eval_lib", ".", "AttackWorkPieces", "(", "datastore_client", "=", "self", ".", "datastore_client", ")", "# prepare submissions", "print_header", "(", "'Initializing submissions'", ")", "self", ".", "submissions", ".", "init_from_storage_write_to_datastore", "(", ")", "if", "self", ".", "verbose", ":", "print", "(", "self", ".", "submissions", ")", "# prepare dataset batches", "print_header", "(", "'Initializing dataset batches'", ")", "self", ".", "dataset_batches", ".", "init_from_storage_write_to_datastore", "(", "batch_size", "=", "self", ".", "batch_size", ",", "allowed_epsilon", "=", "ALLOWED_EPS", ",", "skip_image_ids", "=", "[", "]", ",", "max_num_images", "=", "self", ".", "max_dataset_num_images", ")", "if", "self", ".", "verbose", ":", "print", "(", "self", ".", "dataset_batches", ")", "# prepare adversarial batches", "print_header", "(", "'Initializing adversarial batches'", ")", "self", ".", "adv_batches", ".", "init_from_dataset_and_submissions_write_to_datastore", "(", "dataset_batches", "=", "self", ".", "dataset_batches", ",", "attack_submission_ids", "=", "self", ".", "submissions", ".", "get_all_attack_ids", "(", ")", ")", "if", "self", ".", "verbose", ":", "print", "(", "self", ".", "adv_batches", ")", "# prepare work pieces", "print_header", "(", "'Preparing attack work pieces'", ")", "self", ".", "attack_work", ".", "init_from_adversarial_batches", "(", "self", ".", "adv_batches", ".", "data", ")", "self", ".", "attack_work", ".", "write_all_to_datastore", "(", ")", "if", "self", ".", "verbose", ":", "print", "(", "self", ".", "attack_work", ")" ]
Prepares all data needed for evaluation of attacks.
[ "Prepares", "all", "data", "needed", "for", "evaluation", "of", "attacks", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L139-L173
28,538
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster.prepare_defenses
def prepare_defenses(self): """Prepares all data needed for evaluation of defenses.""" print_header('PREPARING DEFENSE DATA') # verify that defense data not written yet if not self.ask_when_work_is_populated(self.defense_work): return self.defense_work = eval_lib.DefenseWorkPieces( datastore_client=self.datastore_client) # load results of attacks self.submissions.init_from_datastore() self.dataset_batches.init_from_datastore() self.adv_batches.init_from_datastore() self.attack_work.read_all_from_datastore() # populate classification results print_header('Initializing classification batches') self.class_batches.init_from_adversarial_batches_write_to_datastore( self.submissions, self.adv_batches) if self.verbose: print(self.class_batches) # populate work pieces print_header('Preparing defense work pieces') self.defense_work.init_from_class_batches( self.class_batches.data, num_shards=self.num_defense_shards) self.defense_work.write_all_to_datastore() if self.verbose: print(self.defense_work)
python
def prepare_defenses(self): """Prepares all data needed for evaluation of defenses.""" print_header('PREPARING DEFENSE DATA') # verify that defense data not written yet if not self.ask_when_work_is_populated(self.defense_work): return self.defense_work = eval_lib.DefenseWorkPieces( datastore_client=self.datastore_client) # load results of attacks self.submissions.init_from_datastore() self.dataset_batches.init_from_datastore() self.adv_batches.init_from_datastore() self.attack_work.read_all_from_datastore() # populate classification results print_header('Initializing classification batches') self.class_batches.init_from_adversarial_batches_write_to_datastore( self.submissions, self.adv_batches) if self.verbose: print(self.class_batches) # populate work pieces print_header('Preparing defense work pieces') self.defense_work.init_from_class_batches( self.class_batches.data, num_shards=self.num_defense_shards) self.defense_work.write_all_to_datastore() if self.verbose: print(self.defense_work)
[ "def", "prepare_defenses", "(", "self", ")", ":", "print_header", "(", "'PREPARING DEFENSE DATA'", ")", "# verify that defense data not written yet", "if", "not", "self", ".", "ask_when_work_is_populated", "(", "self", ".", "defense_work", ")", ":", "return", "self", ".", "defense_work", "=", "eval_lib", ".", "DefenseWorkPieces", "(", "datastore_client", "=", "self", ".", "datastore_client", ")", "# load results of attacks", "self", ".", "submissions", ".", "init_from_datastore", "(", ")", "self", ".", "dataset_batches", ".", "init_from_datastore", "(", ")", "self", ".", "adv_batches", ".", "init_from_datastore", "(", ")", "self", ".", "attack_work", ".", "read_all_from_datastore", "(", ")", "# populate classification results", "print_header", "(", "'Initializing classification batches'", ")", "self", ".", "class_batches", ".", "init_from_adversarial_batches_write_to_datastore", "(", "self", ".", "submissions", ",", "self", ".", "adv_batches", ")", "if", "self", ".", "verbose", ":", "print", "(", "self", ".", "class_batches", ")", "# populate work pieces", "print_header", "(", "'Preparing defense work pieces'", ")", "self", ".", "defense_work", ".", "init_from_class_batches", "(", "self", ".", "class_batches", ".", "data", ",", "num_shards", "=", "self", ".", "num_defense_shards", ")", "self", ".", "defense_work", ".", "write_all_to_datastore", "(", ")", "if", "self", ".", "verbose", ":", "print", "(", "self", ".", "defense_work", ")" ]
Prepares all data needed for evaluation of defenses.
[ "Prepares", "all", "data", "needed", "for", "evaluation", "of", "defenses", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L175-L200
28,539
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster._save_work_results
def _save_work_results(self, run_stats, scores, num_processed_images, filename): """Saves statistics about each submission. Saved statistics include score; number of completed and failed batches; min, max, average and median time needed to run one batch. Args: run_stats: dictionary with runtime statistics for submissions, can be generated by WorkPiecesBase.compute_work_statistics scores: dictionary mapping submission ids to scores num_processed_images: dictionary with number of successfully processed images by each submission, one of the outputs of ClassificationBatches.compute_classification_results filename: output filename """ with open(filename, 'w') as f: writer = csv.writer(f) writer.writerow( ['SubmissionID', 'ExternalSubmissionId', 'Score', 'CompletedBatches', 'BatchesWithError', 'ProcessedImages', 'MinEvalTime', 'MaxEvalTime', 'MedianEvalTime', 'MeanEvalTime', 'ErrorMsg']) for submission_id in sorted(iterkeys(run_stats)): stat = run_stats.get( submission_id, collections.defaultdict(lambda: float('NaN'))) external_id = self.submissions.get_external_id(submission_id) error_msg = '' while not error_msg and stat['error_messages']: error_msg = stat['error_messages'].pop() if error_msg.startswith('Cant copy adversarial batch locally'): error_msg = '' writer.writerow([ submission_id, external_id, scores.get(submission_id, None), stat['completed'], stat['num_errors'], num_processed_images.get(submission_id, None), stat['min_eval_time'], stat['max_eval_time'], stat['median_eval_time'], stat['mean_eval_time'], error_msg ])
python
def _save_work_results(self, run_stats, scores, num_processed_images, filename): """Saves statistics about each submission. Saved statistics include score; number of completed and failed batches; min, max, average and median time needed to run one batch. Args: run_stats: dictionary with runtime statistics for submissions, can be generated by WorkPiecesBase.compute_work_statistics scores: dictionary mapping submission ids to scores num_processed_images: dictionary with number of successfully processed images by each submission, one of the outputs of ClassificationBatches.compute_classification_results filename: output filename """ with open(filename, 'w') as f: writer = csv.writer(f) writer.writerow( ['SubmissionID', 'ExternalSubmissionId', 'Score', 'CompletedBatches', 'BatchesWithError', 'ProcessedImages', 'MinEvalTime', 'MaxEvalTime', 'MedianEvalTime', 'MeanEvalTime', 'ErrorMsg']) for submission_id in sorted(iterkeys(run_stats)): stat = run_stats.get( submission_id, collections.defaultdict(lambda: float('NaN'))) external_id = self.submissions.get_external_id(submission_id) error_msg = '' while not error_msg and stat['error_messages']: error_msg = stat['error_messages'].pop() if error_msg.startswith('Cant copy adversarial batch locally'): error_msg = '' writer.writerow([ submission_id, external_id, scores.get(submission_id, None), stat['completed'], stat['num_errors'], num_processed_images.get(submission_id, None), stat['min_eval_time'], stat['max_eval_time'], stat['median_eval_time'], stat['mean_eval_time'], error_msg ])
[ "def", "_save_work_results", "(", "self", ",", "run_stats", ",", "scores", ",", "num_processed_images", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "writer", "=", "csv", ".", "writer", "(", "f", ")", "writer", ".", "writerow", "(", "[", "'SubmissionID'", ",", "'ExternalSubmissionId'", ",", "'Score'", ",", "'CompletedBatches'", ",", "'BatchesWithError'", ",", "'ProcessedImages'", ",", "'MinEvalTime'", ",", "'MaxEvalTime'", ",", "'MedianEvalTime'", ",", "'MeanEvalTime'", ",", "'ErrorMsg'", "]", ")", "for", "submission_id", "in", "sorted", "(", "iterkeys", "(", "run_stats", ")", ")", ":", "stat", "=", "run_stats", ".", "get", "(", "submission_id", ",", "collections", ".", "defaultdict", "(", "lambda", ":", "float", "(", "'NaN'", ")", ")", ")", "external_id", "=", "self", ".", "submissions", ".", "get_external_id", "(", "submission_id", ")", "error_msg", "=", "''", "while", "not", "error_msg", "and", "stat", "[", "'error_messages'", "]", ":", "error_msg", "=", "stat", "[", "'error_messages'", "]", ".", "pop", "(", ")", "if", "error_msg", ".", "startswith", "(", "'Cant copy adversarial batch locally'", ")", ":", "error_msg", "=", "''", "writer", ".", "writerow", "(", "[", "submission_id", ",", "external_id", ",", "scores", ".", "get", "(", "submission_id", ",", "None", ")", ",", "stat", "[", "'completed'", "]", ",", "stat", "[", "'num_errors'", "]", ",", "num_processed_images", ".", "get", "(", "submission_id", ",", "None", ")", ",", "stat", "[", "'min_eval_time'", "]", ",", "stat", "[", "'max_eval_time'", "]", ",", "stat", "[", "'median_eval_time'", "]", ",", "stat", "[", "'mean_eval_time'", "]", ",", "error_msg", "]", ")" ]
Saves statistics about each submission. Saved statistics include score; number of completed and failed batches; min, max, average and median time needed to run one batch. Args: run_stats: dictionary with runtime statistics for submissions, can be generated by WorkPiecesBase.compute_work_statistics scores: dictionary mapping submission ids to scores num_processed_images: dictionary with number of successfully processed images by each submission, one of the outputs of ClassificationBatches.compute_classification_results filename: output filename
[ "Saves", "statistics", "about", "each", "submission", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L202-L243
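The CSV produced by _save_work_results can be post-processed with the standard csv module; the sketch below sorts submissions by score and treats missing or non-numeric scores as lowest. It is illustrative and not part of master.py.

import csv

def _score_value(text):
  """Parses a score cell, mapping missing or non-numeric values to -inf."""
  try:
    return float(text)
  except (TypeError, ValueError):
    return float('-inf')

def read_work_results(filename):
  """Reads the per-submission results CSV and returns rows sorted by score."""
  with open(filename) as f:
    rows = list(csv.DictReader(f))
  rows.sort(key=lambda r: _score_value(r.get('Score')), reverse=True)
  return rows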
28,540
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster._read_dataset_metadata
def _read_dataset_metadata(self): """Reads dataset metadata. Returns: instance of DatasetMetadata """ blob = self.storage_client.get_blob( 'dataset/' + self.dataset_name + '_dataset.csv') buf = BytesIO() blob.download_to_file(buf) buf.seek(0) return eval_lib.DatasetMetadata(buf)
python
def _read_dataset_metadata(self): """Reads dataset metadata. Returns: instance of DatasetMetadata """ blob = self.storage_client.get_blob( 'dataset/' + self.dataset_name + '_dataset.csv') buf = BytesIO() blob.download_to_file(buf) buf.seek(0) return eval_lib.DatasetMetadata(buf)
[ "def", "_read_dataset_metadata", "(", "self", ")", ":", "blob", "=", "self", ".", "storage_client", ".", "get_blob", "(", "'dataset/'", "+", "self", ".", "dataset_name", "+", "'_dataset.csv'", ")", "buf", "=", "BytesIO", "(", ")", "blob", ".", "download_to_file", "(", "buf", ")", "buf", ".", "seek", "(", "0", ")", "return", "eval_lib", ".", "DatasetMetadata", "(", "buf", ")" ]
Reads dataset metadata. Returns: instance of DatasetMetadata
[ "Reads", "dataset", "metadata", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L272-L283
28,541
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster._show_status_for_work
def _show_status_for_work(self, work): """Shows status for given work pieces. Args: work: instance of either AttackWorkPieces or DefenseWorkPieces """ work_count = len(work.work) work_completed = {} work_completed_count = 0 for v in itervalues(work.work): if v['is_completed']: work_completed_count += 1 worker_id = v['claimed_worker_id'] if worker_id not in work_completed: work_completed[worker_id] = { 'completed_count': 0, 'last_update': 0.0, } work_completed[worker_id]['completed_count'] += 1 work_completed[worker_id]['last_update'] = max( work_completed[worker_id]['last_update'], v['claimed_worker_start_time']) print('Completed {0}/{1} work'.format(work_completed_count, work_count)) for k in sorted(iterkeys(work_completed)): last_update_time = time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime(work_completed[k]['last_update'])) print('Worker {0}: completed {1} last claimed work at {2}'.format( k, work_completed[k]['completed_count'], last_update_time))
python
def _show_status_for_work(self, work): """Shows status for given work pieces. Args: work: instance of either AttackWorkPieces or DefenseWorkPieces """ work_count = len(work.work) work_completed = {} work_completed_count = 0 for v in itervalues(work.work): if v['is_completed']: work_completed_count += 1 worker_id = v['claimed_worker_id'] if worker_id not in work_completed: work_completed[worker_id] = { 'completed_count': 0, 'last_update': 0.0, } work_completed[worker_id]['completed_count'] += 1 work_completed[worker_id]['last_update'] = max( work_completed[worker_id]['last_update'], v['claimed_worker_start_time']) print('Completed {0}/{1} work'.format(work_completed_count, work_count)) for k in sorted(iterkeys(work_completed)): last_update_time = time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime(work_completed[k]['last_update'])) print('Worker {0}: completed {1} last claimed work at {2}'.format( k, work_completed[k]['completed_count'], last_update_time))
[ "def", "_show_status_for_work", "(", "self", ",", "work", ")", ":", "work_count", "=", "len", "(", "work", ".", "work", ")", "work_completed", "=", "{", "}", "work_completed_count", "=", "0", "for", "v", "in", "itervalues", "(", "work", ".", "work", ")", ":", "if", "v", "[", "'is_completed'", "]", ":", "work_completed_count", "+=", "1", "worker_id", "=", "v", "[", "'claimed_worker_id'", "]", "if", "worker_id", "not", "in", "work_completed", ":", "work_completed", "[", "worker_id", "]", "=", "{", "'completed_count'", ":", "0", ",", "'last_update'", ":", "0.0", ",", "}", "work_completed", "[", "worker_id", "]", "[", "'completed_count'", "]", "+=", "1", "work_completed", "[", "worker_id", "]", "[", "'last_update'", "]", "=", "max", "(", "work_completed", "[", "worker_id", "]", "[", "'last_update'", "]", ",", "v", "[", "'claimed_worker_start_time'", "]", ")", "print", "(", "'Completed {0}/{1} work'", ".", "format", "(", "work_completed_count", ",", "work_count", ")", ")", "for", "k", "in", "sorted", "(", "iterkeys", "(", "work_completed", ")", ")", ":", "last_update_time", "=", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ",", "time", ".", "localtime", "(", "work_completed", "[", "k", "]", "[", "'last_update'", "]", ")", ")", "print", "(", "'Worker {0}: completed {1} last claimed work at {2}'", ".", "format", "(", "k", ",", "work_completed", "[", "k", "]", "[", "'completed_count'", "]", ",", "last_update_time", ")", ")" ]
Shows status for given work pieces. Args: work: instance of either AttackWorkPieces or DefenseWorkPieces
[ "Shows", "status", "for", "given", "work", "pieces", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L448-L477
28,542
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster._export_work_errors
def _export_work_errors(self, work, output_file): """Saves errors for given work pieces into file. Args: work: instance of either AttackWorkPieces or DefenseWorkPieces output_file: name of the output file """ errors = set() for v in itervalues(work.work): if v['is_completed'] and v['error'] is not None: errors.add(v['error']) with open(output_file, 'w') as f: for e in sorted(errors): f.write(e) f.write('\n')
python
def _export_work_errors(self, work, output_file): """Saves errors for given work pieces into file. Args: work: instance of either AttackWorkPieces or DefenseWorkPieces output_file: name of the output file """ errors = set() for v in itervalues(work.work): if v['is_completed'] and v['error'] is not None: errors.add(v['error']) with open(output_file, 'w') as f: for e in sorted(errors): f.write(e) f.write('\n')
[ "def", "_export_work_errors", "(", "self", ",", "work", ",", "output_file", ")", ":", "errors", "=", "set", "(", ")", "for", "v", "in", "itervalues", "(", "work", ".", "work", ")", ":", "if", "v", "[", "'is_completed'", "]", "and", "v", "[", "'error'", "]", "is", "not", "None", ":", "errors", ".", "add", "(", "v", "[", "'error'", "]", ")", "with", "open", "(", "output_file", ",", "'w'", ")", "as", "f", ":", "for", "e", "in", "sorted", "(", "errors", ")", ":", "f", ".", "write", "(", "e", ")", "f", ".", "write", "(", "'\\n'", ")" ]
Saves errors for given work pieces into file. Args: work: instance of either AttackWorkPieces or DefenseWorkPieces output_file: name of the output file
[ "Saves", "errors", "for", "given", "work", "pieces", "into", "file", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L479-L493
28,543
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster.show_status
def show_status(self): """Shows current status of competition evaluation. Also this method saves error messages generated by attacks and defenses into attack_errors.txt and defense_errors.txt. """ print_header('Attack work statistics') self.attack_work.read_all_from_datastore() self._show_status_for_work(self.attack_work) self._export_work_errors( self.attack_work, os.path.join(self.results_dir, 'attack_errors.txt')) print_header('Defense work statistics') self.defense_work.read_all_from_datastore() self._show_status_for_work(self.defense_work) self._export_work_errors( self.defense_work, os.path.join(self.results_dir, 'defense_errors.txt'))
python
def show_status(self): """Shows current status of competition evaluation. Also this method saves error messages generated by attacks and defenses into attack_errors.txt and defense_errors.txt. """ print_header('Attack work statistics') self.attack_work.read_all_from_datastore() self._show_status_for_work(self.attack_work) self._export_work_errors( self.attack_work, os.path.join(self.results_dir, 'attack_errors.txt')) print_header('Defense work statistics') self.defense_work.read_all_from_datastore() self._show_status_for_work(self.defense_work) self._export_work_errors( self.defense_work, os.path.join(self.results_dir, 'defense_errors.txt'))
[ "def", "show_status", "(", "self", ")", ":", "print_header", "(", "'Attack work statistics'", ")", "self", ".", "attack_work", ".", "read_all_from_datastore", "(", ")", "self", ".", "_show_status_for_work", "(", "self", ".", "attack_work", ")", "self", ".", "_export_work_errors", "(", "self", ".", "attack_work", ",", "os", ".", "path", ".", "join", "(", "self", ".", "results_dir", ",", "'attack_errors.txt'", ")", ")", "print_header", "(", "'Defense work statistics'", ")", "self", ".", "defense_work", ".", "read_all_from_datastore", "(", ")", "self", ".", "_show_status_for_work", "(", "self", ".", "defense_work", ")", "self", ".", "_export_work_errors", "(", "self", ".", "defense_work", ",", "os", ".", "path", ".", "join", "(", "self", ".", "results_dir", ",", "'defense_errors.txt'", ")", ")" ]
Shows current status of competition evaluation.

Also this method saves error messages generated by attacks and defenses
into attack_errors.txt and defense_errors.txt.
[ "Shows", "current", "status", "of", "competition", "evaluation", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L495-L512
28,544
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster.cleanup_failed_attacks
def cleanup_failed_attacks(self): """Cleans up data of failed attacks.""" print_header('Cleaning up failed attacks') attacks_to_replace = {} self.attack_work.read_all_from_datastore() failed_submissions = set() error_msg = set() for k, v in iteritems(self.attack_work.work): if v['error'] is not None: attacks_to_replace[k] = dict(v) failed_submissions.add(v['submission_id']) error_msg.add(v['error']) attacks_to_replace[k].update( { 'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, }) self.attack_work.replace_work(attacks_to_replace) print('Affected submissions:') print(' '.join(sorted(failed_submissions))) print('Error messages:') print(' '.join(sorted(error_msg))) print('') inp = input_str('Are you sure? (type "yes" without quotes to confirm): ') if inp != 'yes': return self.attack_work.write_all_to_datastore() print('Work cleaned up')
python
def cleanup_failed_attacks(self): """Cleans up data of failed attacks.""" print_header('Cleaning up failed attacks') attacks_to_replace = {} self.attack_work.read_all_from_datastore() failed_submissions = set() error_msg = set() for k, v in iteritems(self.attack_work.work): if v['error'] is not None: attacks_to_replace[k] = dict(v) failed_submissions.add(v['submission_id']) error_msg.add(v['error']) attacks_to_replace[k].update( { 'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, }) self.attack_work.replace_work(attacks_to_replace) print('Affected submissions:') print(' '.join(sorted(failed_submissions))) print('Error messages:') print(' '.join(sorted(error_msg))) print('') inp = input_str('Are you sure? (type "yes" without quotes to confirm): ') if inp != 'yes': return self.attack_work.write_all_to_datastore() print('Work cleaned up')
[ "def", "cleanup_failed_attacks", "(", "self", ")", ":", "print_header", "(", "'Cleaning up failed attacks'", ")", "attacks_to_replace", "=", "{", "}", "self", ".", "attack_work", ".", "read_all_from_datastore", "(", ")", "failed_submissions", "=", "set", "(", ")", "error_msg", "=", "set", "(", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "attack_work", ".", "work", ")", ":", "if", "v", "[", "'error'", "]", "is", "not", "None", ":", "attacks_to_replace", "[", "k", "]", "=", "dict", "(", "v", ")", "failed_submissions", ".", "add", "(", "v", "[", "'submission_id'", "]", ")", "error_msg", ".", "add", "(", "v", "[", "'error'", "]", ")", "attacks_to_replace", "[", "k", "]", ".", "update", "(", "{", "'claimed_worker_id'", ":", "None", ",", "'claimed_worker_start_time'", ":", "None", ",", "'is_completed'", ":", "False", ",", "'error'", ":", "None", ",", "'elapsed_time'", ":", "None", ",", "}", ")", "self", ".", "attack_work", ".", "replace_work", "(", "attacks_to_replace", ")", "print", "(", "'Affected submissions:'", ")", "print", "(", "' '", ".", "join", "(", "sorted", "(", "failed_submissions", ")", ")", ")", "print", "(", "'Error messages:'", ")", "print", "(", "' '", ".", "join", "(", "sorted", "(", "error_msg", ")", ")", ")", "print", "(", "''", ")", "inp", "=", "input_str", "(", "'Are you sure? (type \"yes\" without quotes to confirm): '", ")", "if", "inp", "!=", "'yes'", ":", "return", "self", ".", "attack_work", ".", "write_all_to_datastore", "(", ")", "print", "(", "'Work cleaned up'", ")" ]
Cleans up data of failed attacks.
[ "Cleans", "up", "data", "of", "failed", "attacks", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L514-L544
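`cleanup_failed_attacks` above follows a "reset and confirm" pattern: copy each failed entry, clear its progress fields so the work can be claimed again, and commit only after an explicit confirmation. A standalone sketch of that pattern is below; the dictionaries, field values, and prompt text are illustrative.

# Reset failed work entries after a confirmation prompt.
work = {
    'work_00': {'submission_id': 's1', 'error': 'crashed', 'is_completed': True,
                'claimed_worker_id': 'w1', 'claimed_worker_start_time': 1.0,
                'elapsed_time': 42.0},
    'work_01': {'submission_id': 's2', 'error': None, 'is_completed': True,
                'claimed_worker_id': 'w2', 'claimed_worker_start_time': 2.0,
                'elapsed_time': 40.0},
}

to_replace = {}
for work_id, entry in work.items():
    if entry['error'] is None:
        continue
    reset = dict(entry)
    reset.update({'claimed_worker_id': None, 'claimed_worker_start_time': None,
                  'is_completed': False, 'error': None, 'elapsed_time': None})
    to_replace[work_id] = reset

print('Affected submissions:',
      sorted({work[k]['submission_id'] for k in to_replace}))
if input('Type "yes" to reset these work pieces: ') == 'yes':
    work.update(to_replace)
    print('Work cleaned up')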
28,545
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster.cleanup_attacks_with_zero_images
def cleanup_attacks_with_zero_images(self): """Cleans up data about attacks which generated zero images.""" print_header('Cleaning up attacks which generated 0 images.') # find out attack work to cleanup self.adv_batches.init_from_datastore() self.attack_work.read_all_from_datastore() new_attack_work = {} affected_adversarial_batches = set() for work_id, work in iteritems(self.attack_work.work): adv_batch_id = work['output_adversarial_batch_id'] img_count_adv_batch = len(self.adv_batches.data[adv_batch_id]['images']) if (img_count_adv_batch < 100) and (work['elapsed_time'] < 500): affected_adversarial_batches.add(adv_batch_id) new_attack_work[work_id] = dict(work) new_attack_work[work_id].update( { 'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, }) self.attack_work.replace_work(new_attack_work) print_header('Changes in attack works:') print(self.attack_work) # build list of classification batches self.class_batches.init_from_datastore() affected_class_batches = set() for k, v in iteritems(self.class_batches.data): if v['adversarial_batch_id'] in affected_adversarial_batches: affected_class_batches.add(k) # cleanup defense work on affected batches self.defense_work.read_all_from_datastore() new_defense_work = {} for k, v in iteritems(self.defense_work.work): if v['output_classification_batch_id'] in affected_class_batches: new_defense_work[k] = dict(v) new_defense_work[k].update( { 'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, 'stat_correct': None, 'stat_error': None, 'stat_target_class': None, 'stat_num_images': None, }) self.defense_work.replace_work(new_defense_work) print_header('Changes in defense works:') print(self.defense_work) print('') print('Total number of affected attack work: ', len(self.attack_work)) print('Total number of affected defense work: ', len(self.defense_work)) inp = input_str('Are you sure? (type "yes" without quotes to confirm): ') if inp != 'yes': return print('Writing attacks work') self.attack_work.write_all_to_datastore() print('Writing defenses work') self.defense_work.write_all_to_datastore() print('Done!')
python
def cleanup_attacks_with_zero_images(self): """Cleans up data about attacks which generated zero images.""" print_header('Cleaning up attacks which generated 0 images.') # find out attack work to cleanup self.adv_batches.init_from_datastore() self.attack_work.read_all_from_datastore() new_attack_work = {} affected_adversarial_batches = set() for work_id, work in iteritems(self.attack_work.work): adv_batch_id = work['output_adversarial_batch_id'] img_count_adv_batch = len(self.adv_batches.data[adv_batch_id]['images']) if (img_count_adv_batch < 100) and (work['elapsed_time'] < 500): affected_adversarial_batches.add(adv_batch_id) new_attack_work[work_id] = dict(work) new_attack_work[work_id].update( { 'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, }) self.attack_work.replace_work(new_attack_work) print_header('Changes in attack works:') print(self.attack_work) # build list of classification batches self.class_batches.init_from_datastore() affected_class_batches = set() for k, v in iteritems(self.class_batches.data): if v['adversarial_batch_id'] in affected_adversarial_batches: affected_class_batches.add(k) # cleanup defense work on affected batches self.defense_work.read_all_from_datastore() new_defense_work = {} for k, v in iteritems(self.defense_work.work): if v['output_classification_batch_id'] in affected_class_batches: new_defense_work[k] = dict(v) new_defense_work[k].update( { 'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, 'stat_correct': None, 'stat_error': None, 'stat_target_class': None, 'stat_num_images': None, }) self.defense_work.replace_work(new_defense_work) print_header('Changes in defense works:') print(self.defense_work) print('') print('Total number of affected attack work: ', len(self.attack_work)) print('Total number of affected defense work: ', len(self.defense_work)) inp = input_str('Are you sure? (type "yes" without quotes to confirm): ') if inp != 'yes': return print('Writing attacks work') self.attack_work.write_all_to_datastore() print('Writing defenses work') self.defense_work.write_all_to_datastore() print('Done!')
[ "def", "cleanup_attacks_with_zero_images", "(", "self", ")", ":", "print_header", "(", "'Cleaning up attacks which generated 0 images.'", ")", "# find out attack work to cleanup", "self", ".", "adv_batches", ".", "init_from_datastore", "(", ")", "self", ".", "attack_work", ".", "read_all_from_datastore", "(", ")", "new_attack_work", "=", "{", "}", "affected_adversarial_batches", "=", "set", "(", ")", "for", "work_id", ",", "work", "in", "iteritems", "(", "self", ".", "attack_work", ".", "work", ")", ":", "adv_batch_id", "=", "work", "[", "'output_adversarial_batch_id'", "]", "img_count_adv_batch", "=", "len", "(", "self", ".", "adv_batches", ".", "data", "[", "adv_batch_id", "]", "[", "'images'", "]", ")", "if", "(", "img_count_adv_batch", "<", "100", ")", "and", "(", "work", "[", "'elapsed_time'", "]", "<", "500", ")", ":", "affected_adversarial_batches", ".", "add", "(", "adv_batch_id", ")", "new_attack_work", "[", "work_id", "]", "=", "dict", "(", "work", ")", "new_attack_work", "[", "work_id", "]", ".", "update", "(", "{", "'claimed_worker_id'", ":", "None", ",", "'claimed_worker_start_time'", ":", "None", ",", "'is_completed'", ":", "False", ",", "'error'", ":", "None", ",", "'elapsed_time'", ":", "None", ",", "}", ")", "self", ".", "attack_work", ".", "replace_work", "(", "new_attack_work", ")", "print_header", "(", "'Changes in attack works:'", ")", "print", "(", "self", ".", "attack_work", ")", "# build list of classification batches", "self", ".", "class_batches", ".", "init_from_datastore", "(", ")", "affected_class_batches", "=", "set", "(", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "class_batches", ".", "data", ")", ":", "if", "v", "[", "'adversarial_batch_id'", "]", "in", "affected_adversarial_batches", ":", "affected_class_batches", ".", "add", "(", "k", ")", "# cleanup defense work on affected batches", "self", ".", "defense_work", ".", "read_all_from_datastore", "(", ")", "new_defense_work", "=", "{", "}", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "defense_work", ".", "work", ")", ":", "if", "v", "[", "'output_classification_batch_id'", "]", "in", "affected_class_batches", ":", "new_defense_work", "[", "k", "]", "=", "dict", "(", "v", ")", "new_defense_work", "[", "k", "]", ".", "update", "(", "{", "'claimed_worker_id'", ":", "None", ",", "'claimed_worker_start_time'", ":", "None", ",", "'is_completed'", ":", "False", ",", "'error'", ":", "None", ",", "'elapsed_time'", ":", "None", ",", "'stat_correct'", ":", "None", ",", "'stat_error'", ":", "None", ",", "'stat_target_class'", ":", "None", ",", "'stat_num_images'", ":", "None", ",", "}", ")", "self", ".", "defense_work", ".", "replace_work", "(", "new_defense_work", ")", "print_header", "(", "'Changes in defense works:'", ")", "print", "(", "self", ".", "defense_work", ")", "print", "(", "''", ")", "print", "(", "'Total number of affected attack work: '", ",", "len", "(", "self", ".", "attack_work", ")", ")", "print", "(", "'Total number of affected defense work: '", ",", "len", "(", "self", ".", "defense_work", ")", ")", "inp", "=", "input_str", "(", "'Are you sure? (type \"yes\" without quotes to confirm): '", ")", "if", "inp", "!=", "'yes'", ":", "return", "print", "(", "'Writing attacks work'", ")", "self", ".", "attack_work", ".", "write_all_to_datastore", "(", ")", "print", "(", "'Writing defenses work'", ")", "self", ".", "defense_work", ".", "write_all_to_datastore", "(", ")", "print", "(", "'Done!'", ")" ]
Cleans up data about attacks which generated zero images.
[ "Cleans", "up", "data", "about", "attacks", "which", "generated", "zero", "images", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L546-L608
28,546
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster._cleanup_keys_with_confirmation
def _cleanup_keys_with_confirmation(self, keys_to_delete): """Asks confirmation and then deletes entries with keys. Args: keys_to_delete: list of datastore keys for which entries should be deleted """ print('Round name: ', self.round_name) print('Number of entities to be deleted: ', len(keys_to_delete)) if not keys_to_delete: return if self.verbose: print('Entities to delete:') idx = 0 prev_key_prefix = None dots_printed_after_same_prefix = False for k in keys_to_delete: if idx >= 20: print(' ...') print(' ...') break key_prefix = (k.flat_path[0:1] if k.flat_path[0] in [u'SubmissionType', u'WorkType'] else k.flat_path[0]) if prev_key_prefix == key_prefix: if not dots_printed_after_same_prefix: print(' ...') dots_printed_after_same_prefix = True else: print(' ', k) dots_printed_after_same_prefix = False idx += 1 prev_key_prefix = key_prefix print() inp = input_str('Are you sure? (type "yes" without quotes to confirm): ') if inp != 'yes': return with self.datastore_client.no_transact_batch() as batch: for k in keys_to_delete: batch.delete(k) print('Data deleted')
python
def _cleanup_keys_with_confirmation(self, keys_to_delete): """Asks confirmation and then deletes entries with keys. Args: keys_to_delete: list of datastore keys for which entries should be deleted """ print('Round name: ', self.round_name) print('Number of entities to be deleted: ', len(keys_to_delete)) if not keys_to_delete: return if self.verbose: print('Entities to delete:') idx = 0 prev_key_prefix = None dots_printed_after_same_prefix = False for k in keys_to_delete: if idx >= 20: print(' ...') print(' ...') break key_prefix = (k.flat_path[0:1] if k.flat_path[0] in [u'SubmissionType', u'WorkType'] else k.flat_path[0]) if prev_key_prefix == key_prefix: if not dots_printed_after_same_prefix: print(' ...') dots_printed_after_same_prefix = True else: print(' ', k) dots_printed_after_same_prefix = False idx += 1 prev_key_prefix = key_prefix print() inp = input_str('Are you sure? (type "yes" without quotes to confirm): ') if inp != 'yes': return with self.datastore_client.no_transact_batch() as batch: for k in keys_to_delete: batch.delete(k) print('Data deleted')
[ "def", "_cleanup_keys_with_confirmation", "(", "self", ",", "keys_to_delete", ")", ":", "print", "(", "'Round name: '", ",", "self", ".", "round_name", ")", "print", "(", "'Number of entities to be deleted: '", ",", "len", "(", "keys_to_delete", ")", ")", "if", "not", "keys_to_delete", ":", "return", "if", "self", ".", "verbose", ":", "print", "(", "'Entities to delete:'", ")", "idx", "=", "0", "prev_key_prefix", "=", "None", "dots_printed_after_same_prefix", "=", "False", "for", "k", "in", "keys_to_delete", ":", "if", "idx", ">=", "20", ":", "print", "(", "' ...'", ")", "print", "(", "' ...'", ")", "break", "key_prefix", "=", "(", "k", ".", "flat_path", "[", "0", ":", "1", "]", "if", "k", ".", "flat_path", "[", "0", "]", "in", "[", "u'SubmissionType'", ",", "u'WorkType'", "]", "else", "k", ".", "flat_path", "[", "0", "]", ")", "if", "prev_key_prefix", "==", "key_prefix", ":", "if", "not", "dots_printed_after_same_prefix", ":", "print", "(", "' ...'", ")", "dots_printed_after_same_prefix", "=", "True", "else", ":", "print", "(", "' '", ",", "k", ")", "dots_printed_after_same_prefix", "=", "False", "idx", "+=", "1", "prev_key_prefix", "=", "key_prefix", "print", "(", ")", "inp", "=", "input_str", "(", "'Are you sure? (type \"yes\" without quotes to confirm): '", ")", "if", "inp", "!=", "'yes'", ":", "return", "with", "self", ".", "datastore_client", ".", "no_transact_batch", "(", ")", "as", "batch", ":", "for", "k", "in", "keys_to_delete", ":", "batch", ".", "delete", "(", "k", ")", "print", "(", "'Data deleted'", ")" ]
Asks confirmation and then deletes entries with keys.

Args:
  keys_to_delete: list of datastore keys for which entries should be deleted
[ "Asks", "confirmation", "and", "then", "deletes", "entries", "with", "keys", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L610-L649
28,547
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster.cleanup_defenses
def cleanup_defenses(self): """Cleans up all data about defense work in current round.""" print_header('CLEANING UP DEFENSES DATA') work_ancestor_key = self.datastore_client.key('WorkType', 'AllDefenses') keys_to_delete = [ e.key for e in self.datastore_client.query_fetch(kind=u'ClassificationBatch') ] + [ e.key for e in self.datastore_client.query_fetch(kind=u'Work', ancestor=work_ancestor_key) ] self._cleanup_keys_with_confirmation(keys_to_delete)
python
def cleanup_defenses(self): """Cleans up all data about defense work in current round.""" print_header('CLEANING UP DEFENSES DATA') work_ancestor_key = self.datastore_client.key('WorkType', 'AllDefenses') keys_to_delete = [ e.key for e in self.datastore_client.query_fetch(kind=u'ClassificationBatch') ] + [ e.key for e in self.datastore_client.query_fetch(kind=u'Work', ancestor=work_ancestor_key) ] self._cleanup_keys_with_confirmation(keys_to_delete)
[ "def", "cleanup_defenses", "(", "self", ")", ":", "print_header", "(", "'CLEANING UP DEFENSES DATA'", ")", "work_ancestor_key", "=", "self", ".", "datastore_client", ".", "key", "(", "'WorkType'", ",", "'AllDefenses'", ")", "keys_to_delete", "=", "[", "e", ".", "key", "for", "e", "in", "self", ".", "datastore_client", ".", "query_fetch", "(", "kind", "=", "u'ClassificationBatch'", ")", "]", "+", "[", "e", ".", "key", "for", "e", "in", "self", ".", "datastore_client", ".", "query_fetch", "(", "kind", "=", "u'Work'", ",", "ancestor", "=", "work_ancestor_key", ")", "]", "self", ".", "_cleanup_keys_with_confirmation", "(", "keys_to_delete", ")" ]
Cleans up all data about defense work in current round.
[ "Cleans", "up", "all", "data", "about", "defense", "work", "in", "current", "round", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L651-L663
28,548
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
EvaluationMaster.cleanup_datastore
def cleanup_datastore(self): """Cleans up datastore and deletes all information about current round.""" print_header('CLEANING UP ENTIRE DATASTORE') kinds_to_delete = [u'Submission', u'SubmissionType', u'DatasetImage', u'DatasetBatch', u'AdversarialImage', u'AdversarialBatch', u'Work', u'WorkType', u'ClassificationBatch'] keys_to_delete = [e.key for k in kinds_to_delete for e in self.datastore_client.query_fetch(kind=k)] self._cleanup_keys_with_confirmation(keys_to_delete)
python
def cleanup_datastore(self): """Cleans up datastore and deletes all information about current round.""" print_header('CLEANING UP ENTIRE DATASTORE') kinds_to_delete = [u'Submission', u'SubmissionType', u'DatasetImage', u'DatasetBatch', u'AdversarialImage', u'AdversarialBatch', u'Work', u'WorkType', u'ClassificationBatch'] keys_to_delete = [e.key for k in kinds_to_delete for e in self.datastore_client.query_fetch(kind=k)] self._cleanup_keys_with_confirmation(keys_to_delete)
[ "def", "cleanup_datastore", "(", "self", ")", ":", "print_header", "(", "'CLEANING UP ENTIRE DATASTORE'", ")", "kinds_to_delete", "=", "[", "u'Submission'", ",", "u'SubmissionType'", ",", "u'DatasetImage'", ",", "u'DatasetBatch'", ",", "u'AdversarialImage'", ",", "u'AdversarialBatch'", ",", "u'Work'", ",", "u'WorkType'", ",", "u'ClassificationBatch'", "]", "keys_to_delete", "=", "[", "e", ".", "key", "for", "k", "in", "kinds_to_delete", "for", "e", "in", "self", ".", "datastore_client", ".", "query_fetch", "(", "kind", "=", "k", ")", "]", "self", ".", "_cleanup_keys_with_confirmation", "(", "keys_to_delete", ")" ]
Cleans up datastore and deletes all information about current round.
[ "Cleans", "up", "datastore", "and", "deletes", "all", "information", "about", "current", "round", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L665-L675
28,549
tensorflow/cleverhans
examples/multigpu_advtrain/make_model.py
make_basic_ngpu
def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs): """ Create a multi-GPU model similar to the basic cnn in the tutorials. """ model = make_basic_cnn() layers = model.layers model = MLPnGPU(nb_classes, layers, input_shape) return model
python
def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs): """ Create a multi-GPU model similar to the basic cnn in the tutorials. """ model = make_basic_cnn() layers = model.layers model = MLPnGPU(nb_classes, layers, input_shape) return model
[ "def", "make_basic_ngpu", "(", "nb_classes", "=", "10", ",", "input_shape", "=", "(", "None", ",", "28", ",", "28", ",", "1", ")", ",", "*", "*", "kwargs", ")", ":", "model", "=", "make_basic_cnn", "(", ")", "layers", "=", "model", ".", "layers", "model", "=", "MLPnGPU", "(", "nb_classes", ",", "layers", ",", "input_shape", ")", "return", "model" ]
Create a multi-GPU model similar to the basic cnn in the tutorials.
[ "Create", "a", "multi", "-", "GPU", "model", "similar", "to", "the", "basic", "cnn", "in", "the", "tutorials", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/make_model.py#L27-L35
28,550
tensorflow/cleverhans
examples/multigpu_advtrain/resnet_tf.py
ResNetTF.build_cost
def build_cost(self, labels, logits): """ Build the graph for cost from the logits if logits are provided. If predictions are provided, logits are extracted from the operation. """ op = logits.op if "softmax" in str(op).lower(): logits, = op.inputs with tf.variable_scope('costs'): xent = tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=labels) cost = tf.reduce_mean(xent, name='xent') cost += self._decay() cost = cost return cost
python
def build_cost(self, labels, logits): """ Build the graph for cost from the logits if logits are provided. If predictions are provided, logits are extracted from the operation. """ op = logits.op if "softmax" in str(op).lower(): logits, = op.inputs with tf.variable_scope('costs'): xent = tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=labels) cost = tf.reduce_mean(xent, name='xent') cost += self._decay() cost = cost return cost
[ "def", "build_cost", "(", "self", ",", "labels", ",", "logits", ")", ":", "op", "=", "logits", ".", "op", "if", "\"softmax\"", "in", "str", "(", "op", ")", ".", "lower", "(", ")", ":", "logits", ",", "=", "op", ".", "inputs", "with", "tf", ".", "variable_scope", "(", "'costs'", ")", ":", "xent", "=", "tf", ".", "nn", ".", "softmax_cross_entropy_with_logits", "(", "logits", "=", "logits", ",", "labels", "=", "labels", ")", "cost", "=", "tf", ".", "reduce_mean", "(", "xent", ",", "name", "=", "'xent'", ")", "cost", "+=", "self", ".", "_decay", "(", ")", "cost", "=", "cost", "return", "cost" ]
Build the graph for cost from the logits if logits are provided. If predictions are provided, logits are extracted from the operation.
[ "Build", "the", "graph", "for", "cost", "from", "the", "logits", "if", "logits", "are", "provided", ".", "If", "predictions", "are", "provided", "logits", "are", "extracted", "from", "the", "operation", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/resnet_tf.py#L142-L158
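The `build_cost` record above combines a softmax cross-entropy term computed from logits with a weight-decay term. The sketch below reproduces that arithmetic in plain NumPy (log-sum-exp for stability, mean over the batch, plus an L2 decay term); it is an independent illustration, not the repository's TensorFlow graph, and the decay rate and sample tensors are assumptions.

import numpy as np

def softmax_cross_entropy_with_logits(labels, logits):
    """Mean cross-entropy between one-hot labels and softmax(logits)."""
    # log-softmax via the log-sum-exp trick for numerical stability
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -(labels * log_probs).sum(axis=1).mean()

def l2_decay(weights, rate=1e-4):
    """Weight-decay term analogous to the _decay() call in the record above."""
    return rate * sum((w ** 2).sum() for w in weights)

logits = np.array([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])
labels = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
weights = [np.ones((3, 3)), np.ones(3)]

cost = softmax_cross_entropy_with_logits(labels, logits) + l2_decay(weights)
print(cost)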
28,551
tensorflow/cleverhans
examples/multigpu_advtrain/resnet_tf.py
ResNetTF._layer_norm
def _layer_norm(self, name, x): """Layer normalization.""" if self.init_layers: bn = LayerNorm() bn.name = name self.layers += [bn] else: bn = self.layers[self.layer_idx] self.layer_idx += 1 bn.device_name = self.device_name bn.set_training(self.training) x = bn.fprop(x) return x
python
def _layer_norm(self, name, x): """Layer normalization.""" if self.init_layers: bn = LayerNorm() bn.name = name self.layers += [bn] else: bn = self.layers[self.layer_idx] self.layer_idx += 1 bn.device_name = self.device_name bn.set_training(self.training) x = bn.fprop(x) return x
[ "def", "_layer_norm", "(", "self", ",", "name", ",", "x", ")", ":", "if", "self", ".", "init_layers", ":", "bn", "=", "LayerNorm", "(", ")", "bn", ".", "name", "=", "name", "self", ".", "layers", "+=", "[", "bn", "]", "else", ":", "bn", "=", "self", ".", "layers", "[", "self", ".", "layer_idx", "]", "self", ".", "layer_idx", "+=", "1", "bn", ".", "device_name", "=", "self", ".", "device_name", "bn", ".", "set_training", "(", "self", ".", "training", ")", "x", "=", "bn", ".", "fprop", "(", "x", ")", "return", "x" ]
Layer normalization.
[ "Layer", "normalization", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/resnet_tf.py#L189-L201
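The `_layer_norm` record above wraps a LayerNorm layer that is created once and reused on later passes. The numerical operation itself is layer normalization: each example is normalized over its feature axes, then optionally scaled and shifted. A minimal NumPy sketch of that computation, independent of the repository's layer classes:

import numpy as np

def layer_norm(x, gamma=None, beta=None, eps=1e-6):
    """Normalize each example over its feature axes, then scale and shift."""
    axes = tuple(range(1, x.ndim))            # all axes except the batch axis
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    x_hat = (x - mean) / np.sqrt(var + eps)
    if gamma is not None:
        x_hat = gamma * x_hat
    if beta is not None:
        x_hat = x_hat + beta
    return x_hat

x = np.random.randn(4, 8, 8, 16)              # NHWC activations
y = layer_norm(x)
print(y.mean(axis=(1, 2, 3)), y.std(axis=(1, 2, 3)))   # ~0 and ~1 per example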
28,552
tensorflow/cleverhans
examples/multigpu_advtrain/resnet_tf.py
ResNetTF._bottleneck_residual
def _bottleneck_residual(self, x, in_filter, out_filter, stride, activate_before_residual=False): """Bottleneck residual unit with 3 sub layers.""" if activate_before_residual: with tf.variable_scope('common_bn_relu'): x = self._layer_norm('init_bn', x) x = self._relu(x, self.hps.relu_leakiness) orig_x = x else: with tf.variable_scope('residual_bn_relu'): orig_x = x x = self._layer_norm('init_bn', x) x = self._relu(x, self.hps.relu_leakiness) with tf.variable_scope('sub1'): x = self._conv('conv1', x, 1, in_filter, out_filter / 4, stride) with tf.variable_scope('sub2'): x = self._layer_norm('bn2', x) x = self._relu(x, self.hps.relu_leakiness) x = self._conv('conv2', x, 3, out_filter / 4, out_filter / 4, [1, 1, 1, 1]) with tf.variable_scope('sub3'): x = self._layer_norm('bn3', x) x = self._relu(x, self.hps.relu_leakiness) x = self._conv('conv3', x, 1, out_filter / 4, out_filter, [1, 1, 1, 1]) with tf.variable_scope('sub_add'): if in_filter != out_filter: orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride) x += orig_x return x
python
def _bottleneck_residual(self, x, in_filter, out_filter, stride, activate_before_residual=False): """Bottleneck residual unit with 3 sub layers.""" if activate_before_residual: with tf.variable_scope('common_bn_relu'): x = self._layer_norm('init_bn', x) x = self._relu(x, self.hps.relu_leakiness) orig_x = x else: with tf.variable_scope('residual_bn_relu'): orig_x = x x = self._layer_norm('init_bn', x) x = self._relu(x, self.hps.relu_leakiness) with tf.variable_scope('sub1'): x = self._conv('conv1', x, 1, in_filter, out_filter / 4, stride) with tf.variable_scope('sub2'): x = self._layer_norm('bn2', x) x = self._relu(x, self.hps.relu_leakiness) x = self._conv('conv2', x, 3, out_filter / 4, out_filter / 4, [1, 1, 1, 1]) with tf.variable_scope('sub3'): x = self._layer_norm('bn3', x) x = self._relu(x, self.hps.relu_leakiness) x = self._conv('conv3', x, 1, out_filter / 4, out_filter, [1, 1, 1, 1]) with tf.variable_scope('sub_add'): if in_filter != out_filter: orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride) x += orig_x return x
[ "def", "_bottleneck_residual", "(", "self", ",", "x", ",", "in_filter", ",", "out_filter", ",", "stride", ",", "activate_before_residual", "=", "False", ")", ":", "if", "activate_before_residual", ":", "with", "tf", ".", "variable_scope", "(", "'common_bn_relu'", ")", ":", "x", "=", "self", ".", "_layer_norm", "(", "'init_bn'", ",", "x", ")", "x", "=", "self", ".", "_relu", "(", "x", ",", "self", ".", "hps", ".", "relu_leakiness", ")", "orig_x", "=", "x", "else", ":", "with", "tf", ".", "variable_scope", "(", "'residual_bn_relu'", ")", ":", "orig_x", "=", "x", "x", "=", "self", ".", "_layer_norm", "(", "'init_bn'", ",", "x", ")", "x", "=", "self", ".", "_relu", "(", "x", ",", "self", ".", "hps", ".", "relu_leakiness", ")", "with", "tf", ".", "variable_scope", "(", "'sub1'", ")", ":", "x", "=", "self", ".", "_conv", "(", "'conv1'", ",", "x", ",", "1", ",", "in_filter", ",", "out_filter", "/", "4", ",", "stride", ")", "with", "tf", ".", "variable_scope", "(", "'sub2'", ")", ":", "x", "=", "self", ".", "_layer_norm", "(", "'bn2'", ",", "x", ")", "x", "=", "self", ".", "_relu", "(", "x", ",", "self", ".", "hps", ".", "relu_leakiness", ")", "x", "=", "self", ".", "_conv", "(", "'conv2'", ",", "x", ",", "3", ",", "out_filter", "/", "4", ",", "out_filter", "/", "4", ",", "[", "1", ",", "1", ",", "1", ",", "1", "]", ")", "with", "tf", ".", "variable_scope", "(", "'sub3'", ")", ":", "x", "=", "self", ".", "_layer_norm", "(", "'bn3'", ",", "x", ")", "x", "=", "self", ".", "_relu", "(", "x", ",", "self", ".", "hps", ".", "relu_leakiness", ")", "x", "=", "self", ".", "_conv", "(", "'conv3'", ",", "x", ",", "1", ",", "out_filter", "/", "4", ",", "out_filter", ",", "[", "1", ",", "1", ",", "1", ",", "1", "]", ")", "with", "tf", ".", "variable_scope", "(", "'sub_add'", ")", ":", "if", "in_filter", "!=", "out_filter", ":", "orig_x", "=", "self", ".", "_conv", "(", "'project'", ",", "orig_x", ",", "1", ",", "in_filter", ",", "out_filter", ",", "stride", ")", "x", "+=", "orig_x", "return", "x" ]
Bottleneck residual unit with 3 sub layers.
[ "Bottleneck", "residual", "unit", "with", "3", "sub", "layers", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/resnet_tf.py#L236-L271
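`_bottleneck_residual` above is a pre-activation bottleneck: normalization and (leaky) ReLU, a 1x1 reduction, a 3x3 convolution, a 1x1 expansion, and a skip connection that is projected when shapes differ. Below is a hedged sketch of the same structure written with tf.keras layers (assuming TensorFlow 2.x); it is not the repository's implementation, and the layer-norm choice, leakiness, and shapes are assumptions for illustration.

import tensorflow as tf

def bottleneck_residual(x, in_filter, out_filter, stride, leakiness=0.1):
    """Pre-activation bottleneck: norm/relu, 1x1 reduce, 3x3, 1x1 expand, skip add."""
    orig_x = x
    x = tf.keras.layers.LayerNormalization()(x)
    x = tf.keras.layers.LeakyReLU(leakiness)(x)
    x = tf.keras.layers.Conv2D(out_filter // 4, 1, strides=stride, padding='same')(x)

    x = tf.keras.layers.LayerNormalization()(x)
    x = tf.keras.layers.LeakyReLU(leakiness)(x)
    x = tf.keras.layers.Conv2D(out_filter // 4, 3, strides=1, padding='same')(x)

    x = tf.keras.layers.LayerNormalization()(x)
    x = tf.keras.layers.LeakyReLU(leakiness)(x)
    x = tf.keras.layers.Conv2D(out_filter, 1, strides=1, padding='same')(x)

    if in_filter != out_filter or stride != 1:
        # project the shortcut so shapes match before the residual add
        orig_x = tf.keras.layers.Conv2D(out_filter, 1, strides=stride,
                                        padding='same')(orig_x)
    return tf.keras.layers.Add()([x, orig_x])

inputs = tf.keras.Input(shape=(32, 32, 16))
outputs = bottleneck_residual(inputs, in_filter=16, out_filter=64, stride=2)
model = tf.keras.Model(inputs, outputs)
model.summary()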
28,553
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py
read_classification_results
def read_classification_results(storage_client, file_path): """Reads classification results from the file in Cloud Storage. This method reads file with classification results produced by running defense on singe batch of adversarial images. Args: storage_client: instance of CompetitionStorageClient or None for local file file_path: path of the file with results Returns: dictionary where keys are image names or IDs and values are classification labels """ if storage_client: # file on Cloud success = False retry_count = 0 while retry_count < 4: try: blob = storage_client.get_blob(file_path) if not blob: return {} if blob.size > MAX_ALLOWED_CLASSIFICATION_RESULT_SIZE: logging.warning('Skipping classification result because it''s too ' 'big: %d bytes for %s', blob.size, file_path) return None buf = BytesIO() blob.download_to_file(buf) buf.seek(0) success = True break except Exception: retry_count += 1 time.sleep(5) if not success: return None else: # local file try: with open(file_path, 'rb') as f: buf = BytesIO(f.read()) except IOError: return None result = {} if PY3: buf = StringIO(buf.read().decode('UTF-8')) for row in csv.reader(buf): try: image_filename = row[0] if image_filename.endswith('.png') or image_filename.endswith('.jpg'): image_filename = image_filename[:image_filename.rfind('.')] label = int(row[1]) except (IndexError, ValueError): continue result[image_filename] = label return result
python
def read_classification_results(storage_client, file_path): """Reads classification results from the file in Cloud Storage. This method reads file with classification results produced by running defense on singe batch of adversarial images. Args: storage_client: instance of CompetitionStorageClient or None for local file file_path: path of the file with results Returns: dictionary where keys are image names or IDs and values are classification labels """ if storage_client: # file on Cloud success = False retry_count = 0 while retry_count < 4: try: blob = storage_client.get_blob(file_path) if not blob: return {} if blob.size > MAX_ALLOWED_CLASSIFICATION_RESULT_SIZE: logging.warning('Skipping classification result because it''s too ' 'big: %d bytes for %s', blob.size, file_path) return None buf = BytesIO() blob.download_to_file(buf) buf.seek(0) success = True break except Exception: retry_count += 1 time.sleep(5) if not success: return None else: # local file try: with open(file_path, 'rb') as f: buf = BytesIO(f.read()) except IOError: return None result = {} if PY3: buf = StringIO(buf.read().decode('UTF-8')) for row in csv.reader(buf): try: image_filename = row[0] if image_filename.endswith('.png') or image_filename.endswith('.jpg'): image_filename = image_filename[:image_filename.rfind('.')] label = int(row[1]) except (IndexError, ValueError): continue result[image_filename] = label return result
[ "def", "read_classification_results", "(", "storage_client", ",", "file_path", ")", ":", "if", "storage_client", ":", "# file on Cloud", "success", "=", "False", "retry_count", "=", "0", "while", "retry_count", "<", "4", ":", "try", ":", "blob", "=", "storage_client", ".", "get_blob", "(", "file_path", ")", "if", "not", "blob", ":", "return", "{", "}", "if", "blob", ".", "size", ">", "MAX_ALLOWED_CLASSIFICATION_RESULT_SIZE", ":", "logging", ".", "warning", "(", "'Skipping classification result because it'", "'s too '", "'big: %d bytes for %s'", ",", "blob", ".", "size", ",", "file_path", ")", "return", "None", "buf", "=", "BytesIO", "(", ")", "blob", ".", "download_to_file", "(", "buf", ")", "buf", ".", "seek", "(", "0", ")", "success", "=", "True", "break", "except", "Exception", ":", "retry_count", "+=", "1", "time", ".", "sleep", "(", "5", ")", "if", "not", "success", ":", "return", "None", "else", ":", "# local file", "try", ":", "with", "open", "(", "file_path", ",", "'rb'", ")", "as", "f", ":", "buf", "=", "BytesIO", "(", "f", ".", "read", "(", ")", ")", "except", "IOError", ":", "return", "None", "result", "=", "{", "}", "if", "PY3", ":", "buf", "=", "StringIO", "(", "buf", ".", "read", "(", ")", ".", "decode", "(", "'UTF-8'", ")", ")", "for", "row", "in", "csv", ".", "reader", "(", "buf", ")", ":", "try", ":", "image_filename", "=", "row", "[", "0", "]", "if", "image_filename", ".", "endswith", "(", "'.png'", ")", "or", "image_filename", ".", "endswith", "(", "'.jpg'", ")", ":", "image_filename", "=", "image_filename", "[", ":", "image_filename", ".", "rfind", "(", "'.'", ")", "]", "label", "=", "int", "(", "row", "[", "1", "]", ")", "except", "(", "IndexError", ",", "ValueError", ")", ":", "continue", "result", "[", "image_filename", "]", "=", "label", "return", "result" ]
Reads classification results from the file in Cloud Storage.

This method reads file with classification results produced by running
defense on single batch of adversarial images.

Args:
  storage_client: instance of CompetitionStorageClient or None for local file
  file_path: path of the file with results

Returns:
  dictionary where keys are image names or IDs and values are
    classification labels
[ "Reads", "classification", "results", "from", "the", "file", "in", "Cloud", "Storage", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py#L30-L86
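The core parsing step in `read_classification_results` above is: treat each CSV row as "image_filename,label", strip a .png/.jpg extension, and silently skip malformed rows. A self-contained stdlib sketch of just that step (the retry/download logic around Cloud Storage is omitted, and the sample CSV text is an assumption):

import csv
from io import StringIO

def parse_classification_csv(text):
    """Parse 'filename,label' rows; strip image extensions; skip malformed rows."""
    result = {}
    for row in csv.reader(StringIO(text)):
        try:
            name = row[0]
            if name.endswith('.png') or name.endswith('.jpg'):
                name = name[:name.rfind('.')]
            result[name] = int(row[1])
        except (IndexError, ValueError):
            continue  # skip rows that are not "name,int_label"
    return result

sample = 'img_001.png,42\nimg_002.jpg,7\nbroken_row\nimg_003.png,not_a_label\n'
print(parse_classification_csv(sample))   # {'img_001': 42, 'img_002': 7}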
28,554
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py
analyze_one_classification_result
def analyze_one_classification_result(storage_client, file_path, adv_batch, dataset_batches, dataset_meta): """Reads and analyzes one classification result. This method reads file with classification result and counts how many images were classified correctly and incorrectly, how many times target class was hit and total number of images. Args: storage_client: instance of CompetitionStorageClient file_path: result file path adv_batch: AversarialBatches.data[adv_batch_id] adv_batch_id is stored in each ClassificationBatch entity dataset_batches: instance of DatasetBatches dataset_meta: instance of DatasetMetadata Returns: Tuple of (count_correctly_classified, count_errors, count_hit_target_class, num_images) """ class_result = read_classification_results(storage_client, file_path) if class_result is None: return 0, 0, 0, 0 adv_images = adv_batch['images'] dataset_batch_images = ( dataset_batches.data[adv_batch['dataset_batch_id']]['images']) count_correctly_classified = 0 count_errors = 0 count_hit_target_class = 0 num_images = 0 for adv_img_id, label in iteritems(class_result): if adv_img_id not in adv_images: continue num_images += 1 clean_image_id = adv_images[adv_img_id]['clean_image_id'] dataset_image_id = ( dataset_batch_images[clean_image_id]['dataset_image_id']) if label == dataset_meta.get_true_label(dataset_image_id): count_correctly_classified += 1 else: count_errors += 1 if label == dataset_meta.get_target_class(dataset_image_id): count_hit_target_class += 1 return (count_correctly_classified, count_errors, count_hit_target_class, num_images)
python
def analyze_one_classification_result(storage_client, file_path, adv_batch, dataset_batches, dataset_meta): """Reads and analyzes one classification result. This method reads file with classification result and counts how many images were classified correctly and incorrectly, how many times target class was hit and total number of images. Args: storage_client: instance of CompetitionStorageClient file_path: result file path adv_batch: AversarialBatches.data[adv_batch_id] adv_batch_id is stored in each ClassificationBatch entity dataset_batches: instance of DatasetBatches dataset_meta: instance of DatasetMetadata Returns: Tuple of (count_correctly_classified, count_errors, count_hit_target_class, num_images) """ class_result = read_classification_results(storage_client, file_path) if class_result is None: return 0, 0, 0, 0 adv_images = adv_batch['images'] dataset_batch_images = ( dataset_batches.data[adv_batch['dataset_batch_id']]['images']) count_correctly_classified = 0 count_errors = 0 count_hit_target_class = 0 num_images = 0 for adv_img_id, label in iteritems(class_result): if adv_img_id not in adv_images: continue num_images += 1 clean_image_id = adv_images[adv_img_id]['clean_image_id'] dataset_image_id = ( dataset_batch_images[clean_image_id]['dataset_image_id']) if label == dataset_meta.get_true_label(dataset_image_id): count_correctly_classified += 1 else: count_errors += 1 if label == dataset_meta.get_target_class(dataset_image_id): count_hit_target_class += 1 return (count_correctly_classified, count_errors, count_hit_target_class, num_images)
[ "def", "analyze_one_classification_result", "(", "storage_client", ",", "file_path", ",", "adv_batch", ",", "dataset_batches", ",", "dataset_meta", ")", ":", "class_result", "=", "read_classification_results", "(", "storage_client", ",", "file_path", ")", "if", "class_result", "is", "None", ":", "return", "0", ",", "0", ",", "0", ",", "0", "adv_images", "=", "adv_batch", "[", "'images'", "]", "dataset_batch_images", "=", "(", "dataset_batches", ".", "data", "[", "adv_batch", "[", "'dataset_batch_id'", "]", "]", "[", "'images'", "]", ")", "count_correctly_classified", "=", "0", "count_errors", "=", "0", "count_hit_target_class", "=", "0", "num_images", "=", "0", "for", "adv_img_id", ",", "label", "in", "iteritems", "(", "class_result", ")", ":", "if", "adv_img_id", "not", "in", "adv_images", ":", "continue", "num_images", "+=", "1", "clean_image_id", "=", "adv_images", "[", "adv_img_id", "]", "[", "'clean_image_id'", "]", "dataset_image_id", "=", "(", "dataset_batch_images", "[", "clean_image_id", "]", "[", "'dataset_image_id'", "]", ")", "if", "label", "==", "dataset_meta", ".", "get_true_label", "(", "dataset_image_id", ")", ":", "count_correctly_classified", "+=", "1", "else", ":", "count_errors", "+=", "1", "if", "label", "==", "dataset_meta", ".", "get_target_class", "(", "dataset_image_id", ")", ":", "count_hit_target_class", "+=", "1", "return", "(", "count_correctly_classified", ",", "count_errors", ",", "count_hit_target_class", ",", "num_images", ")" ]
Reads and analyzes one classification result.

This method reads file with classification result and counts how many images
were classified correctly and incorrectly, how many times target class was
hit and total number of images.

Args:
  storage_client: instance of CompetitionStorageClient
  file_path: result file path
  adv_batch: AversarialBatches.data[adv_batch_id]
    adv_batch_id is stored in each ClassificationBatch entity
  dataset_batches: instance of DatasetBatches
  dataset_meta: instance of DatasetMetadata

Returns:
  Tuple of (count_correctly_classified, count_errors,
  count_hit_target_class, num_images)
[ "Reads", "and", "analyzes", "one", "classification", "result", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py#L89-L134
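The counting logic in `analyze_one_classification_result` above tallies correct classifications, misclassifications, and hits on the attack's target class (target hits are only counted among misclassified images). A small standalone sketch of that tally; all three dictionaries are illustrative stand-ins for the dataset metadata lookups.

predictions = {'adv_1': 5, 'adv_2': 3, 'adv_3': 8}      # adv image id -> predicted label
true_labels = {'adv_1': 5, 'adv_2': 7, 'adv_3': 2}      # adv image id -> true label
target_classes = {'adv_1': 9, 'adv_2': 3, 'adv_3': 8}   # adv image id -> attack target

correct = errors = hit_target = 0
for img_id, label in predictions.items():
    if label == true_labels[img_id]:
        correct += 1
    else:
        errors += 1
        if label == target_classes[img_id]:
            hit_target += 1

print(correct, errors, hit_target, len(predictions))    # 1 2 2 3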
28,555
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py
ResultMatrix.save_to_file
def save_to_file(self, filename, remap_dim0=None, remap_dim1=None): """Saves matrix to the file. Args: filename: name of the file where to save matrix remap_dim0: dictionary with mapping row indices to row names which should be saved to file. If none then indices will be used as names. remap_dim1: dictionary with mapping column indices to column names which should be saved to file. If none then indices will be used as names. """ # rows - first index # columns - second index with open(filename, 'w') as fobj: columns = list(sorted(self._dim1)) for col in columns: fobj.write(',') fobj.write(str(remap_dim1[col] if remap_dim1 else col)) fobj.write('\n') for row in sorted(self._dim0): fobj.write(str(remap_dim0[row] if remap_dim0 else row)) for col in columns: fobj.write(',') fobj.write(str(self[row, col])) fobj.write('\n')
python
def save_to_file(self, filename, remap_dim0=None, remap_dim1=None): """Saves matrix to the file. Args: filename: name of the file where to save matrix remap_dim0: dictionary with mapping row indices to row names which should be saved to file. If none then indices will be used as names. remap_dim1: dictionary with mapping column indices to column names which should be saved to file. If none then indices will be used as names. """ # rows - first index # columns - second index with open(filename, 'w') as fobj: columns = list(sorted(self._dim1)) for col in columns: fobj.write(',') fobj.write(str(remap_dim1[col] if remap_dim1 else col)) fobj.write('\n') for row in sorted(self._dim0): fobj.write(str(remap_dim0[row] if remap_dim0 else row)) for col in columns: fobj.write(',') fobj.write(str(self[row, col])) fobj.write('\n')
[ "def", "save_to_file", "(", "self", ",", "filename", ",", "remap_dim0", "=", "None", ",", "remap_dim1", "=", "None", ")", ":", "# rows - first index", "# columns - second index", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fobj", ":", "columns", "=", "list", "(", "sorted", "(", "self", ".", "_dim1", ")", ")", "for", "col", "in", "columns", ":", "fobj", ".", "write", "(", "','", ")", "fobj", ".", "write", "(", "str", "(", "remap_dim1", "[", "col", "]", "if", "remap_dim1", "else", "col", ")", ")", "fobj", ".", "write", "(", "'\\n'", ")", "for", "row", "in", "sorted", "(", "self", ".", "_dim0", ")", ":", "fobj", ".", "write", "(", "str", "(", "remap_dim0", "[", "row", "]", "if", "remap_dim0", "else", "row", ")", ")", "for", "col", "in", "columns", ":", "fobj", ".", "write", "(", "','", ")", "fobj", ".", "write", "(", "str", "(", "self", "[", "row", ",", "col", "]", ")", ")", "fobj", ".", "write", "(", "'\\n'", ")" ]
Saves matrix to the file.

Args:
  filename: name of the file where to save matrix
  remap_dim0: dictionary with mapping row indices to row names which should
    be saved to file. If none then indices will be used as names.
  remap_dim1: dictionary with mapping column indices to column names which
    should be saved to file. If none then indices will be used as names.
[ "Saves", "matrix", "to", "the", "file", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py#L192-L215
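`ResultMatrix.save_to_file` above writes a sparse two-dimensional result as a CSV with a header row of column names and one row per row id. A minimal sketch of that layout using a nested dict in place of the ResultMatrix class; the matrix contents and output file name are assumptions.

# Rows are defense ids, columns are attack ids, cells hold accumulated counts.
matrix = {
    'defense_a': {'attack_x': 90, 'attack_y': 75},
    'defense_b': {'attack_x': 88, 'attack_y': 80},
}
columns = sorted({col for row in matrix.values() for col in row})

with open('accuracy_matrix.csv', 'w') as fobj:
    fobj.write(',' + ','.join(columns) + '\n')                  # header row
    for row_id in sorted(matrix):
        cells = [str(matrix[row_id].get(col, 0)) for col in columns]
        fobj.write(row_id + ',' + ','.join(cells) + '\n')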
28,556
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py
ClassificationBatches.init_from_adversarial_batches_write_to_datastore
def init_from_adversarial_batches_write_to_datastore(self, submissions, adv_batches): """Populates data from adversarial batches and writes to datastore. Args: submissions: instance of CompetitionSubmissions adv_batches: instance of AversarialBatches """ # prepare classification batches idx = 0 for s_id in iterkeys(submissions.defenses): for adv_id in iterkeys(adv_batches.data): class_batch_id = CLASSIFICATION_BATCH_ID_PATTERN.format(idx) idx += 1 self.data[class_batch_id] = { 'adversarial_batch_id': adv_id, 'submission_id': s_id, 'result_path': os.path.join( self._round_name, CLASSIFICATION_BATCHES_SUBDIR, s_id + '_' + adv_id + '.csv') } # save them to datastore client = self._datastore_client with client.no_transact_batch() as batch: for key, value in iteritems(self.data): entity = client.entity(client.key(KIND_CLASSIFICATION_BATCH, key)) entity.update(value) batch.put(entity)
python
def init_from_adversarial_batches_write_to_datastore(self, submissions, adv_batches): """Populates data from adversarial batches and writes to datastore. Args: submissions: instance of CompetitionSubmissions adv_batches: instance of AversarialBatches """ # prepare classification batches idx = 0 for s_id in iterkeys(submissions.defenses): for adv_id in iterkeys(adv_batches.data): class_batch_id = CLASSIFICATION_BATCH_ID_PATTERN.format(idx) idx += 1 self.data[class_batch_id] = { 'adversarial_batch_id': adv_id, 'submission_id': s_id, 'result_path': os.path.join( self._round_name, CLASSIFICATION_BATCHES_SUBDIR, s_id + '_' + adv_id + '.csv') } # save them to datastore client = self._datastore_client with client.no_transact_batch() as batch: for key, value in iteritems(self.data): entity = client.entity(client.key(KIND_CLASSIFICATION_BATCH, key)) entity.update(value) batch.put(entity)
[ "def", "init_from_adversarial_batches_write_to_datastore", "(", "self", ",", "submissions", ",", "adv_batches", ")", ":", "# prepare classification batches", "idx", "=", "0", "for", "s_id", "in", "iterkeys", "(", "submissions", ".", "defenses", ")", ":", "for", "adv_id", "in", "iterkeys", "(", "adv_batches", ".", "data", ")", ":", "class_batch_id", "=", "CLASSIFICATION_BATCH_ID_PATTERN", ".", "format", "(", "idx", ")", "idx", "+=", "1", "self", ".", "data", "[", "class_batch_id", "]", "=", "{", "'adversarial_batch_id'", ":", "adv_id", ",", "'submission_id'", ":", "s_id", ",", "'result_path'", ":", "os", ".", "path", ".", "join", "(", "self", ".", "_round_name", ",", "CLASSIFICATION_BATCHES_SUBDIR", ",", "s_id", "+", "'_'", "+", "adv_id", "+", "'.csv'", ")", "}", "# save them to datastore", "client", "=", "self", ".", "_datastore_client", "with", "client", ".", "no_transact_batch", "(", ")", "as", "batch", ":", "for", "key", ",", "value", "in", "iteritems", "(", "self", ".", "data", ")", ":", "entity", "=", "client", ".", "entity", "(", "client", ".", "key", "(", "KIND_CLASSIFICATION_BATCH", ",", "key", ")", ")", "entity", ".", "update", "(", "value", ")", "batch", ".", "put", "(", "entity", ")" ]
Populates data from adversarial batches and writes to datastore.

Args:
  submissions: instance of CompetitionSubmissions
  adv_batches: instance of AversarialBatches
[ "Populates", "data", "from", "adversarial", "batches", "and", "writes", "to", "datastore", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py#L256-L284
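The pairing step in `init_from_adversarial_batches_write_to_datastore` above takes the cross product of defense submissions and adversarial batches, assigning each pair a sequential classification-batch id and a result path. A standalone sketch of that cross product follows; the id format, subdirectory, and sample ids are illustrative stand-ins for the module's constants, and the datastore write is omitted.

import os

defense_ids = ['defense_a', 'defense_b']
adv_batch_ids = ['advbatch_000', 'advbatch_001']

class_batches = {}
idx = 0
for s_id in defense_ids:
    for adv_id in adv_batch_ids:
        class_batch_id = 'cbatch_{0:06d}'.format(idx)
        idx += 1
        class_batches[class_batch_id] = {
            'adversarial_batch_id': adv_id,
            'submission_id': s_id,
            'result_path': os.path.join('round1', 'classification_batches',
                                        s_id + '_' + adv_id + '.csv'),
        }

for k, v in sorted(class_batches.items()):
    print(k, v['result_path'])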
28,557
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py
ClassificationBatches.init_from_datastore
def init_from_datastore(self): """Initializes data by reading it from the datastore.""" self._data = {} client = self._datastore_client for entity in client.query_fetch(kind=KIND_CLASSIFICATION_BATCH): class_batch_id = entity.key.flat_path[-1] self.data[class_batch_id] = dict(entity)
python
def init_from_datastore(self): """Initializes data by reading it from the datastore.""" self._data = {} client = self._datastore_client for entity in client.query_fetch(kind=KIND_CLASSIFICATION_BATCH): class_batch_id = entity.key.flat_path[-1] self.data[class_batch_id] = dict(entity)
[ "def", "init_from_datastore", "(", "self", ")", ":", "self", ".", "_data", "=", "{", "}", "client", "=", "self", ".", "_datastore_client", "for", "entity", "in", "client", ".", "query_fetch", "(", "kind", "=", "KIND_CLASSIFICATION_BATCH", ")", ":", "class_batch_id", "=", "entity", ".", "key", ".", "flat_path", "[", "-", "1", "]", "self", ".", "data", "[", "class_batch_id", "]", "=", "dict", "(", "entity", ")" ]
Initializes data by reading it from the datastore.
[ "Initializes", "data", "by", "reading", "it", "from", "the", "datastore", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py#L286-L292
28,558
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py
ClassificationBatches.read_batch_from_datastore
def read_batch_from_datastore(self, class_batch_id): """Reads and returns single batch from the datastore.""" client = self._datastore_client key = client.key(KIND_CLASSIFICATION_BATCH, class_batch_id) result = client.get(key) if result is not None: return dict(result) else: raise KeyError( 'Key {0} not found in the datastore'.format(key.flat_path))
python
def read_batch_from_datastore(self, class_batch_id): """Reads and returns single batch from the datastore.""" client = self._datastore_client key = client.key(KIND_CLASSIFICATION_BATCH, class_batch_id) result = client.get(key) if result is not None: return dict(result) else: raise KeyError( 'Key {0} not found in the datastore'.format(key.flat_path))
[ "def", "read_batch_from_datastore", "(", "self", ",", "class_batch_id", ")", ":", "client", "=", "self", ".", "_datastore_client", "key", "=", "client", ".", "key", "(", "KIND_CLASSIFICATION_BATCH", ",", "class_batch_id", ")", "result", "=", "client", ".", "get", "(", "key", ")", "if", "result", "is", "not", "None", ":", "return", "dict", "(", "result", ")", "else", ":", "raise", "KeyError", "(", "'Key {0} not found in the datastore'", ".", "format", "(", "key", ".", "flat_path", ")", ")" ]
Reads and returns single batch from the datastore.
[ "Reads", "and", "returns", "single", "batch", "from", "the", "datastore", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py#L294-L303
28,559
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py
ClassificationBatches.compute_classification_results
def compute_classification_results(self, adv_batches, dataset_batches, dataset_meta, defense_work=None): """Computes classification results. Args: adv_batches: instance of AversarialBatches dataset_batches: instance of DatasetBatches dataset_meta: instance of DatasetMetadata defense_work: instance of DefenseWorkPieces Returns: accuracy_matrix, error_matrix, hit_target_class_matrix, processed_images_count """ class_batch_to_work = {} if defense_work: for v in itervalues(defense_work.work): class_batch_to_work[v['output_classification_batch_id']] = v # accuracy_matrix[defense_id, attack_id] = num correctly classified accuracy_matrix = ResultMatrix() # error_matrix[defense_id, attack_id] = num misclassfied error_matrix = ResultMatrix() # hit_target_class_matrix[defense_id, attack_id] = num hit target class hit_target_class_matrix = ResultMatrix() # processed_images_count[defense_id] = num processed images by defense processed_images_count = {} total_count = len(self.data) processed_count = 0 logging.info('Processing %d files with classification results', len(self.data)) for k, v in iteritems(self.data): if processed_count % 100 == 0: logging.info('Processed %d out of %d classification results', processed_count, total_count) processed_count += 1 defense_id = v['submission_id'] adv_batch = adv_batches.data[v['adversarial_batch_id']] attack_id = adv_batch['submission_id'] work_item = class_batch_to_work.get(k) required_work_stats = ['stat_correct', 'stat_error', 'stat_target_class', 'stat_num_images'] if work_item and work_item['error']: # ignore batches with error continue if work_item and all(work_item.get(i) is not None for i in required_work_stats): count_correctly_classified = work_item['stat_correct'] count_errors = work_item['stat_error'] count_hit_target_class = work_item['stat_target_class'] num_images = work_item['stat_num_images'] else: logging.warning('Recomputing accuracy for classification batch %s', k) (count_correctly_classified, count_errors, count_hit_target_class, num_images) = analyze_one_classification_result( self._storage_client, v['result_path'], adv_batch, dataset_batches, dataset_meta) # update accuracy and hit target class accuracy_matrix[defense_id, attack_id] += count_correctly_classified error_matrix[defense_id, attack_id] += count_errors hit_target_class_matrix[defense_id, attack_id] += count_hit_target_class # update number of processed images processed_images_count[defense_id] = ( processed_images_count.get(defense_id, 0) + num_images) return (accuracy_matrix, error_matrix, hit_target_class_matrix, processed_images_count)
python
def compute_classification_results(self, adv_batches, dataset_batches, dataset_meta, defense_work=None): """Computes classification results. Args: adv_batches: instance of AversarialBatches dataset_batches: instance of DatasetBatches dataset_meta: instance of DatasetMetadata defense_work: instance of DefenseWorkPieces Returns: accuracy_matrix, error_matrix, hit_target_class_matrix, processed_images_count """ class_batch_to_work = {} if defense_work: for v in itervalues(defense_work.work): class_batch_to_work[v['output_classification_batch_id']] = v # accuracy_matrix[defense_id, attack_id] = num correctly classified accuracy_matrix = ResultMatrix() # error_matrix[defense_id, attack_id] = num misclassfied error_matrix = ResultMatrix() # hit_target_class_matrix[defense_id, attack_id] = num hit target class hit_target_class_matrix = ResultMatrix() # processed_images_count[defense_id] = num processed images by defense processed_images_count = {} total_count = len(self.data) processed_count = 0 logging.info('Processing %d files with classification results', len(self.data)) for k, v in iteritems(self.data): if processed_count % 100 == 0: logging.info('Processed %d out of %d classification results', processed_count, total_count) processed_count += 1 defense_id = v['submission_id'] adv_batch = adv_batches.data[v['adversarial_batch_id']] attack_id = adv_batch['submission_id'] work_item = class_batch_to_work.get(k) required_work_stats = ['stat_correct', 'stat_error', 'stat_target_class', 'stat_num_images'] if work_item and work_item['error']: # ignore batches with error continue if work_item and all(work_item.get(i) is not None for i in required_work_stats): count_correctly_classified = work_item['stat_correct'] count_errors = work_item['stat_error'] count_hit_target_class = work_item['stat_target_class'] num_images = work_item['stat_num_images'] else: logging.warning('Recomputing accuracy for classification batch %s', k) (count_correctly_classified, count_errors, count_hit_target_class, num_images) = analyze_one_classification_result( self._storage_client, v['result_path'], adv_batch, dataset_batches, dataset_meta) # update accuracy and hit target class accuracy_matrix[defense_id, attack_id] += count_correctly_classified error_matrix[defense_id, attack_id] += count_errors hit_target_class_matrix[defense_id, attack_id] += count_hit_target_class # update number of processed images processed_images_count[defense_id] = ( processed_images_count.get(defense_id, 0) + num_images) return (accuracy_matrix, error_matrix, hit_target_class_matrix, processed_images_count)
[ "def", "compute_classification_results", "(", "self", ",", "adv_batches", ",", "dataset_batches", ",", "dataset_meta", ",", "defense_work", "=", "None", ")", ":", "class_batch_to_work", "=", "{", "}", "if", "defense_work", ":", "for", "v", "in", "itervalues", "(", "defense_work", ".", "work", ")", ":", "class_batch_to_work", "[", "v", "[", "'output_classification_batch_id'", "]", "]", "=", "v", "# accuracy_matrix[defense_id, attack_id] = num correctly classified", "accuracy_matrix", "=", "ResultMatrix", "(", ")", "# error_matrix[defense_id, attack_id] = num misclassfied", "error_matrix", "=", "ResultMatrix", "(", ")", "# hit_target_class_matrix[defense_id, attack_id] = num hit target class", "hit_target_class_matrix", "=", "ResultMatrix", "(", ")", "# processed_images_count[defense_id] = num processed images by defense", "processed_images_count", "=", "{", "}", "total_count", "=", "len", "(", "self", ".", "data", ")", "processed_count", "=", "0", "logging", ".", "info", "(", "'Processing %d files with classification results'", ",", "len", "(", "self", ".", "data", ")", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "data", ")", ":", "if", "processed_count", "%", "100", "==", "0", ":", "logging", ".", "info", "(", "'Processed %d out of %d classification results'", ",", "processed_count", ",", "total_count", ")", "processed_count", "+=", "1", "defense_id", "=", "v", "[", "'submission_id'", "]", "adv_batch", "=", "adv_batches", ".", "data", "[", "v", "[", "'adversarial_batch_id'", "]", "]", "attack_id", "=", "adv_batch", "[", "'submission_id'", "]", "work_item", "=", "class_batch_to_work", ".", "get", "(", "k", ")", "required_work_stats", "=", "[", "'stat_correct'", ",", "'stat_error'", ",", "'stat_target_class'", ",", "'stat_num_images'", "]", "if", "work_item", "and", "work_item", "[", "'error'", "]", ":", "# ignore batches with error", "continue", "if", "work_item", "and", "all", "(", "work_item", ".", "get", "(", "i", ")", "is", "not", "None", "for", "i", "in", "required_work_stats", ")", ":", "count_correctly_classified", "=", "work_item", "[", "'stat_correct'", "]", "count_errors", "=", "work_item", "[", "'stat_error'", "]", "count_hit_target_class", "=", "work_item", "[", "'stat_target_class'", "]", "num_images", "=", "work_item", "[", "'stat_num_images'", "]", "else", ":", "logging", ".", "warning", "(", "'Recomputing accuracy for classification batch %s'", ",", "k", ")", "(", "count_correctly_classified", ",", "count_errors", ",", "count_hit_target_class", ",", "num_images", ")", "=", "analyze_one_classification_result", "(", "self", ".", "_storage_client", ",", "v", "[", "'result_path'", "]", ",", "adv_batch", ",", "dataset_batches", ",", "dataset_meta", ")", "# update accuracy and hit target class", "accuracy_matrix", "[", "defense_id", ",", "attack_id", "]", "+=", "count_correctly_classified", "error_matrix", "[", "defense_id", ",", "attack_id", "]", "+=", "count_errors", "hit_target_class_matrix", "[", "defense_id", ",", "attack_id", "]", "+=", "count_hit_target_class", "# update number of processed images", "processed_images_count", "[", "defense_id", "]", "=", "(", "processed_images_count", ".", "get", "(", "defense_id", ",", "0", ")", "+", "num_images", ")", "return", "(", "accuracy_matrix", ",", "error_matrix", ",", "hit_target_class_matrix", ",", "processed_images_count", ")" ]
Computes classification results. Args: adv_batches: instance of AversarialBatches dataset_batches: instance of DatasetBatches dataset_meta: instance of DatasetMetadata defense_work: instance of DefenseWorkPieces Returns: accuracy_matrix, error_matrix, hit_target_class_matrix, processed_images_count
[ "Computes", "classification", "results", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py#L305-L373
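A minimal sketch of how the per-batch counters above can be aggregated, assuming ResultMatrix behaves like a zero-initialized counter keyed by (defense_id, attack_id); the SimpleResultMatrix stand-in, the IDs and the numbers below are illustrative only, not part of the library.

from collections import defaultdict

# Illustrative stand-in for ResultMatrix: a zero-initialized counter
# keyed by (defense_id, attack_id).
class SimpleResultMatrix(defaultdict):
  def __init__(self):
    super(SimpleResultMatrix, self).__init__(int)

# Hypothetical per-batch stats: (defense_id, attack_id, correct, errors, hit_target, num_images)
batch_stats = [
    ('DEF001', 'ATT001', 90, 10, 3, 100),
    ('DEF001', 'ATT002', 70, 30, 12, 100),
    ('DEF002', 'ATT001', 95, 5, 1, 100),
]

accuracy_matrix = SimpleResultMatrix()
error_matrix = SimpleResultMatrix()
hit_target_class_matrix = SimpleResultMatrix()
processed_images_count = {}

for defense_id, attack_id, correct, errors, hit_target, num_images in batch_stats:
  accuracy_matrix[(defense_id, attack_id)] += correct
  error_matrix[(defense_id, attack_id)] += errors
  hit_target_class_matrix[(defense_id, attack_id)] += hit_target
  processed_images_count[defense_id] = (
      processed_images_count.get(defense_id, 0) + num_images)

print(accuracy_matrix[('DEF001', 'ATT001')])   # 90
print(processed_images_count['DEF001'])        # 200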
28,560
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py
participant_from_submission_path
def participant_from_submission_path(submission_path): """Parses type of participant based on submission filename. Args: submission_path: path to the submission in Google Cloud Storage Returns: dict with one element. Element key correspond to type of participant (team, baseline), element value is ID of the participant. Raises: ValueError: is participant can't be determined based on submission path. """ basename = os.path.basename(submission_path) file_ext = None for e in ALLOWED_EXTENSIONS: if basename.endswith(e): file_ext = e break if not file_ext: raise ValueError('Invalid submission path: ' + submission_path) basename = basename[:-len(file_ext)] if basename.isdigit(): return {'team_id': int(basename)} if basename.startswith('baseline_'): return {'baseline_id': basename[len('baseline_'):]} raise ValueError('Invalid submission path: ' + submission_path)
python
def participant_from_submission_path(submission_path): """Parses type of participant based on submission filename. Args: submission_path: path to the submission in Google Cloud Storage Returns: dict with one element. Element key correspond to type of participant (team, baseline), element value is ID of the participant. Raises: ValueError: is participant can't be determined based on submission path. """ basename = os.path.basename(submission_path) file_ext = None for e in ALLOWED_EXTENSIONS: if basename.endswith(e): file_ext = e break if not file_ext: raise ValueError('Invalid submission path: ' + submission_path) basename = basename[:-len(file_ext)] if basename.isdigit(): return {'team_id': int(basename)} if basename.startswith('baseline_'): return {'baseline_id': basename[len('baseline_'):]} raise ValueError('Invalid submission path: ' + submission_path)
[ "def", "participant_from_submission_path", "(", "submission_path", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "submission_path", ")", "file_ext", "=", "None", "for", "e", "in", "ALLOWED_EXTENSIONS", ":", "if", "basename", ".", "endswith", "(", "e", ")", ":", "file_ext", "=", "e", "break", "if", "not", "file_ext", ":", "raise", "ValueError", "(", "'Invalid submission path: '", "+", "submission_path", ")", "basename", "=", "basename", "[", ":", "-", "len", "(", "file_ext", ")", "]", "if", "basename", ".", "isdigit", "(", ")", ":", "return", "{", "'team_id'", ":", "int", "(", "basename", ")", "}", "if", "basename", ".", "startswith", "(", "'baseline_'", ")", ":", "return", "{", "'baseline_id'", ":", "basename", "[", "len", "(", "'baseline_'", ")", ":", "]", "}", "raise", "ValueError", "(", "'Invalid submission path: '", "+", "submission_path", ")" ]
Parses type of participant based on submission filename. Args: submission_path: path to the submission in Google Cloud Storage Returns: dict with one element. Element key corresponds to type of participant (team, baseline), element value is ID of the participant. Raises: ValueError: if participant can't be determined based on submission path.
[ "Parses", "type", "of", "participant", "based", "on", "submission", "filename", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py#L35-L61
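A standalone sketch of the same parsing logic for quick experimentation; the ALLOWED_EXTENSIONS values ('.zip' and '.tar.gz') and the paths below are assumptions for illustration, so check the module for the real list before relying on it.

import os

ALLOWED_EXTENSIONS = ['.zip', '.tar.gz']  # assumed values, for illustration only

def parse_participant(submission_path):
  """Same decision logic as participant_from_submission_path above."""
  basename = os.path.basename(submission_path)
  file_ext = next((e for e in ALLOWED_EXTENSIONS if basename.endswith(e)), None)
  if not file_ext:
    raise ValueError('Invalid submission path: ' + submission_path)
  basename = basename[:-len(file_ext)]
  if basename.isdigit():
    return {'team_id': int(basename)}
  if basename.startswith('baseline_'):
    return {'baseline_id': basename[len('baseline_'):]}
  raise ValueError('Invalid submission path: ' + submission_path)

print(parse_participant('round1/attacks/1234.zip'))                     # {'team_id': 1234}
print(parse_participant('round1/defenses/baseline_inception.tar.gz'))   # {'baseline_id': 'inception'}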
28,561
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py
CompetitionSubmissions._load_submissions_from_datastore_dir
def _load_submissions_from_datastore_dir(self, dir_suffix, id_pattern): """Loads list of submissions from the directory. Args: dir_suffix: suffix of the directory where submissions are stored, one of the folowing constants: ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR or DEFENSE_SUBDIR. id_pattern: pattern which is used to generate (internal) IDs for submissins. One of the following constants: ATTACK_ID_PATTERN, TARGETED_ATTACK_ID_PATTERN or DEFENSE_ID_PATTERN. Returns: dictionary with all found submissions """ submissions = self._storage_client.list_blobs( prefix=os.path.join(self._round_name, dir_suffix)) return { id_pattern.format(idx): SubmissionDescriptor( path=s, participant_id=participant_from_submission_path(s)) for idx, s in enumerate(submissions) }
python
def _load_submissions_from_datastore_dir(self, dir_suffix, id_pattern): """Loads list of submissions from the directory. Args: dir_suffix: suffix of the directory where submissions are stored, one of the folowing constants: ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR or DEFENSE_SUBDIR. id_pattern: pattern which is used to generate (internal) IDs for submissins. One of the following constants: ATTACK_ID_PATTERN, TARGETED_ATTACK_ID_PATTERN or DEFENSE_ID_PATTERN. Returns: dictionary with all found submissions """ submissions = self._storage_client.list_blobs( prefix=os.path.join(self._round_name, dir_suffix)) return { id_pattern.format(idx): SubmissionDescriptor( path=s, participant_id=participant_from_submission_path(s)) for idx, s in enumerate(submissions) }
[ "def", "_load_submissions_from_datastore_dir", "(", "self", ",", "dir_suffix", ",", "id_pattern", ")", ":", "submissions", "=", "self", ".", "_storage_client", ".", "list_blobs", "(", "prefix", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_round_name", ",", "dir_suffix", ")", ")", "return", "{", "id_pattern", ".", "format", "(", "idx", ")", ":", "SubmissionDescriptor", "(", "path", "=", "s", ",", "participant_id", "=", "participant_from_submission_path", "(", "s", ")", ")", "for", "idx", ",", "s", "in", "enumerate", "(", "submissions", ")", "}" ]
Loads list of submissions from the directory. Args: dir_suffix: suffix of the directory where submissions are stored, one of the following constants: ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR or DEFENSE_SUBDIR. id_pattern: pattern which is used to generate (internal) IDs for submissions. One of the following constants: ATTACK_ID_PATTERN, TARGETED_ATTACK_ID_PATTERN or DEFENSE_ID_PATTERN. Returns: dictionary with all found submissions
[ "Loads", "list", "of", "submissions", "from", "the", "directory", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py#L99-L119
28,562
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py
CompetitionSubmissions.init_from_storage_write_to_datastore
def init_from_storage_write_to_datastore(self): """Init list of sumibssions from Storage and saves them to Datastore. Should be called only once (typically by master) during evaluation of the competition. """ # Load submissions self._attacks = self._load_submissions_from_datastore_dir( ATTACK_SUBDIR, ATTACK_ID_PATTERN) self._targeted_attacks = self._load_submissions_from_datastore_dir( TARGETED_ATTACK_SUBDIR, TARGETED_ATTACK_ID_PATTERN) self._defenses = self._load_submissions_from_datastore_dir( DEFENSE_SUBDIR, DEFENSE_ID_PATTERN) self._write_to_datastore()
python
def init_from_storage_write_to_datastore(self): """Init list of sumibssions from Storage and saves them to Datastore. Should be called only once (typically by master) during evaluation of the competition. """ # Load submissions self._attacks = self._load_submissions_from_datastore_dir( ATTACK_SUBDIR, ATTACK_ID_PATTERN) self._targeted_attacks = self._load_submissions_from_datastore_dir( TARGETED_ATTACK_SUBDIR, TARGETED_ATTACK_ID_PATTERN) self._defenses = self._load_submissions_from_datastore_dir( DEFENSE_SUBDIR, DEFENSE_ID_PATTERN) self._write_to_datastore()
[ "def", "init_from_storage_write_to_datastore", "(", "self", ")", ":", "# Load submissions", "self", ".", "_attacks", "=", "self", ".", "_load_submissions_from_datastore_dir", "(", "ATTACK_SUBDIR", ",", "ATTACK_ID_PATTERN", ")", "self", ".", "_targeted_attacks", "=", "self", ".", "_load_submissions_from_datastore_dir", "(", "TARGETED_ATTACK_SUBDIR", ",", "TARGETED_ATTACK_ID_PATTERN", ")", "self", ".", "_defenses", "=", "self", ".", "_load_submissions_from_datastore_dir", "(", "DEFENSE_SUBDIR", ",", "DEFENSE_ID_PATTERN", ")", "self", ".", "_write_to_datastore", "(", ")" ]
Init list of submissions from Storage and saves them to Datastore. Should be called only once (typically by master) during evaluation of the competition.
[ "Init", "list", "of", "sumibssions", "from", "Storage", "and", "saves", "them", "to", "Datastore", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py#L121-L134
28,563
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py
CompetitionSubmissions._write_to_datastore
def _write_to_datastore(self): """Writes all submissions to datastore.""" # Populate datastore roots_and_submissions = zip([ATTACKS_ENTITY_KEY, TARGET_ATTACKS_ENTITY_KEY, DEFENSES_ENTITY_KEY], [self._attacks, self._targeted_attacks, self._defenses]) client = self._datastore_client with client.no_transact_batch() as batch: for root_key, submissions in roots_and_submissions: batch.put(client.entity(client.key(*root_key))) for k, v in iteritems(submissions): entity = client.entity(client.key( *(root_key + [KIND_SUBMISSION, k]))) entity['submission_path'] = v.path entity.update(participant_from_submission_path(v.path)) batch.put(entity)
python
def _write_to_datastore(self): """Writes all submissions to datastore.""" # Populate datastore roots_and_submissions = zip([ATTACKS_ENTITY_KEY, TARGET_ATTACKS_ENTITY_KEY, DEFENSES_ENTITY_KEY], [self._attacks, self._targeted_attacks, self._defenses]) client = self._datastore_client with client.no_transact_batch() as batch: for root_key, submissions in roots_and_submissions: batch.put(client.entity(client.key(*root_key))) for k, v in iteritems(submissions): entity = client.entity(client.key( *(root_key + [KIND_SUBMISSION, k]))) entity['submission_path'] = v.path entity.update(participant_from_submission_path(v.path)) batch.put(entity)
[ "def", "_write_to_datastore", "(", "self", ")", ":", "# Populate datastore", "roots_and_submissions", "=", "zip", "(", "[", "ATTACKS_ENTITY_KEY", ",", "TARGET_ATTACKS_ENTITY_KEY", ",", "DEFENSES_ENTITY_KEY", "]", ",", "[", "self", ".", "_attacks", ",", "self", ".", "_targeted_attacks", ",", "self", ".", "_defenses", "]", ")", "client", "=", "self", ".", "_datastore_client", "with", "client", ".", "no_transact_batch", "(", ")", "as", "batch", ":", "for", "root_key", ",", "submissions", "in", "roots_and_submissions", ":", "batch", ".", "put", "(", "client", ".", "entity", "(", "client", ".", "key", "(", "*", "root_key", ")", ")", ")", "for", "k", ",", "v", "in", "iteritems", "(", "submissions", ")", ":", "entity", "=", "client", ".", "entity", "(", "client", ".", "key", "(", "*", "(", "root_key", "+", "[", "KIND_SUBMISSION", ",", "k", "]", ")", ")", ")", "entity", "[", "'submission_path'", "]", "=", "v", ".", "path", "entity", ".", "update", "(", "participant_from_submission_path", "(", "v", ".", "path", ")", ")", "batch", ".", "put", "(", "entity", ")" ]
Writes all submissions to datastore.
[ "Writes", "all", "submissions", "to", "datastore", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py#L136-L154
28,564
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py
CompetitionSubmissions.init_from_datastore
def init_from_datastore(self): """Init list of submission from Datastore. Should be called by each worker during initialization. """ self._attacks = {} self._targeted_attacks = {} self._defenses = {} for entity in self._datastore_client.query_fetch(kind=KIND_SUBMISSION): submission_id = entity.key.flat_path[-1] submission_path = entity['submission_path'] participant_id = {k: entity[k] for k in ['team_id', 'baseline_id'] if k in entity} submission_descr = SubmissionDescriptor(path=submission_path, participant_id=participant_id) if list(entity.key.flat_path[0:2]) == ATTACKS_ENTITY_KEY: self._attacks[submission_id] = submission_descr elif list(entity.key.flat_path[0:2]) == TARGET_ATTACKS_ENTITY_KEY: self._targeted_attacks[submission_id] = submission_descr elif list(entity.key.flat_path[0:2]) == DEFENSES_ENTITY_KEY: self._defenses[submission_id] = submission_descr
python
def init_from_datastore(self): """Init list of submission from Datastore. Should be called by each worker during initialization. """ self._attacks = {} self._targeted_attacks = {} self._defenses = {} for entity in self._datastore_client.query_fetch(kind=KIND_SUBMISSION): submission_id = entity.key.flat_path[-1] submission_path = entity['submission_path'] participant_id = {k: entity[k] for k in ['team_id', 'baseline_id'] if k in entity} submission_descr = SubmissionDescriptor(path=submission_path, participant_id=participant_id) if list(entity.key.flat_path[0:2]) == ATTACKS_ENTITY_KEY: self._attacks[submission_id] = submission_descr elif list(entity.key.flat_path[0:2]) == TARGET_ATTACKS_ENTITY_KEY: self._targeted_attacks[submission_id] = submission_descr elif list(entity.key.flat_path[0:2]) == DEFENSES_ENTITY_KEY: self._defenses[submission_id] = submission_descr
[ "def", "init_from_datastore", "(", "self", ")", ":", "self", ".", "_attacks", "=", "{", "}", "self", ".", "_targeted_attacks", "=", "{", "}", "self", ".", "_defenses", "=", "{", "}", "for", "entity", "in", "self", ".", "_datastore_client", ".", "query_fetch", "(", "kind", "=", "KIND_SUBMISSION", ")", ":", "submission_id", "=", "entity", ".", "key", ".", "flat_path", "[", "-", "1", "]", "submission_path", "=", "entity", "[", "'submission_path'", "]", "participant_id", "=", "{", "k", ":", "entity", "[", "k", "]", "for", "k", "in", "[", "'team_id'", ",", "'baseline_id'", "]", "if", "k", "in", "entity", "}", "submission_descr", "=", "SubmissionDescriptor", "(", "path", "=", "submission_path", ",", "participant_id", "=", "participant_id", ")", "if", "list", "(", "entity", ".", "key", ".", "flat_path", "[", "0", ":", "2", "]", ")", "==", "ATTACKS_ENTITY_KEY", ":", "self", ".", "_attacks", "[", "submission_id", "]", "=", "submission_descr", "elif", "list", "(", "entity", ".", "key", ".", "flat_path", "[", "0", ":", "2", "]", ")", "==", "TARGET_ATTACKS_ENTITY_KEY", ":", "self", ".", "_targeted_attacks", "[", "submission_id", "]", "=", "submission_descr", "elif", "list", "(", "entity", ".", "key", ".", "flat_path", "[", "0", ":", "2", "]", ")", "==", "DEFENSES_ENTITY_KEY", ":", "self", ".", "_defenses", "[", "submission_id", "]", "=", "submission_descr" ]
Init list of submissions from Datastore. Should be called by each worker during initialization.
[ "Init", "list", "of", "submission", "from", "Datastore", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py#L156-L177
28,565
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py
CompetitionSubmissions.find_by_id
def find_by_id(self, submission_id): """Finds submission by ID. Args: submission_id: ID of the submission Returns: SubmissionDescriptor with information about submission or None if submission is not found. """ return self._attacks.get( submission_id, self._defenses.get( submission_id, self._targeted_attacks.get(submission_id, None)))
python
def find_by_id(self, submission_id): """Finds submission by ID. Args: submission_id: ID of the submission Returns: SubmissionDescriptor with information about submission or None if submission is not found. """ return self._attacks.get( submission_id, self._defenses.get( submission_id, self._targeted_attacks.get(submission_id, None)))
[ "def", "find_by_id", "(", "self", ",", "submission_id", ")", ":", "return", "self", ".", "_attacks", ".", "get", "(", "submission_id", ",", "self", ".", "_defenses", ".", "get", "(", "submission_id", ",", "self", ".", "_targeted_attacks", ".", "get", "(", "submission_id", ",", "None", ")", ")", ")" ]
Finds submission by ID. Args: submission_id: ID of the submission Returns: SubmissionDescriptor with information about submission or None if submission is not found.
[ "Finds", "submission", "by", "ID", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py#L198-L212
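The fallback lookup is just three chained dict.get calls; a toy version with made-up IDs and registries shows the behaviour.

# Toy registries standing in for the attacks/defenses/targeted-attacks dictionaries;
# the IDs are invented and do not follow the library's real ID patterns.
attacks = {'SUBA000': 'attack descriptor'}
defenses = {'SUBD000': 'defense descriptor'}
targeted_attacks = {'SUBT000': 'targeted attack descriptor'}

def find_by_id(submission_id):
  # Try attacks first, then defenses, then targeted attacks, else None.
  return attacks.get(
      submission_id,
      defenses.get(submission_id, targeted_attacks.get(submission_id, None)))

print(find_by_id('SUBD000'))  # 'defense descriptor'
print(find_by_id('UNKNOWN'))  # None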
28,566
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py
CompetitionSubmissions.get_external_id
def get_external_id(self, submission_id): """Returns human readable submission external ID. Args: submission_id: internal submission ID. Returns: human readable ID. """ submission = self.find_by_id(submission_id) if not submission: return None if 'team_id' in submission.participant_id: return submission.participant_id['team_id'] elif 'baseline_id' in submission.participant_id: return 'baseline_' + submission.participant_id['baseline_id'] else: return ''
python
def get_external_id(self, submission_id): """Returns human readable submission external ID. Args: submission_id: internal submission ID. Returns: human readable ID. """ submission = self.find_by_id(submission_id) if not submission: return None if 'team_id' in submission.participant_id: return submission.participant_id['team_id'] elif 'baseline_id' in submission.participant_id: return 'baseline_' + submission.participant_id['baseline_id'] else: return ''
[ "def", "get_external_id", "(", "self", ",", "submission_id", ")", ":", "submission", "=", "self", ".", "find_by_id", "(", "submission_id", ")", "if", "not", "submission", ":", "return", "None", "if", "'team_id'", "in", "submission", ".", "participant_id", ":", "return", "submission", ".", "participant_id", "[", "'team_id'", "]", "elif", "'baseline_id'", "in", "submission", ".", "participant_id", ":", "return", "'baseline_'", "+", "submission", ".", "participant_id", "[", "'baseline_id'", "]", "else", ":", "return", "''" ]
Returns human readable submission external ID. Args: submission_id: internal submission ID. Returns: human readable ID.
[ "Returns", "human", "readable", "submission", "external", "ID", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py#L214-L231
28,567
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/validation_tool/submission_validator_lib.py
SubmissionValidator._load_and_verify_metadata
def _load_and_verify_metadata(self, submission_type): """Loads and verifies metadata. Args: submission_type: type of the submission Returns: dictionaty with metadata or None if metadata not found or invalid """ metadata_filename = os.path.join(self._extracted_submission_dir, 'metadata.json') if not os.path.isfile(metadata_filename): logging.error('metadata.json not found') return None try: with open(metadata_filename, 'r') as f: metadata = json.load(f) except IOError as e: logging.error('Failed to load metadata: %s', e) return None for field_name in REQUIRED_METADATA_JSON_FIELDS: if field_name not in metadata: logging.error('Field %s not found in metadata', field_name) return None # Verify submission type if submission_type != metadata['type']: logging.error('Invalid submission type in metadata, expected "%s", ' 'actual "%s"', submission_type, metadata['type']) return None # Check submission entry point entry_point = metadata['entry_point'] if not os.path.isfile(os.path.join(self._extracted_submission_dir, entry_point)): logging.error('Entry point not found: %s', entry_point) return None if not entry_point.endswith('.sh'): logging.warning('Entry point is not an .sh script. ' 'This is not necessarily a problem, but if submission ' 'won''t run double check entry point first: %s', entry_point) # Metadata verified return metadata
python
def _load_and_verify_metadata(self, submission_type): """Loads and verifies metadata. Args: submission_type: type of the submission Returns: dictionaty with metadata or None if metadata not found or invalid """ metadata_filename = os.path.join(self._extracted_submission_dir, 'metadata.json') if not os.path.isfile(metadata_filename): logging.error('metadata.json not found') return None try: with open(metadata_filename, 'r') as f: metadata = json.load(f) except IOError as e: logging.error('Failed to load metadata: %s', e) return None for field_name in REQUIRED_METADATA_JSON_FIELDS: if field_name not in metadata: logging.error('Field %s not found in metadata', field_name) return None # Verify submission type if submission_type != metadata['type']: logging.error('Invalid submission type in metadata, expected "%s", ' 'actual "%s"', submission_type, metadata['type']) return None # Check submission entry point entry_point = metadata['entry_point'] if not os.path.isfile(os.path.join(self._extracted_submission_dir, entry_point)): logging.error('Entry point not found: %s', entry_point) return None if not entry_point.endswith('.sh'): logging.warning('Entry point is not an .sh script. ' 'This is not necessarily a problem, but if submission ' 'won''t run double check entry point first: %s', entry_point) # Metadata verified return metadata
[ "def", "_load_and_verify_metadata", "(", "self", ",", "submission_type", ")", ":", "metadata_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_extracted_submission_dir", ",", "'metadata.json'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "metadata_filename", ")", ":", "logging", ".", "error", "(", "'metadata.json not found'", ")", "return", "None", "try", ":", "with", "open", "(", "metadata_filename", ",", "'r'", ")", "as", "f", ":", "metadata", "=", "json", ".", "load", "(", "f", ")", "except", "IOError", "as", "e", ":", "logging", ".", "error", "(", "'Failed to load metadata: %s'", ",", "e", ")", "return", "None", "for", "field_name", "in", "REQUIRED_METADATA_JSON_FIELDS", ":", "if", "field_name", "not", "in", "metadata", ":", "logging", ".", "error", "(", "'Field %s not found in metadata'", ",", "field_name", ")", "return", "None", "# Verify submission type", "if", "submission_type", "!=", "metadata", "[", "'type'", "]", ":", "logging", ".", "error", "(", "'Invalid submission type in metadata, expected \"%s\", '", "'actual \"%s\"'", ",", "submission_type", ",", "metadata", "[", "'type'", "]", ")", "return", "None", "# Check submission entry point", "entry_point", "=", "metadata", "[", "'entry_point'", "]", "if", "not", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_extracted_submission_dir", ",", "entry_point", ")", ")", ":", "logging", ".", "error", "(", "'Entry point not found: %s'", ",", "entry_point", ")", "return", "None", "if", "not", "entry_point", ".", "endswith", "(", "'.sh'", ")", ":", "logging", ".", "warning", "(", "'Entry point is not an .sh script. '", "'This is not necessarily a problem, but if submission '", "'won'", "'t run double check entry point first: %s'", ",", "entry_point", ")", "# Metadata verified", "return", "metadata" ]
Loads and verifies metadata. Args: submission_type: type of the submission Returns: dictionary with metadata or None if metadata not found or invalid
[ "Loads", "and", "verifies", "metadata", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/validation_tool/submission_validator_lib.py#L204-L245
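A self-contained sketch of the metadata check; the required field names listed here are an assumption standing in for REQUIRED_METADATA_JSON_FIELDS, and the JSON contents are invented.

import json

# Assumed field list for illustration; the validator's real list is
# REQUIRED_METADATA_JSON_FIELDS in submission_validator_lib.py.
REQUIRED_FIELDS = ['type', 'entry_point', 'container', 'container_gpu']

metadata_text = """
{
  "type": "attack",
  "entry_point": "run_attack.sh",
  "container": "tensorflow/tensorflow:1.1.0",
  "container_gpu": "tensorflow/tensorflow:1.1.0-gpu"
}
"""

metadata = json.loads(metadata_text)
missing = [f for f in REQUIRED_FIELDS if f not in metadata]
if missing:
  print('Invalid metadata, missing fields: %s' % missing)
elif not metadata['entry_point'].endswith('.sh'):
  print('Warning: entry point is not a .sh script: %s' % metadata['entry_point'])
else:
  print('Metadata looks plausible for a "%s" submission' % metadata['type'])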
28,568
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/validation_tool/submission_validator_lib.py
SubmissionValidator._run_submission
def _run_submission(self, metadata): """Runs submission inside Docker container. Args: metadata: dictionary with submission metadata Returns: True if status code of Docker command was success (i.e. zero), False otherwise. """ if self._use_gpu: docker_binary = 'nvidia-docker' container_name = metadata['container_gpu'] else: docker_binary = 'docker' container_name = metadata['container'] if metadata['type'] == 'defense': cmd = [docker_binary, 'run', '--network=none', '-m=24g', '-v', '{0}:/input_images:ro'.format(self._sample_input_dir), '-v', '{0}:/output_data'.format(self._sample_output_dir), '-v', '{0}:/code'.format(self._extracted_submission_dir), '-w', '/code', container_name, './' + metadata['entry_point'], '/input_images', '/output_data/result.csv'] else: epsilon = np.random.choice(ALLOWED_EPS) cmd = [docker_binary, 'run', '--network=none', '-m=24g', '-v', '{0}:/input_images:ro'.format(self._sample_input_dir), '-v', '{0}:/output_images'.format(self._sample_output_dir), '-v', '{0}:/code'.format(self._extracted_submission_dir), '-w', '/code', container_name, './' + metadata['entry_point'], '/input_images', '/output_images', str(epsilon)] logging.info('Command to run submission: %s', ' '.join(cmd)) return shell_call(cmd)
python
def _run_submission(self, metadata): """Runs submission inside Docker container. Args: metadata: dictionary with submission metadata Returns: True if status code of Docker command was success (i.e. zero), False otherwise. """ if self._use_gpu: docker_binary = 'nvidia-docker' container_name = metadata['container_gpu'] else: docker_binary = 'docker' container_name = metadata['container'] if metadata['type'] == 'defense': cmd = [docker_binary, 'run', '--network=none', '-m=24g', '-v', '{0}:/input_images:ro'.format(self._sample_input_dir), '-v', '{0}:/output_data'.format(self._sample_output_dir), '-v', '{0}:/code'.format(self._extracted_submission_dir), '-w', '/code', container_name, './' + metadata['entry_point'], '/input_images', '/output_data/result.csv'] else: epsilon = np.random.choice(ALLOWED_EPS) cmd = [docker_binary, 'run', '--network=none', '-m=24g', '-v', '{0}:/input_images:ro'.format(self._sample_input_dir), '-v', '{0}:/output_images'.format(self._sample_output_dir), '-v', '{0}:/code'.format(self._extracted_submission_dir), '-w', '/code', container_name, './' + metadata['entry_point'], '/input_images', '/output_images', str(epsilon)] logging.info('Command to run submission: %s', ' '.join(cmd)) return shell_call(cmd)
[ "def", "_run_submission", "(", "self", ",", "metadata", ")", ":", "if", "self", ".", "_use_gpu", ":", "docker_binary", "=", "'nvidia-docker'", "container_name", "=", "metadata", "[", "'container_gpu'", "]", "else", ":", "docker_binary", "=", "'docker'", "container_name", "=", "metadata", "[", "'container'", "]", "if", "metadata", "[", "'type'", "]", "==", "'defense'", ":", "cmd", "=", "[", "docker_binary", ",", "'run'", ",", "'--network=none'", ",", "'-m=24g'", ",", "'-v'", ",", "'{0}:/input_images:ro'", ".", "format", "(", "self", ".", "_sample_input_dir", ")", ",", "'-v'", ",", "'{0}:/output_data'", ".", "format", "(", "self", ".", "_sample_output_dir", ")", ",", "'-v'", ",", "'{0}:/code'", ".", "format", "(", "self", ".", "_extracted_submission_dir", ")", ",", "'-w'", ",", "'/code'", ",", "container_name", ",", "'./'", "+", "metadata", "[", "'entry_point'", "]", ",", "'/input_images'", ",", "'/output_data/result.csv'", "]", "else", ":", "epsilon", "=", "np", ".", "random", ".", "choice", "(", "ALLOWED_EPS", ")", "cmd", "=", "[", "docker_binary", ",", "'run'", ",", "'--network=none'", ",", "'-m=24g'", ",", "'-v'", ",", "'{0}:/input_images:ro'", ".", "format", "(", "self", ".", "_sample_input_dir", ")", ",", "'-v'", ",", "'{0}:/output_images'", ".", "format", "(", "self", ".", "_sample_output_dir", ")", ",", "'-v'", ",", "'{0}:/code'", ".", "format", "(", "self", ".", "_extracted_submission_dir", ")", ",", "'-w'", ",", "'/code'", ",", "container_name", ",", "'./'", "+", "metadata", "[", "'entry_point'", "]", ",", "'/input_images'", ",", "'/output_images'", ",", "str", "(", "epsilon", ")", "]", "logging", ".", "info", "(", "'Command to run submission: %s'", ",", "' '", ".", "join", "(", "cmd", ")", ")", "return", "shell_call", "(", "cmd", ")" ]
Runs submission inside Docker container. Args: metadata: dictionary with submission metadata Returns: True if status code of Docker command was success (i.e. zero), False otherwise.
[ "Runs", "submission", "inside", "Docker", "container", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/validation_tool/submission_validator_lib.py#L290-L333
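For reference, the command list for a defense submission renders into a single docker invocation roughly like the one below; the directories and the container image are placeholders, not values used by the validator.

docker_binary = 'docker'  # 'nvidia-docker' when running with a GPU
container_name = 'tensorflow/tensorflow:1.1.0'       # placeholder image
entry_point = 'run_defense.sh'                       # placeholder entry point
sample_input_dir = '/tmp/validate/input'
sample_output_dir = '/tmp/validate/output'
extracted_submission_dir = '/tmp/validate/submission'

cmd = [docker_binary, 'run', '--network=none', '-m=24g',
       '-v', '{0}:/input_images:ro'.format(sample_input_dir),
       '-v', '{0}:/output_data'.format(sample_output_dir),
       '-v', '{0}:/code'.format(extracted_submission_dir),
       '-w', '/code',
       container_name,
       './' + entry_point,
       '/input_images',
       '/output_data/result.csv']
print(' '.join(cmd))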
28,569
tensorflow/cleverhans
cleverhans/plot/save_pdf.py
save_pdf
def save_pdf(path): """ Saves a pdf of the current matplotlib figure. :param path: str, filepath to save to """ pp = PdfPages(path) pp.savefig(pyplot.gcf()) pp.close()
python
def save_pdf(path): """ Saves a pdf of the current matplotlib figure. :param path: str, filepath to save to """ pp = PdfPages(path) pp.savefig(pyplot.gcf()) pp.close()
[ "def", "save_pdf", "(", "path", ")", ":", "pp", "=", "PdfPages", "(", "path", ")", "pp", ".", "savefig", "(", "pyplot", ".", "gcf", "(", ")", ")", "pp", ".", "close", "(", ")" ]
Saves a pdf of the current matplotlib figure. :param path: str, filepath to save to
[ "Saves", "a", "pdf", "of", "the", "current", "matplotlib", "figure", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/save_pdf.py#L8-L17
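A short usage sketch, assuming matplotlib and cleverhans are installed; the Agg backend and the output path are just convenient choices for a headless run.

import matplotlib
matplotlib.use('Agg')  # headless backend so no display is required
from matplotlib import pyplot
from cleverhans.plot.save_pdf import save_pdf

pyplot.plot([0, 1, 2], [0, 1, 4])
pyplot.title('toy curve')
save_pdf('/tmp/toy_curve.pdf')  # output path is illustrative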
28,570
tensorflow/cleverhans
cleverhans/augmentation.py
random_shift
def random_shift(x, pad=(4, 4), mode='REFLECT'): """Pad a single image and then crop to the original size with a random offset.""" assert mode in 'REFLECT SYMMETRIC CONSTANT'.split() assert x.get_shape().ndims == 3 xp = tf.pad(x, [[pad[0], pad[0]], [pad[1], pad[1]], [0, 0]], mode) return tf.random_crop(xp, tf.shape(x))
python
def random_shift(x, pad=(4, 4), mode='REFLECT'): """Pad a single image and then crop to the original size with a random offset.""" assert mode in 'REFLECT SYMMETRIC CONSTANT'.split() assert x.get_shape().ndims == 3 xp = tf.pad(x, [[pad[0], pad[0]], [pad[1], pad[1]], [0, 0]], mode) return tf.random_crop(xp, tf.shape(x))
[ "def", "random_shift", "(", "x", ",", "pad", "=", "(", "4", ",", "4", ")", ",", "mode", "=", "'REFLECT'", ")", ":", "assert", "mode", "in", "'REFLECT SYMMETRIC CONSTANT'", ".", "split", "(", ")", "assert", "x", ".", "get_shape", "(", ")", ".", "ndims", "==", "3", "xp", "=", "tf", ".", "pad", "(", "x", ",", "[", "[", "pad", "[", "0", "]", ",", "pad", "[", "0", "]", "]", ",", "[", "pad", "[", "1", "]", ",", "pad", "[", "1", "]", "]", ",", "[", "0", ",", "0", "]", "]", ",", "mode", ")", "return", "tf", ".", "random_crop", "(", "xp", ",", "tf", ".", "shape", "(", "x", ")", ")" ]
Pad a single image and then crop to the original size with a random offset.
[ "Pad", "a", "single", "image", "and", "then", "crop", "to", "the", "original", "size", "with", "a", "random", "offset", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/augmentation.py#L19-L25
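A minimal TensorFlow 1.x usage sketch for a single [H, W, C] image; the shape and the random input are arbitrary.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x graph-mode API, as used by the function above
from cleverhans.augmentation import random_shift

x = tf.placeholder(tf.float32, shape=[32, 32, 3])
shifted = random_shift(x, pad=(4, 4), mode='REFLECT')

with tf.Session() as sess:
  img = np.random.rand(32, 32, 3).astype(np.float32)
  out = sess.run(shifted, feed_dict={x: img})
  print(out.shape)  # (32, 32, 3): padded to 40x40, then randomly cropped back to 32x32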
28,571
tensorflow/cleverhans
cleverhans/augmentation.py
random_crop_and_flip
def random_crop_and_flip(x, pad_rows=4, pad_cols=4): """Augment a batch by randomly cropping and horizontally flipping it.""" rows = tf.shape(x)[1] cols = tf.shape(x)[2] channels = x.get_shape()[3] def _rand_crop_img(img): """Randomly crop an individual image""" return tf.random_crop(img, [rows, cols, channels]) # Some of these ops are only on CPU. # This function will often be called with the device set to GPU. # We need to set it to CPU temporarily to avoid an exception. with tf.device('/CPU:0'): x = tf.image.resize_image_with_crop_or_pad(x, rows + pad_rows, cols + pad_cols) x = tf.map_fn(_rand_crop_img, x) x = tf.image.random_flip_left_right(x) return x
python
def random_crop_and_flip(x, pad_rows=4, pad_cols=4): """Augment a batch by randomly cropping and horizontally flipping it.""" rows = tf.shape(x)[1] cols = tf.shape(x)[2] channels = x.get_shape()[3] def _rand_crop_img(img): """Randomly crop an individual image""" return tf.random_crop(img, [rows, cols, channels]) # Some of these ops are only on CPU. # This function will often be called with the device set to GPU. # We need to set it to CPU temporarily to avoid an exception. with tf.device('/CPU:0'): x = tf.image.resize_image_with_crop_or_pad(x, rows + pad_rows, cols + pad_cols) x = tf.map_fn(_rand_crop_img, x) x = tf.image.random_flip_left_right(x) return x
[ "def", "random_crop_and_flip", "(", "x", ",", "pad_rows", "=", "4", ",", "pad_cols", "=", "4", ")", ":", "rows", "=", "tf", ".", "shape", "(", "x", ")", "[", "1", "]", "cols", "=", "tf", ".", "shape", "(", "x", ")", "[", "2", "]", "channels", "=", "x", ".", "get_shape", "(", ")", "[", "3", "]", "def", "_rand_crop_img", "(", "img", ")", ":", "\"\"\"Randomly crop an individual image\"\"\"", "return", "tf", ".", "random_crop", "(", "img", ",", "[", "rows", ",", "cols", ",", "channels", "]", ")", "# Some of these ops are only on CPU.", "# This function will often be called with the device set to GPU.", "# We need to set it to CPU temporarily to avoid an exception.", "with", "tf", ".", "device", "(", "'/CPU:0'", ")", ":", "x", "=", "tf", ".", "image", ".", "resize_image_with_crop_or_pad", "(", "x", ",", "rows", "+", "pad_rows", ",", "cols", "+", "pad_cols", ")", "x", "=", "tf", ".", "map_fn", "(", "_rand_crop_img", ",", "x", ")", "x", "=", "tf", ".", "image", ".", "random_flip_left_right", "(", "x", ")", "return", "x" ]
Augment a batch by randomly cropping and horizontally flipping it.
[ "Augment", "a", "batch", "by", "randomly", "cropping", "and", "horizontally", "flipping", "it", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/augmentation.py#L40-L58
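The batched variant can be exercised the same way; again a TensorFlow 1.x sketch with an arbitrary batch of random images.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x graph-mode API
from cleverhans.augmentation import random_crop_and_flip

x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
augmented = random_crop_and_flip(x, pad_rows=4, pad_cols=4)

with tf.Session() as sess:
  batch = np.random.rand(8, 32, 32, 3).astype(np.float32)
  out = sess.run(augmented, feed_dict={x: batch})
  print(out.shape)  # (8, 32, 32, 3)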
28,572
tensorflow/cleverhans
cleverhans/attacks/spsa.py
_project_perturbation
def _project_perturbation(perturbation, epsilon, input_image, clip_min=None, clip_max=None): """Project `perturbation` onto L-infinity ball of radius `epsilon`. Also project into hypercube such that the resulting adversarial example is between clip_min and clip_max, if applicable. """ if clip_min is None or clip_max is None: raise NotImplementedError("_project_perturbation currently has clipping " "hard-coded in.") # Ensure inputs are in the correct range with tf.control_dependencies([ utils_tf.assert_less_equal(input_image, tf.cast(clip_max, input_image.dtype)), utils_tf.assert_greater_equal(input_image, tf.cast(clip_min, input_image.dtype)) ]): clipped_perturbation = utils_tf.clip_by_value( perturbation, -epsilon, epsilon) new_image = utils_tf.clip_by_value( input_image + clipped_perturbation, clip_min, clip_max) return new_image - input_image
python
def _project_perturbation(perturbation, epsilon, input_image, clip_min=None, clip_max=None): """Project `perturbation` onto L-infinity ball of radius `epsilon`. Also project into hypercube such that the resulting adversarial example is between clip_min and clip_max, if applicable. """ if clip_min is None or clip_max is None: raise NotImplementedError("_project_perturbation currently has clipping " "hard-coded in.") # Ensure inputs are in the correct range with tf.control_dependencies([ utils_tf.assert_less_equal(input_image, tf.cast(clip_max, input_image.dtype)), utils_tf.assert_greater_equal(input_image, tf.cast(clip_min, input_image.dtype)) ]): clipped_perturbation = utils_tf.clip_by_value( perturbation, -epsilon, epsilon) new_image = utils_tf.clip_by_value( input_image + clipped_perturbation, clip_min, clip_max) return new_image - input_image
[ "def", "_project_perturbation", "(", "perturbation", ",", "epsilon", ",", "input_image", ",", "clip_min", "=", "None", ",", "clip_max", "=", "None", ")", ":", "if", "clip_min", "is", "None", "or", "clip_max", "is", "None", ":", "raise", "NotImplementedError", "(", "\"_project_perturbation currently has clipping \"", "\"hard-coded in.\"", ")", "# Ensure inputs are in the correct range", "with", "tf", ".", "control_dependencies", "(", "[", "utils_tf", ".", "assert_less_equal", "(", "input_image", ",", "tf", ".", "cast", "(", "clip_max", ",", "input_image", ".", "dtype", ")", ")", ",", "utils_tf", ".", "assert_greater_equal", "(", "input_image", ",", "tf", ".", "cast", "(", "clip_min", ",", "input_image", ".", "dtype", ")", ")", "]", ")", ":", "clipped_perturbation", "=", "utils_tf", ".", "clip_by_value", "(", "perturbation", ",", "-", "epsilon", ",", "epsilon", ")", "new_image", "=", "utils_tf", ".", "clip_by_value", "(", "input_image", "+", "clipped_perturbation", ",", "clip_min", ",", "clip_max", ")", "return", "new_image", "-", "input_image" ]
Project `perturbation` onto L-infinity ball of radius `epsilon`. Also project into hypercube such that the resulting adversarial example is between clip_min and clip_max, if applicable.
[ "Project", "perturbation", "onto", "L", "-", "infinity", "ball", "of", "radius", "epsilon", ".", "Also", "project", "into", "hypercube", "such", "that", "the", "resulting", "adversarial", "example", "is", "between", "clip_min", "and", "clip_max", "if", "applicable", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/spsa.py#L209-L231
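The projection itself is simple enough to check in NumPy; this sketch mirrors the two clipping steps and shows how the returned perturbation can shrink near the box boundary (the inputs are made up).

import numpy as np

def project_perturbation_np(perturbation, epsilon, input_image, clip_min=0., clip_max=1.):
  """NumPy mirror of the projection: clip to the L-infinity ball of radius
  epsilon, then clip the perturbed image to [clip_min, clip_max] and return
  the perturbation that actually remains."""
  clipped_perturbation = np.clip(perturbation, -epsilon, epsilon)
  new_image = np.clip(input_image + clipped_perturbation, clip_min, clip_max)
  return new_image - input_image

x = np.array([0.05, 0.5, 0.98])
delta = np.array([-0.2, 0.1, 0.2])
print(project_perturbation_np(delta, 0.1, x))  # [-0.05  0.1   0.02]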
28,573
tensorflow/cleverhans
cleverhans/attacks/spsa.py
margin_logit_loss
def margin_logit_loss(model_logits, label, nb_classes=10, num_classes=None): """Computes difference between logit for `label` and next highest logit. The loss is high when `label` is unlikely (targeted by default). This follows the same interface as `loss_fn` for TensorOptimizer and projected_optimization, i.e. it returns a batch of loss values. """ if num_classes is not None: warnings.warn("`num_classes` is depreciated. Switch to `nb_classes`." " `num_classes` may be removed on or after 2019-04-23.") nb_classes = num_classes del num_classes if 'int' in str(label.dtype): logit_mask = tf.one_hot(label, depth=nb_classes, axis=-1) else: logit_mask = label if 'int' in str(logit_mask.dtype): logit_mask = tf.to_float(logit_mask) try: label_logits = reduce_sum(logit_mask * model_logits, axis=-1) except TypeError: raise TypeError("Could not take row-wise dot product between " "logit mask, of dtype " + str(logit_mask.dtype) + " and model_logits, of dtype " + str(model_logits.dtype)) logits_with_target_label_neg_inf = model_logits - logit_mask * 99999 highest_nonlabel_logits = reduce_max( logits_with_target_label_neg_inf, axis=-1) loss = highest_nonlabel_logits - label_logits return loss
python
def margin_logit_loss(model_logits, label, nb_classes=10, num_classes=None): """Computes difference between logit for `label` and next highest logit. The loss is high when `label` is unlikely (targeted by default). This follows the same interface as `loss_fn` for TensorOptimizer and projected_optimization, i.e. it returns a batch of loss values. """ if num_classes is not None: warnings.warn("`num_classes` is depreciated. Switch to `nb_classes`." " `num_classes` may be removed on or after 2019-04-23.") nb_classes = num_classes del num_classes if 'int' in str(label.dtype): logit_mask = tf.one_hot(label, depth=nb_classes, axis=-1) else: logit_mask = label if 'int' in str(logit_mask.dtype): logit_mask = tf.to_float(logit_mask) try: label_logits = reduce_sum(logit_mask * model_logits, axis=-1) except TypeError: raise TypeError("Could not take row-wise dot product between " "logit mask, of dtype " + str(logit_mask.dtype) + " and model_logits, of dtype " + str(model_logits.dtype)) logits_with_target_label_neg_inf = model_logits - logit_mask * 99999 highest_nonlabel_logits = reduce_max( logits_with_target_label_neg_inf, axis=-1) loss = highest_nonlabel_logits - label_logits return loss
[ "def", "margin_logit_loss", "(", "model_logits", ",", "label", ",", "nb_classes", "=", "10", ",", "num_classes", "=", "None", ")", ":", "if", "num_classes", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"`num_classes` is depreciated. Switch to `nb_classes`.\"", "\" `num_classes` may be removed on or after 2019-04-23.\"", ")", "nb_classes", "=", "num_classes", "del", "num_classes", "if", "'int'", "in", "str", "(", "label", ".", "dtype", ")", ":", "logit_mask", "=", "tf", ".", "one_hot", "(", "label", ",", "depth", "=", "nb_classes", ",", "axis", "=", "-", "1", ")", "else", ":", "logit_mask", "=", "label", "if", "'int'", "in", "str", "(", "logit_mask", ".", "dtype", ")", ":", "logit_mask", "=", "tf", ".", "to_float", "(", "logit_mask", ")", "try", ":", "label_logits", "=", "reduce_sum", "(", "logit_mask", "*", "model_logits", ",", "axis", "=", "-", "1", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "\"Could not take row-wise dot product between \"", "\"logit mask, of dtype \"", "+", "str", "(", "logit_mask", ".", "dtype", ")", "+", "\" and model_logits, of dtype \"", "+", "str", "(", "model_logits", ".", "dtype", ")", ")", "logits_with_target_label_neg_inf", "=", "model_logits", "-", "logit_mask", "*", "99999", "highest_nonlabel_logits", "=", "reduce_max", "(", "logits_with_target_label_neg_inf", ",", "axis", "=", "-", "1", ")", "loss", "=", "highest_nonlabel_logits", "-", "label_logits", "return", "loss" ]
Computes difference between logit for `label` and next highest logit. The loss is high when `label` is unlikely (targeted by default). This follows the same interface as `loss_fn` for TensorOptimizer and projected_optimization, i.e. it returns a batch of loss values.
[ "Computes", "difference", "between", "logit", "for", "label", "and", "next", "highest", "logit", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/spsa.py#L444-L473
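A single-example NumPy sketch of the margin computation; the logits and labels are made up, and the 99999 constant plays the same role as in the TF code (masking out the target logit before taking the max).

import numpy as np

def margin_logit_loss_np(logits, label, nb_classes):
  """Highest non-target logit minus the target logit: negative when the
  target class already dominates, positive when it is unlikely."""
  logit_mask = np.eye(nb_classes)[label]
  label_logit = np.sum(logit_mask * logits)
  highest_nonlabel = np.max(logits - logit_mask * 99999)
  return highest_nonlabel - label_logit

logits = np.array([2.0, 5.0, -1.0])
print(margin_logit_loss_np(logits, label=1, nb_classes=3))  # -3.0: target leads by 3
print(margin_logit_loss_np(logits, label=2, nb_classes=3))  # 6.0: target trails the best logit by 6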
28,574
tensorflow/cleverhans
cleverhans/attacks/spsa.py
TensorOptimizer._compute_gradients
def _compute_gradients(self, loss_fn, x, unused_optim_state): """Compute a new value of `x` to minimize `loss_fn`. Args: loss_fn: a callable that takes `x`, a batch of images, and returns a batch of loss values. `x` will be optimized to minimize `loss_fn(x)`. x: A list of Tensors, the values to be updated. This is analogous to the `var_list` argument in standard TF Optimizer. unused_optim_state: A (possibly nested) dict, containing any state info needed for the optimizer. Returns: new_x: A list of Tensors, the same length as `x`, which are updated new_optim_state: A dict, with the same structure as `optim_state`, which have been updated. """ # Assumes `x` is a list, # and contains a tensor representing a batch of images assert len(x) == 1 and isinstance(x, list), \ 'x should be a list and contain only one image tensor' x = x[0] loss = reduce_mean(loss_fn(x), axis=0) return tf.gradients(loss, x)
python
def _compute_gradients(self, loss_fn, x, unused_optim_state): """Compute a new value of `x` to minimize `loss_fn`. Args: loss_fn: a callable that takes `x`, a batch of images, and returns a batch of loss values. `x` will be optimized to minimize `loss_fn(x)`. x: A list of Tensors, the values to be updated. This is analogous to the `var_list` argument in standard TF Optimizer. unused_optim_state: A (possibly nested) dict, containing any state info needed for the optimizer. Returns: new_x: A list of Tensors, the same length as `x`, which are updated new_optim_state: A dict, with the same structure as `optim_state`, which have been updated. """ # Assumes `x` is a list, # and contains a tensor representing a batch of images assert len(x) == 1 and isinstance(x, list), \ 'x should be a list and contain only one image tensor' x = x[0] loss = reduce_mean(loss_fn(x), axis=0) return tf.gradients(loss, x)
[ "def", "_compute_gradients", "(", "self", ",", "loss_fn", ",", "x", ",", "unused_optim_state", ")", ":", "# Assumes `x` is a list,", "# and contains a tensor representing a batch of images", "assert", "len", "(", "x", ")", "==", "1", "and", "isinstance", "(", "x", ",", "list", ")", ",", "'x should be a list and contain only one image tensor'", "x", "=", "x", "[", "0", "]", "loss", "=", "reduce_mean", "(", "loss_fn", "(", "x", ")", ",", "axis", "=", "0", ")", "return", "tf", ".", "gradients", "(", "loss", ",", "x", ")" ]
Compute a new value of `x` to minimize `loss_fn`. Args: loss_fn: a callable that takes `x`, a batch of images, and returns a batch of loss values. `x` will be optimized to minimize `loss_fn(x)`. x: A list of Tensors, the values to be updated. This is analogous to the `var_list` argument in standard TF Optimizer. unused_optim_state: A (possibly nested) dict, containing any state info needed for the optimizer. Returns: new_x: A list of Tensors, the same length as `x`, which are updated new_optim_state: A dict, with the same structure as `optim_state`, which have been updated.
[ "Compute", "a", "new", "value", "of", "x", "to", "minimize", "loss_fn", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/spsa.py#L246-L270
28,575
tensorflow/cleverhans
cleverhans/attacks/spsa.py
TensorOptimizer.minimize
def minimize(self, loss_fn, x, optim_state): """ Analogous to tf.Optimizer.minimize :param loss_fn: tf Tensor, representing the loss to minimize :param x: list of Tensor, analogous to tf.Optimizer's var_list :param optim_state: A possibly nested dict, containing any optimizer state. Returns: new_x: list of Tensor, updated version of `x` new_optim_state: dict, updated version of `optim_state` """ grads = self._compute_gradients(loss_fn, x, optim_state) return self._apply_gradients(grads, x, optim_state)
python
def minimize(self, loss_fn, x, optim_state): """ Analogous to tf.Optimizer.minimize :param loss_fn: tf Tensor, representing the loss to minimize :param x: list of Tensor, analogous to tf.Optimizer's var_list :param optim_state: A possibly nested dict, containing any optimizer state. Returns: new_x: list of Tensor, updated version of `x` new_optim_state: dict, updated version of `optim_state` """ grads = self._compute_gradients(loss_fn, x, optim_state) return self._apply_gradients(grads, x, optim_state)
[ "def", "minimize", "(", "self", ",", "loss_fn", ",", "x", ",", "optim_state", ")", ":", "grads", "=", "self", ".", "_compute_gradients", "(", "loss_fn", ",", "x", ",", "optim_state", ")", "return", "self", ".", "_apply_gradients", "(", "grads", ",", "x", ",", "optim_state", ")" ]
Analogous to tf.Optimizer.minimize :param loss_fn: tf Tensor, representing the loss to minimize :param x: list of Tensor, analogous to tf.Optimizer's var_list :param optim_state: A possibly nested dict, containing any optimizer state. Returns: new_x: list of Tensor, updated version of `x` new_optim_state: dict, updated version of `optim_state`
[ "Analogous", "to", "tf", ".", "Optimizer", ".", "minimize" ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/spsa.py#L287-L300
28,576
tensorflow/cleverhans
cleverhans/attacks/spsa.py
TensorAdam.init_state
def init_state(self, x): """ Initialize t, m, and u """ optim_state = {} optim_state["t"] = 0. optim_state["m"] = [tf.zeros_like(v) for v in x] optim_state["u"] = [tf.zeros_like(v) for v in x] return optim_state
python
def init_state(self, x): """ Initialize t, m, and u """ optim_state = {} optim_state["t"] = 0. optim_state["m"] = [tf.zeros_like(v) for v in x] optim_state["u"] = [tf.zeros_like(v) for v in x] return optim_state
[ "def", "init_state", "(", "self", ",", "x", ")", ":", "optim_state", "=", "{", "}", "optim_state", "[", "\"t\"", "]", "=", "0.", "optim_state", "[", "\"m\"", "]", "=", "[", "tf", ".", "zeros_like", "(", "v", ")", "for", "v", "in", "x", "]", "optim_state", "[", "\"u\"", "]", "=", "[", "tf", ".", "zeros_like", "(", "v", ")", "for", "v", "in", "x", "]", "return", "optim_state" ]
Initialize t, m, and u
[ "Initialize", "t", "m", "and", "u" ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/spsa.py#L340-L348
28,577
tensorflow/cleverhans
cleverhans/attacks/spsa.py
TensorAdam._apply_gradients
def _apply_gradients(self, grads, x, optim_state): """Refer to parent class documentation.""" new_x = [None] * len(x) new_optim_state = { "t": optim_state["t"] + 1., "m": [None] * len(x), "u": [None] * len(x) } t = new_optim_state["t"] for i in xrange(len(x)): g = grads[i] m_old = optim_state["m"][i] u_old = optim_state["u"][i] new_optim_state["m"][i] = ( self._beta1 * m_old + (1. - self._beta1) * g) new_optim_state["u"][i] = ( self._beta2 * u_old + (1. - self._beta2) * g * g) m_hat = new_optim_state["m"][i] / (1. - tf.pow(self._beta1, t)) u_hat = new_optim_state["u"][i] / (1. - tf.pow(self._beta2, t)) new_x[i] = ( x[i] - self._lr * m_hat / (tf.sqrt(u_hat) + self._epsilon)) return new_x, new_optim_state
python
def _apply_gradients(self, grads, x, optim_state): """Refer to parent class documentation.""" new_x = [None] * len(x) new_optim_state = { "t": optim_state["t"] + 1., "m": [None] * len(x), "u": [None] * len(x) } t = new_optim_state["t"] for i in xrange(len(x)): g = grads[i] m_old = optim_state["m"][i] u_old = optim_state["u"][i] new_optim_state["m"][i] = ( self._beta1 * m_old + (1. - self._beta1) * g) new_optim_state["u"][i] = ( self._beta2 * u_old + (1. - self._beta2) * g * g) m_hat = new_optim_state["m"][i] / (1. - tf.pow(self._beta1, t)) u_hat = new_optim_state["u"][i] / (1. - tf.pow(self._beta2, t)) new_x[i] = ( x[i] - self._lr * m_hat / (tf.sqrt(u_hat) + self._epsilon)) return new_x, new_optim_state
[ "def", "_apply_gradients", "(", "self", ",", "grads", ",", "x", ",", "optim_state", ")", ":", "new_x", "=", "[", "None", "]", "*", "len", "(", "x", ")", "new_optim_state", "=", "{", "\"t\"", ":", "optim_state", "[", "\"t\"", "]", "+", "1.", ",", "\"m\"", ":", "[", "None", "]", "*", "len", "(", "x", ")", ",", "\"u\"", ":", "[", "None", "]", "*", "len", "(", "x", ")", "}", "t", "=", "new_optim_state", "[", "\"t\"", "]", "for", "i", "in", "xrange", "(", "len", "(", "x", ")", ")", ":", "g", "=", "grads", "[", "i", "]", "m_old", "=", "optim_state", "[", "\"m\"", "]", "[", "i", "]", "u_old", "=", "optim_state", "[", "\"u\"", "]", "[", "i", "]", "new_optim_state", "[", "\"m\"", "]", "[", "i", "]", "=", "(", "self", ".", "_beta1", "*", "m_old", "+", "(", "1.", "-", "self", ".", "_beta1", ")", "*", "g", ")", "new_optim_state", "[", "\"u\"", "]", "[", "i", "]", "=", "(", "self", ".", "_beta2", "*", "u_old", "+", "(", "1.", "-", "self", ".", "_beta2", ")", "*", "g", "*", "g", ")", "m_hat", "=", "new_optim_state", "[", "\"m\"", "]", "[", "i", "]", "/", "(", "1.", "-", "tf", ".", "pow", "(", "self", ".", "_beta1", ",", "t", ")", ")", "u_hat", "=", "new_optim_state", "[", "\"u\"", "]", "[", "i", "]", "/", "(", "1.", "-", "tf", ".", "pow", "(", "self", ".", "_beta2", ",", "t", ")", ")", "new_x", "[", "i", "]", "=", "(", "x", "[", "i", "]", "-", "self", ".", "_lr", "*", "m_hat", "/", "(", "tf", ".", "sqrt", "(", "u_hat", ")", "+", "self", ".", "_epsilon", ")", ")", "return", "new_x", ",", "new_optim_state" ]
Refer to parent class documentation.
[ "Refer", "to", "parent", "class", "documentation", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/spsa.py#L350-L371
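The update rule is the standard bias-corrected Adam step; a NumPy sketch on a toy quadratic (with illustrative, not library-default, hyperparameters) shows the same recurrences for t, m and u driving x toward the minimizer.

import numpy as np

lr, beta1, beta2, eps = 0.01, 0.9, 0.999, 1e-9  # illustrative hyperparameters
x = np.zeros(3)
m = np.zeros(3)
u = np.zeros(3)
t = 0.

def grad(x):
  return 2.0 * x - 1.0  # gradient of sum((x - 0.5)^2), minimized at x = 0.5

for _ in range(300):
  g = grad(x)
  t += 1.
  m = beta1 * m + (1. - beta1) * g
  u = beta2 * u + (1. - beta2) * g * g
  m_hat = m / (1. - beta1 ** t)   # bias-corrected first moment
  u_hat = u / (1. - beta2 ** t)   # bias-corrected second moment
  x = x - lr * m_hat / (np.sqrt(u_hat) + eps)

print(np.round(x, 2))  # close to [0.5 0.5 0.5]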
28,578
tensorflow/cleverhans
cleverhans/attacks/spsa.py
SPSAAdam._compute_gradients
def _compute_gradients(self, loss_fn, x, unused_optim_state): """Compute gradient estimates using SPSA.""" # Assumes `x` is a list, containing a [1, H, W, C] image # If static batch dimension is None, tf.reshape to batch size 1 # so that static shape can be inferred assert len(x) == 1 static_x_shape = x[0].get_shape().as_list() if static_x_shape[0] is None: x[0] = tf.reshape(x[0], [1] + static_x_shape[1:]) assert x[0].get_shape().as_list()[0] == 1 x = x[0] x_shape = x.get_shape().as_list() def body(i, grad_array): delta = self._delta delta_x = self._get_delta(x, delta) delta_x = tf.concat([delta_x, -delta_x], axis=0) loss_vals = tf.reshape( loss_fn(x + delta_x), [2 * self._num_samples] + [1] * (len(x_shape) - 1)) avg_grad = reduce_mean(loss_vals * delta_x, axis=0) / delta avg_grad = tf.expand_dims(avg_grad, axis=0) new_grad_array = grad_array.write(i, avg_grad) return i + 1, new_grad_array def cond(i, _): return i < self._num_iters _, all_grads = tf.while_loop( cond, body, loop_vars=[ 0, tf.TensorArray(size=self._num_iters, dtype=tf_dtype) ], back_prop=False, parallel_iterations=1) avg_grad = reduce_sum(all_grads.stack(), axis=0) return [avg_grad]
python
def _compute_gradients(self, loss_fn, x, unused_optim_state): """Compute gradient estimates using SPSA.""" # Assumes `x` is a list, containing a [1, H, W, C] image # If static batch dimension is None, tf.reshape to batch size 1 # so that static shape can be inferred assert len(x) == 1 static_x_shape = x[0].get_shape().as_list() if static_x_shape[0] is None: x[0] = tf.reshape(x[0], [1] + static_x_shape[1:]) assert x[0].get_shape().as_list()[0] == 1 x = x[0] x_shape = x.get_shape().as_list() def body(i, grad_array): delta = self._delta delta_x = self._get_delta(x, delta) delta_x = tf.concat([delta_x, -delta_x], axis=0) loss_vals = tf.reshape( loss_fn(x + delta_x), [2 * self._num_samples] + [1] * (len(x_shape) - 1)) avg_grad = reduce_mean(loss_vals * delta_x, axis=0) / delta avg_grad = tf.expand_dims(avg_grad, axis=0) new_grad_array = grad_array.write(i, avg_grad) return i + 1, new_grad_array def cond(i, _): return i < self._num_iters _, all_grads = tf.while_loop( cond, body, loop_vars=[ 0, tf.TensorArray(size=self._num_iters, dtype=tf_dtype) ], back_prop=False, parallel_iterations=1) avg_grad = reduce_sum(all_grads.stack(), axis=0) return [avg_grad]
[ "def", "_compute_gradients", "(", "self", ",", "loss_fn", ",", "x", ",", "unused_optim_state", ")", ":", "# Assumes `x` is a list, containing a [1, H, W, C] image", "# If static batch dimension is None, tf.reshape to batch size 1", "# so that static shape can be inferred", "assert", "len", "(", "x", ")", "==", "1", "static_x_shape", "=", "x", "[", "0", "]", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "if", "static_x_shape", "[", "0", "]", "is", "None", ":", "x", "[", "0", "]", "=", "tf", ".", "reshape", "(", "x", "[", "0", "]", ",", "[", "1", "]", "+", "static_x_shape", "[", "1", ":", "]", ")", "assert", "x", "[", "0", "]", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "0", "]", "==", "1", "x", "=", "x", "[", "0", "]", "x_shape", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "def", "body", "(", "i", ",", "grad_array", ")", ":", "delta", "=", "self", ".", "_delta", "delta_x", "=", "self", ".", "_get_delta", "(", "x", ",", "delta", ")", "delta_x", "=", "tf", ".", "concat", "(", "[", "delta_x", ",", "-", "delta_x", "]", ",", "axis", "=", "0", ")", "loss_vals", "=", "tf", ".", "reshape", "(", "loss_fn", "(", "x", "+", "delta_x", ")", ",", "[", "2", "*", "self", ".", "_num_samples", "]", "+", "[", "1", "]", "*", "(", "len", "(", "x_shape", ")", "-", "1", ")", ")", "avg_grad", "=", "reduce_mean", "(", "loss_vals", "*", "delta_x", ",", "axis", "=", "0", ")", "/", "delta", "avg_grad", "=", "tf", ".", "expand_dims", "(", "avg_grad", ",", "axis", "=", "0", ")", "new_grad_array", "=", "grad_array", ".", "write", "(", "i", ",", "avg_grad", ")", "return", "i", "+", "1", ",", "new_grad_array", "def", "cond", "(", "i", ",", "_", ")", ":", "return", "i", "<", "self", ".", "_num_iters", "_", ",", "all_grads", "=", "tf", ".", "while_loop", "(", "cond", ",", "body", ",", "loop_vars", "=", "[", "0", ",", "tf", ".", "TensorArray", "(", "size", "=", "self", ".", "_num_iters", ",", "dtype", "=", "tf_dtype", ")", "]", ",", "back_prop", "=", "False", ",", "parallel_iterations", "=", "1", ")", "avg_grad", "=", "reduce_sum", "(", "all_grads", ".", "stack", "(", ")", ",", "axis", "=", "0", ")", "return", "[", "avg_grad", "]" ]
Compute gradient estimates using SPSA.
[ "Compute", "gradient", "estimates", "using", "SPSA", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/spsa.py#L404-L441
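For intuition, the following is a rough NumPy sketch of the two-sided SPSA estimator that the graph code above assembles: antithetic Rademacher-style perturbations averaged over many samples. It is an illustration under assumed shapes and normalization, not the SPSAAdam API:

import numpy as np

def spsa_gradient(loss_fn, x, delta=0.01, num_samples=32):
  """Estimate d loss / d x with simultaneous perturbation, as in the record above."""
  grad = np.zeros_like(x)
  for _ in range(num_samples):
    # Rademacher-style perturbation scaled by delta; +pert and -pert form an antithetic pair.
    pert = delta * np.sign(np.random.uniform(-1., 1., size=x.shape))
    grad += (loss_fn(x + pert) - loss_fn(x - pert)) / (2. * delta) * np.sign(pert)
  return grad / num_samples

# Toy usage on a quadratic loss; the estimate approaches the true gradient 2 * x.
x = np.array([1.0, -2.0, 0.5])
est = spsa_gradient(lambda v: np.sum(v ** 2), x, num_samples=2000)
print(est, 2 * x)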
28,579
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py
read_submissions_from_directory
def read_submissions_from_directory(dirname, use_gpu):
  """Scans directory and read all submissions.

  Args:
    dirname: directory to scan.
    use_gpu: whether submissions should use GPU. This argument is
      used to pick proper Docker container for each submission and
      create instance of Attack or Defense class.

  Returns:
    List with submissions (subclasses of Submission class).
  """
  result = []
  for sub_dir in os.listdir(dirname):
    submission_path = os.path.join(dirname, sub_dir)
    try:
      if not os.path.isdir(submission_path):
        continue
      if not os.path.exists(os.path.join(submission_path, 'metadata.json')):
        continue
      with open(os.path.join(submission_path, 'metadata.json')) as f:
        metadata = json.load(f)
      if use_gpu and ('container_gpu' in metadata):
        container = metadata['container_gpu']
      else:
        container = metadata['container']
      entry_point = metadata['entry_point']
      submission_type = metadata['type']
      if submission_type == 'attack' or submission_type == 'targeted_attack':
        submission = Attack(submission_path, container, entry_point, use_gpu)
      elif submission_type == 'defense':
        submission = Defense(submission_path, container, entry_point, use_gpu)
      else:
        raise ValueError('Invalid type of submission: %s' % submission_type)
      result.append(submission)
    except (IOError, KeyError, ValueError):
      print('Failed to read submission from directory ', submission_path)
  return result
python
def read_submissions_from_directory(dirname, use_gpu): """Scans directory and read all submissions. Args: dirname: directory to scan. use_gpu: whether submissions should use GPU. This argument is used to pick proper Docker container for each submission and create instance of Attack or Defense class. Returns: List with submissions (subclasses of Submission class). """ result = [] for sub_dir in os.listdir(dirname): submission_path = os.path.join(dirname, sub_dir) try: if not os.path.isdir(submission_path): continue if not os.path.exists(os.path.join(submission_path, 'metadata.json')): continue with open(os.path.join(submission_path, 'metadata.json')) as f: metadata = json.load(f) if use_gpu and ('container_gpu' in metadata): container = metadata['container_gpu'] else: container = metadata['container'] entry_point = metadata['entry_point'] submission_type = metadata['type'] if submission_type == 'attack' or submission_type == 'targeted_attack': submission = Attack(submission_path, container, entry_point, use_gpu) elif submission_type == 'defense': submission = Defense(submission_path, container, entry_point, use_gpu) else: raise ValueError('Invalid type of submission: %s' % submission_type) result.append(submission) except (IOError, KeyError, ValueError): print('Failed to read submission from directory ', submission_path) return result
[ "def", "read_submissions_from_directory", "(", "dirname", ",", "use_gpu", ")", ":", "result", "=", "[", "]", "for", "sub_dir", "in", "os", ".", "listdir", "(", "dirname", ")", ":", "submission_path", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "sub_dir", ")", "try", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "submission_path", ")", ":", "continue", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "submission_path", ",", "'metadata.json'", ")", ")", ":", "continue", "with", "open", "(", "os", ".", "path", ".", "join", "(", "submission_path", ",", "'metadata.json'", ")", ")", "as", "f", ":", "metadata", "=", "json", ".", "load", "(", "f", ")", "if", "use_gpu", "and", "(", "'container_gpu'", "in", "metadata", ")", ":", "container", "=", "metadata", "[", "'container_gpu'", "]", "else", ":", "container", "=", "metadata", "[", "'container'", "]", "entry_point", "=", "metadata", "[", "'entry_point'", "]", "submission_type", "=", "metadata", "[", "'type'", "]", "if", "submission_type", "==", "'attack'", "or", "submission_type", "==", "'targeted_attack'", ":", "submission", "=", "Attack", "(", "submission_path", ",", "container", ",", "entry_point", ",", "use_gpu", ")", "elif", "submission_type", "==", "'defense'", ":", "submission", "=", "Defense", "(", "submission_path", ",", "container", ",", "entry_point", ",", "use_gpu", ")", "else", ":", "raise", "ValueError", "(", "'Invalid type of submission: %s'", "%", "submission_type", ")", "result", ".", "append", "(", "submission", ")", "except", "(", "IOError", ",", "KeyError", ",", "ValueError", ")", ":", "print", "(", "'Failed to read submission from directory '", ",", "submission_path", ")", "return", "result" ]
Scans directory and read all submissions. Args: dirname: directory to scan. use_gpu: whether submissions should use GPU. This argument is used to pick proper Docker container for each submission and create instance of Attack or Defense class. Returns: List with submissions (subclasses of Submission class).
[ "Scans", "directory", "and", "read", "all", "submissions", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py#L121-L158
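The loader above only relies on a metadata.json file inside each submission directory. A hypothetical example of the fields it reads follows; the field names come from the record above, while the container names and entry point are made up:

import json
import os
import tempfile

# Hypothetical submission directory with the fields read_submissions_from_directory uses.
metadata = {
    "type": "attack",                  # or "targeted_attack" / "defense"
    "container": "tensorflow/tensorflow:1.1.0",
    "container_gpu": "tensorflow/tensorflow:1.1.0-gpu",  # optional; used when use_gpu=True
    "entry_point": "run_attack.sh",
}

sub_dir = os.path.join(tempfile.mkdtemp(), "my_attack")
os.makedirs(sub_dir)
with open(os.path.join(sub_dir, "metadata.json"), "w") as f:
  json.dump(metadata, f, indent=2)
print(open(os.path.join(sub_dir, "metadata.json")).read())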
28,580
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py
load_defense_output
def load_defense_output(filename):
  """Loads output of defense from given file."""
  result = {}
  with open(filename) as f:
    for row in csv.reader(f):
      try:
        image_filename = row[0]
        if image_filename.endswith('.png') or image_filename.endswith('.jpg'):
          image_filename = image_filename[:image_filename.rfind('.')]
        label = int(row[1])
      except (IndexError, ValueError):
        continue
      result[image_filename] = label
  return result
python
def load_defense_output(filename): """Loads output of defense from given file.""" result = {} with open(filename) as f: for row in csv.reader(f): try: image_filename = row[0] if image_filename.endswith('.png') or image_filename.endswith('.jpg'): image_filename = image_filename[:image_filename.rfind('.')] label = int(row[1]) except (IndexError, ValueError): continue result[image_filename] = label return result
[ "def", "load_defense_output", "(", "filename", ")", ":", "result", "=", "{", "}", "with", "open", "(", "filename", ")", "as", "f", ":", "for", "row", "in", "csv", ".", "reader", "(", "f", ")", ":", "try", ":", "image_filename", "=", "row", "[", "0", "]", "if", "image_filename", ".", "endswith", "(", "'.png'", ")", "or", "image_filename", ".", "endswith", "(", "'.jpg'", ")", ":", "image_filename", "=", "image_filename", "[", ":", "image_filename", ".", "rfind", "(", "'.'", ")", "]", "label", "=", "int", "(", "row", "[", "1", "]", ")", "except", "(", "IndexError", ",", "ValueError", ")", ":", "continue", "result", "[", "image_filename", "]", "=", "label", "return", "result" ]
Loads output of defense from given file.
[ "Loads", "output", "of", "defense", "from", "given", "file", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py#L328-L341
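The defense output parsed above is a two-column CSV of image filename and integer label. A small self-contained round trip, with the same parsing logic inlined for illustration (the filenames and labels are made up):

import csv
import io

rows = [("00000001.png", 42), ("00000002.jpg", 7), ("bad row",)]  # the last row gets skipped

buf = io.StringIO()
csv.writer(buf).writerows(rows)
buf.seek(0)

# Same parsing logic as the record above, inlined for illustration.
result = {}
for row in csv.reader(buf):
  try:
    image_filename = row[0]
    if image_filename.endswith('.png') or image_filename.endswith('.jpg'):
      image_filename = image_filename[:image_filename.rfind('.')]
    label = int(row[1])
  except (IndexError, ValueError):
    continue
  result[image_filename] = label
print(result)  # {'00000001': 42, '00000002': 7}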
28,581
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py
main
def main():
  """Run all attacks against all defenses and compute results.
  """
  args = parse_args()
  attacks_output_dir = os.path.join(args.intermediate_results_dir,
                                    'attacks_output')
  targeted_attacks_output_dir = os.path.join(args.intermediate_results_dir,
                                             'targeted_attacks_output')
  defenses_output_dir = os.path.join(args.intermediate_results_dir,
                                     'defenses_output')
  all_adv_examples_dir = os.path.join(args.intermediate_results_dir,
                                      'all_adv_examples')

  # Load dataset metadata.
  dataset_meta = DatasetMetadata(args.dataset_metadata)

  # Load attacks and defenses.
  attacks = [
      a for a in read_submissions_from_directory(args.attacks_dir,
                                                 args.use_gpu)
      if isinstance(a, Attack)
  ]
  targeted_attacks = [
      a for a in read_submissions_from_directory(args.targeted_attacks_dir,
                                                 args.use_gpu)
      if isinstance(a, Attack)
  ]
  defenses = [
      d for d in read_submissions_from_directory(args.defenses_dir,
                                                 args.use_gpu)
      if isinstance(d, Defense)
  ]
  print('Found attacks: ', [a.name for a in attacks])
  print('Found targeted attacks: ', [a.name for a in targeted_attacks])
  print('Found defenses: ', [d.name for d in defenses])

  # Prepare subdirectories for intermediate results.
  os.mkdir(attacks_output_dir)
  os.mkdir(targeted_attacks_output_dir)
  os.mkdir(defenses_output_dir)
  os.mkdir(all_adv_examples_dir)
  for a in attacks:
    os.mkdir(os.path.join(attacks_output_dir, a.name))
  for a in targeted_attacks:
    os.mkdir(os.path.join(targeted_attacks_output_dir, a.name))
  for d in defenses:
    os.mkdir(os.path.join(defenses_output_dir, d.name))

  # Run all non-targeted attacks.
  attacks_output = AttacksOutput(args.dataset_dir, attacks_output_dir,
                                 targeted_attacks_output_dir,
                                 all_adv_examples_dir, args.epsilon)
  for a in attacks:
    a.run(args.dataset_dir, os.path.join(attacks_output_dir, a.name),
          args.epsilon)
    attacks_output.clip_and_copy_attack_outputs(a.name, False)

  # Run all targeted attacks.
  dataset_meta.save_target_classes(os.path.join(args.dataset_dir,
                                                'target_class.csv'))
  for a in targeted_attacks:
    a.run(args.dataset_dir,
          os.path.join(targeted_attacks_output_dir, a.name), args.epsilon)
    attacks_output.clip_and_copy_attack_outputs(a.name, True)

  # Run all defenses.
  defenses_output = {}
  for d in defenses:
    d.run(all_adv_examples_dir, os.path.join(defenses_output_dir, d.name))
    defenses_output[d.name] = load_defense_output(
        os.path.join(defenses_output_dir, d.name, 'result.csv'))

  # Compute and save scoring.
  compute_and_save_scores_and_ranking(attacks_output, defenses_output,
                                      dataset_meta, args.output_dir,
                                      args.save_all_classification)
python
def main(): """Run all attacks against all defenses and compute results. """ args = parse_args() attacks_output_dir = os.path.join(args.intermediate_results_dir, 'attacks_output') targeted_attacks_output_dir = os.path.join(args.intermediate_results_dir, 'targeted_attacks_output') defenses_output_dir = os.path.join(args.intermediate_results_dir, 'defenses_output') all_adv_examples_dir = os.path.join(args.intermediate_results_dir, 'all_adv_examples') # Load dataset metadata. dataset_meta = DatasetMetadata(args.dataset_metadata) # Load attacks and defenses. attacks = [ a for a in read_submissions_from_directory(args.attacks_dir, args.use_gpu) if isinstance(a, Attack) ] targeted_attacks = [ a for a in read_submissions_from_directory(args.targeted_attacks_dir, args.use_gpu) if isinstance(a, Attack) ] defenses = [ d for d in read_submissions_from_directory(args.defenses_dir, args.use_gpu) if isinstance(d, Defense) ] print('Found attacks: ', [a.name for a in attacks]) print('Found tageted attacks: ', [a.name for a in targeted_attacks]) print('Found defenses: ', [d.name for d in defenses]) # Prepare subdirectories for intermediate results. os.mkdir(attacks_output_dir) os.mkdir(targeted_attacks_output_dir) os.mkdir(defenses_output_dir) os.mkdir(all_adv_examples_dir) for a in attacks: os.mkdir(os.path.join(attacks_output_dir, a.name)) for a in targeted_attacks: os.mkdir(os.path.join(targeted_attacks_output_dir, a.name)) for d in defenses: os.mkdir(os.path.join(defenses_output_dir, d.name)) # Run all non-targeted attacks. attacks_output = AttacksOutput(args.dataset_dir, attacks_output_dir, targeted_attacks_output_dir, all_adv_examples_dir, args.epsilon) for a in attacks: a.run(args.dataset_dir, os.path.join(attacks_output_dir, a.name), args.epsilon) attacks_output.clip_and_copy_attack_outputs(a.name, False) # Run all targeted attacks. dataset_meta.save_target_classes(os.path.join(args.dataset_dir, 'target_class.csv')) for a in targeted_attacks: a.run(args.dataset_dir, os.path.join(targeted_attacks_output_dir, a.name), args.epsilon) attacks_output.clip_and_copy_attack_outputs(a.name, True) # Run all defenses. defenses_output = {} for d in defenses: d.run(all_adv_examples_dir, os.path.join(defenses_output_dir, d.name)) defenses_output[d.name] = load_defense_output( os.path.join(defenses_output_dir, d.name, 'result.csv')) # Compute and save scoring. compute_and_save_scores_and_ranking(attacks_output, defenses_output, dataset_meta, args.output_dir, args.save_all_classification)
[ "def", "main", "(", ")", ":", "args", "=", "parse_args", "(", ")", "attacks_output_dir", "=", "os", ".", "path", ".", "join", "(", "args", ".", "intermediate_results_dir", ",", "'attacks_output'", ")", "targeted_attacks_output_dir", "=", "os", ".", "path", ".", "join", "(", "args", ".", "intermediate_results_dir", ",", "'targeted_attacks_output'", ")", "defenses_output_dir", "=", "os", ".", "path", ".", "join", "(", "args", ".", "intermediate_results_dir", ",", "'defenses_output'", ")", "all_adv_examples_dir", "=", "os", ".", "path", ".", "join", "(", "args", ".", "intermediate_results_dir", ",", "'all_adv_examples'", ")", "# Load dataset metadata.", "dataset_meta", "=", "DatasetMetadata", "(", "args", ".", "dataset_metadata", ")", "# Load attacks and defenses.", "attacks", "=", "[", "a", "for", "a", "in", "read_submissions_from_directory", "(", "args", ".", "attacks_dir", ",", "args", ".", "use_gpu", ")", "if", "isinstance", "(", "a", ",", "Attack", ")", "]", "targeted_attacks", "=", "[", "a", "for", "a", "in", "read_submissions_from_directory", "(", "args", ".", "targeted_attacks_dir", ",", "args", ".", "use_gpu", ")", "if", "isinstance", "(", "a", ",", "Attack", ")", "]", "defenses", "=", "[", "d", "for", "d", "in", "read_submissions_from_directory", "(", "args", ".", "defenses_dir", ",", "args", ".", "use_gpu", ")", "if", "isinstance", "(", "d", ",", "Defense", ")", "]", "print", "(", "'Found attacks: '", ",", "[", "a", ".", "name", "for", "a", "in", "attacks", "]", ")", "print", "(", "'Found tageted attacks: '", ",", "[", "a", ".", "name", "for", "a", "in", "targeted_attacks", "]", ")", "print", "(", "'Found defenses: '", ",", "[", "d", ".", "name", "for", "d", "in", "defenses", "]", ")", "# Prepare subdirectories for intermediate results.", "os", ".", "mkdir", "(", "attacks_output_dir", ")", "os", ".", "mkdir", "(", "targeted_attacks_output_dir", ")", "os", ".", "mkdir", "(", "defenses_output_dir", ")", "os", ".", "mkdir", "(", "all_adv_examples_dir", ")", "for", "a", "in", "attacks", ":", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "attacks_output_dir", ",", "a", ".", "name", ")", ")", "for", "a", "in", "targeted_attacks", ":", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "targeted_attacks_output_dir", ",", "a", ".", "name", ")", ")", "for", "d", "in", "defenses", ":", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "defenses_output_dir", ",", "d", ".", "name", ")", ")", "# Run all non-targeted attacks.", "attacks_output", "=", "AttacksOutput", "(", "args", ".", "dataset_dir", ",", "attacks_output_dir", ",", "targeted_attacks_output_dir", ",", "all_adv_examples_dir", ",", "args", ".", "epsilon", ")", "for", "a", "in", "attacks", ":", "a", ".", "run", "(", "args", ".", "dataset_dir", ",", "os", ".", "path", ".", "join", "(", "attacks_output_dir", ",", "a", ".", "name", ")", ",", "args", ".", "epsilon", ")", "attacks_output", ".", "clip_and_copy_attack_outputs", "(", "a", ".", "name", ",", "False", ")", "# Run all targeted attacks.", "dataset_meta", ".", "save_target_classes", "(", "os", ".", "path", ".", "join", "(", "args", ".", "dataset_dir", ",", "'target_class.csv'", ")", ")", "for", "a", "in", "targeted_attacks", ":", "a", ".", "run", "(", "args", ".", "dataset_dir", ",", "os", ".", "path", ".", "join", "(", "targeted_attacks_output_dir", ",", "a", ".", "name", ")", ",", "args", ".", "epsilon", ")", "attacks_output", ".", "clip_and_copy_attack_outputs", "(", "a", ".", "name", ",", "True", ")", "# Run all defenses.", 
"defenses_output", "=", "{", "}", "for", "d", "in", "defenses", ":", "d", ".", "run", "(", "all_adv_examples_dir", ",", "os", ".", "path", ".", "join", "(", "defenses_output_dir", ",", "d", ".", "name", ")", ")", "defenses_output", "[", "d", ".", "name", "]", "=", "load_defense_output", "(", "os", ".", "path", ".", "join", "(", "defenses_output_dir", ",", "d", ".", "name", ",", "'result.csv'", ")", ")", "# Compute and save scoring.", "compute_and_save_scores_and_ranking", "(", "attacks_output", ",", "defenses_output", ",", "dataset_meta", ",", "args", ".", "output_dir", ",", "args", ".", "save_all_classification", ")" ]
Run all attacks against all defenses and compute results.
[ "Run", "all", "attacks", "against", "all", "defenses", "and", "compute", "results", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py#L468-L547
28,582
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py
AttacksOutput._load_dataset_clipping
def _load_dataset_clipping(self, dataset_dir, epsilon):
  """Helper method which loads dataset and determines clipping range.

  Args:
    dataset_dir: location of the dataset.
    epsilon: maximum allowed size of adversarial perturbation.
  """
  self.dataset_max_clip = {}
  self.dataset_min_clip = {}
  self._dataset_image_count = 0
  for fname in os.listdir(dataset_dir):
    if not fname.endswith('.png'):
      continue
    image_id = fname[:-4]
    image = np.array(
        Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))
    image = image.astype('int32')
    self._dataset_image_count += 1
    self.dataset_max_clip[image_id] = np.clip(image + epsilon,
                                              0, 255).astype('uint8')
    self.dataset_min_clip[image_id] = np.clip(image - epsilon,
                                              0, 255).astype('uint8')
python
def _load_dataset_clipping(self, dataset_dir, epsilon): """Helper method which loads dataset and determines clipping range. Args: dataset_dir: location of the dataset. epsilon: maximum allowed size of adversarial perturbation. """ self.dataset_max_clip = {} self.dataset_min_clip = {} self._dataset_image_count = 0 for fname in os.listdir(dataset_dir): if not fname.endswith('.png'): continue image_id = fname[:-4] image = np.array( Image.open(os.path.join(dataset_dir, fname)).convert('RGB')) image = image.astype('int32') self._dataset_image_count += 1 self.dataset_max_clip[image_id] = np.clip(image + epsilon, 0, 255).astype('uint8') self.dataset_min_clip[image_id] = np.clip(image - epsilon, 0, 255).astype('uint8')
[ "def", "_load_dataset_clipping", "(", "self", ",", "dataset_dir", ",", "epsilon", ")", ":", "self", ".", "dataset_max_clip", "=", "{", "}", "self", ".", "dataset_min_clip", "=", "{", "}", "self", ".", "_dataset_image_count", "=", "0", "for", "fname", "in", "os", ".", "listdir", "(", "dataset_dir", ")", ":", "if", "not", "fname", ".", "endswith", "(", "'.png'", ")", ":", "continue", "image_id", "=", "fname", "[", ":", "-", "4", "]", "image", "=", "np", ".", "array", "(", "Image", ".", "open", "(", "os", ".", "path", ".", "join", "(", "dataset_dir", ",", "fname", ")", ")", ".", "convert", "(", "'RGB'", ")", ")", "image", "=", "image", ".", "astype", "(", "'int32'", ")", "self", ".", "_dataset_image_count", "+=", "1", "self", ".", "dataset_max_clip", "[", "image_id", "]", "=", "np", ".", "clip", "(", "image", "+", "epsilon", ",", "0", ",", "255", ")", ".", "astype", "(", "'uint8'", ")", "self", ".", "dataset_min_clip", "[", "image_id", "]", "=", "np", ".", "clip", "(", "image", "-", "epsilon", ",", "0", ",", "255", ")", ".", "astype", "(", "'uint8'", ")" ]
Helper method which loads dataset and determines clipping range. Args: dataset_dir: location of the dataset. epsilon: maximum allowed size of adversarial perturbation.
[ "Helper", "method", "which", "loads", "dataset", "and", "determines", "clipping", "range", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py#L191-L214
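The bounds computed above define the per-image L-infinity ball allowed by the competition. A NumPy-only sketch of the same bookkeeping on a synthetic image (the epsilon value and image shape are arbitrary):

import numpy as np

epsilon = 16  # assumed perturbation budget in pixel units
clean = np.random.randint(0, 256, size=(299, 299, 3)).astype('int32')

# Same arithmetic as _load_dataset_clipping: bounds stay within the valid pixel range.
max_clip = np.clip(clean + epsilon, 0, 255).astype('uint8')
min_clip = np.clip(clean - epsilon, 0, 255).astype('uint8')

# Any adversarial image can later be projected back into the allowed ball.
adv = np.random.randint(0, 256, size=clean.shape).astype('uint8')
adv_clipped = np.clip(adv, min_clip, max_clip)
print(np.abs(adv_clipped.astype('int32') - clean).max())  # never exceeds epsilon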
28,583
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py
AttacksOutput.clip_and_copy_attack_outputs
def clip_and_copy_attack_outputs(self, attack_name, is_targeted):
  """Clips results of attack and copy it to directory with all images.

  Args:
    attack_name: name of the attack.
    is_targeted: if True then attack is targeted, otherwise non-targeted.
  """
  if is_targeted:
    self._targeted_attack_names.add(attack_name)
  else:
    self._attack_names.add(attack_name)
  attack_dir = os.path.join(self.targeted_attacks_output_dir
                            if is_targeted
                            else self.attacks_output_dir,
                            attack_name)
  for fname in os.listdir(attack_dir):
    if not (fname.endswith('.png') or fname.endswith('.jpg')):
      continue
    image_id = fname[:-4]
    if image_id not in self.dataset_max_clip:
      continue
    image_max_clip = self.dataset_max_clip[image_id]
    image_min_clip = self.dataset_min_clip[image_id]
    adversarial_image = np.array(
        Image.open(os.path.join(attack_dir, fname)).convert('RGB'))
    clipped_adv_image = np.clip(adversarial_image,
                                image_min_clip,
                                image_max_clip)
    output_basename = '{0:08d}'.format(self._output_image_idx)
    self._output_image_idx += 1
    self._output_to_attack_mapping[output_basename] = (attack_name,
                                                       is_targeted,
                                                       image_id)
    if is_targeted:
      self._targeted_attack_image_count += 1
    else:
      self._attack_image_count += 1
    Image.fromarray(clipped_adv_image).save(
        os.path.join(self.all_adv_examples_dir, output_basename + '.png'))
python
def clip_and_copy_attack_outputs(self, attack_name, is_targeted): """Clips results of attack and copy it to directory with all images. Args: attack_name: name of the attack. is_targeted: if True then attack is targeted, otherwise non-targeted. """ if is_targeted: self._targeted_attack_names.add(attack_name) else: self._attack_names.add(attack_name) attack_dir = os.path.join(self.targeted_attacks_output_dir if is_targeted else self.attacks_output_dir, attack_name) for fname in os.listdir(attack_dir): if not (fname.endswith('.png') or fname.endswith('.jpg')): continue image_id = fname[:-4] if image_id not in self.dataset_max_clip: continue image_max_clip = self.dataset_max_clip[image_id] image_min_clip = self.dataset_min_clip[image_id] adversarial_image = np.array( Image.open(os.path.join(attack_dir, fname)).convert('RGB')) clipped_adv_image = np.clip(adversarial_image, image_min_clip, image_max_clip) output_basename = '{0:08d}'.format(self._output_image_idx) self._output_image_idx += 1 self._output_to_attack_mapping[output_basename] = (attack_name, is_targeted, image_id) if is_targeted: self._targeted_attack_image_count += 1 else: self._attack_image_count += 1 Image.fromarray(clipped_adv_image).save( os.path.join(self.all_adv_examples_dir, output_basename + '.png'))
[ "def", "clip_and_copy_attack_outputs", "(", "self", ",", "attack_name", ",", "is_targeted", ")", ":", "if", "is_targeted", ":", "self", ".", "_targeted_attack_names", ".", "add", "(", "attack_name", ")", "else", ":", "self", ".", "_attack_names", ".", "add", "(", "attack_name", ")", "attack_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "targeted_attacks_output_dir", "if", "is_targeted", "else", "self", ".", "attacks_output_dir", ",", "attack_name", ")", "for", "fname", "in", "os", ".", "listdir", "(", "attack_dir", ")", ":", "if", "not", "(", "fname", ".", "endswith", "(", "'.png'", ")", "or", "fname", ".", "endswith", "(", "'.jpg'", ")", ")", ":", "continue", "image_id", "=", "fname", "[", ":", "-", "4", "]", "if", "image_id", "not", "in", "self", ".", "dataset_max_clip", ":", "continue", "image_max_clip", "=", "self", ".", "dataset_max_clip", "[", "image_id", "]", "image_min_clip", "=", "self", ".", "dataset_min_clip", "[", "image_id", "]", "adversarial_image", "=", "np", ".", "array", "(", "Image", ".", "open", "(", "os", ".", "path", ".", "join", "(", "attack_dir", ",", "fname", ")", ")", ".", "convert", "(", "'RGB'", ")", ")", "clipped_adv_image", "=", "np", ".", "clip", "(", "adversarial_image", ",", "image_min_clip", ",", "image_max_clip", ")", "output_basename", "=", "'{0:08d}'", ".", "format", "(", "self", ".", "_output_image_idx", ")", "self", ".", "_output_image_idx", "+=", "1", "self", ".", "_output_to_attack_mapping", "[", "output_basename", "]", "=", "(", "attack_name", ",", "is_targeted", ",", "image_id", ")", "if", "is_targeted", ":", "self", ".", "_targeted_attack_image_count", "+=", "1", "else", ":", "self", ".", "_attack_image_count", "+=", "1", "Image", ".", "fromarray", "(", "clipped_adv_image", ")", ".", "save", "(", "os", ".", "path", ".", "join", "(", "self", ".", "all_adv_examples_dir", ",", "output_basename", "+", "'.png'", ")", ")" ]
Clips results of attack and copy it to directory with all images. Args: attack_name: name of the attack. is_targeted: if True then attack is targeted, otherwise non-targeted.
[ "Clips", "results", "of", "attack", "and", "copy", "it", "to", "directory", "with", "all", "images", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py#L216-L254
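Besides clipping, the method above renames each adversarial image to a sequential eight-digit basename and records which attack and source image produced it. A stripped-down sketch of just that bookkeeping (class and method names here are illustrative, not the toolkit's):

# Illustrative bookkeeping only; the real class also clips pixels and saves PNGs.
class OutputIndex(object):
  def __init__(self):
    self.mapping = {}
    self.next_idx = 0

  def register(self, attack_name, is_targeted, image_id):
    """Assign the next sequential basename and remember its provenance."""
    basename = '{0:08d}'.format(self.next_idx)
    self.next_idx += 1
    self.mapping[basename] = (attack_name, is_targeted, image_id)
    return basename

index = OutputIndex()
print(index.register('fgsm', False, 'img_001'))       # 00000000
print(index.register('pgd_target', True, 'img_001'))  # 00000001
print(index.mapping)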
28,584
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py
DatasetMetadata.save_target_classes
def save_target_classes(self, filename):
  """Saves target classes for all dataset images into given file."""
  with open(filename, 'w') as f:
    for k, v in self._target_classes.items():
      f.write('{0}.png,{1}\n'.format(k, v))
python
def save_target_classes(self, filename):
  """Saves target classes for all dataset images into given file."""
  with open(filename, 'w') as f:
    for k, v in self._target_classes.items():
      f.write('{0}.png,{1}\n'.format(k, v))
[ "def", "save_target_classes", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "for", "k", ",", "v", "in", "self", ".", "_target_classes", ".", "items", "(", ")", ":", "f", ".", "write", "(", "'{0}.png,{1}\\n'", ".", "format", "(", "k", ",", "v", ")", ")" ]
Saves target classes for all dataset images into given file.
[ "Saves", "target", "classed", "for", "all", "dataset", "images", "into", "given", "file", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py#L321-L325
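The file written above is the target_class.csv that targeted attacks consume: one '<image_id>.png,<label>' row per image. A self-contained sketch of writing and reading that format, with made-up ids and labels:

import os
import tempfile

target_classes = {'00000001': 5, '00000002': 123}  # image_id -> assumed target label

path = os.path.join(tempfile.mkdtemp(), 'target_class.csv')
with open(path, 'w') as f:
  for k, v in target_classes.items():
    f.write('{0}.png,{1}\n'.format(k, v))

# Targeted attacks read the same file back to know which class to aim for.
print(open(path).read())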
28,585
tensorflow/cleverhans
cleverhans/attack_bundling.py
single_run_max_confidence_recipe
def single_run_max_confidence_recipe(sess, model, x, y, nb_classes, eps,
                                     clip_min, clip_max, eps_iter, nb_iter,
                                     report_path,
                                     batch_size=BATCH_SIZE,
                                     eps_iter_small=None):
  """A reasonable attack bundling recipe for a max norm threat model and
  a defender that uses confidence thresholding.

  This recipe uses both uniform noise and randomly-initialized PGD targeted
  attacks.

  References:
  https://openreview.net/forum?id=H1g0piA9tQ

  This version runs each attack (noise, targeted PGD for each class with
  nb_iter iterations, target PGD for each class with 25X more iterations)
  just once and then stops. See `basic_max_confidence_recipe` for a version
  that runs indefinitely.

  :param sess: tf.Session
  :param model: cleverhans.model.Model
  :param x: numpy array containing clean example inputs to attack
  :param y: numpy array containing true labels
  :param nb_classes: int, number of classes
  :param eps: float, maximum size of perturbation (measured by max norm)
  :param eps_iter: float, step size for one version of PGD attacks
    (will also run another version with eps_iter_small step size)
  :param nb_iter: int, number of iterations for the cheaper PGD attacks
    (will also run another version with 25X more iterations)
  :param report_path: str, the path that the report will be saved to.
  :param batch_size: int, the total number of examples to run simultaneously
  :param eps_iter_small: optional, float.
    The second version of the PGD attack is run with 25 * nb_iter iterations
    and eps_iter_small step size. If eps_iter_small is not specified it is
    set to eps_iter / 25.
  """
  noise_attack = Noise(model, sess)
  pgd_attack = ProjectedGradientDescent(model, sess)
  threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max}
  noise_attack_config = AttackConfig(noise_attack, threat_params, "noise")
  attack_configs = [noise_attack_config]
  pgd_attack_configs = []
  pgd_params = copy.copy(threat_params)
  pgd_params["eps_iter"] = eps_iter
  pgd_params["nb_iter"] = nb_iter
  assert batch_size % num_devices == 0
  dev_batch_size = batch_size // num_devices
  ones = tf.ones(dev_batch_size, tf.int32)
  expensive_pgd = []
  if eps_iter_small is None:
    eps_iter_small = eps_iter / 25.
  for cls in range(nb_classes):
    cls_params = copy.copy(pgd_params)
    cls_params['y_target'] = tf.to_float(tf.one_hot(ones * cls, nb_classes))
    cls_attack_config = AttackConfig(pgd_attack, cls_params,
                                     "pgd_" + str(cls))
    pgd_attack_configs.append(cls_attack_config)
    expensive_params = copy.copy(cls_params)
    expensive_params["eps_iter"] = eps_iter_small
    expensive_params["nb_iter"] *= 25.
    expensive_config = AttackConfig(pgd_attack, expensive_params,
                                    "expensive_pgd_" + str(cls))
    expensive_pgd.append(expensive_config)
  attack_configs = [noise_attack_config] + pgd_attack_configs + expensive_pgd
  new_work_goal = {config: 1 for config in attack_configs}
  goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)]
  bundle_attacks(sess, model, x, y, attack_configs, goals, report_path,
                 attack_batch_size=batch_size, eval_batch_size=batch_size)
python
def single_run_max_confidence_recipe(sess, model, x, y, nb_classes, eps, clip_min, clip_max, eps_iter, nb_iter, report_path, batch_size=BATCH_SIZE, eps_iter_small=None): """A reasonable attack bundling recipe for a max norm threat model and a defender that uses confidence thresholding. This recipe uses both uniform noise and randomly-initialized PGD targeted attacks. References: https://openreview.net/forum?id=H1g0piA9tQ This version runs each attack (noise, targeted PGD for each class with nb_iter iterations, target PGD for each class with 25X more iterations) just once and then stops. See `basic_max_confidence_recipe` for a version that runs indefinitely. :param sess: tf.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param nb_classes: int, number of classes :param eps: float, maximum size of perturbation (measured by max norm) :param eps_iter: float, step size for one version of PGD attacks (will also run another version with eps_iter_small step size) :param nb_iter: int, number of iterations for the cheaper PGD attacks (will also run another version with 25X more iterations) :param report_path: str, the path that the report will be saved to. :param batch_size: int, the total number of examples to run simultaneously :param eps_iter_small: optional, float. The second version of the PGD attack is run with 25 * nb_iter iterations and eps_iter_small step size. If eps_iter_small is not specified it is set to eps_iter / 25. """ noise_attack = Noise(model, sess) pgd_attack = ProjectedGradientDescent(model, sess) threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max} noise_attack_config = AttackConfig(noise_attack, threat_params, "noise") attack_configs = [noise_attack_config] pgd_attack_configs = [] pgd_params = copy.copy(threat_params) pgd_params["eps_iter"] = eps_iter pgd_params["nb_iter"] = nb_iter assert batch_size % num_devices == 0 dev_batch_size = batch_size // num_devices ones = tf.ones(dev_batch_size, tf.int32) expensive_pgd = [] if eps_iter_small is None: eps_iter_small = eps_iter / 25. for cls in range(nb_classes): cls_params = copy.copy(pgd_params) cls_params['y_target'] = tf.to_float(tf.one_hot(ones * cls, nb_classes)) cls_attack_config = AttackConfig(pgd_attack, cls_params, "pgd_" + str(cls)) pgd_attack_configs.append(cls_attack_config) expensive_params = copy.copy(cls_params) expensive_params["eps_iter"] = eps_iter_small expensive_params["nb_iter"] *= 25. expensive_config = AttackConfig( pgd_attack, expensive_params, "expensive_pgd_" + str(cls)) expensive_pgd.append(expensive_config) attack_configs = [noise_attack_config] + pgd_attack_configs + expensive_pgd new_work_goal = {config: 1 for config in attack_configs} goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)] bundle_attacks(sess, model, x, y, attack_configs, goals, report_path, attack_batch_size=batch_size, eval_batch_size=batch_size)
[ "def", "single_run_max_confidence_recipe", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "nb_classes", ",", "eps", ",", "clip_min", ",", "clip_max", ",", "eps_iter", ",", "nb_iter", ",", "report_path", ",", "batch_size", "=", "BATCH_SIZE", ",", "eps_iter_small", "=", "None", ")", ":", "noise_attack", "=", "Noise", "(", "model", ",", "sess", ")", "pgd_attack", "=", "ProjectedGradientDescent", "(", "model", ",", "sess", ")", "threat_params", "=", "{", "\"eps\"", ":", "eps", ",", "\"clip_min\"", ":", "clip_min", ",", "\"clip_max\"", ":", "clip_max", "}", "noise_attack_config", "=", "AttackConfig", "(", "noise_attack", ",", "threat_params", ",", "\"noise\"", ")", "attack_configs", "=", "[", "noise_attack_config", "]", "pgd_attack_configs", "=", "[", "]", "pgd_params", "=", "copy", ".", "copy", "(", "threat_params", ")", "pgd_params", "[", "\"eps_iter\"", "]", "=", "eps_iter", "pgd_params", "[", "\"nb_iter\"", "]", "=", "nb_iter", "assert", "batch_size", "%", "num_devices", "==", "0", "dev_batch_size", "=", "batch_size", "//", "num_devices", "ones", "=", "tf", ".", "ones", "(", "dev_batch_size", ",", "tf", ".", "int32", ")", "expensive_pgd", "=", "[", "]", "if", "eps_iter_small", "is", "None", ":", "eps_iter_small", "=", "eps_iter", "/", "25.", "for", "cls", "in", "range", "(", "nb_classes", ")", ":", "cls_params", "=", "copy", ".", "copy", "(", "pgd_params", ")", "cls_params", "[", "'y_target'", "]", "=", "tf", ".", "to_float", "(", "tf", ".", "one_hot", "(", "ones", "*", "cls", ",", "nb_classes", ")", ")", "cls_attack_config", "=", "AttackConfig", "(", "pgd_attack", ",", "cls_params", ",", "\"pgd_\"", "+", "str", "(", "cls", ")", ")", "pgd_attack_configs", ".", "append", "(", "cls_attack_config", ")", "expensive_params", "=", "copy", ".", "copy", "(", "cls_params", ")", "expensive_params", "[", "\"eps_iter\"", "]", "=", "eps_iter_small", "expensive_params", "[", "\"nb_iter\"", "]", "*=", "25.", "expensive_config", "=", "AttackConfig", "(", "pgd_attack", ",", "expensive_params", ",", "\"expensive_pgd_\"", "+", "str", "(", "cls", ")", ")", "expensive_pgd", ".", "append", "(", "expensive_config", ")", "attack_configs", "=", "[", "noise_attack_config", "]", "+", "pgd_attack_configs", "+", "expensive_pgd", "new_work_goal", "=", "{", "config", ":", "1", "for", "config", "in", "attack_configs", "}", "goals", "=", "[", "MaxConfidence", "(", "t", "=", "1.", ",", "new_work_goal", "=", "new_work_goal", ")", "]", "bundle_attacks", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "attack_configs", ",", "goals", ",", "report_path", ",", "attack_batch_size", "=", "batch_size", ",", "eval_batch_size", "=", "batch_size", ")" ]
A reasonable attack bundling recipe for a max norm threat model and a defender that uses confidence thresholding. This recipe uses both uniform noise and randomly-initialized PGD targeted attacks. References: https://openreview.net/forum?id=H1g0piA9tQ This version runs each attack (noise, targeted PGD for each class with nb_iter iterations, target PGD for each class with 25X more iterations) just once and then stops. See `basic_max_confidence_recipe` for a version that runs indefinitely. :param sess: tf.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param nb_classes: int, number of classes :param eps: float, maximum size of perturbation (measured by max norm) :param eps_iter: float, step size for one version of PGD attacks (will also run another version with eps_iter_small step size) :param nb_iter: int, number of iterations for the cheaper PGD attacks (will also run another version with 25X more iterations) :param report_path: str, the path that the report will be saved to. :param batch_size: int, the total number of examples to run simultaneously :param eps_iter_small: optional, float. The second version of the PGD attack is run with 25 * nb_iter iterations and eps_iter_small step size. If eps_iter_small is not specified it is set to eps_iter / 25.
[ "A", "reasonable", "attack", "bundling", "recipe", "for", "a", "max", "norm", "threat", "model", "and", "a", "defender", "that", "uses", "confidence", "thresholding", ".", "This", "recipe", "uses", "both", "uniform", "noise", "and", "randomly", "-", "initialized", "PGD", "targeted", "attacks", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L43-L107
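The recipe above expands into 2 * nb_classes + 1 attack configurations per example: one noise attack, one cheap targeted PGD per class, and one 25x more expensive targeted PGD per class. A plain-Python sketch that just enumerates the resulting config names to make the budget explicit (no TensorFlow needed):

nb_classes = 10
nb_iter = 40

configs = ["noise"]
configs += ["pgd_%d (nb_iter=%d)" % (cls, nb_iter)
            for cls in range(nb_classes)]
configs += ["expensive_pgd_%d (nb_iter=%d)" % (cls, nb_iter * 25)
            for cls in range(nb_classes)]

print(len(configs))  # 2 * nb_classes + 1 = 21 attack runs per example
for name in configs:
  print(name)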
28,586
tensorflow/cleverhans
cleverhans/attack_bundling.py
random_search_max_confidence_recipe
def random_search_max_confidence_recipe(sess, model, x, y, eps,
                                        clip_min, clip_max,
                                        report_path,
                                        batch_size=BATCH_SIZE,
                                        num_noise_points=10000):
  """Max confidence using random search.

  References:
  https://openreview.net/forum?id=H1g0piA9tQ
    Describes the max_confidence procedure used for the bundling in this
    recipe
  https://arxiv.org/abs/1802.00420
    Describes using random search with 1e5 or more random points to avoid
    gradient masking.

  :param sess: tf.Session
  :param model: cleverhans.model.Model
  :param x: numpy array containing clean example inputs to attack
  :param y: numpy array containing true labels
  :param nb_classes: int, number of classes
  :param eps: float, maximum size of perturbation (measured by max norm)
  :param eps_iter: float, step size for one version of PGD attacks
    (will also run another version with 25X smaller step size)
  :param nb_iter: int, number of iterations for one version of PGD attacks
    (will also run another version with 25X more iterations)
  :param report_path: str, the path that the report will be saved to.
  :batch_size: int, the total number of examples to run simultaneously
  """
  noise_attack = Noise(model, sess)
  threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max}
  noise_attack_config = AttackConfig(noise_attack, threat_params)
  attack_configs = [noise_attack_config]
  assert batch_size % num_devices == 0
  new_work_goal = {noise_attack_config: num_noise_points}
  goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)]
  bundle_attacks(sess, model, x, y, attack_configs, goals, report_path)
python
def random_search_max_confidence_recipe(sess, model, x, y, eps, clip_min, clip_max, report_path, batch_size=BATCH_SIZE, num_noise_points=10000): """Max confidence using random search. References: https://openreview.net/forum?id=H1g0piA9tQ Describes the max_confidence procedure used for the bundling in this recipe https://arxiv.org/abs/1802.00420 Describes using random search with 1e5 or more random points to avoid gradient masking. :param sess: tf.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param nb_classes: int, number of classes :param eps: float, maximum size of perturbation (measured by max norm) :param eps_iter: float, step size for one version of PGD attacks (will also run another version with 25X smaller step size) :param nb_iter: int, number of iterations for one version of PGD attacks (will also run another version with 25X more iterations) :param report_path: str, the path that the report will be saved to. :batch_size: int, the total number of examples to run simultaneously """ noise_attack = Noise(model, sess) threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max} noise_attack_config = AttackConfig(noise_attack, threat_params) attack_configs = [noise_attack_config] assert batch_size % num_devices == 0 new_work_goal = {noise_attack_config: num_noise_points} goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)] bundle_attacks(sess, model, x, y, attack_configs, goals, report_path)
[ "def", "random_search_max_confidence_recipe", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "eps", ",", "clip_min", ",", "clip_max", ",", "report_path", ",", "batch_size", "=", "BATCH_SIZE", ",", "num_noise_points", "=", "10000", ")", ":", "noise_attack", "=", "Noise", "(", "model", ",", "sess", ")", "threat_params", "=", "{", "\"eps\"", ":", "eps", ",", "\"clip_min\"", ":", "clip_min", ",", "\"clip_max\"", ":", "clip_max", "}", "noise_attack_config", "=", "AttackConfig", "(", "noise_attack", ",", "threat_params", ")", "attack_configs", "=", "[", "noise_attack_config", "]", "assert", "batch_size", "%", "num_devices", "==", "0", "new_work_goal", "=", "{", "noise_attack_config", ":", "num_noise_points", "}", "goals", "=", "[", "MaxConfidence", "(", "t", "=", "1.", ",", "new_work_goal", "=", "new_work_goal", ")", "]", "bundle_attacks", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "attack_configs", ",", "goals", ",", "report_path", ")" ]
Max confidence using random search. References: https://openreview.net/forum?id=H1g0piA9tQ Describes the max_confidence procedure used for the bundling in this recipe https://arxiv.org/abs/1802.00420 Describes using random search with 1e5 or more random points to avoid gradient masking. :param sess: tf.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param nb_classes: int, number of classes :param eps: float, maximum size of perturbation (measured by max norm) :param eps_iter: float, step size for one version of PGD attacks (will also run another version with 25X smaller step size) :param nb_iter: int, number of iterations for one version of PGD attacks (will also run another version with 25X more iterations) :param report_path: str, the path that the report will be saved to. :batch_size: int, the total number of examples to run simultaneously
[ "Max", "confidence", "using", "random", "search", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L256-L289
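The only attack used by this recipe is uniform noise inside the eps ball, drawn num_noise_points times per example. A NumPy sketch of a single candidate draw with clipping (illustrative; the real Noise attack is the cleverhans.attacks class referenced above, and the input range and shape here are assumptions):

import numpy as np

eps, clip_min, clip_max = 0.3, 0.0, 1.0
x = np.random.uniform(clip_min, clip_max, size=(1, 28, 28, 1))

def noise_candidate(x):
  """One random-search candidate: uniform noise in the max-norm ball, then clip."""
  adv = x + np.random.uniform(-eps, eps, size=x.shape)
  return np.clip(adv, clip_min, clip_max)

candidates = [noise_candidate(x) for _ in range(5)]  # the recipe draws num_noise_points=10000
print(max(np.abs(c - x).max() for c in candidates))  # stays within eps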
28,587
tensorflow/cleverhans
cleverhans/attack_bundling.py
bundle_attacks
def bundle_attacks(sess, model, x, y, attack_configs, goals, report_path,
                   attack_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE):
  """
  Runs attack bundling.
  Users of cleverhans may call this function but are more likely to call
  one of the recipes above.

  Reference: https://openreview.net/forum?id=H1g0piA9tQ

  :param sess: tf.session.Session
  :param model: cleverhans.model.Model
  :param x: numpy array containing clean example inputs to attack
  :param y: numpy array containing true labels
  :param attack_configs: list of AttackConfigs to run
  :param goals: list of AttackGoals to run
    The bundler works through the goals in order, until each is satisfied.
    Some goals may never be satisfied, in which case the bundler will run
    forever, updating the report on disk as it goes.
  :param report_path: str, the path the report will be saved to
  :param attack_batch_size: int, batch size for generating adversarial examples
  :param eval_batch_size: int, batch size for evaluating the model on
    clean / adversarial examples
  :returns:
    adv_x: The adversarial examples, in the same format as `x`
    run_counts: dict mapping each AttackConfig to a numpy array reporting
      how many times that AttackConfig was run on each example
  """
  assert isinstance(sess, tf.Session)
  assert isinstance(model, Model)
  assert all(isinstance(attack_config, AttackConfig)
             for attack_config in attack_configs)
  assert all(isinstance(goal, AttackGoal) for goal in goals)
  assert isinstance(report_path, six.string_types)
  if x.shape[0] != y.shape[0]:
    raise ValueError("Number of input examples does not match number of labels")

  # Note: no need to precompile attacks, correctness_and_confidence
  # caches them
  run_counts = {}
  for attack_config in attack_configs:
    run_counts[attack_config] = np.zeros(x.shape[0], dtype=np.int64)

  # TODO: make an interface to pass this in if it has already been computed
  # elsewhere
  _logger.info("Running on clean data to initialize the report...")
  packed = correctness_and_confidence(sess, model, x, y,
                                      batch_size=eval_batch_size,
                                      devices=devices)
  _logger.info("...done")
  correctness, confidence = packed
  _logger.info("Accuracy: " + str(correctness.mean()))
  report = ConfidenceReport()
  report['clean'] = ConfidenceReportEntry(correctness, confidence)

  adv_x = x.copy()
  for goal in goals:
    bundle_attacks_with_goal(sess, model, x, y, adv_x, attack_configs,
                             run_counts, goal, report, report_path,
                             attack_batch_size=attack_batch_size,
                             eval_batch_size=eval_batch_size)

  # Many users will set `goals` to make this run forever, so the return
  # statement is not the primary way to get information out.
  return adv_x, run_counts
python
def bundle_attacks(sess, model, x, y, attack_configs, goals, report_path, attack_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE): """ Runs attack bundling. Users of cleverhans may call this function but are more likely to call one of the recipes above. Reference: https://openreview.net/forum?id=H1g0piA9tQ :param sess: tf.session.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param attack_configs: list of AttackConfigs to run :param goals: list of AttackGoals to run The bundler works through the goals in order, until each is satisfied. Some goals may never be satisfied, in which case the bundler will run forever, updating the report on disk as it goes. :param report_path: str, the path the report will be saved to :param attack_batch_size: int, batch size for generating adversarial examples :param eval_batch_size: int, batch size for evaluating the model on clean / adversarial examples :returns: adv_x: The adversarial examples, in the same format as `x` run_counts: dict mapping each AttackConfig to a numpy array reporting how many times that AttackConfig was run on each example """ assert isinstance(sess, tf.Session) assert isinstance(model, Model) assert all(isinstance(attack_config, AttackConfig) for attack_config in attack_configs) assert all(isinstance(goal, AttackGoal) for goal in goals) assert isinstance(report_path, six.string_types) if x.shape[0] != y.shape[0]: raise ValueError("Number of input examples does not match number of labels") # Note: no need to precompile attacks, correctness_and_confidence # caches them run_counts = {} for attack_config in attack_configs: run_counts[attack_config] = np.zeros(x.shape[0], dtype=np.int64) # TODO: make an interface to pass this in if it has already been computed # elsewhere _logger.info("Running on clean data to initialize the report...") packed = correctness_and_confidence(sess, model, x, y, batch_size=eval_batch_size, devices=devices) _logger.info("...done") correctness, confidence = packed _logger.info("Accuracy: " + str(correctness.mean())) report = ConfidenceReport() report['clean'] = ConfidenceReportEntry(correctness, confidence) adv_x = x.copy() for goal in goals: bundle_attacks_with_goal(sess, model, x, y, adv_x, attack_configs, run_counts, goal, report, report_path, attack_batch_size=attack_batch_size, eval_batch_size=eval_batch_size) # Many users will set `goals` to make this run forever, so the return # statement is not the primary way to get information out. return adv_x, run_counts
[ "def", "bundle_attacks", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "attack_configs", ",", "goals", ",", "report_path", ",", "attack_batch_size", "=", "BATCH_SIZE", ",", "eval_batch_size", "=", "BATCH_SIZE", ")", ":", "assert", "isinstance", "(", "sess", ",", "tf", ".", "Session", ")", "assert", "isinstance", "(", "model", ",", "Model", ")", "assert", "all", "(", "isinstance", "(", "attack_config", ",", "AttackConfig", ")", "for", "attack_config", "in", "attack_configs", ")", "assert", "all", "(", "isinstance", "(", "goal", ",", "AttackGoal", ")", "for", "goal", "in", "goals", ")", "assert", "isinstance", "(", "report_path", ",", "six", ".", "string_types", ")", "if", "x", ".", "shape", "[", "0", "]", "!=", "y", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "\"Number of input examples does not match number of labels\"", ")", "# Note: no need to precompile attacks, correctness_and_confidence", "# caches them", "run_counts", "=", "{", "}", "for", "attack_config", "in", "attack_configs", ":", "run_counts", "[", "attack_config", "]", "=", "np", ".", "zeros", "(", "x", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "int64", ")", "# TODO: make an interface to pass this in if it has already been computed", "# elsewhere", "_logger", ".", "info", "(", "\"Running on clean data to initialize the report...\"", ")", "packed", "=", "correctness_and_confidence", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "batch_size", "=", "eval_batch_size", ",", "devices", "=", "devices", ")", "_logger", ".", "info", "(", "\"...done\"", ")", "correctness", ",", "confidence", "=", "packed", "_logger", ".", "info", "(", "\"Accuracy: \"", "+", "str", "(", "correctness", ".", "mean", "(", ")", ")", ")", "report", "=", "ConfidenceReport", "(", ")", "report", "[", "'clean'", "]", "=", "ConfidenceReportEntry", "(", "correctness", ",", "confidence", ")", "adv_x", "=", "x", ".", "copy", "(", ")", "for", "goal", "in", "goals", ":", "bundle_attacks_with_goal", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "adv_x", ",", "attack_configs", ",", "run_counts", ",", "goal", ",", "report", ",", "report_path", ",", "attack_batch_size", "=", "attack_batch_size", ",", "eval_batch_size", "=", "eval_batch_size", ")", "# Many users will set `goals` to make this run forever, so the return", "# statement is not the primary way to get information out.", "return", "adv_x", ",", "run_counts" ]
Runs attack bundling. Users of cleverhans may call this function but are more likely to call one of the recipes above. Reference: https://openreview.net/forum?id=H1g0piA9tQ :param sess: tf.session.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param attack_configs: list of AttackConfigs to run :param goals: list of AttackGoals to run The bundler works through the goals in order, until each is satisfied. Some goals may never be satisfied, in which case the bundler will run forever, updating the report on disk as it goes. :param report_path: str, the path the report will be saved to :param attack_batch_size: int, batch size for generating adversarial examples :param eval_batch_size: int, batch size for evaluating the model on clean / adversarial examples :returns: adv_x: The adversarial examples, in the same format as `x` run_counts: dict mapping each AttackConfig to a numpy array reporting how many times that AttackConfig was run on each example
[ "Runs", "attack", "bundling", ".", "Users", "of", "cleverhans", "may", "call", "this", "function", "but", "are", "more", "likely", "to", "call", "one", "of", "the", "recipes", "above", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L320-L383
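Conceptually, bundling keeps, for each clean example, whichever adversarial candidate is misclassified with the highest confidence. A NumPy sketch of that selection rule on fake probability vectors; it mirrors the MaxConfidence goal in spirit and is not the ConfidenceReport API:

import numpy as np

def keep_better(adv_x, new_x, y, probs_adv, probs_new):
  """Keep, per example, the candidate that is misclassified with higher confidence."""
  wrong_conf_old = np.where(probs_adv.argmax(1) != y, probs_adv.max(1), 0.)
  wrong_conf_new = np.where(probs_new.argmax(1) != y, probs_new.max(1), 0.)
  take_new = wrong_conf_new > wrong_conf_old
  out = adv_x.copy()
  out[take_new] = new_x[take_new]
  return out

# Fake data: 4 examples, 3 classes, 2 input features.
y = np.array([0, 1, 2, 0])
adv_x = np.zeros((4, 2))
new_x = np.ones((4, 2))
probs_adv = np.array([[.9, .05, .05], [.2, .7, .1], [.1, .2, .7], [.3, .6, .1]])
probs_new = np.array([[.2, .7, .1], [.6, .3, .1], [.1, .8, .1], [.4, .5, .1]])
print(keep_better(adv_x, new_x, y, probs_adv, probs_new))  # last row keeps the old candidate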
28,588
tensorflow/cleverhans
cleverhans/attack_bundling.py
bundle_attacks_with_goal
def bundle_attacks_with_goal(sess, model, x, y, adv_x, attack_configs,
                             run_counts, goal, report, report_path,
                             attack_batch_size=BATCH_SIZE,
                             eval_batch_size=BATCH_SIZE):
  """
  Runs attack bundling, working on one specific AttackGoal.
  This function is mostly intended to be called by `bundle_attacks`.

  Reference: https://openreview.net/forum?id=H1g0piA9tQ

  :param sess: tf.session.Session
  :param model: cleverhans.model.Model
  :param x: numpy array containing clean example inputs to attack
  :param y: numpy array containing true labels
  :param adv_x: numpy array containing the adversarial examples made so far
    by earlier work in the bundling process
  :param attack_configs: list of AttackConfigs to run
  :param run_counts: dict mapping AttackConfigs to numpy arrays specifying
    how many times they have been run on each example
  :param goal: AttackGoal to run
  :param report: ConfidenceReport
  :param report_path: str, the path the report will be saved to
  :param attack_batch_size: int, batch size for generating adversarial examples
  :param eval_batch_size: int, batch size for evaluating the model on
    adversarial examples
  """
  goal.start(run_counts)
  _logger.info("Running criteria for new goal...")
  criteria = goal.get_criteria(sess, model, adv_x, y,
                               batch_size=eval_batch_size)
  assert 'correctness' in criteria
  _logger.info("Accuracy: " + str(criteria['correctness'].mean()))
  assert 'confidence' in criteria

  while not goal.is_satisfied(criteria, run_counts):
    run_batch_with_goal(sess, model, x, y, adv_x, criteria, attack_configs,
                        run_counts, goal, report, report_path,
                        attack_batch_size=attack_batch_size)

  # Save after finishing all goals.
  # The incremental saves run on a timer. This save is needed so that the last
  # few attacks after the timer don't get discarded
  report.completed = True
  save(criteria, report, report_path, adv_x)
python
def bundle_attacks_with_goal(sess, model, x, y, adv_x, attack_configs, run_counts, goal, report, report_path, attack_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE): """ Runs attack bundling, working on one specific AttackGoal. This function is mostly intended to be called by `bundle_attacks`. Reference: https://openreview.net/forum?id=H1g0piA9tQ :param sess: tf.session.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param adv_x: numpy array containing the adversarial examples made so far by earlier work in the bundling process :param attack_configs: list of AttackConfigs to run :param run_counts: dict mapping AttackConfigs to numpy arrays specifying how many times they have been run on each example :param goal: AttackGoal to run :param report: ConfidenceReport :param report_path: str, the path the report will be saved to :param attack_batch_size: int, batch size for generating adversarial examples :param eval_batch_size: int, batch size for evaluating the model on adversarial examples """ goal.start(run_counts) _logger.info("Running criteria for new goal...") criteria = goal.get_criteria(sess, model, adv_x, y, batch_size=eval_batch_size) assert 'correctness' in criteria _logger.info("Accuracy: " + str(criteria['correctness'].mean())) assert 'confidence' in criteria while not goal.is_satisfied(criteria, run_counts): run_batch_with_goal(sess, model, x, y, adv_x, criteria, attack_configs, run_counts, goal, report, report_path, attack_batch_size=attack_batch_size) # Save after finishing all goals. # The incremental saves run on a timer. This save is needed so that the last # few attacks after the timer don't get discarded report.completed = True save(criteria, report, report_path, adv_x)
[ "def", "bundle_attacks_with_goal", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "adv_x", ",", "attack_configs", ",", "run_counts", ",", "goal", ",", "report", ",", "report_path", ",", "attack_batch_size", "=", "BATCH_SIZE", ",", "eval_batch_size", "=", "BATCH_SIZE", ")", ":", "goal", ".", "start", "(", "run_counts", ")", "_logger", ".", "info", "(", "\"Running criteria for new goal...\"", ")", "criteria", "=", "goal", ".", "get_criteria", "(", "sess", ",", "model", ",", "adv_x", ",", "y", ",", "batch_size", "=", "eval_batch_size", ")", "assert", "'correctness'", "in", "criteria", "_logger", ".", "info", "(", "\"Accuracy: \"", "+", "str", "(", "criteria", "[", "'correctness'", "]", ".", "mean", "(", ")", ")", ")", "assert", "'confidence'", "in", "criteria", "while", "not", "goal", ".", "is_satisfied", "(", "criteria", ",", "run_counts", ")", ":", "run_batch_with_goal", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "adv_x", ",", "criteria", ",", "attack_configs", ",", "run_counts", ",", "goal", ",", "report", ",", "report_path", ",", "attack_batch_size", "=", "attack_batch_size", ")", "# Save after finishing all goals.", "# The incremental saves run on a timer. This save is needed so that the last", "# few attacks after the timer don't get discarded", "report", ".", "completed", "=", "True", "save", "(", "criteria", ",", "report", ",", "report_path", ",", "adv_x", ")" ]
Runs attack bundling, working on one specific AttackGoal. This function is mostly intended to be called by `bundle_attacks`. Reference: https://openreview.net/forum?id=H1g0piA9tQ :param sess: tf.session.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param adv_x: numpy array containing the adversarial examples made so far by earlier work in the bundling process :param attack_configs: list of AttackConfigs to run :param run_counts: dict mapping AttackConfigs to numpy arrays specifying how many times they have been run on each example :param goal: AttackGoal to run :param report: ConfidenceReport :param report_path: str, the path the report will be saved to :param attack_batch_size: int, batch size for generating adversarial examples :param eval_batch_size: int, batch size for evaluating the model on adversarial examples
[ "Runs", "attack", "bundling", "working", "on", "one", "specific", "AttackGoal", ".", "This", "function", "is", "mostly", "intended", "to", "be", "called", "by", "bundle_attacks", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L385-L425
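As a complement to the record above, here is a small self-contained toy (plain numpy, no cleverhans objects) of the control flow in `bundle_attacks_with_goal`: keep running attack batches until the goal's `is_satisfied` check passes. The `ToyGoal` and `toy_run_batch` names are inventions for illustration only.

import numpy as np

class ToyGoal(object):
  """Satisfied once every example is misclassified (no correct predictions left)."""
  def is_satisfied(self, criteria, run_counts):
    return not criteria['correctness'].any()

def toy_run_batch(criteria, rng):
  # Pretend each attack batch flips a couple of still-correct examples.
  correct_idxs = np.flatnonzero(criteria['correctness'])
  flip = rng.choice(correct_idxs, size=min(2, correct_idxs.size), replace=False)
  criteria['correctness'][flip] = False

rng = np.random.RandomState(0)
criteria = {'correctness': np.ones(10, dtype=bool)}
run_counts = {}  # unused by the toy goal, kept to mirror the real signature
while not ToyGoal().is_satisfied(criteria, run_counts):
  toy_run_batch(criteria, rng)
print("finished; accuracy is now", criteria['correctness'].mean())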
28,589
tensorflow/cleverhans
cleverhans/attack_bundling.py
run_batch_with_goal
def run_batch_with_goal(sess, model, x, y, adv_x_val, criteria, attack_configs, run_counts, goal, report, report_path, attack_batch_size=BATCH_SIZE): """ Runs attack bundling on one batch of data. This function is mostly intended to be called by `bundle_attacks_with_goal`. :param sess: tf.session.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param adv_x_val: numpy array containing the adversarial examples made so far by earlier work in the bundling process :param criteria: dict mapping string names of criteria to numpy arrays with their values for each example (Different AttackGoals track different criteria) :param run_counts: dict mapping AttackConfigs to numpy arrays reporting how many times they have been run on each example :param goal: the AttackGoal to work on :param report: dict, see `bundle_attacks_with_goal` :param report_path: str, path to save the report to """ attack_config = goal.get_attack_config(attack_configs, run_counts, criteria) idxs = goal.request_examples(attack_config, criteria, run_counts, attack_batch_size) x_batch = x[idxs] assert x_batch.shape[0] == attack_batch_size y_batch = y[idxs] assert y_batch.shape[0] == attack_batch_size adv_x_batch = run_attack(sess, model, x_batch, y_batch, attack_config.attack, attack_config.params, attack_batch_size, devices, pass_y=attack_config.pass_y) criteria_batch = goal.get_criteria(sess, model, adv_x_batch, y_batch, batch_size=min(attack_batch_size, BATCH_SIZE)) # This can't be parallelized because some orig examples are copied more # than once into the batch cur_run_counts = run_counts[attack_config] for batch_idx, orig_idx in enumerate(idxs): cur_run_counts[orig_idx] += 1 should_copy = goal.new_wins(criteria, orig_idx, criteria_batch, batch_idx) if should_copy: adv_x_val[orig_idx] = adv_x_batch[batch_idx] for key in criteria: criteria[key][orig_idx] = criteria_batch[key][batch_idx] assert np.allclose(y[orig_idx], y_batch[batch_idx]) report['bundled'] = ConfidenceReportEntry(criteria['correctness'], criteria['confidence']) should_save = False new_time = time.time() if hasattr(report, 'time'): if new_time - report.time > REPORT_TIME_INTERVAL: should_save = True else: should_save = True if should_save: report.time = new_time goal.print_progress(criteria, run_counts) save(criteria, report, report_path, adv_x_val)
python
def run_batch_with_goal(sess, model, x, y, adv_x_val, criteria, attack_configs, run_counts, goal, report, report_path, attack_batch_size=BATCH_SIZE): """ Runs attack bundling on one batch of data. This function is mostly intended to be called by `bundle_attacks_with_goal`. :param sess: tf.session.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param adv_x_val: numpy array containing the adversarial examples made so far by earlier work in the bundling process :param criteria: dict mapping string names of criteria to numpy arrays with their values for each example (Different AttackGoals track different criteria) :param run_counts: dict mapping AttackConfigs to numpy arrays reporting how many times they have been run on each example :param goal: the AttackGoal to work on :param report: dict, see `bundle_attacks_with_goal` :param report_path: str, path to save the report to """ attack_config = goal.get_attack_config(attack_configs, run_counts, criteria) idxs = goal.request_examples(attack_config, criteria, run_counts, attack_batch_size) x_batch = x[idxs] assert x_batch.shape[0] == attack_batch_size y_batch = y[idxs] assert y_batch.shape[0] == attack_batch_size adv_x_batch = run_attack(sess, model, x_batch, y_batch, attack_config.attack, attack_config.params, attack_batch_size, devices, pass_y=attack_config.pass_y) criteria_batch = goal.get_criteria(sess, model, adv_x_batch, y_batch, batch_size=min(attack_batch_size, BATCH_SIZE)) # This can't be parallelized because some orig examples are copied more # than once into the batch cur_run_counts = run_counts[attack_config] for batch_idx, orig_idx in enumerate(idxs): cur_run_counts[orig_idx] += 1 should_copy = goal.new_wins(criteria, orig_idx, criteria_batch, batch_idx) if should_copy: adv_x_val[orig_idx] = adv_x_batch[batch_idx] for key in criteria: criteria[key][orig_idx] = criteria_batch[key][batch_idx] assert np.allclose(y[orig_idx], y_batch[batch_idx]) report['bundled'] = ConfidenceReportEntry(criteria['correctness'], criteria['confidence']) should_save = False new_time = time.time() if hasattr(report, 'time'): if new_time - report.time > REPORT_TIME_INTERVAL: should_save = True else: should_save = True if should_save: report.time = new_time goal.print_progress(criteria, run_counts) save(criteria, report, report_path, adv_x_val)
[ "def", "run_batch_with_goal", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "adv_x_val", ",", "criteria", ",", "attack_configs", ",", "run_counts", ",", "goal", ",", "report", ",", "report_path", ",", "attack_batch_size", "=", "BATCH_SIZE", ")", ":", "attack_config", "=", "goal", ".", "get_attack_config", "(", "attack_configs", ",", "run_counts", ",", "criteria", ")", "idxs", "=", "goal", ".", "request_examples", "(", "attack_config", ",", "criteria", ",", "run_counts", ",", "attack_batch_size", ")", "x_batch", "=", "x", "[", "idxs", "]", "assert", "x_batch", ".", "shape", "[", "0", "]", "==", "attack_batch_size", "y_batch", "=", "y", "[", "idxs", "]", "assert", "y_batch", ".", "shape", "[", "0", "]", "==", "attack_batch_size", "adv_x_batch", "=", "run_attack", "(", "sess", ",", "model", ",", "x_batch", ",", "y_batch", ",", "attack_config", ".", "attack", ",", "attack_config", ".", "params", ",", "attack_batch_size", ",", "devices", ",", "pass_y", "=", "attack_config", ".", "pass_y", ")", "criteria_batch", "=", "goal", ".", "get_criteria", "(", "sess", ",", "model", ",", "adv_x_batch", ",", "y_batch", ",", "batch_size", "=", "min", "(", "attack_batch_size", ",", "BATCH_SIZE", ")", ")", "# This can't be parallelized because some orig examples are copied more", "# than once into the batch", "cur_run_counts", "=", "run_counts", "[", "attack_config", "]", "for", "batch_idx", ",", "orig_idx", "in", "enumerate", "(", "idxs", ")", ":", "cur_run_counts", "[", "orig_idx", "]", "+=", "1", "should_copy", "=", "goal", ".", "new_wins", "(", "criteria", ",", "orig_idx", ",", "criteria_batch", ",", "batch_idx", ")", "if", "should_copy", ":", "adv_x_val", "[", "orig_idx", "]", "=", "adv_x_batch", "[", "batch_idx", "]", "for", "key", "in", "criteria", ":", "criteria", "[", "key", "]", "[", "orig_idx", "]", "=", "criteria_batch", "[", "key", "]", "[", "batch_idx", "]", "assert", "np", ".", "allclose", "(", "y", "[", "orig_idx", "]", ",", "y_batch", "[", "batch_idx", "]", ")", "report", "[", "'bundled'", "]", "=", "ConfidenceReportEntry", "(", "criteria", "[", "'correctness'", "]", ",", "criteria", "[", "'confidence'", "]", ")", "should_save", "=", "False", "new_time", "=", "time", ".", "time", "(", ")", "if", "hasattr", "(", "report", ",", "'time'", ")", ":", "if", "new_time", "-", "report", ".", "time", ">", "REPORT_TIME_INTERVAL", ":", "should_save", "=", "True", "else", ":", "should_save", "=", "True", "if", "should_save", ":", "report", ".", "time", "=", "new_time", "goal", ".", "print_progress", "(", "criteria", ",", "run_counts", ")", "save", "(", "criteria", ",", "report", ",", "report_path", ",", "adv_x_val", ")" ]
Runs attack bundling on one batch of data. This function is mostly intended to be called by `bundle_attacks_with_goal`. :param sess: tf.session.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param adv_x_val: numpy array containing the adversarial examples made so far by earlier work in the bundling process :param criteria: dict mapping string names of criteria to numpy arrays with their values for each example (Different AttackGoals track different criteria) :param run_counts: dict mapping AttackConfigs to numpy arrays reporting how many times they have been run on each example :param goal: the AttackGoal to work on :param report: dict, see `bundle_attacks_with_goal` :param report_path: str, path to save the report to
[ "Runs", "attack", "bundling", "on", "one", "batch", "of", "data", ".", "This", "function", "is", "mostly", "intended", "to", "be", "called", "by", "bundle_attacks_with_goal", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L428-L487
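The per-example bookkeeping in `run_batch_with_goal` (copy a freshly attacked example over the running best one only when the goal says it wins) can be illustrated with a self-contained numpy sketch. Here "winning" is simplified to a higher wrong-class confidence, and all names are toy stand-ins rather than cleverhans APIs.

import numpy as np

def copy_winners(adv_x_val, criteria, adv_x_batch, criteria_batch, idxs):
  # Mirror of the inner loop: only overwrite an example when the new attempt wins.
  for batch_idx, orig_idx in enumerate(idxs):
    new_wins = (criteria_batch['wrong_confidence'][batch_idx] >
                criteria['wrong_confidence'][orig_idx])
    if new_wins:
      adv_x_val[orig_idx] = adv_x_batch[batch_idx]
      for key in criteria:
        criteria[key][orig_idx] = criteria_batch[key][batch_idx]

adv_x_val = np.zeros((4, 2))
criteria = {'wrong_confidence': np.array([0.2, 0.9, 0.1, 0.5])}
adv_x_batch = np.ones((2, 2))
criteria_batch = {'wrong_confidence': np.array([0.8, 0.3])}
copy_winners(adv_x_val, criteria, adv_x_batch, criteria_batch, idxs=[1, 2])
print(criteria['wrong_confidence'])  # [0.2 0.9 0.3 0.5]: only example 2 was improved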
28,590
tensorflow/cleverhans
cleverhans/attack_bundling.py
bundle_examples_with_goal
def bundle_examples_with_goal(sess, model, adv_x_list, y, goal, report_path, batch_size=BATCH_SIZE): """ A post-processor version of attack bundling, that chooses the strongest example from the output of multiple earlier bundling strategies. :param sess: tf.session.Session :param model: cleverhans.model.Model :param adv_x_list: list of numpy arrays Each entry in the list is the output of a previous bundler; it is an adversarial version of the whole dataset. :param y: numpy array containing true labels :param goal: AttackGoal to use to choose the best version of each adversarial example :param report_path: str, the path the report will be saved to :param batch_size: int, batch size """ # Check the input num_attacks = len(adv_x_list) assert num_attacks > 0 adv_x_0 = adv_x_list[0] assert isinstance(adv_x_0, np.ndarray) assert all(adv_x.shape == adv_x_0.shape for adv_x in adv_x_list) # Allocate the output out = np.zeros_like(adv_x_0) m = adv_x_0.shape[0] # Initialize with negative sentinel values to make sure everything is # written to correctness = -np.ones(m, dtype='int32') confidence = -np.ones(m, dtype='float32') # Gather criteria criteria = [goal.get_criteria(sess, model, adv_x, y, batch_size=batch_size) for adv_x in adv_x_list] assert all('correctness' in c for c in criteria) assert all('confidence' in c for c in criteria) _logger.info("Accuracy on each advx dataset: ") for c in criteria: _logger.info("\t" + str(c['correctness'].mean())) for example_idx in range(m): # Index of the best attack for this example attack_idx = 0 # Find the winner for candidate_idx in range(1, num_attacks): if goal.new_wins(criteria[attack_idx], example_idx, criteria[candidate_idx], example_idx): attack_idx = candidate_idx # Copy the winner into the output out[example_idx] = adv_x_list[attack_idx][example_idx] correctness[example_idx] = criteria[attack_idx]['correctness'][example_idx] confidence[example_idx] = criteria[attack_idx]['confidence'][example_idx] assert correctness.min() >= 0 assert correctness.max() <= 1 assert confidence.min() >= 0. assert confidence.max() <= 1. correctness = correctness.astype('bool') _logger.info("Accuracy on bundled examples: " + str(correctness.mean())) report = ConfidenceReport() report['bundled'] = ConfidenceReportEntry(correctness, confidence) serial.save(report_path, report) assert report_path.endswith('.joblib') adv_x_path = report_path[:-len('.joblib')] + "_adv_x.npy" np.save(adv_x_path, out)
python
def bundle_examples_with_goal(sess, model, adv_x_list, y, goal, report_path, batch_size=BATCH_SIZE): """ A post-processor version of attack bundling, that chooses the strongest example from the output of multiple earlier bundling strategies. :param sess: tf.session.Session :param model: cleverhans.model.Model :param adv_x_list: list of numpy arrays Each entry in the list is the output of a previous bundler; it is an adversarial version of the whole dataset. :param y: numpy array containing true labels :param goal: AttackGoal to use to choose the best version of each adversarial example :param report_path: str, the path the report will be saved to :param batch_size: int, batch size """ # Check the input num_attacks = len(adv_x_list) assert num_attacks > 0 adv_x_0 = adv_x_list[0] assert isinstance(adv_x_0, np.ndarray) assert all(adv_x.shape == adv_x_0.shape for adv_x in adv_x_list) # Allocate the output out = np.zeros_like(adv_x_0) m = adv_x_0.shape[0] # Initialize with negative sentinel values to make sure everything is # written to correctness = -np.ones(m, dtype='int32') confidence = -np.ones(m, dtype='float32') # Gather criteria criteria = [goal.get_criteria(sess, model, adv_x, y, batch_size=batch_size) for adv_x in adv_x_list] assert all('correctness' in c for c in criteria) assert all('confidence' in c for c in criteria) _logger.info("Accuracy on each advx dataset: ") for c in criteria: _logger.info("\t" + str(c['correctness'].mean())) for example_idx in range(m): # Index of the best attack for this example attack_idx = 0 # Find the winner for candidate_idx in range(1, num_attacks): if goal.new_wins(criteria[attack_idx], example_idx, criteria[candidate_idx], example_idx): attack_idx = candidate_idx # Copy the winner into the output out[example_idx] = adv_x_list[attack_idx][example_idx] correctness[example_idx] = criteria[attack_idx]['correctness'][example_idx] confidence[example_idx] = criteria[attack_idx]['confidence'][example_idx] assert correctness.min() >= 0 assert correctness.max() <= 1 assert confidence.min() >= 0. assert confidence.max() <= 1. correctness = correctness.astype('bool') _logger.info("Accuracy on bundled examples: " + str(correctness.mean())) report = ConfidenceReport() report['bundled'] = ConfidenceReportEntry(correctness, confidence) serial.save(report_path, report) assert report_path.endswith('.joblib') adv_x_path = report_path[:-len('.joblib')] + "_adv_x.npy" np.save(adv_x_path, out)
[ "def", "bundle_examples_with_goal", "(", "sess", ",", "model", ",", "adv_x_list", ",", "y", ",", "goal", ",", "report_path", ",", "batch_size", "=", "BATCH_SIZE", ")", ":", "# Check the input", "num_attacks", "=", "len", "(", "adv_x_list", ")", "assert", "num_attacks", ">", "0", "adv_x_0", "=", "adv_x_list", "[", "0", "]", "assert", "isinstance", "(", "adv_x_0", ",", "np", ".", "ndarray", ")", "assert", "all", "(", "adv_x", ".", "shape", "==", "adv_x_0", ".", "shape", "for", "adv_x", "in", "adv_x_list", ")", "# Allocate the output", "out", "=", "np", ".", "zeros_like", "(", "adv_x_0", ")", "m", "=", "adv_x_0", ".", "shape", "[", "0", "]", "# Initialize with negative sentinel values to make sure everything is", "# written to", "correctness", "=", "-", "np", ".", "ones", "(", "m", ",", "dtype", "=", "'int32'", ")", "confidence", "=", "-", "np", ".", "ones", "(", "m", ",", "dtype", "=", "'float32'", ")", "# Gather criteria", "criteria", "=", "[", "goal", ".", "get_criteria", "(", "sess", ",", "model", ",", "adv_x", ",", "y", ",", "batch_size", "=", "batch_size", ")", "for", "adv_x", "in", "adv_x_list", "]", "assert", "all", "(", "'correctness'", "in", "c", "for", "c", "in", "criteria", ")", "assert", "all", "(", "'confidence'", "in", "c", "for", "c", "in", "criteria", ")", "_logger", ".", "info", "(", "\"Accuracy on each advx dataset: \"", ")", "for", "c", "in", "criteria", ":", "_logger", ".", "info", "(", "\"\\t\"", "+", "str", "(", "c", "[", "'correctness'", "]", ".", "mean", "(", ")", ")", ")", "for", "example_idx", "in", "range", "(", "m", ")", ":", "# Index of the best attack for this example", "attack_idx", "=", "0", "# Find the winner", "for", "candidate_idx", "in", "range", "(", "1", ",", "num_attacks", ")", ":", "if", "goal", ".", "new_wins", "(", "criteria", "[", "attack_idx", "]", ",", "example_idx", ",", "criteria", "[", "candidate_idx", "]", ",", "example_idx", ")", ":", "attack_idx", "=", "candidate_idx", "# Copy the winner into the output", "out", "[", "example_idx", "]", "=", "adv_x_list", "[", "attack_idx", "]", "[", "example_idx", "]", "correctness", "[", "example_idx", "]", "=", "criteria", "[", "attack_idx", "]", "[", "'correctness'", "]", "[", "example_idx", "]", "confidence", "[", "example_idx", "]", "=", "criteria", "[", "attack_idx", "]", "[", "'confidence'", "]", "[", "example_idx", "]", "assert", "correctness", ".", "min", "(", ")", ">=", "0", "assert", "correctness", ".", "max", "(", ")", "<=", "1", "assert", "confidence", ".", "min", "(", ")", ">=", "0.", "assert", "confidence", ".", "max", "(", ")", "<=", "1.", "correctness", "=", "correctness", ".", "astype", "(", "'bool'", ")", "_logger", ".", "info", "(", "\"Accuracy on bundled examples: \"", "+", "str", "(", "correctness", ".", "mean", "(", ")", ")", ")", "report", "=", "ConfidenceReport", "(", ")", "report", "[", "'bundled'", "]", "=", "ConfidenceReportEntry", "(", "correctness", ",", "confidence", ")", "serial", ".", "save", "(", "report_path", ",", "report", ")", "assert", "report_path", ".", "endswith", "(", "'.joblib'", ")", "adv_x_path", "=", "report_path", "[", ":", "-", "len", "(", "'.joblib'", ")", "]", "+", "\"_adv_x.npy\"", "np", ".", "save", "(", "adv_x_path", ",", "out", ")" ]
A post-processor version of attack bundling, that chooses the strongest example from the output of multiple earlier bundling strategies. :param sess: tf.session.Session :param model: cleverhans.model.Model :param adv_x_list: list of numpy arrays Each entry in the list is the output of a previous bundler; it is an adversarial version of the whole dataset. :param y: numpy array containing true labels :param goal: AttackGoal to use to choose the best version of each adversarial example :param report_path: str, the path the report will be saved to :param batch_size: int, batch size
[ "A", "post", "-", "processor", "version", "of", "attack", "bundling", "that", "chooses", "the", "strongest", "example", "from", "the", "output", "of", "multiple", "earlier", "bundling", "strategies", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L1044-L1110
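A self-contained numpy sketch of the selection loop in `bundle_examples_with_goal`, with "strongest" simplified to "highest wrong-class confidence". The helper below is illustrative only; it vectorises what the quoted function does example by example via `goal.new_wins`.

import numpy as np

def pick_strongest(adv_x_list, wrong_confidence_list):
  # adv_x_list: list of (m, ...) arrays; wrong_confidence_list: list of (m,) arrays.
  stacked_conf = np.stack(wrong_confidence_list)   # (num_attacks, m)
  winner = np.argmax(stacked_conf, axis=0)         # index of the best attack per example
  stacked_adv = np.stack(adv_x_list)               # (num_attacks, m, ...)
  out = stacked_adv[winner, np.arange(stacked_adv.shape[1])]
  return out, winner

adv_a = np.zeros((3, 2))
adv_b = np.ones((3, 2))
conf_a = np.array([0.9, 0.1, 0.4])
conf_b = np.array([0.2, 0.8, 0.3])
out, winner = pick_strongest([adv_a, adv_b], [conf_a, conf_b])
print(winner)  # [0 1 0]: examples 0 and 2 come from attack A, example 1 from attack B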
28,591
tensorflow/cleverhans
cleverhans/attack_bundling.py
spsa_max_confidence_recipe
def spsa_max_confidence_recipe(sess, model, x, y, nb_classes, eps, clip_min, clip_max, nb_iter, report_path, spsa_samples=SPSA.DEFAULT_SPSA_SAMPLES, spsa_iters=SPSA.DEFAULT_SPSA_ITERS, eval_batch_size=BATCH_SIZE): """Runs the MaxConfidence attack using SPSA as the underlying optimizer. Even though this runs only one attack, it must be implemented as a bundler because SPSA supports only batch_size=1. The cleverhans.attacks.MaxConfidence attack internally multiplies the batch size by nb_classes, so it can't take SPSA as a base attacker. Insteader, we must bundle batch_size=1 calls using cleverhans.attack_bundling.MaxConfidence. References: https://openreview.net/forum?id=H1g0piA9tQ :param sess: tf.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param nb_classes: int, number of classes :param eps: float, maximum size of perturbation (measured by max norm) :param nb_iter: int, number of iterations for one version of PGD attacks (will also run another version with 25X more iterations) :param report_path: str, the path that the report will be saved to. :param eval_batch_size: int, batch size for evaluation (as opposed to making attacks) """ spsa = SPSA(model, sess) spsa_params = {"eps": eps, "clip_min" : clip_min, "clip_max" : clip_max, "nb_iter": nb_iter, "spsa_samples": spsa_samples, "spsa_iters": spsa_iters} attack_configs = [] dev_batch_size = 1 # The only batch size supported by SPSA batch_size = num_devices ones = tf.ones(dev_batch_size, tf.int32) for cls in range(nb_classes): cls_params = copy.copy(spsa_params) cls_params['y_target'] = tf.to_float(tf.one_hot(ones * cls, nb_classes)) cls_attack_config = AttackConfig(spsa, cls_params, "spsa_" + str(cls)) attack_configs.append(cls_attack_config) new_work_goal = {config: 1 for config in attack_configs} goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)] bundle_attacks(sess, model, x, y, attack_configs, goals, report_path, attack_batch_size=batch_size, eval_batch_size=eval_batch_size)
python
def spsa_max_confidence_recipe(sess, model, x, y, nb_classes, eps, clip_min, clip_max, nb_iter, report_path, spsa_samples=SPSA.DEFAULT_SPSA_SAMPLES, spsa_iters=SPSA.DEFAULT_SPSA_ITERS, eval_batch_size=BATCH_SIZE): """Runs the MaxConfidence attack using SPSA as the underlying optimizer. Even though this runs only one attack, it must be implemented as a bundler because SPSA supports only batch_size=1. The cleverhans.attacks.MaxConfidence attack internally multiplies the batch size by nb_classes, so it can't take SPSA as a base attacker. Insteader, we must bundle batch_size=1 calls using cleverhans.attack_bundling.MaxConfidence. References: https://openreview.net/forum?id=H1g0piA9tQ :param sess: tf.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param nb_classes: int, number of classes :param eps: float, maximum size of perturbation (measured by max norm) :param nb_iter: int, number of iterations for one version of PGD attacks (will also run another version with 25X more iterations) :param report_path: str, the path that the report will be saved to. :param eval_batch_size: int, batch size for evaluation (as opposed to making attacks) """ spsa = SPSA(model, sess) spsa_params = {"eps": eps, "clip_min" : clip_min, "clip_max" : clip_max, "nb_iter": nb_iter, "spsa_samples": spsa_samples, "spsa_iters": spsa_iters} attack_configs = [] dev_batch_size = 1 # The only batch size supported by SPSA batch_size = num_devices ones = tf.ones(dev_batch_size, tf.int32) for cls in range(nb_classes): cls_params = copy.copy(spsa_params) cls_params['y_target'] = tf.to_float(tf.one_hot(ones * cls, nb_classes)) cls_attack_config = AttackConfig(spsa, cls_params, "spsa_" + str(cls)) attack_configs.append(cls_attack_config) new_work_goal = {config: 1 for config in attack_configs} goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)] bundle_attacks(sess, model, x, y, attack_configs, goals, report_path, attack_batch_size=batch_size, eval_batch_size=eval_batch_size)
[ "def", "spsa_max_confidence_recipe", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "nb_classes", ",", "eps", ",", "clip_min", ",", "clip_max", ",", "nb_iter", ",", "report_path", ",", "spsa_samples", "=", "SPSA", ".", "DEFAULT_SPSA_SAMPLES", ",", "spsa_iters", "=", "SPSA", ".", "DEFAULT_SPSA_ITERS", ",", "eval_batch_size", "=", "BATCH_SIZE", ")", ":", "spsa", "=", "SPSA", "(", "model", ",", "sess", ")", "spsa_params", "=", "{", "\"eps\"", ":", "eps", ",", "\"clip_min\"", ":", "clip_min", ",", "\"clip_max\"", ":", "clip_max", ",", "\"nb_iter\"", ":", "nb_iter", ",", "\"spsa_samples\"", ":", "spsa_samples", ",", "\"spsa_iters\"", ":", "spsa_iters", "}", "attack_configs", "=", "[", "]", "dev_batch_size", "=", "1", "# The only batch size supported by SPSA", "batch_size", "=", "num_devices", "ones", "=", "tf", ".", "ones", "(", "dev_batch_size", ",", "tf", ".", "int32", ")", "for", "cls", "in", "range", "(", "nb_classes", ")", ":", "cls_params", "=", "copy", ".", "copy", "(", "spsa_params", ")", "cls_params", "[", "'y_target'", "]", "=", "tf", ".", "to_float", "(", "tf", ".", "one_hot", "(", "ones", "*", "cls", ",", "nb_classes", ")", ")", "cls_attack_config", "=", "AttackConfig", "(", "spsa", ",", "cls_params", ",", "\"spsa_\"", "+", "str", "(", "cls", ")", ")", "attack_configs", ".", "append", "(", "cls_attack_config", ")", "new_work_goal", "=", "{", "config", ":", "1", "for", "config", "in", "attack_configs", "}", "goals", "=", "[", "MaxConfidence", "(", "t", "=", "1.", ",", "new_work_goal", "=", "new_work_goal", ")", "]", "bundle_attacks", "(", "sess", ",", "model", ",", "x", ",", "y", ",", "attack_configs", ",", "goals", ",", "report_path", ",", "attack_batch_size", "=", "batch_size", ",", "eval_batch_size", "=", "eval_batch_size", ")" ]
Runs the MaxConfidence attack using SPSA as the underlying optimizer. Even though this runs only one attack, it must be implemented as a bundler because SPSA supports only batch_size=1. The cleverhans.attacks.MaxConfidence attack internally multiplies the batch size by nb_classes, so it can't take SPSA as a base attacker. Instead, we must bundle batch_size=1 calls using cleverhans.attack_bundling.MaxConfidence. References: https://openreview.net/forum?id=H1g0piA9tQ :param sess: tf.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param nb_classes: int, number of classes :param eps: float, maximum size of perturbation (measured by max norm) :param nb_iter: int, number of iterations for one version of PGD attacks (will also run another version with 25X more iterations) :param report_path: str, the path that the report will be saved to. :param eval_batch_size: int, batch size for evaluation (as opposed to making attacks)
[ "Runs", "the", "MaxConfidence", "attack", "using", "SPSA", "as", "the", "underlying", "optimizer", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L1112-L1156
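The recipe above builds one targeted configuration per class; the toy below reproduces just the target construction in plain numpy as an illustration (the real code builds the targets with tf.one_hot / tf.to_float on a batch of size 1, the only batch size SPSA supports).

import numpy as np

def per_class_targets(nb_classes, batch_size=1):
  # batch_size=1 mirrors dev_batch_size in the recipe above.
  targets = []
  for cls in range(nb_classes):
    y_target = np.zeros((batch_size, nb_classes), dtype=np.float32)
    y_target[:, cls] = 1.
    targets.append(y_target)
  return targets

for cls, y_target in enumerate(per_class_targets(nb_classes=3)):
  print("spsa_" + str(cls), y_target)  # one one-hot target per class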
28,592
tensorflow/cleverhans
cleverhans/attack_bundling.py
AttackGoal.get_criteria
def get_criteria(self, sess, model, advx, y, batch_size=BATCH_SIZE): """ Returns a dictionary mapping the name of each criterion to a NumPy array containing the value of that criterion for each adversarial example. Subclasses can add extra criteria by implementing the `extra_criteria` method. :param sess: tf.session.Session :param model: cleverhans.model.Model :param adv_x: numpy array containing the adversarial examples made so far by earlier work in the bundling process :param y: numpy array containing true labels :param batch_size: int, batch size """ names, factory = self.extra_criteria() factory = _CriteriaFactory(model, factory) results = batch_eval_multi_worker(sess, factory, [advx, y], batch_size=batch_size, devices=devices) names = ['correctness', 'confidence'] + names out = dict(safe_zip(names, results)) return out
python
def get_criteria(self, sess, model, advx, y, batch_size=BATCH_SIZE): """ Returns a dictionary mapping the name of each criterion to a NumPy array containing the value of that criterion for each adversarial example. Subclasses can add extra criteria by implementing the `extra_criteria` method. :param sess: tf.session.Session :param model: cleverhans.model.Model :param adv_x: numpy array containing the adversarial examples made so far by earlier work in the bundling process :param y: numpy array containing true labels :param batch_size: int, batch size """ names, factory = self.extra_criteria() factory = _CriteriaFactory(model, factory) results = batch_eval_multi_worker(sess, factory, [advx, y], batch_size=batch_size, devices=devices) names = ['correctness', 'confidence'] + names out = dict(safe_zip(names, results)) return out
[ "def", "get_criteria", "(", "self", ",", "sess", ",", "model", ",", "advx", ",", "y", ",", "batch_size", "=", "BATCH_SIZE", ")", ":", "names", ",", "factory", "=", "self", ".", "extra_criteria", "(", ")", "factory", "=", "_CriteriaFactory", "(", "model", ",", "factory", ")", "results", "=", "batch_eval_multi_worker", "(", "sess", ",", "factory", ",", "[", "advx", ",", "y", "]", ",", "batch_size", "=", "batch_size", ",", "devices", "=", "devices", ")", "names", "=", "[", "'correctness'", ",", "'confidence'", "]", "+", "names", "out", "=", "dict", "(", "safe_zip", "(", "names", ",", "results", ")", ")", "return", "out" ]
Returns a dictionary mapping the name of each criterion to a NumPy array containing the value of that criterion for each adversarial example. Subclasses can add extra criteria by implementing the `extra_criteria` method. :param sess: tf.session.Session :param model: cleverhans.model.Model :param adv_x: numpy array containing the adversarial examples made so far by earlier work in the bundling process :param y: numpy array containing true labels :param batch_size: int, batch size
[ "Returns", "a", "dictionary", "mapping", "the", "name", "of", "each", "criterion", "to", "a", "NumPy", "array", "containing", "the", "value", "of", "that", "criterion", "for", "each", "adversarial", "example", ".", "Subclasses", "can", "add", "extra", "criteria", "by", "implementing", "the", "extra_criteria", "method", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L532-L554
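A self-contained numpy sketch of the two criteria every goal receives from `get_criteria`: per-example correctness and the confidence of the predicted class. The real method computes them on the TF graph via `batch_eval_multi_worker`; the toy below assumes softmax probabilities and one-hot labels are already available as arrays.

import numpy as np

def toy_criteria(probs, y):
  # probs: (m, nb_classes) softmax outputs; y: (m, nb_classes) one-hot labels.
  preds = np.argmax(probs, axis=1)
  correctness = preds == np.argmax(y, axis=1)
  confidence = probs[np.arange(probs.shape[0]), preds]  # probability of the prediction
  return {'correctness': correctness, 'confidence': confidence}

probs = np.array([[0.7, 0.3], [0.2, 0.8]])
y = np.array([[1., 0.], [1., 0.]])
print(toy_criteria(probs, y))  # first example correct at 0.7, second wrong at 0.8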
28,593
tensorflow/cleverhans
cleverhans/attack_bundling.py
AttackGoal.request_examples
def request_examples(self, attack_config, criteria, run_counts, batch_size): """ Returns a numpy array of integer example indices to run in the next batch. """ raise NotImplementedError(str(type(self)) + "needs to implement request_examples")
python
def request_examples(self, attack_config, criteria, run_counts, batch_size): """ Returns a numpy array of integer example indices to run in the next batch. """ raise NotImplementedError(str(type(self)) + "needs to implement request_examples")
[ "def", "request_examples", "(", "self", ",", "attack_config", ",", "criteria", ",", "run_counts", ",", "batch_size", ")", ":", "raise", "NotImplementedError", "(", "str", "(", "type", "(", "self", ")", ")", "+", "\"needs to implement request_examples\"", ")" ]
Returns a numpy array of integer example indices to run in the next batch.
[ "Returns", "a", "numpy", "array", "of", "integer", "example", "indices", "to", "run", "in", "the", "next", "batch", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L563-L568
28,594
tensorflow/cleverhans
cleverhans/attack_bundling.py
Misclassify.filter
def filter(self, run_counts, criteria): """ Return run counts only for examples that are still correctly classified """ correctness = criteria['correctness'] assert correctness.dtype == np.bool filtered_counts = deep_copy(run_counts) for key in filtered_counts: filtered_counts[key] = filtered_counts[key][correctness] return filtered_counts
python
def filter(self, run_counts, criteria): """ Return run counts only for examples that are still correctly classified """ correctness = criteria['correctness'] assert correctness.dtype == np.bool filtered_counts = deep_copy(run_counts) for key in filtered_counts: filtered_counts[key] = filtered_counts[key][correctness] return filtered_counts
[ "def", "filter", "(", "self", ",", "run_counts", ",", "criteria", ")", ":", "correctness", "=", "criteria", "[", "'correctness'", "]", "assert", "correctness", ".", "dtype", "==", "np", ".", "bool", "filtered_counts", "=", "deep_copy", "(", "run_counts", ")", "for", "key", "in", "filtered_counts", ":", "filtered_counts", "[", "key", "]", "=", "filtered_counts", "[", "key", "]", "[", "correctness", "]", "return", "filtered_counts" ]
Return run counts only for examples that are still correctly classified
[ "Return", "run", "counts", "only", "for", "examples", "that", "are", "still", "correctly", "classified" ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L690-L699
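The filtering idea used by `Misclassify.filter` (and by `MaxConfidence.filter` in the next record) boils down to masking every per-config count array with the same boolean vector. A self-contained numpy sketch, with `copy.deepcopy` standing in for the repository's `deep_copy` helper:

import copy

import numpy as np

def filter_counts(run_counts, keep_mask):
  # Apply the same boolean mask to every per-config count array.
  filtered = copy.deepcopy(run_counts)
  for key in filtered:
    filtered[key] = filtered[key][keep_mask]
  return filtered

run_counts = {'fgsm': np.array([3, 0, 1, 2]), 'pgd': np.array([1, 1, 0, 0])}
correctness = np.array([True, False, True, False])
print(filter_counts(run_counts, correctness))  # counts kept only for examples 0 and 2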
28,595
tensorflow/cleverhans
cleverhans/attack_bundling.py
MaxConfidence.filter
def filter(self, run_counts, criteria): """ Return the counts for only those examples that are below the threshold """ wrong_confidence = criteria['wrong_confidence'] below_t = wrong_confidence <= self.t filtered_counts = deep_copy(run_counts) for key in filtered_counts: filtered_counts[key] = filtered_counts[key][below_t] return filtered_counts
python
def filter(self, run_counts, criteria): """ Return the counts for only those examples that are below the threshold """ wrong_confidence = criteria['wrong_confidence'] below_t = wrong_confidence <= self.t filtered_counts = deep_copy(run_counts) for key in filtered_counts: filtered_counts[key] = filtered_counts[key][below_t] return filtered_counts
[ "def", "filter", "(", "self", ",", "run_counts", ",", "criteria", ")", ":", "wrong_confidence", "=", "criteria", "[", "'wrong_confidence'", "]", "below_t", "=", "wrong_confidence", "<=", "self", ".", "t", "filtered_counts", "=", "deep_copy", "(", "run_counts", ")", "for", "key", "in", "filtered_counts", ":", "filtered_counts", "[", "key", "]", "=", "filtered_counts", "[", "key", "]", "[", "below_t", "]", "return", "filtered_counts" ]
Return the counts for only those examples that are below the threshold
[ "Return", "the", "counts", "for", "only", "those", "examples", "that", "are", "below", "the", "threshold" ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L799-L808
28,596
tensorflow/cleverhans
cleverhans/attacks/bapp.py
clip_image
def clip_image(image, clip_min, clip_max): """ Clip an image, or an image batch, with upper and lower threshold. """ return np.minimum(np.maximum(clip_min, image), clip_max)
python
def clip_image(image, clip_min, clip_max): """ Clip an image, or an image batch, with upper and lower threshold. """ return np.minimum(np.maximum(clip_min, image), clip_max)
[ "def", "clip_image", "(", "image", ",", "clip_min", ",", "clip_max", ")", ":", "return", "np", ".", "minimum", "(", "np", ".", "maximum", "(", "clip_min", ",", "image", ")", ",", "clip_max", ")" ]
Clip an image, or an image batch, with upper and lower threshold.
[ "Clip", "an", "image", "or", "an", "image", "batch", "with", "upper", "and", "lower", "threshold", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/bapp.py#L353-L355
28,597
tensorflow/cleverhans
cleverhans/attacks/bapp.py
compute_distance
def compute_distance(x_ori, x_pert, constraint='l2'): """ Compute the distance between two images. """ if constraint == 'l2': dist = np.linalg.norm(x_ori - x_pert) elif constraint == 'linf': dist = np.max(abs(x_ori - x_pert)) return dist
python
def compute_distance(x_ori, x_pert, constraint='l2'): """ Compute the distance between two images. """ if constraint == 'l2': dist = np.linalg.norm(x_ori - x_pert) elif constraint == 'linf': dist = np.max(abs(x_ori - x_pert)) return dist
[ "def", "compute_distance", "(", "x_ori", ",", "x_pert", ",", "constraint", "=", "'l2'", ")", ":", "if", "constraint", "==", "'l2'", ":", "dist", "=", "np", ".", "linalg", ".", "norm", "(", "x_ori", "-", "x_pert", ")", "elif", "constraint", "==", "'linf'", ":", "dist", "=", "np", ".", "max", "(", "abs", "(", "x_ori", "-", "x_pert", ")", ")", "return", "dist" ]
Compute the distance between two images.
[ "Compute", "the", "distance", "between", "two", "images", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/bapp.py#L358-L364
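A quick self-contained check of the two distance choices, mirroring the expressions in the quoted function:

import numpy as np

x_ori = np.array([0.0, 0.0, 0.0])
x_pert = np.array([0.3, -0.4, 0.0])
print(np.linalg.norm(x_ori - x_pert))   # l2 distance: 0.5
print(np.max(np.abs(x_ori - x_pert)))   # linf distance: 0.4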
28,598
tensorflow/cleverhans
cleverhans/attacks/bapp.py
approximate_gradient
def approximate_gradient(decision_function, sample, num_evals, delta, constraint, shape, clip_min, clip_max): """ Gradient direction estimation """ # Generate random vectors. noise_shape = [num_evals] + list(shape) if constraint == 'l2': rv = np.random.randn(*noise_shape) elif constraint == 'linf': rv = np.random.uniform(low=-1, high=1, size=noise_shape) axis = tuple(range(1, 1 + len(shape))) rv = rv / np.sqrt(np.sum(rv ** 2, axis=axis, keepdims=True)) perturbed = sample + delta * rv perturbed = clip_image(perturbed, clip_min, clip_max) rv = (perturbed - sample) / delta # query the model. decisions = decision_function(perturbed) decision_shape = [len(decisions)] + [1] * len(shape) fval = 2 * decisions.astype(np_dtype).reshape(decision_shape) - 1.0 # Baseline subtraction (when fval differs) if np.mean(fval) == 1.0: # label changes. gradf = np.mean(rv, axis=0) elif np.mean(fval) == -1.0: # label not change. gradf = - np.mean(rv, axis=0) else: fval = fval - np.mean(fval) gradf = np.mean(fval * rv, axis=0) # Get the gradient direction. gradf = gradf / np.linalg.norm(gradf) return gradf
python
def approximate_gradient(decision_function, sample, num_evals, delta, constraint, shape, clip_min, clip_max): """ Gradient direction estimation """ # Generate random vectors. noise_shape = [num_evals] + list(shape) if constraint == 'l2': rv = np.random.randn(*noise_shape) elif constraint == 'linf': rv = np.random.uniform(low=-1, high=1, size=noise_shape) axis = tuple(range(1, 1 + len(shape))) rv = rv / np.sqrt(np.sum(rv ** 2, axis=axis, keepdims=True)) perturbed = sample + delta * rv perturbed = clip_image(perturbed, clip_min, clip_max) rv = (perturbed - sample) / delta # query the model. decisions = decision_function(perturbed) decision_shape = [len(decisions)] + [1] * len(shape) fval = 2 * decisions.astype(np_dtype).reshape(decision_shape) - 1.0 # Baseline subtraction (when fval differs) if np.mean(fval) == 1.0: # label changes. gradf = np.mean(rv, axis=0) elif np.mean(fval) == -1.0: # label not change. gradf = - np.mean(rv, axis=0) else: fval = fval - np.mean(fval) gradf = np.mean(fval * rv, axis=0) # Get the gradient direction. gradf = gradf / np.linalg.norm(gradf) return gradf
[ "def", "approximate_gradient", "(", "decision_function", ",", "sample", ",", "num_evals", ",", "delta", ",", "constraint", ",", "shape", ",", "clip_min", ",", "clip_max", ")", ":", "# Generate random vectors.", "noise_shape", "=", "[", "num_evals", "]", "+", "list", "(", "shape", ")", "if", "constraint", "==", "'l2'", ":", "rv", "=", "np", ".", "random", ".", "randn", "(", "*", "noise_shape", ")", "elif", "constraint", "==", "'linf'", ":", "rv", "=", "np", ".", "random", ".", "uniform", "(", "low", "=", "-", "1", ",", "high", "=", "1", ",", "size", "=", "noise_shape", ")", "axis", "=", "tuple", "(", "range", "(", "1", ",", "1", "+", "len", "(", "shape", ")", ")", ")", "rv", "=", "rv", "/", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "rv", "**", "2", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", ")", "perturbed", "=", "sample", "+", "delta", "*", "rv", "perturbed", "=", "clip_image", "(", "perturbed", ",", "clip_min", ",", "clip_max", ")", "rv", "=", "(", "perturbed", "-", "sample", ")", "/", "delta", "# query the model.", "decisions", "=", "decision_function", "(", "perturbed", ")", "decision_shape", "=", "[", "len", "(", "decisions", ")", "]", "+", "[", "1", "]", "*", "len", "(", "shape", ")", "fval", "=", "2", "*", "decisions", ".", "astype", "(", "np_dtype", ")", ".", "reshape", "(", "decision_shape", ")", "-", "1.0", "# Baseline subtraction (when fval differs)", "if", "np", ".", "mean", "(", "fval", ")", "==", "1.0", ":", "# label changes.", "gradf", "=", "np", ".", "mean", "(", "rv", ",", "axis", "=", "0", ")", "elif", "np", ".", "mean", "(", "fval", ")", "==", "-", "1.0", ":", "# label not change.", "gradf", "=", "-", "np", ".", "mean", "(", "rv", ",", "axis", "=", "0", ")", "else", ":", "fval", "=", "fval", "-", "np", ".", "mean", "(", "fval", ")", "gradf", "=", "np", ".", "mean", "(", "fval", "*", "rv", ",", "axis", "=", "0", ")", "# Get the gradient direction.", "gradf", "=", "gradf", "/", "np", ".", "linalg", ".", "norm", "(", "gradf", ")", "return", "gradf" ]
Gradient direction estimation
[ "Gradient", "direction", "estimation" ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/bapp.py#L366-L399
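The gradient-direction estimate above can be exercised end to end with a made-up decision function. The following self-contained numpy sketch keeps the core recipe (unit-norm random probes, decisions mapped to +1/-1, weighted average, final normalisation) and omits the clipping and baseline-subtraction details for brevity; the toy boundary is an assumption for illustration.

import numpy as np

def toy_decision(x_batch):
  # Treat a point as "adversarial" whenever its first coordinate exceeds 0.5.
  return (x_batch[:, 0] > 0.5).astype(np.float64)

rng = np.random.RandomState(0)
sample = np.array([0.5, 0.0])                              # a point on the toy boundary
rv = rng.randn(200, 2)
rv /= np.sqrt(np.sum(rv ** 2, axis=1, keepdims=True))      # unit-norm random probes
delta = 0.1
fval = 2 * toy_decision(sample + delta * rv) - 1.0         # map decisions to +1 / -1
gradf = np.mean(fval[:, None] * rv, axis=0)                # weighted average of probes
gradf /= np.linalg.norm(gradf)
print(gradf)  # close to [1, 0], the direction that crosses the toy boundary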
28,599
tensorflow/cleverhans
cleverhans/attacks/bapp.py
binary_search_batch
def binary_search_batch(original_image, perturbed_images, decision_function, shape, constraint, theta): """ Binary search to approach the boundary. """ # Compute distance between each of perturbed image and original image. dists_post_update = np.array([ compute_distance( original_image, perturbed_image, constraint ) for perturbed_image in perturbed_images]) # Choose upper thresholds in binary searchs based on constraint. if constraint == 'linf': highs = dists_post_update # Stopping criteria. thresholds = np.minimum(dists_post_update * theta, theta) else: highs = np.ones(len(perturbed_images)) thresholds = theta lows = np.zeros(len(perturbed_images)) while np.max((highs - lows) / thresholds) > 1: # projection to mids. mids = (highs + lows) / 2.0 mid_images = project(original_image, perturbed_images, mids, shape, constraint) # Update highs and lows based on model decisions. decisions = decision_function(mid_images) lows = np.where(decisions == 0, mids, lows) highs = np.where(decisions == 1, mids, highs) out_images = project(original_image, perturbed_images, highs, shape, constraint) # Compute distance of the output image to select the best choice. # (only used when stepsize_search is grid_search.) dists = np.array([ compute_distance( original_image, out_image, constraint ) for out_image in out_images]) idx = np.argmin(dists) dist = dists_post_update[idx] out_image = out_images[idx] return out_image, dist
python
def binary_search_batch(original_image, perturbed_images, decision_function, shape, constraint, theta): """ Binary search to approach the boundary. """ # Compute distance between each of perturbed image and original image. dists_post_update = np.array([ compute_distance( original_image, perturbed_image, constraint ) for perturbed_image in perturbed_images]) # Choose upper thresholds in binary searchs based on constraint. if constraint == 'linf': highs = dists_post_update # Stopping criteria. thresholds = np.minimum(dists_post_update * theta, theta) else: highs = np.ones(len(perturbed_images)) thresholds = theta lows = np.zeros(len(perturbed_images)) while np.max((highs - lows) / thresholds) > 1: # projection to mids. mids = (highs + lows) / 2.0 mid_images = project(original_image, perturbed_images, mids, shape, constraint) # Update highs and lows based on model decisions. decisions = decision_function(mid_images) lows = np.where(decisions == 0, mids, lows) highs = np.where(decisions == 1, mids, highs) out_images = project(original_image, perturbed_images, highs, shape, constraint) # Compute distance of the output image to select the best choice. # (only used when stepsize_search is grid_search.) dists = np.array([ compute_distance( original_image, out_image, constraint ) for out_image in out_images]) idx = np.argmin(dists) dist = dists_post_update[idx] out_image = out_images[idx] return out_image, dist
[ "def", "binary_search_batch", "(", "original_image", ",", "perturbed_images", ",", "decision_function", ",", "shape", ",", "constraint", ",", "theta", ")", ":", "# Compute distance between each of perturbed image and original image.", "dists_post_update", "=", "np", ".", "array", "(", "[", "compute_distance", "(", "original_image", ",", "perturbed_image", ",", "constraint", ")", "for", "perturbed_image", "in", "perturbed_images", "]", ")", "# Choose upper thresholds in binary searchs based on constraint.", "if", "constraint", "==", "'linf'", ":", "highs", "=", "dists_post_update", "# Stopping criteria.", "thresholds", "=", "np", ".", "minimum", "(", "dists_post_update", "*", "theta", ",", "theta", ")", "else", ":", "highs", "=", "np", ".", "ones", "(", "len", "(", "perturbed_images", ")", ")", "thresholds", "=", "theta", "lows", "=", "np", ".", "zeros", "(", "len", "(", "perturbed_images", ")", ")", "while", "np", ".", "max", "(", "(", "highs", "-", "lows", ")", "/", "thresholds", ")", ">", "1", ":", "# projection to mids.", "mids", "=", "(", "highs", "+", "lows", ")", "/", "2.0", "mid_images", "=", "project", "(", "original_image", ",", "perturbed_images", ",", "mids", ",", "shape", ",", "constraint", ")", "# Update highs and lows based on model decisions.", "decisions", "=", "decision_function", "(", "mid_images", ")", "lows", "=", "np", ".", "where", "(", "decisions", "==", "0", ",", "mids", ",", "lows", ")", "highs", "=", "np", ".", "where", "(", "decisions", "==", "1", ",", "mids", ",", "highs", ")", "out_images", "=", "project", "(", "original_image", ",", "perturbed_images", ",", "highs", ",", "shape", ",", "constraint", ")", "# Compute distance of the output image to select the best choice.", "# (only used when stepsize_search is grid_search.)", "dists", "=", "np", ".", "array", "(", "[", "compute_distance", "(", "original_image", ",", "out_image", ",", "constraint", ")", "for", "out_image", "in", "out_images", "]", ")", "idx", "=", "np", ".", "argmin", "(", "dists", ")", "dist", "=", "dists_post_update", "[", "idx", "]", "out_image", "=", "out_images", "[", "idx", "]", "return", "out_image", ",", "dist" ]
Binary search to approach the boundary.
[ "Binary", "search", "to", "approach", "the", "boundary", "." ]
97488e215760547b81afc53f5e5de8ba7da5bd98
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/bapp.py#L417-L468
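To see the binary search in isolation, here is a self-contained numpy toy for a single example and the l2-style blending used above: alpha interpolates between the original and the perturbed image, and the search finds the smallest alpha that is still adversarial. The decision function and threshold are made up for illustration.

import numpy as np

def toy_decision(x):
  # Treat a point as "adversarial" whenever its first coordinate exceeds 0.7.
  return x[0] > 0.7

original = np.array([0.0, 0.0])
perturbed = np.array([1.0, 0.0])     # starts on the adversarial side of the boundary
low, high, theta = 0.0, 1.0, 1e-3
while (high - low) / theta > 1:
  mid = (high + low) / 2.0
  mid_image = (1 - mid) * original + mid * perturbed
  if toy_decision(mid_image):
    high = mid                        # still adversarial: move towards the original
  else:
    low = mid                         # no longer adversarial: move back out
boundary_point = (1 - high) * original + high * perturbed
print(boundary_point)  # close to [0.7, 0.], right at the toy decision boundary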