python_code: string (lengths 0 – 780k)
repo_name: string (lengths 7 – 38)
file_path: string (lengths 5 – 103)
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Code for constructing the model."""
from typing import Any, Mapping, Optional, Union

from absl import logging
from alphafold.common import confidence
from alphafold.model import features
from alphafold.model import modules
from alphafold.model import modules_multimer
import haiku as hk
import jax
import ml_collections
import numpy as np
import tensorflow.compat.v1 as tf
import tree


def get_confidence_metrics(
    prediction_result: Mapping[str, Any],
    multimer_mode: bool) -> Mapping[str, Any]:
  """Post processes prediction_result to get confidence metrics."""
  confidence_metrics = {}
  confidence_metrics['plddt'] = confidence.compute_plddt(
      prediction_result['predicted_lddt']['logits'])
  if 'predicted_aligned_error' in prediction_result:
    confidence_metrics.update(confidence.compute_predicted_aligned_error(
        logits=prediction_result['predicted_aligned_error']['logits'],
        breaks=prediction_result['predicted_aligned_error']['breaks']))
    confidence_metrics['ptm'] = confidence.predicted_tm_score(
        logits=prediction_result['predicted_aligned_error']['logits'],
        breaks=prediction_result['predicted_aligned_error']['breaks'],
        asym_id=None)
    if multimer_mode:
      # Compute the ipTM only for the multimer model.
      confidence_metrics['iptm'] = confidence.predicted_tm_score(
          logits=prediction_result['predicted_aligned_error']['logits'],
          breaks=prediction_result['predicted_aligned_error']['breaks'],
          asym_id=prediction_result['predicted_aligned_error']['asym_id'],
          interface=True)
      confidence_metrics['ranking_confidence'] = (
          0.8 * confidence_metrics['iptm'] + 0.2 * confidence_metrics['ptm'])

  if not multimer_mode:
    # Monomer models use mean pLDDT for model ranking.
    confidence_metrics['ranking_confidence'] = np.mean(
        confidence_metrics['plddt'])

  return confidence_metrics


class RunModel:
  """Container for JAX model."""

  def __init__(self,
               config: ml_collections.ConfigDict,
               params: Optional[Mapping[str, Mapping[str, jax.Array]]] = None):
    self.config = config
    self.params = params
    self.multimer_mode = config.model.global_config.multimer_mode

    if self.multimer_mode:
      def _forward_fn(batch):
        model = modules_multimer.AlphaFold(self.config.model)
        return model(
            batch,
            is_training=False)
    else:
      def _forward_fn(batch):
        model = modules.AlphaFold(self.config.model)
        return model(
            batch,
            is_training=False,
            compute_loss=False,
            ensemble_representations=True)

    self.apply = jax.jit(hk.transform(_forward_fn).apply)
    self.init = jax.jit(hk.transform(_forward_fn).init)

  def init_params(self, feat: features.FeatureDict, random_seed: int = 0):
    """Initializes the model parameters.

    If none were provided when this class was instantiated then the parameters
    are randomly initialized.

    Args:
      feat: A dictionary of NumPy feature arrays as output by
        RunModel.process_features.
      random_seed: A random seed to use to initialize the parameters if none
        were set when this class was initialized.
    """
    if not self.params:
      # Init params randomly.
      rng = jax.random.PRNGKey(random_seed)
      self.params = hk.data_structures.to_mutable_dict(
          self.init(rng, feat))
      logging.warning('Initialized parameters randomly')

  def process_features(
      self,
      raw_features: Union[tf.train.Example, features.FeatureDict],
      random_seed: int) -> features.FeatureDict:
    """Processes features to prepare for feeding them into the model.

    Args:
      raw_features: The output of the data pipeline either as a dict of NumPy
        arrays or as a tf.train.Example.
      random_seed: The random seed to use when processing the features.

    Returns:
      A dict of NumPy feature arrays suitable for feeding into the model.
    """
    if self.multimer_mode:
      return raw_features

    # Single-chain mode.
    if isinstance(raw_features, dict):
      return features.np_example_to_features(
          np_example=raw_features,
          config=self.config,
          random_seed=random_seed)
    else:
      return features.tf_example_to_features(
          tf_example=raw_features,
          config=self.config,
          random_seed=random_seed)

  def eval_shape(self, feat: features.FeatureDict) -> jax.ShapeDtypeStruct:
    self.init_params(feat)
    logging.info('Running eval_shape with shape(feat) = %s',
                 tree.map_structure(lambda x: x.shape, feat))
    shape = jax.eval_shape(self.apply, self.params, jax.random.PRNGKey(0), feat)
    logging.info('Output shape was %s', shape)
    return shape

  def predict(self,
              feat: features.FeatureDict,
              random_seed: int,
              ) -> Mapping[str, Any]:
    """Makes a prediction by inferencing the model on the provided features.

    Args:
      feat: A dictionary of NumPy feature arrays as output by
        RunModel.process_features.
      random_seed: The random seed to use when running the model. In the
        multimer model this controls the MSA sampling.

    Returns:
      A dictionary of model outputs.
    """
    self.init_params(feat)
    logging.info('Running predict with shape(feat) = %s',
                 tree.map_structure(lambda x: x.shape, feat))
    result = self.apply(self.params, jax.random.PRNGKey(random_seed), feat)

    # This block is to ensure benchmark timings are accurate. Some blocking is
    # already happening when computing get_confidence_metrics, and this ensures
    # all outputs are blocked on.
    jax.tree_map(lambda x: x.block_until_ready(), result)
    result.update(
        get_confidence_metrics(result, multimer_mode=self.multimer_mode))
    logging.info('Output shape was %s',
                 tree.map_structure(lambda x: x.shape, result))
    return result
repo_name: alphafold-main
file_path: alphafold/model/model.py
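The ranking rule in get_confidence_metrics above is simple once the per-head confidences exist: monomer models rank by mean pLDDT, multimer models by a 0.8/0.2 blend of ipTM and pTM. A minimal sketch of just that rule, using hypothetical, already-computed metric values (in the real pipeline they come from confidence.compute_plddt and confidence.predicted_tm_score):

import numpy as np

# Hypothetical, already-computed confidence values; in the real pipeline these
# come from confidence.compute_plddt and confidence.predicted_tm_score.
plddt = np.array([85.2, 90.1, 78.4])  # per-residue pLDDT, on a 0-100 scale
ptm = 0.82                            # predicted TM-score
iptm = 0.74                           # interface predicted TM-score (multimer)


def ranking_confidence(plddt, ptm=None, iptm=None, multimer_mode=False):
  """Mirrors the ranking rule used in get_confidence_metrics (sketch only)."""
  if multimer_mode:
    # Multimer models rank by a weighted ipTM/pTM combination.
    return 0.8 * iptm + 0.2 * ptm
  # Monomer models rank by mean per-residue pLDDT.
  return float(np.mean(plddt))


print(ranking_confidence(plddt))                                 # ~84.57
print(ranking_confidence(plddt, ptm, iptm, multimer_mode=True))  # 0.756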
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Modules and utilities for the structure module.""" import functools from typing import Dict from alphafold.common import residue_constants from alphafold.model import all_atom from alphafold.model import common_modules from alphafold.model import prng from alphafold.model import quat_affine from alphafold.model import r3 from alphafold.model import utils import haiku as hk import jax import jax.numpy as jnp import ml_collections import numpy as np def squared_difference(x, y): return jnp.square(x - y) class InvariantPointAttention(hk.Module): """Invariant Point attention module. The high-level idea is that this attention module works over a set of points and associated orientations in 3D space (e.g. protein residues). Each residue outputs a set of queries and keys as points in their local reference frame. The attention is then defined as the euclidean distance between the queries and keys in the global frame. Jumper et al. (2021) Suppl. Alg. 22 "InvariantPointAttention" """ def __init__(self, config, global_config, dist_epsilon=1e-8, name='invariant_point_attention'): """Initialize. Args: config: Structure Module Config global_config: Global Config of Model. dist_epsilon: Small value to avoid NaN in distance calculation. name: Haiku Module name. """ super().__init__(name=name) self._dist_epsilon = dist_epsilon self._zero_initialize_last = global_config.zero_init self.config = config self.global_config = global_config def __call__(self, inputs_1d, inputs_2d, mask, affine): """Compute geometry-aware attention. Given a set of query residues (defined by affines and associated scalar features), this function computes geometry-aware attention between the query residues and target residues. The residues produce points in their local reference frame, which are converted into the global frame in order to compute attention via euclidean distance. Equivalently, the target residues produce points in their local frame to be used as attention values, which are converted into the query residues' local frames. Args: inputs_1d: (N, C) 1D input embedding that is the basis for the scalar queries. inputs_2d: (N, M, C') 2D input embedding, used for biases and values. mask: (N, 1) mask to indicate which elements of inputs_1d participate in the attention. affine: QuatAffine object describing the position and orientation of every element in inputs_1d. Returns: Transformation of the input embedding. """ num_residues, _ = inputs_1d.shape # Improve readability by removing a large number of 'self's. 
num_head = self.config.num_head num_scalar_qk = self.config.num_scalar_qk num_point_qk = self.config.num_point_qk num_scalar_v = self.config.num_scalar_v num_point_v = self.config.num_point_v num_output = self.config.num_channel assert num_scalar_qk > 0 assert num_point_qk > 0 assert num_point_v > 0 # Construct scalar queries of shape: # [num_query_residues, num_head, num_points] q_scalar = common_modules.Linear( num_head * num_scalar_qk, name='q_scalar')( inputs_1d) q_scalar = jnp.reshape( q_scalar, [num_residues, num_head, num_scalar_qk]) # Construct scalar keys/values of shape: # [num_target_residues, num_head, num_points] kv_scalar = common_modules.Linear( num_head * (num_scalar_v + num_scalar_qk), name='kv_scalar')( inputs_1d) kv_scalar = jnp.reshape(kv_scalar, [num_residues, num_head, num_scalar_v + num_scalar_qk]) k_scalar, v_scalar = jnp.split(kv_scalar, [num_scalar_qk], axis=-1) # Construct query points of shape: # [num_residues, num_head, num_point_qk] # First construct query points in local frame. q_point_local = common_modules.Linear( num_head * 3 * num_point_qk, name='q_point_local')( inputs_1d) q_point_local = jnp.split(q_point_local, 3, axis=-1) # Project query points into global frame. q_point_global = affine.apply_to_point(q_point_local, extra_dims=1) # Reshape query point for later use. q_point = [ jnp.reshape(x, [num_residues, num_head, num_point_qk]) for x in q_point_global] # Construct key and value points. # Key points have shape [num_residues, num_head, num_point_qk] # Value points have shape [num_residues, num_head, num_point_v] # Construct key and value points in local frame. kv_point_local = common_modules.Linear( num_head * 3 * (num_point_qk + num_point_v), name='kv_point_local')( inputs_1d) kv_point_local = jnp.split(kv_point_local, 3, axis=-1) # Project key and value points into global frame. kv_point_global = affine.apply_to_point(kv_point_local, extra_dims=1) kv_point_global = [ jnp.reshape(x, [num_residues, num_head, (num_point_qk + num_point_v)]) for x in kv_point_global] # Split key and value points. k_point, v_point = list( zip(*[ jnp.split(x, [num_point_qk,], axis=-1) for x in kv_point_global ])) # We assume that all queries and keys come iid from N(0, 1) distribution # and compute the variances of the attention logits. # Each scalar pair (q, k) contributes Var q*k = 1 scalar_variance = max(num_scalar_qk, 1) * 1. # Each point pair (q, k) contributes Var [0.5 ||q||^2 - <q, k>] = 9 / 2 point_variance = max(num_point_qk, 1) * 9. / 2 # Allocate equal variance to scalar, point and attention 2d parts so that # the sum is 1. num_logit_terms = 3 scalar_weights = np.sqrt(1.0 / (num_logit_terms * scalar_variance)) point_weights = np.sqrt(1.0 / (num_logit_terms * point_variance)) attention_2d_weights = np.sqrt(1.0 / (num_logit_terms)) # Trainable per-head weights for points. trainable_point_weights = jax.nn.softplus(hk.get_parameter( 'trainable_point_weights', shape=[num_head], # softplus^{-1} (1) init=hk.initializers.Constant(np.log(np.exp(1.) 
- 1.)))) point_weights *= jnp.expand_dims(trainable_point_weights, axis=1) v_point = [jnp.swapaxes(x, -2, -3) for x in v_point] q_point = [jnp.swapaxes(x, -2, -3) for x in q_point] k_point = [jnp.swapaxes(x, -2, -3) for x in k_point] dist2 = [ squared_difference(qx[:, :, None, :], kx[:, None, :, :]) for qx, kx in zip(q_point, k_point) ] dist2 = sum(dist2) attn_qk_point = -0.5 * jnp.sum( point_weights[:, None, None, :] * dist2, axis=-1) v = jnp.swapaxes(v_scalar, -2, -3) q = jnp.swapaxes(scalar_weights * q_scalar, -2, -3) k = jnp.swapaxes(k_scalar, -2, -3) attn_qk_scalar = jnp.matmul(q, jnp.swapaxes(k, -2, -1)) attn_logits = attn_qk_scalar + attn_qk_point attention_2d = common_modules.Linear( num_head, name='attention_2d')( inputs_2d) attention_2d = jnp.transpose(attention_2d, [2, 0, 1]) attention_2d = attention_2d_weights * attention_2d attn_logits += attention_2d mask_2d = mask * jnp.swapaxes(mask, -1, -2) attn_logits -= 1e5 * (1. - mask_2d) # [num_head, num_query_residues, num_target_residues] attn = jax.nn.softmax(attn_logits) # [num_head, num_query_residues, num_head * num_scalar_v] result_scalar = jnp.matmul(attn, v) # For point result, implement matmul manually so that it will be a float32 # on TPU. This is equivalent to # result_point_global = [jnp.einsum('bhqk,bhkc->bhqc', attn, vx) # for vx in v_point] # but on the TPU, doing the multiply and reduce_sum ensures the # computation happens in float32 instead of bfloat16. result_point_global = [jnp.sum( attn[:, :, :, None] * vx[:, None, :, :], axis=-2) for vx in v_point] # [num_query_residues, num_head, num_head * num_(scalar|point)_v] result_scalar = jnp.swapaxes(result_scalar, -2, -3) result_point_global = [ jnp.swapaxes(x, -2, -3) for x in result_point_global] # Features used in the linear output projection. Should have the size # [num_query_residues, ?] output_features = [] result_scalar = jnp.reshape( result_scalar, [num_residues, num_head * num_scalar_v]) output_features.append(result_scalar) result_point_global = [ jnp.reshape(r, [num_residues, num_head * num_point_v]) for r in result_point_global] result_point_local = affine.invert_point(result_point_global, extra_dims=1) output_features.extend(result_point_local) output_features.append(jnp.sqrt(self._dist_epsilon + jnp.square(result_point_local[0]) + jnp.square(result_point_local[1]) + jnp.square(result_point_local[2]))) # Dimensions: h = heads, i and j = residues, # c = inputs_2d channels # Contraction happens over the second residue dimension, similarly to how # the usual attention is performed. result_attention_over_2d = jnp.einsum('hij, ijc->ihc', attn, inputs_2d) num_out = num_head * result_attention_over_2d.shape[-1] output_features.append( jnp.reshape(result_attention_over_2d, [num_residues, num_out])) final_init = 'zeros' if self._zero_initialize_last else 'linear' final_act = jnp.concatenate(output_features, axis=-1) return common_modules.Linear( num_output, initializer=final_init, name='output_projection')(final_act) class FoldIteration(hk.Module): """A single iteration of the main structure module loop. Jumper et al. (2021) Suppl. Alg. 20 "StructureModule" lines 6-21 First, each residue attends to all residues using InvariantPointAttention. Then, we apply transition layers to update the hidden representations. Finally, we use the hidden representations to produce an update to the affine of each residue. 
""" def __init__(self, config, global_config, name='fold_iteration'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, activations, sequence_mask, update_affine, is_training, initial_act, safe_key=None, static_feat_2d=None, aatype=None): c = self.config if safe_key is None: safe_key = prng.SafeKey(hk.next_rng_key()) def safe_dropout_fn(tensor, safe_key): return prng.safe_dropout( tensor=tensor, safe_key=safe_key, rate=c.dropout, is_deterministic=self.global_config.deterministic, is_training=is_training) affine = quat_affine.QuatAffine.from_tensor(activations['affine']) act = activations['act'] attention_module = InvariantPointAttention(self.config, self.global_config) # Attention attn = attention_module( inputs_1d=act, inputs_2d=static_feat_2d, mask=sequence_mask, affine=affine) act += attn safe_key, *sub_keys = safe_key.split(3) sub_keys = iter(sub_keys) act = safe_dropout_fn(act, next(sub_keys)) act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='attention_layer_norm')( act) final_init = 'zeros' if self.global_config.zero_init else 'linear' # Transition input_act = act for i in range(c.num_layer_in_transition): init = 'relu' if i < c.num_layer_in_transition - 1 else final_init act = common_modules.Linear( c.num_channel, initializer=init, name='transition')( act) if i < c.num_layer_in_transition - 1: act = jax.nn.relu(act) act += input_act act = safe_dropout_fn(act, next(sub_keys)) act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='transition_layer_norm')(act) if update_affine: # This block corresponds to # Jumper et al. (2021) Alg. 23 "Backbone update" affine_update_size = 6 # Affine update affine_update = common_modules.Linear( affine_update_size, initializer=final_init, name='affine_update')( act) affine = affine.pre_compose(affine_update) sc = MultiRigidSidechain(c.sidechain, self.global_config)( affine.scale_translation(c.position_scale), [act, initial_act], aatype) outputs = {'affine': affine.to_tensor(), 'sc': sc} affine = affine.apply_rotation_tensor_fn(jax.lax.stop_gradient) new_activations = { 'act': act, 'affine': affine.to_tensor() } return new_activations, outputs def generate_affines(representations, batch, config, global_config, is_training, safe_key): """Generate predicted affines for a single chain. Jumper et al. (2021) Suppl. Alg. 20 "StructureModule" This is the main part of the structure module - it iteratively applies folding to produce a set of predicted residue positions. Args: representations: Representations dictionary. batch: Batch dictionary. config: Config for the structure module. global_config: Global config. is_training: Whether the model is being trained. safe_key: A prng.SafeKey object that wraps a PRNG key. Returns: A dictionary containing residue affines and sidechain positions. 
""" c = config sequence_mask = batch['seq_mask'][:, None] act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='single_layer_norm')( representations['single']) initial_act = act act = common_modules.Linear( c.num_channel, name='initial_projection')( act) affine = generate_new_affine(sequence_mask) fold_iteration = FoldIteration( c, global_config, name='fold_iteration') assert len(batch['seq_mask'].shape) == 1 activations = {'act': act, 'affine': affine.to_tensor(), } act_2d = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='pair_layer_norm')( representations['pair']) outputs = [] safe_keys = safe_key.split(c.num_layer) for sub_key in safe_keys: activations, output = fold_iteration( activations, initial_act=initial_act, static_feat_2d=act_2d, safe_key=sub_key, sequence_mask=sequence_mask, update_affine=True, is_training=is_training, aatype=batch['aatype']) outputs.append(output) output = jax.tree_map(lambda *x: jnp.stack(x), *outputs) # Include the activations in the output dict for use by the LDDT-Head. output['act'] = activations['act'] return output class StructureModule(hk.Module): """StructureModule as a network head. Jumper et al. (2021) Suppl. Alg. 20 "StructureModule" """ def __init__(self, config, global_config, compute_loss=True, name='structure_module'): super().__init__(name=name) self.config = config self.global_config = global_config self.compute_loss = compute_loss def __call__(self, representations, batch, is_training, safe_key=None): c = self.config ret = {} if safe_key is None: safe_key = prng.SafeKey(hk.next_rng_key()) output = generate_affines( representations=representations, batch=batch, config=self.config, global_config=self.global_config, is_training=is_training, safe_key=safe_key) ret['representations'] = {'structure_module': output['act']} ret['traj'] = output['affine'] * jnp.array([1.] * 4 + [c.position_scale] * 3) ret['sidechains'] = output['sc'] atom14_pred_positions = r3.vecs_to_tensor(output['sc']['atom_pos'])[-1] ret['final_atom14_positions'] = atom14_pred_positions # (N, 14, 3) ret['final_atom14_mask'] = batch['atom14_atom_exists'] # (N, 14) atom37_pred_positions = all_atom.atom14_to_atom37(atom14_pred_positions, batch) atom37_pred_positions *= batch['atom37_atom_exists'][:, :, None] ret['final_atom_positions'] = atom37_pred_positions # (N, 37, 3) ret['final_atom_mask'] = batch['atom37_atom_exists'] # (N, 37) ret['final_affines'] = ret['traj'][-1] if self.compute_loss: return ret else: no_loss_features = ['final_atom_positions', 'final_atom_mask', 'representations'] no_loss_ret = {k: ret[k] for k in no_loss_features} return no_loss_ret def loss(self, value, batch): ret = {'loss': 0.} ret['metrics'] = {} # If requested, compute in-graph metrics. if self.config.compute_in_graph_metrics: atom14_pred_positions = value['final_atom14_positions'] # Compute renaming and violations. 
value.update(compute_renamed_ground_truth(batch, atom14_pred_positions)) value['violations'] = find_structural_violations( batch, atom14_pred_positions, self.config) # Several violation metrics: violation_metrics = compute_violation_metrics( batch=batch, atom14_pred_positions=atom14_pred_positions, violations=value['violations']) ret['metrics'].update(violation_metrics) backbone_loss(ret, batch, value, self.config) if 'renamed_atom14_gt_positions' not in value: value.update(compute_renamed_ground_truth( batch, value['final_atom14_positions'])) sc_loss = sidechain_loss(batch, value, self.config) ret['loss'] = ((1 - self.config.sidechain.weight_frac) * ret['loss'] + self.config.sidechain.weight_frac * sc_loss['loss']) ret['sidechain_fape'] = sc_loss['fape'] supervised_chi_loss(ret, batch, value, self.config) if self.config.structural_violation_loss_weight: if 'violations' not in value: value['violations'] = find_structural_violations( batch, value['final_atom14_positions'], self.config) structural_violation_loss(ret, batch, value, self.config) return ret def compute_renamed_ground_truth( batch: Dict[str, jnp.ndarray], atom14_pred_positions: jnp.ndarray, ) -> Dict[str, jnp.ndarray]: """Find optimal renaming of ground truth based on the predicted positions. Jumper et al. (2021) Suppl. Alg. 26 "renameSymmetricGroundTruthAtoms" This renamed ground truth is then used for all losses, such that each loss moves the atoms in the same direction. Shape (N). Args: batch: Dictionary containing: * atom14_gt_positions: Ground truth positions. * atom14_alt_gt_positions: Ground truth positions with renaming swaps. * atom14_atom_is_ambiguous: 1.0 for atoms that are affected by renaming swaps. * atom14_gt_exists: Mask for which atoms exist in ground truth. * atom14_alt_gt_exists: Mask for which atoms exist in ground truth after renaming. * atom14_atom_exists: Mask for whether each atom is part of the given amino acid type. atom14_pred_positions: Array of atom positions in global frame with shape (N, 14, 3). Returns: Dictionary containing: alt_naming_is_better: Array with 1.0 where alternative swap is better. renamed_atom14_gt_positions: Array of optimal ground truth positions after renaming swaps are performed. renamed_atom14_gt_exists: Mask after renaming swap is performed. """ alt_naming_is_better = all_atom.find_optimal_renaming( atom14_gt_positions=batch['atom14_gt_positions'], atom14_alt_gt_positions=batch['atom14_alt_gt_positions'], atom14_atom_is_ambiguous=batch['atom14_atom_is_ambiguous'], atom14_gt_exists=batch['atom14_gt_exists'], atom14_pred_positions=atom14_pred_positions, atom14_atom_exists=batch['atom14_atom_exists']) renamed_atom14_gt_positions = ( (1. - alt_naming_is_better[:, None, None]) * batch['atom14_gt_positions'] + alt_naming_is_better[:, None, None] * batch['atom14_alt_gt_positions']) renamed_atom14_gt_mask = ( (1. - alt_naming_is_better[:, None]) * batch['atom14_gt_exists'] + alt_naming_is_better[:, None] * batch['atom14_alt_gt_exists']) return { 'alt_naming_is_better': alt_naming_is_better, # (N) 'renamed_atom14_gt_positions': renamed_atom14_gt_positions, # (N, 14, 3) 'renamed_atom14_gt_exists': renamed_atom14_gt_mask, # (N, 14) } def backbone_loss(ret, batch, value, config): """Backbone FAPE Loss. Jumper et al. (2021) Suppl. Alg. 20 "StructureModule" line 17 Args: ret: Dictionary to write outputs into, needs to contain 'loss'. batch: Batch, needs to contain 'backbone_affine_tensor', 'backbone_affine_mask'. 
value: Dictionary containing structure module output, needs to contain 'traj', a trajectory of rigids. config: Configuration of loss, should contain 'fape.clamp_distance' and 'fape.loss_unit_distance'. """ affine_trajectory = quat_affine.QuatAffine.from_tensor(value['traj']) rigid_trajectory = r3.rigids_from_quataffine(affine_trajectory) gt_affine = quat_affine.QuatAffine.from_tensor( batch['backbone_affine_tensor']) gt_rigid = r3.rigids_from_quataffine(gt_affine) backbone_mask = batch['backbone_affine_mask'] fape_loss_fn = functools.partial( all_atom.frame_aligned_point_error, l1_clamp_distance=config.fape.clamp_distance, length_scale=config.fape.loss_unit_distance) fape_loss_fn = jax.vmap(fape_loss_fn, (0, None, None, 0, None, None)) fape_loss = fape_loss_fn(rigid_trajectory, gt_rigid, backbone_mask, rigid_trajectory.trans, gt_rigid.trans, backbone_mask) if 'use_clamped_fape' in batch: # Jumper et al. (2021) Suppl. Sec. 1.11.5 "Loss clamping details" use_clamped_fape = jnp.asarray(batch['use_clamped_fape'], jnp.float32) unclamped_fape_loss_fn = functools.partial( all_atom.frame_aligned_point_error, l1_clamp_distance=None, length_scale=config.fape.loss_unit_distance) unclamped_fape_loss_fn = jax.vmap(unclamped_fape_loss_fn, (0, None, None, 0, None, None)) fape_loss_unclamped = unclamped_fape_loss_fn(rigid_trajectory, gt_rigid, backbone_mask, rigid_trajectory.trans, gt_rigid.trans, backbone_mask) fape_loss = (fape_loss * use_clamped_fape + fape_loss_unclamped * (1 - use_clamped_fape)) ret['fape'] = fape_loss[-1] ret['loss'] += jnp.mean(fape_loss) def sidechain_loss(batch, value, config): """All Atom FAPE Loss using renamed rigids.""" # Rename Frames # Jumper et al. (2021) Suppl. Alg. 26 "renameSymmetricGroundTruthAtoms" line 7 alt_naming_is_better = value['alt_naming_is_better'] renamed_gt_frames = ( (1. - alt_naming_is_better[:, None, None]) * batch['rigidgroups_gt_frames'] + alt_naming_is_better[:, None, None] * batch['rigidgroups_alt_gt_frames']) flat_gt_frames = r3.rigids_from_tensor_flat12( jnp.reshape(renamed_gt_frames, [-1, 12])) flat_frames_mask = jnp.reshape(batch['rigidgroups_gt_exists'], [-1]) flat_gt_positions = r3.vecs_from_tensor( jnp.reshape(value['renamed_atom14_gt_positions'], [-1, 3])) flat_positions_mask = jnp.reshape(value['renamed_atom14_gt_exists'], [-1]) # Compute frame_aligned_point_error score for the final layer. pred_frames = value['sidechains']['frames'] pred_positions = value['sidechains']['atom_pos'] def _slice_last_layer_and_flatten(x): return jnp.reshape(x[-1], [-1]) flat_pred_frames = jax.tree_map( _slice_last_layer_and_flatten, pred_frames) flat_pred_positions = jax.tree_map( _slice_last_layer_and_flatten, pred_positions) # FAPE Loss on sidechains fape = all_atom.frame_aligned_point_error( pred_frames=flat_pred_frames, target_frames=flat_gt_frames, frames_mask=flat_frames_mask, pred_positions=flat_pred_positions, target_positions=flat_gt_positions, positions_mask=flat_positions_mask, l1_clamp_distance=config.sidechain.atom_clamp_distance, length_scale=config.sidechain.length_scale) return { 'fape': fape, 'loss': fape} def structural_violation_loss(ret, batch, value, config): """Computes loss for structural violations.""" assert config.sidechain.weight_frac # Put all violation losses together to one large loss. 
violations = value['violations'] num_atoms = jnp.sum(batch['atom14_atom_exists']).astype(jnp.float32) ret['loss'] += (config.structural_violation_loss_weight * ( violations['between_residues']['bonds_c_n_loss_mean'] + violations['between_residues']['angles_ca_c_n_loss_mean'] + violations['between_residues']['angles_c_n_ca_loss_mean'] + jnp.sum( violations['between_residues']['clashes_per_atom_loss_sum'] + violations['within_residues']['per_atom_loss_sum']) / (1e-6 + num_atoms))) def find_structural_violations( batch: Dict[str, jnp.ndarray], atom14_pred_positions: jnp.ndarray, # (N, 14, 3) config: ml_collections.ConfigDict ): """Computes several checks for structural violations.""" # Compute between residue backbone violations of bonds and angles. connection_violations = all_atom.between_residue_bond_loss( pred_atom_positions=atom14_pred_positions, pred_atom_mask=batch['atom14_atom_exists'].astype(jnp.float32), residue_index=batch['residue_index'].astype(jnp.float32), aatype=batch['aatype'], tolerance_factor_soft=config.violation_tolerance_factor, tolerance_factor_hard=config.violation_tolerance_factor) # Compute the Van der Waals radius for every atom # (the first letter of the atom name is the element type). # Shape: (N, 14). atomtype_radius = jnp.array([ residue_constants.van_der_waals_radius[name[0]] for name in residue_constants.atom_types ]) atom14_atom_radius = batch['atom14_atom_exists'] * utils.batched_gather( atomtype_radius, batch['residx_atom14_to_atom37']) # Compute the between residue clash loss. between_residue_clashes = all_atom.between_residue_clash_loss( atom14_pred_positions=atom14_pred_positions, atom14_atom_exists=batch['atom14_atom_exists'], atom14_atom_radius=atom14_atom_radius, residue_index=batch['residue_index'], overlap_tolerance_soft=config.clash_overlap_tolerance, overlap_tolerance_hard=config.clash_overlap_tolerance) # Compute all within-residue violations (clashes, # bond length and angle violations). restype_atom14_bounds = residue_constants.make_atom14_dists_bounds( overlap_tolerance=config.clash_overlap_tolerance, bond_length_tolerance_factor=config.violation_tolerance_factor) atom14_dists_lower_bound = utils.batched_gather( restype_atom14_bounds['lower_bound'], batch['aatype']) atom14_dists_upper_bound = utils.batched_gather( restype_atom14_bounds['upper_bound'], batch['aatype']) within_residue_violations = all_atom.within_residue_violations( atom14_pred_positions=atom14_pred_positions, atom14_atom_exists=batch['atom14_atom_exists'], atom14_dists_lower_bound=atom14_dists_lower_bound, atom14_dists_upper_bound=atom14_dists_upper_bound, tighten_bounds_for_loss=0.0) # Combine them to a single per-residue violation mask (used later for LDDT). 
per_residue_violations_mask = jnp.max(jnp.stack([ connection_violations['per_residue_violation_mask'], jnp.max(between_residue_clashes['per_atom_clash_mask'], axis=-1), jnp.max(within_residue_violations['per_atom_violations'], axis=-1)]), axis=0) return { 'between_residues': { 'bonds_c_n_loss_mean': connection_violations['c_n_loss_mean'], # () 'angles_ca_c_n_loss_mean': connection_violations['ca_c_n_loss_mean'], # () 'angles_c_n_ca_loss_mean': connection_violations['c_n_ca_loss_mean'], # () 'connections_per_residue_loss_sum': connection_violations['per_residue_loss_sum'], # (N) 'connections_per_residue_violation_mask': connection_violations['per_residue_violation_mask'], # (N) 'clashes_mean_loss': between_residue_clashes['mean_loss'], # () 'clashes_per_atom_loss_sum': between_residue_clashes['per_atom_loss_sum'], # (N, 14) 'clashes_per_atom_clash_mask': between_residue_clashes['per_atom_clash_mask'], # (N, 14) }, 'within_residues': { 'per_atom_loss_sum': within_residue_violations['per_atom_loss_sum'], # (N, 14) 'per_atom_violations': within_residue_violations['per_atom_violations'], # (N, 14), }, 'total_per_residue_violations_mask': per_residue_violations_mask, # (N) } def compute_violation_metrics( batch: Dict[str, jnp.ndarray], atom14_pred_positions: jnp.ndarray, # (N, 14, 3) violations: Dict[str, jnp.ndarray], ) -> Dict[str, jnp.ndarray]: """Compute several metrics to assess the structural violations.""" ret = {} extreme_ca_ca_violations = all_atom.extreme_ca_ca_distance_violations( pred_atom_positions=atom14_pred_positions, pred_atom_mask=batch['atom14_atom_exists'].astype(jnp.float32), residue_index=batch['residue_index'].astype(jnp.float32)) ret['violations_extreme_ca_ca_distance'] = extreme_ca_ca_violations ret['violations_between_residue_bond'] = utils.mask_mean( mask=batch['seq_mask'], value=violations['between_residues'][ 'connections_per_residue_violation_mask']) ret['violations_between_residue_clash'] = utils.mask_mean( mask=batch['seq_mask'], value=jnp.max( violations['between_residues']['clashes_per_atom_clash_mask'], axis=-1)) ret['violations_within_residue'] = utils.mask_mean( mask=batch['seq_mask'], value=jnp.max( violations['within_residues']['per_atom_violations'], axis=-1)) ret['violations_per_residue'] = utils.mask_mean( mask=batch['seq_mask'], value=violations['total_per_residue_violations_mask']) return ret def supervised_chi_loss(ret, batch, value, config): """Computes loss for direct chi angle supervision. Jumper et al. (2021) Suppl. Alg. 27 "torsionAngleLoss" Args: ret: Dictionary to write outputs into, needs to contain 'loss'. batch: Batch, needs to contain 'seq_mask', 'chi_mask', 'chi_angles'. value: Dictionary containing structure module output, needs to contain value['sidechains']['angles_sin_cos'] for angles and value['sidechains']['unnormalized_angles_sin_cos'] for unnormalized angles. config: Configuration of loss, should contain 'chi_weight' and 'angle_norm_weight', 'angle_norm_weight' scales angle norm term, 'chi_weight' scales torsion term. 
""" eps = 1e-6 sequence_mask = batch['seq_mask'] num_res = sequence_mask.shape[0] chi_mask = batch['chi_mask'].astype(jnp.float32) pred_angles = jnp.reshape( value['sidechains']['angles_sin_cos'], [-1, num_res, 7, 2]) pred_angles = pred_angles[:, :, 3:] residue_type_one_hot = jax.nn.one_hot( batch['aatype'], residue_constants.restype_num + 1, dtype=jnp.float32)[None] chi_pi_periodic = jnp.einsum('ijk, kl->ijl', residue_type_one_hot, jnp.asarray(residue_constants.chi_pi_periodic)) true_chi = batch['chi_angles'][None] sin_true_chi = jnp.sin(true_chi) cos_true_chi = jnp.cos(true_chi) sin_cos_true_chi = jnp.stack([sin_true_chi, cos_true_chi], axis=-1) # This is -1 if chi is pi-periodic and +1 if it's 2pi-periodic shifted_mask = (1 - 2 * chi_pi_periodic)[..., None] sin_cos_true_chi_shifted = shifted_mask * sin_cos_true_chi sq_chi_error = jnp.sum( squared_difference(sin_cos_true_chi, pred_angles), -1) sq_chi_error_shifted = jnp.sum( squared_difference(sin_cos_true_chi_shifted, pred_angles), -1) sq_chi_error = jnp.minimum(sq_chi_error, sq_chi_error_shifted) sq_chi_loss = utils.mask_mean(mask=chi_mask[None], value=sq_chi_error) ret['chi_loss'] = sq_chi_loss ret['loss'] += config.chi_weight * sq_chi_loss unnormed_angles = jnp.reshape( value['sidechains']['unnormalized_angles_sin_cos'], [-1, num_res, 7, 2]) angle_norm = jnp.sqrt(jnp.sum(jnp.square(unnormed_angles), axis=-1) + eps) norm_error = jnp.abs(angle_norm - 1.) angle_norm_loss = utils.mask_mean(mask=sequence_mask[None, :, None], value=norm_error) ret['angle_norm_loss'] = angle_norm_loss ret['loss'] += config.angle_norm_weight * angle_norm_loss def generate_new_affine(sequence_mask): num_residues, _ = sequence_mask.shape quaternion = jnp.tile( jnp.reshape(jnp.asarray([1., 0., 0., 0.]), [1, 4]), [num_residues, 1]) translation = jnp.zeros([num_residues, 3]) return quat_affine.QuatAffine(quaternion, translation, unstack_inputs=True) def l2_normalize(x, axis=-1, epsilon=1e-12): return x / jnp.sqrt( jnp.maximum(jnp.sum(x**2, axis=axis, keepdims=True), epsilon)) class MultiRigidSidechain(hk.Module): """Class to make side chain atoms.""" def __init__(self, config, global_config, name='rigid_sidechain'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, affine, representations_list, aatype): """Predict side chains using multi-rigid representations. Args: affine: The affines for each residue (translations in angstroms). representations_list: A list of activations to predict side chains from. aatype: Amino acid types. Returns: Dict containing atom positions and frames (in angstroms). """ act = [ common_modules.Linear( # pylint: disable=g-complex-comprehension self.config.num_channel, name='input_projection')(jax.nn.relu(x)) for x in representations_list ] # Sum the activation list (equivalent to concat then Linear). act = sum(act) final_init = 'zeros' if self.global_config.zero_init else 'linear' # Mapping with some residual blocks. for _ in range(self.config.num_residual_block): old_act = act act = common_modules.Linear( self.config.num_channel, initializer='relu', name='resblock1')( jax.nn.relu(act)) act = common_modules.Linear( self.config.num_channel, initializer=final_init, name='resblock2')( jax.nn.relu(act)) act += old_act # Map activations to torsion angles. Shape: (num_res, 14). 
num_res = act.shape[0] unnormalized_angles = common_modules.Linear( 14, name='unnormalized_angles')( jax.nn.relu(act)) unnormalized_angles = jnp.reshape( unnormalized_angles, [num_res, 7, 2]) angles = l2_normalize(unnormalized_angles, axis=-1) outputs = { 'angles_sin_cos': angles, # jnp.ndarray (N, 7, 2) 'unnormalized_angles_sin_cos': unnormalized_angles, # jnp.ndarray (N, 7, 2) } # Map torsion angles to frames. backb_to_global = r3.rigids_from_quataffine(affine) # Jumper et al. (2021) Suppl. Alg. 24 "computeAllAtomCoordinates" # r3.Rigids with shape (N, 8). all_frames_to_global = all_atom.torsion_angles_to_frames( aatype, backb_to_global, angles) # Use frames and literature positions to create the final atom coordinates. # r3.Vecs with shape (N, 14). pred_positions = all_atom.frames_and_literature_positions_to_atom14_pos( aatype, all_frames_to_global) outputs.update({ 'atom_pos': pred_positions, # r3.Vecs (N, 14) 'frames': all_frames_to_global, # r3.Rigids (N, 8) }) return outputs
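MultiRigidSidechain above predicts 14 raw values per residue, reshapes them to (N, 7, 2) sin/cos pairs, and l2-normalizes each pair onto the unit circle before mapping angles to frames. A small sketch of that normalization step on a hypothetical unnormalized prediction:

import jax.numpy as jnp


def l2_normalize(x, axis=-1, epsilon=1e-12):
  # Same operation as l2_normalize in folding.py: scale each (sin, cos)
  # pair so that it has unit norm.
  return x / jnp.sqrt(jnp.maximum(jnp.sum(x**2, axis=axis, keepdims=True), epsilon))


num_res = 2
# Hypothetical raw head output: 14 values per residue -> (N, 7, 2) sin/cos pairs.
unnormalized_angles = jnp.arange(num_res * 14, dtype=jnp.float32)
unnormalized_angles = unnormalized_angles.reshape(num_res, 7, 2) / 10.0
angles = l2_normalize(unnormalized_angles, axis=-1)

# Every (sin, cos) pair now has unit norm, i.e. encodes a valid torsion angle.
print(jnp.sum(angles**2, axis=-1))  # ~1.0 everywhere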
repo_name: alphafold-main
file_path: alphafold/model/folding.py
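InvariantPointAttention in folding.py balances its three logit terms (scalar query-key, point query-key, and the 2D bias) by assuming unit-variance inputs and giving each term a third of the total logit variance. A short numeric sketch of those weights; the channel counts below are hypothetical stand-ins for whatever the structure-module config specifies:

import numpy as np

num_scalar_qk = 16   # hypothetical config value (config.num_scalar_qk)
num_point_qk = 4     # hypothetical config value (config.num_point_qk)
num_logit_terms = 3  # scalar, point and attention-2d contributions

# Each scalar (q, k) pair contributes Var[q * k] = 1.
scalar_variance = max(num_scalar_qk, 1) * 1.0
# Each point pair contributes Var[0.5 * ||q||^2 - <q, k>] = 9 / 2.
point_variance = max(num_point_qk, 1) * 9.0 / 2.0

# Scale each term so the three logit contributions get an equal (1/3) share.
scalar_weights = np.sqrt(1.0 / (num_logit_terms * scalar_variance))
point_weights = np.sqrt(1.0 / (num_logit_terms * point_variance))
attention_2d_weights = np.sqrt(1.0 / num_logit_terms)

print(scalar_weights, point_weights, attention_2d_weights)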
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A collection of JAX utility functions for use in protein folding."""

import collections
import contextlib
import functools
import numbers
from typing import Mapping

import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np


def stable_softmax(logits: jax.Array) -> jax.Array:
  """Numerically stable softmax for (potential) bfloat 16."""
  if logits.dtype == jnp.float32:
    output = jax.nn.softmax(logits)
  elif logits.dtype == jnp.bfloat16:
    # Need to explicitly do softmax in float32 to avoid numerical issues
    # with large negatives. Large negatives can occur if trying to mask
    # by adding on large negative logits so that things softmax to zero.
    output = jax.nn.softmax(logits.astype(jnp.float32)).astype(jnp.bfloat16)
  else:
    raise ValueError(f'Unexpected input dtype {logits.dtype}')
  return output


def bfloat16_creator(next_creator, shape, dtype, init, context):
  """Creates float32 variables when bfloat16 is requested."""
  if context.original_dtype == jnp.bfloat16:
    dtype = jnp.float32
  return next_creator(shape, dtype, init)


def bfloat16_getter(next_getter, value, context):
  """Casts float32 to bfloat16 when bfloat16 was originally requested."""
  if context.original_dtype == jnp.bfloat16:
    assert value.dtype == jnp.float32
    value = value.astype(jnp.bfloat16)
  return next_getter(value)


@contextlib.contextmanager
def bfloat16_context():
  with hk.custom_creator(bfloat16_creator), hk.custom_getter(bfloat16_getter):
    yield


def final_init(config):
  if config.zero_init:
    return 'zeros'
  else:
    return 'linear'


def batched_gather(params, indices, axis=0, batch_dims=0):
  """Implements a JAX equivalent of `tf.gather` with `axis` and `batch_dims`."""
  take_fn = lambda p, i: jnp.take(p, i, axis=axis, mode='clip')
  for _ in range(batch_dims):
    take_fn = jax.vmap(take_fn)
  return take_fn(params, indices)


def mask_mean(mask, value, axis=None, drop_mask_channel=False, eps=1e-10):
  """Masked mean."""
  if drop_mask_channel:
    mask = mask[..., 0]

  mask_shape = mask.shape
  value_shape = value.shape

  assert len(mask_shape) == len(value_shape)

  if isinstance(axis, numbers.Integral):
    axis = [axis]
  elif axis is None:
    axis = list(range(len(mask_shape)))
  assert isinstance(axis, collections.abc.Iterable), (
      'axis needs to be either an iterable, integer or "None"')

  broadcast_factor = 1.
  for axis_ in axis:
    value_size = value_shape[axis_]
    mask_size = mask_shape[axis_]
    if mask_size == 1:
      broadcast_factor *= value_size
    else:
      assert mask_size == value_size

  return (jnp.sum(mask * value, axis=axis) /
          (jnp.sum(mask, axis=axis) * broadcast_factor + eps))


def flat_params_to_haiku(params: Mapping[str, np.ndarray]) -> hk.Params:
  """Convert a dictionary of NumPy arrays to Haiku parameters."""
  hk_params = {}
  for path, array in params.items():
    scope, name = path.split('//')
    if scope not in hk_params:
      hk_params[scope] = {}
    hk_params[scope][name] = jnp.array(array)
  return hk_params


def padding_consistent_rng(f):
  """Modify any element-wise random function to be consistent with padding.

  Normally if you take a function like jax.random.normal and generate an array,
  say of size (10,10), you will get a different set of random numbers to if you
  add padding and take the first (10,10) sub-array.

  This function makes a random function that is consistent regardless of the
  amount of padding added.

  Note: The padding-consistent function is likely to be slower to compile and
  run than the function it is wrapping, but these slowdowns are likely to be
  negligible in a large network.

  Args:
    f: Any element-wise function that takes (PRNG key, shape) as the first 2
      arguments.

  Returns:
    An equivalent function to f, that is now consistent for different amounts
    of padding.
  """
  def grid_keys(key, shape):
    """Generate a grid of rng keys that is consistent with different padding.

    Generate random keys such that the keys will be identical, regardless of
    how much padding is added to any dimension.

    Args:
      key: A PRNG key.
      shape: The shape of the output array of keys that will be generated.

    Returns:
      An array of shape `shape` consisting of random keys.
    """
    if not shape:
      return key
    new_keys = jax.vmap(functools.partial(jax.random.fold_in, key))(
        jnp.arange(shape[0]))
    return jax.vmap(functools.partial(grid_keys, shape=shape[1:]))(new_keys)

  def inner(key, shape, **kwargs):
    keys = grid_keys(key, shape)
    signature = (
        '()->()'
        if isinstance(keys, jax.random.PRNGKeyArray)
        else '(2)->()'
    )
    return jnp.vectorize(
        functools.partial(f, shape=(), **kwargs),
        signature=signature
    )(keys)
  return inner
repo_name: alphafold-main
file_path: alphafold/model/utils.py
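mask_mean in utils.py is the workhorse behind every masked average in the losses; when mask and value shapes match exactly, the broadcast factor is 1 and it reduces to sum(mask * value) / (sum(mask) + eps). A tiny sketch of that basic case with hypothetical values:

import jax.numpy as jnp

value = jnp.array([[1.0, 2.0, 3.0],
                   [4.0, 5.0, 6.0]])
mask = jnp.array([[1.0, 1.0, 0.0],
                  [1.0, 0.0, 0.0]])  # hypothetical validity mask
eps = 1e-10

# The reduction mask_mean performs when mask and value shapes match exactly.
masked_mean = jnp.sum(mask * value) / (jnp.sum(mask) + eps)
print(masked_mean)  # (1 + 2 + 4) / 3 = 2.333...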
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A collection of common Haiku modules for use in protein folding.""" import numbers from typing import Union, Sequence import haiku as hk import jax.numpy as jnp import numpy as np # Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) TRUNCATED_NORMAL_STDDEV_FACTOR = np.asarray(.87962566103423978, dtype=np.float32) def get_initializer_scale(initializer_name, input_shape): """Get Initializer for weights and scale to multiply activations by.""" if initializer_name == 'zeros': w_init = hk.initializers.Constant(0.0) else: # fan-in scaling scale = 1. for channel_dim in input_shape: scale /= channel_dim if initializer_name == 'relu': scale *= 2 noise_scale = scale stddev = np.sqrt(noise_scale) # Adjust stddev for truncation. stddev = stddev / TRUNCATED_NORMAL_STDDEV_FACTOR w_init = hk.initializers.TruncatedNormal(mean=0.0, stddev=stddev) return w_init class Linear(hk.Module): """Protein folding specific Linear module. This differs from the standard Haiku Linear in a few ways: * It supports inputs and outputs of arbitrary rank * Initializers are specified by strings """ def __init__(self, num_output: Union[int, Sequence[int]], initializer: str = 'linear', num_input_dims: int = 1, use_bias: bool = True, bias_init: float = 0., precision = None, name: str = 'linear'): """Constructs Linear Module. Args: num_output: Number of output channels. Can be tuple when outputting multiple dimensions. initializer: What initializer to use, should be one of {'linear', 'relu', 'zeros'} num_input_dims: Number of dimensions from the end to project. use_bias: Whether to include trainable bias bias_init: Value used to initialize bias. precision: What precision to use for matrix multiplication, defaults to None. name: Name of module, used for name scopes. """ super().__init__(name=name) if isinstance(num_output, numbers.Integral): self.output_shape = (num_output,) else: self.output_shape = tuple(num_output) self.initializer = initializer self.use_bias = use_bias self.bias_init = bias_init self.num_input_dims = num_input_dims self.num_output_dims = len(self.output_shape) self.precision = precision def __call__(self, inputs): """Connects Module. Args: inputs: Tensor with at least num_input_dims dimensions. Returns: output of shape [...] + num_output. 
""" num_input_dims = self.num_input_dims if self.num_input_dims > 0: in_shape = inputs.shape[-self.num_input_dims:] else: in_shape = () weight_init = get_initializer_scale(self.initializer, in_shape) in_letters = 'abcde'[:self.num_input_dims] out_letters = 'hijkl'[:self.num_output_dims] weight_shape = in_shape + self.output_shape weights = hk.get_parameter('weights', weight_shape, inputs.dtype, weight_init) equation = f'...{in_letters}, {in_letters}{out_letters}->...{out_letters}' output = jnp.einsum(equation, inputs, weights, precision=self.precision) if self.use_bias: bias = hk.get_parameter('bias', self.output_shape, inputs.dtype, hk.initializers.Constant(self.bias_init)) output += bias return output class LayerNorm(hk.LayerNorm): """LayerNorm module. Equivalent to hk.LayerNorm but with different parameter shapes: they are always vectors rather than possibly higher-rank tensors. This makes it easier to change the layout whilst keep the model weight-compatible. """ def __init__(self, axis, create_scale: bool, create_offset: bool, eps: float = 1e-5, scale_init=None, offset_init=None, use_fast_variance: bool = False, name=None, param_axis=None): super().__init__( axis=axis, create_scale=False, create_offset=False, eps=eps, scale_init=None, offset_init=None, use_fast_variance=use_fast_variance, name=name, param_axis=param_axis) self._temp_create_scale = create_scale self._temp_create_offset = create_offset def __call__(self, x: jnp.ndarray) -> jnp.ndarray: is_bf16 = (x.dtype == jnp.bfloat16) if is_bf16: x = x.astype(jnp.float32) param_axis = self.param_axis[0] if self.param_axis else -1 param_shape = (x.shape[param_axis],) param_broadcast_shape = [1] * x.ndim param_broadcast_shape[param_axis] = x.shape[param_axis] scale = None offset = None if self._temp_create_scale: scale = hk.get_parameter( 'scale', param_shape, x.dtype, init=self.scale_init) scale = scale.reshape(param_broadcast_shape) if self._temp_create_offset: offset = hk.get_parameter( 'offset', param_shape, x.dtype, init=self.offset_init) offset = offset.reshape(param_broadcast_shape) out = super().__call__(x, scale=scale, offset=offset) if is_bf16: out = out.astype(jnp.bfloat16) return out
repo_name: alphafold-main
file_path: alphafold/model/common_modules.py
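get_initializer_scale in common_modules.py uses fan-in scaling (doubled for ReLU) and then divides by TRUNCATED_NORMAL_STDDEV_FACTOR so the truncated normal keeps the intended standard deviation. A short sketch of the stddev it would produce for a hypothetical 128-channel input:

import numpy as np

# Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
TRUNCATED_NORMAL_STDDEV_FACTOR = 0.87962566103423978


def init_stddev(initializer_name, fan_in):
  """Mirrors the scale computation in get_initializer_scale (sketch only)."""
  scale = 1.0 / fan_in  # fan-in scaling
  if initializer_name == 'relu':
    scale *= 2          # He-style correction for ReLU activations
  # Widen the stddev so the truncated normal retains the intended spread.
  return np.sqrt(scale) / TRUNCATED_NORMAL_STDDEV_FACTOR


print(init_stddev('linear', 128))  # ~0.100
print(init_stddev('relu', 128))    # ~0.142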
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Modules and code used in the core part of AlphaFold. The structure generation code is in 'folding.py'. """ import functools from alphafold.common import residue_constants from alphafold.model import all_atom from alphafold.model import common_modules from alphafold.model import folding from alphafold.model import layer_stack from alphafold.model import lddt from alphafold.model import mapping from alphafold.model import prng from alphafold.model import quat_affine from alphafold.model import utils import haiku as hk import jax import jax.numpy as jnp _SOFTMAX_MASK = -1e9 def softmax_cross_entropy(logits, labels): """Computes softmax cross entropy given logits and one-hot class labels.""" loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1) return jnp.asarray(loss) def sigmoid_cross_entropy(logits, labels): """Computes sigmoid cross entropy given logits and multiple class labels.""" log_p = jax.nn.log_sigmoid(logits) # log(1 - sigmoid(x)) = log_sigmoid(-x), the latter is more numerically stable log_not_p = jax.nn.log_sigmoid(-logits) loss = -labels * log_p - (1. - labels) * log_not_p return jnp.asarray(loss) def apply_dropout(*, tensor, safe_key, rate, is_training, broadcast_dim=None): """Applies dropout to a tensor.""" if is_training and rate != 0.0: shape = list(tensor.shape) if broadcast_dim is not None: shape[broadcast_dim] = 1 keep_rate = 1.0 - rate keep = jax.random.bernoulli(safe_key.get(), keep_rate, shape=shape) return keep * tensor / keep_rate else: return tensor def dropout_wrapper(module, input_act, mask, safe_key, global_config, output_act=None, is_training=True, **kwargs): """Applies module + dropout + residual update.""" if output_act is None: output_act = input_act gc = global_config residual = module(input_act, mask, is_training=is_training, **kwargs) dropout_rate = 0.0 if gc.deterministic else module.config.dropout_rate # Will override `is_training` to True if want to use dropout. should_apply_dropout = True if gc.eval_dropout else is_training if module.config.shared_dropout: if module.config.orientation == 'per_row': broadcast_dim = 0 else: broadcast_dim = 1 else: broadcast_dim = None residual = apply_dropout(tensor=residual, safe_key=safe_key, rate=dropout_rate, is_training=should_apply_dropout, broadcast_dim=broadcast_dim) new_act = output_act + residual return new_act def create_extra_msa_feature(batch): """Expand extra_msa into 1hot and concat with other extra msa features. We do this as late as possible as the one_hot extra msa can be very large. Arguments: batch: a dictionary with the following keys: * 'extra_msa': [N_extra_seq, N_res] MSA that wasn't selected as a cluster centre. Note, that this is not one-hot encoded. * 'extra_has_deletion': [N_extra_seq, N_res] Whether there is a deletion to the left of each position in the extra MSA. * 'extra_deletion_value': [N_extra_seq, N_res] The number of deletions to the left of each position in the extra MSA. 
Returns: Concatenated tensor of extra MSA features. """ # 23 = 20 amino acids + 'X' for unknown + gap + bert mask msa_1hot = jax.nn.one_hot(batch['extra_msa'], 23) msa_feat = [msa_1hot, jnp.expand_dims(batch['extra_has_deletion'], axis=-1), jnp.expand_dims(batch['extra_deletion_value'], axis=-1)] return jnp.concatenate(msa_feat, axis=-1) class AlphaFoldIteration(hk.Module): """A single recycling iteration of AlphaFold architecture. Computes ensembled (averaged) representations from the provided features. These representations are then passed to the various heads that have been requested by the configuration file. Each head also returns a loss which is combined as a weighted sum to produce the total loss. Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 3-22 """ def __init__(self, config, global_config, name='alphafold_iteration'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, ensembled_batch, non_ensembled_batch, is_training, compute_loss=False, ensemble_representations=False, return_representations=False): num_ensemble = jnp.asarray(ensembled_batch['seq_length'].shape[0]) if not ensemble_representations: assert ensembled_batch['seq_length'].shape[0] == 1 def slice_batch(i): b = {k: v[i] for k, v in ensembled_batch.items()} b.update(non_ensembled_batch) return b # Compute representations for each batch element and average. evoformer_module = EmbeddingsAndEvoformer( self.config.embeddings_and_evoformer, self.global_config) batch0 = slice_batch(0) representations = evoformer_module(batch0, is_training) # MSA representations are not ensembled so # we don't pass tensor into the loop. msa_representation = representations['msa'] del representations['msa'] # Average the representations (except MSA) over the batch dimension. if ensemble_representations: def body(x): """Add one element to the representations ensemble.""" i, current_representations = x feats = slice_batch(i) representations_update = evoformer_module( feats, is_training) new_representations = {} for k in current_representations: new_representations[k] = ( current_representations[k] + representations_update[k]) return i+1, new_representations if hk.running_init(): # When initializing the Haiku module, run one iteration of the # while_loop to initialize the Haiku modules used in `body`. _, representations = body((1, representations)) else: _, representations = hk.while_loop( lambda x: x[0] < num_ensemble, body, (1, representations)) for k in representations: if k != 'msa': representations[k] /= num_ensemble.astype(representations[k].dtype) representations['msa'] = msa_representation batch = batch0 # We are not ensembled from here on. heads = {} for head_name, head_config in sorted(self.config.heads.items()): if not head_config.weight: continue # Do not instantiate zero-weight heads. head_factory = { 'masked_msa': MaskedMsaHead, 'distogram': DistogramHead, 'structure_module': functools.partial( folding.StructureModule, compute_loss=compute_loss), 'predicted_lddt': PredictedLDDTHead, 'predicted_aligned_error': PredictedAlignedErrorHead, 'experimentally_resolved': ExperimentallyResolvedHead, }[head_name] heads[head_name] = (head_config, head_factory(head_config, self.global_config)) total_loss = 0. 
ret = {} ret['representations'] = representations def loss(module, head_config, ret, name, filter_ret=True): if filter_ret: value = ret[name] else: value = ret loss_output = module.loss(value, batch) ret[name].update(loss_output) loss = head_config.weight * ret[name]['loss'] return loss for name, (head_config, module) in heads.items(): # Skip PredictedLDDTHead and PredictedAlignedErrorHead until # StructureModule is executed. if name in ('predicted_lddt', 'predicted_aligned_error'): continue else: ret[name] = module(representations, batch, is_training) if 'representations' in ret[name]: # Extra representations from the head. Used by the structure module # to provide activations for the PredictedLDDTHead. representations.update(ret[name].pop('representations')) if compute_loss: total_loss += loss(module, head_config, ret, name) if self.config.heads.get('predicted_lddt.weight', 0.0): # Add PredictedLDDTHead after StructureModule executes. name = 'predicted_lddt' # Feed all previous results to give access to structure_module result. head_config, module = heads[name] ret[name] = module(representations, batch, is_training) if compute_loss: total_loss += loss(module, head_config, ret, name, filter_ret=False) if ('predicted_aligned_error' in self.config.heads and self.config.heads.get('predicted_aligned_error.weight', 0.0)): # Add PredictedAlignedErrorHead after StructureModule executes. name = 'predicted_aligned_error' # Feed all previous results to give access to structure_module result. head_config, module = heads[name] ret[name] = module(representations, batch, is_training) if compute_loss: total_loss += loss(module, head_config, ret, name, filter_ret=False) if compute_loss: return ret, total_loss else: return ret class AlphaFold(hk.Module): """AlphaFold model with recycling. Jumper et al. (2021) Suppl. Alg. 2 "Inference" """ def __init__(self, config, name='alphafold'): super().__init__(name=name) self.config = config self.global_config = config.global_config def __call__( self, batch, is_training, compute_loss=False, ensemble_representations=False, return_representations=False): """Run the AlphaFold model. Arguments: batch: Dictionary with inputs to the AlphaFold model. is_training: Whether the system is in training or inference mode. compute_loss: Whether to compute losses (requires extra features to be present in the batch and knowing the true structure). ensemble_representations: Whether to use ensembling of representations. return_representations: Whether to also return the intermediate representations. Returns: When compute_loss is True: a tuple of loss and output of AlphaFoldIteration. When compute_loss is False: just output of AlphaFoldIteration. The output of AlphaFoldIteration is a nested dictionary containing predictions from the various heads. 
""" impl = AlphaFoldIteration(self.config, self.global_config) batch_size, num_residues = batch['aatype'].shape def get_prev(ret): new_prev = { 'prev_pos': ret['structure_module']['final_atom_positions'], 'prev_msa_first_row': ret['representations']['msa_first_row'], 'prev_pair': ret['representations']['pair'], } return jax.tree_map(jax.lax.stop_gradient, new_prev) def do_call(prev, recycle_idx, compute_loss=compute_loss): if self.config.resample_msa_in_recycling: num_ensemble = batch_size // (self.config.num_recycle + 1) def slice_recycle_idx(x): start = recycle_idx * num_ensemble size = num_ensemble return jax.lax.dynamic_slice_in_dim(x, start, size, axis=0) ensembled_batch = jax.tree_map(slice_recycle_idx, batch) else: num_ensemble = batch_size ensembled_batch = batch non_ensembled_batch = jax.tree_map(lambda x: x, prev) return impl( ensembled_batch=ensembled_batch, non_ensembled_batch=non_ensembled_batch, is_training=is_training, compute_loss=compute_loss, ensemble_representations=ensemble_representations) prev = {} emb_config = self.config.embeddings_and_evoformer if emb_config.recycle_pos: prev['prev_pos'] = jnp.zeros( [num_residues, residue_constants.atom_type_num, 3]) if emb_config.recycle_features: prev['prev_msa_first_row'] = jnp.zeros( [num_residues, emb_config.msa_channel]) prev['prev_pair'] = jnp.zeros( [num_residues, num_residues, emb_config.pair_channel]) if self.config.num_recycle: if 'num_iter_recycling' in batch: # Training time: num_iter_recycling is in batch. # The value for each ensemble batch is the same, so arbitrarily taking # 0-th. num_iter = batch['num_iter_recycling'][0] # Add insurance that we will not run more # recyclings than the model is configured to run. num_iter = jnp.minimum(num_iter, self.config.num_recycle) else: # Eval mode or tests: use the maximum number of iterations. num_iter = self.config.num_recycle body = lambda x: (x[0] + 1, # pylint: disable=g-long-lambda get_prev(do_call(x[1], recycle_idx=x[0], compute_loss=False))) if hk.running_init(): # When initializing the Haiku module, run one iteration of the # while_loop to initialize the Haiku modules used in `body`. _, prev = body((0, prev)) else: _, prev = hk.while_loop( lambda x: x[0] < num_iter, body, (0, prev)) else: num_iter = 0 ret = do_call(prev=prev, recycle_idx=num_iter) if compute_loss: ret = ret[0], [ret[1]] if not return_representations: del (ret[0] if compute_loss else ret)['representations'] # pytype: disable=unsupported-operands return ret class TemplatePairStack(hk.Module): """Pair stack for the templates. Jumper et al. (2021) Suppl. Alg. 16 "TemplatePairStack" """ def __init__(self, config, global_config, name='template_pair_stack'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, pair_act, pair_mask, is_training, safe_key=None): """Builds TemplatePairStack module. Arguments: pair_act: Pair activations for single template, shape [N_res, N_res, c_t]. pair_mask: Pair mask, shape [N_res, N_res]. is_training: Whether the module is in training mode. safe_key: Safe key object encapsulating the random number generation key. Returns: Updated pair_act, shape [N_res, N_res, c_t]. 
""" if safe_key is None: safe_key = prng.SafeKey(hk.next_rng_key()) gc = self.global_config c = self.config if not c.num_block: return pair_act def block(x): """One block of the template pair stack.""" pair_act, safe_key = x dropout_wrapper_fn = functools.partial( dropout_wrapper, is_training=is_training, global_config=gc) safe_key, *sub_keys = safe_key.split(6) sub_keys = iter(sub_keys) pair_act = dropout_wrapper_fn( TriangleAttention(c.triangle_attention_starting_node, gc, name='triangle_attention_starting_node'), pair_act, pair_mask, next(sub_keys)) pair_act = dropout_wrapper_fn( TriangleAttention(c.triangle_attention_ending_node, gc, name='triangle_attention_ending_node'), pair_act, pair_mask, next(sub_keys)) pair_act = dropout_wrapper_fn( TriangleMultiplication(c.triangle_multiplication_outgoing, gc, name='triangle_multiplication_outgoing'), pair_act, pair_mask, next(sub_keys)) pair_act = dropout_wrapper_fn( TriangleMultiplication(c.triangle_multiplication_incoming, gc, name='triangle_multiplication_incoming'), pair_act, pair_mask, next(sub_keys)) pair_act = dropout_wrapper_fn( Transition(c.pair_transition, gc, name='pair_transition'), pair_act, pair_mask, next(sub_keys)) return pair_act, safe_key if gc.use_remat: block = hk.remat(block) res_stack = layer_stack.layer_stack(c.num_block)(block) pair_act, safe_key = res_stack((pair_act, safe_key)) return pair_act class Transition(hk.Module): """Transition layer. Jumper et al. (2021) Suppl. Alg. 9 "MSATransition" Jumper et al. (2021) Suppl. Alg. 15 "PairTransition" """ def __init__(self, config, global_config, name='transition_block'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, act, mask, is_training=True): """Builds Transition module. Arguments: act: A tensor of queries of size [batch_size, N_res, N_channel]. mask: A tensor denoting the mask of size [batch_size, N_res]. is_training: Whether the module is in training mode. Returns: A float32 tensor of size [batch_size, N_res, N_channel]. """ _, _, nc = act.shape num_intermediate = int(nc * self.config.num_intermediate_factor) mask = jnp.expand_dims(mask, axis=-1) act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='input_layer_norm')( act) transition_module = hk.Sequential([ common_modules.Linear( num_intermediate, initializer='relu', name='transition1'), jax.nn.relu, common_modules.Linear( nc, initializer=utils.final_init(self.global_config), name='transition2') ]) act = mapping.inference_subbatch( transition_module, self.global_config.subbatch_size, batched_args=[act], nonbatched_args=[], low_memory=not is_training) return act def glorot_uniform(): return hk.initializers.VarianceScaling(scale=1.0, mode='fan_avg', distribution='uniform') class Attention(hk.Module): """Multihead attention.""" def __init__(self, config, global_config, output_dim, name='attention'): super().__init__(name=name) self.config = config self.global_config = global_config self.output_dim = output_dim def __call__(self, q_data, m_data, mask, nonbatched_bias=None): """Builds Attention module. Arguments: q_data: A tensor of queries, shape [batch_size, N_queries, q_channels]. m_data: A tensor of memories from which the keys and values are projected, shape [batch_size, N_keys, m_channels]. mask: A mask for the attention, shape [batch_size, N_queries, N_keys]. nonbatched_bias: Shared bias, shape [N_queries, N_keys]. Returns: A float32 tensor of shape [batch_size, N_queries, output_dim]. 
""" # Sensible default for when the config keys are missing key_dim = self.config.get('key_dim', int(q_data.shape[-1])) value_dim = self.config.get('value_dim', int(m_data.shape[-1])) num_head = self.config.num_head assert key_dim % num_head == 0 assert value_dim % num_head == 0 key_dim = key_dim // num_head value_dim = value_dim // num_head q_weights = hk.get_parameter( 'query_w', shape=(q_data.shape[-1], num_head, key_dim), dtype=q_data.dtype, init=glorot_uniform()) k_weights = hk.get_parameter( 'key_w', shape=(m_data.shape[-1], num_head, key_dim), dtype=q_data.dtype, init=glorot_uniform()) v_weights = hk.get_parameter( 'value_w', shape=(m_data.shape[-1], num_head, value_dim), dtype=q_data.dtype, init=glorot_uniform()) q = jnp.einsum('bqa,ahc->bqhc', q_data, q_weights) * key_dim**(-0.5) k = jnp.einsum('bka,ahc->bkhc', m_data, k_weights) v = jnp.einsum('bka,ahc->bkhc', m_data, v_weights) logits = jnp.einsum('bqhc,bkhc->bhqk', q, k) if nonbatched_bias is not None: logits += jnp.expand_dims(nonbatched_bias, axis=0) logits = jnp.where(mask, logits, _SOFTMAX_MASK) weights = utils.stable_softmax(logits) weighted_avg = jnp.einsum('bhqk,bkhc->bqhc', weights, v) if self.global_config.zero_init: init = hk.initializers.Constant(0.0) else: init = glorot_uniform() if self.config.gating: gating_weights = hk.get_parameter( 'gating_w', shape=(q_data.shape[-1], num_head, value_dim), dtype=q_data.dtype, init=hk.initializers.Constant(0.0)) gating_bias = hk.get_parameter( 'gating_b', shape=(num_head, value_dim), dtype=q_data.dtype, init=hk.initializers.Constant(1.0)) gate_values = jnp.einsum('bqc, chv->bqhv', q_data, gating_weights) + gating_bias gate_values = jax.nn.sigmoid(gate_values) weighted_avg *= gate_values o_weights = hk.get_parameter( 'output_w', shape=(num_head, value_dim, self.output_dim), dtype=q_data.dtype, init=init) o_bias = hk.get_parameter( 'output_b', shape=(self.output_dim,), dtype=q_data.dtype, init=hk.initializers.Constant(0.0)) output = jnp.einsum('bqhc,hco->bqo', weighted_avg, o_weights) + o_bias return output class GlobalAttention(hk.Module): """Global attention. Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention" lines 2-7 """ def __init__(self, config, global_config, output_dim, name='attention'): super().__init__(name=name) self.config = config self.global_config = global_config self.output_dim = output_dim def __call__(self, q_data, m_data, q_mask): """Builds GlobalAttention module. Arguments: q_data: A tensor of queries with size [batch_size, N_queries, q_channels] m_data: A tensor of memories from which the keys and values projected. Size [batch_size, N_keys, m_channels] q_mask: A binary mask for q_data with zeros in the padded sequence elements and ones otherwise. Size [batch_size, N_queries, q_channels] (or broadcastable to this shape). Returns: A float32 tensor of size [batch_size, N_queries, output_dim]. 
""" # Sensible default for when the config keys are missing key_dim = self.config.get('key_dim', int(q_data.shape[-1])) value_dim = self.config.get('value_dim', int(m_data.shape[-1])) num_head = self.config.num_head assert key_dim % num_head == 0 assert value_dim % num_head == 0 key_dim = key_dim // num_head value_dim = value_dim // num_head q_weights = hk.get_parameter( 'query_w', shape=(q_data.shape[-1], num_head, key_dim), dtype=q_data.dtype, init=glorot_uniform()) k_weights = hk.get_parameter( 'key_w', shape=(m_data.shape[-1], key_dim), dtype=q_data.dtype, init=glorot_uniform()) v_weights = hk.get_parameter( 'value_w', shape=(m_data.shape[-1], value_dim), dtype=q_data.dtype, init=glorot_uniform()) v = jnp.einsum('bka,ac->bkc', m_data, v_weights) q_avg = utils.mask_mean(q_mask, q_data, axis=1) q = jnp.einsum('ba,ahc->bhc', q_avg, q_weights) * key_dim**(-0.5) k = jnp.einsum('bka,ac->bkc', m_data, k_weights) bias = q_mask[:, None, :, 0] logits = jnp.einsum('bhc,bkc->bhk', q, k) logits = jnp.where(bias, logits, _SOFTMAX_MASK) weights = utils.stable_softmax(logits) weighted_avg = jnp.einsum('bhk,bkc->bhc', weights, v) if self.global_config.zero_init: init = hk.initializers.Constant(0.0) else: init = glorot_uniform() o_weights = hk.get_parameter( 'output_w', shape=(num_head, value_dim, self.output_dim), dtype=q_data.dtype, init=init) o_bias = hk.get_parameter( 'output_b', shape=(self.output_dim,), dtype=q_data.dtype, init=hk.initializers.Constant(0.0)) if self.config.gating: gating_weights = hk.get_parameter( 'gating_w', shape=(q_data.shape[-1], num_head, value_dim), dtype=q_data.dtype, init=hk.initializers.Constant(0.0)) gating_bias = hk.get_parameter( 'gating_b', shape=(num_head, value_dim), dtype=q_data.dtype, init=hk.initializers.Constant(1.0)) gate_values = jnp.einsum('bqc, chv->bqhv', q_data, gating_weights) gate_values = jax.nn.sigmoid(gate_values + gating_bias) weighted_avg = weighted_avg[:, None] * gate_values output = jnp.einsum('bqhc,hco->bqo', weighted_avg, o_weights) + o_bias else: output = jnp.einsum('bhc,hco->bo', weighted_avg, o_weights) + o_bias output = output[:, None] return output class MSARowAttentionWithPairBias(hk.Module): """MSA per-row attention biased by the pair representation. Jumper et al. (2021) Suppl. Alg. 7 "MSARowAttentionWithPairBias" """ def __init__(self, config, global_config, name='msa_row_attention_with_pair_bias'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, msa_act, msa_mask, pair_act, is_training=False): """Builds MSARowAttentionWithPairBias module. Arguments: msa_act: [N_seq, N_res, c_m] MSA representation. msa_mask: [N_seq, N_res] mask of non-padded regions. pair_act: [N_res, N_res, c_z] pair representation. is_training: Whether the module is in training mode. Returns: Update to msa_act, shape [N_seq, N_res, c_m]. """ c = self.config assert len(msa_act.shape) == 3 assert len(msa_mask.shape) == 2 assert c.orientation == 'per_row' mask = msa_mask[:, None, None, :] assert len(mask.shape) == 4 msa_act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='query_norm')( msa_act) pair_act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='feat_2d_norm')( pair_act) init_factor = 1. 
/ jnp.sqrt(int(pair_act.shape[-1])) weights = hk.get_parameter( 'feat_2d_weights', shape=(pair_act.shape[-1], c.num_head), dtype=msa_act.dtype, init=hk.initializers.RandomNormal(stddev=init_factor)) nonbatched_bias = jnp.einsum('qkc,ch->hqk', pair_act, weights) attn_mod = Attention( c, self.global_config, msa_act.shape[-1]) msa_act = mapping.inference_subbatch( attn_mod, self.global_config.subbatch_size, batched_args=[msa_act, msa_act, mask], nonbatched_args=[nonbatched_bias], low_memory=not is_training) return msa_act class MSAColumnAttention(hk.Module): """MSA per-column attention. Jumper et al. (2021) Suppl. Alg. 8 "MSAColumnAttention" """ def __init__(self, config, global_config, name='msa_column_attention'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, msa_act, msa_mask, is_training=False): """Builds MSAColumnAttention module. Arguments: msa_act: [N_seq, N_res, c_m] MSA representation. msa_mask: [N_seq, N_res] mask of non-padded regions. is_training: Whether the module is in training mode. Returns: Update to msa_act, shape [N_seq, N_res, c_m] """ c = self.config assert len(msa_act.shape) == 3 assert len(msa_mask.shape) == 2 assert c.orientation == 'per_column' msa_act = jnp.swapaxes(msa_act, -2, -3) msa_mask = jnp.swapaxes(msa_mask, -1, -2) mask = msa_mask[:, None, None, :] assert len(mask.shape) == 4 msa_act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='query_norm')( msa_act) attn_mod = Attention( c, self.global_config, msa_act.shape[-1]) msa_act = mapping.inference_subbatch( attn_mod, self.global_config.subbatch_size, batched_args=[msa_act, msa_act, mask], nonbatched_args=[], low_memory=not is_training) msa_act = jnp.swapaxes(msa_act, -2, -3) return msa_act class MSAColumnGlobalAttention(hk.Module): """MSA per-column global attention. Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention" """ def __init__(self, config, global_config, name='msa_column_global_attention'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, msa_act, msa_mask, is_training=False): """Builds MSAColumnGlobalAttention module. Arguments: msa_act: [N_seq, N_res, c_m] MSA representation. msa_mask: [N_seq, N_res] mask of non-padded regions. is_training: Whether the module is in training mode. Returns: Update to msa_act, shape [N_seq, N_res, c_m]. """ c = self.config assert len(msa_act.shape) == 3 assert len(msa_mask.shape) == 2 assert c.orientation == 'per_column' msa_act = jnp.swapaxes(msa_act, -2, -3) msa_mask = jnp.swapaxes(msa_mask, -1, -2) msa_act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='query_norm')( msa_act) attn_mod = GlobalAttention( c, self.global_config, msa_act.shape[-1], name='attention') # [N_seq, N_res, 1] msa_mask = jnp.expand_dims(msa_mask, axis=-1) msa_act = mapping.inference_subbatch( attn_mod, self.global_config.subbatch_size, batched_args=[msa_act, msa_act, msa_mask], nonbatched_args=[], low_memory=not is_training) msa_act = jnp.swapaxes(msa_act, -2, -3) return msa_act class TriangleAttention(hk.Module): """Triangle Attention. Jumper et al. (2021) Suppl. Alg. 13 "TriangleAttentionStartingNode" Jumper et al. (2021) Suppl. Alg. 
14 "TriangleAttentionEndingNode" """ def __init__(self, config, global_config, name='triangle_attention'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, pair_act, pair_mask, is_training=False): """Builds TriangleAttention module. Arguments: pair_act: [N_res, N_res, c_z] pair activations tensor pair_mask: [N_res, N_res] mask of non-padded regions in the tensor. is_training: Whether the module is in training mode. Returns: Update to pair_act, shape [N_res, N_res, c_z]. """ c = self.config assert len(pair_act.shape) == 3 assert len(pair_mask.shape) == 2 assert c.orientation in ['per_row', 'per_column'] if c.orientation == 'per_column': pair_act = jnp.swapaxes(pair_act, -2, -3) pair_mask = jnp.swapaxes(pair_mask, -1, -2) mask = pair_mask[:, None, None, :] assert len(mask.shape) == 4 pair_act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='query_norm')( pair_act) init_factor = 1. / jnp.sqrt(int(pair_act.shape[-1])) weights = hk.get_parameter( 'feat_2d_weights', shape=(pair_act.shape[-1], c.num_head), dtype=pair_act.dtype, init=hk.initializers.RandomNormal(stddev=init_factor)) nonbatched_bias = jnp.einsum('qkc,ch->hqk', pair_act, weights) attn_mod = Attention( c, self.global_config, pair_act.shape[-1]) pair_act = mapping.inference_subbatch( attn_mod, self.global_config.subbatch_size, batched_args=[pair_act, pair_act, mask], nonbatched_args=[nonbatched_bias], low_memory=not is_training) if c.orientation == 'per_column': pair_act = jnp.swapaxes(pair_act, -2, -3) return pair_act class MaskedMsaHead(hk.Module): """Head to predict MSA at the masked locations. The MaskedMsaHead employs a BERT-style objective to reconstruct a masked version of the full MSA, based on a linear projection of the MSA representation. Jumper et al. (2021) Suppl. Sec. 1.9.9 "Masked MSA prediction" """ def __init__(self, config, global_config, name='masked_msa_head'): super().__init__(name=name) self.config = config self.global_config = global_config if global_config.multimer_mode: self.num_output = len(residue_constants.restypes_with_x_and_gap) else: self.num_output = config.num_output def __call__(self, representations, batch, is_training): """Builds MaskedMsaHead module. Arguments: representations: Dictionary of representations, must contain: * 'msa': MSA representation, shape [N_seq, N_res, c_m]. batch: Batch, unused. is_training: Whether the module is in training mode. Returns: Dictionary containing: * 'logits': logits of shape [N_seq, N_res, N_aatype] with (unnormalized) log probabilies of predicted aatype at position. """ del batch logits = common_modules.Linear( self.num_output, initializer=utils.final_init(self.global_config), name='logits')( representations['msa']) return dict(logits=logits) def loss(self, value, batch): errors = softmax_cross_entropy( labels=jax.nn.one_hot(batch['true_msa'], num_classes=self.num_output), logits=value['logits']) loss = (jnp.sum(errors * batch['bert_mask'], axis=(-2, -1)) / (1e-8 + jnp.sum(batch['bert_mask'], axis=(-2, -1)))) return {'loss': loss} class PredictedLDDTHead(hk.Module): """Head to predict the per-residue LDDT to be used as a confidence measure. Jumper et al. (2021) Suppl. Sec. 1.9.6 "Model confidence prediction (pLDDT)" Jumper et al. (2021) Suppl. Alg. 
29 "predictPerResidueLDDT_Ca" """ def __init__(self, config, global_config, name='predicted_lddt_head'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, representations, batch, is_training): """Builds PredictedLDDTHead module. Arguments: representations: Dictionary of representations, must contain: * 'structure_module': Single representation from the structure module, shape [N_res, c_s]. batch: Batch, unused. is_training: Whether the module is in training mode. Returns: Dictionary containing : * 'logits': logits of shape [N_res, N_bins] with (unnormalized) log probabilies of binned predicted lDDT. """ act = representations['structure_module'] act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='input_layer_norm')( act) act = common_modules.Linear( self.config.num_channels, initializer='relu', name='act_0')( act) act = jax.nn.relu(act) act = common_modules.Linear( self.config.num_channels, initializer='relu', name='act_1')( act) act = jax.nn.relu(act) logits = common_modules.Linear( self.config.num_bins, initializer=utils.final_init(self.global_config), name='logits')( act) # Shape (batch_size, num_res, num_bins) return dict(logits=logits) def loss(self, value, batch): # Shape (num_res, 37, 3) pred_all_atom_pos = value['structure_module']['final_atom_positions'] # Shape (num_res, 37, 3) true_all_atom_pos = batch['all_atom_positions'] # Shape (num_res, 37) all_atom_mask = batch['all_atom_mask'] # Shape (num_res,) lddt_ca = lddt.lddt( # Shape (batch_size, num_res, 3) predicted_points=pred_all_atom_pos[None, :, 1, :], # Shape (batch_size, num_res, 3) true_points=true_all_atom_pos[None, :, 1, :], # Shape (batch_size, num_res, 1) true_points_mask=all_atom_mask[None, :, 1:2].astype(jnp.float32), cutoff=15., per_residue=True) lddt_ca = jax.lax.stop_gradient(lddt_ca) num_bins = self.config.num_bins bin_index = jnp.floor(lddt_ca * num_bins).astype(jnp.int32) # protect against out of range for lddt_ca == 1 bin_index = jnp.minimum(bin_index, num_bins - 1) lddt_ca_one_hot = jax.nn.one_hot(bin_index, num_classes=num_bins) # Shape (num_res, num_channel) logits = value['predicted_lddt']['logits'] errors = softmax_cross_entropy(labels=lddt_ca_one_hot, logits=logits) # Shape (num_res,) mask_ca = all_atom_mask[:, residue_constants.atom_order['CA']] mask_ca = mask_ca.astype(jnp.float32) loss = jnp.sum(errors * mask_ca) / (jnp.sum(mask_ca) + 1e-8) if self.config.filter_by_resolution: # NMR & distillation have resolution = 0 loss *= ((batch['resolution'] >= self.config.min_resolution) & (batch['resolution'] <= self.config.max_resolution)).astype( jnp.float32) output = {'loss': loss} return output class PredictedAlignedErrorHead(hk.Module): """Head to predict the distance errors in the backbone alignment frames. Can be used to compute predicted TM-Score. Jumper et al. (2021) Suppl. Sec. 1.9.7 "TM-score prediction" """ def __init__(self, config, global_config, name='predicted_aligned_error_head'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, representations, batch, is_training): """Builds PredictedAlignedErrorHead module. Arguments: representations: Dictionary of representations, must contain: * 'pair': pair representation, shape [N_res, N_res, c_z]. batch: Batch, unused. is_training: Whether the module is in training mode. Returns: Dictionary containing: * logits: logits for aligned error, shape [N_res, N_res, N_bins]. 
* bin_breaks: array containing bin breaks, shape [N_bins - 1]. """ act = representations['pair'] # Shape (num_res, num_res, num_bins) logits = common_modules.Linear( self.config.num_bins, initializer=utils.final_init(self.global_config), name='logits')(act) # Shape (num_bins,) breaks = jnp.linspace( 0., self.config.max_error_bin, self.config.num_bins - 1) return dict(logits=logits, breaks=breaks) def loss(self, value, batch): # Shape (num_res, 7) predicted_affine = quat_affine.QuatAffine.from_tensor( value['structure_module']['final_affines']) # Shape (num_res, 7) true_affine = quat_affine.QuatAffine.from_tensor( batch['backbone_affine_tensor']) # Shape (num_res) mask = batch['backbone_affine_mask'] # Shape (num_res, num_res) square_mask = mask[:, None] * mask[None, :] num_bins = self.config.num_bins # (1, num_bins - 1) breaks = value['predicted_aligned_error']['breaks'] # (1, num_bins) logits = value['predicted_aligned_error']['logits'] # Compute the squared error for each alignment. def _local_frame_points(affine): points = [jnp.expand_dims(x, axis=-2) for x in affine.translation] return affine.invert_point(points, extra_dims=1) error_dist2_xyz = [ jnp.square(a - b) for a, b in zip(_local_frame_points(predicted_affine), _local_frame_points(true_affine))] error_dist2 = sum(error_dist2_xyz) # Shape (num_res, num_res) # First num_res are alignment frames, second num_res are the residues. error_dist2 = jax.lax.stop_gradient(error_dist2) sq_breaks = jnp.square(breaks) true_bins = jnp.sum(( error_dist2[..., None] > sq_breaks).astype(jnp.int32), axis=-1) errors = softmax_cross_entropy( labels=jax.nn.one_hot(true_bins, num_bins, axis=-1), logits=logits) loss = (jnp.sum(errors * square_mask, axis=(-2, -1)) / (1e-8 + jnp.sum(square_mask, axis=(-2, -1)))) if self.config.filter_by_resolution: # NMR & distillation have resolution = 0 loss *= ((batch['resolution'] >= self.config.min_resolution) & (batch['resolution'] <= self.config.max_resolution)).astype( jnp.float32) output = {'loss': loss} return output class ExperimentallyResolvedHead(hk.Module): """Predicts if an atom is experimentally resolved in a high-res structure. Only trained on high-resolution X-ray crystals & cryo-EM. Jumper et al. (2021) Suppl. Sec. 1.9.10 '"Experimentally resolved" prediction' """ def __init__(self, config, global_config, name='experimentally_resolved_head'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, representations, batch, is_training): """Builds ExperimentallyResolvedHead module. Arguments: representations: Dictionary of representations, must contain: * 'single': Single representation, shape [N_res, c_s]. batch: Batch, unused. is_training: Whether the module is in training mode. Returns: Dictionary containing: * 'logits': logits of shape [N_res, 37], log probability that an atom is resolved in atom37 representation, can be converted to probability by applying sigmoid. """ logits = common_modules.Linear( 37, # atom_exists.shape[-1] initializer=utils.final_init(self.global_config), name='logits')(representations['single']) return dict(logits=logits) def loss(self, value, batch): logits = value['logits'] assert len(logits.shape) == 2 # Does the atom appear in the amino acid? atom_exists = batch['atom37_atom_exists'] # Is the atom resolved in the experiment? 
Subset of atom_exists, # *except for OXT* all_atom_mask = batch['all_atom_mask'].astype(jnp.float32) xent = sigmoid_cross_entropy(labels=all_atom_mask, logits=logits) loss = jnp.sum(xent * atom_exists) / (1e-8 + jnp.sum(atom_exists)) if self.config.filter_by_resolution: # NMR & distillation examples have resolution = 0. loss *= ((batch['resolution'] >= self.config.min_resolution) & (batch['resolution'] <= self.config.max_resolution)).astype( jnp.float32) output = {'loss': loss} return output def _layer_norm(axis=-1, name='layer_norm'): return common_modules.LayerNorm( axis=axis, create_scale=True, create_offset=True, eps=1e-5, use_fast_variance=True, scale_init=hk.initializers.Constant(1.), offset_init=hk.initializers.Constant(0.), param_axis=axis, name=name) class TriangleMultiplication(hk.Module): """Triangle multiplication layer ("outgoing" or "incoming"). Jumper et al. (2021) Suppl. Alg. 11 "TriangleMultiplicationOutgoing" Jumper et al. (2021) Suppl. Alg. 12 "TriangleMultiplicationIncoming" """ def __init__(self, config, global_config, name='triangle_multiplication'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, left_act, left_mask, is_training=True): """Builds TriangleMultiplication module. Arguments: left_act: Pair activations, shape [N_res, N_res, c_z] left_mask: Pair mask, shape [N_res, N_res]. is_training: Whether the module is in training mode. Returns: Outputs, same shape/type as left_act. """ del is_training if self.config.fuse_projection_weights: return self._fused_triangle_multiplication(left_act, left_mask) else: return self._triangle_multiplication(left_act, left_mask) @hk.transparent def _triangle_multiplication(self, left_act, left_mask): """Implementation of TriangleMultiplication used in AF2 and AF-M<2.3.""" c = self.config gc = self.global_config mask = left_mask[..., None] act = common_modules.LayerNorm(axis=[-1], create_scale=True, create_offset=True, name='layer_norm_input')(left_act) input_act = act left_projection = common_modules.Linear( c.num_intermediate_channel, name='left_projection') left_proj_act = mask * left_projection(act) right_projection = common_modules.Linear( c.num_intermediate_channel, name='right_projection') right_proj_act = mask * right_projection(act) left_gate_values = jax.nn.sigmoid(common_modules.Linear( c.num_intermediate_channel, bias_init=1., initializer=utils.final_init(gc), name='left_gate')(act)) right_gate_values = jax.nn.sigmoid(common_modules.Linear( c.num_intermediate_channel, bias_init=1., initializer=utils.final_init(gc), name='right_gate')(act)) left_proj_act *= left_gate_values right_proj_act *= right_gate_values # "Outgoing" edges equation: 'ikc,jkc->ijc' # "Incoming" edges equation: 'kjc,kic->ijc' # Note on the Suppl. Alg. 
11 & 12 notation: # For the "outgoing" edges, a = left_proj_act and b = right_proj_act # For the "incoming" edges, it's swapped: # b = left_proj_act and a = right_proj_act act = jnp.einsum(c.equation, left_proj_act, right_proj_act) act = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='center_layer_norm')( act) output_channel = int(input_act.shape[-1]) act = common_modules.Linear( output_channel, initializer=utils.final_init(gc), name='output_projection')(act) gate_values = jax.nn.sigmoid(common_modules.Linear( output_channel, bias_init=1., initializer=utils.final_init(gc), name='gating_linear')(input_act)) act *= gate_values return act @hk.transparent def _fused_triangle_multiplication(self, left_act, left_mask): """TriangleMultiplication with fused projection weights.""" mask = left_mask[..., None] c = self.config gc = self.global_config left_act = _layer_norm(axis=-1, name='left_norm_input')(left_act) # Both left and right projections are fused into projection. projection = common_modules.Linear( 2*c.num_intermediate_channel, name='projection') proj_act = mask * projection(left_act) # Both left + right gate are fused into gate_values. gate_values = common_modules.Linear( 2 * c.num_intermediate_channel, name='gate', bias_init=1., initializer=utils.final_init(gc))(left_act) proj_act *= jax.nn.sigmoid(gate_values) left_proj_act = proj_act[:, :, :c.num_intermediate_channel] right_proj_act = proj_act[:, :, c.num_intermediate_channel:] act = jnp.einsum(c.equation, left_proj_act, right_proj_act) act = _layer_norm(axis=-1, name='center_norm')(act) output_channel = int(left_act.shape[-1]) act = common_modules.Linear( output_channel, initializer=utils.final_init(gc), name='output_projection')(act) gate_values = common_modules.Linear( output_channel, bias_init=1., initializer=utils.final_init(gc), name='gating_linear')(left_act) act *= jax.nn.sigmoid(gate_values) return act class DistogramHead(hk.Module): """Head to predict a distogram. Jumper et al. (2021) Suppl. Sec. 1.9.8 "Distogram prediction" """ def __init__(self, config, global_config, name='distogram_head'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, representations, batch, is_training): """Builds DistogramHead module. Arguments: representations: Dictionary of representations, must contain: * 'pair': pair representation, shape [N_res, N_res, c_z]. batch: Batch, unused. is_training: Whether the module is in training mode. Returns: Dictionary containing: * logits: logits for distogram, shape [N_res, N_res, N_bins]. * bin_breaks: array containing bin breaks, shape [N_bins - 1,]. 
""" half_logits = common_modules.Linear( self.config.num_bins, initializer=utils.final_init(self.global_config), name='half_logits')( representations['pair']) logits = half_logits + jnp.swapaxes(half_logits, -2, -3) breaks = jnp.linspace(self.config.first_break, self.config.last_break, self.config.num_bins - 1) return dict(logits=logits, bin_edges=breaks) def loss(self, value, batch): return _distogram_log_loss(value['logits'], value['bin_edges'], batch, self.config.num_bins) def _distogram_log_loss(logits, bin_edges, batch, num_bins): """Log loss of a distogram.""" assert len(logits.shape) == 3 positions = batch['pseudo_beta'] mask = batch['pseudo_beta_mask'] assert positions.shape[-1] == 3 sq_breaks = jnp.square(bin_edges) dist2 = jnp.sum( jnp.square( jnp.expand_dims(positions, axis=-2) - jnp.expand_dims(positions, axis=-3)), axis=-1, keepdims=True) true_bins = jnp.sum(dist2 > sq_breaks, axis=-1) errors = softmax_cross_entropy( labels=jax.nn.one_hot(true_bins, num_bins), logits=logits) square_mask = jnp.expand_dims(mask, axis=-2) * jnp.expand_dims(mask, axis=-1) avg_error = ( jnp.sum(errors * square_mask, axis=(-2, -1)) / (1e-6 + jnp.sum(square_mask, axis=(-2, -1)))) dist2 = dist2[..., 0] return dict(loss=avg_error, true_dist=jnp.sqrt(1e-6 + dist2)) class OuterProductMean(hk.Module): """Computes mean outer product. Jumper et al. (2021) Suppl. Alg. 10 "OuterProductMean" """ def __init__(self, config, global_config, num_output_channel, name='outer_product_mean'): super().__init__(name=name) self.global_config = global_config self.config = config self.num_output_channel = num_output_channel def __call__(self, act, mask, is_training=True): """Builds OuterProductMean module. Arguments: act: MSA representation, shape [N_seq, N_res, c_m]. mask: MSA mask, shape [N_seq, N_res]. is_training: Whether the module is in training mode. Returns: Update to pair representation, shape [N_res, N_res, c_z]. """ gc = self.global_config c = self.config mask = mask[..., None] act = common_modules.LayerNorm([-1], True, True, name='layer_norm_input')(act) left_act = mask * common_modules.Linear( c.num_outer_channel, initializer='linear', name='left_projection')( act) right_act = mask * common_modules.Linear( c.num_outer_channel, initializer='linear', name='right_projection')( act) if gc.zero_init: init_w = hk.initializers.Constant(0.0) else: init_w = hk.initializers.VarianceScaling(scale=2., mode='fan_in') output_w = hk.get_parameter( 'output_w', shape=(c.num_outer_channel, c.num_outer_channel, self.num_output_channel), dtype=act.dtype, init=init_w) output_b = hk.get_parameter( 'output_b', shape=(self.num_output_channel,), dtype=act.dtype, init=hk.initializers.Constant(0.0)) def compute_chunk(left_act): # This is equivalent to # # act = jnp.einsum('abc,ade->dceb', left_act, right_act) # act = jnp.einsum('dceb,cef->bdf', act, output_w) + output_b # # but faster. left_act = jnp.transpose(left_act, [0, 2, 1]) act = jnp.einsum('acb,ade->dceb', left_act, right_act) act = jnp.einsum('dceb,cef->dbf', act, output_w) + output_b return jnp.transpose(act, [1, 0, 2]) act = mapping.inference_subbatch( compute_chunk, c.chunk_size, batched_args=[left_act], nonbatched_args=[], low_memory=True, input_subbatch_dim=1, output_subbatch_dim=0) epsilon = 1e-3 norm = jnp.einsum('abc,adc->bdc', mask, mask) act /= epsilon + norm return act def dgram_from_positions(positions, num_bins, min_bin, max_bin): """Compute distogram from amino acid positions. Arguments: positions: [N_res, 3] Position coordinates. 
num_bins: The number of bins in the distogram. min_bin: The left edge of the first bin. max_bin: The left edge of the final bin. The final bin catches everything larger than `max_bin`. Returns: Distogram with the specified number of bins. """ def squared_difference(x, y): return jnp.square(x - y) lower_breaks = jnp.linspace(min_bin, max_bin, num_bins) lower_breaks = jnp.square(lower_breaks) upper_breaks = jnp.concatenate([lower_breaks[1:], jnp.array([1e8], dtype=jnp.float32)], axis=-1) dist2 = jnp.sum( squared_difference( jnp.expand_dims(positions, axis=-2), jnp.expand_dims(positions, axis=-3)), axis=-1, keepdims=True) dgram = ((dist2 > lower_breaks).astype(jnp.float32) * (dist2 < upper_breaks).astype(jnp.float32)) return dgram def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks): """Create pseudo beta features.""" is_gly = jnp.equal(aatype, residue_constants.restype_order['G']) ca_idx = residue_constants.atom_order['CA'] cb_idx = residue_constants.atom_order['CB'] pseudo_beta = jnp.where( jnp.tile(is_gly[..., None], [1] * len(is_gly.shape) + [3]), all_atom_positions[..., ca_idx, :], all_atom_positions[..., cb_idx, :]) if all_atom_masks is not None: pseudo_beta_mask = jnp.where( is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx]) pseudo_beta_mask = pseudo_beta_mask.astype(jnp.float32) return pseudo_beta, pseudo_beta_mask else: return pseudo_beta class EvoformerIteration(hk.Module): """Single iteration (block) of Evoformer stack. Jumper et al. (2021) Suppl. Alg. 6 "EvoformerStack" lines 2-10 """ def __init__(self, config, global_config, is_extra_msa, name='evoformer_iteration'): super().__init__(name=name) self.config = config self.global_config = global_config self.is_extra_msa = is_extra_msa def __call__(self, activations, masks, is_training=True, safe_key=None): """Builds EvoformerIteration module. Arguments: activations: Dictionary containing activations: * 'msa': MSA activations, shape [N_seq, N_res, c_m]. * 'pair': pair activations, shape [N_res, N_res, c_z]. masks: Dictionary of masks: * 'msa': MSA mask, shape [N_seq, N_res]. * 'pair': pair mask, shape [N_res, N_res]. is_training: Whether the module is in training mode. safe_key: prng.SafeKey encapsulating rng key. Returns: Outputs, same shape/type as act. 
""" c = self.config gc = self.global_config msa_act, pair_act = activations['msa'], activations['pair'] if safe_key is None: safe_key = prng.SafeKey(hk.next_rng_key()) msa_mask, pair_mask = masks['msa'], masks['pair'] dropout_wrapper_fn = functools.partial( dropout_wrapper, is_training=is_training, global_config=gc) safe_key, *sub_keys = safe_key.split(10) sub_keys = iter(sub_keys) outer_module = OuterProductMean( config=c.outer_product_mean, global_config=self.global_config, num_output_channel=int(pair_act.shape[-1]), name='outer_product_mean') if c.outer_product_mean.first: pair_act = dropout_wrapper_fn( outer_module, msa_act, msa_mask, safe_key=next(sub_keys), output_act=pair_act) msa_act = dropout_wrapper_fn( MSARowAttentionWithPairBias( c.msa_row_attention_with_pair_bias, gc, name='msa_row_attention_with_pair_bias'), msa_act, msa_mask, safe_key=next(sub_keys), pair_act=pair_act) if not self.is_extra_msa: attn_mod = MSAColumnAttention( c.msa_column_attention, gc, name='msa_column_attention') else: attn_mod = MSAColumnGlobalAttention( c.msa_column_attention, gc, name='msa_column_global_attention') msa_act = dropout_wrapper_fn( attn_mod, msa_act, msa_mask, safe_key=next(sub_keys)) msa_act = dropout_wrapper_fn( Transition(c.msa_transition, gc, name='msa_transition'), msa_act, msa_mask, safe_key=next(sub_keys)) if not c.outer_product_mean.first: pair_act = dropout_wrapper_fn( outer_module, msa_act, msa_mask, safe_key=next(sub_keys), output_act=pair_act) pair_act = dropout_wrapper_fn( TriangleMultiplication(c.triangle_multiplication_outgoing, gc, name='triangle_multiplication_outgoing'), pair_act, pair_mask, safe_key=next(sub_keys)) pair_act = dropout_wrapper_fn( TriangleMultiplication(c.triangle_multiplication_incoming, gc, name='triangle_multiplication_incoming'), pair_act, pair_mask, safe_key=next(sub_keys)) pair_act = dropout_wrapper_fn( TriangleAttention(c.triangle_attention_starting_node, gc, name='triangle_attention_starting_node'), pair_act, pair_mask, safe_key=next(sub_keys)) pair_act = dropout_wrapper_fn( TriangleAttention(c.triangle_attention_ending_node, gc, name='triangle_attention_ending_node'), pair_act, pair_mask, safe_key=next(sub_keys)) pair_act = dropout_wrapper_fn( Transition(c.pair_transition, gc, name='pair_transition'), pair_act, pair_mask, safe_key=next(sub_keys)) return {'msa': msa_act, 'pair': pair_act} class EmbeddingsAndEvoformer(hk.Module): """Embeds the input data and runs Evoformer. Produces the MSA, single and pair representations. Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5-18 """ def __init__(self, config, global_config, name='evoformer'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, batch, is_training, safe_key=None): c = self.config gc = self.global_config if safe_key is None: safe_key = prng.SafeKey(hk.next_rng_key()) # Embed clustered MSA. # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5 # Jumper et al. (2021) Suppl. Alg. 
3 "InputEmbedder" preprocess_1d = common_modules.Linear( c.msa_channel, name='preprocess_1d')( batch['target_feat']) preprocess_msa = common_modules.Linear( c.msa_channel, name='preprocess_msa')( batch['msa_feat']) msa_activations = jnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa left_single = common_modules.Linear( c.pair_channel, name='left_single')( batch['target_feat']) right_single = common_modules.Linear( c.pair_channel, name='right_single')( batch['target_feat']) pair_activations = left_single[:, None] + right_single[None] mask_2d = batch['seq_mask'][:, None] * batch['seq_mask'][None, :] # Inject previous outputs for recycling. # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 6 # Jumper et al. (2021) Suppl. Alg. 32 "RecyclingEmbedder" if c.recycle_pos: prev_pseudo_beta = pseudo_beta_fn( batch['aatype'], batch['prev_pos'], None) dgram = dgram_from_positions(prev_pseudo_beta, **self.config.prev_pos) pair_activations += common_modules.Linear( c.pair_channel, name='prev_pos_linear')( dgram) if c.recycle_features: prev_msa_first_row = common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='prev_msa_first_row_norm')( batch['prev_msa_first_row']) msa_activations = msa_activations.at[0].add(prev_msa_first_row) pair_activations += common_modules.LayerNorm( axis=[-1], create_scale=True, create_offset=True, name='prev_pair_norm')( batch['prev_pair']) # Relative position encoding. # Jumper et al. (2021) Suppl. Alg. 4 "relpos" # Jumper et al. (2021) Suppl. Alg. 5 "one_hot" if c.max_relative_feature: # Add one-hot-encoded clipped residue distances to the pair activations. pos = batch['residue_index'] offset = pos[:, None] - pos[None, :] rel_pos = jax.nn.one_hot( jnp.clip( offset + c.max_relative_feature, a_min=0, a_max=2 * c.max_relative_feature), 2 * c.max_relative_feature + 1) pair_activations += common_modules.Linear( c.pair_channel, name='pair_activiations')( rel_pos) # Embed templates into the pair activations. # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-13 if c.template.enabled: template_batch = {k: batch[k] for k in batch if k.startswith('template_')} template_pair_representation = TemplateEmbedding(c.template, gc)( pair_activations, template_batch, mask_2d, is_training=is_training) pair_activations += template_pair_representation # Embed extra MSA features. # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 14-16 extra_msa_feat = create_extra_msa_feature(batch) extra_msa_activations = common_modules.Linear( c.extra_msa_channel, name='extra_msa_activations')( extra_msa_feat) # Extra MSA Stack. # Jumper et al. (2021) Suppl. Alg. 
18 "ExtraMsaStack" extra_msa_stack_input = { 'msa': extra_msa_activations, 'pair': pair_activations, } extra_msa_stack_iteration = EvoformerIteration( c.evoformer, gc, is_extra_msa=True, name='extra_msa_stack') def extra_msa_stack_fn(x): act, safe_key = x safe_key, safe_subkey = safe_key.split() extra_evoformer_output = extra_msa_stack_iteration( activations=act, masks={ 'msa': batch['extra_msa_mask'], 'pair': mask_2d }, is_training=is_training, safe_key=safe_subkey) return (extra_evoformer_output, safe_key) if gc.use_remat: extra_msa_stack_fn = hk.remat(extra_msa_stack_fn) extra_msa_stack = layer_stack.layer_stack( c.extra_msa_stack_num_block)( extra_msa_stack_fn) extra_msa_output, safe_key = extra_msa_stack( (extra_msa_stack_input, safe_key)) pair_activations = extra_msa_output['pair'] evoformer_input = { 'msa': msa_activations, 'pair': pair_activations, } evoformer_masks = {'msa': batch['msa_mask'], 'pair': mask_2d} # Append num_templ rows to msa_activations with template embeddings. # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 7-8 if c.template.enabled and c.template.embed_torsion_angles: num_templ, num_res = batch['template_aatype'].shape # Embed the templates aatypes. aatype_one_hot = jax.nn.one_hot(batch['template_aatype'], 22, axis=-1) # Embed the templates aatype, torsion angles and masks. # Shape (templates, residues, msa_channels) ret = all_atom.atom37_to_torsion_angles( aatype=batch['template_aatype'], all_atom_pos=batch['template_all_atom_positions'], all_atom_mask=batch['template_all_atom_masks'], # Ensure consistent behaviour during testing: placeholder_for_undefined=not gc.zero_init) template_features = jnp.concatenate([ aatype_one_hot, jnp.reshape( ret['torsion_angles_sin_cos'], [num_templ, num_res, 14]), jnp.reshape( ret['alt_torsion_angles_sin_cos'], [num_templ, num_res, 14]), ret['torsion_angles_mask']], axis=-1) template_activations = common_modules.Linear( c.msa_channel, initializer='relu', name='template_single_embedding')( template_features) template_activations = jax.nn.relu(template_activations) template_activations = common_modules.Linear( c.msa_channel, initializer='relu', name='template_projection')( template_activations) # Concatenate the templates to the msa. evoformer_input['msa'] = jnp.concatenate( [evoformer_input['msa'], template_activations], axis=0) # Concatenate templates masks to the msa masks. # Use mask from the psi angle, as it only depends on the backbone atoms # from a single residue. torsion_angle_mask = ret['torsion_angles_mask'][:, :, 2] torsion_angle_mask = torsion_angle_mask.astype( evoformer_masks['msa'].dtype) evoformer_masks['msa'] = jnp.concatenate( [evoformer_masks['msa'], torsion_angle_mask], axis=0) # Main trunk of the network # Jumper et al. (2021) Suppl. Alg. 
2 "Inference" lines 17-18 evoformer_iteration = EvoformerIteration( c.evoformer, gc, is_extra_msa=False, name='evoformer_iteration') def evoformer_fn(x): act, safe_key = x safe_key, safe_subkey = safe_key.split() evoformer_output = evoformer_iteration( activations=act, masks=evoformer_masks, is_training=is_training, safe_key=safe_subkey) return (evoformer_output, safe_key) if gc.use_remat: evoformer_fn = hk.remat(evoformer_fn) evoformer_stack = layer_stack.layer_stack(c.evoformer_num_block)( evoformer_fn) evoformer_output, safe_key = evoformer_stack( (evoformer_input, safe_key)) msa_activations = evoformer_output['msa'] pair_activations = evoformer_output['pair'] single_activations = common_modules.Linear( c.seq_channel, name='single_activations')( msa_activations[0]) num_sequences = batch['msa_feat'].shape[0] output = { 'single': single_activations, 'pair': pair_activations, # Crop away template rows such that they are not used in MaskedMsaHead. 'msa': msa_activations[:num_sequences, :, :], 'msa_first_row': msa_activations[0], } return output class SingleTemplateEmbedding(hk.Module): """Embeds a single template. Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9+11 """ def __init__(self, config, global_config, name='single_template_embedding'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, query_embedding, batch, mask_2d, is_training): """Build the single template embedding. Arguments: query_embedding: Query pair representation, shape [N_res, N_res, c_z]. batch: A batch of template features (note the template dimension has been stripped out as this module only runs over a single template). mask_2d: Padding mask (Note: this doesn't care if a template exists, unlike the template_pseudo_beta_mask). is_training: Whether the module is in training mode. Returns: A template embedding [N_res, N_res, c_z]. """ assert mask_2d.dtype == query_embedding.dtype dtype = query_embedding.dtype num_res = batch['template_aatype'].shape[0] num_channels = (self.config.template_pair_stack .triangle_attention_ending_node.value_dim) template_mask = batch['template_pseudo_beta_mask'] template_mask_2d = template_mask[:, None] * template_mask[None, :] template_mask_2d = template_mask_2d.astype(dtype) template_dgram = dgram_from_positions(batch['template_pseudo_beta'], **self.config.dgram_features) template_dgram = template_dgram.astype(dtype) to_concat = [template_dgram, template_mask_2d[:, :, None]] aatype = jax.nn.one_hot(batch['template_aatype'], 22, axis=-1, dtype=dtype) to_concat.append(jnp.tile(aatype[None, :, :], [num_res, 1, 1])) to_concat.append(jnp.tile(aatype[:, None, :], [1, num_res, 1])) n, ca, c = [residue_constants.atom_order[a] for a in ('N', 'CA', 'C')] rot, trans = quat_affine.make_transform_from_reference( n_xyz=batch['template_all_atom_positions'][:, n], ca_xyz=batch['template_all_atom_positions'][:, ca], c_xyz=batch['template_all_atom_positions'][:, c]) affines = quat_affine.QuatAffine( quaternion=quat_affine.rot_to_quat(rot, unstack_inputs=True), translation=trans, rotation=rot, unstack_inputs=True) points = [jnp.expand_dims(x, axis=-2) for x in affines.translation] affine_vec = affines.invert_point(points, extra_dims=1) inv_distance_scalar = jax.lax.rsqrt( 1e-6 + sum([jnp.square(x) for x in affine_vec])) # Backbone affine mask: whether the residue has C, CA, N # (the template mask defined above only considers pseudo CB). 
template_mask = ( batch['template_all_atom_masks'][..., n] * batch['template_all_atom_masks'][..., ca] * batch['template_all_atom_masks'][..., c]) template_mask_2d = template_mask[:, None] * template_mask[None, :] inv_distance_scalar *= template_mask_2d.astype(inv_distance_scalar.dtype) unit_vector = [(x * inv_distance_scalar)[..., None] for x in affine_vec] unit_vector = [x.astype(dtype) for x in unit_vector] template_mask_2d = template_mask_2d.astype(dtype) if not self.config.use_template_unit_vector: unit_vector = [jnp.zeros_like(x) for x in unit_vector] to_concat.extend(unit_vector) to_concat.append(template_mask_2d[..., None]) act = jnp.concatenate(to_concat, axis=-1) # Mask out non-template regions so we don't get arbitrary values in the # distogram for these regions. act *= template_mask_2d[..., None] # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 9 act = common_modules.Linear( num_channels, initializer='relu', name='embedding2d')( act) # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 11 act = TemplatePairStack( self.config.template_pair_stack, self.global_config)( act, mask_2d, is_training) act = common_modules.LayerNorm([-1], True, True, name='output_layer_norm')(act) return act class TemplateEmbedding(hk.Module): """Embeds a set of templates. Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-12 Jumper et al. (2021) Suppl. Alg. 17 "TemplatePointwiseAttention" """ def __init__(self, config, global_config, name='template_embedding'): super().__init__(name=name) self.config = config self.global_config = global_config def __call__(self, query_embedding, template_batch, mask_2d, is_training): """Build TemplateEmbedding module. Arguments: query_embedding: Query pair representation, shape [N_res, N_res, c_z]. template_batch: A batch of template features. mask_2d: Padding mask (Note: this doesn't care if a template exists, unlike the template_pseudo_beta_mask). is_training: Whether the module is in training mode. Returns: A template embedding [N_res, N_res, c_z]. """ num_templates = template_batch['template_mask'].shape[0] num_channels = (self.config.template_pair_stack .triangle_attention_ending_node.value_dim) num_res = query_embedding.shape[0] dtype = query_embedding.dtype template_mask = template_batch['template_mask'] template_mask = template_mask.astype(dtype) query_num_channels = query_embedding.shape[-1] # Make sure the weights are shared across templates by constructing the # embedder here. # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-12 template_embedder = SingleTemplateEmbedding(self.config, self.global_config) def map_fn(batch): return template_embedder(query_embedding, batch, mask_2d, is_training) template_pair_representation = mapping.sharded_map(map_fn, in_axes=0)( template_batch) # Cross attend from the query to the templates along the residue # dimension by flattening everything else into the batch dimension. # Jumper et al. (2021) Suppl. Alg. 
17 "TemplatePointwiseAttention" flat_query = jnp.reshape(query_embedding, [num_res * num_res, 1, query_num_channels]) flat_templates = jnp.reshape( jnp.transpose(template_pair_representation, [1, 2, 0, 3]), [num_res * num_res, num_templates, num_channels]) mask = template_mask[None, None, None, :] template_pointwise_attention_module = Attention( self.config.attention, self.global_config, query_num_channels) nonbatched_args = [mask] batched_args = [flat_query, flat_templates] embedding = mapping.inference_subbatch( template_pointwise_attention_module, self.config.subbatch_size, batched_args=batched_args, nonbatched_args=nonbatched_args, low_memory=not is_training) embedding = jnp.reshape(embedding, [num_res, num_res, query_num_channels]) # No gradients if no templates. embedding *= (jnp.sum(template_mask) > 0.).astype(embedding.dtype) return embedding
alphafold-main
alphafold/model/modules.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Transformations for 3D coordinates. This Module contains objects for representing Vectors (Vecs), Rotation Matrices (Rots) and proper Rigid transformation (Rigids). These are represented as named tuples with arrays for each entry, for example a set of [N, M] points would be represented as a Vecs object with arrays of shape [N, M] for x, y and z. This is being done to improve readability by making it very clear what objects are geometric objects rather than relying on comments and array shapes. Another reason for this is to avoid using matrix multiplication primitives like matmul or einsum, on modern accelerator hardware these can end up on specialized cores such as tensor cores on GPU or the MXU on cloud TPUs, this often involves lower computational precision which can be problematic for coordinate geometry. Also these cores are typically optimized for larger matrices than 3 dimensional, this code is written to avoid any unintended use of these cores on both GPUs and TPUs. """ import collections from typing import List from alphafold.model import quat_affine import jax.numpy as jnp import tree # Array of 3-component vectors, stored as individual array for # each component. Vecs = collections.namedtuple('Vecs', ['x', 'y', 'z']) # Array of 3x3 rotation matrices, stored as individual array for # each component. Rots = collections.namedtuple('Rots', ['xx', 'xy', 'xz', 'yx', 'yy', 'yz', 'zx', 'zy', 'zz']) # Array of rigid 3D transformations, stored as array of rotations and # array of translations. Rigids = collections.namedtuple('Rigids', ['rot', 'trans']) def squared_difference(x, y): return jnp.square(x - y) def invert_rigids(r: Rigids) -> Rigids: """Computes group inverse of rigid transformations 'r'.""" inv_rots = invert_rots(r.rot) t = rots_mul_vecs(inv_rots, r.trans) inv_trans = Vecs(-t.x, -t.y, -t.z) return Rigids(inv_rots, inv_trans) def invert_rots(m: Rots) -> Rots: """Computes inverse of rotations 'm'.""" return Rots(m.xx, m.yx, m.zx, m.xy, m.yy, m.zy, m.xz, m.yz, m.zz) def rigids_from_3_points( point_on_neg_x_axis: Vecs, # shape (...) origin: Vecs, # shape (...) point_on_xy_plane: Vecs, # shape (...) ) -> Rigids: # shape (...) """Create Rigids from 3 points. Jumper et al. (2021) Suppl. Alg. 21 "rigidFrom3Points" This creates a set of rigid transformations from 3 points by Gram Schmidt orthogonalization. Args: point_on_neg_x_axis: Vecs corresponding to points on the negative x axis origin: Origin of resulting rigid transformations point_on_xy_plane: Vecs corresponding to points in the xy plane Returns: Rigid transformations from global frame to local frames derived from the input points. 
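  Example (editor's illustrative sketch, not part of the original docstring;
  the coordinates are arbitrary):

    origin = vecs_from_tensor(jnp.array([[0., 0., 0.]]))
    neg_x_point = vecs_from_tensor(jnp.array([[-1., 0., 0.]]))
    xy_point = vecs_from_tensor(jnp.array([[0., 1., 0.]]))
    frame = rigids_from_3_points(neg_x_point, origin, xy_point)
    # For this particular choice of points the rotation is the identity and
    # the translation is the origin.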
""" m = rots_from_two_vecs( e0_unnormalized=vecs_sub(origin, point_on_neg_x_axis), e1_unnormalized=vecs_sub(point_on_xy_plane, origin)) return Rigids(rot=m, trans=origin) def rigids_from_list(l: List[jnp.ndarray]) -> Rigids: """Converts flat list of arrays to rigid transformations.""" assert len(l) == 12 return Rigids(Rots(*(l[:9])), Vecs(*(l[9:]))) def rigids_from_quataffine(a: quat_affine.QuatAffine) -> Rigids: """Converts QuatAffine object to the corresponding Rigids object.""" return Rigids(Rots(*tree.flatten(a.rotation)), Vecs(*a.translation)) def rigids_from_tensor4x4( m: jnp.ndarray # shape (..., 4, 4) ) -> Rigids: # shape (...) """Construct Rigids object from an 4x4 array. Here the 4x4 is representing the transformation in homogeneous coordinates. Args: m: Array representing transformations in homogeneous coordinates. Returns: Rigids object corresponding to transformations m """ assert m.shape[-1] == 4 assert m.shape[-2] == 4 return Rigids( Rots(m[..., 0, 0], m[..., 0, 1], m[..., 0, 2], m[..., 1, 0], m[..., 1, 1], m[..., 1, 2], m[..., 2, 0], m[..., 2, 1], m[..., 2, 2]), Vecs(m[..., 0, 3], m[..., 1, 3], m[..., 2, 3])) def rigids_from_tensor_flat9( m: jnp.ndarray # shape (..., 9) ) -> Rigids: # shape (...) """Flat9 encoding: first two columns of rotation matrix + translation.""" assert m.shape[-1] == 9 e0 = Vecs(m[..., 0], m[..., 1], m[..., 2]) e1 = Vecs(m[..., 3], m[..., 4], m[..., 5]) trans = Vecs(m[..., 6], m[..., 7], m[..., 8]) return Rigids(rot=rots_from_two_vecs(e0, e1), trans=trans) def rigids_from_tensor_flat12( m: jnp.ndarray # shape (..., 12) ) -> Rigids: # shape (...) """Flat12 encoding: rotation matrix (9 floats) + translation (3 floats).""" assert m.shape[-1] == 12 x = jnp.moveaxis(m, -1, 0) # Unstack return Rigids(Rots(*x[:9]), Vecs(*x[9:])) def rigids_mul_rigids(a: Rigids, b: Rigids) -> Rigids: """Group composition of Rigids 'a' and 'b'.""" return Rigids( rots_mul_rots(a.rot, b.rot), vecs_add(a.trans, rots_mul_vecs(a.rot, b.trans))) def rigids_mul_rots(r: Rigids, m: Rots) -> Rigids: """Compose rigid transformations 'r' with rotations 'm'.""" return Rigids(rots_mul_rots(r.rot, m), r.trans) def rigids_mul_vecs(r: Rigids, v: Vecs) -> Vecs: """Apply rigid transforms 'r' to points 'v'.""" return vecs_add(rots_mul_vecs(r.rot, v), r.trans) def rigids_to_list(r: Rigids) -> List[jnp.ndarray]: """Turn Rigids into flat list, inverse of 'rigids_from_list'.""" return list(r.rot) + list(r.trans) def rigids_to_quataffine(r: Rigids) -> quat_affine.QuatAffine: """Convert Rigids r into QuatAffine, inverse of 'rigids_from_quataffine'.""" return quat_affine.QuatAffine( quaternion=None, rotation=[[r.rot.xx, r.rot.xy, r.rot.xz], [r.rot.yx, r.rot.yy, r.rot.yz], [r.rot.zx, r.rot.zy, r.rot.zz]], translation=[r.trans.x, r.trans.y, r.trans.z]) def rigids_to_tensor_flat9( r: Rigids # shape (...) ) -> jnp.ndarray: # shape (..., 9) """Flat9 encoding: first two columns of rotation matrix + translation.""" return jnp.stack( [r.rot.xx, r.rot.yx, r.rot.zx, r.rot.xy, r.rot.yy, r.rot.zy] + list(r.trans), axis=-1) def rigids_to_tensor_flat12( r: Rigids # shape (...) ) -> jnp.ndarray: # shape (..., 12) """Flat12 encoding: rotation matrix (9 floats) + translation (3 floats).""" return jnp.stack(list(r.rot) + list(r.trans), axis=-1) def rots_from_tensor3x3( m: jnp.ndarray, # shape (..., 3, 3) ) -> Rots: # shape (...) 
"""Convert rotations represented as (3, 3) array to Rots.""" assert m.shape[-1] == 3 assert m.shape[-2] == 3 return Rots(m[..., 0, 0], m[..., 0, 1], m[..., 0, 2], m[..., 1, 0], m[..., 1, 1], m[..., 1, 2], m[..., 2, 0], m[..., 2, 1], m[..., 2, 2]) def rots_from_two_vecs(e0_unnormalized: Vecs, e1_unnormalized: Vecs) -> Rots: """Create rotation matrices from unnormalized vectors for the x and y-axes. This creates a rotation matrix from two vectors using Gram-Schmidt orthogonalization. Args: e0_unnormalized: vectors lying along x-axis of resulting rotation e1_unnormalized: vectors lying in xy-plane of resulting rotation Returns: Rotations resulting from Gram-Schmidt procedure. """ # Normalize the unit vector for the x-axis, e0. e0 = vecs_robust_normalize(e0_unnormalized) # make e1 perpendicular to e0. c = vecs_dot_vecs(e1_unnormalized, e0) e1 = Vecs(e1_unnormalized.x - c * e0.x, e1_unnormalized.y - c * e0.y, e1_unnormalized.z - c * e0.z) e1 = vecs_robust_normalize(e1) # Compute e2 as cross product of e0 and e1. e2 = vecs_cross_vecs(e0, e1) return Rots(e0.x, e1.x, e2.x, e0.y, e1.y, e2.y, e0.z, e1.z, e2.z) def rots_mul_rots(a: Rots, b: Rots) -> Rots: """Composition of rotations 'a' and 'b'.""" c0 = rots_mul_vecs(a, Vecs(b.xx, b.yx, b.zx)) c1 = rots_mul_vecs(a, Vecs(b.xy, b.yy, b.zy)) c2 = rots_mul_vecs(a, Vecs(b.xz, b.yz, b.zz)) return Rots(c0.x, c1.x, c2.x, c0.y, c1.y, c2.y, c0.z, c1.z, c2.z) def rots_mul_vecs(m: Rots, v: Vecs) -> Vecs: """Apply rotations 'm' to vectors 'v'.""" return Vecs(m.xx * v.x + m.xy * v.y + m.xz * v.z, m.yx * v.x + m.yy * v.y + m.yz * v.z, m.zx * v.x + m.zy * v.y + m.zz * v.z) def vecs_add(v1: Vecs, v2: Vecs) -> Vecs: """Add two vectors 'v1' and 'v2'.""" return Vecs(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z) def vecs_dot_vecs(v1: Vecs, v2: Vecs) -> jnp.ndarray: """Dot product of vectors 'v1' and 'v2'.""" return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z def vecs_cross_vecs(v1: Vecs, v2: Vecs) -> Vecs: """Cross product of vectors 'v1' and 'v2'.""" return Vecs(v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x) def vecs_from_tensor(x: jnp.ndarray # shape (..., 3) ) -> Vecs: # shape (...) """Converts from tensor of shape (3,) to Vecs.""" num_components = x.shape[-1] assert num_components == 3 return Vecs(x[..., 0], x[..., 1], x[..., 2]) def vecs_robust_normalize(v: Vecs, epsilon: float = 1e-8) -> Vecs: """Normalizes vectors 'v'. Args: v: vectors to be normalized. epsilon: small regularizer added to squared norm before taking square root. Returns: normalized vectors """ norms = vecs_robust_norm(v, epsilon) return Vecs(v.x / norms, v.y / norms, v.z / norms) def vecs_robust_norm(v: Vecs, epsilon: float = 1e-8) -> jnp.ndarray: """Computes norm of vectors 'v'. Args: v: vectors to be normalized. epsilon: small regularizer added to squared norm before taking square root. Returns: norm of 'v' """ return jnp.sqrt(jnp.square(v.x) + jnp.square(v.y) + jnp.square(v.z) + epsilon) def vecs_sub(v1: Vecs, v2: Vecs) -> Vecs: """Computes v1 - v2.""" return Vecs(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z) def vecs_squared_distance(v1: Vecs, v2: Vecs) -> jnp.ndarray: """Computes squared euclidean difference between 'v1' and 'v2'.""" return (squared_difference(v1.x, v2.x) + squared_difference(v1.y, v2.y) + squared_difference(v1.z, v2.z)) def vecs_to_tensor(v: Vecs # shape (...) ) -> jnp.ndarray: # shape(..., 3) """Converts 'v' to tensor with shape 3, inverse of 'vecs_from_tensor'.""" return jnp.stack([v.x, v.y, v.z], axis=-1)
alphafold-main
alphafold/model/r3.py
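A minimal usage sketch for the r3 primitives above, with made-up coordinates; it assumes the alphafold package and JAX are importable and is not part of the repository. It builds per-frame rigid transformations from three points and round-trips a point through the local frames.

import jax.numpy as jnp
from alphafold.model import r3

# Two frames, each defined by three points (values are arbitrary).
n  = r3.Vecs(jnp.array([0.0, 1.0]), jnp.array([1.5, 2.5]), jnp.array([0.0, 0.0]))
ca = r3.Vecs(jnp.array([1.0, 2.0]), jnp.array([0.0, 1.0]), jnp.array([0.0, 0.0]))
c  = r3.Vecs(jnp.array([2.5, 3.5]), jnp.array([0.5, 1.5]), jnp.array([0.0, 0.0]))

# Frames with origin at `ca`; `n` defines the negative x direction and `c`
# lies in the local xy-plane (Gram-Schmidt, as in rigids_from_3_points).
frames = r3.rigids_from_3_points(
    point_on_neg_x_axis=n, origin=ca, point_on_xy_plane=c)

# Express a point in each local frame, then map it back to the global frame.
point = r3.Vecs(jnp.array([1.0, 1.0]), jnp.array([1.0, 1.0]), jnp.array([0.5, 0.5]))
local_point = r3.rigids_mul_vecs(r3.invert_rigids(frames), point)
global_point = r3.rigids_mul_vecs(frames, local_point)  # ~= point up to float error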
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for all_atom.""" from absl.testing import absltest from absl.testing import parameterized from alphafold.model import all_atom from alphafold.model import r3 import numpy as np L1_CLAMP_DISTANCE = 10 def get_identity_rigid(shape): """Returns identity rigid transform.""" ones = np.ones(shape) zeros = np.zeros(shape) rot = r3.Rots(ones, zeros, zeros, zeros, ones, zeros, zeros, zeros, ones) trans = r3.Vecs(zeros, zeros, zeros) return r3.Rigids(rot, trans) def get_global_rigid_transform(rot_angle, translation, bcast_dims): """Returns rigid transform that globally rotates/translates by same amount.""" rot_angle = np.asarray(rot_angle) translation = np.asarray(translation) if bcast_dims: for _ in range(bcast_dims): rot_angle = np.expand_dims(rot_angle, 0) translation = np.expand_dims(translation, 0) sin_angle = np.sin(np.deg2rad(rot_angle)) cos_angle = np.cos(np.deg2rad(rot_angle)) ones = np.ones_like(sin_angle) zeros = np.zeros_like(sin_angle) rot = r3.Rots(ones, zeros, zeros, zeros, cos_angle, -sin_angle, zeros, sin_angle, cos_angle) trans = r3.Vecs(translation[..., 0], translation[..., 1], translation[..., 2]) return r3.Rigids(rot, trans) class AllAtomTest(parameterized.TestCase, absltest.TestCase): @parameterized.named_parameters( ('identity', 0, [0, 0, 0]), ('rot_90', 90, [0, 0, 0]), ('trans_10', 0, [0, 0, 10]), ('rot_174_trans_1', 174, [1, 1, 1])) def test_frame_aligned_point_error_perfect_on_global_transform( self, rot_angle, translation): """Tests global transform between target and preds gives perfect score.""" # pylint: disable=bad-whitespace target_positions = np.array( [[ 21.182, 23.095, 19.731], [ 22.055, 20.919, 17.294], [ 24.599, 20.005, 15.041], [ 25.567, 18.214, 12.166], [ 28.063, 17.082, 10.043], [ 28.779, 15.569, 6.985], [ 30.581, 13.815, 4.612], [ 29.258, 12.193, 2.296]]) # pylint: enable=bad-whitespace global_rigid_transform = get_global_rigid_transform( rot_angle, translation, 1) target_positions = r3.vecs_from_tensor(target_positions) pred_positions = r3.rigids_mul_vecs( global_rigid_transform, target_positions) positions_mask = np.ones(target_positions.x.shape[0]) target_frames = get_identity_rigid(10) pred_frames = r3.rigids_mul_rigids(global_rigid_transform, target_frames) frames_mask = np.ones(10) fape = all_atom.frame_aligned_point_error( pred_frames, target_frames, frames_mask, pred_positions, target_positions, positions_mask, L1_CLAMP_DISTANCE, L1_CLAMP_DISTANCE, epsilon=0) self.assertAlmostEqual(fape, 0.) 
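  # The expected values below follow from the 10 Angstrom clamp and
  # 10 Angstrom length scale used in this test: a uniform 2.5 Angstrom shift
  # of the positions gives 2.5 / 10 = 0.25, a 5 Angstrom shift gives 0.5, and
  # any shift at or beyond the clamp saturates at 1.0.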
@parameterized.named_parameters( ('identity', [[0, 0, 0], [5, 0, 0], [10, 0, 0]], [[0, 0, 0], [5, 0, 0], [10, 0, 0]], 0.), ('shift_2.5', [[0, 0, 0], [5, 0, 0], [10, 0, 0]], [[2.5, 0, 0], [7.5, 0, 0], [7.5, 0, 0]], 0.25), ('shift_5', [[0, 0, 0], [5, 0, 0], [10, 0, 0]], [[5, 0, 0], [10, 0, 0], [15, 0, 0]], 0.5), ('shift_10', [[0, 0, 0], [5, 0, 0], [10, 0, 0]], [[10, 0, 0], [15, 0, 0], [0, 0, 0]], 1.)) def test_frame_aligned_point_error_matches_expected( self, target_positions, pred_positions, expected_alddt): """Tests score matches expected.""" target_frames = get_identity_rigid(2) pred_frames = target_frames frames_mask = np.ones(2) target_positions = r3.vecs_from_tensor(np.array(target_positions)) pred_positions = r3.vecs_from_tensor(np.array(pred_positions)) positions_mask = np.ones(target_positions.x.shape[0]) alddt = all_atom.frame_aligned_point_error( pred_frames, target_frames, frames_mask, pred_positions, target_positions, positions_mask, L1_CLAMP_DISTANCE, L1_CLAMP_DISTANCE, epsilon=0) self.assertAlmostEqual(alddt, expected_alddt) if __name__ == '__main__': absltest.main()
alphafold-main
alphafold/model/all_atom_test.py
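A hedged companion sketch to the tests above (coordinates are arbitrary; it assumes alphafold, NumPy and JAX are installed and mirrors the call pattern used in the test class): the frame aligned point error of a structure against itself is zero.

import numpy as np
from alphafold.model import all_atom, r3

positions = r3.vecs_from_tensor(
    np.array([[0., 0., 0.], [3., 1., 0.], [6., 2., 1.]]))
positions_mask = np.ones(3)

ones, zeros = np.ones(2), np.zeros(2)
identity_frames = r3.Rigids(
    r3.Rots(ones, zeros, zeros, zeros, ones, zeros, zeros, zeros, ones),
    r3.Vecs(zeros, zeros, zeros))
frames_mask = np.ones(2)

fape = all_atom.frame_aligned_point_error(
    identity_frames, identity_frames, frames_mask,
    positions, positions, positions_mask,
    10., 10., epsilon=0)
print(float(fape))  # 0.0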
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Specialized mapping functions.""" import functools import inspect from typing import Any, Callable, Optional, Sequence, Union import haiku as hk import jax import jax.numpy as jnp PYTREE = Any PYTREE_JAX_ARRAY = Any partial = functools.partial PROXY = object() def _maybe_slice(array, i, slice_size, axis): if axis is PROXY: return array else: return jax.lax.dynamic_slice_in_dim( array, i, slice_size=slice_size, axis=axis) def _maybe_get_size(array, axis): if axis == PROXY: return -1 else: return array.shape[axis] def _expand_axes(axes, values, name='sharded_apply'): values_tree_def = jax.tree_util.tree_flatten(values)[1] flat_axes = jax.api_util.flatten_axes(name, values_tree_def, axes) # Replace None's with PROXY flat_axes = [PROXY if x is None else x for x in flat_axes] return jax.tree_util.tree_unflatten(values_tree_def, flat_axes) def sharded_map( fun: Callable[..., PYTREE_JAX_ARRAY], shard_size: Union[int, None] = 1, in_axes: Union[int, PYTREE] = 0, out_axes: Union[int, PYTREE] = 0) -> Callable[..., PYTREE_JAX_ARRAY]: """Sharded vmap. Maps `fun` over axes, in a way similar to vmap, but does so in shards of `shard_size`. This allows a smooth trade-off between memory usage (as in a plain map) vs higher throughput (as in a vmap). Args: fun: Function to apply smap transform to. shard_size: Integer denoting shard size. in_axes: Either integer or pytree describing which axis to map over for each input to `fun`, None denotes broadcasting. out_axes: integer or pytree denoting to what axis in the output the mapped over axis maps. Returns: function with smap applied. """ if 'split_rng' in inspect.signature(hk.vmap).parameters: vmapped_fun = hk.vmap(fun, in_axes, out_axes, split_rng=False) else: # TODO(tomhennigan): Remove this when older versions of Haiku aren't used. vmapped_fun = hk.vmap(fun, in_axes, out_axes) return sharded_apply(vmapped_fun, shard_size, in_axes, out_axes) def sharded_apply( fun: Callable[..., PYTREE_JAX_ARRAY], # pylint: disable=g-bare-generic shard_size: Union[int, None] = 1, in_axes: Union[int, PYTREE] = 0, out_axes: Union[int, PYTREE] = 0, new_out_axes: bool = False) -> Callable[..., PYTREE_JAX_ARRAY]: """Sharded apply. Applies `fun` over shards to axes, in a way similar to vmap, but does so in shards of `shard_size`. Shards are stacked after. This allows a smooth trade-off between memory usage (as in a plain map) vs higher throughput (as in a vmap). Args: fun: Function to apply smap transform to. shard_size: Integer denoting shard size. in_axes: Either integer or pytree describing which axis to map over for each input to `fun`, None denotes broadcasting. out_axes: integer or pytree denoting to what axis in the output the mapped over axis maps. new_out_axes: whether to stack outputs on new axes. This assumes that the output sizes for each shard (including the possible remainder shard) are the same. Returns: function with smap applied. """ docstr = ('Mapped version of {fun}. 
Takes similar arguments to {fun} ' 'but with additional array axes over which {fun} is mapped.') if new_out_axes: raise NotImplementedError('New output axes not yet implemented.') # shard size None denotes no sharding if shard_size is None: return fun @jax.util.wraps(fun, docstr=docstr) def mapped_fn(*args): # Expand in axes and Determine Loop range in_axes_ = _expand_axes(in_axes, args) in_sizes = jax.tree_map(_maybe_get_size, args, in_axes_) flat_sizes = jax.tree_util.tree_flatten(in_sizes)[0] in_size = max(flat_sizes) assert all(i in {in_size, -1} for i in flat_sizes) num_extra_shards = (in_size - 1) // shard_size # Fix Up if necessary last_shard_size = in_size % shard_size last_shard_size = shard_size if last_shard_size == 0 else last_shard_size def apply_fun_to_slice(slice_start, slice_size): input_slice = jax.tree_map( lambda array, axis: _maybe_slice(array, slice_start, slice_size, axis ), args, in_axes_) return fun(*input_slice) remainder_shape_dtype = hk.eval_shape( partial(apply_fun_to_slice, 0, last_shard_size)) out_dtypes = jax.tree_map(lambda x: x.dtype, remainder_shape_dtype) out_shapes = jax.tree_map(lambda x: x.shape, remainder_shape_dtype) out_axes_ = _expand_axes(out_axes, remainder_shape_dtype) if num_extra_shards > 0: regular_shard_shape_dtype = hk.eval_shape( partial(apply_fun_to_slice, 0, shard_size)) shard_shapes = jax.tree_map(lambda x: x.shape, regular_shard_shape_dtype) def make_output_shape(axis, shard_shape, remainder_shape): return shard_shape[:axis] + ( shard_shape[axis] * num_extra_shards + remainder_shape[axis],) + shard_shape[axis + 1:] out_shapes = jax.tree_map(make_output_shape, out_axes_, shard_shapes, out_shapes) # Calls dynamic Update slice with different argument order # This is here since tree_map only works with positional arguments def dynamic_update_slice_in_dim(full_array, update, axis, i): return jax.lax.dynamic_update_slice_in_dim(full_array, update, i, axis) def compute_shard(outputs, slice_start, slice_size): slice_out = apply_fun_to_slice(slice_start, slice_size) update_slice = partial( dynamic_update_slice_in_dim, i=slice_start) return jax.tree_map(update_slice, outputs, slice_out, out_axes_) def scan_iteration(outputs, i): new_outputs = compute_shard(outputs, i, shard_size) return new_outputs, () slice_starts = jnp.arange(0, in_size - shard_size + 1, shard_size) def allocate_buffer(dtype, shape): return jnp.zeros(shape, dtype=dtype) outputs = jax.tree_map(allocate_buffer, out_dtypes, out_shapes) if slice_starts.shape[0] > 0: outputs, _ = hk.scan(scan_iteration, outputs, slice_starts) if last_shard_size != shard_size: remainder_start = in_size - last_shard_size outputs = compute_shard(outputs, remainder_start, last_shard_size) return outputs return mapped_fn def inference_subbatch( module: Callable[..., PYTREE_JAX_ARRAY], subbatch_size: int, batched_args: Sequence[PYTREE_JAX_ARRAY], nonbatched_args: Sequence[PYTREE_JAX_ARRAY], low_memory: bool = True, input_subbatch_dim: int = 0, output_subbatch_dim: Optional[int] = None) -> PYTREE_JAX_ARRAY: """Run through subbatches (like batch apply but with split and concat).""" assert len(batched_args) > 0 # pylint: disable=g-explicit-length-test if not low_memory: args = list(batched_args) + list(nonbatched_args) return module(*args) if output_subbatch_dim is None: output_subbatch_dim = input_subbatch_dim def run_module(*batched_args): args = list(batched_args) + list(nonbatched_args) return module(*args) sharded_module = sharded_apply(run_module, shard_size=subbatch_size, in_axes=input_subbatch_dim, 
                                 out_axes=output_subbatch_dim)
  return sharded_module(*batched_args)
alphafold-main
alphafold/model/mapping.py
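An illustrative sketch of sharded_apply, not taken from the repository: it normalizes the rows of a toy array in shards of four. The call is wrapped in hk.transform because the implementation above relies on hk.scan and hk.eval_shape; names, sizes, and the epsilon are arbitrary.

import haiku as hk
import jax
import jax.numpy as jnp
from alphafold.model import mapping

def forward(x):
  # Normalize each row; sharded_apply slices axis 0 into shards of 4 rows,
  # applies the function per shard, and stitches the results back together.
  row_norm = lambda r: r / (1e-6 + jnp.linalg.norm(r, axis=-1, keepdims=True))
  return mapping.sharded_apply(row_norm, shard_size=4)(x)

f = hk.without_apply_rng(hk.transform(forward))
x = jnp.arange(30.0).reshape(10, 3)
params = f.init(jax.random.PRNGKey(0), x)
out = f.apply(params, x)
print(out.shape)  # (10, 3), identical to applying row_norm to x in one go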
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Ops for all atom representations.""" from typing import Dict, Optional from alphafold.common import residue_constants from alphafold.model import geometry from alphafold.model import utils import jax import jax.numpy as jnp import numpy as np def squared_difference(x, y): return jnp.square(x - y) def _make_chi_atom_indices(): """Returns atom indices needed to compute chi angles for all residue types. Returns: A tensor of shape [residue_types=21, chis=4, atoms=4]. The residue types are in the order specified in residue_constants.restypes + unknown residue type at the end. For chi angles which are not defined on the residue, the positions indices are by default set to 0. """ chi_atom_indices = [] for residue_name in residue_constants.restypes: residue_name = residue_constants.restype_1to3[residue_name] residue_chi_angles = residue_constants.chi_angles_atoms[residue_name] atom_indices = [] for chi_angle in residue_chi_angles: atom_indices.append( [residue_constants.atom_order[atom] for atom in chi_angle]) for _ in range(4 - len(atom_indices)): atom_indices.append([0, 0, 0, 0]) # For chi angles not defined on the AA. chi_atom_indices.append(atom_indices) chi_atom_indices.append([[0, 0, 0, 0]] * 4) # For UNKNOWN residue. return np.array(chi_atom_indices) def _make_renaming_matrices(): """Matrices to map atoms to symmetry partners in ambiguous case.""" # As the atom naming is ambiguous for 7 of the 20 amino acids, provide # alternative groundtruth coordinates where the naming is swapped restype_3 = [ residue_constants.restype_1to3[res] for res in residue_constants.restypes ] restype_3 += ['UNK'] # Matrices for renaming ambiguous atoms. all_matrices = {res: np.eye(14, dtype=np.float32) for res in restype_3} for resname, swap in residue_constants.residue_atom_renaming_swaps.items(): correspondences = np.arange(14) for source_atom_swap, target_atom_swap in swap.items(): source_index = residue_constants.restype_name_to_atom14_names[ resname].index(source_atom_swap) target_index = residue_constants.restype_name_to_atom14_names[ resname].index(target_atom_swap) correspondences[source_index] = target_index correspondences[target_index] = source_index renaming_matrix = np.zeros((14, 14), dtype=np.float32) for index, correspondence in enumerate(correspondences): renaming_matrix[index, correspondence] = 1. 
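    # `renaming_matrix` is a 14x14 permutation matrix: it swaps the ambiguous
    # atom pair(s) of this residue type (e.g. OD1 <-> OD2 for ASP) and acts as
    # the identity on every other atom14 slot.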
all_matrices[resname] = renaming_matrix.astype(np.float32) renaming_matrices = np.stack([all_matrices[restype] for restype in restype_3]) return renaming_matrices def _make_restype_atom37_mask(): """Mask of which atoms are present for which residue type in atom37.""" # create the corresponding mask restype_atom37_mask = np.zeros([21, 37], dtype=np.float32) for restype, restype_letter in enumerate(residue_constants.restypes): restype_name = residue_constants.restype_1to3[restype_letter] atom_names = residue_constants.residue_atoms[restype_name] for atom_name in atom_names: atom_type = residue_constants.atom_order[atom_name] restype_atom37_mask[restype, atom_type] = 1 return restype_atom37_mask def _make_restype_atom14_mask(): """Mask of which atoms are present for which residue type in atom14.""" restype_atom14_mask = [] for rt in residue_constants.restypes: atom_names = residue_constants.restype_name_to_atom14_names[ residue_constants.restype_1to3[rt]] restype_atom14_mask.append([(1. if name else 0.) for name in atom_names]) restype_atom14_mask.append([0.] * 14) restype_atom14_mask = np.array(restype_atom14_mask, dtype=np.float32) return restype_atom14_mask def _make_restype_atom37_to_atom14(): """Map from atom37 to atom14 per residue type.""" restype_atom37_to_atom14 = [] # mapping (restype, atom37) --> atom14 for rt in residue_constants.restypes: atom_names = residue_constants.restype_name_to_atom14_names[ residue_constants.restype_1to3[rt]] atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)} restype_atom37_to_atom14.append([ (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in residue_constants.atom_types ]) restype_atom37_to_atom14.append([0] * 37) restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, dtype=np.int32) return restype_atom37_to_atom14 def _make_restype_atom14_to_atom37(): """Map from atom14 to atom37 per residue type.""" restype_atom14_to_atom37 = [] # mapping (restype, atom14) --> atom37 for rt in residue_constants.restypes: atom_names = residue_constants.restype_name_to_atom14_names[ residue_constants.restype_1to3[rt]] restype_atom14_to_atom37.append([ (residue_constants.atom_order[name] if name else 0) for name in atom_names ]) # Add dummy mapping for restype 'UNK' restype_atom14_to_atom37.append([0] * 14) restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, dtype=np.int32) return restype_atom14_to_atom37 def _make_restype_atom14_is_ambiguous(): """Mask which atoms are ambiguous in atom14.""" # create an ambiguous atoms mask. shape: (21, 14) restype_atom14_is_ambiguous = np.zeros((21, 14), dtype=np.float32) for resname, swap in residue_constants.residue_atom_renaming_swaps.items(): for atom_name1, atom_name2 in swap.items(): restype = residue_constants.restype_order[ residue_constants.restype_3to1[resname]] atom_idx1 = residue_constants.restype_name_to_atom14_names[resname].index( atom_name1) atom_idx2 = residue_constants.restype_name_to_atom14_names[resname].index( atom_name2) restype_atom14_is_ambiguous[restype, atom_idx1] = 1 restype_atom14_is_ambiguous[restype, atom_idx2] = 1 return restype_atom14_is_ambiguous def _make_restype_rigidgroup_base_atom37_idx(): """Create Map from rigidgroups to atom37 indices.""" # Create an array with the atom names. 
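  # Rigid group order used throughout this module: 0 backbone, 1 pre-omega,
  # 2 phi, 3 psi, 4-7 chi1..chi4 (groups 1 and 2 are left empty here).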
# shape (num_restypes, num_rigidgroups, 3_atoms): (21, 8, 3) base_atom_names = np.full([21, 8, 3], '', dtype=object) # 0: backbone frame base_atom_names[:, 0, :] = ['C', 'CA', 'N'] # 3: 'psi-group' base_atom_names[:, 3, :] = ['CA', 'C', 'O'] # 4,5,6,7: 'chi1,2,3,4-group' for restype, restype_letter in enumerate(residue_constants.restypes): resname = residue_constants.restype_1to3[restype_letter] for chi_idx in range(4): if residue_constants.chi_angles_mask[restype][chi_idx]: atom_names = residue_constants.chi_angles_atoms[resname][chi_idx] base_atom_names[restype, chi_idx + 4, :] = atom_names[1:] # Translate atom names into atom37 indices. lookuptable = residue_constants.atom_order.copy() lookuptable[''] = 0 restype_rigidgroup_base_atom37_idx = np.vectorize(lambda x: lookuptable[x])( base_atom_names) return restype_rigidgroup_base_atom37_idx CHI_ATOM_INDICES = _make_chi_atom_indices() RENAMING_MATRICES = _make_renaming_matrices() RESTYPE_ATOM14_TO_ATOM37 = _make_restype_atom14_to_atom37() RESTYPE_ATOM37_TO_ATOM14 = _make_restype_atom37_to_atom14() RESTYPE_ATOM37_MASK = _make_restype_atom37_mask() RESTYPE_ATOM14_MASK = _make_restype_atom14_mask() RESTYPE_ATOM14_IS_AMBIGUOUS = _make_restype_atom14_is_ambiguous() RESTYPE_RIGIDGROUP_BASE_ATOM37_IDX = _make_restype_rigidgroup_base_atom37_idx() # Create mask for existing rigid groups. RESTYPE_RIGIDGROUP_MASK = np.zeros([21, 8], dtype=np.float32) RESTYPE_RIGIDGROUP_MASK[:, 0] = 1 RESTYPE_RIGIDGROUP_MASK[:, 3] = 1 RESTYPE_RIGIDGROUP_MASK[:20, 4:] = residue_constants.chi_angles_mask def get_atom37_mask(aatype): return utils.batched_gather(jnp.asarray(RESTYPE_ATOM37_MASK), aatype) def get_atom14_mask(aatype): return utils.batched_gather(jnp.asarray(RESTYPE_ATOM14_MASK), aatype) def get_atom14_is_ambiguous(aatype): return utils.batched_gather(jnp.asarray(RESTYPE_ATOM14_IS_AMBIGUOUS), aatype) def get_atom14_to_atom37_map(aatype): return utils.batched_gather(jnp.asarray(RESTYPE_ATOM14_TO_ATOM37), aatype) def get_atom37_to_atom14_map(aatype): return utils.batched_gather(jnp.asarray(RESTYPE_ATOM37_TO_ATOM14), aatype) def atom14_to_atom37(atom14_data: jnp.ndarray, # (N, 14, ...) aatype: jnp.ndarray ) -> jnp.ndarray: # (N, 37, ...) 
"""Convert atom14 to atom37 representation.""" assert len(atom14_data.shape) in [2, 3] idx_atom37_to_atom14 = get_atom37_to_atom14_map(aatype) atom37_data = utils.batched_gather( atom14_data, idx_atom37_to_atom14, batch_dims=1) atom37_mask = get_atom37_mask(aatype) if len(atom14_data.shape) == 2: atom37_data *= atom37_mask elif len(atom14_data.shape) == 3: atom37_data *= atom37_mask[:, :, None].astype(atom37_data.dtype) return atom37_data def atom37_to_atom14(aatype, all_atom_pos, all_atom_mask): """Convert Atom37 positions to Atom14 positions.""" residx_atom14_to_atom37 = utils.batched_gather( jnp.asarray(RESTYPE_ATOM14_TO_ATOM37), aatype) atom14_mask = utils.batched_gather( all_atom_mask, residx_atom14_to_atom37, batch_dims=1).astype(jnp.float32) # create a mask for known groundtruth positions atom14_mask *= utils.batched_gather(jnp.asarray(RESTYPE_ATOM14_MASK), aatype) # gather the groundtruth positions atom14_positions = jax.tree_map( lambda x: utils.batched_gather(x, residx_atom14_to_atom37, batch_dims=1), all_atom_pos) atom14_positions = atom14_mask * atom14_positions return atom14_positions, atom14_mask def get_alt_atom14(aatype, positions: geometry.Vec3Array, mask): """Get alternative atom14 positions.""" # pick the transformation matrices for the given residue sequence # shape (num_res, 14, 14) renaming_transform = utils.batched_gather( jnp.asarray(RENAMING_MATRICES), aatype) alternative_positions = jax.tree_map( lambda x: jnp.sum(x, axis=1), positions[:, :, None] * renaming_transform) # Create the mask for the alternative ground truth (differs from the # ground truth mask, if only one of the atoms in an ambiguous pair has a # ground truth position) alternative_mask = jnp.sum(mask[..., None] * renaming_transform, axis=1) return alternative_positions, alternative_mask def atom37_to_frames( aatype: jnp.ndarray, # (...) all_atom_positions: geometry.Vec3Array, # (..., 37) all_atom_mask: jnp.ndarray, # (..., 37) ) -> Dict[str, jnp.ndarray]: """Computes the frames for the up to 8 rigid groups for each residue.""" # 0: 'backbone group', # 1: 'pre-omega-group', (empty) # 2: 'phi-group', (currently empty, because it defines only hydrogens) # 3: 'psi-group', # 4,5,6,7: 'chi1,2,3,4-group' aatype_in_shape = aatype.shape # If there is a batch axis, just flatten it away, and reshape everything # back at the end of the function. aatype = jnp.reshape(aatype, [-1]) all_atom_positions = jax.tree_map(lambda x: jnp.reshape(x, [-1, 37]), all_atom_positions) all_atom_mask = jnp.reshape(all_atom_mask, [-1, 37]) # Compute the gather indices for all residues in the chain. # shape (N, 8, 3) residx_rigidgroup_base_atom37_idx = utils.batched_gather( RESTYPE_RIGIDGROUP_BASE_ATOM37_IDX, aatype) # Gather the base atom positions for each rigid group. base_atom_pos = jax.tree_map( lambda x: utils.batched_gather( # pylint: disable=g-long-lambda x, residx_rigidgroup_base_atom37_idx, batch_dims=1), all_atom_positions) # Compute the Rigids. point_on_neg_x_axis = base_atom_pos[:, :, 0] origin = base_atom_pos[:, :, 1] point_on_xy_plane = base_atom_pos[:, :, 2] gt_rotation = geometry.Rot3Array.from_two_vectors( origin - point_on_neg_x_axis, point_on_xy_plane - origin) gt_frames = geometry.Rigid3Array(gt_rotation, origin) # Compute a mask whether the group exists. 
# (N, 8) group_exists = utils.batched_gather(RESTYPE_RIGIDGROUP_MASK, aatype) # Compute a mask whether ground truth exists for the group gt_atoms_exist = utils.batched_gather( # shape (N, 8, 3) all_atom_mask.astype(jnp.float32), residx_rigidgroup_base_atom37_idx, batch_dims=1) gt_exists = jnp.min(gt_atoms_exist, axis=-1) * group_exists # (N, 8) # Adapt backbone frame to old convention (mirror x-axis and z-axis). rots = np.tile(np.eye(3, dtype=np.float32), [8, 1, 1]) rots[0, 0, 0] = -1 rots[0, 2, 2] = -1 gt_frames = gt_frames.compose_rotation( geometry.Rot3Array.from_array(rots)) # The frames for ambiguous rigid groups are just rotated by 180 degree around # the x-axis. The ambiguous group is always the last chi-group. restype_rigidgroup_is_ambiguous = np.zeros([21, 8], dtype=np.float32) restype_rigidgroup_rots = np.tile(np.eye(3, dtype=np.float32), [21, 8, 1, 1]) for resname, _ in residue_constants.residue_atom_renaming_swaps.items(): restype = residue_constants.restype_order[ residue_constants.restype_3to1[resname]] chi_idx = int(sum(residue_constants.chi_angles_mask[restype]) - 1) restype_rigidgroup_is_ambiguous[restype, chi_idx + 4] = 1 restype_rigidgroup_rots[restype, chi_idx + 4, 1, 1] = -1 restype_rigidgroup_rots[restype, chi_idx + 4, 2, 2] = -1 # Gather the ambiguity information for each residue. residx_rigidgroup_is_ambiguous = utils.batched_gather( restype_rigidgroup_is_ambiguous, aatype) ambiguity_rot = utils.batched_gather(restype_rigidgroup_rots, aatype) ambiguity_rot = geometry.Rot3Array.from_array(ambiguity_rot) # Create the alternative ground truth frames. alt_gt_frames = gt_frames.compose_rotation(ambiguity_rot) fix_shape = lambda x: jnp.reshape(x, aatype_in_shape + (8,)) # reshape back to original residue layout gt_frames = jax.tree_map(fix_shape, gt_frames) gt_exists = fix_shape(gt_exists) group_exists = fix_shape(group_exists) residx_rigidgroup_is_ambiguous = fix_shape(residx_rigidgroup_is_ambiguous) alt_gt_frames = jax.tree_map(fix_shape, alt_gt_frames) return { 'rigidgroups_gt_frames': gt_frames, # Rigid (..., 8) 'rigidgroups_gt_exists': gt_exists, # (..., 8) 'rigidgroups_group_exists': group_exists, # (..., 8) 'rigidgroups_group_is_ambiguous': residx_rigidgroup_is_ambiguous, # (..., 8) 'rigidgroups_alt_gt_frames': alt_gt_frames, # Rigid (..., 8) } def torsion_angles_to_frames( aatype: jnp.ndarray, # (N) backb_to_global: geometry.Rigid3Array, # (N) torsion_angles_sin_cos: jnp.ndarray # (N, 7, 2) ) -> geometry.Rigid3Array: # (N, 8) """Compute rigid group frames from torsion angles.""" assert len(aatype.shape) == 1, ( f'Expected array of rank 1, got array with shape: {aatype.shape}.') assert len(backb_to_global.rotation.shape) == 1, ( f'Expected array of rank 1, got array with shape: ' f'{backb_to_global.rotation.shape}') assert len(torsion_angles_sin_cos.shape) == 3, ( f'Expected array of rank 3, got array with shape: ' f'{torsion_angles_sin_cos.shape}') assert torsion_angles_sin_cos.shape[1] == 7, ( f'wrong shape {torsion_angles_sin_cos.shape}') assert torsion_angles_sin_cos.shape[2] == 2, ( f'wrong shape {torsion_angles_sin_cos.shape}') # Gather the default frames for all rigid groups. # geometry.Rigid3Array with shape (N, 8) m = utils.batched_gather(residue_constants.restype_rigid_group_default_frame, aatype) default_frames = geometry.Rigid3Array.from_array4x4(m) # Create the rotation matrices according to the given angles (each frame is # defined such that its rotation is around the x-axis). 
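  # For a torsion angle phi this is the standard rotation about the x-axis,
  #   R_x(phi) = [[1,        0,         0],
  #               [0, cos(phi), -sin(phi)],
  #               [0, sin(phi),  cos(phi)]],
  # assembled element-wise below as a Rot3Array over all residues and groups.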
sin_angles = torsion_angles_sin_cos[..., 0] cos_angles = torsion_angles_sin_cos[..., 1] # insert zero rotation for backbone group. num_residues, = aatype.shape sin_angles = jnp.concatenate([jnp.zeros([num_residues, 1]), sin_angles], axis=-1) cos_angles = jnp.concatenate([jnp.ones([num_residues, 1]), cos_angles], axis=-1) zeros = jnp.zeros_like(sin_angles) ones = jnp.ones_like(sin_angles) # all_rots are geometry.Rot3Array with shape (N, 8) all_rots = geometry.Rot3Array(ones, zeros, zeros, zeros, cos_angles, -sin_angles, zeros, sin_angles, cos_angles) # Apply rotations to the frames. all_frames = default_frames.compose_rotation(all_rots) # chi2, chi3, and chi4 frames do not transform to the backbone frame but to # the previous frame. So chain them up accordingly. chi1_frame_to_backb = all_frames[:, 4] chi2_frame_to_backb = chi1_frame_to_backb @ all_frames[:, 5] chi3_frame_to_backb = chi2_frame_to_backb @ all_frames[:, 6] chi4_frame_to_backb = chi3_frame_to_backb @ all_frames[:, 7] all_frames_to_backb = jax.tree_map( lambda *x: jnp.concatenate(x, axis=-1), all_frames[:, 0:5], chi2_frame_to_backb[:, None], chi3_frame_to_backb[:, None], chi4_frame_to_backb[:, None]) # Create the global frames. # shape (N, 8) all_frames_to_global = backb_to_global[:, None] @ all_frames_to_backb return all_frames_to_global def frames_and_literature_positions_to_atom14_pos( aatype: jnp.ndarray, # (N) all_frames_to_global: geometry.Rigid3Array # (N, 8) ) -> geometry.Vec3Array: # (N, 14) """Put atom literature positions (atom14 encoding) in each rigid group.""" # Pick the appropriate transform for every atom. residx_to_group_idx = utils.batched_gather( residue_constants.restype_atom14_to_rigid_group, aatype) group_mask = jax.nn.one_hot( residx_to_group_idx, num_classes=8) # shape (N, 14, 8) # geometry.Rigid3Array with shape (N, 14) map_atoms_to_global = jax.tree_map( lambda x: jnp.sum(x[:, None, :] * group_mask, axis=-1), all_frames_to_global) # Gather the literature atom positions for each residue. # geometry.Vec3Array with shape (N, 14) lit_positions = geometry.Vec3Array.from_array( utils.batched_gather( residue_constants.restype_atom14_rigid_group_positions, aatype)) # Transform each atom from its local frame to the global frame. # geometry.Vec3Array with shape (N, 14) pred_positions = map_atoms_to_global.apply_to_point(lit_positions) # Mask out non-existing atoms. 
mask = utils.batched_gather(residue_constants.restype_atom14_mask, aatype) pred_positions = pred_positions * mask return pred_positions def extreme_ca_ca_distance_violations( positions: geometry.Vec3Array, # (N, 37(14)) mask: jnp.ndarray, # (N, 37(14)) residue_index: jnp.ndarray, # (N) max_angstrom_tolerance=1.5 ) -> jnp.ndarray: """Counts residues whose Ca is a large distance from its neighbor.""" this_ca_pos = positions[:-1, 1] # (N - 1,) this_ca_mask = mask[:-1, 1] # (N - 1) next_ca_pos = positions[1:, 1] # (N - 1,) next_ca_mask = mask[1:, 1] # (N - 1) has_no_gap_mask = ((residue_index[1:] - residue_index[:-1]) == 1.0).astype( jnp.float32) ca_ca_distance = geometry.euclidean_distance(this_ca_pos, next_ca_pos, 1e-6) violations = (ca_ca_distance - residue_constants.ca_ca) > max_angstrom_tolerance mask = this_ca_mask * next_ca_mask * has_no_gap_mask return utils.mask_mean(mask=mask, value=violations) def between_residue_bond_loss( pred_atom_positions: geometry.Vec3Array, # (N, 37(14)) pred_atom_mask: jnp.ndarray, # (N, 37(14)) residue_index: jnp.ndarray, # (N) aatype: jnp.ndarray, # (N) tolerance_factor_soft=12.0, tolerance_factor_hard=12.0) -> Dict[str, jnp.ndarray]: """Flat-bottom loss to penalize structural violations between residues.""" assert len(pred_atom_positions.shape) == 2 assert len(pred_atom_mask.shape) == 2 assert len(residue_index.shape) == 1 assert len(aatype.shape) == 1 # Get the positions of the relevant backbone atoms. this_ca_pos = pred_atom_positions[:-1, 1] # (N - 1) this_ca_mask = pred_atom_mask[:-1, 1] # (N - 1) this_c_pos = pred_atom_positions[:-1, 2] # (N - 1) this_c_mask = pred_atom_mask[:-1, 2] # (N - 1) next_n_pos = pred_atom_positions[1:, 0] # (N - 1) next_n_mask = pred_atom_mask[1:, 0] # (N - 1) next_ca_pos = pred_atom_positions[1:, 1] # (N - 1) next_ca_mask = pred_atom_mask[1:, 1] # (N - 1) has_no_gap_mask = ((residue_index[1:] - residue_index[:-1]) == 1.0).astype( jnp.float32) # Compute loss for the C--N bond. c_n_bond_length = geometry.euclidean_distance(this_c_pos, next_n_pos, 1e-6) # The C-N bond to proline has slightly different length because of the ring. next_is_proline = ( aatype[1:] == residue_constants.restype_order['P']).astype(jnp.float32) gt_length = ( (1. - next_is_proline) * residue_constants.between_res_bond_length_c_n[0] + next_is_proline * residue_constants.between_res_bond_length_c_n[1]) gt_stddev = ( (1. - next_is_proline) * residue_constants.between_res_bond_length_stddev_c_n[0] + next_is_proline * residue_constants.between_res_bond_length_stddev_c_n[1]) c_n_bond_length_error = jnp.sqrt(1e-6 + jnp.square(c_n_bond_length - gt_length)) c_n_loss_per_residue = jax.nn.relu( c_n_bond_length_error - tolerance_factor_soft * gt_stddev) mask = this_c_mask * next_n_mask * has_no_gap_mask c_n_loss = jnp.sum(mask * c_n_loss_per_residue) / (jnp.sum(mask) + 1e-6) c_n_violation_mask = mask * ( c_n_bond_length_error > (tolerance_factor_hard * gt_stddev)) # Compute loss for the angles. 
c_ca_unit_vec = (this_ca_pos - this_c_pos).normalized(1e-6) c_n_unit_vec = (next_n_pos - this_c_pos) / c_n_bond_length n_ca_unit_vec = (next_ca_pos - next_n_pos).normalized(1e-6) ca_c_n_cos_angle = c_ca_unit_vec.dot(c_n_unit_vec) gt_angle = residue_constants.between_res_cos_angles_ca_c_n[0] gt_stddev = residue_constants.between_res_bond_length_stddev_c_n[0] ca_c_n_cos_angle_error = jnp.sqrt( 1e-6 + jnp.square(ca_c_n_cos_angle - gt_angle)) ca_c_n_loss_per_residue = jax.nn.relu( ca_c_n_cos_angle_error - tolerance_factor_soft * gt_stddev) mask = this_ca_mask * this_c_mask * next_n_mask * has_no_gap_mask ca_c_n_loss = jnp.sum(mask * ca_c_n_loss_per_residue) / (jnp.sum(mask) + 1e-6) ca_c_n_violation_mask = mask * (ca_c_n_cos_angle_error > (tolerance_factor_hard * gt_stddev)) c_n_ca_cos_angle = (-c_n_unit_vec).dot(n_ca_unit_vec) gt_angle = residue_constants.between_res_cos_angles_c_n_ca[0] gt_stddev = residue_constants.between_res_cos_angles_c_n_ca[1] c_n_ca_cos_angle_error = jnp.sqrt( 1e-6 + jnp.square(c_n_ca_cos_angle - gt_angle)) c_n_ca_loss_per_residue = jax.nn.relu( c_n_ca_cos_angle_error - tolerance_factor_soft * gt_stddev) mask = this_c_mask * next_n_mask * next_ca_mask * has_no_gap_mask c_n_ca_loss = jnp.sum(mask * c_n_ca_loss_per_residue) / (jnp.sum(mask) + 1e-6) c_n_ca_violation_mask = mask * ( c_n_ca_cos_angle_error > (tolerance_factor_hard * gt_stddev)) # Compute a per residue loss (equally distribute the loss to both # neighbouring residues). per_residue_loss_sum = (c_n_loss_per_residue + ca_c_n_loss_per_residue + c_n_ca_loss_per_residue) per_residue_loss_sum = 0.5 * (jnp.pad(per_residue_loss_sum, [[0, 1]]) + jnp.pad(per_residue_loss_sum, [[1, 0]])) # Compute hard violations. violation_mask = jnp.max( jnp.stack([c_n_violation_mask, ca_c_n_violation_mask, c_n_ca_violation_mask]), axis=0) violation_mask = jnp.maximum( jnp.pad(violation_mask, [[0, 1]]), jnp.pad(violation_mask, [[1, 0]])) return {'c_n_loss_mean': c_n_loss, # shape () 'ca_c_n_loss_mean': ca_c_n_loss, # shape () 'c_n_ca_loss_mean': c_n_ca_loss, # shape () 'per_residue_loss_sum': per_residue_loss_sum, # shape (N) 'per_residue_violation_mask': violation_mask # shape (N) } def between_residue_clash_loss( pred_positions: geometry.Vec3Array, # (N, 14) atom_exists: jnp.ndarray, # (N, 14) atom_radius: jnp.ndarray, # (N, 14) residue_index: jnp.ndarray, # (N) asym_id: jnp.ndarray, # (N) overlap_tolerance_soft=1.5, overlap_tolerance_hard=1.5) -> Dict[str, jnp.ndarray]: """Loss to penalize steric clashes between residues.""" assert len(pred_positions.shape) == 2 assert len(atom_exists.shape) == 2 assert len(atom_radius.shape) == 2 assert len(residue_index.shape) == 1 # Create the distance matrix. # (N, N, 14, 14) dists = geometry.euclidean_distance(pred_positions[:, None, :, None], pred_positions[None, :, None, :], 1e-10) # Create the mask for valid distances. # shape (N, N, 14, 14) dists_mask = (atom_exists[:, None, :, None] * atom_exists[None, :, None, :]) # Mask out all the duplicate entries in the lower triangular matrix. # Also mask out the diagonal (atom-pairs from the same residue) -- these atoms # are handled separately. dists_mask *= ( residue_index[:, None, None, None] < residue_index[None, :, None, None]) # Backbone C--N bond between subsequent residues is no clash. 
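  # In the atom14 ordering atom 0 is N and atom 2 is C, so the pair
  # (C of residue i, N of residue i + 1) within the same chain is excluded
  # from the clash mask below.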
c_one_hot = jax.nn.one_hot(2, num_classes=14) n_one_hot = jax.nn.one_hot(0, num_classes=14) neighbour_mask = ((residue_index[:, None] + 1) == residue_index[None, :]) neighbour_mask &= (asym_id[:, None] == asym_id[None, :]) neighbour_mask = neighbour_mask[..., None, None] c_n_bonds = neighbour_mask * c_one_hot[None, None, :, None] * n_one_hot[None, None, None, :] dists_mask *= (1. - c_n_bonds) # Disulfide bridge between two cysteines is no clash. cys_sg_idx = residue_constants.restype_name_to_atom14_names['CYS'].index('SG') cys_sg_one_hot = jax.nn.one_hot(cys_sg_idx, num_classes=14) disulfide_bonds = (cys_sg_one_hot[None, None, :, None] * cys_sg_one_hot[None, None, None, :]) dists_mask *= (1. - disulfide_bonds) # Compute the lower bound for the allowed distances. # shape (N, N, 14, 14) dists_lower_bound = dists_mask * ( atom_radius[:, None, :, None] + atom_radius[None, :, None, :]) # Compute the error. # shape (N, N, 14, 14) dists_to_low_error = dists_mask * jax.nn.relu( dists_lower_bound - overlap_tolerance_soft - dists) # Compute the mean loss. # shape () mean_loss = (jnp.sum(dists_to_low_error) / (1e-6 + jnp.sum(dists_mask))) # Compute the per atom loss sum. # shape (N, 14) per_atom_loss_sum = (jnp.sum(dists_to_low_error, axis=[0, 2]) + jnp.sum(dists_to_low_error, axis=[1, 3])) # Compute the hard clash mask. # shape (N, N, 14, 14) clash_mask = dists_mask * ( dists < (dists_lower_bound - overlap_tolerance_hard)) # Compute the per atom clash. # shape (N, 14) per_atom_clash_mask = jnp.maximum( jnp.max(clash_mask, axis=[0, 2]), jnp.max(clash_mask, axis=[1, 3])) return {'mean_loss': mean_loss, # shape () 'per_atom_loss_sum': per_atom_loss_sum, # shape (N, 14) 'per_atom_clash_mask': per_atom_clash_mask # shape (N, 14) } def within_residue_violations( pred_positions: geometry.Vec3Array, # (N, 14) atom_exists: jnp.ndarray, # (N, 14) dists_lower_bound: jnp.ndarray, # (N, 14, 14) dists_upper_bound: jnp.ndarray, # (N, 14, 14) tighten_bounds_for_loss=0.0, ) -> Dict[str, jnp.ndarray]: """Find within-residue violations.""" assert len(pred_positions.shape) == 2 assert len(atom_exists.shape) == 2 assert len(dists_lower_bound.shape) == 3 assert len(dists_upper_bound.shape) == 3 # Compute the mask for each residue. # shape (N, 14, 14) dists_masks = (1. - jnp.eye(14, 14)[None]) dists_masks *= (atom_exists[:, :, None] * atom_exists[:, None, :]) # Distance matrix # shape (N, 14, 14) dists = geometry.euclidean_distance(pred_positions[:, :, None], pred_positions[:, None, :], 1e-10) # Compute the loss. # shape (N, 14, 14) dists_to_low_error = jax.nn.relu( dists_lower_bound + tighten_bounds_for_loss - dists) dists_to_high_error = jax.nn.relu( dists + tighten_bounds_for_loss - dists_upper_bound) loss = dists_masks * (dists_to_low_error + dists_to_high_error) # Compute the per atom loss sum. # shape (N, 14) per_atom_loss_sum = (jnp.sum(loss, axis=1) + jnp.sum(loss, axis=2)) # Compute the violations mask. # shape (N, 14, 14) violations = dists_masks * ((dists < dists_lower_bound) | (dists > dists_upper_bound)) # Compute the per atom violations. 
# shape (N, 14) per_atom_violations = jnp.maximum( jnp.max(violations, axis=1), jnp.max(violations, axis=2)) return {'per_atom_loss_sum': per_atom_loss_sum, # shape (N, 14) 'per_atom_violations': per_atom_violations # shape (N, 14) } def find_optimal_renaming( gt_positions: geometry.Vec3Array, # (N, 14) alt_gt_positions: geometry.Vec3Array, # (N, 14) atom_is_ambiguous: jnp.ndarray, # (N, 14) gt_exists: jnp.ndarray, # (N, 14) pred_positions: geometry.Vec3Array, # (N, 14) ) -> jnp.ndarray: # (N): """Find optimal renaming for ground truth that maximizes LDDT.""" assert len(gt_positions.shape) == 2 assert len(alt_gt_positions.shape) == 2 assert len(atom_is_ambiguous.shape) == 2 assert len(gt_exists.shape) == 2 assert len(pred_positions.shape) == 2 # Create the pred distance matrix. # shape (N, N, 14, 14) pred_dists = geometry.euclidean_distance(pred_positions[:, None, :, None], pred_positions[None, :, None, :], 1e-10) # Compute distances for ground truth with original and alternative names. # shape (N, N, 14, 14) gt_dists = geometry.euclidean_distance(gt_positions[:, None, :, None], gt_positions[None, :, None, :], 1e-10) alt_gt_dists = geometry.euclidean_distance(alt_gt_positions[:, None, :, None], alt_gt_positions[None, :, None, :], 1e-10) # Compute LDDT's. # shape (N, N, 14, 14) lddt = jnp.sqrt(1e-10 + squared_difference(pred_dists, gt_dists)) alt_lddt = jnp.sqrt(1e-10 + squared_difference(pred_dists, alt_gt_dists)) # Create a mask for ambiguous atoms in rows vs. non-ambiguous atoms # in cols. # shape (N ,N, 14, 14) mask = ( gt_exists[:, None, :, None] * # rows atom_is_ambiguous[:, None, :, None] * # rows gt_exists[None, :, None, :] * # cols (1. - atom_is_ambiguous[None, :, None, :])) # cols # Aggregate distances for each residue to the non-amibuguous atoms. # shape (N) per_res_lddt = jnp.sum(mask * lddt, axis=[1, 2, 3]) alt_per_res_lddt = jnp.sum(mask * alt_lddt, axis=[1, 2, 3]) # Decide for each residue, whether alternative naming is better. # shape (N) alt_naming_is_better = (alt_per_res_lddt < per_res_lddt).astype(jnp.float32) return alt_naming_is_better # shape (N) def frame_aligned_point_error( pred_frames: geometry.Rigid3Array, # shape (num_frames) target_frames: geometry.Rigid3Array, # shape (num_frames) frames_mask: jnp.ndarray, # shape (num_frames) pred_positions: geometry.Vec3Array, # shape (num_positions) target_positions: geometry.Vec3Array, # shape (num_positions) positions_mask: jnp.ndarray, # shape (num_positions) pair_mask: Optional[jnp.ndarray], # shape (num_frames, num_posiitons) l1_clamp_distance: float, length_scale=20., epsilon=1e-4) -> jnp.ndarray: # shape () """Measure point error under different alignements. Computes error between two structures with B points under A alignments derived form the given pairs of frames. Args: pred_frames: num_frames reference frames for 'pred_positions'. target_frames: num_frames reference frames for 'target_positions'. frames_mask: Mask for frame pairs to use. pred_positions: num_positions predicted positions of the structure. target_positions: num_positions target positions of the structure. positions_mask: Mask on which positions to score. pair_mask: A (num_frames, num_positions) mask to use in the loss, useful for separating intra from inter chain losses. l1_clamp_distance: Distance cutoff on error beyond which gradients will be zero. length_scale: length scale to divide loss by. epsilon: small value used to regularize denominator for masked average. Returns: Masked Frame aligned point error. 
""" # For now we do not allow any batch dimensions. assert len(pred_frames.rotation.shape) == 1 assert len(target_frames.rotation.shape) == 1 assert frames_mask.ndim == 1 assert pred_positions.x.ndim == 1 assert target_positions.x.ndim == 1 assert positions_mask.ndim == 1 # Compute array of predicted positions in the predicted frames. # geometry.Vec3Array (num_frames, num_positions) local_pred_pos = pred_frames[:, None].inverse().apply_to_point( pred_positions[None, :]) # Compute array of target positions in the target frames. # geometry.Vec3Array (num_frames, num_positions) local_target_pos = target_frames[:, None].inverse().apply_to_point( target_positions[None, :]) # Compute errors between the structures. # jnp.ndarray (num_frames, num_positions) error_dist = geometry.euclidean_distance(local_pred_pos, local_target_pos, epsilon) clipped_error_dist = jnp.clip(error_dist, 0, l1_clamp_distance) normed_error = clipped_error_dist / length_scale normed_error *= jnp.expand_dims(frames_mask, axis=-1) normed_error *= jnp.expand_dims(positions_mask, axis=-2) if pair_mask is not None: normed_error *= pair_mask mask = (jnp.expand_dims(frames_mask, axis=-1) * jnp.expand_dims(positions_mask, axis=-2)) if pair_mask is not None: mask *= pair_mask normalization_factor = jnp.sum(mask, axis=(-1, -2)) return (jnp.sum(normed_error, axis=(-2, -1)) / (epsilon + normalization_factor)) def get_chi_atom_indices(): """Returns atom indices needed to compute chi angles for all residue types. Returns: A tensor of shape [residue_types=21, chis=4, atoms=4]. The residue types are in the order specified in residue_constants.restypes + unknown residue type at the end. For chi angles which are not defined on the residue, the positions indices are by default set to 0. """ chi_atom_indices = [] for residue_name in residue_constants.restypes: residue_name = residue_constants.restype_1to3[residue_name] residue_chi_angles = residue_constants.chi_angles_atoms[residue_name] atom_indices = [] for chi_angle in residue_chi_angles: atom_indices.append( [residue_constants.atom_order[atom] for atom in chi_angle]) for _ in range(4 - len(atom_indices)): atom_indices.append([0, 0, 0, 0]) # For chi angles not defined on the AA. chi_atom_indices.append(atom_indices) chi_atom_indices.append([[0, 0, 0, 0]] * 4) # For UNKNOWN residue. return jnp.asarray(chi_atom_indices) def compute_chi_angles(positions: geometry.Vec3Array, mask: geometry.Vec3Array, aatype: geometry.Vec3Array): """Computes the chi angles given all atom positions and the amino acid type. Args: positions: A Vec3Array of shape [num_res, residue_constants.atom_type_num], with positions of atoms needed to calculate chi angles. Supports up to 1 batch dimension. mask: An optional tensor of shape [num_res, residue_constants.atom_type_num] that masks which atom positions are set for each residue. If given, then the chi mask will be set to 1 for a chi angle only if the amino acid has that chi angle and all the chi atoms needed to calculate that chi angle are set. If not given (set to None), the chi mask will be set to 1 for a chi angle if the amino acid has that chi angle and whether the actual atoms needed to calculate it were set will be ignored. aatype: A tensor of shape [num_res] with amino acid type integer code (0 to 21). Supports up to 1 batch dimension. Returns: A tuple of tensors (chi_angles, mask), where both have shape [num_res, 4]. The mask masks out unused chi angles for amino acid types that have less than 4 chi angles. 
If atom_positions_mask is set, the chi mask will also mask out uncomputable chi angles. """ # Don't assert on the num_res and batch dimensions as they might be unknown. assert positions.shape[-1] == residue_constants.atom_type_num assert mask.shape[-1] == residue_constants.atom_type_num # Compute the table of chi angle indices. Shape: [restypes, chis=4, atoms=4]. chi_atom_indices = get_chi_atom_indices() # Select atoms to compute chis. Shape: [num_res, chis=4, atoms=4]. atom_indices = utils.batched_gather( params=chi_atom_indices, indices=aatype, axis=0) # Gather atom positions. Shape: [num_res, chis=4, atoms=4, xyz=3]. chi_angle_atoms = jax.tree_map( lambda x: utils.batched_gather( # pylint: disable=g-long-lambda params=x, indices=atom_indices, axis=-1, batch_dims=1), positions) a, b, c, d = [chi_angle_atoms[..., i] for i in range(4)] chi_angles = geometry.dihedral_angle(a, b, c, d) # Copy the chi angle mask, add the UNKNOWN residue. Shape: [restypes, 4]. chi_angles_mask = list(residue_constants.chi_angles_mask) chi_angles_mask.append([0.0, 0.0, 0.0, 0.0]) chi_angles_mask = jnp.asarray(chi_angles_mask) # Compute the chi angle mask. Shape [num_res, chis=4]. chi_mask = utils.batched_gather(params=chi_angles_mask, indices=aatype, axis=0) # The chi_mask is set to 1 only when all necessary chi angle atoms were set. # Gather the chi angle atoms mask. Shape: [num_res, chis=4, atoms=4]. chi_angle_atoms_mask = utils.batched_gather( params=mask, indices=atom_indices, axis=-1, batch_dims=1) # Check if all 4 chi angle atoms were set. Shape: [num_res, chis=4]. chi_angle_atoms_mask = jnp.prod(chi_angle_atoms_mask, axis=[-1]) chi_mask = chi_mask * chi_angle_atoms_mask.astype(jnp.float32) return chi_angles, chi_mask def make_transform_from_reference( a_xyz: geometry.Vec3Array, b_xyz: geometry.Vec3Array, c_xyz: geometry.Vec3Array) -> geometry.Rigid3Array: """Returns rotation and translation matrices to convert from reference. Note that this method does not take care of symmetries. If you provide the coordinates in the non-standard way, the A atom will end up in the negative y-axis rather than in the positive y-axis. You need to take care of such cases in your code. Args: a_xyz: A Vec3Array. b_xyz: A Vec3Array. c_xyz: A Vec3Array. Returns: A Rigid3Array which, when applied to coordinates in a canonicalized reference frame, will give coordinates approximately equal the original coordinates (in the global frame). """ rotation = geometry.Rot3Array.from_two_vectors(c_xyz - b_xyz, a_xyz - b_xyz) return geometry.Rigid3Array(rotation, b_xyz)
alphafold-main
alphafold/model/all_atom_multimer.py
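A brief hedged sketch of the atom14/atom37 helpers above, using a toy two-residue aatype and dummy per-atom values; it assumes the alphafold package is importable and is not part of the repository.

import jax.numpy as jnp
from alphafold.common import residue_constants
from alphafold.model import all_atom_multimer

# Toy "sequence" ALA, GLY given as restype indices.
aatype = jnp.array([residue_constants.restype_order['A'],
                    residue_constants.restype_order['G']])

# Dummy per-atom scalar data in the compact atom14 layout.
atom14_data = jnp.arange(2 * 14, dtype=jnp.float32).reshape(2, 14)

# Scatter into the padded atom37 layout; absent atoms are zeroed by the mask.
atom37_data = all_atom_multimer.atom14_to_atom37(atom14_data, aatype)
print(atom37_data.shape)                                       # (2, 37)
print(all_atom_multimer.get_atom37_mask(aatype).sum(axis=-1))  # heavy atoms per residue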
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Quaternion geometry modules. This introduces a representation of coordinate frames that is based around a ‘QuatAffine’ object. This object describes an array of coordinate frames. It consists of vectors corresponding to the origin of the frames as well as orientations which are stored in two ways, as unit quaternions as well as a rotation matrices. The rotation matrices are derived from the unit quaternions and the two are kept in sync. For an explanation of the relation between unit quaternions and rotations see https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation This representation is used in the model for the backbone frames. One important thing to note here, is that while we update both representations the jit compiler is going to ensure that only the parts that are actually used are executed. """ import functools from typing import Tuple import jax import jax.numpy as jnp import numpy as np # pylint: disable=bad-whitespace QUAT_TO_ROT = np.zeros((4, 4, 3, 3), dtype=np.float32) QUAT_TO_ROT[0, 0] = [[ 1, 0, 0], [ 0, 1, 0], [ 0, 0, 1]] # rr QUAT_TO_ROT[1, 1] = [[ 1, 0, 0], [ 0,-1, 0], [ 0, 0,-1]] # ii QUAT_TO_ROT[2, 2] = [[-1, 0, 0], [ 0, 1, 0], [ 0, 0,-1]] # jj QUAT_TO_ROT[3, 3] = [[-1, 0, 0], [ 0,-1, 0], [ 0, 0, 1]] # kk QUAT_TO_ROT[1, 2] = [[ 0, 2, 0], [ 2, 0, 0], [ 0, 0, 0]] # ij QUAT_TO_ROT[1, 3] = [[ 0, 0, 2], [ 0, 0, 0], [ 2, 0, 0]] # ik QUAT_TO_ROT[2, 3] = [[ 0, 0, 0], [ 0, 0, 2], [ 0, 2, 0]] # jk QUAT_TO_ROT[0, 1] = [[ 0, 0, 0], [ 0, 0,-2], [ 0, 2, 0]] # ir QUAT_TO_ROT[0, 2] = [[ 0, 0, 2], [ 0, 0, 0], [-2, 0, 0]] # jr QUAT_TO_ROT[0, 3] = [[ 0,-2, 0], [ 2, 0, 0], [ 0, 0, 0]] # kr QUAT_MULTIPLY = np.zeros((4, 4, 4), dtype=np.float32) QUAT_MULTIPLY[:, :, 0] = [[ 1, 0, 0, 0], [ 0,-1, 0, 0], [ 0, 0,-1, 0], [ 0, 0, 0,-1]] QUAT_MULTIPLY[:, :, 1] = [[ 0, 1, 0, 0], [ 1, 0, 0, 0], [ 0, 0, 0, 1], [ 0, 0,-1, 0]] QUAT_MULTIPLY[:, :, 2] = [[ 0, 0, 1, 0], [ 0, 0, 0,-1], [ 1, 0, 0, 0], [ 0, 1, 0, 0]] QUAT_MULTIPLY[:, :, 3] = [[ 0, 0, 0, 1], [ 0, 0, 1, 0], [ 0,-1, 0, 0], [ 1, 0, 0, 0]] QUAT_MULTIPLY_BY_VEC = QUAT_MULTIPLY[:, 1:, :] # pylint: enable=bad-whitespace def rot_to_quat(rot, unstack_inputs=False): """Convert rotation matrix to quaternion. Note that this function calls self_adjoint_eig which is extremely expensive on the GPU. If at all possible, this function should run on the CPU. Args: rot: rotation matrix (see below for format). unstack_inputs: If true, rotation matrix should be shape (..., 3, 3) otherwise the rotation matrix should be a list of lists of tensors. Returns: Quaternion as (..., 4) tensor. """ if unstack_inputs: rot = [jnp.moveaxis(x, -1, 0) for x in jnp.moveaxis(rot, -2, 0)] [[xx, xy, xz], [yx, yy, yz], [zx, zy, zz]] = rot # pylint: disable=bad-whitespace k = [[ xx + yy + zz, zy - yz, xz - zx, yx - xy,], [ zy - yz, xx - yy - zz, xy + yx, xz + zx,], [ xz - zx, xy + yx, yy - xx - zz, yz + zy,], [ yx - xy, xz + zx, yz + zy, zz - xx - yy,]] # pylint: enable=bad-whitespace k = (1./3.) 
* jnp.stack([jnp.stack(x, axis=-1) for x in k], axis=-2) # Get eigenvalues in non-decreasing order and associated. _, qs = jnp.linalg.eigh(k) return qs[..., -1] def rot_list_to_tensor(rot_list): """Convert list of lists to rotation tensor.""" return jnp.stack( [jnp.stack(rot_list[0], axis=-1), jnp.stack(rot_list[1], axis=-1), jnp.stack(rot_list[2], axis=-1)], axis=-2) def vec_list_to_tensor(vec_list): """Convert list to vector tensor.""" return jnp.stack(vec_list, axis=-1) def quat_to_rot(normalized_quat): """Convert a normalized quaternion to a rotation matrix.""" rot_tensor = jnp.sum( np.reshape(QUAT_TO_ROT, (4, 4, 9)) * normalized_quat[..., :, None, None] * normalized_quat[..., None, :, None], axis=(-3, -2)) rot = jnp.moveaxis(rot_tensor, -1, 0) # Unstack. return [[rot[0], rot[1], rot[2]], [rot[3], rot[4], rot[5]], [rot[6], rot[7], rot[8]]] def quat_multiply_by_vec(quat, vec): """Multiply a quaternion by a pure-vector quaternion.""" return jnp.sum( QUAT_MULTIPLY_BY_VEC * quat[..., :, None, None] * vec[..., None, :, None], axis=(-3, -2)) def quat_multiply(quat1, quat2): """Multiply a quaternion by another quaternion.""" return jnp.sum( QUAT_MULTIPLY * quat1[..., :, None, None] * quat2[..., None, :, None], axis=(-3, -2)) def apply_rot_to_vec(rot, vec, unstack=False): """Multiply rotation matrix by a vector.""" if unstack: x, y, z = [vec[:, i] for i in range(3)] else: x, y, z = vec return [rot[0][0] * x + rot[0][1] * y + rot[0][2] * z, rot[1][0] * x + rot[1][1] * y + rot[1][2] * z, rot[2][0] * x + rot[2][1] * y + rot[2][2] * z] def apply_inverse_rot_to_vec(rot, vec): """Multiply the inverse of a rotation matrix by a vector.""" # Inverse rotation is just transpose return [rot[0][0] * vec[0] + rot[1][0] * vec[1] + rot[2][0] * vec[2], rot[0][1] * vec[0] + rot[1][1] * vec[1] + rot[2][1] * vec[2], rot[0][2] * vec[0] + rot[1][2] * vec[1] + rot[2][2] * vec[2]] class QuatAffine(object): """Affine transformation represented by quaternion and vector.""" def __init__(self, quaternion, translation, rotation=None, normalize=True, unstack_inputs=False): """Initialize from quaternion and translation. Args: quaternion: Rotation represented by a quaternion, to be applied before translation. Must be a unit quaternion unless normalize==True. translation: Translation represented as a vector. rotation: Same rotation as the quaternion, represented as a (..., 3, 3) tensor. If None, rotation will be calculated from the quaternion. normalize: If True, l2 normalize the quaternion on input. unstack_inputs: If True, translation is a vector with last component 3 """ if quaternion is not None: assert quaternion.shape[-1] == 4 if unstack_inputs: if rotation is not None: rotation = [jnp.moveaxis(x, -1, 0) # Unstack. for x in jnp.moveaxis(rotation, -2, 0)] # Unstack. translation = jnp.moveaxis(translation, -1, 0) # Unstack. if normalize and quaternion is not None: quaternion = quaternion / jnp.linalg.norm(quaternion, axis=-1, keepdims=True) if rotation is None: rotation = quat_to_rot(quaternion) self.quaternion = quaternion self.rotation = [list(row) for row in rotation] self.translation = list(translation) assert all(len(row) == 3 for row in self.rotation) assert len(self.translation) == 3 def to_tensor(self): return jnp.concatenate( [self.quaternion] + [jnp.expand_dims(x, axis=-1) for x in self.translation], axis=-1) def apply_tensor_fn(self, tensor_fn): """Return a new QuatAffine with tensor_fn applied (e.g. 
stop_gradient).""" return QuatAffine( tensor_fn(self.quaternion), [tensor_fn(x) for x in self.translation], rotation=[[tensor_fn(x) for x in row] for row in self.rotation], normalize=False) def apply_rotation_tensor_fn(self, tensor_fn): """Return a new QuatAffine with tensor_fn applied to the rotation part.""" return QuatAffine( tensor_fn(self.quaternion), [x for x in self.translation], rotation=[[tensor_fn(x) for x in row] for row in self.rotation], normalize=False) def scale_translation(self, position_scale): """Return a new quat affine with a different scale for translation.""" return QuatAffine( self.quaternion, [x * position_scale for x in self.translation], rotation=[[x for x in row] for row in self.rotation], normalize=False) @classmethod def from_tensor(cls, tensor, normalize=False): quaternion, tx, ty, tz = jnp.split(tensor, [4, 5, 6], axis=-1) return cls(quaternion, [tx[..., 0], ty[..., 0], tz[..., 0]], normalize=normalize) def pre_compose(self, update): """Return a new QuatAffine which applies the transformation update first. Args: update: Length-6 vector. 3-vector of x, y, and z such that the quaternion update is (1, x, y, z) and zero for the 3-vector is the identity quaternion. 3-vector for translation concatenated. Returns: New QuatAffine object. """ vector_quaternion_update, x, y, z = jnp.split(update, [3, 4, 5], axis=-1) trans_update = [jnp.squeeze(x, axis=-1), jnp.squeeze(y, axis=-1), jnp.squeeze(z, axis=-1)] new_quaternion = (self.quaternion + quat_multiply_by_vec(self.quaternion, vector_quaternion_update)) trans_update = apply_rot_to_vec(self.rotation, trans_update) new_translation = [ self.translation[0] + trans_update[0], self.translation[1] + trans_update[1], self.translation[2] + trans_update[2]] return QuatAffine(new_quaternion, new_translation) def apply_to_point(self, point, extra_dims=0): """Apply affine to a point. Args: point: List of 3 tensors to apply affine. extra_dims: Number of dimensions at the end of the transformed_point shape that are not present in the rotation and translation. The most common use is rotation N points at once with extra_dims=1 for use in a network. Returns: Transformed point after applying affine. """ rotation = self.rotation translation = self.translation for _ in range(extra_dims): expand_fn = functools.partial(jnp.expand_dims, axis=-1) rotation = jax.tree_map(expand_fn, rotation) translation = jax.tree_map(expand_fn, translation) rot_point = apply_rot_to_vec(rotation, point) return [ rot_point[0] + translation[0], rot_point[1] + translation[1], rot_point[2] + translation[2]] def invert_point(self, transformed_point, extra_dims=0): """Apply inverse of transformation to a point. Args: transformed_point: List of 3 tensors to apply affine extra_dims: Number of dimensions at the end of the transformed_point shape that are not present in the rotation and translation. The most common use is rotation N points at once with extra_dims=1 for use in a network. Returns: Transformed point after applying affine. 
""" rotation = self.rotation translation = self.translation for _ in range(extra_dims): expand_fn = functools.partial(jnp.expand_dims, axis=-1) rotation = jax.tree_map(expand_fn, rotation) translation = jax.tree_map(expand_fn, translation) rot_point = [ transformed_point[0] - translation[0], transformed_point[1] - translation[1], transformed_point[2] - translation[2]] return apply_inverse_rot_to_vec(rotation, rot_point) def __repr__(self): return 'QuatAffine(%r, %r)' % (self.quaternion, self.translation) def _multiply(a, b): return jnp.stack([ jnp.array([a[0][0]*b[0][0] + a[0][1]*b[1][0] + a[0][2]*b[2][0], a[0][0]*b[0][1] + a[0][1]*b[1][1] + a[0][2]*b[2][1], a[0][0]*b[0][2] + a[0][1]*b[1][2] + a[0][2]*b[2][2]]), jnp.array([a[1][0]*b[0][0] + a[1][1]*b[1][0] + a[1][2]*b[2][0], a[1][0]*b[0][1] + a[1][1]*b[1][1] + a[1][2]*b[2][1], a[1][0]*b[0][2] + a[1][1]*b[1][2] + a[1][2]*b[2][2]]), jnp.array([a[2][0]*b[0][0] + a[2][1]*b[1][0] + a[2][2]*b[2][0], a[2][0]*b[0][1] + a[2][1]*b[1][1] + a[2][2]*b[2][1], a[2][0]*b[0][2] + a[2][1]*b[1][2] + a[2][2]*b[2][2]])]) def make_canonical_transform( n_xyz: jnp.ndarray, ca_xyz: jnp.ndarray, c_xyz: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]: """Returns translation and rotation matrices to canonicalize residue atoms. Note that this method does not take care of symmetries. If you provide the atom positions in the non-standard way, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You need to take care of such cases in your code. Args: n_xyz: An array of shape [batch, 3] of nitrogen xyz coordinates. ca_xyz: An array of shape [batch, 3] of carbon alpha xyz coordinates. c_xyz: An array of shape [batch, 3] of carbon xyz coordinates. Returns: A tuple (translation, rotation) where: translation is an array of shape [batch, 3] defining the translation. rotation is an array of shape [batch, 3, 3] defining the rotation. After applying the translation and rotation to all atoms in a residue: * All atoms will be shifted so that CA is at the origin, * All atoms will be rotated so that C is at the x-axis, * All atoms will be shifted so that N is in the xy plane. """ assert len(n_xyz.shape) == 2, n_xyz.shape assert n_xyz.shape[-1] == 3, n_xyz.shape assert n_xyz.shape == ca_xyz.shape == c_xyz.shape, ( n_xyz.shape, ca_xyz.shape, c_xyz.shape) # Place CA at the origin. translation = -ca_xyz n_xyz = n_xyz + translation c_xyz = c_xyz + translation # Place C on the x-axis. c_x, c_y, c_z = [c_xyz[:, i] for i in range(3)] # Rotate by angle c1 in the x-y plane (around the z-axis). sin_c1 = -c_y / jnp.sqrt(1e-20 + c_x**2 + c_y**2) cos_c1 = c_x / jnp.sqrt(1e-20 + c_x**2 + c_y**2) zeros = jnp.zeros_like(sin_c1) ones = jnp.ones_like(sin_c1) # pylint: disable=bad-whitespace c1_rot_matrix = jnp.stack([jnp.array([cos_c1, -sin_c1, zeros]), jnp.array([sin_c1, cos_c1, zeros]), jnp.array([zeros, zeros, ones])]) # Rotate by angle c2 in the x-z plane (around the y-axis). sin_c2 = c_z / jnp.sqrt(1e-20 + c_x**2 + c_y**2 + c_z**2) cos_c2 = jnp.sqrt(c_x**2 + c_y**2) / jnp.sqrt( 1e-20 + c_x**2 + c_y**2 + c_z**2) c2_rot_matrix = jnp.stack([jnp.array([cos_c2, zeros, sin_c2]), jnp.array([zeros, ones, zeros]), jnp.array([-sin_c2, zeros, cos_c2])]) c_rot_matrix = _multiply(c2_rot_matrix, c1_rot_matrix) n_xyz = jnp.stack(apply_rot_to_vec(c_rot_matrix, n_xyz, unstack=True)).T # Place N in the x-y plane. _, n_y, n_z = [n_xyz[:, i] for i in range(3)] # Rotate by angle alpha in the y-z plane (around the x-axis). 
sin_n = -n_z / jnp.sqrt(1e-20 + n_y**2 + n_z**2) cos_n = n_y / jnp.sqrt(1e-20 + n_y**2 + n_z**2) n_rot_matrix = jnp.stack([jnp.array([ones, zeros, zeros]), jnp.array([zeros, cos_n, -sin_n]), jnp.array([zeros, sin_n, cos_n])]) # pylint: enable=bad-whitespace return (translation, jnp.transpose(_multiply(n_rot_matrix, c_rot_matrix), [2, 0, 1])) def make_transform_from_reference( n_xyz: jnp.ndarray, ca_xyz: jnp.ndarray, c_xyz: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]: """Returns rotation and translation matrices to convert from reference. Note that this method does not take care of symmetries. If you provide the atom positions in the non-standard way, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You need to take care of such cases in your code. Args: n_xyz: An array of shape [batch, 3] of nitrogen xyz coordinates. ca_xyz: An array of shape [batch, 3] of carbon alpha xyz coordinates. c_xyz: An array of shape [batch, 3] of carbon xyz coordinates. Returns: A tuple (rotation, translation) where: rotation is an array of shape [batch, 3, 3] defining the rotation. translation is an array of shape [batch, 3] defining the translation. After applying the translation and rotation to the reference backbone, the coordinates will approximately equal to the input coordinates. The order of translation and rotation differs from make_canonical_transform because the rotation from this function should be applied before the translation, unlike make_canonical_transform. """ translation, rotation = make_canonical_transform(n_xyz, ca_xyz, c_xyz) return np.transpose(rotation, (0, 2, 1)), -translation
alphafold-main
alphafold/model/quat_affine.py
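# --- Illustrative usage sketch (added for clarity; not part of the AlphaFold
# source). It shows make_transform_from_reference on a toy, made-up backbone:
# the returned rotation is applied before the translation, as the docstring
# above notes. Assumes the alphafold package and JAX are importable.
import jax.numpy as jnp
from alphafold.model import quat_affine

n_xyz = jnp.array([[0.0, 1.4, 0.0]])   # hypothetical nitrogen coordinates
ca_xyz = jnp.array([[0.0, 0.0, 0.0]])  # CA placed at the origin
c_xyz = jnp.array([[1.5, 0.0, 0.0]])   # carbon placed on the x-axis

rotation, translation = quat_affine.make_transform_from_reference(
    n_xyz, ca_xyz, c_xyz)
print(rotation.shape, translation.shape)  # (1, 3, 3) (1, 3)
# Rotating the reference backbone by `rotation` and then adding `translation`
# should approximately reproduce the input atom coordinates.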
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convenience functions for reading data.""" import io import os from alphafold.model import utils import haiku as hk import numpy as np # Internal import (7716). def get_model_haiku_params(model_name: str, data_dir: str) -> hk.Params: """Get the Haiku parameters from a model name.""" path = os.path.join(data_dir, 'params', f'params_{model_name}.npz') with open(path, 'rb') as f: params = np.load(io.BytesIO(f.read()), allow_pickle=False) return utils.flat_params_to_haiku(params)
alphafold-main
alphafold/model/data.py
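# --- Illustrative usage sketch (added for clarity; not part of the AlphaFold
# source). Loads pretrained Haiku parameters from a downloaded data directory.
# The path and the model name 'model_1' are assumptions; data_dir must contain
# params/params_<model_name>.npz, matching the layout used above.
from alphafold.model import data

params = data.get_model_haiku_params(
    model_name='model_1',                  # assumed model name
    data_dir='/path/to/alphafold_data')    # assumed download location
print(f'Loaded parameters for {len(params)} Haiku modules')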
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for quat_affine.""" from absl import logging from absl.testing import absltest from alphafold.model import quat_affine import jax import jax.numpy as jnp import numpy as np VERBOSE = False np.set_printoptions(precision=3, suppress=True) r2t = quat_affine.rot_list_to_tensor v2t = quat_affine.vec_list_to_tensor q2r = lambda q: r2t(quat_affine.quat_to_rot(q)) class QuatAffineTest(absltest.TestCase): def _assert_check(self, to_check, tol=1e-5): for k, (correct, generated) in to_check.items(): if VERBOSE: logging.info(k) logging.info('Correct %s', correct) logging.info('Predicted %s', generated) self.assertLess(np.max(np.abs(correct - generated)), tol) def test_conversion(self): quat = jnp.array([-2., 5., -1., 4.]) rotation = jnp.array([ [0.26087, 0.130435, 0.956522], [-0.565217, -0.782609, 0.26087], [0.782609, -0.608696, -0.130435]]) translation = jnp.array([1., -3., 4.]) point = jnp.array([0.7, 3.2, -2.9]) a = quat_affine.QuatAffine(quat, translation, unstack_inputs=True) true_new_point = jnp.matmul(rotation, point[:, None])[:, 0] + translation self._assert_check({ 'rot': (rotation, r2t(a.rotation)), 'trans': (translation, v2t(a.translation)), 'point': (true_new_point, v2t(a.apply_to_point(jnp.moveaxis(point, -1, 0)))), # Because of the double cover, we must be careful and compare rotations 'quat': (q2r(a.quaternion), q2r(quat_affine.rot_to_quat(a.rotation))), }) def test_double_cover(self): """Test that -q is the same rotation as q.""" rng = jax.random.PRNGKey(42) keys = jax.random.split(rng) q = jax.random.normal(keys[0], (2, 4)) trans = jax.random.normal(keys[1], (2, 3)) a1 = quat_affine.QuatAffine(q, trans, unstack_inputs=True) a2 = quat_affine.QuatAffine(-q, trans, unstack_inputs=True) self._assert_check({ 'rot': (r2t(a1.rotation), r2t(a2.rotation)), 'trans': (v2t(a1.translation), v2t(a2.translation)), }) def test_homomorphism(self): rng = jax.random.PRNGKey(42) keys = jax.random.split(rng, 4) vec_q1 = jax.random.normal(keys[0], (2, 3)) q1 = jnp.concatenate([ jnp.ones_like(vec_q1)[:, :1], vec_q1], axis=-1) q2 = jax.random.normal(keys[1], (2, 4)) t1 = jax.random.normal(keys[2], (2, 3)) t2 = jax.random.normal(keys[3], (2, 3)) a1 = quat_affine.QuatAffine(q1, t1, unstack_inputs=True) a2 = quat_affine.QuatAffine(q2, t2, unstack_inputs=True) a21 = a2.pre_compose(jnp.concatenate([vec_q1, t1], axis=-1)) rng, key = jax.random.split(rng) x = jax.random.normal(key, (2, 3)) new_x = a21.apply_to_point(jnp.moveaxis(x, -1, 0)) new_x_apply2 = a2.apply_to_point(a1.apply_to_point(jnp.moveaxis(x, -1, 0))) self._assert_check({ 'quat': (q2r(quat_affine.quat_multiply(a2.quaternion, a1.quaternion)), q2r(a21.quaternion)), 'rot': (jnp.matmul(r2t(a2.rotation), r2t(a1.rotation)), r2t(a21.rotation)), 'point': (v2t(new_x_apply2), v2t(new_x)), 'inverse': (x, v2t(a21.invert_point(new_x))), }) def test_batching(self): """Test that affine applies batchwise.""" rng = jax.random.PRNGKey(42) keys = jax.random.split(rng, 3) q = 
jax.random.uniform(keys[0], (5, 2, 4)) t = jax.random.uniform(keys[1], (2, 3)) x = jax.random.uniform(keys[2], (5, 1, 3)) a = quat_affine.QuatAffine(q, t, unstack_inputs=True) y = v2t(a.apply_to_point(jnp.moveaxis(x, -1, 0))) y_list = [] for i in range(5): for j in range(2): a_local = quat_affine.QuatAffine(q[i, j], t[j], unstack_inputs=True) y_local = v2t(a_local.apply_to_point(jnp.moveaxis(x[i, 0], -1, 0))) y_list.append(y_local) y_combine = jnp.reshape(jnp.stack(y_list, axis=0), (5, 2, 3)) self._assert_check({ 'batch': (y_combine, y), 'quat': (q2r(a.quaternion), q2r(quat_affine.rot_to_quat(a.rotation))), }) def assertAllClose(self, a, b, rtol=1e-06, atol=1e-06): self.assertTrue(np.allclose(a, b, rtol=rtol, atol=atol)) def assertAllEqual(self, a, b): self.assertTrue(np.all(np.array(a) == np.array(b))) if __name__ == '__main__': absltest.main()
alphafold-main
alphafold/model/quat_affine_test.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shared utils for tests.""" import dataclasses from alphafold.model.geometry import rigid_matrix_vector from alphafold.model.geometry import rotation_matrix from alphafold.model.geometry import vector import jax.numpy as jnp import numpy as np def assert_rotation_matrix_equal(matrix1: rotation_matrix.Rot3Array, matrix2: rotation_matrix.Rot3Array): for field in dataclasses.fields(rotation_matrix.Rot3Array): field = field.name np.testing.assert_array_equal( getattr(matrix1, field), getattr(matrix2, field)) def assert_rotation_matrix_close(mat1: rotation_matrix.Rot3Array, mat2: rotation_matrix.Rot3Array): np.testing.assert_array_almost_equal(mat1.to_array(), mat2.to_array(), 6) def assert_array_equal_to_rotation_matrix(array: jnp.ndarray, matrix: rotation_matrix.Rot3Array): """Check that array and Matrix match.""" np.testing.assert_array_equal(matrix.xx, array[..., 0, 0]) np.testing.assert_array_equal(matrix.xy, array[..., 0, 1]) np.testing.assert_array_equal(matrix.xz, array[..., 0, 2]) np.testing.assert_array_equal(matrix.yx, array[..., 1, 0]) np.testing.assert_array_equal(matrix.yy, array[..., 1, 1]) np.testing.assert_array_equal(matrix.yz, array[..., 1, 2]) np.testing.assert_array_equal(matrix.zx, array[..., 2, 0]) np.testing.assert_array_equal(matrix.zy, array[..., 2, 1]) np.testing.assert_array_equal(matrix.zz, array[..., 2, 2]) def assert_array_close_to_rotation_matrix(array: jnp.ndarray, matrix: rotation_matrix.Rot3Array): np.testing.assert_array_almost_equal(matrix.to_array(), array, 6) def assert_vectors_equal(vec1: vector.Vec3Array, vec2: vector.Vec3Array): np.testing.assert_array_equal(vec1.x, vec2.x) np.testing.assert_array_equal(vec1.y, vec2.y) np.testing.assert_array_equal(vec1.z, vec2.z) def assert_vectors_close(vec1: vector.Vec3Array, vec2: vector.Vec3Array): np.testing.assert_allclose(vec1.x, vec2.x, atol=1e-6, rtol=0.) np.testing.assert_allclose(vec1.y, vec2.y, atol=1e-6, rtol=0.) np.testing.assert_allclose(vec1.z, vec2.z, atol=1e-6, rtol=0.) def assert_array_close_to_vector(array: jnp.ndarray, vec: vector.Vec3Array): np.testing.assert_allclose(vec.to_array(), array, atol=1e-6, rtol=0.) 
def assert_array_equal_to_vector(array: jnp.ndarray, vec: vector.Vec3Array): np.testing.assert_array_equal(vec.to_array(), array) def assert_rigid_equal_to_rigid(rigid1: rigid_matrix_vector.Rigid3Array, rigid2: rigid_matrix_vector.Rigid3Array): assert_rot_trans_equal_to_rigid(rigid1.rotation, rigid1.translation, rigid2) def assert_rigid_close_to_rigid(rigid1: rigid_matrix_vector.Rigid3Array, rigid2: rigid_matrix_vector.Rigid3Array): assert_rot_trans_close_to_rigid(rigid1.rotation, rigid1.translation, rigid2) def assert_rot_trans_equal_to_rigid(rot: rotation_matrix.Rot3Array, trans: vector.Vec3Array, rigid: rigid_matrix_vector.Rigid3Array): assert_rotation_matrix_equal(rot, rigid.rotation) assert_vectors_equal(trans, rigid.translation) def assert_rot_trans_close_to_rigid(rot: rotation_matrix.Rot3Array, trans: vector.Vec3Array, rigid: rigid_matrix_vector.Rigid3Array): assert_rotation_matrix_close(rot, rigid.rotation) assert_vectors_close(trans, rigid.translation)
alphafold-main
alphafold/model/geometry/test_utils.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Vec3Array Class.""" from __future__ import annotations import dataclasses from typing import Union from alphafold.model.geometry import struct_of_array from alphafold.model.geometry import utils import jax import jax.numpy as jnp import numpy as np Float = Union[float, jnp.ndarray] VERSION = '0.1' @struct_of_array.StructOfArray(same_dtype=True) class Vec3Array: """Vec3Array in 3 dimensional Space implemented as struct of arrays. This is done in order to improve performance and precision. On TPU small matrix multiplications are very suboptimal and will waste large compute ressources, furthermore any matrix multiplication on tpu happen in mixed bfloat16/float32 precision, which is often undesirable when handling physical coordinates. In most cases this will also be faster on cpu's/gpu's since it allows for easier use of vector instructions. """ x: jnp.ndarray = dataclasses.field(metadata={'dtype': jnp.float32}) y: jnp.ndarray z: jnp.ndarray def __post_init__(self): if hasattr(self.x, 'dtype'): assert self.x.dtype == self.y.dtype assert self.x.dtype == self.z.dtype assert all([x == y for x, y in zip(self.x.shape, self.y.shape)]) assert all([x == z for x, z in zip(self.x.shape, self.z.shape)]) def __add__(self, other: Vec3Array) -> Vec3Array: return jax.tree_map(lambda x, y: x + y, self, other) def __sub__(self, other: Vec3Array) -> Vec3Array: return jax.tree_map(lambda x, y: x - y, self, other) def __mul__(self, other: Float) -> Vec3Array: return jax.tree_map(lambda x: x * other, self) def __rmul__(self, other: Float) -> Vec3Array: return self * other def __truediv__(self, other: Float) -> Vec3Array: return jax.tree_map(lambda x: x / other, self) def __neg__(self) -> Vec3Array: return jax.tree_map(lambda x: -x, self) def __pos__(self) -> Vec3Array: return jax.tree_map(lambda x: x, self) def cross(self, other: Vec3Array) -> Vec3Array: """Compute cross product between 'self' and 'other'.""" new_x = self.y * other.z - self.z * other.y new_y = self.z * other.x - self.x * other.z new_z = self.x * other.y - self.y * other.x return Vec3Array(new_x, new_y, new_z) def dot(self, other: Vec3Array) -> Float: """Compute dot product between 'self' and 'other'.""" return self.x * other.x + self.y * other.y + self.z * other.z def norm(self, epsilon: float = 1e-6) -> Float: """Compute Norm of Vec3Array, clipped to epsilon.""" # To avoid NaN on the backward pass, we must use maximum before the sqrt norm2 = self.dot(self) if epsilon: norm2 = jnp.maximum(norm2, epsilon**2) return jnp.sqrt(norm2) def norm2(self): return self.dot(self) def normalized(self, epsilon: float = 1e-6) -> Vec3Array: """Return unit vector with optional clipping.""" return self / self.norm(epsilon) @classmethod def zeros(cls, shape, dtype=jnp.float32): """Return Vec3Array corresponding to zeros of given shape.""" return cls( jnp.zeros(shape, dtype), jnp.zeros(shape, dtype), jnp.zeros(shape, dtype)) # pytype: disable=wrong-arg-count # trace-all-classes def 
to_array(self) -> jnp.ndarray: return jnp.stack([self.x, self.y, self.z], axis=-1) @classmethod def from_array(cls, array): return cls(*utils.unstack(array)) def __getstate__(self): return (VERSION, [np.asarray(self.x), np.asarray(self.y), np.asarray(self.z)]) def __setstate__(self, state): version, state = state del version for i, letter in enumerate('xyz'): object.__setattr__(self, letter, state[i]) def square_euclidean_distance(vec1: Vec3Array, vec2: Vec3Array, epsilon: float = 1e-6) -> Float: """Computes square of euclidean distance between 'vec1' and 'vec2'. Args: vec1: Vec3Array to compute distance to vec2: Vec3Array to compute distance from, should be broadcast compatible with 'vec1' epsilon: distance is clipped from below to be at least epsilon Returns: Array of square euclidean distances; shape will be result of broadcasting 'vec1' and 'vec2' """ difference = vec1 - vec2 distance = difference.dot(difference) if epsilon: distance = jnp.maximum(distance, epsilon) return distance def dot(vector1: Vec3Array, vector2: Vec3Array) -> Float: return vector1.dot(vector2) def cross(vector1: Vec3Array, vector2: Vec3Array) -> Float: return vector1.cross(vector2) def norm(vector: Vec3Array, epsilon: float = 1e-6) -> Float: return vector.norm(epsilon) def normalized(vector: Vec3Array, epsilon: float = 1e-6) -> Vec3Array: return vector.normalized(epsilon) def euclidean_distance(vec1: Vec3Array, vec2: Vec3Array, epsilon: float = 1e-6) -> Float: """Computes euclidean distance between 'vec1' and 'vec2'. Args: vec1: Vec3Array to compute euclidean distance to vec2: Vec3Array to compute euclidean distance from, should be broadcast compatible with 'vec1' epsilon: distance is clipped from below to be at least epsilon Returns: Array of euclidean distances; shape will be result of broadcasting 'vec1' and 'vec2' """ distance_sq = square_euclidean_distance(vec1, vec2, epsilon**2) distance = jnp.sqrt(distance_sq) return distance def dihedral_angle(a: Vec3Array, b: Vec3Array, c: Vec3Array, d: Vec3Array) -> Float: """Computes torsion angle for a quadruple of points. For points (a, b, c, d), this is the angle between the planes defined by points (a, b, c) and (b, c, d). It is also known as the dihedral angle. Arguments: a: A Vec3Array of coordinates. b: A Vec3Array of coordinates. c: A Vec3Array of coordinates. d: A Vec3Array of coordinates. Returns: A tensor of angles in radians: [-pi, pi]. """ v1 = a - b v2 = b - c v3 = d - c c1 = v1.cross(v2) c2 = v3.cross(v2) c3 = c2.cross(c1) v2_mag = v2.norm() return jnp.arctan2(c3.dot(v2), v2_mag * c1.dot(c2)) def random_gaussian_vector(shape, key, dtype=jnp.float32): vec_array = jax.random.normal(key, shape + (3,), dtype) return Vec3Array.from_array(vec_array)
alphafold-main
alphafold/model/geometry/vector.py
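# --- Illustrative usage sketch (added for clarity; not part of the AlphaFold
# source). Demonstrates basic Vec3Array arithmetic on arbitrary, made-up
# coordinates, including the dihedral_angle helper defined above.
import jax.numpy as jnp
from alphafold.model.geometry import vector

a = vector.Vec3Array.from_array(jnp.array([0., 0., 0.]))
b = vector.Vec3Array.from_array(jnp.array([1., 0., 0.]))
c = vector.Vec3Array.from_array(jnp.array([1., 1., 0.]))
d = vector.Vec3Array.from_array(jnp.array([1., 1., 1.]))

print((b - a).norm())                     # 1.0
print(vector.dot(b - a, c - b))           # 0.0 (perpendicular segments)
print(vector.dihedral_angle(a, b, c, d))  # ~ pi/2 for this right-angled chain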
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Class decorator to represent (nested) struct of arrays.""" import dataclasses import jax def get_item(instance, key): sliced = {} for field in get_array_fields(instance): num_trailing_dims = field.metadata.get('num_trailing_dims', 0) this_key = key if isinstance(key, tuple) and Ellipsis in this_key: this_key += (slice(None),) * num_trailing_dims sliced[field.name] = getattr(instance, field.name)[this_key] return dataclasses.replace(instance, **sliced) @property def get_shape(instance): """Returns Shape for given instance of dataclass.""" first_field = dataclasses.fields(instance)[0] num_trailing_dims = first_field.metadata.get('num_trailing_dims', None) value = getattr(instance, first_field.name) if num_trailing_dims: return value.shape[:-num_trailing_dims] else: return value.shape def get_len(instance): """Returns length for given instance of dataclass.""" shape = instance.shape if shape: return shape[0] else: raise TypeError('len() of unsized object') # Match jax.numpy behavior. @property def get_dtype(instance): """Returns Dtype for given instance of dataclass.""" fields = dataclasses.fields(instance) sets_dtype = [ field.name for field in fields if field.metadata.get('sets_dtype', False) ] if sets_dtype: assert len(sets_dtype) == 1, 'at most field can set dtype' field_value = getattr(instance, sets_dtype[0]) elif instance.same_dtype: field_value = getattr(instance, fields[0].name) else: # Should this be Value Error? raise AttributeError('Trying to access Dtype on Struct of Array without' 'either "same_dtype" or field setting dtype') if hasattr(field_value, 'dtype'): return field_value.dtype else: # Should this be Value Error? raise AttributeError(f'field_value {field_value} does not have dtype') def replace(instance, **kwargs): return dataclasses.replace(instance, **kwargs) def post_init(instance): """Validate instance has same shapes & dtypes.""" array_fields = get_array_fields(instance) arrays = list(get_array_fields(instance, return_values=True).values()) first_field = array_fields[0] # These slightly weird constructions about checking whether the leaves are # actual arrays is since e.g. vmap internally relies on being able to # construct pytree's with object() as leaves, this would break the checking # as such we are only validating the object when the entries in the dataclass # Are arrays or other dataclasses of arrays. 
try: dtype = instance.dtype except AttributeError: dtype = None if dtype is not None: first_shape = instance.shape for array, field in zip(arrays, array_fields): field_shape = array.shape num_trailing_dims = field.metadata.get('num_trailing_dims', None) if num_trailing_dims: array_shape = array.shape field_shape = array_shape[:-num_trailing_dims] msg = (f'field {field} should have number of trailing dims' ' {num_trailing_dims}') assert len(array_shape) == len(first_shape) + num_trailing_dims, msg else: field_shape = array.shape shape_msg = (f"Stripped Shape {field_shape} of field {field} doesn't " f"match shape {first_shape} of field {first_field}") assert field_shape == first_shape, shape_msg field_dtype = array.dtype allowed_metadata_dtypes = field.metadata.get('allowed_dtypes', []) if allowed_metadata_dtypes: msg = f'Dtype is {field_dtype} but must be in {allowed_metadata_dtypes}' assert field_dtype in allowed_metadata_dtypes, msg if 'dtype' in field.metadata: target_dtype = field.metadata['dtype'] else: target_dtype = dtype msg = f'Dtype is {field_dtype} but must be {target_dtype}' assert field_dtype == target_dtype, msg def flatten(instance): """Flatten Struct of Array instance.""" array_likes = list(get_array_fields(instance, return_values=True).values()) flat_array_likes = [] inner_treedefs = [] num_arrays = [] for array_like in array_likes: flat_array_like, inner_treedef = jax.tree_util.tree_flatten(array_like) inner_treedefs.append(inner_treedef) flat_array_likes += flat_array_like num_arrays.append(len(flat_array_like)) metadata = get_metadata_fields(instance, return_values=True) metadata = type(instance).metadata_cls(**metadata) return flat_array_likes, (inner_treedefs, metadata, num_arrays) def make_metadata_class(cls): metadata_fields = get_fields(cls, lambda x: x.metadata.get('is_metadata', False)) metadata_cls = dataclasses.make_dataclass( cls_name='Meta' + cls.__name__, fields=[(field.name, field.type, field) for field in metadata_fields], frozen=True, eq=True) return metadata_cls def get_fields(cls_or_instance, filterfn, return_values=False): fields = dataclasses.fields(cls_or_instance) fields = [field for field in fields if filterfn(field)] if return_values: return { field.name: getattr(cls_or_instance, field.name) for field in fields } else: return fields def get_array_fields(cls, return_values=False): return get_fields( cls, lambda x: not x.metadata.get('is_metadata', False), return_values=return_values) def get_metadata_fields(cls, return_values=False): return get_fields( cls, lambda x: x.metadata.get('is_metadata', False), return_values=return_values) class StructOfArray: """Class Decorator for Struct Of Arrays.""" def __init__(self, same_dtype=True): self.same_dtype = same_dtype def __call__(self, cls): cls.__array_ufunc__ = None cls.replace = replace cls.same_dtype = self.same_dtype cls.dtype = get_dtype cls.shape = get_shape cls.__len__ = get_len cls.__getitem__ = get_item cls.__post_init__ = post_init new_cls = dataclasses.dataclass(cls, frozen=True, eq=False) # pytype: disable=wrong-keyword-args # pytree claims to require metadata to be hashable, not sure why, # But making derived dataclass that can just hold metadata new_cls.metadata_cls = make_metadata_class(new_cls) def unflatten(aux, data): inner_treedefs, metadata, num_arrays = aux array_fields = [field.name for field in get_array_fields(new_cls)] value_dict = {} array_start = 0 for num_array, inner_treedef, array_field in zip(num_arrays, inner_treedefs, array_fields): value_dict[array_field] = 
jax.tree_util.tree_unflatten( inner_treedef, data[array_start:array_start + num_array]) array_start += num_array metadata_fields = get_metadata_fields(new_cls) for field in metadata_fields: value_dict[field.name] = getattr(metadata, field.name) return new_cls(**value_dict) jax.tree_util.register_pytree_node( nodetype=new_cls, flatten_func=flatten, unflatten_func=unflatten) return new_cls
alphafold-main
alphafold/model/geometry/struct_of_array.py
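# --- Illustrative usage sketch (added for clarity; not part of the AlphaFold
# source). A minimal struct-of-arrays dataclass built with the decorator
# above; the Point2 class is a toy example, not something defined in the repo.
import dataclasses
import jax
import jax.numpy as jnp
from alphafold.model.geometry import struct_of_array

@struct_of_array.StructOfArray(same_dtype=True)
class Point2:
  x: jnp.ndarray = dataclasses.field(metadata={'dtype': jnp.float32})
  y: jnp.ndarray

p = Point2(jnp.zeros(5), jnp.ones(5))
print(p.shape, p.dtype)   # (5,) float32
print(p[2:4].shape)       # (2,) -- indexing slices every array field
# Instances are registered as pytrees, so JAX transforms and tree utilities
# work on them directly.
doubled = jax.tree_util.tree_map(lambda arr: 2. * arr, p)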
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Geometry Module.""" from alphafold.model.geometry import rigid_matrix_vector from alphafold.model.geometry import rotation_matrix from alphafold.model.geometry import struct_of_array from alphafold.model.geometry import vector Rot3Array = rotation_matrix.Rot3Array Rigid3Array = rigid_matrix_vector.Rigid3Array StructOfArray = struct_of_array.StructOfArray Vec3Array = vector.Vec3Array square_euclidean_distance = vector.square_euclidean_distance euclidean_distance = vector.euclidean_distance dihedral_angle = vector.dihedral_angle dot = vector.dot cross = vector.cross
alphafold-main
alphafold/model/geometry/__init__.py
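# --- Illustrative usage sketch (added for clarity; not part of the AlphaFold
# source). The __init__ above re-exports the geometry classes and helpers, so
# downstream code can use the short aliases; the values here are made up.
import jax.numpy as jnp
from alphafold.model import geometry

a = geometry.Vec3Array.from_array(jnp.array([0., 0., 0.]))
b = geometry.Vec3Array.from_array(jnp.array([3., 4., 0.]))
print(geometry.euclidean_distance(a, b))  # 5.0
print(geometry.dot(b, b))                 # 25.0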
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rot3Array Matrix Class.""" from __future__ import annotations import dataclasses from alphafold.model.geometry import struct_of_array from alphafold.model.geometry import utils from alphafold.model.geometry import vector import jax import jax.numpy as jnp import numpy as np COMPONENTS = ['xx', 'xy', 'xz', 'yx', 'yy', 'yz', 'zx', 'zy', 'zz'] VERSION = '0.1' @struct_of_array.StructOfArray(same_dtype=True) class Rot3Array: """Rot3Array Matrix in 3 dimensional Space implemented as struct of arrays.""" xx: jnp.ndarray = dataclasses.field(metadata={'dtype': jnp.float32}) xy: jnp.ndarray xz: jnp.ndarray yx: jnp.ndarray yy: jnp.ndarray yz: jnp.ndarray zx: jnp.ndarray zy: jnp.ndarray zz: jnp.ndarray __array_ufunc__ = None def inverse(self) -> Rot3Array: """Returns inverse of Rot3Array.""" return Rot3Array(self.xx, self.yx, self.zx, self.xy, self.yy, self.zy, self.xz, self.yz, self.zz) def apply_to_point(self, point: vector.Vec3Array) -> vector.Vec3Array: """Applies Rot3Array to point.""" return vector.Vec3Array( self.xx * point.x + self.xy * point.y + self.xz * point.z, self.yx * point.x + self.yy * point.y + self.yz * point.z, self.zx * point.x + self.zy * point.y + self.zz * point.z) def apply_inverse_to_point(self, point: vector.Vec3Array) -> vector.Vec3Array: """Applies inverse Rot3Array to point.""" return self.inverse().apply_to_point(point) def __matmul__(self, other: Rot3Array) -> Rot3Array: """Composes two Rot3Arrays.""" c0 = self.apply_to_point(vector.Vec3Array(other.xx, other.yx, other.zx)) c1 = self.apply_to_point(vector.Vec3Array(other.xy, other.yy, other.zy)) c2 = self.apply_to_point(vector.Vec3Array(other.xz, other.yz, other.zz)) return Rot3Array(c0.x, c1.x, c2.x, c0.y, c1.y, c2.y, c0.z, c1.z, c2.z) @classmethod def identity(cls, shape, dtype=jnp.float32) -> Rot3Array: """Returns identity of given shape.""" ones = jnp.ones(shape, dtype=dtype) zeros = jnp.zeros(shape, dtype=dtype) return cls(ones, zeros, zeros, zeros, ones, zeros, zeros, zeros, ones) # pytype: disable=wrong-arg-count # trace-all-classes @classmethod def from_two_vectors(cls, e0: vector.Vec3Array, e1: vector.Vec3Array) -> Rot3Array: """Construct Rot3Array from two Vectors. Rot3Array is constructed such that in the corresponding frame 'e0' lies on the positive x-Axis and 'e1' lies in the xy plane with positive sign of y. Args: e0: Vector e1: Vector Returns: Rot3Array """ # Normalize the unit vector for the x-axis, e0. e0 = e0.normalized() # make e1 perpendicular to e0. c = e1.dot(e0) e1 = (e1 - c * e0).normalized() # Compute e2 as cross product of e0 and e1. e2 = e0.cross(e1) return cls(e0.x, e1.x, e2.x, e0.y, e1.y, e2.y, e0.z, e1.z, e2.z) # pytype: disable=wrong-arg-count # trace-all-classes @classmethod def from_array(cls, array: jnp.ndarray) -> Rot3Array: """Construct Rot3Array Matrix from array of shape. 
[..., 3, 3].""" unstacked = utils.unstack(array, axis=-2) unstacked = sum([utils.unstack(x, axis=-1) for x in unstacked], []) return cls(*unstacked) def to_array(self) -> jnp.ndarray: """Convert Rot3Array to array of shape [..., 3, 3].""" return jnp.stack( [jnp.stack([self.xx, self.xy, self.xz], axis=-1), jnp.stack([self.yx, self.yy, self.yz], axis=-1), jnp.stack([self.zx, self.zy, self.zz], axis=-1)], axis=-2) @classmethod def from_quaternion(cls, w: jnp.ndarray, x: jnp.ndarray, y: jnp.ndarray, z: jnp.ndarray, normalize: bool = True, epsilon: float = 1e-6) -> Rot3Array: """Construct Rot3Array from components of quaternion.""" if normalize: inv_norm = jax.lax.rsqrt(jnp.maximum(epsilon, w**2 + x**2 + y**2 + z**2)) w *= inv_norm x *= inv_norm y *= inv_norm z *= inv_norm xx = 1 - 2 * (jnp.square(y) + jnp.square(z)) xy = 2 * (x * y - w * z) xz = 2 * (x * z + w * y) yx = 2 * (x * y + w * z) yy = 1 - 2 * (jnp.square(x) + jnp.square(z)) yz = 2 * (y * z - w * x) zx = 2 * (x * z - w * y) zy = 2 * (y * z + w * x) zz = 1 - 2 * (jnp.square(x) + jnp.square(y)) return cls(xx, xy, xz, yx, yy, yz, zx, zy, zz) # pytype: disable=wrong-arg-count # trace-all-classes @classmethod def random_uniform(cls, key, shape, dtype=jnp.float32) -> Rot3Array: """Samples uniform random Rot3Array according to Haar Measure.""" quat_array = jax.random.normal(key, tuple(shape) + (4,), dtype=dtype) quats = utils.unstack(quat_array) return cls.from_quaternion(*quats) def __getstate__(self): return (VERSION, [np.asarray(getattr(self, field)) for field in COMPONENTS]) def __setstate__(self, state): version, state = state del version for i, field in enumerate(COMPONENTS): object.__setattr__(self, field, state[i])
alphafold-main
alphafold/model/geometry/rotation_matrix.py
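# --- Illustrative usage sketch (added for clarity; not part of the AlphaFold
# source). Builds a Rot3Array frame from two arbitrary, made-up vectors with
# from_two_vectors and checks the apply/apply_inverse round trip.
import jax.numpy as jnp
from alphafold.model.geometry import rotation_matrix, vector

e0 = vector.Vec3Array.from_array(jnp.array([1., 2., 0.]))
e1 = vector.Vec3Array.from_array(jnp.array([0., 1., 1.]))
rot = rotation_matrix.Rot3Array.from_two_vectors(e0, e1)

p = vector.Vec3Array.from_array(jnp.array([0.5, -1., 2.]))
round_trip = rot.apply_inverse_to_point(rot.apply_to_point(p))
print(round_trip.to_array())             # ~ [0.5, -1., 2.]
print((rot @ rot.inverse()).to_array())  # ~ 3x3 identity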
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utils for geometry library.""" from typing import List import jax.numpy as jnp def unstack(value: jnp.ndarray, axis: int = -1) -> List[jnp.ndarray]: return [jnp.squeeze(v, axis=axis) for v in jnp.split(value, value.shape[axis], axis=axis)]
alphafold-main
alphafold/model/geometry/utils.py
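# --- Illustrative usage sketch (added for clarity; not part of the AlphaFold
# source). utils.unstack splits one axis into a list of arrays, mirroring
# jnp.stack; the array below is arbitrary demo data.
import jax.numpy as jnp
from alphafold.model.geometry import utils

xyz = jnp.arange(6.).reshape(2, 3)
x, y, z = utils.unstack(xyz, axis=-1)                # three arrays of shape (2,)
print(x.shape, jnp.stack([x, y, z], axis=-1).shape)  # (2,) (2, 3)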
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rigid3Array Transformations represented by a Matrix and a Vector.""" from __future__ import annotations from typing import Union from alphafold.model.geometry import rotation_matrix from alphafold.model.geometry import struct_of_array from alphafold.model.geometry import vector import jax import jax.numpy as jnp Float = Union[float, jnp.ndarray] VERSION = '0.1' @struct_of_array.StructOfArray(same_dtype=True) class Rigid3Array: """Rigid Transformation, i.e. element of special euclidean group.""" rotation: rotation_matrix.Rot3Array translation: vector.Vec3Array def __matmul__(self, other: Rigid3Array) -> Rigid3Array: new_rotation = self.rotation @ other.rotation new_translation = self.apply_to_point(other.translation) return Rigid3Array(new_rotation, new_translation) def inverse(self) -> Rigid3Array: """Return Rigid3Array corresponding to inverse transform.""" inv_rotation = self.rotation.inverse() inv_translation = inv_rotation.apply_to_point(-self.translation) return Rigid3Array(inv_rotation, inv_translation) def apply_to_point(self, point: vector.Vec3Array) -> vector.Vec3Array: """Apply Rigid3Array transform to point.""" return self.rotation.apply_to_point(point) + self.translation def apply_inverse_to_point(self, point: vector.Vec3Array) -> vector.Vec3Array: """Apply inverse Rigid3Array transform to point.""" new_point = point - self.translation return self.rotation.apply_inverse_to_point(new_point) def compose_rotation(self, other_rotation): rot = self.rotation @ other_rotation trans = jax.tree_map(lambda x: jnp.broadcast_to(x, rot.shape), self.translation) return Rigid3Array(rot, trans) @classmethod def identity(cls, shape, dtype=jnp.float32) -> Rigid3Array: """Return identity Rigid3Array of given shape.""" return cls( rotation_matrix.Rot3Array.identity(shape, dtype=dtype), vector.Vec3Array.zeros(shape, dtype=dtype)) # pytype: disable=wrong-arg-count # trace-all-classes def scale_translation(self, factor: Float) -> Rigid3Array: """Scale translation in Rigid3Array by 'factor'.""" return Rigid3Array(self.rotation, self.translation * factor) def to_array(self): rot_array = self.rotation.to_array() vec_array = self.translation.to_array() return jnp.concatenate([rot_array, vec_array[..., None]], axis=-1) @classmethod def from_array(cls, array): rot = rotation_matrix.Rot3Array.from_array(array[..., :3]) vec = vector.Vec3Array.from_array(array[..., -1]) return cls(rot, vec) # pytype: disable=wrong-arg-count # trace-all-classes @classmethod def from_array4x4(cls, array: jnp.ndarray) -> Rigid3Array: """Construct Rigid3Array from homogeneous 4x4 array.""" assert array.shape[-1] == 4 assert array.shape[-2] == 4 rotation = rotation_matrix.Rot3Array( array[..., 0, 0], array[..., 0, 1], array[..., 0, 2], array[..., 1, 0], array[..., 1, 1], array[..., 1, 2], array[..., 2, 0], array[..., 2, 1], array[..., 2, 2] ) translation = vector.Vec3Array( array[..., 0, 3], array[..., 1, 3], array[..., 2, 3]) return cls(rotation, 
translation) # pytype: disable=wrong-arg-count # trace-all-classes def __getstate__(self): return (VERSION, (self.rotation, self.translation)) def __setstate__(self, state): version, (rot, trans) = state del version object.__setattr__(self, 'rotation', rot) object.__setattr__(self, 'translation', trans)
alphafold-main
alphafold/model/geometry/rigid_matrix_vector.py
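# --- Illustrative usage sketch (added for clarity; not part of the AlphaFold
# source). Builds a Rigid3Array from a made-up homogeneous 4x4 transform and
# checks that inverse() undoes apply_to_point.
import jax.numpy as jnp
from alphafold.model.geometry import rigid_matrix_vector, vector

homogeneous = jnp.array([[1., 0.,  0.,  2.],
                         [0., 0., -1.,  0.],
                         [0., 1.,  0., -3.],
                         [0., 0.,  0.,  1.]])
rigid = rigid_matrix_vector.Rigid3Array.from_array4x4(homogeneous)

point = vector.Vec3Array.from_array(jnp.array([1., 1., 1.]))
moved = rigid.apply_to_point(point)
restored = rigid.inverse().apply_to_point(moved)
print(restored.to_array())  # ~ [1., 1., 1.]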
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature pre-processing input pipeline for AlphaFold.""" from alphafold.model.tf import data_transforms from alphafold.model.tf import shape_placeholders import tensorflow.compat.v1 as tf import tree # Pylint gets confused by the curry1 decorator because it changes the number # of arguments to the function. # pylint:disable=no-value-for-parameter NUM_RES = shape_placeholders.NUM_RES NUM_MSA_SEQ = shape_placeholders.NUM_MSA_SEQ NUM_EXTRA_SEQ = shape_placeholders.NUM_EXTRA_SEQ NUM_TEMPLATES = shape_placeholders.NUM_TEMPLATES def nonensembled_map_fns(data_config): """Input pipeline functions which are not ensembled.""" common_cfg = data_config.common map_fns = [ data_transforms.correct_msa_restypes, data_transforms.add_distillation_flag(False), data_transforms.cast_64bit_ints, data_transforms.squeeze_features, # Keep to not disrupt RNG. data_transforms.randomly_replace_msa_with_unknown(0.0), data_transforms.make_seq_mask, data_transforms.make_msa_mask, # Compute the HHblits profile if it's not set. This has to be run before # sampling the MSA. data_transforms.make_hhblits_profile, data_transforms.make_random_crop_to_size_seed, ] if common_cfg.use_templates: map_fns.extend([ data_transforms.fix_templates_aatype, data_transforms.make_template_mask, data_transforms.make_pseudo_beta('template_') ]) map_fns.extend([ data_transforms.make_atom14_masks, ]) return map_fns def ensembled_map_fns(data_config): """Input pipeline functions that can be ensembled and averaged.""" common_cfg = data_config.common eval_cfg = data_config.eval map_fns = [] if common_cfg.reduce_msa_clusters_by_max_templates: pad_msa_clusters = eval_cfg.max_msa_clusters - eval_cfg.max_templates else: pad_msa_clusters = eval_cfg.max_msa_clusters max_msa_clusters = pad_msa_clusters max_extra_msa = common_cfg.max_extra_msa map_fns.append( data_transforms.sample_msa( max_msa_clusters, keep_extra=True)) if 'masked_msa' in common_cfg: # Masked MSA should come *before* MSA clustering so that # the clustering and full MSA profile do not leak information about # the masked locations and secret corrupted locations. map_fns.append( data_transforms.make_masked_msa(common_cfg.masked_msa, eval_cfg.masked_msa_replace_fraction)) if common_cfg.msa_cluster_features: map_fns.append(data_transforms.nearest_neighbor_clusters()) map_fns.append(data_transforms.summarize_clusters()) # Crop after creating the cluster profiles. 
if max_extra_msa: map_fns.append(data_transforms.crop_extra_msa(max_extra_msa)) else: map_fns.append(data_transforms.delete_extra_msa) map_fns.append(data_transforms.make_msa_feat()) crop_feats = dict(eval_cfg.feat) if eval_cfg.fixed_size: map_fns.append(data_transforms.select_feat(list(crop_feats))) map_fns.append(data_transforms.random_crop_to_size( eval_cfg.crop_size, eval_cfg.max_templates, crop_feats, eval_cfg.subsample_templates)) map_fns.append(data_transforms.make_fixed_size( crop_feats, pad_msa_clusters, common_cfg.max_extra_msa, eval_cfg.crop_size, eval_cfg.max_templates)) else: map_fns.append(data_transforms.crop_templates(eval_cfg.max_templates)) return map_fns def process_tensors_from_config(tensors, data_config): """Apply filters and maps to an existing dataset, based on the config.""" def wrap_ensemble_fn(data, i): """Function to be mapped over the ensemble dimension.""" d = data.copy() fns = ensembled_map_fns(data_config) fn = compose(fns) d['ensemble_index'] = i return fn(d) eval_cfg = data_config.eval tensors = compose( nonensembled_map_fns( data_config))( tensors) tensors_0 = wrap_ensemble_fn(tensors, tf.constant(0)) num_ensemble = eval_cfg.num_ensemble if data_config.common.resample_msa_in_recycling: # Separate batch per ensembling & recycling step. num_ensemble *= data_config.common.num_recycle + 1 if isinstance(num_ensemble, tf.Tensor) or num_ensemble > 1: fn_output_signature = tree.map_structure( tf.TensorSpec.from_tensor, tensors_0) tensors = tf.map_fn( lambda x: wrap_ensemble_fn(tensors, x), tf.range(num_ensemble), parallel_iterations=1, fn_output_signature=fn_output_signature) else: tensors = tree.map_structure(lambda x: x[None], tensors_0) return tensors @data_transforms.curry1 def compose(x, fs): for f in fs: x = f(x) return x
alphafold-main
alphafold/model/tf/input_pipeline.py
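# --- Illustrative usage sketch (added for clarity; not part of the AlphaFold
# source). The pipeline chains transforms with the curried `compose` helper:
# compose(list_of_fns) returns a function that threads a feature dict through
# each transform in order. The toy transforms below are placeholders, not real
# AlphaFold data transforms; importing input_pipeline requires TensorFlow.
from alphafold.model.tf import input_pipeline

def add_flag(protein):
  protein = dict(protein)
  protein['flag'] = 1
  return protein

def double_flag(protein):
  protein = dict(protein)
  protein['flag'] *= 2
  return protein

pipeline = input_pipeline.compose([add_flag, double_flag])
print(pipeline({'seq_length': 10}))  # {'seq_length': 10, 'flag': 2}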
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for protein_features.""" import uuid from absl.testing import absltest from absl.testing import parameterized from alphafold.model.tf import protein_features import tensorflow.compat.v1 as tf def _random_bytes(): return str(uuid.uuid4()).encode('utf-8') class FeaturesTest(parameterized.TestCase, tf.test.TestCase): def setUp(self): super().setUp() tf.disable_v2_behavior() def testFeatureNames(self): self.assertEqual(len(protein_features.FEATURE_SIZES), len(protein_features.FEATURE_TYPES)) sorted_size_names = sorted(protein_features.FEATURE_SIZES.keys()) sorted_type_names = sorted(protein_features.FEATURE_TYPES.keys()) for i, size_name in enumerate(sorted_size_names): self.assertEqual(size_name, sorted_type_names[i]) def testReplacement(self): for name in protein_features.FEATURE_SIZES.keys(): sizes = protein_features.shape(name, num_residues=12, msa_length=24, num_templates=3) for x in sizes: self.assertEqual(type(x), int) self.assertGreater(x, 0) if __name__ == '__main__': absltest.main()
alphafold-main
alphafold/model/tf/protein_features_test.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Alphafold model TensorFlow code."""
alphafold-main
alphafold/model/tf/__init__.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data for AlphaFold.""" from alphafold.common import residue_constants from alphafold.model.tf import shape_helpers from alphafold.model.tf import shape_placeholders from alphafold.model.tf import utils import numpy as np import tensorflow.compat.v1 as tf # Pylint gets confused by the curry1 decorator because it changes the number # of arguments to the function. # pylint:disable=no-value-for-parameter NUM_RES = shape_placeholders.NUM_RES NUM_MSA_SEQ = shape_placeholders.NUM_MSA_SEQ NUM_EXTRA_SEQ = shape_placeholders.NUM_EXTRA_SEQ NUM_TEMPLATES = shape_placeholders.NUM_TEMPLATES def cast_64bit_ints(protein): for k, v in protein.items(): if v.dtype == tf.int64: protein[k] = tf.cast(v, tf.int32) return protein _MSA_FEATURE_NAMES = [ 'msa', 'deletion_matrix', 'msa_mask', 'msa_row_mask', 'bert_mask', 'true_msa' ] def make_seq_mask(protein): protein['seq_mask'] = tf.ones( shape_helpers.shape_list(protein['aatype']), dtype=tf.float32) return protein def make_template_mask(protein): protein['template_mask'] = tf.ones( shape_helpers.shape_list(protein['template_domain_names']), dtype=tf.float32) return protein def curry1(f): """Supply all arguments but the first.""" def fc(*args, **kwargs): return lambda x: f(x, *args, **kwargs) return fc @curry1 def add_distillation_flag(protein, distillation): protein['is_distillation'] = tf.constant(float(distillation), shape=[], dtype=tf.float32) return protein def make_all_atom_aatype(protein): protein['all_atom_aatype'] = protein['aatype'] return protein def fix_templates_aatype(protein): """Fixes aatype encoding of templates.""" # Map one-hot to indices. protein['template_aatype'] = tf.argmax( protein['template_aatype'], output_type=tf.int32, axis=-1) # Map hhsearch-aatype to our aatype. new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE new_order = tf.constant(new_order_list, dtype=tf.int32) protein['template_aatype'] = tf.gather(params=new_order, indices=protein['template_aatype']) return protein def correct_msa_restypes(protein): """Correct MSA restype to have the same order as residue_constants.""" new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE new_order = tf.constant(new_order_list, dtype=protein['msa'].dtype) protein['msa'] = tf.gather(new_order, protein['msa'], axis=0) perm_matrix = np.zeros((22, 22), dtype=np.float32) perm_matrix[range(len(new_order_list)), new_order_list] = 1. 
for k in protein: if 'profile' in k: # Include both hhblits and psiblast profiles num_dim = protein[k].shape.as_list()[-1] assert num_dim in [20, 21, 22], ( 'num_dim for %s out of expected range: %s' % (k, num_dim)) protein[k] = tf.tensordot(protein[k], perm_matrix[:num_dim, :num_dim], 1) return protein def squeeze_features(protein): """Remove singleton and repeated dimensions in protein features.""" protein['aatype'] = tf.argmax( protein['aatype'], axis=-1, output_type=tf.int32) for k in [ 'domain_name', 'msa', 'num_alignments', 'seq_length', 'sequence', 'superfamily', 'deletion_matrix', 'resolution', 'between_segment_residues', 'residue_index', 'template_all_atom_masks']: if k in protein: final_dim = shape_helpers.shape_list(protein[k])[-1] if isinstance(final_dim, int) and final_dim == 1: protein[k] = tf.squeeze(protein[k], axis=-1) for k in ['seq_length', 'num_alignments']: if k in protein: protein[k] = protein[k][0] # Remove fake sequence dimension return protein def make_random_crop_to_size_seed(protein): """Random seed for cropping residues and templates.""" protein['random_crop_to_size_seed'] = utils.make_random_seed() return protein @curry1 def randomly_replace_msa_with_unknown(protein, replace_proportion): """Replace a proportion of the MSA with 'X'.""" msa_mask = (tf.random.uniform(shape_helpers.shape_list(protein['msa'])) < replace_proportion) x_idx = 20 gap_idx = 21 msa_mask = tf.logical_and(msa_mask, protein['msa'] != gap_idx) protein['msa'] = tf.where(msa_mask, tf.ones_like(protein['msa']) * x_idx, protein['msa']) aatype_mask = ( tf.random.uniform(shape_helpers.shape_list(protein['aatype'])) < replace_proportion) protein['aatype'] = tf.where(aatype_mask, tf.ones_like(protein['aatype']) * x_idx, protein['aatype']) return protein @curry1 def sample_msa(protein, max_seq, keep_extra): """Sample MSA randomly, remaining sequences are stored as `extra_*`. Args: protein: batch to sample msa from. max_seq: number of sequences to sample. keep_extra: When True sequences not sampled are put into fields starting with 'extra_*'. Returns: Protein with sampled msa. """ num_seq = tf.shape(protein['msa'])[0] shuffled = tf.random_shuffle(tf.range(1, num_seq)) index_order = tf.concat([[0], shuffled], axis=0) num_sel = tf.minimum(max_seq, num_seq) sel_seq, not_sel_seq = tf.split(index_order, [num_sel, num_seq - num_sel]) for k in _MSA_FEATURE_NAMES: if k in protein: if keep_extra: protein['extra_' + k] = tf.gather(protein[k], not_sel_seq) protein[k] = tf.gather(protein[k], sel_seq) return protein @curry1 def crop_extra_msa(protein, max_extra_msa): """MSA features are cropped so only `max_extra_msa` sequences are kept.""" num_seq = tf.shape(protein['extra_msa'])[0] num_sel = tf.minimum(max_extra_msa, num_seq) select_indices = tf.random_shuffle(tf.range(0, num_seq))[:num_sel] for k in _MSA_FEATURE_NAMES: if 'extra_' + k in protein: protein['extra_' + k] = tf.gather(protein['extra_' + k], select_indices) return protein def delete_extra_msa(protein): for k in _MSA_FEATURE_NAMES: if 'extra_' + k in protein: del protein['extra_' + k] return protein @curry1 def block_delete_msa(protein, config): """Sample MSA by deleting contiguous blocks. Jumper et al. (2021) Suppl. Alg. 
1 "MSABlockDeletion" Arguments: protein: batch dict containing the msa config: ConfigDict with parameters Returns: updated protein """ num_seq = shape_helpers.shape_list(protein['msa'])[0] block_num_seq = tf.cast( tf.floor(tf.cast(num_seq, tf.float32) * config.msa_fraction_per_block), tf.int32) if config.randomize_num_blocks: nb = tf.random.uniform([], 0, config.num_blocks + 1, dtype=tf.int32) else: nb = config.num_blocks del_block_starts = tf.random.uniform([nb], 0, num_seq, dtype=tf.int32) del_blocks = del_block_starts[:, None] + tf.range(block_num_seq) del_blocks = tf.clip_by_value(del_blocks, 0, num_seq - 1) del_indices = tf.unique(tf.sort(tf.reshape(del_blocks, [-1])))[0] # Make sure we keep the original sequence sparse_diff = tf.sets.difference(tf.range(1, num_seq)[None], del_indices[None]) keep_indices = tf.squeeze(tf.sparse.to_dense(sparse_diff), 0) keep_indices = tf.concat([[0], keep_indices], axis=0) for k in _MSA_FEATURE_NAMES: if k in protein: protein[k] = tf.gather(protein[k], keep_indices) return protein @curry1 def nearest_neighbor_clusters(protein, gap_agreement_weight=0.): """Assign each extra MSA sequence to its nearest neighbor in sampled MSA.""" # Determine how much weight we assign to each agreement. In theory, we could # use a full blosum matrix here, but right now let's just down-weight gap # agreement because it could be spurious. # Never put weight on agreeing on BERT mask weights = tf.concat([ tf.ones(21), gap_agreement_weight * tf.ones(1), np.zeros(1)], 0) # Make agreement score as weighted Hamming distance sample_one_hot = (protein['msa_mask'][:, :, None] * tf.one_hot(protein['msa'], 23)) extra_one_hot = (protein['extra_msa_mask'][:, :, None] * tf.one_hot(protein['extra_msa'], 23)) num_seq, num_res, _ = shape_helpers.shape_list(sample_one_hot) extra_num_seq, _, _ = shape_helpers.shape_list(extra_one_hot) # Compute tf.einsum('mrc,nrc,c->mn', sample_one_hot, extra_one_hot, weights) # in an optimized fashion to avoid possible memory or computation blowup. 
agreement = tf.matmul( tf.reshape(extra_one_hot, [extra_num_seq, num_res * 23]), tf.reshape(sample_one_hot * weights, [num_seq, num_res * 23]), transpose_b=True) # Assign each sequence in the extra sequences to the closest MSA sample protein['extra_cluster_assignment'] = tf.argmax( agreement, axis=1, output_type=tf.int32) return protein @curry1 def summarize_clusters(protein): """Produce profile and deletion_matrix_mean within each cluster.""" num_seq = shape_helpers.shape_list(protein['msa'])[0] def csum(x): return tf.math.unsorted_segment_sum( x, protein['extra_cluster_assignment'], num_seq) mask = protein['extra_msa_mask'] mask_counts = 1e-6 + protein['msa_mask'] + csum(mask) # Include center msa_sum = csum(mask[:, :, None] * tf.one_hot(protein['extra_msa'], 23)) msa_sum += tf.one_hot(protein['msa'], 23) # Original sequence protein['cluster_profile'] = msa_sum / mask_counts[:, :, None] del msa_sum del_sum = csum(mask * protein['extra_deletion_matrix']) del_sum += protein['deletion_matrix'] # Original sequence protein['cluster_deletion_mean'] = del_sum / mask_counts del del_sum return protein def make_msa_mask(protein): """Mask features are all ones, but will later be zero-padded.""" protein['msa_mask'] = tf.ones( shape_helpers.shape_list(protein['msa']), dtype=tf.float32) protein['msa_row_mask'] = tf.ones( shape_helpers.shape_list(protein['msa'])[0], dtype=tf.float32) return protein def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks): """Create pseudo beta features.""" is_gly = tf.equal(aatype, residue_constants.restype_order['G']) ca_idx = residue_constants.atom_order['CA'] cb_idx = residue_constants.atom_order['CB'] pseudo_beta = tf.where( tf.tile(is_gly[..., None], [1] * len(is_gly.shape) + [3]), all_atom_positions[..., ca_idx, :], all_atom_positions[..., cb_idx, :]) if all_atom_masks is not None: pseudo_beta_mask = tf.where( is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx]) pseudo_beta_mask = tf.cast(pseudo_beta_mask, tf.float32) return pseudo_beta, pseudo_beta_mask else: return pseudo_beta @curry1 def make_pseudo_beta(protein, prefix=''): """Create pseudo-beta (alpha for glycine) position and mask.""" assert prefix in ['', 'template_'] protein[prefix + 'pseudo_beta'], protein[prefix + 'pseudo_beta_mask'] = ( pseudo_beta_fn( protein['template_aatype' if prefix else 'all_atom_aatype'], protein[prefix + 'all_atom_positions'], protein['template_all_atom_masks' if prefix else 'all_atom_mask'])) return protein @curry1 def add_constant_field(protein, key, value): protein[key] = tf.convert_to_tensor(value) return protein def shaped_categorical(probs, epsilon=1e-10): ds = shape_helpers.shape_list(probs) num_classes = ds[-1] counts = tf.random.categorical( tf.reshape(tf.log(probs + epsilon), [-1, num_classes]), 1, dtype=tf.int32) return tf.reshape(counts, ds[:-1]) def make_hhblits_profile(protein): """Compute the HHblits MSA profile if not already present.""" if 'hhblits_profile' in protein: return protein # Compute the profile for every residue (over all MSA sequences). 
protein['hhblits_profile'] = tf.reduce_mean( tf.one_hot(protein['msa'], 22), axis=0) return protein @curry1 def make_masked_msa(protein, config, replace_fraction): """Create data for BERT on raw MSA.""" # Add a random amino acid uniformly random_aa = tf.constant([0.05] * 20 + [0., 0.], dtype=tf.float32) categorical_probs = ( config.uniform_prob * random_aa + config.profile_prob * protein['hhblits_profile'] + config.same_prob * tf.one_hot(protein['msa'], 22)) # Put all remaining probability on [MASK] which is a new column pad_shapes = [[0, 0] for _ in range(len(categorical_probs.shape))] pad_shapes[-1][1] = 1 mask_prob = 1. - config.profile_prob - config.same_prob - config.uniform_prob assert mask_prob >= 0. categorical_probs = tf.pad( categorical_probs, pad_shapes, constant_values=mask_prob) sh = shape_helpers.shape_list(protein['msa']) mask_position = tf.random.uniform(sh) < replace_fraction bert_msa = shaped_categorical(categorical_probs) bert_msa = tf.where(mask_position, bert_msa, protein['msa']) # Mix real and masked MSA protein['bert_mask'] = tf.cast(mask_position, tf.float32) protein['true_msa'] = protein['msa'] protein['msa'] = bert_msa return protein @curry1 def make_fixed_size(protein, shape_schema, msa_cluster_size, extra_msa_size, num_res, num_templates=0): """Guess at the MSA and sequence dimensions to make fixed size.""" pad_size_map = { NUM_RES: num_res, NUM_MSA_SEQ: msa_cluster_size, NUM_EXTRA_SEQ: extra_msa_size, NUM_TEMPLATES: num_templates, } for k, v in protein.items(): # Don't transfer this to the accelerator. if k == 'extra_cluster_assignment': continue shape = v.shape.as_list() schema = shape_schema[k] assert len(shape) == len(schema), ( f'Rank mismatch between shape and shape schema for {k}: ' f'{shape} vs {schema}') pad_size = [ pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema) ] padding = [(0, p - tf.shape(v)[i]) for i, p in enumerate(pad_size)] if padding: protein[k] = tf.pad( v, padding, name=f'pad_to_fixed_{k}') protein[k].set_shape(pad_size) return protein @curry1 def make_msa_feat(protein): """Create and concatenate MSA features.""" # Whether there is a domain break. Always zero for chains, but keeping # for compatibility with domain datasets. has_break = tf.clip_by_value( tf.cast(protein['between_segment_residues'], tf.float32), 0, 1) aatype_1hot = tf.one_hot(protein['aatype'], 21, axis=-1) target_feat = [ tf.expand_dims(has_break, axis=-1), aatype_1hot, # Everyone gets the original sequence. ] msa_1hot = tf.one_hot(protein['msa'], 23, axis=-1) has_deletion = tf.clip_by_value(protein['deletion_matrix'], 0., 1.) deletion_value = tf.atan(protein['deletion_matrix'] / 3.) * (2. / np.pi) msa_feat = [ msa_1hot, tf.expand_dims(has_deletion, axis=-1), tf.expand_dims(deletion_value, axis=-1), ] if 'cluster_profile' in protein: deletion_mean_value = ( tf.atan(protein['cluster_deletion_mean'] / 3.) * (2. / np.pi)) msa_feat.extend([ protein['cluster_profile'], tf.expand_dims(deletion_mean_value, axis=-1), ]) if 'extra_deletion_matrix' in protein: protein['extra_has_deletion'] = tf.clip_by_value( protein['extra_deletion_matrix'], 0., 1.) protein['extra_deletion_value'] = tf.atan( protein['extra_deletion_matrix'] / 3.) * (2. 
/ np.pi) protein['msa_feat'] = tf.concat(msa_feat, axis=-1) protein['target_feat'] = tf.concat(target_feat, axis=-1) return protein @curry1 def select_feat(protein, feature_list): return {k: v for k, v in protein.items() if k in feature_list} @curry1 def crop_templates(protein, max_templates): for k, v in protein.items(): if k.startswith('template_'): protein[k] = v[:max_templates] return protein @curry1 def random_crop_to_size(protein, crop_size, max_templates, shape_schema, subsample_templates=False): """Crop randomly to `crop_size`, or keep as is if shorter than that.""" seq_length = protein['seq_length'] if 'template_mask' in protein: num_templates = tf.cast( shape_helpers.shape_list(protein['template_mask'])[0], tf.int32) else: num_templates = tf.constant(0, dtype=tf.int32) num_res_crop_size = tf.math.minimum(seq_length, crop_size) # Ensures that the cropping of residues and templates happens in the same way # across ensembling iterations. # Do not use for randomness that should vary in ensembling. seed_maker = utils.SeedMaker(initial_seed=protein['random_crop_to_size_seed']) if subsample_templates: templates_crop_start = tf.random.stateless_uniform( shape=(), minval=0, maxval=num_templates + 1, dtype=tf.int32, seed=seed_maker()) else: templates_crop_start = 0 num_templates_crop_size = tf.math.minimum( num_templates - templates_crop_start, max_templates) num_res_crop_start = tf.random.stateless_uniform( shape=(), minval=0, maxval=seq_length - num_res_crop_size + 1, dtype=tf.int32, seed=seed_maker()) templates_select_indices = tf.argsort(tf.random.stateless_uniform( [num_templates], seed=seed_maker())) for k, v in protein.items(): if k not in shape_schema or ( 'template' not in k and NUM_RES not in shape_schema[k]): continue # randomly permute the templates before cropping them. if k.startswith('template') and subsample_templates: v = tf.gather(v, templates_select_indices) crop_sizes = [] crop_starts = [] for i, (dim_size, dim) in enumerate(zip(shape_schema[k], shape_helpers.shape_list(v))): is_num_res = (dim_size == NUM_RES) if i == 0 and k.startswith('template'): crop_size = num_templates_crop_size crop_start = templates_crop_start else: crop_start = num_res_crop_start if is_num_res else 0 crop_size = (num_res_crop_size if is_num_res else (-1 if dim is None else dim)) crop_sizes.append(crop_size) crop_starts.append(crop_start) protein[k] = tf.slice(v, crop_starts, crop_sizes) protein['seq_length'] = num_res_crop_size return protein def make_atom14_masks(protein): """Construct denser atom positions (14 dimensions instead of 37).""" restype_atom14_to_atom37 = [] # mapping (restype, atom14) --> atom37 restype_atom37_to_atom14 = [] # mapping (restype, atom37) --> atom14 restype_atom14_mask = [] for rt in residue_constants.restypes: atom_names = residue_constants.restype_name_to_atom14_names[ residue_constants.restype_1to3[rt]] restype_atom14_to_atom37.append([ (residue_constants.atom_order[name] if name else 0) for name in atom_names ]) atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)} restype_atom37_to_atom14.append([ (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in residue_constants.atom_types ]) restype_atom14_mask.append([(1. if name else 0.) for name in atom_names]) # Add dummy mapping for restype 'UNK' restype_atom14_to_atom37.append([0] * 14) restype_atom37_to_atom14.append([0] * 37) restype_atom14_mask.append([0.] 
* 14) restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, dtype=np.int32) restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, dtype=np.int32) restype_atom14_mask = np.array(restype_atom14_mask, dtype=np.float32) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein residx_atom14_to_atom37 = tf.gather(restype_atom14_to_atom37, protein['aatype']) residx_atom14_mask = tf.gather(restype_atom14_mask, protein['aatype']) protein['atom14_atom_exists'] = residx_atom14_mask protein['residx_atom14_to_atom37'] = residx_atom14_to_atom37 # create the gather indices for mapping back residx_atom37_to_atom14 = tf.gather(restype_atom37_to_atom14, protein['aatype']) protein['residx_atom37_to_atom14'] = residx_atom37_to_atom14 # create the corresponding mask restype_atom37_mask = np.zeros([21, 37], dtype=np.float32) for restype, restype_letter in enumerate(residue_constants.restypes): restype_name = residue_constants.restype_1to3[restype_letter] atom_names = residue_constants.residue_atoms[restype_name] for atom_name in atom_names: atom_type = residue_constants.atom_order[atom_name] restype_atom37_mask[restype, atom_type] = 1 residx_atom37_mask = tf.gather(restype_atom37_mask, protein['aatype']) protein['atom37_atom_exists'] = residx_atom37_mask return protein
alphafold-main
alphafold/model/tf/data_transforms.py
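A minimal usage sketch (not from the repository) of the transforms above; the toy feature dict, its sizes, and the 0.1 replacement proportion are assumptions. It illustrates the curry1 pattern used throughout data_transforms: configure a transform with its hyperparameters, then apply the returned function to the feature dict.

import tensorflow.compat.v1 as tf
from alphafold.model.tf import data_transforms

# Toy features: 2 MSA sequences over 4 residues (21 = gap index).
protein = {
    'msa': tf.constant([[0, 1, 2, 21], [3, 4, 21, 21]], dtype=tf.int32),
    'aatype': tf.constant([0, 1, 2, 3], dtype=tf.int32),
}
protein = data_transforms.make_msa_mask(protein)         # adds msa_mask / msa_row_mask
protein = data_transforms.make_hhblits_profile(protein)  # adds a (num_res, 22) profile
# Curried transform: first call fixes the hyperparameter, second applies it.
protein = data_transforms.randomly_replace_msa_with_unknown(0.1)(protein)
print(protein['hhblits_profile'].shape, protein['msa_mask'].shape)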
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Shared utilities for various components."""
import tensorflow.compat.v1 as tf


def tf_combine_mask(*masks):
  """Take the intersection of float-valued masks."""
  ret = 1
  for m in masks:
    ret *= m
  return ret


class SeedMaker(object):
  """Return unique seeds."""

  def __init__(self, initial_seed=0):
    self.next_seed = initial_seed

  def __call__(self):
    i = self.next_seed
    self.next_seed += 1
    return i

seed_maker = SeedMaker()


def make_random_seed():
  return tf.random.uniform([2],
                           tf.int32.min,
                           tf.int32.max,
                           tf.int32,
                           seed=seed_maker())
alphafold-main
alphafold/model/tf/utils.py
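A short sketch of how these helpers behave; the seed value and toy masks are assumptions. SeedMaker hands out consecutive integers so stateless randomness is reproducible, tf_combine_mask multiplies float masks elementwise, and make_random_seed produces the two-element seed expected by tf.random.stateless_* ops.

import tensorflow.compat.v1 as tf
from alphafold.model.tf import utils

seeder = utils.SeedMaker(initial_seed=7)
print(seeder(), seeder(), seeder())      # 7 8 9 -- deterministic, increasing

combined = utils.tf_combine_mask(
    tf.constant([1., 1., 0.]), tf.constant([1., 0., 1.]))
print(combined)                          # elementwise product: [1., 0., 0.]

seed_pair = utils.make_random_seed()     # a [2] int32 tensor for stateless ops
print(seed_pair.shape)                   # (2,)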
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Datasets consisting of proteins.""" from typing import Dict, Mapping, Optional, Sequence from alphafold.model.tf import protein_features import numpy as np import tensorflow.compat.v1 as tf TensorDict = Dict[str, tf.Tensor] def parse_tfexample( raw_data: bytes, features: protein_features.FeaturesMetadata, key: Optional[str] = None) -> Dict[str, tf.train.Feature]: """Read a single TF Example proto and return a subset of its features. Args: raw_data: A serialized tf.Example proto. features: A dictionary of features, mapping string feature names to a tuple (dtype, shape). This dictionary should be a subset of protein_features.FEATURES (or the dictionary itself for all features). key: Optional string with the SSTable key of that tf.Example. This will be added into features as a 'key' but only if requested in features. Returns: A dictionary of features mapping feature names to features. Only the given features are returned, all other ones are filtered out. """ feature_map = { k: tf.io.FixedLenSequenceFeature(shape=(), dtype=v[0], allow_missing=True) for k, v in features.items() } parsed_features = tf.io.parse_single_example(raw_data, feature_map) reshaped_features = parse_reshape_logic(parsed_features, features, key=key) return reshaped_features def _first(tensor: tf.Tensor) -> tf.Tensor: """Returns the 1st element - the input can be a tensor or a scalar.""" return tf.reshape(tensor, shape=(-1,))[0] def parse_reshape_logic( parsed_features: TensorDict, features: protein_features.FeaturesMetadata, key: Optional[str] = None) -> TensorDict: """Transforms parsed serial features to the correct shape.""" # Find out what is the number of sequences and the number of alignments. num_residues = tf.cast(_first(parsed_features["seq_length"]), dtype=tf.int32) if "num_alignments" in parsed_features: num_msa = tf.cast(_first(parsed_features["num_alignments"]), dtype=tf.int32) else: num_msa = 0 if "template_domain_names" in parsed_features: num_templates = tf.cast( tf.shape(parsed_features["template_domain_names"])[0], dtype=tf.int32) else: num_templates = 0 if key is not None and "key" in features: parsed_features["key"] = [key] # Expand dims from () to (1,). # Reshape the tensors according to the sequence length and num alignments. for k, v in parsed_features.items(): new_shape = protein_features.shape( feature_name=k, num_residues=num_residues, msa_length=num_msa, num_templates=num_templates, features=features) new_shape_size = tf.constant(1, dtype=tf.int32) for dim in new_shape: new_shape_size *= tf.cast(dim, tf.int32) assert_equal = tf.assert_equal( tf.size(v), new_shape_size, name="assert_%s_shape_correct" % k, message="The size of feature %s (%s) could not be reshaped " "into %s" % (k, tf.size(v), new_shape)) if "template" not in k: # Make sure the feature we are reshaping is not empty. assert_non_empty = tf.assert_greater( tf.size(v), 0, name="assert_%s_non_empty" % k, message="The feature %s is not set in the tf.Example. 
Either do not " "request the feature or use a tf.Example that has the " "feature set." % k) with tf.control_dependencies([assert_non_empty, assert_equal]): parsed_features[k] = tf.reshape(v, new_shape, name="reshape_%s" % k) else: with tf.control_dependencies([assert_equal]): parsed_features[k] = tf.reshape(v, new_shape, name="reshape_%s" % k) return parsed_features def _make_features_metadata( feature_names: Sequence[str]) -> protein_features.FeaturesMetadata: """Makes a feature name to type and shape mapping from a list of names.""" # Make sure these features are always read. required_features = ["aatype", "sequence", "seq_length"] feature_names = list(set(feature_names) | set(required_features)) features_metadata = {name: protein_features.FEATURES[name] for name in feature_names} return features_metadata def create_tensor_dict( raw_data: bytes, features: Sequence[str], key: Optional[str] = None, ) -> TensorDict: """Creates a dictionary of tensor features. Args: raw_data: A serialized tf.Example proto. features: A list of strings of feature names to be returned in the dataset. key: Optional string with the SSTable key of that tf.Example. This will be added into features as a 'key' but only if requested in features. Returns: A dictionary of features mapping feature names to features. Only the given features are returned, all other ones are filtered out. """ features_metadata = _make_features_metadata(features) return parse_tfexample(raw_data, features_metadata, key) def np_to_tensor_dict( np_example: Mapping[str, np.ndarray], features: Sequence[str], ) -> TensorDict: """Creates dict of tensors from a dict of NumPy arrays. Args: np_example: A dict of NumPy feature arrays. features: A list of strings of feature names to be returned in the dataset. Returns: A dictionary of features mapping feature names to features. Only the given features are returned, all other ones are filtered out. """ features_metadata = _make_features_metadata(features) tensor_dict = {k: tf.constant(v) for k, v in np_example.items() if k in features_metadata} # Ensures shapes are as expected. Needed for setting size of empty features # e.g. when no template hits were found. tensor_dict = parse_reshape_logic(tensor_dict, features_metadata) return tensor_dict
alphafold-main
alphafold/model/tf/proteins_dataset.py
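A sketch of np_to_tensor_dict on a hand-made example; the array contents and num_res = 4 are assumptions, and disable_v2_behavior reflects that this module targets the TF1 graph API. It shows how placeholders such as NUM_RES in protein_features.FEATURES drive the reshape of flat features.

import numpy as np
import tensorflow.compat.v1 as tf
from alphafold.model.tf import proteins_dataset

tf.disable_v2_behavior()

num_res = 4
np_example = {
    'aatype': np.zeros((num_res, 21), dtype=np.float32),
    'sequence': np.array([b'AAAA'], dtype=object),
    'seq_length': np.full((num_res, 1), num_res, dtype=np.int64),
    'residue_index': np.arange(num_res, dtype=np.int64)[:, None],
}
tensor_dict = proteins_dataset.np_to_tensor_dict(
    np_example, features=['aatype', 'seq_length', 'residue_index'])
# 'sequence' is included automatically because it is a required feature.
print({k: v.shape for k, v in tensor_dict.items()})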
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains descriptions of various protein features.""" import enum from typing import Dict, Optional, Sequence, Tuple, Union from alphafold.common import residue_constants import tensorflow.compat.v1 as tf # Type aliases. FeaturesMetadata = Dict[str, Tuple[tf.dtypes.DType, Sequence[Union[str, int]]]] class FeatureType(enum.Enum): ZERO_DIM = 0 # Shape [x] ONE_DIM = 1 # Shape [num_res, x] TWO_DIM = 2 # Shape [num_res, num_res, x] MSA = 3 # Shape [msa_length, num_res, x] # Placeholder values that will be replaced with their true value at runtime. NUM_RES = "num residues placeholder" NUM_SEQ = "length msa placeholder" NUM_TEMPLATES = "num templates placeholder" # Sizes of the protein features, NUM_RES and NUM_SEQ are allowed as placeholders # to be replaced with the number of residues and the number of sequences in the # multiple sequence alignment, respectively. FEATURES = { #### Static features of a protein sequence #### "aatype": (tf.float32, [NUM_RES, 21]), "between_segment_residues": (tf.int64, [NUM_RES, 1]), "deletion_matrix": (tf.float32, [NUM_SEQ, NUM_RES, 1]), "domain_name": (tf.string, [1]), "msa": (tf.int64, [NUM_SEQ, NUM_RES, 1]), "num_alignments": (tf.int64, [NUM_RES, 1]), "residue_index": (tf.int64, [NUM_RES, 1]), "seq_length": (tf.int64, [NUM_RES, 1]), "sequence": (tf.string, [1]), "all_atom_positions": (tf.float32, [NUM_RES, residue_constants.atom_type_num, 3]), "all_atom_mask": (tf.int64, [NUM_RES, residue_constants.atom_type_num]), "resolution": (tf.float32, [1]), "template_domain_names": (tf.string, [NUM_TEMPLATES]), "template_sum_probs": (tf.float32, [NUM_TEMPLATES, 1]), "template_aatype": (tf.float32, [NUM_TEMPLATES, NUM_RES, 22]), "template_all_atom_positions": (tf.float32, [ NUM_TEMPLATES, NUM_RES, residue_constants.atom_type_num, 3 ]), "template_all_atom_masks": (tf.float32, [ NUM_TEMPLATES, NUM_RES, residue_constants.atom_type_num, 1 ]), } FEATURE_TYPES = {k: v[0] for k, v in FEATURES.items()} FEATURE_SIZES = {k: v[1] for k, v in FEATURES.items()} def register_feature(name: str, type_: tf.dtypes.DType, shape_: Tuple[Union[str, int]]): """Register extra features used in custom datasets.""" FEATURES[name] = (type_, shape_) FEATURE_TYPES[name] = type_ FEATURE_SIZES[name] = shape_ def shape(feature_name: str, num_residues: int, msa_length: int, num_templates: Optional[int] = None, features: Optional[FeaturesMetadata] = None): """Get the shape for the given feature name. This is near identical to _get_tf_shape_no_placeholders() but with 2 differences: * This method does not calculate a single placeholder from the total number of elements (eg given <NUM_RES, 3> and size := 12, this won't deduce NUM_RES must be 4) * This method will work with tensors Args: feature_name: String identifier for the feature. If the feature name ends with "_unnormalized", this suffix is stripped off. 
num_residues: The number of residues in the current domain - some elements of the shape can be dynamic and will be replaced by this value. msa_length: The number of sequences in the multiple sequence alignment, some elements of the shape can be dynamic and will be replaced by this value. If the number of alignments is unknown / not read, please pass None for msa_length. num_templates (optional): The number of templates in this tfexample. features: A feature_name to (tf_dtype, shape) lookup; defaults to FEATURES. Returns: List of ints representation the tensor size. Raises: ValueError: If a feature is requested but no concrete placeholder value is given. """ features = features or FEATURES if feature_name.endswith("_unnormalized"): feature_name = feature_name[:-13] unused_dtype, raw_sizes = features[feature_name] replacements = {NUM_RES: num_residues, NUM_SEQ: msa_length} if num_templates is not None: replacements[NUM_TEMPLATES] = num_templates sizes = [replacements.get(dimension, dimension) for dimension in raw_sizes] for dimension in sizes: if isinstance(dimension, str): raise ValueError("Could not parse %s (shape: %s) with values: %s" % ( feature_name, raw_sizes, replacements)) return sizes
alphafold-main
alphafold/model/tf/protein_features.py
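A small illustration of shape(): with concrete residue, MSA and template counts, the placeholder dimensions in FEATURES are substituted. The counts below (120 residues, 512 sequences, 4 templates) are arbitrary examples.

from alphafold.model.tf import protein_features

print(protein_features.shape('msa', num_residues=120, msa_length=512))
# [512, 120, 1]
print(protein_features.shape('template_aatype', num_residues=120,
                             msa_length=512, num_templates=4))
# [4, 120, 22]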
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for shape_helpers.""" from alphafold.model.tf import shape_helpers import numpy as np import tensorflow.compat.v1 as tf class ShapeTest(tf.test.TestCase): def setUp(self): super().setUp() tf.disable_v2_behavior() def test_shape_list(self): """Test that shape_list can allow for reshaping to dynamic shapes.""" a = tf.zeros([10, 4, 4, 2]) p = tf.placeholder(tf.float32, shape=[None, None, 1, 4, 4]) shape_dyn = shape_helpers.shape_list(p)[:2] + [4, 4] b = tf.reshape(a, shape_dyn) with self.session() as sess: out = sess.run(b, feed_dict={p: np.ones((20, 1, 1, 4, 4))}) self.assertAllEqual(out.shape, (20, 1, 4, 4)) if __name__ == '__main__': tf.test.main()
alphafold-main
alphafold/model/tf/shape_helpers_test.py
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities for dealing with shapes of TensorFlow tensors."""
import tensorflow.compat.v1 as tf


def shape_list(x):
  """Return list of dimensions of a tensor, statically where possible.

  Like `x.shape.as_list()` but with tensors instead of `None`s.

  Args:
    x: A tensor.

  Returns:
    A list with length equal to the rank of the tensor. The n-th element of the
    list is an integer when that dimension is statically known otherwise it is
    the n-th element of `tf.shape(x)`.
  """
  x = tf.convert_to_tensor(x)

  # If unknown rank, return dynamic shape
  if x.get_shape().dims is None:
    return tf.shape(x)

  static = x.get_shape().as_list()
  shape = tf.shape(x)

  ret = []
  for i in range(len(static)):
    dim = static[i]
    if dim is None:
      dim = shape[i]
    ret.append(dim)
  return ret
alphafold-main
alphafold/model/tf/shape_helpers.py
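A brief sketch of shape_list on a partially known shape; the placeholder dimensions are arbitrary. Static dimensions come back as plain Python ints, unknown ones as scalar tensors.

import tensorflow.compat.v1 as tf
from alphafold.model.tf import shape_helpers

tf.disable_v2_behavior()                 # placeholders need graph mode

x = tf.placeholder(tf.float32, shape=[None, 8, 3])
dims = shape_helpers.shape_list(x)
print(dims[1:])                          # [8, 3]  -- static ints
print(type(dims[0]))                     # a scalar Tensor for the unknown dim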
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Placeholder values for run-time varying dimension sizes."""

NUM_RES = 'num residues placeholder'
NUM_MSA_SEQ = 'msa placeholder'
NUM_EXTRA_SEQ = 'extra msa placeholder'
NUM_TEMPLATES = 'num templates placeholder'
alphafold-main
alphafold/model/tf/shape_placeholders.py
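A sketch of how these placeholders are typically used: a shape schema maps feature names to symbolic dimensions that a later padding step (e.g. make_fixed_size in data_transforms) resolves to concrete sizes. The channel counts below are illustrative assumptions, not values taken from the model config.

from alphafold.model.tf import shape_placeholders as ph

# Example schema for two features; 49 and 22 channels are assumed values.
eval_shape_schema = {
    'msa_feat': [ph.NUM_MSA_SEQ, ph.NUM_RES, 49],
    'target_feat': [ph.NUM_RES, 22],
}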
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for utils.""" import os from absl.testing import absltest from alphafold.common import protein from alphafold.relax import utils import numpy as np # Internal import (7716). class UtilsTest(absltest.TestCase): def test_overwrite_b_factors(self): testdir = os.path.join( absltest.get_default_test_srcdir(), 'alphafold/relax/testdata/' 'multiple_disulfides_target.pdb') with open(testdir) as f: test_pdb = f.read() n_residues = 191 bfactors = np.stack([np.arange(0, n_residues)] * 37, axis=-1) output_pdb = utils.overwrite_b_factors(test_pdb, bfactors) # Check that the atom lines are unchanged apart from the B-factors. atom_lines_original = [l for l in test_pdb.split('\n') if l[:4] == ('ATOM')] atom_lines_new = [l for l in output_pdb.split('\n') if l[:4] == ('ATOM')] for line_original, line_new in zip(atom_lines_original, atom_lines_new): self.assertEqual(line_original[:60].strip(), line_new[:60].strip()) self.assertEqual(line_original[66:].strip(), line_new[66:].strip()) # Check B-factors are correctly set for all atoms present. as_protein = protein.from_pdb_string(output_pdb) np.testing.assert_almost_equal( np.where(as_protein.atom_mask > 0, as_protein.b_factors, 0), np.where(as_protein.atom_mask > 0, bfactors, 0)) if __name__ == '__main__': absltest.main()
alphafold-main
alphafold/relax/utils_test.py
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Amber relaxation."""
alphafold-main
alphafold/relax/__init__.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for relax.""" import os from absl.testing import absltest from alphafold.common import protein from alphafold.relax import relax import numpy as np # Internal import (7716). class RunAmberRelaxTest(absltest.TestCase): def setUp(self): super().setUp() self.test_dir = os.path.join( absltest.get_default_test_srcdir(), 'alphafold/relax/testdata/') self.test_config = { 'max_iterations': 1, 'tolerance': 2.39, 'stiffness': 10.0, 'exclude_residues': [], 'max_outer_iterations': 1, 'use_gpu': False} def test_process(self): amber_relax = relax.AmberRelaxation(**self.test_config) with open(os.path.join(self.test_dir, 'model_output.pdb')) as f: test_prot = protein.from_pdb_string(f.read()) pdb_min, debug_info, num_violations = amber_relax.process(prot=test_prot) self.assertCountEqual(debug_info.keys(), set({'initial_energy', 'final_energy', 'attempts', 'rmsd'})) self.assertLess(debug_info['final_energy'], debug_info['initial_energy']) self.assertGreater(debug_info['rmsd'], 0) prot_min = protein.from_pdb_string(pdb_min) # Most protein properties should be unchanged. np.testing.assert_almost_equal(test_prot.aatype, prot_min.aatype) np.testing.assert_almost_equal(test_prot.residue_index, prot_min.residue_index) # Atom mask and bfactors identical except for terminal OXT of last residue. np.testing.assert_almost_equal(test_prot.atom_mask[:-1, :], prot_min.atom_mask[:-1, :]) np.testing.assert_almost_equal(test_prot.b_factors[:-1, :], prot_min.b_factors[:-1, :]) np.testing.assert_almost_equal(test_prot.atom_mask[:, :-1], prot_min.atom_mask[:, :-1]) np.testing.assert_almost_equal(test_prot.b_factors[:, :-1], prot_min.b_factors[:, :-1]) # There are no residues with violations. np.testing.assert_equal(num_violations, np.zeros_like(num_violations)) def test_unresolved_violations(self): amber_relax = relax.AmberRelaxation(**self.test_config) with open(os.path.join(self.test_dir, 'with_violations_casp14.pdb')) as f: test_prot = protein.from_pdb_string(f.read()) _, _, num_violations = amber_relax.process(prot=test_prot) exp_num_violations = np.array( [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0]) # Check no violations were added. Can't check exactly due to stochasticity. self.assertTrue(np.all(np.array(num_violations) <= exp_num_violations)) if __name__ == '__main__': absltest.main()
alphafold-main
alphafold/relax/relax_test.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Restrained Amber Minimization of a structure.""" import io import time from typing import Collection, Optional, Sequence from absl import logging from alphafold.common import protein from alphafold.common import residue_constants from alphafold.model import folding from alphafold.relax import cleanup from alphafold.relax import utils import ml_collections import numpy as np import jax import openmm from openmm import unit from openmm import app as openmm_app from openmm.app.internal.pdbstructure import PdbStructure ENERGY = unit.kilocalories_per_mole LENGTH = unit.angstroms def will_restrain(atom: openmm_app.Atom, rset: str) -> bool: """Returns True if the atom will be restrained by the given restraint set.""" if rset == "non_hydrogen": return atom.element.name != "hydrogen" elif rset == "c_alpha": return atom.name == "CA" def _add_restraints( system: openmm.System, reference_pdb: openmm_app.PDBFile, stiffness: unit.Unit, rset: str, exclude_residues: Sequence[int]): """Adds a harmonic potential that restrains the system to a structure.""" assert rset in ["non_hydrogen", "c_alpha"] force = openmm.CustomExternalForce( "0.5 * k * ((x-x0)^2 + (y-y0)^2 + (z-z0)^2)") force.addGlobalParameter("k", stiffness) for p in ["x0", "y0", "z0"]: force.addPerParticleParameter(p) for i, atom in enumerate(reference_pdb.topology.atoms()): if atom.residue.index in exclude_residues: continue if will_restrain(atom, rset): force.addParticle(i, reference_pdb.positions[i]) logging.info("Restraining %d / %d particles.", force.getNumParticles(), system.getNumParticles()) system.addForce(force) def _openmm_minimize( pdb_str: str, max_iterations: int, tolerance: unit.Unit, stiffness: unit.Unit, restraint_set: str, exclude_residues: Sequence[int], use_gpu: bool): """Minimize energy via openmm.""" pdb_file = io.StringIO(pdb_str) pdb = openmm_app.PDBFile(pdb_file) force_field = openmm_app.ForceField("amber99sb.xml") constraints = openmm_app.HBonds system = force_field.createSystem( pdb.topology, constraints=constraints) if stiffness > 0 * ENERGY / (LENGTH**2): _add_restraints(system, pdb, stiffness, restraint_set, exclude_residues) integrator = openmm.LangevinIntegrator(0, 0.01, 0.0) platform = openmm.Platform.getPlatformByName("CUDA" if use_gpu else "CPU") simulation = openmm_app.Simulation( pdb.topology, system, integrator, platform) simulation.context.setPositions(pdb.positions) ret = {} state = simulation.context.getState(getEnergy=True, getPositions=True) ret["einit"] = state.getPotentialEnergy().value_in_unit(ENERGY) ret["posinit"] = state.getPositions(asNumpy=True).value_in_unit(LENGTH) simulation.minimizeEnergy(maxIterations=max_iterations, tolerance=tolerance) state = simulation.context.getState(getEnergy=True, getPositions=True) ret["efinal"] = state.getPotentialEnergy().value_in_unit(ENERGY) ret["pos"] = state.getPositions(asNumpy=True).value_in_unit(LENGTH) ret["min_pdb"] = _get_pdb_string(simulation.topology, 
state.getPositions()) return ret def _get_pdb_string(topology: openmm_app.Topology, positions: unit.Quantity): """Returns a pdb string provided OpenMM topology and positions.""" with io.StringIO() as f: openmm_app.PDBFile.writeFile(topology, positions, f) return f.getvalue() def _check_cleaned_atoms(pdb_cleaned_string: str, pdb_ref_string: str): """Checks that no atom positions have been altered by cleaning.""" cleaned = openmm_app.PDBFile(io.StringIO(pdb_cleaned_string)) reference = openmm_app.PDBFile(io.StringIO(pdb_ref_string)) cl_xyz = np.array(cleaned.getPositions().value_in_unit(LENGTH)) ref_xyz = np.array(reference.getPositions().value_in_unit(LENGTH)) for ref_res, cl_res in zip(reference.topology.residues(), cleaned.topology.residues()): assert ref_res.name == cl_res.name for rat in ref_res.atoms(): for cat in cl_res.atoms(): if cat.name == rat.name: if not np.array_equal(cl_xyz[cat.index], ref_xyz[rat.index]): raise ValueError(f"Coordinates of cleaned atom {cat} do not match " f"coordinates of reference atom {rat}.") def _check_residues_are_well_defined(prot: protein.Protein): """Checks that all residues contain non-empty atom sets.""" if (prot.atom_mask.sum(axis=-1) == 0).any(): raise ValueError("Amber minimization can only be performed on proteins with" " well-defined residues. This protein contains at least" " one residue with no atoms.") def _check_atom_mask_is_ideal(prot): """Sanity-check the atom mask is ideal, up to a possible OXT.""" atom_mask = prot.atom_mask ideal_atom_mask = protein.ideal_atom_mask(prot) utils.assert_equal_nonterminal_atom_types(atom_mask, ideal_atom_mask) def clean_protein( prot: protein.Protein, checks: bool = True): """Adds missing atoms to Protein instance. Args: prot: A `protein.Protein` instance. checks: A `bool` specifying whether to add additional checks to the cleaning process. Returns: pdb_string: A string of the cleaned protein. """ _check_atom_mask_is_ideal(prot) # Clean pdb. prot_pdb_string = protein.to_pdb(prot) pdb_file = io.StringIO(prot_pdb_string) alterations_info = {} fixed_pdb = cleanup.fix_pdb(pdb_file, alterations_info) fixed_pdb_file = io.StringIO(fixed_pdb) pdb_structure = PdbStructure(fixed_pdb_file) cleanup.clean_structure(pdb_structure, alterations_info) logging.info("alterations info: %s", alterations_info) # Write pdb file of cleaned structure. as_file = openmm_app.PDBFile(pdb_structure) pdb_string = _get_pdb_string(as_file.getTopology(), as_file.getPositions()) if checks: _check_cleaned_atoms(pdb_string, prot_pdb_string) return pdb_string def make_atom14_positions(prot): """Constructs denser atom positions (14 dimensions instead of 37).""" restype_atom14_to_atom37 = [] # mapping (restype, atom14) --> atom37 restype_atom37_to_atom14 = [] # mapping (restype, atom37) --> atom14 restype_atom14_mask = [] for rt in residue_constants.restypes: atom_names = residue_constants.restype_name_to_atom14_names[ residue_constants.restype_1to3[rt]] restype_atom14_to_atom37.append([ (residue_constants.atom_order[name] if name else 0) for name in atom_names ]) atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)} restype_atom37_to_atom14.append([ (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in residue_constants.atom_types ]) restype_atom14_mask.append([(1. if name else 0.) for name in atom_names]) # Add dummy mapping for restype 'UNK'. restype_atom14_to_atom37.append([0] * 14) restype_atom37_to_atom14.append([0] * 37) restype_atom14_mask.append([0.] 
* 14) restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, dtype=np.int32) restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, dtype=np.int32) restype_atom14_mask = np.array(restype_atom14_mask, dtype=np.float32) # Create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein. residx_atom14_to_atom37 = restype_atom14_to_atom37[prot["aatype"]] residx_atom14_mask = restype_atom14_mask[prot["aatype"]] # Create a mask for known ground truth positions. residx_atom14_gt_mask = residx_atom14_mask * np.take_along_axis( prot["all_atom_mask"], residx_atom14_to_atom37, axis=1).astype(np.float32) # Gather the ground truth positions. residx_atom14_gt_positions = residx_atom14_gt_mask[:, :, None] * ( np.take_along_axis(prot["all_atom_positions"], residx_atom14_to_atom37[..., None], axis=1)) prot["atom14_atom_exists"] = residx_atom14_mask prot["atom14_gt_exists"] = residx_atom14_gt_mask prot["atom14_gt_positions"] = residx_atom14_gt_positions prot["residx_atom14_to_atom37"] = residx_atom14_to_atom37 # Create the gather indices for mapping back. residx_atom37_to_atom14 = restype_atom37_to_atom14[prot["aatype"]] prot["residx_atom37_to_atom14"] = residx_atom37_to_atom14 # Create the corresponding mask. restype_atom37_mask = np.zeros([21, 37], dtype=np.float32) for restype, restype_letter in enumerate(residue_constants.restypes): restype_name = residue_constants.restype_1to3[restype_letter] atom_names = residue_constants.residue_atoms[restype_name] for atom_name in atom_names: atom_type = residue_constants.atom_order[atom_name] restype_atom37_mask[restype, atom_type] = 1 residx_atom37_mask = restype_atom37_mask[prot["aatype"]] prot["atom37_atom_exists"] = residx_atom37_mask # As the atom naming is ambiguous for 7 of the 20 amino acids, provide # alternative ground truth coordinates where the naming is swapped restype_3 = [ residue_constants.restype_1to3[res] for res in residue_constants.restypes ] restype_3 += ["UNK"] # Matrices for renaming ambiguous atoms. all_matrices = {res: np.eye(14, dtype=np.float32) for res in restype_3} for resname, swap in residue_constants.residue_atom_renaming_swaps.items(): correspondences = np.arange(14) for source_atom_swap, target_atom_swap in swap.items(): source_index = residue_constants.restype_name_to_atom14_names[ resname].index(source_atom_swap) target_index = residue_constants.restype_name_to_atom14_names[ resname].index(target_atom_swap) correspondences[source_index] = target_index correspondences[target_index] = source_index renaming_matrix = np.zeros((14, 14), dtype=np.float32) for index, correspondence in enumerate(correspondences): renaming_matrix[index, correspondence] = 1. all_matrices[resname] = renaming_matrix.astype(np.float32) renaming_matrices = np.stack([all_matrices[restype] for restype in restype_3]) # Pick the transformation matrices for the given residue sequence # shape (num_res, 14, 14). renaming_transform = renaming_matrices[prot["aatype"]] # Apply it to the ground truth positions. shape (num_res, 14, 3). alternative_gt_positions = np.einsum("rac,rab->rbc", residx_atom14_gt_positions, renaming_transform) prot["atom14_alt_gt_positions"] = alternative_gt_positions # Create the mask for the alternative ground truth (differs from the # ground truth mask, if only one of the atoms in an ambiguous pair has a # ground truth position). 
alternative_gt_mask = np.einsum("ra,rab->rb", residx_atom14_gt_mask, renaming_transform) prot["atom14_alt_gt_exists"] = alternative_gt_mask # Create an ambiguous atoms mask. shape: (21, 14). restype_atom14_is_ambiguous = np.zeros((21, 14), dtype=np.float32) for resname, swap in residue_constants.residue_atom_renaming_swaps.items(): for atom_name1, atom_name2 in swap.items(): restype = residue_constants.restype_order[ residue_constants.restype_3to1[resname]] atom_idx1 = residue_constants.restype_name_to_atom14_names[resname].index( atom_name1) atom_idx2 = residue_constants.restype_name_to_atom14_names[resname].index( atom_name2) restype_atom14_is_ambiguous[restype, atom_idx1] = 1 restype_atom14_is_ambiguous[restype, atom_idx2] = 1 # From this create an ambiguous_mask for the given sequence. prot["atom14_atom_is_ambiguous"] = ( restype_atom14_is_ambiguous[prot["aatype"]]) return prot def find_violations(prot_np: protein.Protein): """Analyzes a protein and returns structural violation information. Args: prot_np: A protein. Returns: violations: A `dict` of structure components with structural violations. violation_metrics: A `dict` of violation metrics. """ batch = { "aatype": prot_np.aatype, "all_atom_positions": prot_np.atom_positions.astype(np.float32), "all_atom_mask": prot_np.atom_mask.astype(np.float32), "residue_index": prot_np.residue_index, } batch["seq_mask"] = np.ones_like(batch["aatype"], np.float32) batch = make_atom14_positions(batch) violations = folding.find_structural_violations( batch=batch, atom14_pred_positions=batch["atom14_gt_positions"], config=ml_collections.ConfigDict( {"violation_tolerance_factor": 12, # Taken from model config. "clash_overlap_tolerance": 1.5, # Taken from model config. })) violation_metrics = folding.compute_violation_metrics( batch=batch, atom14_pred_positions=batch["atom14_gt_positions"], violations=violations, ) return violations, violation_metrics def get_violation_metrics(prot: protein.Protein): """Computes violation and alignment metrics.""" structural_violations, struct_metrics = find_violations(prot) violation_idx = np.flatnonzero( structural_violations["total_per_residue_violations_mask"]) struct_metrics["residue_violations"] = violation_idx struct_metrics["num_residue_violations"] = len(violation_idx) struct_metrics["structural_violations"] = structural_violations return struct_metrics def _run_one_iteration( *, pdb_string: str, max_iterations: int, tolerance: float, stiffness: float, restraint_set: str, max_attempts: int, use_gpu: bool, exclude_residues: Optional[Collection[int]] = None): """Runs the minimization pipeline. Args: pdb_string: A pdb string. max_iterations: An `int` specifying the maximum number of L-BFGS iterations. A value of 0 specifies no limit. tolerance: kcal/mol, the energy tolerance of L-BFGS. stiffness: kcal/mol A**2, spring constant of heavy atom restraining potential. restraint_set: The set of atoms to restrain. max_attempts: The maximum number of minimization attempts. use_gpu: Whether to run on GPU. exclude_residues: An optional list of zero-indexed residues to exclude from restraints. Returns: A `dict` of minimization info. """ exclude_residues = exclude_residues or [] # Assign physical dimensions. 
tolerance = tolerance * ENERGY stiffness = stiffness * ENERGY / (LENGTH**2) start = time.time() minimized = False attempts = 0 while not minimized and attempts < max_attempts: attempts += 1 try: logging.info("Minimizing protein, attempt %d of %d.", attempts, max_attempts) ret = _openmm_minimize( pdb_string, max_iterations=max_iterations, tolerance=tolerance, stiffness=stiffness, restraint_set=restraint_set, exclude_residues=exclude_residues, use_gpu=use_gpu) minimized = True except Exception as e: # pylint: disable=broad-except logging.info(e) if not minimized: raise ValueError(f"Minimization failed after {max_attempts} attempts.") ret["opt_time"] = time.time() - start ret["min_attempts"] = attempts return ret def run_pipeline( prot: protein.Protein, stiffness: float, use_gpu: bool, max_outer_iterations: int = 1, place_hydrogens_every_iteration: bool = True, max_iterations: int = 0, tolerance: float = 2.39, restraint_set: str = "non_hydrogen", max_attempts: int = 100, checks: bool = True, exclude_residues: Optional[Sequence[int]] = None): """Run iterative amber relax. Successive relax iterations are performed until all violations have been resolved. Each iteration involves a restrained Amber minimization, with restraint exclusions determined by violation-participating residues. Args: prot: A protein to be relaxed. stiffness: kcal/mol A**2, the restraint stiffness. use_gpu: Whether to run on GPU. max_outer_iterations: The maximum number of iterative minimization. place_hydrogens_every_iteration: Whether hydrogens are re-initialized prior to every minimization. max_iterations: An `int` specifying the maximum number of L-BFGS steps per relax iteration. A value of 0 specifies no limit. tolerance: kcal/mol, the energy tolerance of L-BFGS. The default value is the OpenMM default. restraint_set: The set of atoms to restrain. max_attempts: The maximum number of minimization attempts per iteration. checks: Whether to perform cleaning checks. exclude_residues: An optional list of zero-indexed residues to exclude from restraints. Returns: out: A dictionary of output values. """ # `protein.to_pdb` will strip any poorly-defined residues so we need to # perform this check before `clean_protein`. _check_residues_are_well_defined(prot) pdb_string = clean_protein(prot, checks=checks) exclude_residues = exclude_residues or [] exclude_residues = set(exclude_residues) violations = np.inf iteration = 0 while violations > 0 and iteration < max_outer_iterations: ret = _run_one_iteration( pdb_string=pdb_string, exclude_residues=exclude_residues, max_iterations=max_iterations, tolerance=tolerance, stiffness=stiffness, restraint_set=restraint_set, max_attempts=max_attempts, use_gpu=use_gpu) prot = protein.from_pdb_string(ret["min_pdb"]) if place_hydrogens_every_iteration: pdb_string = clean_protein(prot, checks=True) else: pdb_string = ret["min_pdb"] # Calculation of violations can cause CUDA errors for some JAX versions. with jax.default_device(jax.devices("cpu")[0]): ret.update(get_violation_metrics(prot)) ret.update({ "num_exclusions": len(exclude_residues), "iteration": iteration, }) violations = ret["violations_per_residue"] exclude_residues = exclude_residues.union(ret["residue_violations"]) logging.info("Iteration completed: Einit %.2f Efinal %.2f Time %.2f s " "num residue violations %d num residue exclusions %d ", ret["einit"], ret["efinal"], ret["opt_time"], ret["num_residue_violations"], ret["num_exclusions"]) iteration += 1 return ret
alphafold-main
alphafold/relax/amber_minimize.py
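A minimal sketch of driving run_pipeline directly; 'prediction.pdb' is a hypothetical input path and the stiffness / iteration settings are assumptions rather than recommended values.

from alphafold.common import protein
from alphafold.relax import amber_minimize

with open('prediction.pdb') as f:   # hypothetical input path
  prot = protein.from_pdb_string(f.read())

out = amber_minimize.run_pipeline(
    prot=prot, stiffness=10.0, use_gpu=False, max_outer_iterations=3)
print(out['einit'], out['efinal'], out['num_residue_violations'])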
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Cleans up a PDB file using pdbfixer in preparation for OpenMM simulations. fix_pdb uses a third-party tool. We also support fixing some additional edge cases like removing chains of length one (see clean_structure). """ import io import pdbfixer from openmm import app from openmm.app import element def fix_pdb(pdbfile, alterations_info): """Apply pdbfixer to the contents of a PDB file; return a PDB string result. 1) Replaces nonstandard residues. 2) Removes heterogens (non protein residues) including water. 3) Adds missing residues and missing atoms within existing residues. 4) Adds hydrogens assuming pH=7.0. 5) KeepIds is currently true, so the fixer must keep the existing chain and residue identifiers. This will fail for some files in wider PDB that have invalid IDs. Args: pdbfile: Input PDB file handle. alterations_info: A dict that will store details of changes made. Returns: A PDB string representing the fixed structure. """ fixer = pdbfixer.PDBFixer(pdbfile=pdbfile) fixer.findNonstandardResidues() alterations_info['nonstandard_residues'] = fixer.nonstandardResidues fixer.replaceNonstandardResidues() _remove_heterogens(fixer, alterations_info, keep_water=False) fixer.findMissingResidues() alterations_info['missing_residues'] = fixer.missingResidues fixer.findMissingAtoms() alterations_info['missing_heavy_atoms'] = fixer.missingAtoms alterations_info['missing_terminals'] = fixer.missingTerminals fixer.addMissingAtoms(seed=0) fixer.addMissingHydrogens() out_handle = io.StringIO() app.PDBFile.writeFile(fixer.topology, fixer.positions, out_handle, keepIds=True) return out_handle.getvalue() def clean_structure(pdb_structure, alterations_info): """Applies additional fixes to an OpenMM structure, to handle edge cases. Args: pdb_structure: An OpenMM structure to modify and fix. alterations_info: A dict that will store details of changes made. """ _replace_met_se(pdb_structure, alterations_info) _remove_chains_of_length_one(pdb_structure, alterations_info) def _remove_heterogens(fixer, alterations_info, keep_water): """Removes the residues that Pdbfixer considers to be heterogens. Args: fixer: A Pdbfixer instance. alterations_info: A dict that will store details of changes made. keep_water: If True, water (HOH) is not considered to be a heterogen. 
""" initial_resnames = set() for chain in fixer.topology.chains(): for residue in chain.residues(): initial_resnames.add(residue.name) fixer.removeHeterogens(keepWater=keep_water) final_resnames = set() for chain in fixer.topology.chains(): for residue in chain.residues(): final_resnames.add(residue.name) alterations_info['removed_heterogens'] = ( initial_resnames.difference(final_resnames)) def _replace_met_se(pdb_structure, alterations_info): """Replace the Se in any MET residues that were not marked as modified.""" modified_met_residues = [] for res in pdb_structure.iter_residues(): name = res.get_name_with_spaces().strip() if name == 'MET': s_atom = res.get_atom('SD') if s_atom.element_symbol == 'Se': s_atom.element_symbol = 'S' s_atom.element = element.get_by_symbol('S') modified_met_residues.append(s_atom.residue_number) alterations_info['Se_in_MET'] = modified_met_residues def _remove_chains_of_length_one(pdb_structure, alterations_info): """Removes chains that correspond to a single amino acid. A single amino acid in a chain is both N and C terminus. There is no force template for this case. Args: pdb_structure: An OpenMM pdb_structure to modify and fix. alterations_info: A dict that will store details of changes made. """ removed_chains = {} for model in pdb_structure.iter_models(): valid_chains = [c for c in model.iter_chains() if len(c) > 1] invalid_chain_ids = [c.chain_id for c in model.iter_chains() if len(c) <= 1] model.chains = valid_chains for chain_id in invalid_chain_ids: model.chains_by_id.pop(chain_id) removed_chains[model.number] = invalid_chain_ids alterations_info['removed_chains'] = removed_chains
alphafold-main
alphafold/relax/cleanup.py
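A sketch of the two-step cleanup flow; 'input.pdb' is a hypothetical path. fix_pdb returns a corrected PDB string and records what changed in the alterations dict, and clean_structure applies the extra OpenMM-level fixes.

import io
from openmm.app.internal.pdbstructure import PdbStructure
from alphafold.relax import cleanup

alterations = {}
with open('input.pdb') as f:                      # hypothetical input path
  fixed_pdb = cleanup.fix_pdb(f, alterations)
structure = PdbStructure(io.StringIO(fixed_pdb))
cleanup.clean_structure(structure, alterations)
print(sorted(alterations))                        # keys describing the fixes applied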
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utils for minimization.""" import io from alphafold.common import residue_constants from Bio import PDB import numpy as np def overwrite_b_factors(pdb_str: str, bfactors: np.ndarray) -> str: """Overwrites the B-factors in pdb_str with contents of bfactors array. Args: pdb_str: An input PDB string. bfactors: A numpy array with shape [1, n_residues, 37]. We assume that the B-factors are per residue; i.e. that the nonzero entries are identical in [0, i, :]. Returns: A new PDB string with the B-factors replaced. """ if bfactors.shape[-1] != residue_constants.atom_type_num: raise ValueError( f'Invalid final dimension size for bfactors: {bfactors.shape[-1]}.') parser = PDB.PDBParser(QUIET=True) handle = io.StringIO(pdb_str) structure = parser.get_structure('', handle) curr_resid = ('', '', '') idx = -1 for atom in structure.get_atoms(): atom_resid = atom.parent.get_id() if atom_resid != curr_resid: idx += 1 if idx >= bfactors.shape[0]: raise ValueError('Index into bfactors exceeds number of residues. ' 'B-factors shape: {shape}, idx: {idx}.') curr_resid = atom_resid atom.bfactor = bfactors[idx, residue_constants.atom_order['CA']] new_pdb = io.StringIO() pdb_io = PDB.PDBIO() pdb_io.set_structure(structure) pdb_io.save(new_pdb) return new_pdb.getvalue() def assert_equal_nonterminal_atom_types( atom_mask: np.ndarray, ref_atom_mask: np.ndarray): """Checks that pre- and post-minimized proteins have same atom set.""" # Ignore any terminal OXT atoms which may have been added by minimization. oxt = residue_constants.atom_order['OXT'] no_oxt_mask = np.ones(shape=atom_mask.shape, dtype=bool) no_oxt_mask[..., oxt] = False np.testing.assert_almost_equal(ref_atom_mask[no_oxt_mask], atom_mask[no_oxt_mask])
alphafold-main
alphafold/relax/utils.py
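A sketch of the usual overwrite_b_factors pattern: broadcast a per-residue confidence into the 37-atom B-factor array. 'prediction.pdb', the residue count and the pLDDT values are assumptions for illustration.

import numpy as np
from alphafold.common import residue_constants
from alphafold.relax import utils

with open('prediction.pdb') as f:                 # hypothetical input path
  pdb_str = f.read()
num_res = 191                                     # assumed residue count for that file
plddt = np.linspace(50., 90., num_res)            # toy per-residue confidences
bfactors = np.repeat(plddt[:, None], residue_constants.atom_type_num, axis=-1)
print(utils.overwrite_b_factors(pdb_str, bfactors)[:80])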
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Amber relaxation.""" from typing import Any, Dict, Sequence, Tuple from alphafold.common import protein from alphafold.relax import amber_minimize from alphafold.relax import utils import numpy as np class AmberRelaxation(object): """Amber relaxation.""" def __init__(self, *, max_iterations: int, tolerance: float, stiffness: float, exclude_residues: Sequence[int], max_outer_iterations: int, use_gpu: bool): """Initialize Amber Relaxer. Args: max_iterations: Maximum number of L-BFGS iterations. 0 means no max. tolerance: kcal/mol, the energy tolerance of L-BFGS. stiffness: kcal/mol A**2, spring constant of heavy atom restraining potential. exclude_residues: Residues to exclude from per-atom restraining. Zero-indexed. max_outer_iterations: Maximum number of violation-informed relax iterations. A value of 1 will run the non-iterative procedure used in CASP14. Use 20 so that >95% of the bad cases are relaxed. Relax finishes as soon as there are no violations, hence in most cases this causes no slowdown. In the worst case we do 20 outer iterations. use_gpu: Whether to run on GPU. """ self._max_iterations = max_iterations self._tolerance = tolerance self._stiffness = stiffness self._exclude_residues = exclude_residues self._max_outer_iterations = max_outer_iterations self._use_gpu = use_gpu def process(self, *, prot: protein.Protein ) -> Tuple[str, Dict[str, Any], Sequence[float]]: """Runs Amber relax on a prediction, adds hydrogens, returns PDB string.""" out = amber_minimize.run_pipeline( prot=prot, max_iterations=self._max_iterations, tolerance=self._tolerance, stiffness=self._stiffness, exclude_residues=self._exclude_residues, max_outer_iterations=self._max_outer_iterations, use_gpu=self._use_gpu) min_pos = out['pos'] start_pos = out['posinit'] rmsd = np.sqrt(np.sum((start_pos - min_pos)**2) / start_pos.shape[0]) debug_data = { 'initial_energy': out['einit'], 'final_energy': out['efinal'], 'attempts': out['min_attempts'], 'rmsd': rmsd } min_pdb = out['min_pdb'] min_pdb = utils.overwrite_b_factors(min_pdb, prot.b_factors) utils.assert_equal_nonterminal_atom_types( protein.from_pdb_string(min_pdb).atom_mask, prot.atom_mask) violations = out['structural_violations'][ 'total_per_residue_violations_mask'].tolist() return min_pdb, debug_data, violations
alphafold-main
alphafold/relax/relax.py
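A hedged usage sketch for AmberRelaxation: the numeric settings mirror values commonly used with the AlphaFold pipeline but are shown here only as an illustration, and the input path and unrelaxed_protein variable are assumptions, not prescribed defaults.

# Illustrative sketch only: `unrelaxed_model.pdb` is a hypothetical input
# path; `unrelaxed_protein` stands in for a protein.Protein prediction.
from alphafold.common import protein
from alphafold.relax import relax

amber_relaxer = relax.AmberRelaxation(
    max_iterations=0,        # 0 means no cap on L-BFGS iterations.
    tolerance=2.39,          # kcal/mol energy tolerance.
    stiffness=10.0,          # kcal/mol A**2 restraint on heavy atoms.
    exclude_residues=[],     # No residues excluded from restraints.
    max_outer_iterations=3,  # Violation-informed outer loop.
    use_gpu=False)

with open('unrelaxed_model.pdb') as f:
  unrelaxed_protein = protein.from_pdb_string(f.read())

relaxed_pdb_str, debug_info, violations = amber_relaxer.process(
    prot=unrelaxed_protein)
print(debug_info['rmsd'], sum(violations))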
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for amber_minimize.""" import os from absl.testing import absltest from alphafold.common import protein from alphafold.relax import amber_minimize import numpy as np # Internal import (7716). _USE_GPU = False def _load_test_protein(data_path): pdb_path = os.path.join(absltest.get_default_test_srcdir(), data_path) with open(pdb_path, 'r') as f: return protein.from_pdb_string(f.read()) class AmberMinimizeTest(absltest.TestCase): def test_multiple_disulfides_target(self): prot = _load_test_protein( 'alphafold/relax/testdata/multiple_disulfides_target.pdb' ) ret = amber_minimize.run_pipeline(prot, max_iterations=10, max_attempts=1, stiffness=10., use_gpu=_USE_GPU) self.assertIn('opt_time', ret) self.assertIn('min_attempts', ret) def test_raises_invalid_protein_assertion(self): prot = _load_test_protein( 'alphafold/relax/testdata/multiple_disulfides_target.pdb' ) prot.atom_mask[4, :] = 0 with self.assertRaisesRegex( ValueError, 'Amber minimization can only be performed on proteins with well-defined' ' residues. This protein contains at least one residue with no atoms.'): amber_minimize.run_pipeline(prot, max_iterations=10, stiffness=1., max_attempts=1, use_gpu=_USE_GPU) def test_iterative_relax(self): prot = _load_test_protein( 'alphafold/relax/testdata/with_violations.pdb' ) violations = amber_minimize.get_violation_metrics(prot) self.assertGreater(violations['num_residue_violations'], 0) out = amber_minimize.run_pipeline( prot=prot, max_outer_iterations=10, stiffness=10., use_gpu=_USE_GPU) self.assertLess(out['efinal'], out['einit']) self.assertEqual(0, out['num_residue_violations']) def test_find_violations(self): prot = _load_test_protein( 'alphafold/relax/testdata/multiple_disulfides_target.pdb' ) viols, _ = amber_minimize.find_violations(prot) expected_between_residues_connection_mask = np.zeros((191,), np.float32) for residue in (42, 43, 59, 60, 135, 136): expected_between_residues_connection_mask[residue] = 1.0 expected_clash_indices = np.array([ [8, 4], [8, 5], [13, 3], [14, 1], [14, 4], [26, 4], [26, 5], [31, 8], [31, 10], [39, 0], [39, 1], [39, 2], [39, 3], [39, 4], [42, 5], [42, 6], [42, 7], [42, 8], [47, 7], [47, 8], [47, 9], [47, 10], [64, 4], [85, 5], [102, 4], [102, 5], [109, 13], [111, 5], [118, 6], [118, 7], [118, 8], [124, 4], [124, 5], [131, 5], [139, 7], [147, 4], [152, 7]], dtype=np.int32) expected_between_residues_clash_mask = np.zeros([191, 14]) expected_between_residues_clash_mask[expected_clash_indices[:, 0], expected_clash_indices[:, 1]] += 1 expected_per_atom_violations = np.zeros([191, 14]) np.testing.assert_array_equal( viols['between_residues']['connections_per_residue_violation_mask'], expected_between_residues_connection_mask) np.testing.assert_array_equal( viols['between_residues']['clashes_per_atom_clash_mask'], expected_between_residues_clash_mask) np.testing.assert_array_equal( viols['within_residues']['per_atom_violations'], expected_per_atom_violations) if __name__ == 
'__main__': absltest.main()
alphafold-main
alphafold/relax/amber_minimize_test.py
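As a hedged companion to these tests, this is roughly how the violation metrics can be inspected on their own before deciding whether to relax a prediction; the PDB path is a hypothetical stand-in.

# Sketch: check structural violations of a predicted structure before relax.
from alphafold.common import protein
from alphafold.relax import amber_minimize

with open('prediction.pdb') as f:  # Hypothetical input path.
  prot = protein.from_pdb_string(f.read())

metrics = amber_minimize.get_violation_metrics(prot)
print(metrics['num_residue_violations'])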
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for relax.cleanup.""" import io from absl.testing import absltest from alphafold.relax import cleanup from openmm.app.internal import pdbstructure def _pdb_to_structure(pdb_str): handle = io.StringIO(pdb_str) return pdbstructure.PdbStructure(handle) def _lines_to_structure(pdb_lines): return _pdb_to_structure('\n'.join(pdb_lines)) class CleanupTest(absltest.TestCase): def test_missing_residues(self): pdb_lines = ['SEQRES 1 C 3 CYS GLY LEU', 'ATOM 1 N CYS C 1 -12.262 20.115 60.959 1.00 ' '19.08 N', 'ATOM 2 CA CYS C 1 -11.065 20.934 60.773 1.00 ' '17.23 C', 'ATOM 3 C CYS C 1 -10.002 20.742 61.844 1.00 ' '15.38 C', 'ATOM 4 O CYS C 1 -10.284 20.225 62.929 1.00 ' '16.04 O', 'ATOM 5 N LEU C 3 -7.688 18.700 62.045 1.00 ' '14.75 N', 'ATOM 6 CA LEU C 3 -7.256 17.320 62.234 1.00 ' '16.81 C', 'ATOM 7 C LEU C 3 -6.380 16.864 61.070 1.00 ' '16.95 C', 'ATOM 8 O LEU C 3 -6.551 17.332 59.947 1.00 ' '16.97 O'] input_handle = io.StringIO('\n'.join(pdb_lines)) alterations = {} result = cleanup.fix_pdb(input_handle, alterations) structure = _pdb_to_structure(result) residue_names = [r.get_name() for r in structure.iter_residues()] self.assertCountEqual(residue_names, ['CYS', 'GLY', 'LEU']) self.assertCountEqual(alterations['missing_residues'].values(), [['GLY']]) def test_missing_atoms(self): pdb_lines = ['SEQRES 1 A 1 PRO', 'ATOM 1 CA PRO A 1 1.000 1.000 1.000 1.00 ' ' 0.00 C'] input_handle = io.StringIO('\n'.join(pdb_lines)) alterations = {} result = cleanup.fix_pdb(input_handle, alterations) structure = _pdb_to_structure(result) atom_names = [a.get_name() for a in structure.iter_atoms()] self.assertCountEqual(atom_names, ['N', 'CD', 'HD2', 'HD3', 'CG', 'HG2', 'HG3', 'CB', 'HB2', 'HB3', 'CA', 'HA', 'C', 'O', 'H2', 'H3', 'OXT']) missing_atoms_by_residue = list(alterations['missing_heavy_atoms'].values()) self.assertLen(missing_atoms_by_residue, 1) atoms_added = [a.name for a in missing_atoms_by_residue[0]] self.assertCountEqual(atoms_added, ['N', 'CD', 'CG', 'CB', 'C', 'O']) missing_terminals_by_residue = alterations['missing_terminals'] self.assertLen(missing_terminals_by_residue, 1) has_missing_terminal = [r.name for r in missing_terminals_by_residue.keys()] self.assertCountEqual(has_missing_terminal, ['PRO']) self.assertCountEqual([t for t in missing_terminals_by_residue.values()], [['OXT']]) def test_remove_heterogens(self): pdb_lines = ['SEQRES 1 A 1 GLY', 'ATOM 1 CA GLY A 1 0.000 0.000 0.000 1.00 ' ' 0.00 C', 'ATOM 2 O HOH A 2 0.000 0.000 0.000 1.00 ' ' 0.00 O'] input_handle = io.StringIO('\n'.join(pdb_lines)) alterations = {} result = cleanup.fix_pdb(input_handle, alterations) structure = _pdb_to_structure(result) self.assertCountEqual([res.get_name() for res in structure.iter_residues()], ['GLY']) self.assertEqual(alterations['removed_heterogens'], set(['HOH'])) def test_fix_nonstandard_residues(self): pdb_lines = ['SEQRES 1 A 1 DAL', 'ATOM 1 CA DAL A 1 0.000 0.000 0.000 1.00 ' ' 0.00 C'] input_handle = 
io.StringIO('\n'.join(pdb_lines)) alterations = {} result = cleanup.fix_pdb(input_handle, alterations) structure = _pdb_to_structure(result) residue_names = [res.get_name() for res in structure.iter_residues()] self.assertCountEqual(residue_names, ['ALA']) self.assertLen(alterations['nonstandard_residues'], 1) original_res, new_name = alterations['nonstandard_residues'][0] self.assertEqual(original_res.id, '1') self.assertEqual(new_name, 'ALA') def test_replace_met_se(self): pdb_lines = ['SEQRES 1 A 1 MET', 'ATOM 1 SD MET A 1 0.000 0.000 0.000 1.00 ' ' 0.00 Se'] structure = _lines_to_structure(pdb_lines) alterations = {} cleanup._replace_met_se(structure, alterations) sd = [a for a in structure.iter_atoms() if a.get_name() == 'SD'] self.assertLen(sd, 1) self.assertEqual(sd[0].element_symbol, 'S') self.assertCountEqual(alterations['Se_in_MET'], [sd[0].residue_number]) def test_remove_chains_of_length_one(self): pdb_lines = ['SEQRES 1 A 1 GLY', 'ATOM 1 CA GLY A 1 0.000 0.000 0.000 1.00 ' ' 0.00 C'] structure = _lines_to_structure(pdb_lines) alterations = {} cleanup._remove_chains_of_length_one(structure, alterations) chains = list(structure.iter_chains()) self.assertEmpty(chains) self.assertCountEqual(alterations['removed_chains'].values(), [['A']]) if __name__ == '__main__': absltest.main()
alphafold-main
alphafold/relax/cleanup_test.py
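The tests above exercise cleanup.fix_pdb piece by piece; as a hedged standalone sketch, this is roughly how the same entry point can be driven on a local PDB file. The file path is hypothetical, and the exact alteration keys reported depend on the input structure.

# Sketch: run the cleanup entry point on a local PDB file (hypothetical path)
# and report what was altered.
import io

from alphafold.relax import cleanup

with open('my_structure.pdb') as f:
  pdb_handle = io.StringIO(f.read())

alterations = {}
fixed_pdb_str = cleanup.fix_pdb(pdb_handle, alterations)
print(sorted(alterations))  # e.g. missing_residues, removed_heterogens, ...
print(fixed_pdb_str.count('\n'), 'lines after cleanup')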
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Parses the mmCIF file format.""" import collections import dataclasses import functools import io from typing import Any, Mapping, Optional, Sequence, Tuple from absl import logging from Bio import PDB from Bio.Data import SCOPData # Type aliases: ChainId = str PdbHeader = Mapping[str, Any] PdbStructure = PDB.Structure.Structure SeqRes = str MmCIFDict = Mapping[str, Sequence[str]] @dataclasses.dataclass(frozen=True) class Monomer: id: str num: int # Note - mmCIF format provides no guarantees on the type of author-assigned # sequence numbers. They need not be integers. @dataclasses.dataclass(frozen=True) class AtomSite: residue_name: str author_chain_id: str mmcif_chain_id: str author_seq_num: str mmcif_seq_num: int insertion_code: str hetatm_atom: str model_num: int # Used to map SEQRES index to a residue in the structure. @dataclasses.dataclass(frozen=True) class ResiduePosition: chain_id: str residue_number: int insertion_code: str @dataclasses.dataclass(frozen=True) class ResidueAtPosition: position: Optional[ResiduePosition] name: str is_missing: bool hetflag: str @dataclasses.dataclass(frozen=True) class MmcifObject: """Representation of a parsed mmCIF file. Contains: file_id: A meaningful name, e.g. a pdb_id. Should be unique amongst all files being processed. header: Biopython header. structure: Biopython structure. chain_to_seqres: Dict mapping chain_id to 1 letter amino acid sequence. E.g. {'A': 'ABCDEFG'} seqres_to_structure: Dict; for each chain_id contains a mapping between SEQRES index and a ResidueAtPosition. e.g. {'A': {0: ResidueAtPosition, 1: ResidueAtPosition, ...}} raw_string: The raw string used to construct the MmcifObject. """ file_id: str header: PdbHeader structure: PdbStructure chain_to_seqres: Mapping[ChainId, SeqRes] seqres_to_structure: Mapping[ChainId, Mapping[int, ResidueAtPosition]] raw_string: Any @dataclasses.dataclass(frozen=True) class ParsingResult: """Returned by the parse function. Contains: mmcif_object: A MmcifObject, may be None if no chain could be successfully parsed. errors: A dict mapping (file_id, chain_id) to any exception generated. """ mmcif_object: Optional[MmcifObject] errors: Mapping[Tuple[str, str], Any] class ParseError(Exception): """An error indicating that an mmCIF file could not be parsed.""" def mmcif_loop_to_list(prefix: str, parsed_info: MmCIFDict) -> Sequence[Mapping[str, str]]: """Extracts loop associated with a prefix from mmCIF data as a list. Reference for loop_ in mmCIF: http://mmcif.wwpdb.org/docs/tutorials/mechanics/pdbx-mmcif-syntax.html Args: prefix: Prefix shared by each of the data items in the loop. e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num, _entity_poly_seq.mon_id. Should include the trailing period. parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython parser. Returns: Returns a list of dicts; each dict represents 1 entry from an mmCIF loop. 
""" cols = [] data = [] for key, value in parsed_info.items(): if key.startswith(prefix): cols.append(key) data.append(value) assert all([len(xs) == len(data[0]) for xs in data]), ( 'mmCIF error: Not all loops are the same length: %s' % cols) return [dict(zip(cols, xs)) for xs in zip(*data)] def mmcif_loop_to_dict(prefix: str, index: str, parsed_info: MmCIFDict, ) -> Mapping[str, Mapping[str, str]]: """Extracts loop associated with a prefix from mmCIF data as a dictionary. Args: prefix: Prefix shared by each of the data items in the loop. e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num, _entity_poly_seq.mon_id. Should include the trailing period. index: Which item of loop data should serve as the key. parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython parser. Returns: Returns a dict of dicts; each dict represents 1 entry from an mmCIF loop, indexed by the index column. """ entries = mmcif_loop_to_list(prefix, parsed_info) return {entry[index]: entry for entry in entries} @functools.lru_cache(16, typed=False) def parse(*, file_id: str, mmcif_string: str, catch_all_errors: bool = True) -> ParsingResult: """Entry point, parses an mmcif_string. Args: file_id: A string identifier for this file. Should be unique within the collection of files being processed. mmcif_string: Contents of an mmCIF file. catch_all_errors: If True, all exceptions are caught and error messages are returned as part of the ParsingResult. If False exceptions will be allowed to propagate. Returns: A ParsingResult. """ errors = {} try: parser = PDB.MMCIFParser(QUIET=True) handle = io.StringIO(mmcif_string) full_structure = parser.get_structure('', handle) first_model_structure = _get_first_model(full_structure) # Extract the _mmcif_dict from the parser, which contains useful fields not # reflected in the Biopython structure. parsed_info = parser._mmcif_dict # pylint:disable=protected-access # Ensure all values are lists, even if singletons. for key, value in parsed_info.items(): if not isinstance(value, list): parsed_info[key] = [value] header = _get_header(parsed_info) # Determine the protein chains, and their start numbers according to the # internal mmCIF numbering scheme (likely but not guaranteed to be 1). valid_chains = _get_protein_chains(parsed_info=parsed_info) if not valid_chains: return ParsingResult( None, {(file_id, ''): 'No protein chains found in this file.'}) seq_start_num = {chain_id: min([monomer.num for monomer in seq]) for chain_id, seq in valid_chains.items()} # Loop over the atoms for which we have coordinates. Populate two mappings: # -mmcif_to_author_chain_id (maps internal mmCIF chain ids to chain ids used # the authors / Biopython). # -seq_to_structure_mappings (maps idx into sequence to ResidueAtPosition). mmcif_to_author_chain_id = {} seq_to_structure_mappings = {} for atom in _get_atom_site_list(parsed_info): if atom.model_num != '1': # We only process the first model at the moment. continue mmcif_to_author_chain_id[atom.mmcif_chain_id] = atom.author_chain_id if atom.mmcif_chain_id in valid_chains: hetflag = ' ' if atom.hetatm_atom == 'HETATM': # Water atoms are assigned a special hetflag of W in Biopython. We # need to do the same, so that this hetflag can be used to fetch # a residue from the Biopython structure by id. 
if atom.residue_name in ('HOH', 'WAT'): hetflag = 'W' else: hetflag = 'H_' + atom.residue_name insertion_code = atom.insertion_code if not _is_set(atom.insertion_code): insertion_code = ' ' position = ResiduePosition(chain_id=atom.author_chain_id, residue_number=int(atom.author_seq_num), insertion_code=insertion_code) seq_idx = int(atom.mmcif_seq_num) - seq_start_num[atom.mmcif_chain_id] current = seq_to_structure_mappings.get(atom.author_chain_id, {}) current[seq_idx] = ResidueAtPosition(position=position, name=atom.residue_name, is_missing=False, hetflag=hetflag) seq_to_structure_mappings[atom.author_chain_id] = current # Add missing residue information to seq_to_structure_mappings. for chain_id, seq_info in valid_chains.items(): author_chain = mmcif_to_author_chain_id[chain_id] current_mapping = seq_to_structure_mappings[author_chain] for idx, monomer in enumerate(seq_info): if idx not in current_mapping: current_mapping[idx] = ResidueAtPosition(position=None, name=monomer.id, is_missing=True, hetflag=' ') author_chain_to_sequence = {} for chain_id, seq_info in valid_chains.items(): author_chain = mmcif_to_author_chain_id[chain_id] seq = [] for monomer in seq_info: code = SCOPData.protein_letters_3to1.get(monomer.id, 'X') seq.append(code if len(code) == 1 else 'X') seq = ''.join(seq) author_chain_to_sequence[author_chain] = seq mmcif_object = MmcifObject( file_id=file_id, header=header, structure=first_model_structure, chain_to_seqres=author_chain_to_sequence, seqres_to_structure=seq_to_structure_mappings, raw_string=parsed_info) return ParsingResult(mmcif_object=mmcif_object, errors=errors) except Exception as e: # pylint:disable=broad-except errors[(file_id, '')] = e if not catch_all_errors: raise return ParsingResult(mmcif_object=None, errors=errors) def _get_first_model(structure: PdbStructure) -> PdbStructure: """Returns the first model in a Biopython structure.""" return next(structure.get_models()) _MIN_LENGTH_OF_CHAIN_TO_BE_COUNTED_AS_PEPTIDE = 21 def get_release_date(parsed_info: MmCIFDict) -> str: """Returns the oldest revision date.""" revision_dates = parsed_info['_pdbx_audit_revision_history.revision_date'] return min(revision_dates) def _get_header(parsed_info: MmCIFDict) -> PdbHeader: """Returns a basic header containing method, release date and resolution.""" header = {} experiments = mmcif_loop_to_list('_exptl.', parsed_info) header['structure_method'] = ','.join([ experiment['_exptl.method'].lower() for experiment in experiments]) # Note: The release_date here corresponds to the oldest revision. We prefer to # use this for dataset filtering over the deposition_date. 
if '_pdbx_audit_revision_history.revision_date' in parsed_info: header['release_date'] = get_release_date(parsed_info) else: logging.warning('Could not determine release_date: %s', parsed_info['_entry.id']) header['resolution'] = 0.00 for res_key in ('_refine.ls_d_res_high', '_em_3d_reconstruction.resolution', '_reflns.d_resolution_high'): if res_key in parsed_info: try: raw_resolution = parsed_info[res_key][0] header['resolution'] = float(raw_resolution) except ValueError: logging.debug('Invalid resolution format: %s', parsed_info[res_key]) return header def _get_atom_site_list(parsed_info: MmCIFDict) -> Sequence[AtomSite]: """Returns list of atom sites; contains data not present in the structure.""" return [AtomSite(*site) for site in zip( # pylint:disable=g-complex-comprehension parsed_info['_atom_site.label_comp_id'], parsed_info['_atom_site.auth_asym_id'], parsed_info['_atom_site.label_asym_id'], parsed_info['_atom_site.auth_seq_id'], parsed_info['_atom_site.label_seq_id'], parsed_info['_atom_site.pdbx_PDB_ins_code'], parsed_info['_atom_site.group_PDB'], parsed_info['_atom_site.pdbx_PDB_model_num'], )] def _get_protein_chains( *, parsed_info: Mapping[str, Any]) -> Mapping[ChainId, Sequence[Monomer]]: """Extracts polymer information for protein chains only. Args: parsed_info: _mmcif_dict produced by the Biopython parser. Returns: A dict mapping mmcif chain id to a list of Monomers. """ # Get polymer information for each entity in the structure. entity_poly_seqs = mmcif_loop_to_list('_entity_poly_seq.', parsed_info) polymers = collections.defaultdict(list) for entity_poly_seq in entity_poly_seqs: polymers[entity_poly_seq['_entity_poly_seq.entity_id']].append( Monomer(id=entity_poly_seq['_entity_poly_seq.mon_id'], num=int(entity_poly_seq['_entity_poly_seq.num']))) # Get chemical compositions. Will allow us to identify which of these polymers # are proteins. chem_comps = mmcif_loop_to_dict('_chem_comp.', '_chem_comp.id', parsed_info) # Get chains information for each entity. Necessary so that we can return a # dict keyed on chain id rather than entity. struct_asyms = mmcif_loop_to_list('_struct_asym.', parsed_info) entity_to_mmcif_chains = collections.defaultdict(list) for struct_asym in struct_asyms: chain_id = struct_asym['_struct_asym.id'] entity_id = struct_asym['_struct_asym.entity_id'] entity_to_mmcif_chains[entity_id].append(chain_id) # Identify and return the valid protein chains. valid_chains = {} for entity_id, seq_info in polymers.items(): chain_ids = entity_to_mmcif_chains[entity_id] # Reject polymers without any peptide-like components, such as DNA/RNA. if any(['peptide' in chem_comps[monomer.id]['_chem_comp.type'].lower() for monomer in seq_info]): for chain_id in chain_ids: valid_chains[chain_id] = seq_info return valid_chains def _is_set(data: str) -> bool: """Returns False if data is a special mmCIF character indicating 'unset'.""" return data not in ('.', '?')
alphafold-main
alphafold/data/mmcif_parsing.py
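A short, hedged example of driving the parser: the mmCIF text is read from a hypothetical local file (both the path and the file_id are assumptions), and the resulting MmcifObject is inspected for per-chain SEQRES sequences and unresolved residues.

# Sketch: parse a local mmCIF file (hypothetical path) and inspect chains.
from alphafold.data import mmcif_parsing

with open('7xyz.cif') as f:
  mmcif_string = f.read()

result = mmcif_parsing.parse(file_id='7xyz', mmcif_string=mmcif_string)
if result.mmcif_object is None:
  raise ValueError(f'mmCIF parsing failed: {result.errors}')

mmcif = result.mmcif_object
print(mmcif.header.get('release_date'), mmcif.header['resolution'])
for chain_id, seq in mmcif.chain_to_seqres.items():
  n_missing = sum(res.is_missing
                  for res in mmcif.seqres_to_structure[chain_id].values())
  print(chain_id, len(seq), 'residues,', n_missing, 'unresolved')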
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature processing logic for multimer data pipeline.""" from typing import Iterable, MutableMapping, List from alphafold.common import residue_constants from alphafold.data import msa_pairing from alphafold.data import pipeline import numpy as np REQUIRED_FEATURES = frozenset({ 'aatype', 'all_atom_mask', 'all_atom_positions', 'all_chains_entity_ids', 'all_crops_all_chains_mask', 'all_crops_all_chains_positions', 'all_crops_all_chains_residue_ids', 'assembly_num_chains', 'asym_id', 'bert_mask', 'cluster_bias_mask', 'deletion_matrix', 'deletion_mean', 'entity_id', 'entity_mask', 'mem_peak', 'msa', 'msa_mask', 'num_alignments', 'num_templates', 'queue_size', 'residue_index', 'resolution', 'seq_length', 'seq_mask', 'sym_id', 'template_aatype', 'template_all_atom_mask', 'template_all_atom_positions' }) MAX_TEMPLATES = 4 MSA_CROP_SIZE = 2048 def _is_homomer_or_monomer(chains: Iterable[pipeline.FeatureDict]) -> bool: """Checks if a list of chains represents a homomer/monomer example.""" # Note that an entity_id of 0 indicates padding. num_unique_chains = len(np.unique(np.concatenate( [np.unique(chain['entity_id'][chain['entity_id'] > 0]) for chain in chains]))) return num_unique_chains == 1 def pair_and_merge( all_chain_features: MutableMapping[str, pipeline.FeatureDict] ) -> pipeline.FeatureDict: """Runs processing on features to augment, pair and merge. Args: all_chain_features: A MutableMap of dictionaries of features for each chain. Returns: A dictionary of features. """ process_unmerged_features(all_chain_features) np_chains_list = list(all_chain_features.values()) pair_msa_sequences = not _is_homomer_or_monomer(np_chains_list) if pair_msa_sequences: np_chains_list = msa_pairing.create_paired_features( chains=np_chains_list) np_chains_list = msa_pairing.deduplicate_unpaired_sequences(np_chains_list) np_chains_list = crop_chains( np_chains_list, msa_crop_size=MSA_CROP_SIZE, pair_msa_sequences=pair_msa_sequences, max_templates=MAX_TEMPLATES) np_example = msa_pairing.merge_chain_features( np_chains_list=np_chains_list, pair_msa_sequences=pair_msa_sequences, max_templates=MAX_TEMPLATES) np_example = process_final(np_example) return np_example def crop_chains( chains_list: List[pipeline.FeatureDict], msa_crop_size: int, pair_msa_sequences: bool, max_templates: int) -> List[pipeline.FeatureDict]: """Crops the MSAs for a set of chains. Args: chains_list: A list of chains to be cropped. msa_crop_size: The total number of sequences to crop from the MSA. pair_msa_sequences: Whether we are operating in sequence-pairing mode. max_templates: The maximum templates to use per chain. Returns: The chains cropped. """ # Apply the cropping. 
cropped_chains = [] for chain in chains_list: cropped_chain = _crop_single_chain( chain, msa_crop_size=msa_crop_size, pair_msa_sequences=pair_msa_sequences, max_templates=max_templates) cropped_chains.append(cropped_chain) return cropped_chains def _crop_single_chain(chain: pipeline.FeatureDict, msa_crop_size: int, pair_msa_sequences: bool, max_templates: int) -> pipeline.FeatureDict: """Crops msa sequences to `msa_crop_size`.""" msa_size = chain['num_alignments'] if pair_msa_sequences: msa_size_all_seq = chain['num_alignments_all_seq'] msa_crop_size_all_seq = np.minimum(msa_size_all_seq, msa_crop_size // 2) # We reduce the number of un-paired sequences, by the number of times a # sequence from this chain's MSA is included in the paired MSA. This keeps # the MSA size for each chain roughly constant. msa_all_seq = chain['msa_all_seq'][:msa_crop_size_all_seq, :] num_non_gapped_pairs = np.sum( np.any(msa_all_seq != msa_pairing.MSA_GAP_IDX, axis=1)) num_non_gapped_pairs = np.minimum(num_non_gapped_pairs, msa_crop_size_all_seq) # Restrict the unpaired crop size so that paired+unpaired sequences do not # exceed msa_seqs_per_chain for each chain. max_msa_crop_size = np.maximum(msa_crop_size - num_non_gapped_pairs, 0) msa_crop_size = np.minimum(msa_size, max_msa_crop_size) else: msa_crop_size = np.minimum(msa_size, msa_crop_size) include_templates = 'template_aatype' in chain and max_templates if include_templates: num_templates = chain['template_aatype'].shape[0] templates_crop_size = np.minimum(num_templates, max_templates) for k in chain: k_split = k.split('_all_seq')[0] if k_split in msa_pairing.TEMPLATE_FEATURES: chain[k] = chain[k][:templates_crop_size, :] elif k_split in msa_pairing.MSA_FEATURES: if '_all_seq' in k and pair_msa_sequences: chain[k] = chain[k][:msa_crop_size_all_seq, :] else: chain[k] = chain[k][:msa_crop_size, :] chain['num_alignments'] = np.asarray(msa_crop_size, dtype=np.int32) if include_templates: chain['num_templates'] = np.asarray(templates_crop_size, dtype=np.int32) if pair_msa_sequences: chain['num_alignments_all_seq'] = np.asarray( msa_crop_size_all_seq, dtype=np.int32) return chain def process_final(np_example: pipeline.FeatureDict) -> pipeline.FeatureDict: """Final processing steps in data pipeline, after merging and pairing.""" np_example = _correct_msa_restypes(np_example) np_example = _make_seq_mask(np_example) np_example = _make_msa_mask(np_example) np_example = _filter_features(np_example) return np_example def _correct_msa_restypes(np_example): """Correct MSA restype to have the same order as residue_constants.""" new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE np_example['msa'] = np.take(new_order_list, np_example['msa'], axis=0) np_example['msa'] = np_example['msa'].astype(np.int32) return np_example def _make_seq_mask(np_example): np_example['seq_mask'] = (np_example['entity_id'] > 0).astype(np.float32) return np_example def _make_msa_mask(np_example): """Mask features are all ones, but will later be zero-padded.""" np_example['msa_mask'] = np.ones_like(np_example['msa'], dtype=np.float32) seq_mask = (np_example['entity_id'] > 0).astype(np.float32) np_example['msa_mask'] *= seq_mask[None] return np_example def _filter_features(np_example: pipeline.FeatureDict) -> pipeline.FeatureDict: """Filters features of example to only those requested.""" return {k: v for (k, v) in np_example.items() if k in REQUIRED_FEATURES} def process_unmerged_features( all_chain_features: MutableMapping[str, pipeline.FeatureDict]): """Postprocessing stage for 
per-chain features before merging.""" num_chains = len(all_chain_features) for chain_features in all_chain_features.values(): # Convert deletion matrices to float. chain_features['deletion_matrix'] = np.asarray( chain_features.pop('deletion_matrix_int'), dtype=np.float32) if 'deletion_matrix_int_all_seq' in chain_features: chain_features['deletion_matrix_all_seq'] = np.asarray( chain_features.pop('deletion_matrix_int_all_seq'), dtype=np.float32) chain_features['deletion_mean'] = np.mean( chain_features['deletion_matrix'], axis=0) # Add all_atom_mask and dummy all_atom_positions based on aatype. all_atom_mask = residue_constants.STANDARD_ATOM_MASK[ chain_features['aatype']] chain_features['all_atom_mask'] = all_atom_mask chain_features['all_atom_positions'] = np.zeros( list(all_atom_mask.shape) + [3]) # Add assembly_num_chains. chain_features['assembly_num_chains'] = np.asarray(num_chains) # Add entity_mask. for chain_features in all_chain_features.values(): chain_features['entity_mask'] = ( chain_features['entity_id'] != 0).astype(np.int32)
alphafold-main
alphafold/data/feature_processing.py
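The unpaired-crop bookkeeping in _crop_single_chain is easy to misread, so here is a small, self-contained numeric sketch of the same arithmetic. Apart from MSA_CROP_SIZE = 2048, the sizes below are invented for illustration only.

# Standalone numeric sketch of the paired/unpaired MSA crop arithmetic used
# in _crop_single_chain; the chain-specific sizes are made up.
import numpy as np

msa_crop_size = 2048          # MSA_CROP_SIZE from this module.
msa_size = 6000               # Unpaired sequences found for this chain.
msa_size_all_seq = 1500       # Paired ("all_seq") sequences for this chain.

# The paired MSA gets at most half of the budget.
msa_crop_size_all_seq = np.minimum(msa_size_all_seq, msa_crop_size // 2)

# Suppose 900 of the kept paired rows are non-gap for this chain; the
# unpaired budget shrinks by that amount so the per-chain total stays put.
num_non_gapped_pairs = np.minimum(900, msa_crop_size_all_seq)
max_unpaired = np.maximum(msa_crop_size - num_non_gapped_pairs, 0)
unpaired_crop = np.minimum(msa_size, max_unpaired)

print(msa_crop_size_all_seq, unpaired_crop)  # -> 1024 1148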
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for building the features for the AlphaFold multimer model.""" import collections import contextlib import copy import dataclasses import json import os import tempfile from typing import Mapping, MutableMapping, Sequence from absl import logging from alphafold.common import protein from alphafold.common import residue_constants from alphafold.data import feature_processing from alphafold.data import msa_pairing from alphafold.data import parsers from alphafold.data import pipeline from alphafold.data.tools import jackhmmer import numpy as np # Internal import (7716). @dataclasses.dataclass(frozen=True) class _FastaChain: sequence: str description: str def _make_chain_id_map(*, sequences: Sequence[str], descriptions: Sequence[str], ) -> Mapping[str, _FastaChain]: """Makes a mapping from PDB-format chain ID to sequence and description.""" if len(sequences) != len(descriptions): raise ValueError('sequences and descriptions must have equal length. ' f'Got {len(sequences)} != {len(descriptions)}.') if len(sequences) > protein.PDB_MAX_CHAINS: raise ValueError('Cannot process more chains than the PDB format supports. ' f'Got {len(sequences)} chains.') chain_id_map = {} for chain_id, sequence, description in zip( protein.PDB_CHAIN_IDS, sequences, descriptions): chain_id_map[chain_id] = _FastaChain( sequence=sequence, description=description) return chain_id_map @contextlib.contextmanager def temp_fasta_file(fasta_str: str): with tempfile.NamedTemporaryFile('w', suffix='.fasta') as fasta_file: fasta_file.write(fasta_str) fasta_file.seek(0) yield fasta_file.name def convert_monomer_features( monomer_features: pipeline.FeatureDict, chain_id: str) -> pipeline.FeatureDict: """Reshapes and modifies monomer features for multimer models.""" converted = {} converted['auth_chain_id'] = np.asarray(chain_id, dtype=np.object_) unnecessary_leading_dim_feats = { 'sequence', 'domain_name', 'num_alignments', 'seq_length'} for feature_name, feature in monomer_features.items(): if feature_name in unnecessary_leading_dim_feats: # asarray ensures it's a np.ndarray. feature = np.asarray(feature[0], dtype=feature.dtype) elif feature_name == 'aatype': # The multimer model performs the one-hot operation itself. feature = np.argmax(feature, axis=-1).astype(np.int32) elif feature_name == 'template_aatype': feature = np.argmax(feature, axis=-1).astype(np.int32) new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE feature = np.take(new_order_list, feature.astype(np.int32), axis=0) elif feature_name == 'template_all_atom_masks': feature_name = 'template_all_atom_mask' converted[feature_name] = feature return converted def int_id_to_str_id(num: int) -> str: """Encodes a number as a string, using reverse spreadsheet style naming. Args: num: A positive integer. Returns: A string that encodes the positive integer using reverse spreadsheet style, naming e.g. 1 = A, 2 = B, ..., 27 = AA, 28 = BA, 29 = CA, ... 
This is the usual way to encode chain IDs in mmCIF files. """ if num <= 0: raise ValueError(f'Only positive integers allowed, got {num}.') num = num - 1 # 1-based indexing. output = [] while num >= 0: output.append(chr(num % 26 + ord('A'))) num = num // 26 - 1 return ''.join(output) def add_assembly_features( all_chain_features: MutableMapping[str, pipeline.FeatureDict], ) -> MutableMapping[str, pipeline.FeatureDict]: """Add features to distinguish between chains. Args: all_chain_features: A dictionary which maps chain_id to a dictionary of features for each chain. Returns: all_chain_features: A dictionary which maps strings of the form `<seq_id>_<sym_id>` to the corresponding chain features. E.g. two chains from a homodimer would have keys A_1 and A_2. Two chains from a heterodimer would have keys A_1 and B_1. """ # Group the chains by sequence seq_to_entity_id = {} grouped_chains = collections.defaultdict(list) for chain_id, chain_features in all_chain_features.items(): seq = str(chain_features['sequence']) if seq not in seq_to_entity_id: seq_to_entity_id[seq] = len(seq_to_entity_id) + 1 grouped_chains[seq_to_entity_id[seq]].append(chain_features) new_all_chain_features = {} chain_id = 1 for entity_id, group_chain_features in grouped_chains.items(): for sym_id, chain_features in enumerate(group_chain_features, start=1): new_all_chain_features[ f'{int_id_to_str_id(entity_id)}_{sym_id}'] = chain_features seq_length = chain_features['seq_length'] chain_features['asym_id'] = chain_id * np.ones(seq_length) chain_features['sym_id'] = sym_id * np.ones(seq_length) chain_features['entity_id'] = entity_id * np.ones(seq_length) chain_id += 1 return new_all_chain_features def pad_msa(np_example, min_num_seq): np_example = dict(np_example) num_seq = np_example['msa'].shape[0] if num_seq < min_num_seq: for feat in ('msa', 'deletion_matrix', 'bert_mask', 'msa_mask'): np_example[feat] = np.pad( np_example[feat], ((0, min_num_seq - num_seq), (0, 0))) np_example['cluster_bias_mask'] = np.pad( np_example['cluster_bias_mask'], ((0, min_num_seq - num_seq),)) return np_example class DataPipeline: """Runs the alignment tools and assembles the input features.""" def __init__(self, monomer_data_pipeline: pipeline.DataPipeline, jackhmmer_binary_path: str, uniprot_database_path: str, max_uniprot_hits: int = 50000, use_precomputed_msas: bool = False): """Initializes the data pipeline. Args: monomer_data_pipeline: An instance of pipeline.DataPipeline - that runs the data pipeline for the monomer AlphaFold system. jackhmmer_binary_path: Location of the jackhmmer binary. uniprot_database_path: Location of the unclustered uniprot sequences, that will be searched with jackhmmer and used for MSA pairing. max_uniprot_hits: The maximum number of hits to return from uniprot. use_precomputed_msas: Whether to use pre-existing MSAs; see run_alphafold. 
""" self._monomer_data_pipeline = monomer_data_pipeline self._uniprot_msa_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=uniprot_database_path) self._max_uniprot_hits = max_uniprot_hits self.use_precomputed_msas = use_precomputed_msas def _process_single_chain( self, chain_id: str, sequence: str, description: str, msa_output_dir: str, is_homomer_or_monomer: bool) -> pipeline.FeatureDict: """Runs the monomer pipeline on a single chain.""" chain_fasta_str = f'>chain_{chain_id}\n{sequence}\n' chain_msa_output_dir = os.path.join(msa_output_dir, chain_id) if not os.path.exists(chain_msa_output_dir): os.makedirs(chain_msa_output_dir) with temp_fasta_file(chain_fasta_str) as chain_fasta_path: logging.info('Running monomer pipeline on chain %s: %s', chain_id, description) chain_features = self._monomer_data_pipeline.process( input_fasta_path=chain_fasta_path, msa_output_dir=chain_msa_output_dir) # We only construct the pairing features if there are 2 or more unique # sequences. if not is_homomer_or_monomer: all_seq_msa_features = self._all_seq_msa_features(chain_fasta_path, chain_msa_output_dir) chain_features.update(all_seq_msa_features) return chain_features def _all_seq_msa_features(self, input_fasta_path, msa_output_dir): """Get MSA features for unclustered uniprot, for pairing.""" out_path = os.path.join(msa_output_dir, 'uniprot_hits.sto') result = pipeline.run_msa_tool( self._uniprot_msa_runner, input_fasta_path, out_path, 'sto', self.use_precomputed_msas) msa = parsers.parse_stockholm(result['sto']) msa = msa.truncate(max_seqs=self._max_uniprot_hits) all_seq_features = pipeline.make_msa_features([msa]) valid_feats = msa_pairing.MSA_FEATURES + ( 'msa_species_identifiers', ) feats = {f'{k}_all_seq': v for k, v in all_seq_features.items() if k in valid_feats} return feats def process(self, input_fasta_path: str, msa_output_dir: str) -> pipeline.FeatureDict: """Runs alignment tools on the input sequences and creates features.""" with open(input_fasta_path) as f: input_fasta_str = f.read() input_seqs, input_descs = parsers.parse_fasta(input_fasta_str) chain_id_map = _make_chain_id_map(sequences=input_seqs, descriptions=input_descs) chain_id_map_path = os.path.join(msa_output_dir, 'chain_id_map.json') with open(chain_id_map_path, 'w') as f: chain_id_map_dict = {chain_id: dataclasses.asdict(fasta_chain) for chain_id, fasta_chain in chain_id_map.items()} json.dump(chain_id_map_dict, f, indent=4, sort_keys=True) all_chain_features = {} sequence_features = {} is_homomer_or_monomer = len(set(input_seqs)) == 1 for chain_id, fasta_chain in chain_id_map.items(): if fasta_chain.sequence in sequence_features: all_chain_features[chain_id] = copy.deepcopy( sequence_features[fasta_chain.sequence]) continue chain_features = self._process_single_chain( chain_id=chain_id, sequence=fasta_chain.sequence, description=fasta_chain.description, msa_output_dir=msa_output_dir, is_homomer_or_monomer=is_homomer_or_monomer) chain_features = convert_monomer_features(chain_features, chain_id=chain_id) all_chain_features[chain_id] = chain_features sequence_features[fasta_chain.sequence] = chain_features all_chain_features = add_assembly_features(all_chain_features) np_example = feature_processing.pair_and_merge( all_chain_features=all_chain_features) # Pad MSA to avoid zero-sized extra_msa. np_example = pad_msa(np_example, 512) return np_example
alphafold-main
alphafold/data/pipeline_multimer.py
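A quick, hedged check of the reverse-spreadsheet chain naming and of how add_assembly_features keys chains. The toy per-chain dictionaries are minimal stand-ins that carry only the fields this particular function reads; real chain features contain many more entries.

# Sketch: chain naming used for assembly features, with toy feature dicts.
from alphafold.data import pipeline_multimer

print([pipeline_multimer.int_id_to_str_id(i) for i in (1, 2, 26, 27, 28)])
# -> ['A', 'B', 'Z', 'AA', 'BA']

# Two copies of one sequence plus one distinct sequence (a heterotrimer).
toy_chains = {
    'A': {'sequence': 'MKV', 'seq_length': 3},
    'B': {'sequence': 'MKV', 'seq_length': 3},
    'C': {'sequence': 'GG', 'seq_length': 2},
}
renamed = pipeline_multimer.add_assembly_features(toy_chains)
print(sorted(renamed))            # -> ['A_1', 'A_2', 'B_1']
print(renamed['A_2']['asym_id'])  # -> [2. 2. 2.]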
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities for extracting identifiers from MSA sequence descriptions."""

import dataclasses
import re
from typing import Optional

# Sequences coming from the UniProtKB database come in the
# `db|UniqueIdentifier|EntryName` format, e.g. `tr|A0A146SKV9|A0A146SKV9_FUNHE`
# or `sp|P0C2L1|A3X1_LOXLA` (for TrEMBL/Swiss-Prot respectively).
_UNIPROT_PATTERN = re.compile(
    r"""
    ^
    # UniProtKB/TrEMBL or UniProtKB/Swiss-Prot
    (?:tr|sp)
    \|
    # A primary accession number of the UniProtKB entry.
    (?P<AccessionIdentifier>[A-Za-z0-9]{6,10})
    # Occasionally there is a _0 or _1 isoform suffix, which we ignore.
    (?:_\d)?
    \|
    # TrEMBL repeats the accession ID here. Swiss-Prot has a mnemonic
    # protein ID code.
    (?:[A-Za-z0-9]+)
    _
    # A mnemonic species identification code.
    (?P<SpeciesIdentifier>([A-Za-z0-9]){1,5})
    # Small BFD uses a final value after an underscore, which we ignore.
    (?:_\d+)?
    $
    """,
    re.VERBOSE)


@dataclasses.dataclass(frozen=True)
class Identifiers:
  species_id: str = ''


def _parse_sequence_identifier(msa_sequence_identifier: str) -> Identifiers:
  """Gets the species from an MSA sequence identifier.

  The sequence identifier has the UniProtKB format matched by
  `_UNIPROT_PATTERN`, e.g. `tr|A0A146SKV9|A0A146SKV9_FUNHE`.

  Args:
    msa_sequence_identifier: A sequence identifier.

  Returns:
    An `Identifiers` instance with species_id. This can be empty in the case
    where no identifier was found.
  """
  matches = re.search(_UNIPROT_PATTERN, msa_sequence_identifier.strip())
  if matches:
    return Identifiers(
        species_id=matches.group('SpeciesIdentifier'))
  return Identifiers()


def _extract_sequence_identifier(description: str) -> Optional[str]:
  """Extracts the sequence identifier from a description. Returns None if no match."""
  split_description = description.split()
  if split_description:
    return split_description[0].partition('/')[0]
  else:
    return None


def get_identifiers(description: str) -> Identifiers:
  """Computes extra MSA features from the description."""
  sequence_identifier = _extract_sequence_identifier(description)
  if sequence_identifier is None:
    return Identifiers()
  else:
    return _parse_sequence_identifier(sequence_identifier)
alphafold-main
alphafold/data/msa_identifiers.py
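A minimal sketch of the identifier extraction on UniProtKB-style description lines. The example descriptions are invented but follow the `db|UniqueIdentifier|EntryName` format the regex targets.

# Sketch: species extraction from MSA description lines. The descriptions
# are made up but follow the UniProtKB naming convention.
from alphafold.data import msa_identifiers

for desc in ('tr|A0A146SKV9|A0A146SKV9_FUNHE/1-217 some free text',
             'sp|P0C2L1|A3X1_LOXLA',
             'UniRef90_ABC123 not a UniProtKB header'):
  ids = msa_identifiers.get_identifiers(desc)
  print(repr(ids.species_id))
# -> 'FUNHE', 'LOXLA', '' (empty when the pattern does not match).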
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data pipeline for model features."""
alphafold-main
alphafold/data/__init__.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for parsing various file formats.""" import collections import dataclasses import itertools import re import string from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Set # Internal import (7716). DeletionMatrix = Sequence[Sequence[int]] @dataclasses.dataclass(frozen=True) class Msa: """Class representing a parsed MSA file.""" sequences: Sequence[str] deletion_matrix: DeletionMatrix descriptions: Sequence[str] def __post_init__(self): if not (len(self.sequences) == len(self.deletion_matrix) == len(self.descriptions)): raise ValueError( 'All fields for an MSA must have the same length. ' f'Got {len(self.sequences)} sequences, ' f'{len(self.deletion_matrix)} rows in the deletion matrix and ' f'{len(self.descriptions)} descriptions.') def __len__(self): return len(self.sequences) def truncate(self, max_seqs: int): return Msa(sequences=self.sequences[:max_seqs], deletion_matrix=self.deletion_matrix[:max_seqs], descriptions=self.descriptions[:max_seqs]) @dataclasses.dataclass(frozen=True) class TemplateHit: """Class representing a template hit.""" index: int name: str aligned_cols: int sum_probs: Optional[float] query: str hit_sequence: str indices_query: List[int] indices_hit: List[int] def parse_fasta(fasta_string: str) -> Tuple[Sequence[str], Sequence[str]]: """Parses FASTA string and returns list of strings with amino-acid sequences. Arguments: fasta_string: The string contents of a FASTA file. Returns: A tuple of two lists: * A list of sequences. * A list of sequence descriptions taken from the comment lines. In the same order as the sequences. """ sequences = [] descriptions = [] index = -1 for line in fasta_string.splitlines(): line = line.strip() if line.startswith('>'): index += 1 descriptions.append(line[1:]) # Remove the '>' at the beginning. sequences.append('') continue elif not line: continue # Skip blank lines. sequences[index] += line return sequences, descriptions def parse_stockholm(stockholm_string: str) -> Msa: """Parses sequences and deletion matrix from stockholm format alignment. Args: stockholm_string: The string contents of a stockholm file. The first sequence in the file should be the query sequence. Returns: A tuple of: * A list of sequences that have been aligned to the query. These might contain duplicates. * The deletion matrix for the alignment as a list of lists. The element at `deletion_matrix[i][j]` is the number of residues deleted from the aligned sequence i at residue position j. * The names of the targets matched, including the jackhmmer subsequence suffix. 
""" name_to_sequence = collections.OrderedDict() for line in stockholm_string.splitlines(): line = line.strip() if not line or line.startswith(('#', '//')): continue name, sequence = line.split() if name not in name_to_sequence: name_to_sequence[name] = '' name_to_sequence[name] += sequence msa = [] deletion_matrix = [] query = '' keep_columns = [] for seq_index, sequence in enumerate(name_to_sequence.values()): if seq_index == 0: # Gather the columns with gaps from the query query = sequence keep_columns = [i for i, res in enumerate(query) if res != '-'] # Remove the columns with gaps in the query from all sequences. aligned_sequence = ''.join([sequence[c] for c in keep_columns]) msa.append(aligned_sequence) # Count the number of deletions w.r.t. query. deletion_vec = [] deletion_count = 0 for seq_res, query_res in zip(sequence, query): if seq_res != '-' or query_res != '-': if query_res == '-': deletion_count += 1 else: deletion_vec.append(deletion_count) deletion_count = 0 deletion_matrix.append(deletion_vec) return Msa(sequences=msa, deletion_matrix=deletion_matrix, descriptions=list(name_to_sequence.keys())) def parse_a3m(a3m_string: str) -> Msa: """Parses sequences and deletion matrix from a3m format alignment. Args: a3m_string: The string contents of a a3m file. The first sequence in the file should be the query sequence. Returns: A tuple of: * A list of sequences that have been aligned to the query. These might contain duplicates. * The deletion matrix for the alignment as a list of lists. The element at `deletion_matrix[i][j]` is the number of residues deleted from the aligned sequence i at residue position j. * A list of descriptions, one per sequence, from the a3m file. """ sequences, descriptions = parse_fasta(a3m_string) deletion_matrix = [] for msa_sequence in sequences: deletion_vec = [] deletion_count = 0 for j in msa_sequence: if j.islower(): deletion_count += 1 else: deletion_vec.append(deletion_count) deletion_count = 0 deletion_matrix.append(deletion_vec) # Make the MSA matrix out of aligned (deletion-free) sequences. deletion_table = str.maketrans('', '', string.ascii_lowercase) aligned_sequences = [s.translate(deletion_table) for s in sequences] return Msa(sequences=aligned_sequences, deletion_matrix=deletion_matrix, descriptions=descriptions) def _convert_sto_seq_to_a3m( query_non_gaps: Sequence[bool], sto_seq: str) -> Iterable[str]: for is_query_res_non_gap, sequence_res in zip(query_non_gaps, sto_seq): if is_query_res_non_gap: yield sequence_res elif sequence_res != '-': yield sequence_res.lower() def convert_stockholm_to_a3m(stockholm_format: str, max_sequences: Optional[int] = None, remove_first_row_gaps: bool = True) -> str: """Converts MSA in Stockholm format to the A3M format.""" descriptions = {} sequences = {} reached_max_sequences = False for line in stockholm_format.splitlines(): reached_max_sequences = max_sequences and len(sequences) >= max_sequences if line.strip() and not line.startswith(('#', '//')): # Ignore blank lines, markup and end symbols - remainder are alignment # sequence parts. seqname, aligned_seq = line.split(maxsplit=1) if seqname not in sequences: if reached_max_sequences: continue sequences[seqname] = '' sequences[seqname] += aligned_seq for line in stockholm_format.splitlines(): if line[:4] == '#=GS': # Description row - example format is: # #=GS UniRef90_Q9H5Z4/4-78 DE [subseq from] cDNA: FLJ22755 ... 
columns = line.split(maxsplit=3) seqname, feature = columns[1:3] value = columns[3] if len(columns) == 4 else '' if feature != 'DE': continue if reached_max_sequences and seqname not in sequences: continue descriptions[seqname] = value if len(descriptions) == len(sequences): break # Convert sto format to a3m line by line a3m_sequences = {} if remove_first_row_gaps: # query_sequence is assumed to be the first sequence query_sequence = next(iter(sequences.values())) query_non_gaps = [res != '-' for res in query_sequence] for seqname, sto_sequence in sequences.items(): # Dots are optional in a3m format and are commonly removed. out_sequence = sto_sequence.replace('.', '') if remove_first_row_gaps: out_sequence = ''.join( _convert_sto_seq_to_a3m(query_non_gaps, out_sequence)) a3m_sequences[seqname] = out_sequence fasta_chunks = (f">{k} {descriptions.get(k, '')}\n{a3m_sequences[k]}" for k in a3m_sequences) return '\n'.join(fasta_chunks) + '\n' # Include terminating newline. def _keep_line(line: str, seqnames: Set[str]) -> bool: """Function to decide which lines to keep.""" if not line.strip(): return True if line.strip() == '//': # End tag return True if line.startswith('# STOCKHOLM'): # Start tag return True if line.startswith('#=GC RF'): # Reference Annotation Line return True if line[:4] == '#=GS': # Description lines - keep if sequence in list. _, seqname, _ = line.split(maxsplit=2) return seqname in seqnames elif line.startswith('#'): # Other markup - filter out return False else: # Alignment data - keep if sequence in list. seqname = line.partition(' ')[0] return seqname in seqnames def truncate_stockholm_msa(stockholm_msa_path: str, max_sequences: int) -> str: """Reads + truncates a Stockholm file while preventing excessive RAM usage.""" seqnames = set() filtered_lines = [] with open(stockholm_msa_path) as f: for line in f: if line.strip() and not line.startswith(('#', '//')): # Ignore blank lines, markup and end symbols - remainder are alignment # sequence parts. seqname = line.partition(' ')[0] seqnames.add(seqname) if len(seqnames) >= max_sequences: break f.seek(0) for line in f: if _keep_line(line, seqnames): filtered_lines.append(line) return ''.join(filtered_lines) def remove_empty_columns_from_stockholm_msa(stockholm_msa: str) -> str: """Removes empty columns (dashes-only) from a Stockholm MSA.""" processed_lines = {} unprocessed_lines = {} for i, line in enumerate(stockholm_msa.splitlines()): if line.startswith('#=GC RF'): reference_annotation_i = i reference_annotation_line = line # Reached the end of this chunk of the alignment. Process chunk. _, _, first_alignment = line.rpartition(' ') mask = [] for j in range(len(first_alignment)): for _, unprocessed_line in unprocessed_lines.items(): prefix, _, alignment = unprocessed_line.rpartition(' ') if alignment[j] != '-': mask.append(True) break else: # Every row contained a hyphen - empty column. mask.append(False) # Add reference annotation for processing with mask. unprocessed_lines[reference_annotation_i] = reference_annotation_line if not any(mask): # All columns were empty. Output empty lines for chunk. for line_index in unprocessed_lines: processed_lines[line_index] = '' else: for line_index, unprocessed_line in unprocessed_lines.items(): prefix, _, alignment = unprocessed_line.rpartition(' ') masked_alignment = ''.join(itertools.compress(alignment, mask)) processed_lines[line_index] = f'{prefix} {masked_alignment}' # Clear raw_alignments. 
unprocessed_lines = {} elif line.strip() and not line.startswith(('#', '//')): unprocessed_lines[i] = line else: processed_lines[i] = line return '\n'.join((processed_lines[i] for i in range(len(processed_lines)))) def deduplicate_stockholm_msa(stockholm_msa: str) -> str: """Remove duplicate sequences (ignoring insertions wrt query).""" sequence_dict = collections.defaultdict(str) # First we must extract all sequences from the MSA. for line in stockholm_msa.splitlines(): # Only consider the alignments - ignore reference annotation, empty lines, # descriptions or markup. if line.strip() and not line.startswith(('#', '//')): line = line.strip() seqname, alignment = line.split() sequence_dict[seqname] += alignment seen_sequences = set() seqnames = set() # First alignment is the query. query_align = next(iter(sequence_dict.values())) mask = [c != '-' for c in query_align] # Mask is False for insertions. for seqname, alignment in sequence_dict.items(): # Apply mask to remove all insertions from the string. masked_alignment = ''.join(itertools.compress(alignment, mask)) if masked_alignment in seen_sequences: continue else: seen_sequences.add(masked_alignment) seqnames.add(seqname) filtered_lines = [] for line in stockholm_msa.splitlines(): if _keep_line(line, seqnames): filtered_lines.append(line) return '\n'.join(filtered_lines) + '\n' def _get_hhr_line_regex_groups( regex_pattern: str, line: str) -> Sequence[Optional[str]]: match = re.match(regex_pattern, line) if match is None: raise RuntimeError(f'Could not parse query line {line}') return match.groups() def _update_hhr_residue_indices_list( sequence: str, start_index: int, indices_list: List[int]): """Computes the relative indices for each residue with respect to the original sequence.""" counter = start_index for symbol in sequence: if symbol == '-': indices_list.append(-1) else: indices_list.append(counter) counter += 1 def _parse_hhr_hit(detailed_lines: Sequence[str]) -> TemplateHit: """Parses the detailed HMM HMM comparison section for a single Hit. This works on .hhr files generated from both HHBlits and HHSearch. Args: detailed_lines: A list of lines from a single comparison section between 2 sequences (which each have their own HMM's) Returns: A dictionary with the information from that detailed comparison section Raises: RuntimeError: If a certain line cannot be processed """ # Parse first 2 lines. number_of_hit = int(detailed_lines[0].split()[-1]) name_hit = detailed_lines[1][1:] # Parse the summary line. pattern = ( 'Probab=(.*)[\t ]*E-value=(.*)[\t ]*Score=(.*)[\t ]*Aligned_cols=(.*)[\t' ' ]*Identities=(.*)%[\t ]*Similarity=(.*)[\t ]*Sum_probs=(.*)[\t ' ']*Template_Neff=(.*)') match = re.match(pattern, detailed_lines[2]) if match is None: raise RuntimeError( 'Could not parse section: %s. Expected this: \n%s to contain summary.' % (detailed_lines, detailed_lines[2])) (_, _, _, aligned_cols, _, _, sum_probs, _) = [float(x) for x in match.groups()] # The next section reads the detailed comparisons. These are in a 'human # readable' format which has a fixed length. The strategy employed is to # assume that each block starts with the query sequence line, and to parse # that with a regexp in order to deduce the fixed length used for that block. 
query = '' hit_sequence = '' indices_query = [] indices_hit = [] length_block = None for line in detailed_lines[3:]: # Parse the query sequence line if (line.startswith('Q ') and not line.startswith('Q ss_dssp') and not line.startswith('Q ss_pred') and not line.startswith('Q Consensus')): # Thus the first 17 characters must be 'Q <query_name> ', and we can parse # everything after that. # start sequence end total_sequence_length patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*([0-9]*) \([0-9]*\)' groups = _get_hhr_line_regex_groups(patt, line[17:]) # Get the length of the parsed block using the start and finish indices, # and ensure it is the same as the actual block length. start = int(groups[0]) - 1 # Make index zero based. delta_query = groups[1] end = int(groups[2]) num_insertions = len([x for x in delta_query if x == '-']) length_block = end - start + num_insertions assert length_block == len(delta_query) # Update the query sequence and indices list. query += delta_query _update_hhr_residue_indices_list(delta_query, start, indices_query) elif line.startswith('T '): # Parse the hit sequence. if (not line.startswith('T ss_dssp') and not line.startswith('T ss_pred') and not line.startswith('T Consensus')): # Thus the first 17 characters must be 'T <hit_name> ', and we can # parse everything after that. # start sequence end total_sequence_length patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*[0-9]* \([0-9]*\)' groups = _get_hhr_line_regex_groups(patt, line[17:]) start = int(groups[0]) - 1 # Make index zero based. delta_hit_sequence = groups[1] assert length_block == len(delta_hit_sequence) # Update the hit sequence and indices list. hit_sequence += delta_hit_sequence _update_hhr_residue_indices_list(delta_hit_sequence, start, indices_hit) return TemplateHit( index=number_of_hit, name=name_hit, aligned_cols=int(aligned_cols), sum_probs=sum_probs, query=query, hit_sequence=hit_sequence, indices_query=indices_query, indices_hit=indices_hit, ) def parse_hhr(hhr_string: str) -> Sequence[TemplateHit]: """Parses the content of an entire HHR file.""" lines = hhr_string.splitlines() # Each .hhr file starts with a results table, then has a sequence of hit # "paragraphs", each paragraph starting with a line 'No <hit number>'. We # iterate through each paragraph to parse each hit. block_starts = [i for i, line in enumerate(lines) if line.startswith('No ')] hits = [] if block_starts: block_starts.append(len(lines)) # Add the end of the final block. for i in range(len(block_starts) - 1): hits.append(_parse_hhr_hit(lines[block_starts[i]:block_starts[i + 1]])) return hits def parse_e_values_from_tblout(tblout: str) -> Dict[str, float]: """Parse target to e-value mapping parsed from Jackhmmer tblout string.""" e_values = {'query': 0} lines = [line for line in tblout.splitlines() if line[0] != '#'] # As per http://eddylab.org/software/hmmer/Userguide.pdf fields are # space-delimited. Relevant fields are (1) target name: and # (5) E-value (full sequence) (numbering from 1). for line in lines: fields = line.split() e_value = fields[4] target_name = fields[0] e_values[target_name] = float(e_value) return e_values def _get_indices(sequence: str, start: int) -> List[int]: """Returns indices for non-gap/insert residues starting at the given index.""" indices = [] counter = start for symbol in sequence: # Skip gaps but add a placeholder so that the alignment is preserved. if symbol == '-': indices.append(-1) # Skip deleted residues, but increase the counter. elif symbol.islower(): counter += 1 # Normal aligned residue. 
Increase the counter and append to indices. else: indices.append(counter) counter += 1 return indices @dataclasses.dataclass(frozen=True) class HitMetadata: pdb_id: str chain: str start: int end: int length: int text: str def _parse_hmmsearch_description(description: str) -> HitMetadata: """Parses the hmmsearch A3M sequence description line.""" # Example 1: >4pqx_A/2-217 [subseq from] mol:protein length:217 Free text # Example 2: >5g3r_A/1-55 [subseq from] mol:protein length:352 match = re.match( r'^>?([a-z0-9]+)_(\w+)/([0-9]+)-([0-9]+).*protein length:([0-9]+) *(.*)$', description.strip()) if not match: raise ValueError(f'Could not parse description: "{description}".') return HitMetadata( pdb_id=match[1], chain=match[2], start=int(match[3]), end=int(match[4]), length=int(match[5]), text=match[6]) def parse_hmmsearch_a3m(query_sequence: str, a3m_string: str, skip_first: bool = True) -> Sequence[TemplateHit]: """Parses an a3m string produced by hmmsearch. Args: query_sequence: The query sequence. a3m_string: The a3m string produced by hmmsearch. skip_first: Whether to skip the first sequence in the a3m string. Returns: A sequence of `TemplateHit` results. """ # Zip the descriptions and MSAs together, skip the first query sequence. parsed_a3m = list(zip(*parse_fasta(a3m_string))) if skip_first: parsed_a3m = parsed_a3m[1:] indices_query = _get_indices(query_sequence, start=0) hits = [] for i, (hit_sequence, hit_description) in enumerate(parsed_a3m, start=1): if 'mol:protein' not in hit_description: continue # Skip non-protein chains. metadata = _parse_hmmsearch_description(hit_description) # Aligned columns are only the match states. aligned_cols = sum([r.isupper() and r != '-' for r in hit_sequence]) indices_hit = _get_indices(hit_sequence, start=metadata.start - 1) hit = TemplateHit( index=i, name=f'{metadata.pdb_id}_{metadata.chain}', aligned_cols=aligned_cols, sum_probs=None, query=query_sequence, hit_sequence=hit_sequence.upper(), indices_query=indices_query, indices_hit=indices_hit, ) hits.append(hit) return hits
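# Illustrative usage sketch (editor's addition, not part of the original
# module). It assumes a hypothetical HHSearch output file 'example.hhr' in the
# working directory; the path and the printed fields are placeholders chosen
# only to show how the parser above is typically driven.
if __name__ == '__main__':
  import pathlib

  example_hhr = pathlib.Path('example.hhr').read_text()
  for template_hit in parse_hhr(example_hhr):
    # Each TemplateHit carries the aligned query/hit strings plus per-residue
    # index lists mapping back to the original sequences (-1 marks a gap).
    print(template_hit.index, template_hit.name, template_hit.aligned_cols,
          template_hit.sum_probs)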
alphafold-main
alphafold/data/parsers.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for getting templates and calculating template features.""" import abc import dataclasses import datetime import functools import glob import os import re from typing import Any, Dict, Mapping, Optional, Sequence, Tuple from absl import logging from alphafold.common import residue_constants from alphafold.data import mmcif_parsing from alphafold.data import parsers from alphafold.data.tools import kalign import numpy as np # Internal import (7716). class Error(Exception): """Base class for exceptions.""" class NoChainsError(Error): """An error indicating that template mmCIF didn't have any chains.""" class SequenceNotInTemplateError(Error): """An error indicating that template mmCIF didn't contain the sequence.""" class NoAtomDataInTemplateError(Error): """An error indicating that template mmCIF didn't contain atom positions.""" class TemplateAtomMaskAllZerosError(Error): """An error indicating that template mmCIF had all atom positions masked.""" class QueryToTemplateAlignError(Error): """An error indicating that the query can't be aligned to the template.""" class CaDistanceError(Error): """An error indicating that a CA atom distance exceeds a threshold.""" class MultipleChainsError(Error): """An error indicating that multiple chains were found for a given ID.""" # Prefilter exceptions. class PrefilterError(Exception): """A base class for template prefilter exceptions.""" class DateError(PrefilterError): """An error indicating that the hit date was after the max allowed date.""" class AlignRatioError(PrefilterError): """An error indicating that the hit align ratio to the query was too small.""" class DuplicateError(PrefilterError): """An error indicating that the hit was an exact subsequence of the query.""" class LengthError(PrefilterError): """An error indicating that the hit was too short.""" TEMPLATE_FEATURES = { 'template_aatype': np.float32, 'template_all_atom_masks': np.float32, 'template_all_atom_positions': np.float32, 'template_domain_names': object, 'template_sequence': object, 'template_sum_probs': np.float32, } def _get_pdb_id_and_chain(hit: parsers.TemplateHit) -> Tuple[str, str]: """Returns PDB id and chain id for an HHSearch Hit.""" # PDB ID: 4 letters. Chain ID: 1+ alphanumeric letters or "." if unknown. id_match = re.match(r'[a-zA-Z\d]{4}_[a-zA-Z0-9.]+', hit.name) if not id_match: raise ValueError(f'hit.name did not start with PDBID_chain: {hit.name}') pdb_id, chain_id = id_match.group(0).split('_') return pdb_id.lower(), chain_id def _is_after_cutoff( pdb_id: str, release_dates: Mapping[str, datetime.datetime], release_date_cutoff: Optional[datetime.datetime]) -> bool: """Checks if the template date is after the release date cutoff. Args: pdb_id: 4 letter pdb code. release_dates: Dictionary mapping PDB ids to their structure release dates. release_date_cutoff: Max release date that is valid for this query. 
Returns: True if the template release date is after the cutoff, False otherwise. """ if release_date_cutoff is None: raise ValueError('The release_date_cutoff must not be None.') if pdb_id in release_dates: return release_dates[pdb_id] > release_date_cutoff else: # Since this is just a quick prefilter to reduce the number of mmCIF files # we need to parse, we don't have to worry about returning True here. return False def _parse_obsolete(obsolete_file_path: str) -> Mapping[str, Optional[str]]: """Parses the data file from PDB that lists which pdb_ids are obsolete.""" with open(obsolete_file_path) as f: result = {} for line in f: line = line.strip() # Format: Date From To # 'OBSLTE 06-NOV-19 6G9Y' - Removed, rare # 'OBSLTE 31-JUL-94 116L 216L' - Replaced, common # 'OBSLTE 26-SEP-06 2H33 2JM5 2OWI' - Replaced by multiple, rare if line.startswith('OBSLTE'): if len(line) > 30: # Replaced by at least one structure. from_id = line[20:24].lower() to_id = line[29:33].lower() result[from_id] = to_id elif len(line) == 24: # Removed. from_id = line[20:24].lower() result[from_id] = None return result def _parse_release_dates(path: str) -> Mapping[str, datetime.datetime]: """Parses release dates file, returns a mapping from PDBs to release dates.""" if path.endswith('txt'): release_dates = {} with open(path, 'r') as f: for line in f: pdb_id, date = line.split(':') date = date.strip() # Python 3.6 doesn't have datetime.date.fromisoformat() which is about # 90x faster than strptime. However, splitting the string manually is # about 10x faster than strptime. release_dates[pdb_id.strip()] = datetime.datetime( year=int(date[:4]), month=int(date[5:7]), day=int(date[8:10])) return release_dates else: raise ValueError('Invalid format of the release date file %s.' % path) def _assess_hhsearch_hit( hit: parsers.TemplateHit, hit_pdb_code: str, query_sequence: str, release_dates: Mapping[str, datetime.datetime], release_date_cutoff: datetime.datetime, max_subsequence_ratio: float = 0.95, min_align_ratio: float = 0.1) -> bool: """Determines if template is valid (without parsing the template mmcif file). Args: hit: HhrHit for the template. hit_pdb_code: The 4 letter pdb code of the template hit. This might be different from the value in the actual hit since the original pdb might have become obsolete. query_sequence: Amino acid sequence of the query. release_dates: Dictionary mapping pdb codes to their structure release dates. release_date_cutoff: Max release date that is valid for this query. max_subsequence_ratio: Exclude any exact matches with this much overlap. min_align_ratio: Minimum overlap between the template and query. Returns: True if the hit passed the prefilter. Raises an exception otherwise. Raises: DateError: If the hit date was after the max allowed date. AlignRatioError: If the hit align ratio to the query was too small. DuplicateError: If the hit was an exact subsequence of the query. LengthError: If the hit was too short. """ aligned_cols = hit.aligned_cols align_ratio = aligned_cols / len(query_sequence) template_sequence = hit.hit_sequence.replace('-', '') length_ratio = float(len(template_sequence)) / len(query_sequence) # Check whether the template is a large subsequence or duplicate of original # query. This can happen due to duplicate entries in the PDB database. 
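  # For example (editor's illustrative note): with the default
  # max_subsequence_ratio of 0.95, a template that is a verbatim substring of
  # the query and covers 96% of its length is rejected as a duplicate below,
  # while one covering only half of the query is kept.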
duplicate = (template_sequence in query_sequence and length_ratio > max_subsequence_ratio) if _is_after_cutoff(hit_pdb_code, release_dates, release_date_cutoff): raise DateError(f'Date ({release_dates[hit_pdb_code]}) > max template date ' f'({release_date_cutoff}).') if align_ratio <= min_align_ratio: raise AlignRatioError('Proportion of residues aligned to query too small. ' f'Align ratio: {align_ratio}.') if duplicate: raise DuplicateError('Template is an exact subsequence of query with large ' f'coverage. Length ratio: {length_ratio}.') if len(template_sequence) < 10: raise LengthError(f'Template too short. Length: {len(template_sequence)}.') return True def _find_template_in_pdb( template_chain_id: str, template_sequence: str, mmcif_object: mmcif_parsing.MmcifObject) -> Tuple[str, str, int]: """Tries to find the template chain in the given pdb file. This method tries the three following things in order: 1. Tries if there is an exact match in both the chain ID and the sequence. If yes, the chain sequence is returned. Otherwise: 2. Tries if there is an exact match only in the sequence. If yes, the chain sequence is returned. Otherwise: 3. Tries if there is a fuzzy match (X = wildcard) in the sequence. If yes, the chain sequence is returned. If none of these succeed, a SequenceNotInTemplateError is thrown. Args: template_chain_id: The template chain ID. template_sequence: The template chain sequence. mmcif_object: The PDB object to search for the template in. Returns: A tuple with: * The chain sequence that was found to match the template in the PDB object. * The ID of the chain that is being returned. * The offset where the template sequence starts in the chain sequence. Raises: SequenceNotInTemplateError: If no match is found after the steps described above. """ # Try if there is an exact match in both the chain ID and the (sub)sequence. pdb_id = mmcif_object.file_id chain_sequence = mmcif_object.chain_to_seqres.get(template_chain_id) if chain_sequence and (template_sequence in chain_sequence): logging.info( 'Found an exact template match %s_%s.', pdb_id, template_chain_id) mapping_offset = chain_sequence.find(template_sequence) return chain_sequence, template_chain_id, mapping_offset # Try if there is an exact match in the (sub)sequence only. for chain_id, chain_sequence in mmcif_object.chain_to_seqres.items(): if chain_sequence and (template_sequence in chain_sequence): logging.info('Found a sequence-only match %s_%s.', pdb_id, chain_id) mapping_offset = chain_sequence.find(template_sequence) return chain_sequence, chain_id, mapping_offset # Return a chain sequence that fuzzy matches (X = wildcard) the template. # Make parentheses unnamed groups (?:_) to avoid the 100 named groups limit. regex = ['.' if aa == 'X' else '(?:%s|X)' % aa for aa in template_sequence] regex = re.compile(''.join(regex)) for chain_id, chain_sequence in mmcif_object.chain_to_seqres.items(): match = re.search(regex, chain_sequence) if match: logging.info('Found a fuzzy sequence-only match %s_%s.', pdb_id, chain_id) mapping_offset = match.start() return chain_sequence, chain_id, mapping_offset # No hits, raise an error. raise SequenceNotInTemplateError( 'Could not find the template sequence in %s_%s. 
Template sequence: %s, ' 'chain_to_seqres: %s' % (pdb_id, template_chain_id, template_sequence, mmcif_object.chain_to_seqres)) def _realign_pdb_template_to_query( old_template_sequence: str, template_chain_id: str, mmcif_object: mmcif_parsing.MmcifObject, old_mapping: Mapping[int, int], kalign_binary_path: str) -> Tuple[str, Mapping[int, int]]: """Aligns template from the mmcif_object to the query. In case PDB70 contains a different version of the template sequence, we need to perform a realignment to the actual sequence that is in the mmCIF file. This method performs such realignment, but returns the new sequence and mapping only if the sequence in the mmCIF file is 90% identical to the old sequence. Note that the old_template_sequence comes from the hit, and contains only that part of the chain that matches with the query while the new_template_sequence is the full chain. Args: old_template_sequence: The template sequence that was returned by the PDB template search (typically done using HHSearch). template_chain_id: The template chain id was returned by the PDB template search (typically done using HHSearch). This is used to find the right chain in the mmcif_object chain_to_seqres mapping. mmcif_object: A mmcif_object which holds the actual template data. old_mapping: A mapping from the query sequence to the template sequence. This mapping will be used to compute the new mapping from the query sequence to the actual mmcif_object template sequence by aligning the old_template_sequence and the actual template sequence. kalign_binary_path: The path to a kalign executable. Returns: A tuple (new_template_sequence, new_query_to_template_mapping) where: * new_template_sequence is the actual template sequence that was found in the mmcif_object. * new_query_to_template_mapping is the new mapping from the query to the actual template found in the mmcif_object. Raises: QueryToTemplateAlignError: * If there was an error thrown by the alignment tool. * Or if the actual template sequence differs by more than 10% from the old_template_sequence. """ aligner = kalign.Kalign(binary_path=kalign_binary_path) new_template_sequence = mmcif_object.chain_to_seqres.get( template_chain_id, '') # Sometimes the template chain id is unknown. But if there is only a single # sequence within the mmcif_object, it is safe to assume it is that one. if not new_template_sequence: if len(mmcif_object.chain_to_seqres) == 1: logging.info('Could not find %s in %s, but there is only 1 sequence, so ' 'using that one.', template_chain_id, mmcif_object.file_id) new_template_sequence = list(mmcif_object.chain_to_seqres.values())[0] else: raise QueryToTemplateAlignError( f'Could not find chain {template_chain_id} in {mmcif_object.file_id}. ' 'If there are no mmCIF parsing errors, it is possible it was not a ' 'protein chain.') try: parsed_a3m = parsers.parse_a3m( aligner.align([old_template_sequence, new_template_sequence])) old_aligned_template, new_aligned_template = parsed_a3m.sequences except Exception as e: raise QueryToTemplateAlignError( 'Could not align old template %s to template %s (%s_%s). 
Error: %s' % (old_template_sequence, new_template_sequence, mmcif_object.file_id, template_chain_id, str(e))) logging.info('Old aligned template: %s\nNew aligned template: %s', old_aligned_template, new_aligned_template) old_to_new_template_mapping = {} old_template_index = -1 new_template_index = -1 num_same = 0 for old_template_aa, new_template_aa in zip( old_aligned_template, new_aligned_template): if old_template_aa != '-': old_template_index += 1 if new_template_aa != '-': new_template_index += 1 if old_template_aa != '-' and new_template_aa != '-': old_to_new_template_mapping[old_template_index] = new_template_index if old_template_aa == new_template_aa: num_same += 1 # Require at least 90 % sequence identity wrt to the shorter of the sequences. if float(num_same) / min( len(old_template_sequence), len(new_template_sequence)) < 0.9: raise QueryToTemplateAlignError( 'Insufficient similarity of the sequence in the database: %s to the ' 'actual sequence in the mmCIF file %s_%s: %s. We require at least ' '90 %% similarity wrt to the shorter of the sequences. This is not a ' 'problem unless you think this is a template that should be included.' % (old_template_sequence, mmcif_object.file_id, template_chain_id, new_template_sequence)) new_query_to_template_mapping = {} for query_index, old_template_index in old_mapping.items(): new_query_to_template_mapping[query_index] = ( old_to_new_template_mapping.get(old_template_index, -1)) new_template_sequence = new_template_sequence.replace('-', '') return new_template_sequence, new_query_to_template_mapping def _check_residue_distances(all_positions: np.ndarray, all_positions_mask: np.ndarray, max_ca_ca_distance: float): """Checks if the distance between unmasked neighbor residues is ok.""" ca_position = residue_constants.atom_order['CA'] prev_is_unmasked = False prev_calpha = None for i, (coords, mask) in enumerate(zip(all_positions, all_positions_mask)): this_is_unmasked = bool(mask[ca_position]) if this_is_unmasked: this_calpha = coords[ca_position] if prev_is_unmasked: distance = np.linalg.norm(this_calpha - prev_calpha) if distance > max_ca_ca_distance: raise CaDistanceError( 'The distance between residues %d and %d is %f > limit %f.' 
% ( i, i + 1, distance, max_ca_ca_distance)) prev_calpha = this_calpha prev_is_unmasked = this_is_unmasked def _get_atom_positions( mmcif_object: mmcif_parsing.MmcifObject, auth_chain_id: str, max_ca_ca_distance: float) -> Tuple[np.ndarray, np.ndarray]: """Gets atom positions and mask from a list of Biopython Residues.""" num_res = len(mmcif_object.chain_to_seqres[auth_chain_id]) relevant_chains = [c for c in mmcif_object.structure.get_chains() if c.id == auth_chain_id] if len(relevant_chains) != 1: raise MultipleChainsError( f'Expected exactly one chain in structure with id {auth_chain_id}.') chain = relevant_chains[0] all_positions = np.zeros([num_res, residue_constants.atom_type_num, 3]) all_positions_mask = np.zeros([num_res, residue_constants.atom_type_num], dtype=np.int64) for res_index in range(num_res): pos = np.zeros([residue_constants.atom_type_num, 3], dtype=np.float32) mask = np.zeros([residue_constants.atom_type_num], dtype=np.float32) res_at_position = mmcif_object.seqres_to_structure[auth_chain_id][res_index] if not res_at_position.is_missing: res = chain[(res_at_position.hetflag, res_at_position.position.residue_number, res_at_position.position.insertion_code)] for atom in res.get_atoms(): atom_name = atom.get_name() x, y, z = atom.get_coord() if atom_name in residue_constants.atom_order.keys(): pos[residue_constants.atom_order[atom_name]] = [x, y, z] mask[residue_constants.atom_order[atom_name]] = 1.0 elif atom_name.upper() == 'SE' and res.get_resname() == 'MSE': # Put the coordinates of the selenium atom in the sulphur column. pos[residue_constants.atom_order['SD']] = [x, y, z] mask[residue_constants.atom_order['SD']] = 1.0 # Fix naming errors in arginine residues where NH2 is incorrectly # assigned to be closer to CD than NH1. cd = residue_constants.atom_order['CD'] nh1 = residue_constants.atom_order['NH1'] nh2 = residue_constants.atom_order['NH2'] if (res.get_resname() == 'ARG' and all(mask[atom_index] for atom_index in (cd, nh1, nh2)) and (np.linalg.norm(pos[nh1] - pos[cd]) > np.linalg.norm(pos[nh2] - pos[cd]))): pos[nh1], pos[nh2] = pos[nh2].copy(), pos[nh1].copy() mask[nh1], mask[nh2] = mask[nh2].copy(), mask[nh1].copy() all_positions[res_index] = pos all_positions_mask[res_index] = mask _check_residue_distances( all_positions, all_positions_mask, max_ca_ca_distance) return all_positions, all_positions_mask def _extract_template_features( mmcif_object: mmcif_parsing.MmcifObject, pdb_id: str, mapping: Mapping[int, int], template_sequence: str, query_sequence: str, template_chain_id: str, kalign_binary_path: str) -> Tuple[Dict[str, Any], Optional[str]]: """Parses atom positions in the target structure and aligns with the query. Atoms for each residue in the template structure are indexed to coincide with their corresponding residue in the query sequence, according to the alignment mapping provided. Args: mmcif_object: mmcif_parsing.MmcifObject representing the template. pdb_id: PDB code for the template. mapping: Dictionary mapping indices in the query sequence to indices in the template sequence. template_sequence: String describing the amino acid sequence for the template protein. query_sequence: String describing the amino acid sequence for the query protein. template_chain_id: String ID describing which chain in the structure proto should be used. kalign_binary_path: The path to a kalign executable used for template realignment. Returns: A tuple with: * A dictionary containing the extra features derived from the template protein structure. 
* A warning message if the hit was realigned to the actual mmCIF sequence. Otherwise None. Raises: NoChainsError: If the mmcif object doesn't contain any chains. SequenceNotInTemplateError: If the given chain id / sequence can't be found in the mmcif object. QueryToTemplateAlignError: If the actual template in the mmCIF file can't be aligned to the query. NoAtomDataInTemplateError: If the mmcif object doesn't contain atom positions. TemplateAtomMaskAllZerosError: If the mmcif object doesn't have any unmasked residues. """ if mmcif_object is None or not mmcif_object.chain_to_seqres: raise NoChainsError('No chains in PDB: %s_%s' % (pdb_id, template_chain_id)) warning = None try: seqres, chain_id, mapping_offset = _find_template_in_pdb( template_chain_id=template_chain_id, template_sequence=template_sequence, mmcif_object=mmcif_object) except SequenceNotInTemplateError: # If PDB70 contains a different version of the template, we use the sequence # from the mmcif_object. chain_id = template_chain_id warning = ( f'The exact sequence {template_sequence} was not found in ' f'{pdb_id}_{chain_id}. Realigning the template to the actual sequence.') logging.warning(warning) # This throws an exception if it fails to realign the hit. seqres, mapping = _realign_pdb_template_to_query( old_template_sequence=template_sequence, template_chain_id=template_chain_id, mmcif_object=mmcif_object, old_mapping=mapping, kalign_binary_path=kalign_binary_path) logging.info('Sequence in %s_%s: %s successfully realigned to %s', pdb_id, chain_id, template_sequence, seqres) # The template sequence changed. template_sequence = seqres # No mapping offset, the query is aligned to the actual sequence. mapping_offset = 0 try: # Essentially set to infinity - we don't want to reject templates unless # they're really really bad. all_atom_positions, all_atom_mask = _get_atom_positions( mmcif_object, chain_id, max_ca_ca_distance=150.0) except (CaDistanceError, KeyError) as ex: raise NoAtomDataInTemplateError( 'Could not get atom data (%s_%s): %s' % (pdb_id, chain_id, str(ex)) ) from ex all_atom_positions = np.split(all_atom_positions, all_atom_positions.shape[0]) all_atom_masks = np.split(all_atom_mask, all_atom_mask.shape[0]) output_templates_sequence = [] templates_all_atom_positions = [] templates_all_atom_masks = [] for _ in query_sequence: # Residues in the query_sequence that are not in the template_sequence: templates_all_atom_positions.append( np.zeros((residue_constants.atom_type_num, 3))) templates_all_atom_masks.append(np.zeros(residue_constants.atom_type_num)) output_templates_sequence.append('-') for k, v in mapping.items(): template_index = v + mapping_offset templates_all_atom_positions[k] = all_atom_positions[template_index][0] templates_all_atom_masks[k] = all_atom_masks[template_index][0] output_templates_sequence[k] = template_sequence[v] # Alanine (AA with the lowest number of atoms) has 5 atoms (C, CA, CB, N, O). if np.sum(templates_all_atom_masks) < 5: raise TemplateAtomMaskAllZerosError( 'Template all atom mask was all zeros: %s_%s. 
Residue range: %d-%d' % (pdb_id, chain_id, min(mapping.values()) + mapping_offset, max(mapping.values()) + mapping_offset)) output_templates_sequence = ''.join(output_templates_sequence) templates_aatype = residue_constants.sequence_to_onehot( output_templates_sequence, residue_constants.HHBLITS_AA_TO_ID) return ( { 'template_all_atom_positions': np.array(templates_all_atom_positions), 'template_all_atom_masks': np.array(templates_all_atom_masks), 'template_sequence': output_templates_sequence.encode(), 'template_aatype': np.array(templates_aatype), 'template_domain_names': f'{pdb_id.lower()}_{chain_id}'.encode(), }, warning) def _build_query_to_hit_index_mapping( hit_query_sequence: str, hit_sequence: str, indices_hit: Sequence[int], indices_query: Sequence[int], original_query_sequence: str) -> Mapping[int, int]: """Gets mapping from indices in original query sequence to indices in the hit. hit_query_sequence and hit_sequence are two aligned sequences containing gap characters. hit_query_sequence contains only the part of the original query sequence that matched the hit. When interpreting the indices from the .hhr, we need to correct for this to recover a mapping from original query sequence to the hit sequence. Args: hit_query_sequence: The portion of the query sequence that is in the .hhr hit hit_sequence: The portion of the hit sequence that is in the .hhr indices_hit: The indices for each aminoacid relative to the hit sequence indices_query: The indices for each aminoacid relative to the original query sequence original_query_sequence: String describing the original query sequence. Returns: Dictionary with indices in the original query sequence as keys and indices in the hit sequence as values. """ # If the hit is empty (no aligned residues), return empty mapping if not hit_query_sequence: return {} # Remove gaps and find the offset of hit.query relative to original query. hhsearch_query_sequence = hit_query_sequence.replace('-', '') hit_sequence = hit_sequence.replace('-', '') hhsearch_query_offset = original_query_sequence.find(hhsearch_query_sequence) # Index of -1 used for gap characters. Subtract the min index ignoring gaps. min_idx = min(x for x in indices_hit if x > -1) fixed_indices_hit = [ x - min_idx if x > -1 else -1 for x in indices_hit ] min_idx = min(x for x in indices_query if x > -1) fixed_indices_query = [x - min_idx if x > -1 else -1 for x in indices_query] # Zip the corrected indices, ignore case where both seqs have gap characters. mapping = {} for q_i, q_t in zip(fixed_indices_query, fixed_indices_hit): if q_t != -1 and q_i != -1: if (q_t >= len(hit_sequence) or q_i + hhsearch_query_offset >= len(original_query_sequence)): continue mapping[q_i + hhsearch_query_offset] = q_t return mapping @dataclasses.dataclass(frozen=True) class SingleHitResult: features: Optional[Mapping[str, Any]] error: Optional[str] warning: Optional[str] @functools.lru_cache(16, typed=False) def _read_file(path): with open(path, 'r') as f: file_data = f.read() return file_data def _process_single_hit( query_sequence: str, hit: parsers.TemplateHit, mmcif_dir: str, max_template_date: datetime.datetime, release_dates: Mapping[str, datetime.datetime], obsolete_pdbs: Mapping[str, Optional[str]], kalign_binary_path: str, strict_error_check: bool = False) -> SingleHitResult: """Tries to extract template features from a single HHSearch hit.""" # Fail hard if we can't get the PDB ID and chain name from the hit. 
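  # For example (editor's illustrative note): a hit named '4pqx_A ...' resolves
  # to hit_pdb_code='4pqx' and hit_chain_id='A' via _get_pdb_id_and_chain.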
hit_pdb_code, hit_chain_id = _get_pdb_id_and_chain(hit) # This hit has been removed (obsoleted) from PDB, skip it. if hit_pdb_code in obsolete_pdbs and obsolete_pdbs[hit_pdb_code] is None: return SingleHitResult( features=None, error=None, warning=f'Hit {hit_pdb_code} is obsolete.') if hit_pdb_code not in release_dates: if hit_pdb_code in obsolete_pdbs: hit_pdb_code = obsolete_pdbs[hit_pdb_code] # Pass hit_pdb_code since it might have changed due to the pdb being obsolete. try: _assess_hhsearch_hit( hit=hit, hit_pdb_code=hit_pdb_code, query_sequence=query_sequence, release_dates=release_dates, release_date_cutoff=max_template_date) except PrefilterError as e: msg = f'hit {hit_pdb_code}_{hit_chain_id} did not pass prefilter: {str(e)}' logging.info(msg) if strict_error_check and isinstance(e, (DateError, DuplicateError)): # In strict mode we treat some prefilter cases as errors. return SingleHitResult(features=None, error=msg, warning=None) return SingleHitResult(features=None, error=None, warning=None) mapping = _build_query_to_hit_index_mapping( hit.query, hit.hit_sequence, hit.indices_hit, hit.indices_query, query_sequence) # The mapping is from the query to the actual hit sequence, so we need to # remove gaps (which regardless have a missing confidence score). template_sequence = hit.hit_sequence.replace('-', '') cif_path = os.path.join(mmcif_dir, hit_pdb_code + '.cif') logging.debug('Reading PDB entry from %s. Query: %s, template: %s', cif_path, query_sequence, template_sequence) # Fail if we can't find the mmCIF file. cif_string = _read_file(cif_path) parsing_result = mmcif_parsing.parse( file_id=hit_pdb_code, mmcif_string=cif_string) if parsing_result.mmcif_object is not None: hit_release_date = datetime.datetime.strptime( parsing_result.mmcif_object.header['release_date'], '%Y-%m-%d') if hit_release_date > max_template_date: error = ('Template %s date (%s) > max template date (%s).' % (hit_pdb_code, hit_release_date, max_template_date)) if strict_error_check: return SingleHitResult(features=None, error=error, warning=None) else: logging.debug(error) return SingleHitResult(features=None, error=None, warning=None) try: features, realign_warning = _extract_template_features( mmcif_object=parsing_result.mmcif_object, pdb_id=hit_pdb_code, mapping=mapping, template_sequence=template_sequence, query_sequence=query_sequence, template_chain_id=hit_chain_id, kalign_binary_path=kalign_binary_path) if hit.sum_probs is None: features['template_sum_probs'] = [0] else: features['template_sum_probs'] = [hit.sum_probs] # It is possible there were some errors when parsing the other chains in the # mmCIF file, but the template features for the chain we want were still # computed. In such case the mmCIF parsing errors are not relevant. return SingleHitResult( features=features, error=None, warning=realign_warning) except (NoChainsError, NoAtomDataInTemplateError, TemplateAtomMaskAllZerosError) as e: # These 3 errors indicate missing mmCIF experimental data rather than a # problem with the template search, so turn them into warnings. 
warning = ('%s_%s (sum_probs: %s, rank: %s): feature extracting errors: ' '%s, mmCIF parsing errors: %s' % (hit_pdb_code, hit_chain_id, hit.sum_probs, hit.index, str(e), parsing_result.errors)) if strict_error_check: return SingleHitResult(features=None, error=warning, warning=None) else: return SingleHitResult(features=None, error=None, warning=warning) except Error as e: error = ('%s_%s (sum_probs: %.2f, rank: %d): feature extracting errors: ' '%s, mmCIF parsing errors: %s' % (hit_pdb_code, hit_chain_id, hit.sum_probs, hit.index, str(e), parsing_result.errors)) return SingleHitResult(features=None, error=error, warning=None) @dataclasses.dataclass(frozen=True) class TemplateSearchResult: features: Mapping[str, Any] errors: Sequence[str] warnings: Sequence[str] class TemplateHitFeaturizer(abc.ABC): """An abstract base class for turning template hits to template features.""" def __init__( self, mmcif_dir: str, max_template_date: str, max_hits: int, kalign_binary_path: str, release_dates_path: Optional[str], obsolete_pdbs_path: Optional[str], strict_error_check: bool = False): """Initializes the Template Search. Args: mmcif_dir: Path to a directory with mmCIF structures. Once a template ID is found by HHSearch, this directory is used to retrieve the template data. max_template_date: The maximum date permitted for template structures. No template with date higher than this date will be returned. In ISO8601 date format, YYYY-MM-DD. max_hits: The maximum number of templates that will be returned. kalign_binary_path: The path to a kalign executable used for template realignment. release_dates_path: An optional path to a file with a mapping from PDB IDs to their release dates. Thanks to this we don't have to redundantly parse mmCIF files to get that information. obsolete_pdbs_path: An optional path to a file containing a mapping from obsolete PDB IDs to the PDB IDs of their replacements. strict_error_check: If True, then the following will be treated as errors: * If any template date is after the max_template_date. * If any template has identical PDB ID to the query. * If any template is a duplicate of the query. * Any feature computation errors. 
""" self._mmcif_dir = mmcif_dir if not glob.glob(os.path.join(self._mmcif_dir, '*.cif')): logging.error('Could not find CIFs in %s', self._mmcif_dir) raise ValueError(f'Could not find CIFs in {self._mmcif_dir}') try: self._max_template_date = datetime.datetime.strptime( max_template_date, '%Y-%m-%d') except ValueError: raise ValueError( 'max_template_date must be set and have format YYYY-MM-DD.') self._max_hits = max_hits self._kalign_binary_path = kalign_binary_path self._strict_error_check = strict_error_check if release_dates_path: logging.info('Using precomputed release dates %s.', release_dates_path) self._release_dates = _parse_release_dates(release_dates_path) else: self._release_dates = {} if obsolete_pdbs_path: logging.info('Using precomputed obsolete pdbs %s.', obsolete_pdbs_path) self._obsolete_pdbs = _parse_obsolete(obsolete_pdbs_path) else: self._obsolete_pdbs = {} @abc.abstractmethod def get_templates( self, query_sequence: str, hits: Sequence[parsers.TemplateHit]) -> TemplateSearchResult: """Computes the templates for given query sequence.""" class HhsearchHitFeaturizer(TemplateHitFeaturizer): """A class for turning a3m hits from hhsearch to template features.""" def get_templates( self, query_sequence: str, hits: Sequence[parsers.TemplateHit]) -> TemplateSearchResult: """Computes the templates for given query sequence (more details above).""" logging.info('Searching for template for: %s', query_sequence) template_features = {} for template_feature_name in TEMPLATE_FEATURES: template_features[template_feature_name] = [] num_hits = 0 errors = [] warnings = [] for hit in sorted(hits, key=lambda x: x.sum_probs, reverse=True): # We got all the templates we wanted, stop processing hits. if num_hits >= self._max_hits: break result = _process_single_hit( query_sequence=query_sequence, hit=hit, mmcif_dir=self._mmcif_dir, max_template_date=self._max_template_date, release_dates=self._release_dates, obsolete_pdbs=self._obsolete_pdbs, strict_error_check=self._strict_error_check, kalign_binary_path=self._kalign_binary_path) if result.error: errors.append(result.error) # There could be an error even if there are some results, e.g. thrown by # other unparsable chains in the same mmCIF file. if result.warning: warnings.append(result.warning) if result.features is None: logging.info('Skipped invalid hit %s, error: %s, warning: %s', hit.name, result.error, result.warning) else: # Increment the hit counter, since we got features out of this hit. num_hits += 1 for k in template_features: template_features[k].append(result.features[k]) for name in template_features: if num_hits > 0: template_features[name] = np.stack( template_features[name], axis=0).astype(TEMPLATE_FEATURES[name]) else: # Make sure the feature has correct dtype even if empty. 
template_features[name] = np.array([], dtype=TEMPLATE_FEATURES[name]) return TemplateSearchResult( features=template_features, errors=errors, warnings=warnings) class HmmsearchHitFeaturizer(TemplateHitFeaturizer): """A class for turning a3m hits from hmmsearch to template features.""" def get_templates( self, query_sequence: str, hits: Sequence[parsers.TemplateHit]) -> TemplateSearchResult: """Computes the templates for given query sequence (more details above).""" logging.info('Searching for template for: %s', query_sequence) template_features = {} for template_feature_name in TEMPLATE_FEATURES: template_features[template_feature_name] = [] already_seen = set() errors = [] warnings = [] if not hits or hits[0].sum_probs is None: sorted_hits = hits else: sorted_hits = sorted(hits, key=lambda x: x.sum_probs, reverse=True) for hit in sorted_hits: # We got all the templates we wanted, stop processing hits. if len(already_seen) >= self._max_hits: break result = _process_single_hit( query_sequence=query_sequence, hit=hit, mmcif_dir=self._mmcif_dir, max_template_date=self._max_template_date, release_dates=self._release_dates, obsolete_pdbs=self._obsolete_pdbs, strict_error_check=self._strict_error_check, kalign_binary_path=self._kalign_binary_path) if result.error: errors.append(result.error) # There could be an error even if there are some results, e.g. thrown by # other unparsable chains in the same mmCIF file. if result.warning: warnings.append(result.warning) if result.features is None: logging.debug('Skipped invalid hit %s, error: %s, warning: %s', hit.name, result.error, result.warning) else: already_seen_key = result.features['template_sequence'] if already_seen_key in already_seen: continue # Increment the hit counter, since we got features out of this hit. already_seen.add(already_seen_key) for k in template_features: template_features[k].append(result.features[k]) if already_seen: for name in template_features: template_features[name] = np.stack( template_features[name], axis=0).astype(TEMPLATE_FEATURES[name]) else: num_res = len(query_sequence) # Construct a default template with all zeros. template_features = { 'template_aatype': np.zeros( (1, num_res, len(residue_constants.restypes_with_x_and_gap)), np.float32), 'template_all_atom_masks': np.zeros( (1, num_res, residue_constants.atom_type_num), np.float32), 'template_all_atom_positions': np.zeros( (1, num_res, residue_constants.atom_type_num, 3), np.float32), 'template_domain_names': np.array([''.encode()], dtype=object), 'template_sequence': np.array([''.encode()], dtype=object), 'template_sum_probs': np.array([0], dtype=np.float32) } return TemplateSearchResult( features=template_features, errors=errors, warnings=warnings)
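# Illustrative usage sketch (editor's addition, not part of the original
# module). All paths are placeholders; a real run needs a local mmCIF mirror,
# a kalign binary, and hits produced by the HHSearch/HHBlits template search.
if __name__ == '__main__':
  featurizer = HhsearchHitFeaturizer(
      mmcif_dir='/data/pdb_mmcif/mmcif_files',  # placeholder path
      max_template_date='2021-11-01',
      max_hits=20,
      kalign_binary_path='/usr/bin/kalign',  # placeholder path
      release_dates_path=None,
      obsolete_pdbs_path=None)
  # With an empty hit list this simply returns empty template features; in a
  # real pipeline `hits` would come from parsers.parse_hhr().
  result = featurizer.get_templates(
      query_sequence='MTEYKLVVVGAGGVGKSALT', hits=[])
  print(result.features['template_domain_names'].shape, result.errors)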
alphafold-main
alphafold/data/templates.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for building the input features for the AlphaFold model.""" import os from typing import Any, Mapping, MutableMapping, Optional, Sequence, Union from absl import logging from alphafold.common import residue_constants from alphafold.data import msa_identifiers from alphafold.data import parsers from alphafold.data import templates from alphafold.data.tools import hhblits from alphafold.data.tools import hhsearch from alphafold.data.tools import hmmsearch from alphafold.data.tools import jackhmmer import numpy as np # Internal import (7716). FeatureDict = MutableMapping[str, np.ndarray] TemplateSearcher = Union[hhsearch.HHSearch, hmmsearch.Hmmsearch] def make_sequence_features( sequence: str, description: str, num_res: int) -> FeatureDict: """Constructs a feature dict of sequence features.""" features = {} features['aatype'] = residue_constants.sequence_to_onehot( sequence=sequence, mapping=residue_constants.restype_order_with_x, map_unknown_to_x=True) features['between_segment_residues'] = np.zeros((num_res,), dtype=np.int32) features['domain_name'] = np.array([description.encode('utf-8')], dtype=np.object_) features['residue_index'] = np.array(range(num_res), dtype=np.int32) features['seq_length'] = np.array([num_res] * num_res, dtype=np.int32) features['sequence'] = np.array([sequence.encode('utf-8')], dtype=np.object_) return features def make_msa_features(msas: Sequence[parsers.Msa]) -> FeatureDict: """Constructs a feature dict of MSA features.""" if not msas: raise ValueError('At least one MSA must be provided.') int_msa = [] deletion_matrix = [] species_ids = [] seen_sequences = set() for msa_index, msa in enumerate(msas): if not msa: raise ValueError(f'MSA {msa_index} must contain at least one sequence.') for sequence_index, sequence in enumerate(msa.sequences): if sequence in seen_sequences: continue seen_sequences.add(sequence) int_msa.append( [residue_constants.HHBLITS_AA_TO_ID[res] for res in sequence]) deletion_matrix.append(msa.deletion_matrix[sequence_index]) identifiers = msa_identifiers.get_identifiers( msa.descriptions[sequence_index]) species_ids.append(identifiers.species_id.encode('utf-8')) num_res = len(msas[0].sequences[0]) num_alignments = len(int_msa) features = {} features['deletion_matrix_int'] = np.array(deletion_matrix, dtype=np.int32) features['msa'] = np.array(int_msa, dtype=np.int32) features['num_alignments'] = np.array( [num_alignments] * num_res, dtype=np.int32) features['msa_species_identifiers'] = np.array(species_ids, dtype=np.object_) return features def run_msa_tool(msa_runner, input_fasta_path: str, msa_out_path: str, msa_format: str, use_precomputed_msas: bool, max_sto_sequences: Optional[int] = None ) -> Mapping[str, Any]: """Runs an MSA tool, checking if output already exists first.""" if not use_precomputed_msas or not os.path.exists(msa_out_path): if msa_format == 'sto' and max_sto_sequences is not None: result = msa_runner.query(input_fasta_path, 
max_sto_sequences)[0] # pytype: disable=wrong-arg-count else: result = msa_runner.query(input_fasta_path)[0] with open(msa_out_path, 'w') as f: f.write(result[msa_format]) else: logging.warning('Reading MSA from file %s', msa_out_path) if msa_format == 'sto' and max_sto_sequences is not None: precomputed_msa = parsers.truncate_stockholm_msa( msa_out_path, max_sto_sequences) result = {'sto': precomputed_msa} else: with open(msa_out_path, 'r') as f: result = {msa_format: f.read()} return result class DataPipeline: """Runs the alignment tools and assembles the input features.""" def __init__(self, jackhmmer_binary_path: str, hhblits_binary_path: str, uniref90_database_path: str, mgnify_database_path: str, bfd_database_path: Optional[str], uniref30_database_path: Optional[str], small_bfd_database_path: Optional[str], template_searcher: TemplateSearcher, template_featurizer: templates.TemplateHitFeaturizer, use_small_bfd: bool, mgnify_max_hits: int = 501, uniref_max_hits: int = 10000, use_precomputed_msas: bool = False): """Initializes the data pipeline.""" self._use_small_bfd = use_small_bfd self.jackhmmer_uniref90_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=uniref90_database_path) if use_small_bfd: self.jackhmmer_small_bfd_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=small_bfd_database_path) else: self.hhblits_bfd_uniref_runner = hhblits.HHBlits( binary_path=hhblits_binary_path, databases=[bfd_database_path, uniref30_database_path]) self.jackhmmer_mgnify_runner = jackhmmer.Jackhmmer( binary_path=jackhmmer_binary_path, database_path=mgnify_database_path) self.template_searcher = template_searcher self.template_featurizer = template_featurizer self.mgnify_max_hits = mgnify_max_hits self.uniref_max_hits = uniref_max_hits self.use_precomputed_msas = use_precomputed_msas def process(self, input_fasta_path: str, msa_output_dir: str) -> FeatureDict: """Runs alignment tools on the input sequence and creates features.""" with open(input_fasta_path) as f: input_fasta_str = f.read() input_seqs, input_descs = parsers.parse_fasta(input_fasta_str) if len(input_seqs) != 1: raise ValueError( f'More than one input sequence found in {input_fasta_path}.') input_sequence = input_seqs[0] input_description = input_descs[0] num_res = len(input_sequence) uniref90_out_path = os.path.join(msa_output_dir, 'uniref90_hits.sto') jackhmmer_uniref90_result = run_msa_tool( msa_runner=self.jackhmmer_uniref90_runner, input_fasta_path=input_fasta_path, msa_out_path=uniref90_out_path, msa_format='sto', use_precomputed_msas=self.use_precomputed_msas, max_sto_sequences=self.uniref_max_hits) mgnify_out_path = os.path.join(msa_output_dir, 'mgnify_hits.sto') jackhmmer_mgnify_result = run_msa_tool( msa_runner=self.jackhmmer_mgnify_runner, input_fasta_path=input_fasta_path, msa_out_path=mgnify_out_path, msa_format='sto', use_precomputed_msas=self.use_precomputed_msas, max_sto_sequences=self.mgnify_max_hits) msa_for_templates = jackhmmer_uniref90_result['sto'] msa_for_templates = parsers.deduplicate_stockholm_msa(msa_for_templates) msa_for_templates = parsers.remove_empty_columns_from_stockholm_msa( msa_for_templates) if self.template_searcher.input_format == 'sto': pdb_templates_result = self.template_searcher.query(msa_for_templates) elif self.template_searcher.input_format == 'a3m': uniref90_msa_as_a3m = parsers.convert_stockholm_to_a3m(msa_for_templates) pdb_templates_result = self.template_searcher.query(uniref90_msa_as_a3m) else: raise ValueError('Unrecognized 
template input format: ' f'{self.template_searcher.input_format}') pdb_hits_out_path = os.path.join( msa_output_dir, f'pdb_hits.{self.template_searcher.output_format}') with open(pdb_hits_out_path, 'w') as f: f.write(pdb_templates_result) uniref90_msa = parsers.parse_stockholm(jackhmmer_uniref90_result['sto']) mgnify_msa = parsers.parse_stockholm(jackhmmer_mgnify_result['sto']) pdb_template_hits = self.template_searcher.get_template_hits( output_string=pdb_templates_result, input_sequence=input_sequence) if self._use_small_bfd: bfd_out_path = os.path.join(msa_output_dir, 'small_bfd_hits.sto') jackhmmer_small_bfd_result = run_msa_tool( msa_runner=self.jackhmmer_small_bfd_runner, input_fasta_path=input_fasta_path, msa_out_path=bfd_out_path, msa_format='sto', use_precomputed_msas=self.use_precomputed_msas) bfd_msa = parsers.parse_stockholm(jackhmmer_small_bfd_result['sto']) else: bfd_out_path = os.path.join(msa_output_dir, 'bfd_uniref_hits.a3m') hhblits_bfd_uniref_result = run_msa_tool( msa_runner=self.hhblits_bfd_uniref_runner, input_fasta_path=input_fasta_path, msa_out_path=bfd_out_path, msa_format='a3m', use_precomputed_msas=self.use_precomputed_msas) bfd_msa = parsers.parse_a3m(hhblits_bfd_uniref_result['a3m']) templates_result = self.template_featurizer.get_templates( query_sequence=input_sequence, hits=pdb_template_hits) sequence_features = make_sequence_features( sequence=input_sequence, description=input_description, num_res=num_res) msa_features = make_msa_features((uniref90_msa, bfd_msa, mgnify_msa)) logging.info('Uniref90 MSA size: %d sequences.', len(uniref90_msa)) logging.info('BFD MSA size: %d sequences.', len(bfd_msa)) logging.info('MGnify MSA size: %d sequences.', len(mgnify_msa)) logging.info('Final (deduplicated) MSA size: %d sequences.', msa_features['num_alignments'][0]) logging.info('Total number of templates (NB: this can include bad ' 'templates and is later filtered to top 4): %d.', templates_result.features['template_domain_names'].shape[0]) return {**sequence_features, **msa_features, **templates_result.features}
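# Minimal wiring sketch (editor's addition, not part of the original module).
# Every binary and database path below is a placeholder; the sketch only shows
# how the searcher, featurizer and pipeline defined above fit together.
if __name__ == '__main__':
  template_searcher = hhsearch.HHSearch(
      binary_path='/usr/bin/hhsearch',  # placeholder path
      databases=['/data/pdb70/pdb70'])  # placeholder path
  template_featurizer = templates.HhsearchHitFeaturizer(
      mmcif_dir='/data/pdb_mmcif/mmcif_files',
      max_template_date='2021-11-01',
      max_hits=20,
      kalign_binary_path='/usr/bin/kalign',
      release_dates_path=None,
      obsolete_pdbs_path=None)
  data_pipeline = DataPipeline(
      jackhmmer_binary_path='/usr/bin/jackhmmer',
      hhblits_binary_path='/usr/bin/hhblits',
      uniref90_database_path='/data/uniref90/uniref90.fasta',
      mgnify_database_path='/data/mgnify/mgy_clusters.fa',
      bfd_database_path=None,
      uniref30_database_path=None,
      small_bfd_database_path='/data/small_bfd/bfd-first_non_consensus_sequences.fasta',
      template_searcher=template_searcher,
      template_featurizer=template_featurizer,
      use_small_bfd=True,
      use_precomputed_msas=False)
  features = data_pipeline.process(
      input_fasta_path='query.fasta',  # placeholder single-sequence FASTA
      msa_output_dir='/tmp/msas')
  print(sorted(features.keys()))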
alphafold-main
alphafold/data/pipeline.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pairing logic for multimer data pipeline.""" import collections import functools import string from typing import Any, Dict, Iterable, List, Sequence from alphafold.common import residue_constants from alphafold.data import pipeline import numpy as np import pandas as pd import scipy.linalg MSA_GAP_IDX = residue_constants.restypes_with_x_and_gap.index('-') SEQUENCE_GAP_CUTOFF = 0.5 SEQUENCE_SIMILARITY_CUTOFF = 0.9 MSA_PAD_VALUES = {'msa_all_seq': MSA_GAP_IDX, 'msa_mask_all_seq': 1, 'deletion_matrix_all_seq': 0, 'deletion_matrix_int_all_seq': 0, 'msa': MSA_GAP_IDX, 'msa_mask': 1, 'deletion_matrix': 0, 'deletion_matrix_int': 0} MSA_FEATURES = ('msa', 'msa_mask', 'deletion_matrix', 'deletion_matrix_int') SEQ_FEATURES = ('residue_index', 'aatype', 'all_atom_positions', 'all_atom_mask', 'seq_mask', 'between_segment_residues', 'has_alt_locations', 'has_hetatoms', 'asym_id', 'entity_id', 'sym_id', 'entity_mask', 'deletion_mean', 'prediction_atom_mask', 'literature_positions', 'atom_indices_to_group_indices', 'rigid_group_default_frame') TEMPLATE_FEATURES = ('template_aatype', 'template_all_atom_positions', 'template_all_atom_mask') CHAIN_FEATURES = ('num_alignments', 'seq_length') def create_paired_features( chains: Iterable[pipeline.FeatureDict]) -> List[pipeline.FeatureDict]: """Returns the original chains with paired NUM_SEQ features. Args: chains: A list of feature dictionaries for each chain. Returns: A list of feature dictionaries with sequence features including only rows to be paired. """ chains = list(chains) chain_keys = chains[0].keys() if len(chains) < 2: return chains else: updated_chains = [] paired_chains_to_paired_row_indices = pair_sequences(chains) paired_rows = reorder_paired_rows( paired_chains_to_paired_row_indices) for chain_num, chain in enumerate(chains): new_chain = {k: v for k, v in chain.items() if '_all_seq' not in k} for feature_name in chain_keys: if feature_name.endswith('_all_seq'): feats_padded = pad_features(chain[feature_name], feature_name) new_chain[feature_name] = feats_padded[paired_rows[:, chain_num]] new_chain['num_alignments_all_seq'] = np.asarray( len(paired_rows[:, chain_num])) updated_chains.append(new_chain) return updated_chains def pad_features(feature: np.ndarray, feature_name: str) -> np.ndarray: """Add a 'padding' row at the end of the features list. The padding row will be selected as a 'paired' row in the case of partial alignment - for the chain that doesn't have paired alignment. Args: feature: The feature to be padded. feature_name: The name of the feature to be padded. Returns: The feature with an additional padding row. 
""" assert feature.dtype != np.dtype(np.string_) if feature_name in ('msa_all_seq', 'msa_mask_all_seq', 'deletion_matrix_all_seq', 'deletion_matrix_int_all_seq'): num_res = feature.shape[1] padding = MSA_PAD_VALUES[feature_name] * np.ones([1, num_res], feature.dtype) elif feature_name == 'msa_species_identifiers_all_seq': padding = [b''] else: return feature feats_padded = np.concatenate([feature, padding], axis=0) return feats_padded def _make_msa_df(chain_features: pipeline.FeatureDict) -> pd.DataFrame: """Makes dataframe with msa features needed for msa pairing.""" chain_msa = chain_features['msa_all_seq'] query_seq = chain_msa[0] per_seq_similarity = np.sum( query_seq[None] == chain_msa, axis=-1) / float(len(query_seq)) per_seq_gap = np.sum(chain_msa == 21, axis=-1) / float(len(query_seq)) msa_df = pd.DataFrame({ 'msa_species_identifiers': chain_features['msa_species_identifiers_all_seq'], 'msa_row': np.arange(len( chain_features['msa_species_identifiers_all_seq'])), 'msa_similarity': per_seq_similarity, 'gap': per_seq_gap }) return msa_df def _create_species_dict(msa_df: pd.DataFrame) -> Dict[bytes, pd.DataFrame]: """Creates mapping from species to msa dataframe of that species.""" species_lookup = {} for species, species_df in msa_df.groupby('msa_species_identifiers'): species_lookup[species] = species_df return species_lookup def _match_rows_by_sequence_similarity(this_species_msa_dfs: List[pd.DataFrame] ) -> List[List[int]]: """Finds MSA sequence pairings across chains based on sequence similarity. Each chain's MSA sequences are first sorted by their sequence similarity to their respective target sequence. The sequences are then paired, starting from the sequences most similar to their target sequence. Args: this_species_msa_dfs: a list of dataframes containing MSA features for sequences for a specific species. Returns: A list of lists, each containing M indices corresponding to paired MSA rows, where M is the number of chains. """ all_paired_msa_rows = [] num_seqs = [len(species_df) for species_df in this_species_msa_dfs if species_df is not None] take_num_seqs = np.min(num_seqs) sort_by_similarity = ( lambda x: x.sort_values('msa_similarity', axis=0, ascending=False)) for species_df in this_species_msa_dfs: if species_df is not None: species_df_sorted = sort_by_similarity(species_df) msa_rows = species_df_sorted.msa_row.iloc[:take_num_seqs].values else: msa_rows = [-1] * take_num_seqs # take the last 'padding' row all_paired_msa_rows.append(msa_rows) all_paired_msa_rows = list(np.array(all_paired_msa_rows).transpose()) return all_paired_msa_rows def pair_sequences(examples: List[pipeline.FeatureDict] ) -> Dict[int, np.ndarray]: """Returns indices for paired MSA sequences across chains.""" num_examples = len(examples) all_chain_species_dict = [] common_species = set() for chain_features in examples: msa_df = _make_msa_df(chain_features) species_dict = _create_species_dict(msa_df) all_chain_species_dict.append(species_dict) common_species.update(set(species_dict)) common_species = sorted(common_species) common_species.remove(b'') # Remove target sequence species. 
all_paired_msa_rows = [np.zeros(len(examples), int)] all_paired_msa_rows_dict = {k: [] for k in range(num_examples)} all_paired_msa_rows_dict[num_examples] = [np.zeros(len(examples), int)] for species in common_species: if not species: continue this_species_msa_dfs = [] species_dfs_present = 0 for species_dict in all_chain_species_dict: if species in species_dict: this_species_msa_dfs.append(species_dict[species]) species_dfs_present += 1 else: this_species_msa_dfs.append(None) # Skip species that are present in only one chain. if species_dfs_present <= 1: continue if np.any( np.array([len(species_df) for species_df in this_species_msa_dfs if isinstance(species_df, pd.DataFrame)]) > 600): continue paired_msa_rows = _match_rows_by_sequence_similarity(this_species_msa_dfs) all_paired_msa_rows.extend(paired_msa_rows) all_paired_msa_rows_dict[species_dfs_present].extend(paired_msa_rows) all_paired_msa_rows_dict = { num_examples: np.array(paired_msa_rows) for num_examples, paired_msa_rows in all_paired_msa_rows_dict.items() } return all_paired_msa_rows_dict def reorder_paired_rows(all_paired_msa_rows_dict: Dict[int, np.ndarray] ) -> np.ndarray: """Creates a list of indices of paired MSA rows across chains. Args: all_paired_msa_rows_dict: a mapping from the number of paired chains to the paired indices. Returns: a list of lists, each containing indices of paired MSA rows across chains. The paired-index lists are ordered by: 1) the number of chains in the paired alignment, i.e, all-chain pairings will come first. 2) e-values """ all_paired_msa_rows = [] for num_pairings in sorted(all_paired_msa_rows_dict, reverse=True): paired_rows = all_paired_msa_rows_dict[num_pairings] paired_rows_product = abs(np.array([np.prod(rows) for rows in paired_rows])) paired_rows_sort_index = np.argsort(paired_rows_product) all_paired_msa_rows.extend(paired_rows[paired_rows_sort_index]) return np.array(all_paired_msa_rows) def block_diag(*arrs: np.ndarray, pad_value: float = 0.0) -> np.ndarray: """Like scipy.linalg.block_diag but with an optional padding value.""" ones_arrs = [np.ones_like(x) for x in arrs] off_diag_mask = 1.0 - scipy.linalg.block_diag(*ones_arrs) diag = scipy.linalg.block_diag(*arrs) diag += (off_diag_mask * pad_value).astype(diag.dtype) return diag def _correct_post_merged_feats( np_example: pipeline.FeatureDict, np_chains_list: Sequence[pipeline.FeatureDict], pair_msa_sequences: bool) -> pipeline.FeatureDict: """Adds features that need to be computed/recomputed post merging.""" np_example['seq_length'] = np.asarray(np_example['aatype'].shape[0], dtype=np.int32) np_example['num_alignments'] = np.asarray(np_example['msa'].shape[0], dtype=np.int32) if not pair_msa_sequences: # Generate a bias that is 1 for the first row of every block in the # block diagonal MSA - i.e. make sure the cluster stack always includes # the query sequences for each chain (since the first row is the query # sequence). cluster_bias_masks = [] for chain in np_chains_list: mask = np.zeros(chain['msa'].shape[0]) mask[0] = 1 cluster_bias_masks.append(mask) np_example['cluster_bias_mask'] = np.concatenate(cluster_bias_masks) # Initialize Bert mask with masked out off diagonals. msa_masks = [np.ones(x['msa'].shape, dtype=np.float32) for x in np_chains_list] np_example['bert_mask'] = block_diag( *msa_masks, pad_value=0) else: np_example['cluster_bias_mask'] = np.zeros(np_example['msa'].shape[0]) np_example['cluster_bias_mask'][0] = 1 # Initialize Bert mask with masked out off diagonals. 
msa_masks = [np.ones(x['msa'].shape, dtype=np.float32) for x in np_chains_list] msa_masks_all_seq = [np.ones(x['msa_all_seq'].shape, dtype=np.float32) for x in np_chains_list] msa_mask_block_diag = block_diag( *msa_masks, pad_value=0) msa_mask_all_seq = np.concatenate(msa_masks_all_seq, axis=1) np_example['bert_mask'] = np.concatenate( [msa_mask_all_seq, msa_mask_block_diag], axis=0) return np_example def _pad_templates(chains: Sequence[pipeline.FeatureDict], max_templates: int) -> Sequence[pipeline.FeatureDict]: """For each chain pad the number of templates to a fixed size. Args: chains: A list of protein chains. max_templates: Each chain will be padded to have this many templates. Returns: The list of chains, updated to have template features padded to max_templates. """ for chain in chains: for k, v in chain.items(): if k in TEMPLATE_FEATURES: padding = np.zeros_like(v.shape) padding[0] = max_templates - v.shape[0] padding = [(0, p) for p in padding] chain[k] = np.pad(v, padding, mode='constant') return chains def _merge_features_from_multiple_chains( chains: Sequence[pipeline.FeatureDict], pair_msa_sequences: bool) -> pipeline.FeatureDict: """Merge features from multiple chains. Args: chains: A list of feature dictionaries that we want to merge. pair_msa_sequences: Whether to concatenate MSA features along the num_res dimension (if True), or to block diagonalize them (if False). Returns: A feature dictionary for the merged example. """ merged_example = {} for feature_name in chains[0]: feats = [x[feature_name] for x in chains] feature_name_split = feature_name.split('_all_seq')[0] if feature_name_split in MSA_FEATURES: if pair_msa_sequences or '_all_seq' in feature_name: merged_example[feature_name] = np.concatenate(feats, axis=1) else: merged_example[feature_name] = block_diag( *feats, pad_value=MSA_PAD_VALUES[feature_name]) elif feature_name_split in SEQ_FEATURES: merged_example[feature_name] = np.concatenate(feats, axis=0) elif feature_name_split in TEMPLATE_FEATURES: merged_example[feature_name] = np.concatenate(feats, axis=1) elif feature_name_split in CHAIN_FEATURES: merged_example[feature_name] = np.sum(x for x in feats).astype(np.int32) else: merged_example[feature_name] = feats[0] return merged_example def _merge_homomers_dense_msa( chains: Iterable[pipeline.FeatureDict]) -> Sequence[pipeline.FeatureDict]: """Merge all identical chains, making the resulting MSA dense. Args: chains: An iterable of features for each chain. Returns: A list of feature dictionaries. All features with the same entity_id will be merged - MSA features will be concatenated along the num_res dimension - making them dense. 
""" entity_chains = collections.defaultdict(list) for chain in chains: entity_id = chain['entity_id'][0] entity_chains[entity_id].append(chain) grouped_chains = [] for entity_id in sorted(entity_chains): chains = entity_chains[entity_id] grouped_chains.append(chains) chains = [ _merge_features_from_multiple_chains(chains, pair_msa_sequences=True) for chains in grouped_chains] return chains def _concatenate_paired_and_unpaired_features( example: pipeline.FeatureDict) -> pipeline.FeatureDict: """Merges paired and block-diagonalised features.""" features = MSA_FEATURES for feature_name in features: if feature_name in example: feat = example[feature_name] feat_all_seq = example[feature_name + '_all_seq'] merged_feat = np.concatenate([feat_all_seq, feat], axis=0) example[feature_name] = merged_feat example['num_alignments'] = np.array(example['msa'].shape[0], dtype=np.int32) return example def merge_chain_features(np_chains_list: List[pipeline.FeatureDict], pair_msa_sequences: bool, max_templates: int) -> pipeline.FeatureDict: """Merges features for multiple chains to single FeatureDict. Args: np_chains_list: List of FeatureDicts for each chain. pair_msa_sequences: Whether to merge paired MSAs. max_templates: The maximum number of templates to include. Returns: Single FeatureDict for entire complex. """ np_chains_list = _pad_templates( np_chains_list, max_templates=max_templates) np_chains_list = _merge_homomers_dense_msa(np_chains_list) # Unpaired MSA features will be always block-diagonalised; paired MSA # features will be concatenated. np_example = _merge_features_from_multiple_chains( np_chains_list, pair_msa_sequences=False) if pair_msa_sequences: np_example = _concatenate_paired_and_unpaired_features(np_example) np_example = _correct_post_merged_feats( np_example=np_example, np_chains_list=np_chains_list, pair_msa_sequences=pair_msa_sequences) return np_example def deduplicate_unpaired_sequences( np_chains: List[pipeline.FeatureDict]) -> List[pipeline.FeatureDict]: """Removes unpaired sequences which duplicate a paired sequence.""" feature_names = np_chains[0].keys() msa_features = MSA_FEATURES for chain in np_chains: # Convert the msa_all_seq numpy array to a tuple for hashing. sequence_set = set(tuple(s) for s in chain['msa_all_seq']) keep_rows = [] # Go through unpaired MSA seqs and remove any rows that correspond to the # sequences that are already present in the paired MSA. for row_num, seq in enumerate(chain['msa']): if tuple(seq) not in sequence_set: keep_rows.append(row_num) for feature_name in feature_names: if feature_name in msa_features: chain[feature_name] = chain[feature_name][keep_rows] chain['num_alignments'] = np.array(chain['msa'].shape[0], dtype=np.int32) return np_chains
alphafold-main
alphafold/data/msa_pairing.py
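To make the block-diagonal merging in msa_pairing.py concrete, here is a small hedged sketch that calls block_diag from this module on two toy MSA arrays. The array contents are invented for illustration; 21 is used as the pad value to mirror the gap index used elsewhere in the pairing code.

import numpy as np
from alphafold.data import msa_pairing

# Toy "MSA" blocks for two chains: 2 sequences x 3 residues and 1 sequence x 2 residues.
msa_chain_a = np.array([[1, 2, 3],
                        [4, 5, 6]], dtype=np.int32)
msa_chain_b = np.array([[7, 8]], dtype=np.int32)

# Unpaired MSA features are block-diagonalised; off-diagonal entries are filled
# with the pad value rather than zeros (cf. MSA_PAD_VALUES in this module).
merged = msa_pairing.block_diag(msa_chain_a, msa_chain_b, pad_value=21)
# merged is:
# [[ 1  2  3 21 21]
#  [ 4  5  6 21 21]
#  [21 21 21  7  8]]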
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A Python wrapper for hmmsearch - search profile against a sequence db.""" import os import subprocess from typing import Optional, Sequence from absl import logging from alphafold.data import parsers from alphafold.data.tools import hmmbuild from alphafold.data.tools import utils # Internal import (7716). class Hmmsearch(object): """Python wrapper of the hmmsearch binary.""" def __init__(self, *, binary_path: str, hmmbuild_binary_path: str, database_path: str, flags: Optional[Sequence[str]] = None): """Initializes the Python hmmsearch wrapper. Args: binary_path: The path to the hmmsearch executable. hmmbuild_binary_path: The path to the hmmbuild executable. Used to build an hmm from an input a3m. database_path: The path to the hmmsearch database (FASTA format). flags: List of flags to be used by hmmsearch. Raises: RuntimeError: If hmmsearch binary not found within the path. """ self.binary_path = binary_path self.hmmbuild_runner = hmmbuild.Hmmbuild(binary_path=hmmbuild_binary_path) self.database_path = database_path if flags is None: # Default hmmsearch run settings. flags = ['--F1', '0.1', '--F2', '0.1', '--F3', '0.1', '--incE', '100', '-E', '100', '--domE', '100', '--incdomE', '100'] self.flags = flags if not os.path.exists(self.database_path): logging.error('Could not find hmmsearch database %s', database_path) raise ValueError(f'Could not find hmmsearch database {database_path}') @property def output_format(self) -> str: return 'sto' @property def input_format(self) -> str: return 'sto' def query(self, msa_sto: str) -> str: """Queries the database using hmmsearch using a given stockholm msa.""" hmm = self.hmmbuild_runner.build_profile_from_sto(msa_sto, model_construction='hand') return self.query_with_hmm(hmm) def query_with_hmm(self, hmm: str) -> str: """Queries the database using hmmsearch using a given hmm.""" with utils.tmpdir_manager() as query_tmp_dir: hmm_input_path = os.path.join(query_tmp_dir, 'query.hmm') out_path = os.path.join(query_tmp_dir, 'output.sto') with open(hmm_input_path, 'w') as f: f.write(hmm) cmd = [ self.binary_path, '--noali', # Don't include the alignment in stdout. 
'--cpu', '8' ] # If adding flags, we have to do so before the output and input: if self.flags: cmd.extend(self.flags) cmd.extend([ '-A', out_path, hmm_input_path, self.database_path, ]) logging.info('Launching sub-process %s', cmd) process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with utils.timing( f'hmmsearch ({os.path.basename(self.database_path)}) query'): stdout, stderr = process.communicate() retcode = process.wait() if retcode: raise RuntimeError( 'hmmsearch failed:\nstdout:\n%s\n\nstderr:\n%s\n' % ( stdout.decode('utf-8'), stderr.decode('utf-8'))) with open(out_path) as f: out_msa = f.read() return out_msa def get_template_hits(self, output_string: str, input_sequence: str) -> Sequence[parsers.TemplateHit]: """Gets parsed template hits from the raw string output by the tool.""" a3m_string = parsers.convert_stockholm_to_a3m(output_string, remove_first_row_gaps=False) template_hits = parsers.parse_hmmsearch_a3m( query_sequence=input_sequence, a3m_string=a3m_string, skip_first=False) return template_hits
alphafold-main
alphafold/data/tools/hmmsearch.py
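A minimal, hedged usage sketch for the Hmmsearch wrapper above. The binary and database paths are placeholders, and the tiny Stockholm MSA is invented for illustration; a real run would pass a Stockholm MSA with reference annotation (required by hmmbuild --hand) produced earlier in the data pipeline.

from alphafold.data.tools import hmmsearch

query_sequence = 'MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ'
msa_sto = ('# STOCKHOLM 1.0\n'
           f'query {query_sequence}\n'
           '//\n')

searcher = hmmsearch.Hmmsearch(
    binary_path='/usr/bin/hmmsearch',          # placeholder path
    hmmbuild_binary_path='/usr/bin/hmmbuild',  # placeholder path
    database_path='/data/pdb_seqres.txt')      # placeholder FASTA database

# Builds an HMM from the MSA, searches the database, then parses template hits.
sto_output = searcher.query(msa_sto)
template_hits = searcher.get_template_hits(sto_output, input_sequence=query_sequence)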
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Library to run HHblits from Python.""" import glob import os import subprocess from typing import Any, List, Mapping, Optional, Sequence from absl import logging from alphafold.data.tools import utils # Internal import (7716). _HHBLITS_DEFAULT_P = 20 _HHBLITS_DEFAULT_Z = 500 class HHBlits: """Python wrapper of the HHblits binary.""" def __init__(self, *, binary_path: str, databases: Sequence[str], n_cpu: int = 4, n_iter: int = 3, e_value: float = 0.001, maxseq: int = 1_000_000, realign_max: int = 100_000, maxfilt: int = 100_000, min_prefilter_hits: int = 1000, all_seqs: bool = False, alt: Optional[int] = None, p: int = _HHBLITS_DEFAULT_P, z: int = _HHBLITS_DEFAULT_Z): """Initializes the Python HHblits wrapper. Args: binary_path: The path to the HHblits executable. databases: A sequence of HHblits database paths. This should be the common prefix for the database files (i.e. up to but not including _hhm.ffindex etc.) n_cpu: The number of CPUs to give HHblits. n_iter: The number of HHblits iterations. e_value: The E-value, see HHblits docs for more details. maxseq: The maximum number of rows in an input alignment. Note that this parameter is only supported in HHBlits version 3.1 and higher. realign_max: Max number of HMM-HMM hits to realign. HHblits default: 500. maxfilt: Max number of hits allowed to pass the 2nd prefilter. HHblits default: 20000. min_prefilter_hits: Min number of hits to pass prefilter. HHblits default: 100. all_seqs: Return all sequences in the MSA / Do not filter the result MSA. HHblits default: False. alt: Show up to this many alternative alignments. p: Minimum Prob for a hit to be included in the output hhr file. HHblits default: 20. z: Hard cap on number of hits reported in the hhr file. HHblits default: 500. NB: The relevant HHblits flag is -Z not -z. Raises: RuntimeError: If HHblits binary not found within the path. 
""" self.binary_path = binary_path self.databases = databases for database_path in self.databases: if not glob.glob(database_path + '_*'): logging.error('Could not find HHBlits database %s', database_path) raise ValueError(f'Could not find HHBlits database {database_path}') self.n_cpu = n_cpu self.n_iter = n_iter self.e_value = e_value self.maxseq = maxseq self.realign_max = realign_max self.maxfilt = maxfilt self.min_prefilter_hits = min_prefilter_hits self.all_seqs = all_seqs self.alt = alt self.p = p self.z = z def query(self, input_fasta_path: str) -> List[Mapping[str, Any]]: """Queries the database using HHblits.""" with utils.tmpdir_manager() as query_tmp_dir: a3m_path = os.path.join(query_tmp_dir, 'output.a3m') db_cmd = [] for db_path in self.databases: db_cmd.append('-d') db_cmd.append(db_path) cmd = [ self.binary_path, '-i', input_fasta_path, '-cpu', str(self.n_cpu), '-oa3m', a3m_path, '-o', '/dev/null', '-n', str(self.n_iter), '-e', str(self.e_value), '-maxseq', str(self.maxseq), '-realign_max', str(self.realign_max), '-maxfilt', str(self.maxfilt), '-min_prefilter_hits', str(self.min_prefilter_hits)] if self.all_seqs: cmd += ['-all'] if self.alt: cmd += ['-alt', str(self.alt)] if self.p != _HHBLITS_DEFAULT_P: cmd += ['-p', str(self.p)] if self.z != _HHBLITS_DEFAULT_Z: cmd += ['-Z', str(self.z)] cmd += db_cmd logging.info('Launching subprocess "%s"', ' '.join(cmd)) process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with utils.timing('HHblits query'): stdout, stderr = process.communicate() retcode = process.wait() if retcode: # Logs have a 15k character limit, so log HHblits error line by line. logging.error('HHblits failed. HHblits stderr begin:') for error_line in stderr.decode('utf-8').splitlines(): if error_line.strip(): logging.error(error_line.strip()) logging.error('HHblits stderr end') raise RuntimeError('HHblits failed\nstdout:\n%s\n\nstderr:\n%s\n' % ( stdout.decode('utf-8'), stderr[:500_000].decode('utf-8'))) with open(a3m_path) as f: a3m = f.read() raw_output = dict( a3m=a3m, output=stdout, stderr=stderr, n_iter=self.n_iter, e_value=self.e_value) return [raw_output]
alphafold-main
alphafold/data/tools/hhblits.py
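A short, hedged usage sketch for the HHBlits wrapper above; the binary path, database prefix, and query FASTA path are placeholders that must point at a real installation.

from alphafold.data.tools import hhblits

hhblits_runner = hhblits.HHBlits(
    binary_path='/usr/bin/hhblits',            # placeholder path
    databases=['/data/hhblits_db/db_prefix'],  # placeholder database prefix
    n_cpu=4,
    n_iter=3)

# Returns a single-element list with the raw outputs, including the A3M MSA.
results = hhblits_runner.query('/tmp/query.fasta')  # placeholder FASTA file
a3m_string = results[0]['a3m']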
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Python wrappers for third party tools."""
alphafold-main
alphafold/data/tools/__init__.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A Python wrapper for Kalign.""" import os import subprocess from typing import Sequence from absl import logging from alphafold.data.tools import utils # Internal import (7716). def _to_a3m(sequences: Sequence[str]) -> str: """Converts sequences to an a3m file.""" names = ['sequence %d' % i for i in range(1, len(sequences) + 1)] a3m = [] for sequence, name in zip(sequences, names): a3m.append(u'>' + name + u'\n') a3m.append(sequence + u'\n') return ''.join(a3m) class Kalign: """Python wrapper of the Kalign binary.""" def __init__(self, *, binary_path: str): """Initializes the Python Kalign wrapper. Args: binary_path: The path to the Kalign binary. Raises: RuntimeError: If Kalign binary not found within the path. """ self.binary_path = binary_path def align(self, sequences: Sequence[str]) -> str: """Aligns the sequences and returns the alignment in A3M string. Args: sequences: A list of query sequence strings. The sequences have to be at least 6 residues long (Kalign requires this). Note that the order in which you give the sequences might alter the output slightly as different alignment tree might get constructed. Returns: A string with the alignment in a3m format. Raises: RuntimeError: If Kalign fails. ValueError: If any of the sequences is less than 6 residues long. """ logging.info('Aligning %d sequences', len(sequences)) for s in sequences: if len(s) < 6: raise ValueError('Kalign requires all sequences to be at least 6 ' 'residues long. Got %s (%d residues).' % (s, len(s))) with utils.tmpdir_manager() as query_tmp_dir: input_fasta_path = os.path.join(query_tmp_dir, 'input.fasta') output_a3m_path = os.path.join(query_tmp_dir, 'output.a3m') with open(input_fasta_path, 'w') as f: f.write(_to_a3m(sequences)) cmd = [ self.binary_path, '-i', input_fasta_path, '-o', output_a3m_path, '-format', 'fasta', ] logging.info('Launching subprocess "%s"', ' '.join(cmd)) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with utils.timing('Kalign query'): stdout, stderr = process.communicate() retcode = process.wait() logging.info('Kalign stdout:\n%s\n\nstderr:\n%s\n', stdout.decode('utf-8'), stderr.decode('utf-8')) if retcode: raise RuntimeError('Kalign failed\nstdout:\n%s\n\nstderr:\n%s\n' % (stdout.decode('utf-8'), stderr.decode('utf-8'))) with open(output_a3m_path) as f: a3m = f.read() return a3m
alphafold-main
alphafold/data/tools/kalign.py
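A minimal sketch of the Kalign wrapper above, assuming a kalign binary at the placeholder path; every input sequence must be at least 6 residues long.

from alphafold.data.tools import kalign

aligner = kalign.Kalign(binary_path='/usr/bin/kalign')  # placeholder path

# Three made-up, closely related sequences; the result is returned as a single
# aligned string.
alignment = aligner.align([
    'MKTAYIAKQR',
    'MKTAYICKQR',
    'MKTAYLAKQRG',
])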
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Common utilities for data pipeline tools.""" import contextlib import shutil import tempfile import time from typing import Optional from absl import logging @contextlib.contextmanager def tmpdir_manager(base_dir: Optional[str] = None): """Context manager that deletes a temporary directory on exit.""" tmpdir = tempfile.mkdtemp(dir=base_dir) try: yield tmpdir finally: shutil.rmtree(tmpdir, ignore_errors=True) @contextlib.contextmanager def timing(msg: str): logging.info('Started %s', msg) tic = time.time() yield toc = time.time() logging.info('Finished %s in %.3f seconds', msg, toc - tic)
alphafold-main
alphafold/data/tools/utils.py
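The two helpers above compose naturally; the sketch below runs a toy workload inside a self-cleaning temporary directory while logging how long it took.

import os

from alphafold.data.tools import utils

with utils.tmpdir_manager() as tmp_dir, utils.timing('toy workload'):
  scratch_path = os.path.join(tmp_dir, 'scratch.txt')
  with open(scratch_path, 'w') as f:
    f.write('hello')
# The temporary directory has been deleted at this point, and absl.logging has
# recorded 'Started toy workload' / 'Finished toy workload in ... seconds'.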
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A Python wrapper for hmmbuild - construct HMM profiles from MSA.""" import os import re import subprocess from absl import logging from alphafold.data.tools import utils # Internal import (7716). class Hmmbuild(object): """Python wrapper of the hmmbuild binary.""" def __init__(self, *, binary_path: str, singlemx: bool = False): """Initializes the Python hmmbuild wrapper. Args: binary_path: The path to the hmmbuild executable. singlemx: Whether to use --singlemx flag. If True, it forces HMMBuild to just use a common substitution score matrix. Raises: RuntimeError: If hmmbuild binary not found within the path. """ self.binary_path = binary_path self.singlemx = singlemx def build_profile_from_sto(self, sto: str, model_construction='fast') -> str: """Builds a HHM for the aligned sequences given as an A3M string. Args: sto: A string with the aligned sequences in the Stockholm format. model_construction: Whether to use reference annotation in the msa to determine consensus columns ('hand') or default ('fast'). Returns: A string with the profile in the HMM format. Raises: RuntimeError: If hmmbuild fails. """ return self._build_profile(sto, model_construction=model_construction) def build_profile_from_a3m(self, a3m: str) -> str: """Builds a HHM for the aligned sequences given as an A3M string. Args: a3m: A string with the aligned sequences in the A3M format. Returns: A string with the profile in the HMM format. Raises: RuntimeError: If hmmbuild fails. """ lines = [] for line in a3m.splitlines(): if not line.startswith('>'): line = re.sub('[a-z]+', '', line) # Remove inserted residues. lines.append(line + '\n') msa = ''.join(lines) return self._build_profile(msa, model_construction='fast') def _build_profile(self, msa: str, model_construction: str = 'fast') -> str: """Builds a HMM for the aligned sequences given as an MSA string. Args: msa: A string with the aligned sequences, in A3M or STO format. model_construction: Whether to use reference annotation in the msa to determine consensus columns ('hand') or default ('fast'). Returns: A string with the profile in the HMM format. Raises: RuntimeError: If hmmbuild fails. ValueError: If unspecified arguments are provided. 
""" if model_construction not in {'hand', 'fast'}: raise ValueError(f'Invalid model_construction {model_construction} - only' 'hand and fast supported.') with utils.tmpdir_manager() as query_tmp_dir: input_query = os.path.join(query_tmp_dir, 'query.msa') output_hmm_path = os.path.join(query_tmp_dir, 'output.hmm') with open(input_query, 'w') as f: f.write(msa) cmd = [self.binary_path] # If adding flags, we have to do so before the output and input: if model_construction == 'hand': cmd.append(f'--{model_construction}') if self.singlemx: cmd.append('--singlemx') cmd.extend([ '--amino', output_hmm_path, input_query, ]) logging.info('Launching subprocess %s', cmd) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with utils.timing('hmmbuild query'): stdout, stderr = process.communicate() retcode = process.wait() logging.info('hmmbuild stdout:\n%s\n\nstderr:\n%s\n', stdout.decode('utf-8'), stderr.decode('utf-8')) if retcode: raise RuntimeError('hmmbuild failed\nstdout:\n%s\n\nstderr:\n%s\n' % (stdout.decode('utf-8'), stderr.decode('utf-8'))) with open(output_hmm_path, encoding='utf-8') as f: hmm = f.read() return hmm
alphafold-main
alphafold/data/tools/hmmbuild.py
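A hedged usage sketch for the Hmmbuild wrapper above; the binary path is a placeholder and the tiny A3M content is invented for illustration.

from alphafold.data.tools import hmmbuild

builder = hmmbuild.Hmmbuild(binary_path='/usr/bin/hmmbuild')  # placeholder path

# Lower-case (inserted) residues are stripped before profile construction, so
# the trailing 'g' in the second sequence is removed to restore equal lengths.
a3m = ('>query\n'
       'MKTAYIAKQR\n'
       '>hit1\n'
       'MKTAYICKQRg\n')
hmm_profile = builder.build_profile_from_a3m(a3m)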
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Library to run Jackhmmer from Python.""" from concurrent import futures import glob import os import subprocess from typing import Any, Callable, Mapping, Optional, Sequence from urllib import request from absl import logging from alphafold.data import parsers from alphafold.data.tools import utils # Internal import (7716). class Jackhmmer: """Python wrapper of the Jackhmmer binary.""" def __init__(self, *, binary_path: str, database_path: str, n_cpu: int = 8, n_iter: int = 1, e_value: float = 0.0001, z_value: Optional[int] = None, get_tblout: bool = False, filter_f1: float = 0.0005, filter_f2: float = 0.00005, filter_f3: float = 0.0000005, incdom_e: Optional[float] = None, dom_e: Optional[float] = None, num_streamed_chunks: Optional[int] = None, streaming_callback: Optional[Callable[[int], None]] = None): """Initializes the Python Jackhmmer wrapper. Args: binary_path: The path to the jackhmmer executable. database_path: The path to the jackhmmer database (FASTA format). n_cpu: The number of CPUs to give Jackhmmer. n_iter: The number of Jackhmmer iterations. e_value: The E-value, see Jackhmmer docs for more details. z_value: The Z-value, see Jackhmmer docs for more details. get_tblout: Whether to save tblout string. filter_f1: MSV and biased composition pre-filter, set to >1.0 to turn off. filter_f2: Viterbi pre-filter, set to >1.0 to turn off. filter_f3: Forward pre-filter, set to >1.0 to turn off. incdom_e: Domain e-value criteria for inclusion of domains in MSA/next round. dom_e: Domain e-value criteria for inclusion in tblout. num_streamed_chunks: Number of database chunks to stream over. streaming_callback: Callback function run after each chunk iteration with the iteration number as argument. """ self.binary_path = binary_path self.database_path = database_path self.num_streamed_chunks = num_streamed_chunks if not os.path.exists(self.database_path) and num_streamed_chunks is None: logging.error('Could not find Jackhmmer database %s', database_path) raise ValueError(f'Could not find Jackhmmer database {database_path}') self.n_cpu = n_cpu self.n_iter = n_iter self.e_value = e_value self.z_value = z_value self.filter_f1 = filter_f1 self.filter_f2 = filter_f2 self.filter_f3 = filter_f3 self.incdom_e = incdom_e self.dom_e = dom_e self.get_tblout = get_tblout self.streaming_callback = streaming_callback def _query_chunk(self, input_fasta_path: str, database_path: str, max_sequences: Optional[int] = None) -> Mapping[str, Any]: """Queries the database chunk using Jackhmmer.""" with utils.tmpdir_manager() as query_tmp_dir: sto_path = os.path.join(query_tmp_dir, 'output.sto') # The F1/F2/F3 are the expected proportion to pass each of the filtering # stages (which get progressively more expensive), reducing these # speeds up the pipeline at the expensive of sensitivity. They are # currently set very low to make querying Mgnify run in a reasonable # amount of time. 
cmd_flags = [ # Don't pollute stdout with Jackhmmer output. '-o', '/dev/null', '-A', sto_path, '--noali', '--F1', str(self.filter_f1), '--F2', str(self.filter_f2), '--F3', str(self.filter_f3), '--incE', str(self.e_value), # Report only sequences with E-values <= x in per-sequence output. '-E', str(self.e_value), '--cpu', str(self.n_cpu), '-N', str(self.n_iter) ] if self.get_tblout: tblout_path = os.path.join(query_tmp_dir, 'tblout.txt') cmd_flags.extend(['--tblout', tblout_path]) if self.z_value: cmd_flags.extend(['-Z', str(self.z_value)]) if self.dom_e is not None: cmd_flags.extend(['--domE', str(self.dom_e)]) if self.incdom_e is not None: cmd_flags.extend(['--incdomE', str(self.incdom_e)]) cmd = [self.binary_path] + cmd_flags + [input_fasta_path, database_path] logging.info('Launching subprocess "%s"', ' '.join(cmd)) process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with utils.timing( f'Jackhmmer ({os.path.basename(database_path)}) query'): _, stderr = process.communicate() retcode = process.wait() if retcode: raise RuntimeError( 'Jackhmmer failed\nstderr:\n%s\n' % stderr.decode('utf-8')) # Get e-values for each target name tbl = '' if self.get_tblout: with open(tblout_path) as f: tbl = f.read() if max_sequences is None: with open(sto_path) as f: sto = f.read() else: sto = parsers.truncate_stockholm_msa(sto_path, max_sequences) raw_output = dict( sto=sto, tbl=tbl, stderr=stderr, n_iter=self.n_iter, e_value=self.e_value) return raw_output def query(self, input_fasta_path: str, max_sequences: Optional[int] = None) -> Sequence[Mapping[str, Any]]: """Queries the database using Jackhmmer.""" return self.query_multiple([input_fasta_path], max_sequences)[0] def query_multiple( self, input_fasta_paths: Sequence[str], max_sequences: Optional[int] = None, ) -> Sequence[Sequence[Mapping[str, Any]]]: """Queries the database for multiple queries using Jackhmmer.""" if self.num_streamed_chunks is None: single_chunk_results = [] for input_fasta_path in input_fasta_paths: single_chunk_results.append([self._query_chunk( input_fasta_path, self.database_path, max_sequences)]) return single_chunk_results db_basename = os.path.basename(self.database_path) db_remote_chunk = lambda db_idx: f'{self.database_path}.{db_idx}' db_local_chunk = lambda db_idx: f'/tmp/ramdisk/{db_basename}.{db_idx}' # Remove existing files to prevent OOM for f in glob.glob(db_local_chunk('[0-9]*')): try: os.remove(f) except OSError: print(f'OSError while deleting {f}') # Download the (i+1)-th chunk while Jackhmmer is running on the i-th chunk with futures.ThreadPoolExecutor(max_workers=2) as executor: chunked_outputs = [[] for _ in range(len(input_fasta_paths))] for i in range(1, self.num_streamed_chunks + 1): # Copy the chunk locally if i == 1: future = executor.submit( request.urlretrieve, db_remote_chunk(i), db_local_chunk(i)) if i < self.num_streamed_chunks: next_future = executor.submit( request.urlretrieve, db_remote_chunk(i+1), db_local_chunk(i+1)) # Run Jackhmmer with the chunk future.result() for fasta_index, input_fasta_path in enumerate(input_fasta_paths): chunked_outputs[fasta_index].append(self._query_chunk( input_fasta_path, db_local_chunk(i), max_sequences)) # Remove the local copy of the chunk os.remove(db_local_chunk(i)) # Do not set next_future for the last chunk so that this works even for # databases with only 1 chunk. if i < self.num_streamed_chunks: future = next_future if self.streaming_callback: self.streaming_callback(i) return chunked_outputs
alphafold-main
alphafold/data/tools/jackhmmer.py
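A minimal, hedged sketch of a single (non-streamed) Jackhmmer query using the wrapper above; the binary, database, and query FASTA paths are placeholders.

from alphafold.data.tools import jackhmmer

runner = jackhmmer.Jackhmmer(
    binary_path='/usr/bin/jackhmmer',               # placeholder path
    database_path='/data/uniref90/uniref90.fasta',  # placeholder FASTA database
    n_cpu=8,
    n_iter=1)

# With num_streamed_chunks=None the query runs against the single local
# database file and returns one chunk of results per input FASTA.
results = runner.query('/tmp/query.fasta', max_sequences=10_000)
sto_string = results[0]['sto']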
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Library to run HHsearch from Python.""" import glob import os import subprocess from typing import Sequence from absl import logging from alphafold.data import parsers from alphafold.data.tools import utils # Internal import (7716). class HHSearch: """Python wrapper of the HHsearch binary.""" def __init__(self, *, binary_path: str, databases: Sequence[str], maxseq: int = 1_000_000): """Initializes the Python HHsearch wrapper. Args: binary_path: The path to the HHsearch executable. databases: A sequence of HHsearch database paths. This should be the common prefix for the database files (i.e. up to but not including _hhm.ffindex etc.) maxseq: The maximum number of rows in an input alignment. Note that this parameter is only supported in HHBlits version 3.1 and higher. Raises: RuntimeError: If HHsearch binary not found within the path. """ self.binary_path = binary_path self.databases = databases self.maxseq = maxseq for database_path in self.databases: if not glob.glob(database_path + '_*'): logging.error('Could not find HHsearch database %s', database_path) raise ValueError(f'Could not find HHsearch database {database_path}') @property def output_format(self) -> str: return 'hhr' @property def input_format(self) -> str: return 'a3m' def query(self, a3m: str) -> str: """Queries the database using HHsearch using a given a3m.""" with utils.tmpdir_manager() as query_tmp_dir: input_path = os.path.join(query_tmp_dir, 'query.a3m') hhr_path = os.path.join(query_tmp_dir, 'output.hhr') with open(input_path, 'w') as f: f.write(a3m) db_cmd = [] for db_path in self.databases: db_cmd.append('-d') db_cmd.append(db_path) cmd = [self.binary_path, '-i', input_path, '-o', hhr_path, '-maxseq', str(self.maxseq) ] + db_cmd logging.info('Launching subprocess "%s"', ' '.join(cmd)) process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with utils.timing('HHsearch query'): stdout, stderr = process.communicate() retcode = process.wait() if retcode: # Stderr is truncated to prevent proto size errors in Beam. raise RuntimeError( 'HHSearch failed:\nstdout:\n%s\n\nstderr:\n%s\n' % ( stdout.decode('utf-8'), stderr[:100_000].decode('utf-8'))) with open(hhr_path) as f: hhr = f.read() return hhr def get_template_hits(self, output_string: str, input_sequence: str) -> Sequence[parsers.TemplateHit]: """Gets parsed template hits from the raw string output by the tool.""" del input_sequence # Used by hmmseach but not needed for hhsearch. return parsers.parse_hhr(output_string)
alphafold-main
alphafold/data/tools/hhsearch.py
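A hedged usage sketch for the HHSearch wrapper above; the binary path and database prefix are placeholders, and the single-sequence A3M is invented for illustration.

from alphafold.data.tools import hhsearch

query_sequence = 'MKTAYIAKQR'
a3m_string = f'>query\n{query_sequence}\n'

searcher = hhsearch.HHSearch(
    binary_path='/usr/bin/hhsearch',  # placeholder path
    databases=['/data/pdb70/pdb70'])  # placeholder database prefix

hhr_string = searcher.query(a3m_string)
template_hits = searcher.get_template_hits(hhr_string, input_sequence=query_sequence)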
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """AlphaFold Colab notebook."""
alphafold-main
alphafold/notebooks/__init__.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for notebook_utils.""" import io from absl.testing import absltest from absl.testing import parameterized from alphafold.data import parsers from alphafold.data import templates from alphafold.notebooks import notebook_utils import mock import numpy as np ONLY_QUERY_HIT = { 'sto': ( '# STOCKHOLM 1.0\n' '#=GF ID query-i1\n' 'query MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEH\n' '//\n'), 'tbl': '', 'stderr': b'', 'n_iter': 1, 'e_value': 0.0001} # pylint: disable=line-too-long MULTI_SEQUENCE_HIT_1 = { 'sto': ( '# STOCKHOLM 1.0\n' '#=GF ID query-i1\n' '#=GS ERR1700680_4602609/41-109 DE [subseq from] ERR1700680_4602609\n' '#=GS ERR1019366_5760491/40-105 DE [subseq from] ERR1019366_5760491\n' '#=GS SRR5580704_12853319/61-125 DE [subseq from] SRR5580704_12853319\n' 'query MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEHAAQAAKHDAEHHAPKPH\n' 'ERR1700680_4602609/41-109 --INKGAEYHKKAAEHHELAAKHHREAAKHHEAGSHEKAAHHSEIAAGHGLTAVHHTEEATK-HHPEEHTEK--\n' 'ERR1019366_5760491/40-105 ---RSGAQHHDAAAQHYEEAARHHRMAAKQYQASHHEKAAHYAQLAYAHHMYAEQHAAEAAK-AHAKNHG----\n' 'SRR5580704_12853319/61-125 ----PAADHHMKAAEHHEEAAKHHRAAAEHHTAGDHQKAGHHAHVANGHHVNAVHHAEEASK-HHATDHS----\n' '//\n'), 'tbl': ( 'ERR1700680_4602609 - query - 7.7e-09 47.7 33.8 1.1e-08 47.2 33.8 1.2 1 0 0 1 1 1 1 -\n' 'ERR1019366_5760491 - query - 1.7e-08 46.6 33.1 2.5e-08 46.1 33.1 1.3 1 0 0 1 1 1 1 -\n' 'SRR5580704_12853319 - query - 1.1e-07 44.0 41.6 2e-07 43.1 41.6 1.4 1 0 0 1 1 1 1 -\n'), 'stderr': b'', 'n_iter': 1, 'e_value': 0.0001} MULTI_SEQUENCE_HIT_2 = { 'sto': ( '# STOCKHOLM 1.0\n' '#=GF ID query-i1\n' '#=GS ERR1700719_3476944/70-137 DE [subseq from] ERR1700719_3476944\n' '#=GS ERR1700761_4254522/72-138 DE [subseq from] ERR1700761_4254522\n' '#=GS SRR5438477_9761204/64-132 DE [subseq from] SRR5438477_9761204\n' 'query MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEHAAQAAKHDAEHHAPKPH\n' 'ERR1700719_3476944/70-137 ---KQAAEHHHQAAEHHEHAARHHREAAKHHEAGDHESAAHHAHTAQGHLHQATHHASEAAKLHVEHHGQK--\n' 'ERR1700761_4254522/72-138 ----QASEHHNLAAEHHEHAARHHRDAAKHHKAGDHEKAAHHAHVAHGHHLHATHHATEAAKHHVEAHGEK--\n' 'SRR5438477_9761204/64-132 MPKHEGAEHHKKAAEHNEHAARHHKEAARHHEEGSHEKVGHHAHIAHGHHLHATHHAEEAAKTHSNQHE----\n' '//\n'), 'tbl': ( 'ERR1700719_3476944 - query - 2e-07 43.2 47.5 3.5e-07 42.4 47.5 1.4 1 0 0 1 1 1 1 -\n' 'ERR1700761_4254522 - query - 6.1e-07 41.6 48.1 8.1e-07 41.3 48.1 1.2 1 0 0 1 1 1 1 -\n' 'SRR5438477_9761204 - query - 1.8e-06 40.2 46.9 2.3e-06 39.8 46.9 1.2 1 0 0 1 1 1 1 -\n'), 'stderr': b'', 'n_iter': 1, 'e_value': 0.0001} # pylint: enable=line-too-long class NotebookUtilsTest(parameterized.TestCase): @parameterized.parameters( ('DeepMind', 'DEEPMIND'), ('A ', 'A'), ('\tA', 'A'), (' A\t\n', 'A'), ('ACDEFGHIKLMNPQRSTVWY', 'ACDEFGHIKLMNPQRSTVWY')) def test_clean_and_validate_sequence_ok(self, sequence, exp_clean): clean = notebook_utils.clean_and_validate_single_sequence( sequence, min_length=1, max_length=100) self.assertEqual(clean, 
exp_clean) @parameterized.named_parameters( ('too_short', 'AA', 'too short'), ('too_long', 'AAAAAAAAAA', 'too long'), ('bad_amino_acids_B', 'BBBB', 'non-amino acid'), ('bad_amino_acids_J', 'JJJJ', 'non-amino acid'), ('bad_amino_acids_O', 'OOOO', 'non-amino acid'), ('bad_amino_acids_U', 'UUUU', 'non-amino acid'), ('bad_amino_acids_X', 'XXXX', 'non-amino acid'), ('bad_amino_acids_Z', 'ZZZZ', 'non-amino acid')) def test_clean_and_validate_sequence_bad(self, sequence, exp_error): with self.assertRaisesRegex(ValueError, f'.*{exp_error}.*'): notebook_utils.clean_and_validate_single_sequence( sequence, min_length=4, max_length=8) @parameterized.parameters( (['A', '', '', ' ', '\t', ' \t\n', '', ''], ['A']), (['', 'A'], ['A']), (['A', 'C ', ''], ['A', 'C']), (['', 'A', '', 'C '], ['A', 'C'])) def test_validate_input_ok(self, input_sequences, exp_sequences): sequences = notebook_utils.clean_and_validate_input_sequences( input_sequences=input_sequences, min_sequence_length=1, max_sequence_length=100) self.assertSequenceEqual(sequences, exp_sequences) @parameterized.named_parameters( ('no_input_sequence', ['', '\t', '\n'], 'No input amino acid sequence'), ('too_long_single', ['AAAAAAAAA', 'AAAA'], 'Input sequence is too long'), ('too_short_single', ['AAA', 'AAAA'], 'Input sequence is too short')) def test_validate_input_bad(self, input_sequences, exp_error): with self.assertRaisesRegex(ValueError, f'.*{exp_error}.*'): notebook_utils.clean_and_validate_input_sequences( input_sequences=input_sequences, min_sequence_length=4, max_sequence_length=8) def test_merge_chunked_msa_no_hits(self): results = [ONLY_QUERY_HIT, ONLY_QUERY_HIT] merged_msa = notebook_utils.merge_chunked_msa( results=results) self.assertSequenceEqual( merged_msa.sequences, ('MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEH',)) self.assertSequenceEqual(merged_msa.deletion_matrix, ([0] * 56,)) def test_merge_chunked_msa(self): results = [MULTI_SEQUENCE_HIT_1, MULTI_SEQUENCE_HIT_2] merged_msa = notebook_utils.merge_chunked_msa( results=results) self.assertLen(merged_msa.sequences, 7) # The 1st one is the query. self.assertEqual( merged_msa.sequences[0], 'MAAHKGAEHHHKAAEHHEQAAKHHHAAAEHHEKGEHEQAAHHADTAYAHHKHAEEHAAQAAKHDAEHHAP' 'KPH') # The 2nd one is the one with the lowest e-value: ERR1700680_4602609. self.assertEqual( merged_msa.sequences[1], '--INKGAEYHKKAAEHHELAAKHHREAAKHHEAGSHEKAAHHSEIAAGHGLTAVHHTEEATK-HHPEEHT' 'EK-') # The last one is the one with the largest e-value: SRR5438477_9761204. 
self.assertEqual( merged_msa.sequences[-1], 'MPKHEGAEHHKKAAEHNEHAARHHKEAARHHEEGSHEKVGHHAHIAHGHHLHATHHAEEAAKTHSNQHE-' '---') self.assertLen(merged_msa.deletion_matrix, 7) @mock.patch('sys.stdout', new_callable=io.StringIO) def test_show_msa_info(self, mocked_stdout): single_chain_msas = [ parsers.Msa(sequences=['A', 'B', 'C', 'C'], deletion_matrix=[None] * 4, descriptions=[''] * 4), parsers.Msa(sequences=['A', 'A', 'A', 'D'], deletion_matrix=[None] * 4, descriptions=[''] * 4) ] notebook_utils.show_msa_info( single_chain_msas=single_chain_msas, sequence_index=1) self.assertEqual(mocked_stdout.getvalue(), '\n4 unique sequences found in total for sequence 1\n\n') @parameterized.named_parameters( ('some_templates', 4), ('no_templates', 0)) def test_empty_placeholder_template_features(self, num_templates): template_features = notebook_utils.empty_placeholder_template_features( num_templates=num_templates, num_res=16) self.assertCountEqual(template_features.keys(), templates.TEMPLATE_FEATURES.keys()) self.assertSameElements( [v.shape[0] for v in template_features.values()], [num_templates]) self.assertSequenceEqual( [t.dtype for t in template_features.values()], [np.array([], dtype=templates.TEMPLATE_FEATURES[feat_name]).dtype for feat_name in template_features]) def test_check_cell_execution_order_correct(self): notebook_utils.check_cell_execution_order({1, 2}, 3) @parameterized.named_parameters( ('One missing', 4, {1, 2}, '3'), ('Two missing', 5, {1, 2}, '3, 4'), ) def test_check_cell_execution_order_missing( self, cell_num, cells_ran, cells_missing): with self.assertRaisesRegex(ValueError, f'.+{cells_missing}'): notebook_utils.check_cell_execution_order(cells_ran, cell_num) if __name__ == '__main__': absltest.main()
alphafold-main
alphafold/notebooks/notebook_utils_test.py
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper methods for the AlphaFold Colab notebook.""" from typing import AbstractSet, Any, Mapping, Optional, Sequence from alphafold.common import residue_constants from alphafold.data import parsers from matplotlib import pyplot as plt import numpy as np def clean_and_validate_single_sequence( input_sequence: str, min_length: int, max_length: int) -> str: """Checks that the input sequence is ok and returns a clean version of it.""" # Remove all whitespaces, tabs and end lines; upper-case. clean_sequence = input_sequence.translate( str.maketrans('', '', ' \n\t')).upper() aatypes = set(residue_constants.restypes) # 20 standard aatypes. if not set(clean_sequence).issubset(aatypes): raise ValueError( f'Input sequence contains non-amino acid letters: ' f'{set(clean_sequence) - aatypes}. AlphaFold only supports 20 standard ' 'amino acids as inputs.') if len(clean_sequence) < min_length: raise ValueError( f'Input sequence is too short: {len(clean_sequence)} amino acids, ' f'while the minimum is {min_length}') if len(clean_sequence) > max_length: raise ValueError( f'Input sequence is too long: {len(clean_sequence)} amino acids, while ' f'the maximum is {max_length}. You may be able to run it with the full ' f'AlphaFold system depending on your resources (system memory, ' f'GPU memory).') return clean_sequence def clean_and_validate_input_sequences( input_sequences: Sequence[str], min_sequence_length: int, max_sequence_length: int) -> Sequence[str]: """Validates and cleans input sequences.""" sequences = [] for input_sequence in input_sequences: if input_sequence.strip(): input_sequence = clean_and_validate_single_sequence( input_sequence=input_sequence, min_length=min_sequence_length, max_length=max_sequence_length) sequences.append(input_sequence) if sequences: return sequences else: raise ValueError('No input amino acid sequence provided, please provide at ' 'least one sequence.') def merge_chunked_msa( results: Sequence[Mapping[str, Any]], max_hits: Optional[int] = None ) -> parsers.Msa: """Merges chunked database hits together into hits for the full database.""" unsorted_results = [] for chunk_index, chunk in enumerate(results): msa = parsers.parse_stockholm(chunk['sto']) e_values_dict = parsers.parse_e_values_from_tblout(chunk['tbl']) # Jackhmmer lists sequences as <sequence name>/<residue from>-<residue to>. e_values = [e_values_dict[t.partition('/')[0]] for t in msa.descriptions] chunk_results = zip( msa.sequences, msa.deletion_matrix, msa.descriptions, e_values) if chunk_index != 0: next(chunk_results) # Only take query (first hit) from the first chunk. 
unsorted_results.extend(chunk_results) sorted_by_evalue = sorted(unsorted_results, key=lambda x: x[-1]) merged_sequences, merged_deletion_matrix, merged_descriptions, _ = zip( *sorted_by_evalue) merged_msa = parsers.Msa(sequences=merged_sequences, deletion_matrix=merged_deletion_matrix, descriptions=merged_descriptions) if max_hits is not None: merged_msa = merged_msa.truncate(max_seqs=max_hits) return merged_msa def show_msa_info( single_chain_msas: Sequence[parsers.Msa], sequence_index: int): """Prints info and shows a plot of the deduplicated single chain MSA.""" full_single_chain_msa = [] for single_chain_msa in single_chain_msas: full_single_chain_msa.extend(single_chain_msa.sequences) # Deduplicate but preserve order (hence can't use set). deduped_full_single_chain_msa = list(dict.fromkeys(full_single_chain_msa)) total_msa_size = len(deduped_full_single_chain_msa) print(f'\n{total_msa_size} unique sequences found in total for sequence ' f'{sequence_index}\n') aa_map = {res: i for i, res in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ-')} msa_arr = np.array( [[aa_map[aa] for aa in seq] for seq in deduped_full_single_chain_msa]) plt.figure(figsize=(12, 3)) plt.title(f'Per-Residue Count of Non-Gap Amino Acids in the MSA for Sequence ' f'{sequence_index}') plt.plot(np.sum(msa_arr != aa_map['-'], axis=0), color='black') plt.ylabel('Non-Gap Count') plt.yticks(range(0, total_msa_size + 1, max(1, int(total_msa_size / 3)))) plt.show() def empty_placeholder_template_features( num_templates: int, num_res: int) -> Mapping[str, np.ndarray]: return { 'template_aatype': np.zeros( (num_templates, num_res, len(residue_constants.restypes_with_x_and_gap)), dtype=np.float32), 'template_all_atom_masks': np.zeros( (num_templates, num_res, residue_constants.atom_type_num), dtype=np.float32), 'template_all_atom_positions': np.zeros( (num_templates, num_res, residue_constants.atom_type_num, 3), dtype=np.float32), 'template_domain_names': np.zeros([num_templates], dtype=object), 'template_sequence': np.zeros([num_templates], dtype=object), 'template_sum_probs': np.zeros([num_templates], dtype=np.float32), } def check_cell_execution_order( cells_ran: AbstractSet[int], cell_number: int) -> None: """Check that the cell execution order is correct. Args: cells_ran: Set of cell numbers that have been executed. cell_number: The number of the cell that this check is called in. Raises: If <1:cell_number> cells haven't been executed, raise error. """ previous_cells = set(range(1, cell_number)) cells_not_ran = previous_cells - cells_ran if cells_not_ran != set(): cells_not_ran_str = ', '.join([str(x) for x in sorted(cells_not_ran)]) raise ValueError( f'You did not execute the cells: {cells_not_ran_str}. Your Colab ' 'runtime may have died during execution. Please restart the runtime ' 'and run from the first cell!')
alphafold-main
alphafold/notebooks/notebook_utils.py
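A small, self-contained example of the sequence validation helpers above; whitespace is stripped and the sequence is upper-cased before the length and alphabet checks run.

from alphafold.notebooks import notebook_utils

clean = notebook_utils.clean_and_validate_single_sequence(
    ' mkta yiakqr \n', min_length=4, max_length=100)
# clean == 'MKTAYIAKQR'

sequences = notebook_utils.clean_and_validate_input_sequences(
    input_sequences=['MKTAYIAKQR', '', '  \t'],
    min_sequence_length=4, max_sequence_length=100)
# sequences == ['MKTAYIAKQR']  (empty and whitespace-only entries are dropped)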
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Install script for setuptools.""" import os from setuptools import find_namespace_packages from setuptools import setup _CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) def _get_version(): with open('clrs/__init__.py') as fp: for line in fp: if line.startswith('__version__') and '=' in line: version = line[line.find('=') + 1:].strip(' \'"\n') if version: return version raise ValueError('`__version__` not defined in `clrs/__init__.py`') def _parse_requirements(path): with open(os.path.join(_CURRENT_DIR, path)) as f: return [ line.rstrip() for line in f if not (line.isspace() or line.startswith('#')) ] setup( name='dm-clrs', version=_get_version(), url='https://github.com/deepmind/clrs', license='Apache 2.0', author='DeepMind', description=('The CLRS Algorithmic Reasoning Benchmark.'), long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(), long_description_content_type='text/markdown', author_email='clrs-dev@google.com', keywords='python machine learning', packages=find_namespace_packages(exclude=['*_test.py']), install_requires=_parse_requirements( os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')), tests_require=_parse_requirements( os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')), zip_safe=False, # Required for full installation. python_requires='>=3.6', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Operating System :: POSIX :: Linux', 'Operating System :: Microsoft :: Windows', 'Operating System :: MacOS :: MacOS X', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], )
clrs-master
setup.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The CLRS Algorithmic Reasoning Benchmark.""" from clrs._src.baselines import BaselineModel from clrs._src.baselines import BaselineModelChunked from clrs._src.nets import Net from clrs._src.nets import NetChunked from clrs._src.processors import GAT from clrs._src.processors import MPNN __all__ = ( "BaselineModel", "BaselineModelChunked", "GAT", "MPNN", "Net", "NetChunked", )
clrs-master
clrs/models.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The CLRS Algorithmic Reasoning Benchmark.""" from clrs import models from clrs._src import algorithms from clrs._src import decoders from clrs._src import processors from clrs._src.dataset import chunkify from clrs._src.dataset import CLRSDataset from clrs._src.dataset import create_chunked_dataset from clrs._src.dataset import create_dataset from clrs._src.dataset import get_clrs_folder from clrs._src.dataset import get_dataset_gcp_url from clrs._src.evaluation import evaluate from clrs._src.evaluation import evaluate_hints from clrs._src.model import Model from clrs._src.probing import DataPoint from clrs._src.probing import predecessor_to_cyclic_predecessor_and_first from clrs._src.processors import get_processor_factory from clrs._src.samplers import build_sampler from clrs._src.samplers import CLRS30 from clrs._src.samplers import Features from clrs._src.samplers import Feedback from clrs._src.samplers import process_permutations from clrs._src.samplers import process_pred_as_input from clrs._src.samplers import process_random_pos from clrs._src.samplers import Sampler from clrs._src.samplers import Trajectory from clrs._src.specs import ALGO_IDX_INPUT_NAME from clrs._src.specs import CLRS_30_ALGS_SETTINGS from clrs._src.specs import Location from clrs._src.specs import OutputClass from clrs._src.specs import Spec from clrs._src.specs import SPECS from clrs._src.specs import Stage from clrs._src.specs import Type __version__ = "1.0.0" __all__ = ( "ALGO_IDX_INPUT_NAME", "build_sampler", "chunkify", "CLRS30", "CLRS_30_ALGS_SETTINGS", "create_chunked_dataset", "create_dataset", "get_clrs_folder", "get_dataset_gcp_url", "get_processor_factory", "DataPoint", "predecessor_to_cyclic_predecessor_and_first", "process_permutations", "process_pred_as_input", "process_random_pos", "evaluate", "evaluate_hints", "Features", "Feedback", "Location", "Model", "Sampler", "Spec", "SPECS", "Stage", "Trajectory", "Type", )
clrs-master
clrs/__init__.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Import test for CLRS.""" from absl.testing import absltest import clrs class ClrsTest(absltest.TestCase): """Test CLRS can be imported correctly.""" def test_import(self): self.assertTrue(hasattr(clrs, 'Model')) if __name__ == '__main__': absltest.main()
clrs-master
clrs/clrs_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Algorithm specs. The "spec" of each algorithm is a static set of `(stage, loc, type)`-tuples. - `stage`: One of either an `input`, `output` or `hint` - `location`: Each datum is associated with either the `node`, `edge` or `graph` - `type`: Either a `scalar`, `categorical`, `mask`, `mask_one` or `pointer` The dataflow for an algorithm is represented by `(stage, loc, type, data)` "probes" that are valid under that algorithm's spec. It contains a single snapshot for each `input` and `output` and a time-series of intermediate algorithmic states (`hint`). At minimum, each node contains a `pos` probe that serves as a unique index e.g. for representing sequential data where appropriate """ import types from typing import Dict, Tuple class Stage: INPUT = 'input' OUTPUT = 'output' HINT = 'hint' class Location: NODE = 'node' EDGE = 'edge' GRAPH = 'graph' class Type: SCALAR = 'scalar' CATEGORICAL = 'categorical' MASK = 'mask' MASK_ONE = 'mask_one' POINTER = 'pointer' SHOULD_BE_PERMUTATION = 'should_be_permutation' PERMUTATION_POINTER = 'permutation_pointer' SOFT_POINTER = 'soft_pointer' class OutputClass: POSITIVE = 1 NEGATIVE = 0 MASKED = -1 Spec = Dict[str, Tuple[str, str, str]] CLRS_30_ALGS = [ 'articulation_points', 'activity_selector', 'bellman_ford', 'bfs', 'binary_search', 'bridges', 'bubble_sort', 'dag_shortest_paths', 'dfs', 'dijkstra', 'find_maximum_subarray_kadane', 'floyd_warshall', 'graham_scan', 'heapsort', 'insertion_sort', 'jarvis_march', 'kmp_matcher', 'lcs_length', 'matrix_chain_order', 'minimum', 'mst_kruskal', 'mst_prim', 'naive_string_matcher', 'optimal_bst', 'quickselect', 'quicksort', 'segments_intersect', 'strongly_connected_components', 'task_scheduling', 'topological_sort', ] ALGO_IDX_INPUT_NAME = 'algo_idx' # Algorithms have varying numbers of signals they are evaluated on. # To compensate for that, we issue more samples for those who use a small # number of signals. 
CLRS_30_ALGS_SETTINGS = {alg: {'num_samples_multiplier': 1} for alg in CLRS_30_ALGS} CLRS_30_ALGS_SETTINGS['find_maximum_subarray_kadane'][ 'num_samples_multiplier'] = 32 for alg in ['quickselect', 'minimum', 'binary_search', 'naive_string_matcher', 'kmp_matcher', 'segments_intersect']: CLRS_30_ALGS_SETTINGS[alg]['num_samples_multiplier'] = 64 SPECS = types.MappingProxyType({ 'insertion_sort': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.SCALAR), 'pred': (Stage.OUTPUT, Location.NODE, Type.SHOULD_BE_PERMUTATION), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'j': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'bubble_sort': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.SCALAR), 'pred': (Stage.OUTPUT, Location.NODE, Type.SHOULD_BE_PERMUTATION), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'j': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'heapsort': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.SCALAR), 'pred': (Stage.OUTPUT, Location.NODE, Type.SHOULD_BE_PERMUTATION), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'parent': (Stage.HINT, Location.NODE, Type.POINTER), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'j': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'largest': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'heap_size': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL) }, 'quicksort': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.SCALAR), 'pred': (Stage.OUTPUT, Location.NODE, Type.SHOULD_BE_PERMUTATION), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'p': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'r': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'j': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'quickselect': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.SCALAR), 'median': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'p': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'r': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'j': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'i_rank': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'target': (Stage.HINT, Location.GRAPH, Type.SCALAR) }, 'minimum': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.SCALAR), 'min': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'min_h': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'binary_search': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.SCALAR), 'target': (Stage.INPUT, Location.GRAPH, Type.SCALAR), 'return': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'low': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'high': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'mid': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'find_maximum_subarray': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.SCALAR), 'start': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE), 'end': (Stage.OUTPUT, Location.NODE, 
Type.MASK_ONE), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'low': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'high': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'mid': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'left_low': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'left_high': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'left_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'right_low': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'right_high': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'right_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'cross_low': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'cross_high': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'cross_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'ret_low': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'ret_high': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'ret_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'j': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'sum': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'left_x_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'right_x_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL) }, 'find_maximum_subarray_kadane': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.SCALAR), 'start': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE), 'end': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'best_low': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'best_high': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'best_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'j': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'sum': (Stage.HINT, Location.GRAPH, Type.SCALAR) }, 'matrix_chain_order': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'p': (Stage.INPUT, Location.NODE, Type.SCALAR), 's': (Stage.OUTPUT, Location.EDGE, Type.POINTER), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'm': (Stage.HINT, Location.EDGE, Type.SCALAR), 's_h': (Stage.HINT, Location.EDGE, Type.POINTER), 'msk': (Stage.HINT, Location.EDGE, Type.MASK) }, 'lcs_length': { 'string': (Stage.INPUT, Location.NODE, Type.MASK), 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.CATEGORICAL), 'b': (Stage.OUTPUT, Location.EDGE, Type.CATEGORICAL), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'b_h': (Stage.HINT, Location.EDGE, Type.CATEGORICAL), 'c': (Stage.HINT, Location.EDGE, Type.SCALAR) }, 'optimal_bst': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'p': (Stage.INPUT, Location.NODE, Type.SCALAR), 'q': (Stage.INPUT, Location.NODE, Type.SCALAR), 'root': (Stage.OUTPUT, Location.EDGE, Type.POINTER), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'root_h': (Stage.HINT, Location.EDGE, Type.POINTER), 'e': (Stage.HINT, Location.EDGE, Type.SCALAR), 'w': (Stage.HINT, Location.EDGE, Type.SCALAR), 'msk': (Stage.HINT, Location.EDGE, Type.MASK) }, 'activity_selector': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 's': (Stage.INPUT, Location.NODE, Type.SCALAR), 'f': (Stage.INPUT, Location.NODE, Type.SCALAR), 'selected': (Stage.OUTPUT, Location.NODE, Type.MASK), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'selected_h': (Stage.HINT, Location.NODE, Type.MASK), 'm': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'k': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'task_scheduling': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 
'd': (Stage.INPUT, Location.NODE, Type.SCALAR), 'w': (Stage.INPUT, Location.NODE, Type.SCALAR), 'selected': (Stage.OUTPUT, Location.NODE, Type.MASK), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'selected_h': (Stage.HINT, Location.NODE, Type.MASK), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 't': (Stage.HINT, Location.GRAPH, Type.SCALAR) }, 'dfs': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER), 'pi_h': (Stage.HINT, Location.NODE, Type.POINTER), 'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL), 'd': (Stage.HINT, Location.NODE, Type.SCALAR), 'f': (Stage.HINT, Location.NODE, Type.SCALAR), 's_prev': (Stage.HINT, Location.NODE, Type.POINTER), 's': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'v': (Stage.HINT, Location.NODE, Type.MASK_ONE), 's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'time': (Stage.HINT, Location.GRAPH, Type.SCALAR) }, 'topological_sort': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'topo': (Stage.OUTPUT, Location.NODE, Type.POINTER), 'topo_head': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE), 'topo_h': (Stage.HINT, Location.NODE, Type.POINTER), 'topo_head_h': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL), 's_prev': (Stage.HINT, Location.NODE, Type.POINTER), 's': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'v': (Stage.HINT, Location.NODE, Type.MASK_ONE), 's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'strongly_connected_components': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'scc_id': (Stage.OUTPUT, Location.NODE, Type.POINTER), 'scc_id_h': (Stage.HINT, Location.NODE, Type.POINTER), 'A_t': (Stage.HINT, Location.EDGE, Type.MASK), 'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL), 'd': (Stage.HINT, Location.NODE, Type.SCALAR), 'f': (Stage.HINT, Location.NODE, Type.SCALAR), 's_prev': (Stage.HINT, Location.NODE, Type.POINTER), 's': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'v': (Stage.HINT, Location.NODE, Type.MASK_ONE), 's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'time': (Stage.HINT, Location.GRAPH, Type.SCALAR), 'phase': (Stage.HINT, Location.GRAPH, Type.MASK) }, 'articulation_points': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'is_cut': (Stage.OUTPUT, Location.NODE, Type.MASK), 'is_cut_h': (Stage.HINT, Location.NODE, Type.MASK), 'pi_h': (Stage.HINT, Location.NODE, Type.POINTER), 'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL), 'd': (Stage.HINT, Location.NODE, Type.SCALAR), 'f': (Stage.HINT, Location.NODE, Type.SCALAR), 'low': (Stage.HINT, Location.NODE, Type.SCALAR), 'child_cnt': (Stage.HINT, Location.NODE, Type.SCALAR), 's_prev': (Stage.HINT, Location.NODE, Type.POINTER), 's': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'v': (Stage.HINT, Location.NODE, Type.MASK_ONE), 's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'time': (Stage.HINT, Location.GRAPH, Type.SCALAR) }, 'bridges': { 'pos': (Stage.INPUT, Location.NODE, 
Type.SCALAR), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'is_bridge': (Stage.OUTPUT, Location.EDGE, Type.MASK), 'is_bridge_h': (Stage.HINT, Location.EDGE, Type.MASK), 'pi_h': (Stage.HINT, Location.NODE, Type.POINTER), 'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL), 'd': (Stage.HINT, Location.NODE, Type.SCALAR), 'f': (Stage.HINT, Location.NODE, Type.SCALAR), 'low': (Stage.HINT, Location.NODE, Type.SCALAR), 's_prev': (Stage.HINT, Location.NODE, Type.POINTER), 's': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'v': (Stage.HINT, Location.NODE, Type.MASK_ONE), 's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'time': (Stage.HINT, Location.GRAPH, Type.SCALAR) }, 'bfs': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 's': (Stage.INPUT, Location.NODE, Type.MASK_ONE), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER), 'reach_h': (Stage.HINT, Location.NODE, Type.MASK), 'pi_h': (Stage.HINT, Location.NODE, Type.POINTER) }, 'mst_kruskal': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'in_mst': (Stage.OUTPUT, Location.EDGE, Type.MASK), 'in_mst_h': (Stage.HINT, Location.EDGE, Type.MASK), 'pi': (Stage.HINT, Location.NODE, Type.POINTER), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'v': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'root_u': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'root_v': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'mask_u': (Stage.HINT, Location.NODE, Type.MASK), 'mask_v': (Stage.HINT, Location.NODE, Type.MASK), 'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL) }, 'mst_prim': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 's': (Stage.INPUT, Location.NODE, Type.MASK_ONE), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER), 'pi_h': (Stage.HINT, Location.NODE, Type.POINTER), 'key': (Stage.HINT, Location.NODE, Type.SCALAR), 'mark': (Stage.HINT, Location.NODE, Type.MASK), 'in_queue': (Stage.HINT, Location.NODE, Type.MASK), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'bellman_ford': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 's': (Stage.INPUT, Location.NODE, Type.MASK_ONE), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER), 'pi_h': (Stage.HINT, Location.NODE, Type.POINTER), 'd': (Stage.HINT, Location.NODE, Type.SCALAR), 'msk': (Stage.HINT, Location.NODE, Type.MASK) }, 'dag_shortest_paths': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 's': (Stage.INPUT, Location.NODE, Type.MASK_ONE), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER), 'pi_h': (Stage.HINT, Location.NODE, Type.POINTER), 'd': (Stage.HINT, Location.NODE, Type.SCALAR), 'mark': (Stage.HINT, Location.NODE, Type.MASK), 'topo_h': (Stage.HINT, Location.NODE, Type.POINTER), 'topo_head_h': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL), 's_prev': (Stage.HINT, Location.NODE, Type.POINTER), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'v': (Stage.HINT, Location.NODE, Type.MASK_ONE), 's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'phase': (Stage.HINT, 
Location.GRAPH, Type.MASK) }, 'dijkstra': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 's': (Stage.INPUT, Location.NODE, Type.MASK_ONE), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER), 'pi_h': (Stage.HINT, Location.NODE, Type.POINTER), 'd': (Stage.HINT, Location.NODE, Type.SCALAR), 'mark': (Stage.HINT, Location.NODE, Type.MASK), 'in_queue': (Stage.HINT, Location.NODE, Type.MASK), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'floyd_warshall': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 'Pi': (Stage.OUTPUT, Location.EDGE, Type.POINTER), 'Pi_h': (Stage.HINT, Location.EDGE, Type.POINTER), 'D': (Stage.HINT, Location.EDGE, Type.SCALAR), 'msk': (Stage.HINT, Location.EDGE, Type.MASK), 'k': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'bipartite_matching': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'A': (Stage.INPUT, Location.EDGE, Type.SCALAR), 'adj': (Stage.INPUT, Location.EDGE, Type.MASK), 's': (Stage.INPUT, Location.NODE, Type.MASK_ONE), 't': (Stage.INPUT, Location.NODE, Type.MASK_ONE), 'in_matching': (Stage.OUTPUT, Location.EDGE, Type.MASK), 'in_matching_h': (Stage.HINT, Location.EDGE, Type.MASK), 'A_h': (Stage.HINT, Location.EDGE, Type.SCALAR), 'adj_h': (Stage.HINT, Location.EDGE, Type.MASK), 'd': (Stage.HINT, Location.NODE, Type.SCALAR), 'msk': (Stage.HINT, Location.NODE, Type.MASK), 'pi': (Stage.HINT, Location.NODE, Type.POINTER), 'u': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'phase': (Stage.HINT, Location.GRAPH, Type.MASK) }, 'naive_string_matcher': { 'string': (Stage.INPUT, Location.NODE, Type.MASK), 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.CATEGORICAL), 'match': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 's': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'j': (Stage.HINT, Location.NODE, Type.MASK_ONE) }, 'kmp_matcher': { 'string': (Stage.INPUT, Location.NODE, Type.MASK), 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'key': (Stage.INPUT, Location.NODE, Type.CATEGORICAL), 'match': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'pi': (Stage.HINT, Location.NODE, Type.POINTER), 'is_reset': (Stage.HINT, Location.NODE, Type.MASK), 'k': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'k_reset': (Stage.HINT, Location.GRAPH, Type.MASK), 'q': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'q_reset': (Stage.HINT, Location.GRAPH, Type.MASK), 's': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'phase': (Stage.HINT, Location.GRAPH, Type.MASK) }, 'segments_intersect': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'x': (Stage.INPUT, Location.NODE, Type.SCALAR), 'y': (Stage.INPUT, Location.NODE, Type.SCALAR), 'intersect': (Stage.OUTPUT, Location.GRAPH, Type.MASK), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'j': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'k': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'dir': (Stage.HINT, Location.NODE, Type.SCALAR), 'on_seg': (Stage.HINT, Location.NODE, Type.MASK) }, 'graham_scan': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'x': (Stage.INPUT, Location.NODE, Type.SCALAR), 'y': (Stage.INPUT, Location.NODE, Type.SCALAR), 'in_hull': (Stage.OUTPUT, Location.NODE, Type.MASK), 'best': 
(Stage.HINT, Location.NODE, Type.MASK_ONE), 'atans': (Stage.HINT, Location.NODE, Type.SCALAR), 'in_hull_h': (Stage.HINT, Location.NODE, Type.MASK), 'stack_prev': (Stage.HINT, Location.NODE, Type.POINTER), 'last_stack': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL) }, 'jarvis_march': { 'pos': (Stage.INPUT, Location.NODE, Type.SCALAR), 'x': (Stage.INPUT, Location.NODE, Type.SCALAR), 'y': (Stage.INPUT, Location.NODE, Type.SCALAR), 'in_hull': (Stage.OUTPUT, Location.NODE, Type.MASK), 'pred_h': (Stage.HINT, Location.NODE, Type.POINTER), 'in_hull_h': (Stage.HINT, Location.NODE, Type.MASK), 'best': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'last_point': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'endpoint': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'i': (Stage.HINT, Location.NODE, Type.MASK_ONE), 'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL) } })
clrs-master
clrs/_src/specs.py
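The spec table above is easiest to read programmatically. The following is a minimal sketch (not part of the repository; the helper name and printed summary are illustrative) of grouping the `(stage, loc, type)` tuples of one algorithm by stage, using only the `specs` module reproduced above.

# Sketch only: group the probes of one algorithm by stage.
import collections

from clrs._src import specs


def probes_by_stage(algorithm: str):
  """Returns {stage: [(name, location, type), ...]} for one algorithm."""
  grouped = collections.defaultdict(list)
  for name, (stage, loc, typ) in specs.SPECS[algorithm].items():
    grouped[stage].append((name, loc, typ))
  return dict(grouped)


if __name__ == '__main__':
  bfs = probes_by_stage('bfs')
  # Per the spec above, BFS has 4 inputs, 1 output and 2 hints.
  print(sorted(name for name, _, _ in bfs[specs.Stage.INPUT]))   # A, adj, pos, s
  print(sorted(name for name, _, _ in bfs[specs.Stage.OUTPUT]))  # pi
  print(sorted(name for name, _, _ in bfs[specs.Stage.HINT]))    # pi_h, reach_h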
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """JAX implementation of CLRS baseline models.""" import functools import os import pickle from typing import Dict, List, Optional, Tuple, Union import chex from clrs._src import decoders from clrs._src import losses from clrs._src import model from clrs._src import nets from clrs._src import probing from clrs._src import processors from clrs._src import samplers from clrs._src import specs import haiku as hk import jax import jax.numpy as jnp import numpy as np import optax _Array = chex.Array _DataPoint = probing.DataPoint _Features = samplers.Features _FeaturesChunked = samplers.FeaturesChunked _Feedback = samplers.Feedback _Location = specs.Location _Seed = jnp.integer _Spec = specs.Spec _Stage = specs.Stage _Trajectory = samplers.Trajectory _Type = specs.Type _OutputClass = specs.OutputClass # pytype: disable=signature-mismatch def _maybe_pick_first_pmapped(tree): if jax.local_device_count() == 1: return tree return jax.tree_util.tree_map(lambda x: x[0], tree) @jax.jit def _restack_from_pmap(tree): """Stack the results of a pmapped computation across the first two axes.""" restack_array = lambda x: jnp.reshape(x, (-1,) + x.shape[2:]) return jax.tree_util.tree_map(restack_array, tree) def _maybe_restack_from_pmap(tree): if jax.local_device_count() == 1: return tree return _restack_from_pmap(tree) @functools.partial(jax.jit, static_argnums=[1, 2]) def _pmap_reshape(x, n_devices, split_axis=0): """Splits a pytree over n_devices on axis split_axis for pmapping.""" def _reshape(arr): new_shape = (arr.shape[:split_axis] + (n_devices, arr.shape[split_axis] // n_devices) + arr.shape[split_axis + 1:]) return jnp.moveaxis(jnp.reshape(arr, new_shape), split_axis, 0) return jax.tree_util.tree_map(_reshape, x) def _maybe_pmap_reshape(x, split_axis=0): n_devices = jax.local_device_count() if n_devices == 1: return x return _pmap_reshape(x, n_devices, split_axis) @functools.partial(jax.jit, static_argnums=1) def _pmap_data(data: Union[_Feedback, _Features], n_devices: int): """Replicate/split feedback or features for pmapping.""" if isinstance(data, _Feedback): features = data.features else: features = data pmap_data = features._replace( inputs=_pmap_reshape(features.inputs, n_devices), hints=_pmap_reshape(features.hints, n_devices, split_axis=1), lengths=_pmap_reshape(features.lengths, n_devices), ) if isinstance(data, _Feedback): pmap_data = data._replace( features=pmap_data, outputs=_pmap_reshape(data.outputs, n_devices) ) return pmap_data def _maybe_pmap_data(data: Union[_Feedback, _Features]): n_devices = jax.local_device_count() if n_devices == 1: return data return _pmap_data(data, n_devices) def _maybe_put_replicated(tree): if jax.local_device_count() == 1: return jax.device_put(tree) else: return jax.device_put_replicated(tree, jax.local_devices()) def _maybe_pmap_rng_key(rng_key: _Array): n_devices = 
jax.local_device_count() if n_devices == 1: return rng_key pmap_rng_keys = jax.random.split(rng_key, n_devices) return jax.device_put_sharded(list(pmap_rng_keys), jax.local_devices()) class BaselineModel(model.Model): """Model implementation with selectable message passing algorithm.""" def __init__( self, spec: Union[_Spec, List[_Spec]], dummy_trajectory: Union[List[_Feedback], _Feedback], processor_factory: processors.ProcessorFactory, hidden_dim: int = 32, encode_hints: bool = False, decode_hints: bool = True, encoder_init: str = 'default', use_lstm: bool = False, learning_rate: float = 0.005, grad_clip_max_norm: float = 0.0, checkpoint_path: str = '/tmp/clrs3', freeze_processor: bool = False, dropout_prob: float = 0.0, hint_teacher_forcing: float = 0.0, hint_repred_mode: str = 'soft', name: str = 'base_model', nb_msg_passing_steps: int = 1, ): """Constructor for BaselineModel. The model consists of encoders, processor and decoders. It can train and evaluate either a single algorithm or a set of algorithms; in the latter case, a single processor is shared among all the algorithms, while the encoders and decoders are separate for each algorithm. Args: spec: Either a single spec for one algorithm, or a list of specs for multiple algorithms to be trained and evaluated. dummy_trajectory: Either a single feedback batch, in the single-algorithm case, or a list of feedback batches, in the multi-algorithm case, that comply with the `spec` (or list of specs), to initialize network size. processor_factory: A callable that takes an `out_size` parameter and returns a processor (see `processors.py`). hidden_dim: Size of the hidden state of the model, i.e., size of the message-passing vectors. encode_hints: Whether to provide hints as model inputs. decode_hints: Whether to provide hints as model outputs. encoder_init: The initialiser type to use for the encoders. use_lstm: Whether to insert an LSTM after message passing. learning_rate: Learning rate for training. grad_clip_max_norm: if greater than 0, the maximum norm of the gradients. checkpoint_path: Path for loading/saving checkpoints. freeze_processor: If True, the processor weights will be frozen and only encoders and decoders (and, if used, the lstm) will be trained. dropout_prob: Dropout rate in the message-passing stage. hint_teacher_forcing: Probability of using ground-truth hints instead of predicted hints as inputs during training (only relevant if `encode_hints`=True) hint_repred_mode: How to process predicted hints when fed back as inputs. Only meaningful when `encode_hints` and `decode_hints` are True. Options are: - 'soft', where we use softmaxes for categoricals, pointers and mask_one, and sigmoids for masks. This will allow gradients to flow through hints during training. - 'hard', where we use argmax instead of softmax, and hard thresholding of masks. No gradients will go through the hints during training; even for scalar hints, which don't have any kind of post-processing, gradients will be stopped. - 'hard_on_eval', which is soft for training and hard for evaluation. name: Model name. nb_msg_passing_steps: Number of message passing steps per hint. Raises: ValueError: if `encode_hints=True` and `decode_hints=False`. 
""" super(BaselineModel, self).__init__(spec=spec) if encode_hints and not decode_hints: raise ValueError('`encode_hints=True`, `decode_hints=False` is invalid.') assert hint_repred_mode in ['soft', 'hard', 'hard_on_eval'] self.decode_hints = decode_hints self.checkpoint_path = checkpoint_path self.name = name self._freeze_processor = freeze_processor if grad_clip_max_norm != 0.0: optax_chain = [optax.clip_by_global_norm(grad_clip_max_norm), optax.scale_by_adam(), optax.scale(-learning_rate)] self.opt = optax.chain(*optax_chain) else: self.opt = optax.adam(learning_rate) self.nb_msg_passing_steps = nb_msg_passing_steps self.nb_dims = [] if isinstance(dummy_trajectory, _Feedback): assert len(self._spec) == 1 dummy_trajectory = [dummy_trajectory] for traj in dummy_trajectory: nb_dims = {} for inp in traj.features.inputs: nb_dims[inp.name] = inp.data.shape[-1] for hint in traj.features.hints: nb_dims[hint.name] = hint.data.shape[-1] for outp in traj.outputs: nb_dims[outp.name] = outp.data.shape[-1] self.nb_dims.append(nb_dims) self._create_net_fns(hidden_dim, encode_hints, processor_factory, use_lstm, encoder_init, dropout_prob, hint_teacher_forcing, hint_repred_mode) self._device_params = None self._device_opt_state = None self.opt_state_skeleton = None def _create_net_fns(self, hidden_dim, encode_hints, processor_factory, use_lstm, encoder_init, dropout_prob, hint_teacher_forcing, hint_repred_mode): def _use_net(*args, **kwargs): return nets.Net(self._spec, hidden_dim, encode_hints, self.decode_hints, processor_factory, use_lstm, encoder_init, dropout_prob, hint_teacher_forcing, hint_repred_mode, self.nb_dims, self.nb_msg_passing_steps)(*args, **kwargs) self.net_fn = hk.transform(_use_net) pmap_args = dict(axis_name='batch', devices=jax.local_devices()) n_devices = jax.local_device_count() func, static_arg, extra_args = ( (jax.jit, 'static_argnums', {}) if n_devices == 1 else (jax.pmap, 'static_broadcasted_argnums', pmap_args)) pmean = functools.partial(jax.lax.pmean, axis_name='batch') self._maybe_pmean = pmean if n_devices > 1 else lambda x: x extra_args[static_arg] = 3 self.jitted_grad = func(self._compute_grad, **extra_args) extra_args[static_arg] = 4 self.jitted_feedback = func(self._feedback, donate_argnums=[0, 3], **extra_args) extra_args[static_arg] = [3, 4, 5] self.jitted_predict = func(self._predict, **extra_args) extra_args[static_arg] = [3, 4] self.jitted_accum_opt_update = func(accum_opt_update, donate_argnums=[0, 2], **extra_args) def init(self, features: Union[_Features, List[_Features]], seed: _Seed): if not isinstance(features, list): assert len(self._spec) == 1 features = [features] self.params = self.net_fn.init(jax.random.PRNGKey(seed), features, True, # pytype: disable=wrong-arg-types # jax-ndarray algorithm_index=-1, return_hints=False, return_all_outputs=False) self.opt_state = self.opt.init(self.params) # We will use the optimizer state skeleton for traversal when we # want to avoid updating the state of params of untrained algorithms. 
self.opt_state_skeleton = self.opt.init(jnp.zeros(1)) @property def params(self): if self._device_params is None: return None return jax.device_get(_maybe_pick_first_pmapped(self._device_params)) @params.setter def params(self, params): self._device_params = _maybe_put_replicated(params) @property def opt_state(self): if self._device_opt_state is None: return None return jax.device_get(_maybe_pick_first_pmapped(self._device_opt_state)) @opt_state.setter def opt_state(self, opt_state): self._device_opt_state = _maybe_put_replicated(opt_state) def _compute_grad(self, params, rng_key, feedback, algorithm_index): lss, grads = jax.value_and_grad(self._loss)( params, rng_key, feedback, algorithm_index) return self._maybe_pmean(lss), self._maybe_pmean(grads) def _feedback(self, params, rng_key, feedback, opt_state, algorithm_index): lss, grads = jax.value_and_grad(self._loss)( params, rng_key, feedback, algorithm_index) grads = self._maybe_pmean(grads) params, opt_state = self._update_params(params, grads, opt_state, algorithm_index) lss = self._maybe_pmean(lss) return lss, params, opt_state def _predict(self, params, rng_key: hk.PRNGSequence, features: _Features, algorithm_index: int, return_hints: bool, return_all_outputs: bool): outs, hint_preds = self.net_fn.apply( params, rng_key, [features], repred=True, algorithm_index=algorithm_index, return_hints=return_hints, return_all_outputs=return_all_outputs) outs = decoders.postprocess(self._spec[algorithm_index], outs, sinkhorn_temperature=0.1, sinkhorn_steps=50, hard=True, ) return outs, hint_preds def compute_grad( self, rng_key: hk.PRNGSequence, feedback: _Feedback, algorithm_index: Optional[int] = None, ) -> Tuple[float, _Array]: """Compute gradients.""" if algorithm_index is None: assert len(self._spec) == 1 algorithm_index = 0 assert algorithm_index >= 0 # Calculate gradients. rng_keys = _maybe_pmap_rng_key(rng_key) # pytype: disable=wrong-arg-types # numpy-scalars feedback = _maybe_pmap_data(feedback) loss, grads = self.jitted_grad( self._device_params, rng_keys, feedback, algorithm_index) loss = _maybe_pick_first_pmapped(loss) grads = _maybe_pick_first_pmapped(grads) return loss, grads def feedback(self, rng_key: hk.PRNGSequence, feedback: _Feedback, algorithm_index=None) -> float: if algorithm_index is None: assert len(self._spec) == 1 algorithm_index = 0 # Calculate and apply gradients. 
rng_keys = _maybe_pmap_rng_key(rng_key) # pytype: disable=wrong-arg-types # numpy-scalars feedback = _maybe_pmap_data(feedback) loss, self._device_params, self._device_opt_state = self.jitted_feedback( self._device_params, rng_keys, feedback, self._device_opt_state, algorithm_index) loss = _maybe_pick_first_pmapped(loss) return loss def predict(self, rng_key: hk.PRNGSequence, features: _Features, algorithm_index: Optional[int] = None, return_hints: bool = False, return_all_outputs: bool = False): """Model inference step.""" if algorithm_index is None: assert len(self._spec) == 1 algorithm_index = 0 rng_keys = _maybe_pmap_rng_key(rng_key) # pytype: disable=wrong-arg-types # numpy-scalars features = _maybe_pmap_data(features) return _maybe_restack_from_pmap( self.jitted_predict( self._device_params, rng_keys, features, algorithm_index, return_hints, return_all_outputs)) def _loss(self, params, rng_key, feedback, algorithm_index): """Calculates model loss f(feedback; params).""" output_preds, hint_preds = self.net_fn.apply( params, rng_key, [feedback.features], repred=False, algorithm_index=algorithm_index, return_hints=True, return_all_outputs=False) nb_nodes = _nb_nodes(feedback, is_chunked=False) lengths = feedback.features.lengths total_loss = 0.0 # Calculate output loss. for truth in feedback.outputs: total_loss += losses.output_loss( truth=truth, pred=output_preds[truth.name], nb_nodes=nb_nodes, ) # Optionally accumulate hint losses. if self.decode_hints: for truth in feedback.features.hints: total_loss += losses.hint_loss( truth=truth, preds=[x[truth.name] for x in hint_preds], lengths=lengths, nb_nodes=nb_nodes, ) return total_loss def _update_params(self, params, grads, opt_state, algorithm_index): updates, opt_state = filter_null_grads( grads, self.opt, opt_state, self.opt_state_skeleton, algorithm_index) if self._freeze_processor: params_subset = _filter_out_processor(params) updates_subset = _filter_out_processor(updates) assert len(params) > len(params_subset) assert params_subset new_params = optax.apply_updates(params_subset, updates_subset) new_params = hk.data_structures.merge(params, new_params) else: new_params = optax.apply_updates(params, updates) return new_params, opt_state def update_model_params_accum(self, grads) -> None: grads = _maybe_put_replicated(grads) self._device_params, self._device_opt_state = self.jitted_accum_opt_update( self._device_params, grads, self._device_opt_state, self.opt, self._freeze_processor) def verbose_loss(self, feedback: _Feedback, extra_info) -> Dict[str, _Array]: """Gets verbose loss information.""" hint_preds = extra_info nb_nodes = _nb_nodes(feedback, is_chunked=False) lengths = feedback.features.lengths losses_ = {} # Optionally accumulate hint losses. 
if self.decode_hints: for truth in feedback.features.hints: losses_.update( losses.hint_loss( truth=truth, preds=[x[truth.name] for x in hint_preds], lengths=lengths, nb_nodes=nb_nodes, verbose=True, )) return losses_ def restore_model(self, file_name: str, only_load_processor: bool = False): """Restore model from `file_name`.""" path = os.path.join(self.checkpoint_path, file_name) with open(path, 'rb') as f: restored_state = pickle.load(f) if only_load_processor: restored_params = _filter_in_processor(restored_state['params']) else: restored_params = restored_state['params'] self.params = hk.data_structures.merge(self.params, restored_params) self.opt_state = restored_state['opt_state'] def save_model(self, file_name: str): """Save model (processor weights only) to `file_name`.""" os.makedirs(self.checkpoint_path, exist_ok=True) to_save = {'params': self.params, 'opt_state': self.opt_state} path = os.path.join(self.checkpoint_path, file_name) with open(path, 'wb') as f: pickle.dump(to_save, f) class BaselineModelChunked(BaselineModel): """Model that processes time-chunked data. Unlike `BaselineModel`, which processes full samples, `BaselineModelChunked` processes fixed-timelength chunks of data. Each tensor of inputs and hints has dimensions chunk_length x batch_size x ... The beginning of a new sample withing the chunk is signalled by a tensor called `is_first` of dimensions chunk_length x batch_size. The chunked model is intended for training. For validation and test, use `BaselineModel`. """ mp_states: List[List[nets.MessagePassingStateChunked]] init_mp_states: List[List[nets.MessagePassingStateChunked]] def _create_net_fns(self, hidden_dim, encode_hints, processor_factory, use_lstm, encoder_init, dropout_prob, hint_teacher_forcing, hint_repred_mode): def _use_net(*args, **kwargs): return nets.NetChunked( self._spec, hidden_dim, encode_hints, self.decode_hints, processor_factory, use_lstm, encoder_init, dropout_prob, hint_teacher_forcing, hint_repred_mode, self.nb_dims, self.nb_msg_passing_steps)(*args, **kwargs) self.net_fn = hk.transform(_use_net) pmap_args = dict(axis_name='batch', devices=jax.local_devices()) n_devices = jax.local_device_count() func, static_arg, extra_args = ( (jax.jit, 'static_argnums', {}) if n_devices == 1 else (jax.pmap, 'static_broadcasted_argnums', pmap_args)) pmean = functools.partial(jax.lax.pmean, axis_name='batch') self._maybe_pmean = pmean if n_devices > 1 else lambda x: x extra_args[static_arg] = 4 self.jitted_grad = func(self._compute_grad, **extra_args) extra_args[static_arg] = 5 self.jitted_feedback = func(self._feedback, donate_argnums=[0, 4], **extra_args) extra_args[static_arg] = [3, 4] self.jitted_accum_opt_update = func(accum_opt_update, donate_argnums=[0, 2], **extra_args) def _init_mp_state(self, features_list: List[List[_FeaturesChunked]], rng_key: _Array): def _empty_mp_state(): return nets.MessagePassingStateChunked( # pytype: disable=wrong-arg-types # numpy-scalars inputs=None, hints=None, is_first=None, hint_preds=None, hiddens=None, lstm_state=None) empty_mp_states = [[_empty_mp_state() for _ in f] for f in features_list] dummy_params = [self.net_fn.init(rng_key, f, e, False, init_mp_state=True, algorithm_index=-1) for (f, e) in zip(features_list, empty_mp_states)] mp_states = [ self.net_fn.apply(d, rng_key, f, e, False, init_mp_state=True, algorithm_index=-1)[1] for (d, f, e) in zip(dummy_params, features_list, empty_mp_states)] return mp_states def init(self, features: List[List[_FeaturesChunked]], seed: _Seed): self.mp_states = 
self._init_mp_state(features, jax.random.PRNGKey(seed)) # pytype: disable=wrong-arg-types # jax-ndarray self.init_mp_states = [list(x) for x in self.mp_states] self.params = self.net_fn.init( jax.random.PRNGKey(seed), features[0], self.mp_states[0], # pytype: disable=wrong-arg-types # jax-ndarray True, init_mp_state=False, algorithm_index=-1) self.opt_state = self.opt.init(self.params) # We will use the optimizer state skeleton for traversal when we # want to avoid updating the state of params of untrained algorithms. self.opt_state_skeleton = self.opt.init(jnp.zeros(1)) def predict(self, rng_key: hk.PRNGSequence, features: _FeaturesChunked, algorithm_index: Optional[int] = None): """Inference not implemented. Chunked model intended for training only.""" raise NotImplementedError def _loss(self, params, rng_key, feedback, mp_state, algorithm_index): (output_preds, hint_preds), mp_state = self.net_fn.apply( params, rng_key, [feedback.features], [mp_state], repred=False, init_mp_state=False, algorithm_index=algorithm_index) nb_nodes = _nb_nodes(feedback, is_chunked=True) total_loss = 0.0 is_first = feedback.features.is_first is_last = feedback.features.is_last # Calculate output loss. for truth in feedback.outputs: total_loss += losses.output_loss_chunked( truth=truth, pred=output_preds[truth.name], is_last=is_last, nb_nodes=nb_nodes, ) # Optionally accumulate hint losses. if self.decode_hints: for truth in feedback.features.hints: loss = losses.hint_loss_chunked( truth=truth, pred=hint_preds[truth.name], is_first=is_first, nb_nodes=nb_nodes, ) total_loss += loss return total_loss, (mp_state,) def _compute_grad(self, params, rng_key, feedback, mp_state, algorithm_index): (lss, (mp_state,)), grads = jax.value_and_grad(self._loss, has_aux=True)( params, rng_key, feedback, mp_state, algorithm_index) return self._maybe_pmean(lss), mp_state, self._maybe_pmean(grads) def _feedback(self, params, rng_key, feedback, mp_state, opt_state, algorithm_index): (lss, (mp_state,)), grads = jax.value_and_grad(self._loss, has_aux=True)( params, rng_key, feedback, mp_state, algorithm_index) grads = self._maybe_pmean(grads) params, opt_state = self._update_params(params, grads, opt_state, algorithm_index) lss = self._maybe_pmean(lss) return lss, params, opt_state, mp_state def compute_grad( self, rng_key: hk.PRNGSequence, feedback: _Feedback, algorithm_index: Optional[Tuple[int, int]] = None, ) -> Tuple[float, _Array]: """Compute gradients.""" if algorithm_index is None: assert len(self._spec) == 1 algorithm_index = (0, 0) length_index, algorithm_index = algorithm_index # Reusing init_mp_state improves performance. # The next, commented out line, should be used for proper state keeping. 
# mp_state = self.mp_states[length_index][algorithm_index] mp_state = self.init_mp_states[length_index][algorithm_index] rng_keys = _maybe_pmap_rng_key(rng_key) # pytype: disable=wrong-arg-types # numpy-scalars feedback = _maybe_pmap_reshape(feedback, split_axis=1) mp_state = _maybe_pmap_reshape(mp_state, split_axis=0) loss, mp_state, grads = self.jitted_grad( self._device_params, rng_keys, feedback, mp_state, algorithm_index) loss = _maybe_pick_first_pmapped(loss) grads = _maybe_pick_first_pmapped(grads) mp_state = _maybe_restack_from_pmap(mp_state) self.mp_states[length_index][algorithm_index] = mp_state return loss, grads def feedback(self, rng_key: hk.PRNGSequence, feedback: _Feedback, algorithm_index=None) -> float: if algorithm_index is None: assert len(self._spec) == 1 algorithm_index = (0, 0) length_index, algorithm_index = algorithm_index # Reusing init_mp_state improves performance. # The next, commented out line, should be used for proper state keeping. # mp_state = self.mp_states[length_index][algorithm_index] mp_state = self.init_mp_states[length_index][algorithm_index] rng_keys = _maybe_pmap_rng_key(rng_key) # pytype: disable=wrong-arg-types # numpy-scalars feedback = _maybe_pmap_reshape(feedback, split_axis=1) mp_state = _maybe_pmap_reshape(mp_state, split_axis=0) loss, self._device_params, self._device_opt_state, mp_state = ( self.jitted_feedback( self._device_params, rng_keys, feedback, mp_state, self._device_opt_state, algorithm_index)) loss = _maybe_pick_first_pmapped(loss) mp_state = _maybe_restack_from_pmap(mp_state) self.mp_states[length_index][algorithm_index] = mp_state return loss def verbose_loss(self, *args, **kwargs): raise NotImplementedError def _nb_nodes(feedback: _Feedback, is_chunked) -> int: for inp in feedback.features.inputs: if inp.location in [_Location.NODE, _Location.EDGE]: if is_chunked: return inp.data.shape[2] # inputs are time x batch x nodes x ... else: return inp.data.shape[1] # inputs are batch x nodes x ... 
assert False def _param_in_processor(module_name): return processors.PROCESSOR_TAG in module_name def _filter_out_processor(params: hk.Params) -> hk.Params: return hk.data_structures.filter( lambda module_name, n, v: not _param_in_processor(module_name), params) def _filter_in_processor(params: hk.Params) -> hk.Params: return hk.data_structures.filter( lambda module_name, n, v: _param_in_processor(module_name), params) def _is_not_done_broadcast(lengths, i, tensor): is_not_done = (lengths > i + 1) * 1.0 while len(is_not_done.shape) < len(tensor.shape): is_not_done = jnp.expand_dims(is_not_done, -1) return is_not_done def accum_opt_update(params, grads, opt_state, opt, freeze_processor): """Update params from gradients collected from several algorithms.""" # Average the gradients over all algos grads = jax.tree_util.tree_map( lambda *x: sum(x) / (sum([jnp.any(k) for k in x]) + 1e-12), *grads) updates, opt_state = opt.update(grads, opt_state) if freeze_processor: params_subset = _filter_out_processor(params) assert len(params) > len(params_subset) assert params_subset updates_subset = _filter_out_processor(updates) new_params = optax.apply_updates(params_subset, updates_subset) new_params = hk.data_structures.merge(params, new_params) else: new_params = optax.apply_updates(params, updates) return new_params, opt_state @functools.partial(jax.jit, static_argnames=['opt']) def opt_update(opt, flat_grads, flat_opt_state): return opt.update(flat_grads, flat_opt_state) def filter_null_grads(grads, opt, opt_state, opt_state_skeleton, algo_idx): """Compute updates ignoring params that have no gradients. This prevents untrained params (e.g., encoders/decoders for algorithms that are not being trained) to accumulate, e.g., momentum from spurious zero gradients. Note: this works as intended for "per-parameter" optimizer state, such as momentum. However, when the optimizer has some global state (such as the step counts in Adam), the global state will be updated every time, affecting also future updates of parameters that had null gradients in the current step. Args: grads: Gradients for all parameters. opt: Optax optimizer. opt_state: Optimizer state. opt_state_skeleton: A "skeleton" of optimizer state that has been initialized with scalar parameters. This serves to traverse each parameter of the otpimizer state during the opt state update. algo_idx: Index of algorithm, to filter out unused encoders/decoders. If None, no filtering happens. Returns: Updates and new optimizer state, where the parameters with null gradient have not been taken into account. """ def _keep_in_algo(k, v): """Ignore params of encoders/decoders irrelevant for this algo.""" # Note: in shared pointer decoder modes, we should exclude shared params # for algos that do not have pointer outputs. if ((processors.PROCESSOR_TAG in k) or (f'algo_{algo_idx}_' in k)): return v return jax.tree_util.tree_map(lambda x: None, v) if algo_idx is None: masked_grads = grads else: masked_grads = {k: _keep_in_algo(k, v) for k, v in grads.items()} flat_grads, treedef = jax.tree_util.tree_flatten(masked_grads) flat_opt_state = jax.tree_util.tree_map( lambda _, x: x # pylint:disable=g-long-lambda if isinstance(x, (np.ndarray, jax.Array)) else treedef.flatten_up_to(x), opt_state_skeleton, opt_state, ) # Compute updates only for the params with gradient. 
flat_updates, flat_opt_state = opt_update(opt, flat_grads, flat_opt_state) def unflatten(flat, original): """Restore tree structure, filling missing (None) leaves with original.""" if isinstance(flat, (np.ndarray, jax.Array)): return flat return jax.tree_util.tree_map(lambda x, y: x if y is None else y, original, treedef.unflatten(flat)) # Restore the state and updates tree structure. new_opt_state = jax.tree_util.tree_map(lambda _, x, y: unflatten(x, y), opt_state_skeleton, flat_opt_state, opt_state) updates = unflatten(flat_updates, jax.tree_util.tree_map(lambda x: 0., grads)) return updates, new_opt_state
clrs-master
clrs/_src/baselines.py
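For orientation, here is a training-loop sketch under stated assumptions: `make_sampler` stands in for whatever yields `_Feedback` batches (see samplers.py, not shown here), `processor_factory` for a callable produced from processors.py, and the checkpoint path is hypothetical. None of these calls are copied from the repository; only the `BaselineModel` constructor, `init`, and `feedback` signatures come from the file above.

# Sketch only: single-algorithm training with BaselineModel.
import jax

from clrs._src import baselines


def train(spec, processor_factory, make_sampler, num_steps=100, seed=42):
  sampler = make_sampler()          # assumed: an iterator of _Feedback batches
  dummy_feedback = next(sampler)    # one batch to size encoders/decoders
  model = baselines.BaselineModel(
      spec=spec,
      dummy_trajectory=dummy_feedback,
      processor_factory=processor_factory,
      hidden_dim=128,
      learning_rate=0.001,
      checkpoint_path='/tmp/clrs_demo')      # hypothetical path
  model.init(dummy_feedback.features, seed)  # single-algorithm case

  rng_key = jax.random.PRNGKey(seed)
  for step in range(num_steps):
    rng_key, step_key = jax.random.split(rng_key)
    feedback = next(sampler)
    loss = model.feedback(step_key, feedback)  # one gradient step
    if step % 10 == 0:
      print(f'step {step}: loss {float(loss):.4f}')
  return model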
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """decoders utilities.""" import functools from typing import Dict, Optional import chex from clrs._src import probing from clrs._src import specs import haiku as hk import jax import jax.numpy as jnp _Array = chex.Array _DataPoint = probing.DataPoint _Location = specs.Location _Spec = specs.Spec _Stage = specs.Stage _Type = specs.Type def log_sinkhorn(x: _Array, steps: int, temperature: float, zero_diagonal: bool, noise_rng_key: Optional[_Array]) -> _Array: """Sinkhorn operator in log space, to postprocess permutation pointer logits. Args: x: input of shape [..., n, n], a batch of square matrices. steps: number of iterations. temperature: temperature parameter (as temperature approaches zero, the output approaches a permutation matrix). zero_diagonal: whether to force the diagonal logits towards -inf. noise_rng_key: key to add Gumbel noise. Returns: Elementwise logarithm of a doubly-stochastic matrix (a matrix with non-negative elements whose rows and columns sum to 1). """ assert x.ndim >= 2 assert x.shape[-1] == x.shape[-2] if noise_rng_key is not None: # Add standard Gumbel noise (see https://arxiv.org/abs/1802.08665) noise = -jnp.log(-jnp.log(jax.random.uniform(noise_rng_key, x.shape) + 1e-12) + 1e-12) x = x + noise x /= temperature if zero_diagonal: x = x - 1e6 * jnp.eye(x.shape[-1]) for _ in range(steps): x = jax.nn.log_softmax(x, axis=-1) x = jax.nn.log_softmax(x, axis=-2) return x def construct_decoders(loc: str, t: str, hidden_dim: int, nb_dims: int, name: str): """Constructs decoders.""" linear = functools.partial(hk.Linear, name=f"{name}_dec_linear") if loc == _Location.NODE: # Node decoders. if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: decoders = (linear(1),) elif t == _Type.CATEGORICAL: decoders = (linear(nb_dims),) elif t in [_Type.POINTER, _Type.PERMUTATION_POINTER]: decoders = (linear(hidden_dim), linear(hidden_dim), linear(hidden_dim), linear(1)) else: raise ValueError(f"Invalid Type {t}") elif loc == _Location.EDGE: # Edge decoders. if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: decoders = (linear(1), linear(1), linear(1)) elif t == _Type.CATEGORICAL: decoders = (linear(nb_dims), linear(nb_dims), linear(nb_dims)) elif t == _Type.POINTER: decoders = (linear(hidden_dim), linear(hidden_dim), linear(hidden_dim), linear(hidden_dim), linear(1)) else: raise ValueError(f"Invalid Type {t}") elif loc == _Location.GRAPH: # Graph decoders. 
if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: decoders = (linear(1), linear(1)) elif t == _Type.CATEGORICAL: decoders = (linear(nb_dims), linear(nb_dims)) elif t == _Type.POINTER: decoders = (linear(1), linear(1), linear(1)) else: raise ValueError(f"Invalid Type {t}") else: raise ValueError(f"Invalid Location {loc}") return decoders def construct_diff_decoders(name: str): """Constructs diff decoders.""" linear = functools.partial(hk.Linear, name=f"{name}_diffdec_linear") decoders = {} decoders[_Location.NODE] = linear(1) decoders[_Location.EDGE] = (linear(1), linear(1), linear(1)) decoders[_Location.GRAPH] = (linear(1), linear(1)) return decoders def postprocess(spec: _Spec, preds: Dict[str, _Array], sinkhorn_temperature: float, sinkhorn_steps: int, hard: bool) -> Dict[str, _DataPoint]: """Postprocesses decoder output. This is done on outputs in order to score performance, and on hints in order to score them but also in order to feed them back to the model. At scoring time, the postprocessing mode is "hard", logits will be arg-maxed and masks will be thresholded. However, for the case of the hints that are fed back in the model, the postprocessing can be hard or soft, depending on whether we want to let gradients flow through them or not. Args: spec: The spec of the algorithm whose outputs/hints we are postprocessing. preds: Output and/or hint predictions, as produced by decoders. sinkhorn_temperature: Parameter for the sinkhorn operator on permutation pointers. sinkhorn_steps: Parameter for the sinkhorn operator on permutation pointers. hard: whether to do hard postprocessing, which involves argmax for MASK_ONE, CATEGORICAL and POINTERS, thresholding for MASK, and stop gradient through for SCALAR. If False, soft postprocessing will be used, with softmax, sigmoid and gradients allowed. Returns: The postprocessed `preds`. In "soft" post-processing, POINTER types will change to SOFT_POINTER, so encoders know they do not need to be pre-processed before feeding them back in. """ result = {} for name in preds.keys(): _, loc, t = spec[name] new_t = t data = preds[name] if t == _Type.SCALAR: if hard: data = jax.lax.stop_gradient(data) elif t == _Type.MASK: if hard: data = (data > 0.0) * 1.0 else: data = jax.nn.sigmoid(data) elif t in [_Type.MASK_ONE, _Type.CATEGORICAL]: cat_size = data.shape[-1] if hard: best = jnp.argmax(data, -1) data = hk.one_hot(best, cat_size) else: data = jax.nn.softmax(data, axis=-1) elif t == _Type.POINTER: if hard: data = jnp.argmax(data, -1).astype(float) else: data = jax.nn.softmax(data, -1) new_t = _Type.SOFT_POINTER elif t == _Type.PERMUTATION_POINTER: # Convert the matrix of logits to a doubly stochastic matrix. 
data = log_sinkhorn( x=data, steps=sinkhorn_steps, temperature=sinkhorn_temperature, zero_diagonal=True, noise_rng_key=None) data = jnp.exp(data) if hard: data = jax.nn.one_hot(jnp.argmax(data, axis=-1), data.shape[-1]) else: raise ValueError("Invalid type") result[name] = probing.DataPoint( name=name, location=loc, type_=new_t, data=data) return result def decode_fts( decoders, spec: _Spec, h_t: _Array, adj_mat: _Array, edge_fts: _Array, graph_fts: _Array, inf_bias: bool, inf_bias_edge: bool, repred: bool, ): """Decodes node, edge and graph features.""" output_preds = {} hint_preds = {} for name in decoders: decoder = decoders[name] stage, loc, t = spec[name] if loc == _Location.NODE: preds = _decode_node_fts(decoder, t, h_t, edge_fts, adj_mat, inf_bias, repred) elif loc == _Location.EDGE: preds = _decode_edge_fts(decoder, t, h_t, edge_fts, adj_mat, inf_bias_edge) elif loc == _Location.GRAPH: preds = _decode_graph_fts(decoder, t, h_t, graph_fts) else: raise ValueError("Invalid output type") if stage == _Stage.OUTPUT: output_preds[name] = preds elif stage == _Stage.HINT: hint_preds[name] = preds else: raise ValueError(f"Found unexpected decoder {name}") return hint_preds, output_preds def _decode_node_fts(decoders, t: str, h_t: _Array, edge_fts: _Array, adj_mat: _Array, inf_bias: bool, repred: bool) -> _Array: """Decodes node features.""" if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: preds = jnp.squeeze(decoders[0](h_t), -1) elif t == _Type.CATEGORICAL: preds = decoders[0](h_t) elif t in [_Type.POINTER, _Type.PERMUTATION_POINTER]: p_1 = decoders[0](h_t) p_2 = decoders[1](h_t) p_3 = decoders[2](edge_fts) p_e = jnp.expand_dims(p_2, -2) + p_3 p_m = jnp.maximum(jnp.expand_dims(p_1, -2), jnp.transpose(p_e, (0, 2, 1, 3))) preds = jnp.squeeze(decoders[3](p_m), -1) if inf_bias: per_batch_min = jnp.min(preds, axis=range(1, preds.ndim), keepdims=True) preds = jnp.where(adj_mat > 0.5, preds, jnp.minimum(-1.0, per_batch_min - 1.0)) if t == _Type.PERMUTATION_POINTER: if repred: # testing or validation, no Gumbel noise preds = log_sinkhorn( x=preds, steps=10, temperature=0.1, zero_diagonal=True, noise_rng_key=None) else: # training, add Gumbel noise preds = log_sinkhorn( x=preds, steps=10, temperature=0.1, zero_diagonal=True, noise_rng_key=hk.next_rng_key()) else: raise ValueError("Invalid output type") return preds def _decode_edge_fts(decoders, t: str, h_t: _Array, edge_fts: _Array, adj_mat: _Array, inf_bias_edge: bool) -> _Array: """Decodes edge features.""" pred_1 = decoders[0](h_t) pred_2 = decoders[1](h_t) pred_e = decoders[2](edge_fts) pred = (jnp.expand_dims(pred_1, -2) + jnp.expand_dims(pred_2, -3) + pred_e) if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: preds = jnp.squeeze(pred, -1) elif t == _Type.CATEGORICAL: preds = pred elif t == _Type.POINTER: pred_2 = decoders[3](h_t) p_m = jnp.maximum(jnp.expand_dims(pred, -2), jnp.expand_dims( jnp.expand_dims(pred_2, -3), -3)) preds = jnp.squeeze(decoders[4](p_m), -1) else: raise ValueError("Invalid output type") if inf_bias_edge and t in [_Type.MASK, _Type.MASK_ONE]: per_batch_min = jnp.min(preds, axis=range(1, preds.ndim), keepdims=True) preds = jnp.where(adj_mat > 0.5, preds, jnp.minimum(-1.0, per_batch_min - 1.0)) return preds def _decode_graph_fts(decoders, t: str, h_t: _Array, graph_fts: _Array) -> _Array: """Decodes graph features.""" gr_emb = jnp.max(h_t, axis=-2) pred_n = decoders[0](gr_emb) pred_g = decoders[1](graph_fts) pred = pred_n + pred_g if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: preds = jnp.squeeze(pred, -1) elif t 
== _Type.CATEGORICAL: preds = pred elif t == _Type.POINTER: pred_2 = decoders[2](h_t) ptr_p = jnp.expand_dims(pred, 1) + jnp.transpose(pred_2, (0, 2, 1)) preds = jnp.squeeze(ptr_p, 1) else: raise ValueError("Invalid output type") return preds def maybe_decode_diffs( diff_decoders, h_t: _Array, edge_fts: _Array, graph_fts: _Array, decode_diffs: bool, ) -> Optional[Dict[str, _Array]]: """Optionally decodes node, edge and graph diffs.""" if decode_diffs: preds = {} node = _Location.NODE edge = _Location.EDGE graph = _Location.GRAPH preds[node] = _decode_node_diffs(diff_decoders[node], h_t) preds[edge] = _decode_edge_diffs(diff_decoders[edge], h_t, edge_fts) preds[graph] = _decode_graph_diffs(diff_decoders[graph], h_t, graph_fts) else: preds = None return preds def _decode_node_diffs(decoders, h_t: _Array) -> _Array: """Decodes node diffs.""" return jnp.squeeze(decoders(h_t), -1) def _decode_edge_diffs(decoders, h_t: _Array, edge_fts: _Array) -> _Array: """Decodes edge diffs.""" e_pred_1 = decoders[0](h_t) e_pred_2 = decoders[1](h_t) e_pred_e = decoders[2](edge_fts) preds = jnp.squeeze( jnp.expand_dims(e_pred_1, -1) + jnp.expand_dims(e_pred_2, -2) + e_pred_e, -1, ) return preds def _decode_graph_diffs(decoders, h_t: _Array, graph_fts: _Array) -> _Array: """Decodes graph diffs.""" gr_emb = jnp.max(h_t, axis=-2) g_pred_n = decoders[0](gr_emb) g_pred_g = decoders[1](graph_fts) preds = jnp.squeeze(g_pred_n + g_pred_g, -1) return preds
clrs-master
clrs/_src/decoders.py
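A small check (not a repository test) of the Sinkhorn post-processing above: exponentiating the output of `log_sinkhorn` should give a matrix whose columns sum to 1 exactly (the last normalisation) and whose rows sum to roughly 1, which is the doubly-stochastic form that `postprocess` relies on for PERMUTATION_POINTER predictions. The tensor shape and step count below are arbitrary.

# Sketch only: sanity-check log_sinkhorn output.
import jax
import jax.numpy as jnp

from clrs._src import decoders

logits = jax.random.normal(jax.random.PRNGKey(0), (4, 6, 6))  # batch of 4
log_p = decoders.log_sinkhorn(
    x=logits, steps=25, temperature=0.1, zero_diagonal=False,
    noise_rng_key=None)
p = jnp.exp(log_p)

print(jnp.max(jnp.abs(p.sum(axis=-2) - 1.0)))  # column sums: ~0 deviation
print(jnp.max(jnp.abs(p.sum(axis=-1) - 1.0)))  # row sums: small, shrinks with more steps
hard = jax.nn.one_hot(jnp.argmax(p, axis=-1), p.shape[-1])  # "hard" rounding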
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for processors.py."""

from absl.testing import absltest
import chex
from clrs._src import processors
import haiku as hk
import jax.numpy as jnp


class MemnetTest(absltest.TestCase):

  def test_simple_run_and_check_shapes(self):
    batch_size = 64
    vocab_size = 177
    embedding_size = 64
    sentence_size = 11
    memory_size = 320
    linear_output_size = 128
    num_hops = 2
    use_ln = True

    def forward_fn(queries, stories):
      model = processors.MemNetFull(
          vocab_size=vocab_size,
          embedding_size=embedding_size,
          sentence_size=sentence_size,
          memory_size=memory_size,
          linear_output_size=linear_output_size,
          num_hops=num_hops,
          use_ln=use_ln)
      return model._apply(queries, stories)

    forward = hk.transform(forward_fn)

    queries = jnp.ones([batch_size, sentence_size], dtype=jnp.int32)
    stories = jnp.ones([batch_size, memory_size, sentence_size],
                       dtype=jnp.int32)

    key = hk.PRNGSequence(42)
    params = forward.init(next(key), queries, stories)

    model_output = forward.apply(params, None, queries, stories)
    chex.assert_shape(model_output, [batch_size, vocab_size])
    chex.assert_type(model_output, jnp.float32)


if __name__ == '__main__':
  absltest.main()
clrs-master
clrs/_src/processors_test.py
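Beyond the MemNet test above, a hedged sketch of driving one of the message-passing processors defined later in processors.py (PGN). The shapes follow the asserts in PGN.__call__; the all-ones adjacency matrix and the sizes used here are illustrative only.

import haiku as hk
import jax
import jax.numpy as jnp
from clrs._src import processors

def forward(node_fts, edge_fts, graph_fts, adj_mat, hidden):
  net = processors.PGN(out_size=32)
  return net(node_fts, edge_fts, graph_fts, adj_mat, hidden)

fwd = hk.transform(forward)
b, n, h = 2, 5, 32
node_fts = jnp.zeros((b, n, h))
edge_fts = jnp.zeros((b, n, n, h))
graph_fts = jnp.zeros((b, h))
adj_mat = jnp.ones((b, n, n))      # fully connected graph for illustration
hidden = jnp.zeros((b, n, h))
params = fwd.init(jax.random.PRNGKey(0),
                  node_fts, edge_fts, graph_fts, adj_mat, hidden)
out, tri_msgs = fwd.apply(params, None,
                          node_fts, edge_fts, graph_fts, adj_mat, hidden)
# out: [B, N, out_size] updated node embeddings; tri_msgs is None unless the
# processor was built with use_triplets=True.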
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model base classes and utilities.""" from typing import Dict, List, Tuple import chex from clrs._src import probing from clrs._src import specs import numpy as np _Array = chex.Array Result = Dict[str, probing.DataPoint] def fuse_perm_and_mask(perm: probing.DataPoint, mask: probing.DataPoint) -> probing.DataPoint: """Replace permutation pointers active in the mask with self-pointers. Args: perm: a node permutation_pointer; data shape is expected to be [..., N, N], and ideally one-hot over the last two dimensions, although this method does not check for one-hotness. mask: a mask_one over nodes; data shape is expected to be [..., N], and ideally one-hot over the last dimension, although this method does not check for one-hotness. Returns: A node pointer with shape [..., N]. """ assert perm.type_ == specs.Type.PERMUTATION_POINTER assert perm.location == specs.Location.NODE assert mask.name == perm.name + '_mask' assert mask.type_ == specs.Type.MASK_ONE assert mask.location == specs.Location.NODE assert perm.data.shape[-1] == perm.data.shape[-2] assert perm.data.shape[:-1] == mask.data.shape data = np.where(mask.data > 0.5, np.arange(perm.data.shape[-1]), # self-pointers np.argmax(perm.data, axis=-1)) # original pointers return probing.DataPoint(name=perm.name, type_=specs.Type.POINTER, location=perm.location, data=data) def _reduce_permutations_tuple( targets: Tuple[probing.DataPoint, ...]) -> Tuple[probing.DataPoint, ...]: """Reduce node pointer + mask_one permutation to just node pointer.""" out_targets = [] n_perms = 0 i = 0 while i < len(targets): truth = targets[i] if truth.type_ != specs.Type.PERMUTATION_POINTER: out_targets.append(truth) i += 1 continue truth_mask = targets[i + 1] out_targets.append(fuse_perm_and_mask(truth, truth_mask)) i += 2 n_perms += 1 assert len(out_targets) == len(targets) - n_perms return tuple(out_targets) def _reduce_permutations_dict(predictions: Result) -> Result: """Reduce node pointer + mask_one permutation to just node pointer.""" out_preds = {} n_perms = 0 for k, pred in predictions.items(): if (k.endswith('_mask') and k[:-5] in predictions and predictions[k[:-5]].type_ == specs.Type.PERMUTATION_POINTER): # This mask will be processed with its associated permutation datapoint continue if pred.type_ != specs.Type.PERMUTATION_POINTER: out_preds[k] = pred continue pred_mask = predictions[k + '_mask'] out_preds[k] = fuse_perm_and_mask(pred, pred_mask) n_perms += 1 assert len(out_preds) == len(predictions) - n_perms return out_preds def evaluate_hints( hints: Tuple[probing.DataPoint, ...], lengths: _Array, hint_preds: List[Result], ) -> Dict[str, _Array]: """Evaluate hint predictions.""" evals = {} hints = _reduce_permutations_tuple(hints) hint_preds = [_reduce_permutations_dict(h) for h in hint_preds] for truth in hints: assert truth.name in hint_preds[0] eval_along_time = 
[_evaluate(truth, p[truth.name], idx=i+1, lengths=lengths) for (i, p) in enumerate(hint_preds)] evals[truth.name] = np.sum( [x * np.sum(i+1 < lengths) for i, x in enumerate(eval_along_time)]) / np.sum(lengths - 1) evals[truth.name + '_along_time'] = np.array(eval_along_time) # Unlike outputs, the hints sometimes include scalars, which don't have # a meaningful eval score. So we don't compute a global 'hint score' as we # do for outputs. return evals def evaluate( outputs: Tuple[probing.DataPoint, ...], predictions: Result, ) -> Dict[str, float]: """Evaluate output predictions.""" evals = {} outputs = _reduce_permutations_tuple(outputs) predictions = _reduce_permutations_dict(predictions) for truth in outputs: assert truth.name in predictions pred = predictions[truth.name] evals[truth.name] = _evaluate(truth, pred) # Return a single scalar score that is the mean of all output scores. evals['score'] = sum([v.item() for v in evals.values()]) / len(evals) return evals def _evaluate(truth, pred, idx=None, lengths=None): """Evaluate single prediction of hint or output.""" assert pred.name == truth.name assert pred.location == truth.location assert pred.type_ == truth.type_ if truth.type_ not in _EVAL_FN: raise ValueError('Invalid type') truth_data = truth.data pred_data = pred.data if idx is not None: if np.all(idx >= lengths): return 0. truth_data = truth_data[idx][idx < lengths] pred_data = pred_data[idx < lengths] return _EVAL_FN[truth.type_](pred_data, truth_data) def _eval_one(pred, truth): mask = np.all(truth != specs.OutputClass.MASKED, axis=-1) return np.sum( (np.argmax(pred, -1) == np.argmax(truth, -1)) * mask) / np.sum(mask) def _mask_fn(pred, truth): """Evaluate outputs of type MASK, and account for any class imbalance.""" mask = (truth != specs.OutputClass.MASKED).astype(np.float32) # Use F1 score for the masked outputs to address any imbalance tp = np.sum((((pred > 0.5) * (truth > 0.5)) * 1.0) * mask) fp = np.sum((((pred > 0.5) * (truth < 0.5)) * 1.0) * mask) fn = np.sum((((pred < 0.5) * (truth > 0.5)) * 1.0) * mask) # Protect against division by zero if tp + fp > 0: precision = tp / (tp + fp) else: precision = np.float32(1.0) if tp + fn > 0: recall = tp / (tp + fn) else: recall = np.float32(1.0) if precision + recall > 0.0: f_1 = 2.0 * precision * recall / (precision + recall) else: f_1 = np.float32(0.0) return f_1 _EVAL_FN = { specs.Type.SCALAR: lambda pred, truth: np.mean((pred - truth)**2), specs.Type.MASK: _mask_fn, specs.Type.MASK_ONE: _eval_one, specs.Type.CATEGORICAL: _eval_one, specs.Type.POINTER: lambda pred, truth: np.mean((pred == truth) * 1.0), }
clrs-master
clrs/_src/evaluation.py
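A small worked example (not from the repo) of scoring a single MASK_ONE output with evaluation.evaluate; the probe name 'pivot' and the 2x5 one-hot data are made up.

import numpy as np
from clrs._src import evaluation, probing, specs

truth = probing.DataPoint(name='pivot', location=specs.Location.NODE,
                          type_=specs.Type.MASK_ONE,
                          data=np.eye(5)[np.array([2, 0])])   # batch of 2, 5 nodes
pred = probing.DataPoint(name='pivot', location=specs.Location.NODE,
                         type_=specs.Type.MASK_ONE,
                         data=np.eye(5)[np.array([2, 1])])    # one of two correct
scores = evaluation.evaluate((truth,), {'pivot': pred})
# scores['pivot'] == 0.5 here, and scores['score'] is the mean over all outputs.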
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ==============================================================================
clrs-master
clrs/_src/__init__.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Encoder utilities.""" import functools import chex from clrs._src import probing from clrs._src import specs import haiku as hk import jax.numpy as jnp _Array = chex.Array _DataPoint = probing.DataPoint _Location = specs.Location _Spec = specs.Spec _Stage = specs.Stage _Type = specs.Type def construct_encoders(stage: str, loc: str, t: str, hidden_dim: int, init: str, name: str): """Constructs encoders.""" if init == 'xavier_on_scalars' and stage == _Stage.HINT and t == _Type.SCALAR: initialiser = hk.initializers.TruncatedNormal( stddev=1.0 / jnp.sqrt(hidden_dim)) elif init in ['default', 'xavier_on_scalars']: initialiser = None else: raise ValueError(f'Encoder initialiser {init} not supported.') linear = functools.partial( hk.Linear, w_init=initialiser, name=f'{name}_enc_linear') encoders = [linear(hidden_dim)] if loc == _Location.EDGE and t == _Type.POINTER: # Edge pointers need two-way encoders. encoders.append(linear(hidden_dim)) return encoders def preprocess(dp: _DataPoint, nb_nodes: int) -> _DataPoint: """Pre-process data point. Make sure that the data is ready to be encoded into features. If the data is of POINTER type, we expand the compressed index representation to a full one-hot. But if the data is a SOFT_POINTER, the representation is already expanded and we just overwrite the type as POINTER so that it is treated as such for encoding. Args: dp: A DataPoint to prepare for encoding. nb_nodes: Number of nodes in the graph, necessary to expand pointers to the right dimension. Returns: The datapoint, with data and possibly type modified. """ new_type = dp.type_ if dp.type_ == _Type.POINTER: data = hk.one_hot(dp.data, nb_nodes) else: data = dp.data.astype(jnp.float32) if dp.type_ == _Type.SOFT_POINTER: new_type = _Type.POINTER dp = probing.DataPoint( name=dp.name, location=dp.location, type_=new_type, data=data) return dp def accum_adj_mat(dp: _DataPoint, adj_mat: _Array) -> _Array: """Accumulates adjacency matrix.""" if dp.location == _Location.NODE and dp.type_ in [_Type.POINTER, _Type.PERMUTATION_POINTER]: adj_mat += ((dp.data + jnp.transpose(dp.data, (0, 2, 1))) > 0.5) elif dp.location == _Location.EDGE and dp.type_ == _Type.MASK: adj_mat += ((dp.data + jnp.transpose(dp.data, (0, 2, 1))) > 0.0) return (adj_mat > 0.).astype('float32') # pytype: disable=attribute-error # numpy-scalars def accum_edge_fts(encoders, dp: _DataPoint, edge_fts: _Array) -> _Array: """Encodes and accumulates edge features.""" if dp.location == _Location.NODE and dp.type_ in [_Type.POINTER, _Type.PERMUTATION_POINTER]: encoding = _encode_inputs(encoders, dp) edge_fts += encoding elif dp.location == _Location.EDGE: encoding = _encode_inputs(encoders, dp) if dp.type_ == _Type.POINTER: # Aggregate pointer contributions across sender and receiver nodes. 
encoding_2 = encoders[1](jnp.expand_dims(dp.data, -1)) edge_fts += jnp.mean(encoding, axis=1) + jnp.mean(encoding_2, axis=2) else: edge_fts += encoding return edge_fts def accum_node_fts(encoders, dp: _DataPoint, node_fts: _Array) -> _Array: """Encodes and accumulates node features.""" is_pointer = (dp.type_ in [_Type.POINTER, _Type.PERMUTATION_POINTER]) if ((dp.location == _Location.NODE and not is_pointer) or (dp.location == _Location.GRAPH and dp.type_ == _Type.POINTER)): encoding = _encode_inputs(encoders, dp) node_fts += encoding return node_fts def accum_graph_fts(encoders, dp: _DataPoint, graph_fts: _Array) -> _Array: """Encodes and accumulates graph features.""" if dp.location == _Location.GRAPH and dp.type_ != _Type.POINTER: encoding = _encode_inputs(encoders, dp) graph_fts += encoding return graph_fts def _encode_inputs(encoders, dp: _DataPoint) -> _Array: if dp.type_ == _Type.CATEGORICAL: encoding = encoders[0](dp.data) else: encoding = encoders[0](jnp.expand_dims(dp.data, -1)) return encoding
clrs-master
clrs/_src/encoders.py
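A hedged sketch (not part of the repo) of encoding one node-level SCALAR input into node features with construct_encoders, preprocess and accum_node_fts. It must run inside hk.transform because construct_encoders builds hk.Linear modules; the probe name 'key' and hidden size 16 are assumptions.

import haiku as hk
import jax
import jax.numpy as jnp
from clrs._src import encoders, probing, specs

def encode(dp_data):
  dp = probing.DataPoint(name='key', location=specs.Location.NODE,
                         type_=specs.Type.SCALAR, data=dp_data)   # [B, N]
  dp = encoders.preprocess(dp, nb_nodes=dp_data.shape[1])
  enc = encoders.construct_encoders(specs.Stage.INPUT, dp.location, dp.type_,
                                    hidden_dim=16, init='default', name='key')
  node_fts = jnp.zeros(dp_data.shape + (16,))
  return encoders.accum_node_fts(enc, dp, node_fts)

fwd = hk.transform(encode)
data = jnp.ones((2, 4))                    # batch of 2 graphs, 4 nodes each
params = fwd.init(jax.random.PRNGKey(0), data)
node_fts = fwd.apply(params, None, data)   # [2, 4, 16] encoded node features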
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `probing.py`.""" from absl.testing import absltest from clrs._src import probing import jax.numpy as jnp import numpy as np # pylint: disable=invalid-name class ProbingTest(absltest.TestCase): def test_array(self): A_pos = np.array([1, 2, 0, 4, 3]) expected = np.array([2, 1, 1, 4, 0]) out = probing.array(A_pos) np.testing.assert_array_equal(expected, out) def test_array_cat(self): A = np.array([2, 1, 0, 1, 1]) expected = np.array([ [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 0], [0, 1, 0] ]) out = probing.array_cat(A, 3) np.testing.assert_array_equal(expected, out) def test_heap(self): A_pos = np.array([1, 3, 5, 0, 7, 4, 2, 6]) expected = np.array([3, 1, 2, 1, 5, 1, 6, 3]) out = probing.heap(A_pos, heap_size=6) np.testing.assert_array_equal(expected, out) def test_graph(self): G = np.array([ [0.0, 7.0, -1.0, -3.9, 7.452], [0.0, 0.0, 133.0, 0.0, 9.3], [0.5, 0.1, 0.22, 0.55, 0.666], [7.0, 6.1, 0.2, 0.0, 0.0], [0.0, 3.0, 0.0, 1.0, 0.5] ]) expected = np.array([ [1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 1.0] ]) out = probing.graph(G) np.testing.assert_array_equal(expected, out) def test_mask_one(self): expected = np.array([0, 0, 0, 1, 0]) out = probing.mask_one(3, 5) np.testing.assert_array_equal(expected, out) def test_strings_id(self): T_pos = np.array([0, 1, 2, 3, 4]) P_pos = np.array([0, 1, 2]) expected = np.array([0, 0, 0, 0, 0, 1, 1, 1]) out = probing.strings_id(T_pos, P_pos) np.testing.assert_array_equal(expected, out) def test_strings_pair(self): pair_probe = np.array([ [0.5, 3.1, 9.1, 7.3], [1.0, 0.0, 8.0, 9.3], [0.1, 5.0, 0.0, 1.2] ]) expected = np.array([ [0.0, 0.0, 0.0, 0.5, 3.1, 9.1, 7.3], [0.0, 0.0, 0.0, 1.0, 0.0, 8.0, 9.3], [0.0, 0.0, 0.0, 0.1, 5.0, 0.0, 1.2], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ]) out = probing.strings_pair(pair_probe) np.testing.assert_equal(expected, out) def test_strings_pair_cat(self): pair_probe = np.array([ [0, 2, 1], [2, 2, 0] ]) expected = np.array([ [ [0, 0, 0, -1], [0, 0, 0, -1], [1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], ], [ [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], ], [ [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 0, -1], ], [ [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 0, -1], ], [ [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 0, -1], [0, 0, 0, -1], ], ]) out = probing.strings_pair_cat(pair_probe, 3) np.testing.assert_equal(expected, out) def test_strings_pi(self): T_pos = np.array([0, 1, 2, 3, 4, 5]) P_pos = np.array([0, 1, 2, 3]) pi = np.array([3, 1, 0, 2]) expected = np.array( [0, 1, 2, 3, 4, 5, 9, 7, 6, 8] ) out = probing.strings_pi(T_pos, P_pos, pi) 
np.testing.assert_array_equal(expected, out) def test_strings_pos(self): T_pos = np.array([0, 1, 2, 3, 4]) P_pos = np.array([0, 1, 2, 3]) expected = np.array( [0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.25, 0.5, 0.75] ) out = probing.strings_pos(T_pos, P_pos) np.testing.assert_array_equal(expected, out) def test_strings_pred(self): T_pos = np.array([0, 1, 2, 3, 4]) P_pos = np.array([0, 1, 2]) expected = np.array([0, 0, 1, 2, 3, 5, 5, 6]) out = probing.strings_pred(T_pos, P_pos) np.testing.assert_array_equal(expected, out) class PermutationsTest(absltest.TestCase): def test_pointers_to_permutation(self): pointers = jnp.array([2, 1, 1]) perm, first = probing.predecessor_to_cyclic_predecessor_and_first(pointers) np.testing.assert_array_equal( perm, np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])) np.testing.assert_array_equal(first, np.array([0, 1, 0])) def test_pointers_to_permutation_already_sorted(self): pointers = jnp.array([0, 0, 1, 2, 3, 4]) perm, first = probing.predecessor_to_cyclic_predecessor_and_first(pointers) np.testing.assert_array_equal(perm, np.roll(np.eye(6), 1, 0)) np.testing.assert_array_equal(first, np.eye(6)[0]) if __name__ == "__main__": absltest.main()
clrs-master
clrs/_src/probing_test.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `dataset.py`.""" from typing import Generator, List from absl.testing import absltest from absl.testing import parameterized from clrs._src import dataset from clrs._src import samplers from clrs._src import specs import numpy as np _Array = np.ndarray def _stack_to_shortest(x: List[_Array]) -> _Array: min_len = min(map(len, x)) return np.array([a[:min_len] for a in x]) def _make_sampler(algo: str) -> samplers.Sampler: sampler, _ = samplers.build_sampler( algo, seed=samplers.CLRS30['val']['seed'], num_samples=samplers.CLRS30['val']['num_samples'], length=samplers.CLRS30['val']['length'], ) return sampler def _make_iterable_sampler( algo: str, batch_size: int) -> Generator[samplers.Feedback, None, None]: sampler = _make_sampler(algo) while True: yield sampler.next(batch_size) class DatasetTest(parameterized.TestCase): @parameterized.product( name=specs.CLRS_30_ALGS[:5], chunk_length=[20, 50]) def test_chunkify(self, name: str, chunk_length: int): """Test that samples are concatenated and split in chunks correctly.""" batch_size = 8 ds = _make_iterable_sampler(name, batch_size) chunked_ds = dataset.chunkify( _make_iterable_sampler(name, batch_size), chunk_length) samples = [next(ds) for _ in range(20)] cum_lengths = np.cumsum([s.features.lengths for s in samples], axis=0) n_chunks = np.amax(cum_lengths[-1]).astype(int) // chunk_length + 1 chunks = [next(chunked_ds) for _ in range(n_chunks)] # Check correctness of `is_first` and `is_last` markers start_idx = _stack_to_shortest([np.where(x)[0] for x in np.concatenate( [c.features.is_first for c in chunks]).T]).T end_idx = _stack_to_shortest([np.where(x)[0] for x in np.concatenate( [c.features.is_last for c in chunks]).T]).T assert len(start_idx) >= len(cum_lengths) start_idx = start_idx[:len(cum_lengths)] assert len(end_idx) >= len(cum_lengths) end_idx = end_idx[:len(cum_lengths)] np.testing.assert_equal(start_idx[0], 0) np.testing.assert_array_equal(cum_lengths - 1, end_idx) np.testing.assert_array_equal(cum_lengths[:-1], start_idx[1:]) # Check that inputs, outputs and hints have been copied correctly all_input = np.concatenate([c.features.inputs[0].data for c in chunks]) all_output = np.concatenate([c.outputs[0].data for c in chunks]) all_hint = np.concatenate([c.features.hints[0].data for c in chunks]) for i in range(batch_size): length0 = int(samples[0].features.lengths[i]) length1 = int(samples[1].features.lengths[i]) # Check first sample np.testing.assert_array_equal( all_input[:length0, i], np.tile(samples[0].features.inputs[0].data[i], [length0, 1])) np.testing.assert_array_equal( all_output[:length0, i], np.tile(samples[0].outputs[0].data[i], [length0, 1])) np.testing.assert_array_equal( all_hint[:length0, i], samples[0].features.hints[0].data[:length0, i]) # Check second sample np.testing.assert_array_equal( all_input[length0:length0 + 
length1, i], np.tile(samples[1].features.inputs[0].data[i], [length1, 1])) np.testing.assert_array_equal( all_output[length0:length0 + length1, i], np.tile(samples[1].outputs[0].data[i], [length1, 1])) np.testing.assert_array_equal( all_hint[length0:length0 + length1, i], samples[1].features.hints[0].data[:length1, i]) if __name__ == '__main__': absltest.main()
clrs-master
clrs/_src/dataset_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model base classes and utilities.""" import abc from typing import Dict, List, Optional, Union from clrs._src import probing from clrs._src import samplers from clrs._src import specs Result = Dict[str, probing.DataPoint] class Model(abc.ABC): """Abstract base class for CLRS3-B models.""" def __init__(self, spec: Union[specs.Spec, List[specs.Spec]]): """Set up the problem, prepare to predict on first task.""" if not isinstance(spec, list): spec = [spec] self._spec = spec @abc.abstractmethod def predict(self, features: samplers.Features) -> Result: """Make predictions about the current task.""" pass @abc.abstractmethod def feedback(self, feedback: Optional[samplers.Feedback]): """Advance to the next task, incorporating any available feedback.""" pass
clrs-master
clrs/_src/model.py
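A hedged sketch of the smallest possible Model subclass, purely to show which methods the abstract base class requires. The do-nothing behaviour is illustrative, and 'bfs' is assumed to be a key of specs.SPECS.

from typing import Optional
from clrs._src import model, samplers, specs

class NoOpModel(model.Model):
  """Implements the abstract interface with do-nothing stubs."""

  def predict(self, features: samplers.Features) -> model.Result:
    del features
    return {}  # a real model returns a DataPoint per output probe

  def feedback(self, feedback: Optional[samplers.Feedback]):
    del feedback  # a real model would use this signal to update its parameters

m = NoOpModel(specs.SPECS['bfs'])  # a spec (or list of specs) for the task(s)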
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """CLRS dataset.""" import dataclasses import functools from typing import Iterator from clrs._src import probing from clrs._src import samplers from clrs._src import specs import jax import numpy as np import tensorflow as tf import tensorflow_datasets as tfds def _correct_axis_filtering(tensor, index, name): if 'hint_' in name: return tensor[:, index] else: return tensor[index] @dataclasses.dataclass class CLRSConfig(tfds.core.BuilderConfig): """Specify the split in the variant because they have different shapes.""" split: str = '' DEFAULT_BUILDER_CONFIGS = [] def _build_default_builder_configs(): for split in ['train', 'val', 'test']: for alg in specs.CLRS_30_ALGS: DEFAULT_BUILDER_CONFIGS.append( CLRSConfig(name=f'{alg}_{split}', split=split)) _build_default_builder_configs() class CLRSDataset(tfds.core.GeneratorBasedBuilder): """DatasetBuilder for my_dataset dataset.""" VERSION = tfds.core.Version('1.0.0') RELEASE_NOTES = { '1.0.0': 'Initial release.', } BUILDER_CONFIGS = DEFAULT_BUILDER_CONFIGS _instantiated_dataset = None _instantiated_dataset_name = '' _instantiated_dataset_split = '' def _num_samples(self, algorithm_name): num_samples = samplers.CLRS30[self._builder_config.split]['num_samples'] # pytype: disable=attribute-error # always-use-return-annotations if self._builder_config.split != 'train': # pytype: disable=attribute-error # always-use-return-annotations # Generate more samples for those algorithms in which the number of # signals is small. num_samples *= specs.CLRS_30_ALGS_SETTINGS[algorithm_name][ 'num_samples_multiplier'] return num_samples def _create_data(self, single_sample): algorithm_name = '_'.join(self._builder_config.name.split('_')[:-1]) num_samples = self._num_samples(algorithm_name) sampler, _ = samplers.build_sampler( algorithm_name, seed=samplers.CLRS30[self._builder_config.split]['seed'], # pytype: disable=attribute-error # always-use-return-annotations num_samples=num_samples, length=samplers.CLRS30[self._builder_config.split]['length'], # pytype: disable=attribute-error # always-use-return-annotations ) sampled_dataset = sampler.next(batch_size=1 if single_sample else None) data = {'input_' + t.name: t.data for t in sampled_dataset.features.inputs} # All other data points have input_, hint_, and output_ prefixes, so we # guarantee that this key is unused. 
data['lengths'] = sampled_dataset.features.lengths data.update({'output_' + t.name: t.data for t in sampled_dataset.outputs}) data.update({ 'hint_' + t.name: t.data for t in sampled_dataset.features.hints}) self._instantiated_dataset = data def _info(self) -> tfds.core.DatasetInfo: if tf.io.gfile.exists(self.data_dir): info = tfds.core.DatasetInfo(builder=self) info.read_from_directory(self.data_dir) return info if (self._instantiated_dataset_name != self._builder_config.name or self._instantiated_dataset_split != self._builder_config.split): # pytype: disable=attribute-error # always-use-return-annotations self._create_data(single_sample=True) data = {k: _correct_axis_filtering(v, 0, k) for k, v in self._instantiated_dataset.items()} data_info = { k: tfds.features.Tensor(shape=v.shape, dtype=tf.dtypes.as_dtype( v.dtype)) for k, v in data.items()} return tfds.core.DatasetInfo( builder=self, features=tfds.features.FeaturesDict(data_info), ) def _split_generators(self, dl_manager: tfds.download.DownloadManager): """Download the data and define splits.""" if (self._instantiated_dataset_name != self._builder_config.name or self._instantiated_dataset_split != self._builder_config.split): # pytype: disable=attribute-error # always-use-return-annotations self._create_data(single_sample=False) self._instantiated_dataset_name = self._builder_config.name self._instantiated_dataset_split = self._builder_config.split # pytype: disable=attribute-error # always-use-return-annotations return {self._builder_config.split: self._generate_examples()} # pytype: disable=attribute-error # always-use-return-annotations def _generate_examples(self): """Generator of examples for each split.""" algorithm_name = '_'.join(self._builder_config.name.split('_')[:-1]) for i in range(self._num_samples(algorithm_name)): data = {k: _correct_axis_filtering(v, i, k) for k, v in self._instantiated_dataset.items()} yield str(i), data def _get_clrs_file_name(): return f'CLRS30_v{CLRSDataset.VERSION}.tar.gz' def get_dataset_gcp_url(): return f'https://storage.googleapis.com/dm-clrs/{_get_clrs_file_name()}' def get_clrs_folder(): return f'CLRS30_v{CLRSDataset.VERSION}' def _preprocess(data_point, algorithm=None): """Convert sampled inputs into DataPoints.""" inputs = [] outputs = [] hints = [] lengths = None for name, data in data_point.items(): if name == 'lengths': lengths = data continue data_point_name = name.split('_') name = '_'.join(data_point_name[1:]) (stage, location, dp_type) = specs.SPECS[algorithm][name] assert stage == data_point_name[0] if stage == specs.Stage.HINT: data = tf.experimental.numpy.swapaxes(data, 0, 1) dp = probing.DataPoint(name, location, dp_type, data) if stage == specs.Stage.INPUT: inputs.append(dp) elif stage == specs.Stage.OUTPUT: outputs.append(dp) else: hints.append(dp) return samplers.Feedback( samplers.Features(tuple(inputs), tuple(hints), lengths), tuple(outputs)) def create_dataset(folder, algorithm, split, batch_size): dataset = tfds.load(f'clrs_dataset/{algorithm}_{split}', data_dir=folder, split=split) num_samples = len(dataset) # Must be done here for correct size dataset = dataset.repeat() dataset = dataset.batch(batch_size) return (dataset.map(lambda d: _preprocess(d, algorithm=algorithm)), num_samples, specs.SPECS[algorithm]) def _copy_hint(source, dest, i, start_source, start_dest, to_add): """Copy from full-sample hint to a hint chunk.""" assert np.all(dest[start_dest:, i:] == 0) assert start_dest < dest.shape[0] assert start_dest + to_add <= dest.shape[0] assert start_source < 
source.shape[0] assert start_source + to_add <= source.shape[0] dest[start_dest:start_dest+to_add, i] = source[ start_source:start_source+to_add, i] return dest def _copy_io(source, dest, i, start_dest, to_add): """Copy from an input or output to an input or output chunk.""" assert np.all(dest[start_dest:, i:] == 0) dest[start_dest:start_dest+to_add, i] = source[i] return dest def chunkify(dataset: Iterator[samplers.Feedback], chunk_length: int): """Generator of fixed-length chunks from full-trajectory samples. Args: dataset: full-sample dataset as numpy iterator. chunk_length: time length of chunks. Yields: Fixed-timelength chunks of data. Each tensor of inputs, hints and outputs has dimensions chunk_length x batch_size x ... Samples are not time-padded, after the end of one sample immediately comes the next. Since different samples can have different time lengths, the beginnings and ends of samples within a batch do not need to coincide. For this reason, the chunked dataset features include two chunk_length x batch_size int tensors, `is_first` and `is_last`, that mark the beginning and end of each sample. For example, if `chunk_legnth`==6 and `batch_size`==2 and the first full-sample batch had one sample of length 3 and one of length 5, we would have a first chunked batch with the following `is_first` and `is_last` tensors: is_first = [[1, 1] is_last = [[0, 0] ( sample id [[0 1] [0, 0] [0, 0] [0 1] [0, 0] [1, 0] [0 1] [1, 0] [0, 0] [2 1] [0, 0] [0, 1] [2 1] [0, 1]] [0, 0]] [2 3]] ) while the data in the inputs, outputs and hints tensors would correspond to samples as identified by the sample_id indicated above for reference. Notice that, while in the full-sample dataset inputs and outputs have no time dimension, here they do; the input and output tensors are simply repeated along each sample's time length. 
""" def _get_batch(): d = next(dataset) return (d.features.inputs, d.features.hints, d.outputs, d.features.lengths.astype(int)) inputs, hints, outputs, lengths = _get_batch() for inp in inputs: if inp.location in [specs.Location.NODE, specs.Location.EDGE]: batch_size = inp.data.shape[0] break io_chunk = lambda x: np.zeros((chunk_length,) + x.shape, dtype=x.dtype) chunk_inputs = jax.tree_util.tree_map(io_chunk, inputs) chunk_outputs = jax.tree_util.tree_map(io_chunk, outputs) hint_chunk = lambda x: np.zeros((chunk_length,) + x.shape[1:], dtype=x.dtype) chunk_hints = jax.tree_util.tree_map(hint_chunk, hints) inputs = [inputs] hints = [hints] outputs = [outputs] left = [lengths.copy()] lengths = [lengths.copy()] while True: # Create a new empty chunk chunk_inputs = jax.tree_util.tree_map(np.zeros_like, chunk_inputs) chunk_hints = jax.tree_util.tree_map(np.zeros_like, chunk_hints) chunk_outputs = jax.tree_util.tree_map(np.zeros_like, chunk_outputs) start_mark = np.zeros((chunk_length, batch_size), dtype=int) end_mark = np.zeros((chunk_length, batch_size), dtype=int) # Get enough data batches to fill the new chunk while np.any(np.sum(left, axis=0) < chunk_length): inp, hh, out, ll = _get_batch() inputs.append(inp) hints.append(hh) outputs.append(out) left.append(ll.copy()) lengths.append(ll.copy()) # Fill the chunk, one batch element at a time for i in range(batch_size): total, idx = 0, 0 while total < chunk_length: to_add = min(left[idx][i], chunk_length - total) if to_add: start = lengths[idx][i] - left[idx][i] assert start >= 0 f_io = functools.partial(_copy_io, i=i, start_dest=total, to_add=to_add) chunk_inputs = jax.tree_util.tree_map(f_io, inputs[idx], chunk_inputs) chunk_outputs = jax.tree_util.tree_map(f_io, outputs[idx], chunk_outputs) f_hint = functools.partial(_copy_hint, i=i, start_source=start, start_dest=total, to_add=to_add) chunk_hints = jax.tree_util.tree_map(f_hint, hints[idx], chunk_hints) if start == 0: start_mark[total, i] = 1 total += to_add left[idx][i] -= to_add assert left[idx][i] >= 0 if left[idx][i] == 0: end_mark[total - 1, i] = 1 idx += 1 assert total == chunk_length while left and np.all(left[0] == 0): inputs.pop(0) hints.pop(0) outputs.pop(0) left.pop(0) lengths.pop(0) yield samplers.Feedback( samplers.FeaturesChunked(chunk_inputs, chunk_hints, start_mark, end_mark), chunk_outputs) def create_chunked_dataset(folder, algorithm, split, batch_size, chunk_length): dataset = tfds.load(f'clrs_dataset/{algorithm}_{split}', data_dir=folder, split=split) dataset = dataset.repeat() dataset = dataset.batch(batch_size) dataset = dataset.map(lambda d: _preprocess(d, algorithm=algorithm)) dataset = dataset.as_numpy_iterator() return chunkify(dataset, chunk_length), specs.SPECS[algorithm]
clrs-master
clrs/_src/dataset.py
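A hedged sketch of feeding chunkify directly from an in-memory sampler instead of the TFDS pipeline; the algorithm name 'insertion_sort' and the sampler settings are illustrative only.

from clrs._src import dataset, samplers

def iterable_sampler(algo, batch_size):
  # Mirrors _make_iterable_sampler in dataset_test.py, with made-up settings.
  sampler, _ = samplers.build_sampler(algo, seed=0, num_samples=32, length=16)
  while True:
    yield sampler.next(batch_size)

chunked = dataset.chunkify(iterable_sampler('insertion_sort', batch_size=4),
                           chunk_length=20)
feedback = next(chunked)
# feedback.features is a FeaturesChunked whose inputs, hints, is_first and
# is_last all lead with a time dimension of size chunk_length.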
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Probing utilities. The dataflow for an algorithm is represented by `(stage, loc, type, data)` "probes" that are valid under that algorithm's spec (see `specs.py`). When constructing probes, it is convenient to represent these fields in a nested format (`ProbesDict`) to facilate efficient contest-based look-up. """ import functools from typing import Dict, List, Tuple, Union import attr from clrs._src import specs import jax import jax.numpy as jnp import numpy as np import tensorflow as tf _Location = specs.Location _Stage = specs.Stage _Type = specs.Type _OutputClass = specs.OutputClass _Array = np.ndarray _Data = Union[_Array, List[_Array]] _DataOrType = Union[_Data, str] ProbesDict = Dict[ str, Dict[str, Dict[str, Dict[str, _DataOrType]]]] def _convert_to_str(element): if isinstance(element, tf.Tensor): return element.numpy().decode('utf-8') elif isinstance(element, (np.ndarray, bytes)): return element.decode('utf-8') else: return element # First anotation makes this object jax.jit/pmap friendly, second one makes this # tf.data.Datasets friendly. @jax.tree_util.register_pytree_node_class @attr.define class DataPoint: """Describes a data point.""" _name: str _location: str _type_: str data: _Array @property def name(self): return _convert_to_str(self._name) @property def location(self): return _convert_to_str(self._location) @property def type_(self): return _convert_to_str(self._type_) def __repr__(self): s = f'DataPoint(name="{self.name}",\tlocation={self.location},\t' return s + f'type={self.type_},\tdata=Array{self.data.shape})' def tree_flatten(self): data = (self.data,) meta = (self.name, self.location, self.type_) return data, meta @classmethod def tree_unflatten(cls, meta, data): name, location, type_ = meta subdata, = data return DataPoint(name, location, type_, subdata) class ProbeError(Exception): pass def initialize(spec: specs.Spec) -> ProbesDict: """Initializes an empty `ProbesDict` corresponding with the provided spec.""" probes = dict() for stage in [_Stage.INPUT, _Stage.OUTPUT, _Stage.HINT]: probes[stage] = {} for loc in [_Location.NODE, _Location.EDGE, _Location.GRAPH]: probes[stage][loc] = {} for name in spec: stage, loc, t = spec[name] probes[stage][loc][name] = {} probes[stage][loc][name]['data'] = [] probes[stage][loc][name]['type_'] = t # Pytype thinks initialize() returns a ProbesDict with a str for all final # values instead of _DataOrType. 
return probes # pytype: disable=bad-return-type def push(probes: ProbesDict, stage: str, next_probe): """Pushes a probe into an existing `ProbesDict`.""" for loc in [_Location.NODE, _Location.EDGE, _Location.GRAPH]: for name in probes[stage][loc]: if name not in next_probe: raise ProbeError(f'Missing probe for {name}.') if isinstance(probes[stage][loc][name]['data'], _Array): raise ProbeError('Attemping to push to finalized `ProbesDict`.') # Pytype thinks initialize() returns a ProbesDict with a str for all final # values instead of _DataOrType. probes[stage][loc][name]['data'].append(next_probe[name]) # pytype: disable=attribute-error def finalize(probes: ProbesDict): """Finalizes a `ProbesDict` by stacking/squeezing `data` field.""" for stage in [_Stage.INPUT, _Stage.OUTPUT, _Stage.HINT]: for loc in [_Location.NODE, _Location.EDGE, _Location.GRAPH]: for name in probes[stage][loc]: if isinstance(probes[stage][loc][name]['data'], _Array): raise ProbeError('Attemping to re-finalize a finalized `ProbesDict`.') if stage == _Stage.HINT: # Hints are provided for each timestep. Stack them here. probes[stage][loc][name]['data'] = np.stack( probes[stage][loc][name]['data']) else: # Only one instance of input/output exist. Remove leading axis. probes[stage][loc][name]['data'] = np.squeeze( np.array(probes[stage][loc][name]['data'])) def split_stages( probes: ProbesDict, spec: specs.Spec, ) -> Tuple[List[DataPoint], List[DataPoint], List[DataPoint]]: """Splits contents of `ProbesDict` into `DataPoint`s by stage.""" inputs = [] outputs = [] hints = [] for name in spec: stage, loc, t = spec[name] if stage not in probes: raise ProbeError(f'Missing stage {stage}.') if loc not in probes[stage]: raise ProbeError(f'Missing location {loc}.') if name not in probes[stage][loc]: raise ProbeError(f'Missing probe {name}.') if 'type_' not in probes[stage][loc][name]: raise ProbeError(f'Probe {name} missing attribute `type_`.') if 'data' not in probes[stage][loc][name]: raise ProbeError(f'Probe {name} missing attribute `data`.') if t != probes[stage][loc][name]['type_']: raise ProbeError(f'Probe {name} of incorrect type {t}.') data = probes[stage][loc][name]['data'] if not isinstance(probes[stage][loc][name]['data'], _Array): raise ProbeError((f'Invalid `data` for probe "{name}". 
' + 'Did you forget to call `probing.finalize`?')) if t in [_Type.MASK, _Type.MASK_ONE, _Type.CATEGORICAL]: # pytype: disable=attribute-error if not ((data == 0) | (data == 1) | (data == -1)).all(): raise ProbeError(f'0|1|-1 `data` for probe "{name}"') # pytype: enable=attribute-error if t in [_Type.MASK_ONE, _Type.CATEGORICAL ] and not np.all(np.sum(np.abs(data), -1) == 1): raise ProbeError(f'Expected one-hot `data` for probe "{name}"') dim_to_expand = 1 if stage == _Stage.HINT else 0 data_point = DataPoint(name=name, location=loc, type_=t, data=np.expand_dims(data, dim_to_expand)) if stage == _Stage.INPUT: inputs.append(data_point) elif stage == _Stage.OUTPUT: outputs.append(data_point) else: hints.append(data_point) return inputs, outputs, hints # pylint: disable=invalid-name def array(A_pos: np.ndarray) -> np.ndarray: """Constructs an `array` probe.""" probe = np.arange(A_pos.shape[0]) for i in range(1, A_pos.shape[0]): probe[A_pos[i]] = A_pos[i - 1] return probe def array_cat(A: np.ndarray, n: int) -> np.ndarray: """Constructs an `array_cat` probe.""" assert n > 0 probe = np.zeros((A.shape[0], n)) for i in range(A.shape[0]): probe[i, A[i]] = 1 return probe def heap(A_pos: np.ndarray, heap_size: int) -> np.ndarray: """Constructs a `heap` probe.""" assert heap_size > 0 probe = np.arange(A_pos.shape[0]) for i in range(1, heap_size): probe[A_pos[i]] = A_pos[(i - 1) // 2] return probe def graph(A: np.ndarray) -> np.ndarray: """Constructs a `graph` probe.""" probe = (A != 0) * 1.0 probe = ((A + np.eye(A.shape[0])) != 0) * 1.0 return probe def mask_one(i: int, n: int) -> np.ndarray: """Constructs a `mask_one` probe.""" assert n > i probe = np.zeros(n) probe[i] = 1 return probe def strings_id(T_pos: np.ndarray, P_pos: np.ndarray) -> np.ndarray: """Constructs a `strings_id` probe.""" probe_T = np.zeros(T_pos.shape[0]) probe_P = np.ones(P_pos.shape[0]) return np.concatenate([probe_T, probe_P]) def strings_pair(pair_probe: np.ndarray) -> np.ndarray: """Constructs a `strings_pair` probe.""" n = pair_probe.shape[0] m = pair_probe.shape[1] probe_ret = np.zeros((n + m, n + m)) for i in range(0, n): for j in range(0, m): probe_ret[i, j + n] = pair_probe[i, j] return probe_ret def strings_pair_cat(pair_probe: np.ndarray, nb_classes: int) -> np.ndarray: """Constructs a `strings_pair_cat` probe.""" assert nb_classes > 0 n = pair_probe.shape[0] m = pair_probe.shape[1] # Add an extra class for 'this cell left blank.' probe_ret = np.zeros((n + m, n + m, nb_classes + 1)) for i in range(0, n): for j in range(0, m): probe_ret[i, j + n, int(pair_probe[i, j])] = _OutputClass.POSITIVE # Fill the blank cells. 
for i_1 in range(0, n): for i_2 in range(0, n): probe_ret[i_1, i_2, nb_classes] = _OutputClass.MASKED for j_1 in range(0, m): for x in range(0, n + m): probe_ret[j_1 + n, x, nb_classes] = _OutputClass.MASKED return probe_ret def strings_pi(T_pos: np.ndarray, P_pos: np.ndarray, pi: np.ndarray) -> np.ndarray: """Constructs a `strings_pi` probe.""" probe = np.arange(T_pos.shape[0] + P_pos.shape[0]) for j in range(P_pos.shape[0]): probe[T_pos.shape[0] + P_pos[j]] = T_pos.shape[0] + pi[P_pos[j]] return probe def strings_pos(T_pos: np.ndarray, P_pos: np.ndarray) -> np.ndarray: """Constructs a `strings_pos` probe.""" probe_T = np.copy(T_pos) * 1.0 / T_pos.shape[0] probe_P = np.copy(P_pos) * 1.0 / P_pos.shape[0] return np.concatenate([probe_T, probe_P]) def strings_pred(T_pos: np.ndarray, P_pos: np.ndarray) -> np.ndarray: """Constructs a `strings_pred` probe.""" probe = np.arange(T_pos.shape[0] + P_pos.shape[0]) for i in range(1, T_pos.shape[0]): probe[T_pos[i]] = T_pos[i - 1] for j in range(1, P_pos.shape[0]): probe[T_pos.shape[0] + P_pos[j]] = T_pos.shape[0] + P_pos[j - 1] return probe @functools.partial(jnp.vectorize, signature='(n)->(n,n),(n)') def predecessor_to_cyclic_predecessor_and_first( pointers: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]: """Converts predecessor pointers to cyclic predecessor + first node mask. This function assumes that the pointers represent a linear order of the nodes (akin to a linked list), where each node points to its predecessor and the first node points to itself. It returns the same pointers, except that the first node points to the last, and a mask_one marking the first node. Example: ``` pointers = [2, 1, 1] P = [[0, 0, 1], [1, 0, 0], [0, 1, 0]], M = [0, 1, 0] ``` Args: pointers: array of shape [N] containing pointers. The pointers are assumed to describe a linear order such that `pointers[i]` is the predecessor of node `i`. Returns: Permutation pointers `P` of shape [N] and one-hot vector `M` of shape [N]. """ nb_nodes = pointers.shape[-1] pointers_one_hot = jax.nn.one_hot(pointers, nb_nodes) # Find the index of the last node: it's the node that no other node points to. last = pointers_one_hot.sum(-2).argmin() # Find the first node: should be the only one pointing to itself. first = pointers_one_hot.diagonal().argmax() mask = jax.nn.one_hot(first, nb_nodes) pointers_one_hot += mask[..., None] * jax.nn.one_hot(last, nb_nodes) pointers_one_hot -= mask[..., None] * mask return pointers_one_hot, mask
clrs-master
clrs/_src/probing.py
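A worked sketch (not from the repo) of the initialize/push/finalize/split_stages cycle for a tiny, made-up two-probe spec.

import numpy as np
from clrs._src import probing, specs

spec = {
    'key': (specs.Stage.INPUT, specs.Location.NODE, specs.Type.SCALAR),
    'pivot': (specs.Stage.HINT, specs.Location.NODE, specs.Type.MASK_ONE),
}
probes = probing.initialize(spec)
probing.push(probes, specs.Stage.INPUT,
             next_probe={'key': np.array([0.3, 0.1, 0.6])})
for step in range(2):  # one hint probe per simulated algorithm step
  probing.push(probes, specs.Stage.HINT,
               next_probe={'pivot': probing.mask_one(step, 3)})
probing.finalize(probes)
inputs, outputs, hints = probing.split_stages(probes, spec)
# inputs[0].data has shape [1, 3]; hints[0].data has shape [T=2, 1, 3];
# outputs is empty because the toy spec declares no output probes.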
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for calculating losses.""" from typing import Dict, List, Tuple import chex from clrs._src import probing from clrs._src import specs import haiku as hk import jax import jax.numpy as jnp _Array = chex.Array _DataPoint = probing.DataPoint _Location = specs.Location _OutputClass = specs.OutputClass _PredTrajectory = Dict[str, _Array] _PredTrajectories = List[_PredTrajectory] _Type = specs.Type EPS = 1e-12 def _expand_to(x: _Array, y: _Array) -> _Array: while len(y.shape) > len(x.shape): x = jnp.expand_dims(x, -1) return x def _expand_and_broadcast_to(x: _Array, y: _Array) -> _Array: return jnp.broadcast_to(_expand_to(x, y), y.shape) def output_loss_chunked(truth: _DataPoint, pred: _Array, is_last: _Array, nb_nodes: int) -> float: """Output loss for time-chunked training.""" mask = None if truth.type_ == _Type.SCALAR: loss = (pred - truth.data)**2 elif truth.type_ == _Type.MASK: loss = ( jnp.maximum(pred, 0) - pred * truth.data + jnp.log1p(jnp.exp(-jnp.abs(pred)))) mask = (truth.data != _OutputClass.MASKED) elif truth.type_ in [_Type.MASK_ONE, _Type.CATEGORICAL]: mask = jnp.any(truth.data == _OutputClass.POSITIVE, axis=-1) masked_truth = truth.data * (truth.data != _OutputClass.MASKED).astype( jnp.float32) loss = -jnp.sum(masked_truth * jax.nn.log_softmax(pred), axis=-1) elif truth.type_ == _Type.POINTER: loss = -jnp.sum( hk.one_hot(truth.data, nb_nodes) * jax.nn.log_softmax(pred), axis=-1) elif truth.type_ == _Type.PERMUTATION_POINTER: # Predictions are NxN logits aiming to represent a doubly stochastic matrix. # Compute the cross entropy between doubly stochastic pred and truth_data loss = -jnp.sum(truth.data * pred, axis=-1) if mask is not None: mask = mask * _expand_and_broadcast_to(is_last, loss) else: mask = _expand_and_broadcast_to(is_last, loss) total_mask = jnp.maximum(jnp.sum(mask), EPS) return jnp.sum(jnp.where(mask, loss, 0.0)) / total_mask def output_loss(truth: _DataPoint, pred: _Array, nb_nodes: int) -> float: """Output loss for full-sample training.""" if truth.type_ == _Type.SCALAR: total_loss = jnp.mean((pred - truth.data)**2) elif truth.type_ == _Type.MASK: loss = ( jnp.maximum(pred, 0) - pred * truth.data + jnp.log1p(jnp.exp(-jnp.abs(pred)))) mask = (truth.data != _OutputClass.MASKED).astype(jnp.float32) total_loss = jnp.sum(loss * mask) / jnp.sum(mask) elif truth.type_ in [_Type.MASK_ONE, _Type.CATEGORICAL]: masked_truth = truth.data * (truth.data != _OutputClass.MASKED).astype( jnp.float32) total_loss = (-jnp.sum(masked_truth * jax.nn.log_softmax(pred)) / jnp.sum(truth.data == _OutputClass.POSITIVE)) elif truth.type_ == _Type.POINTER: total_loss = ( jnp.mean(-jnp.sum( hk.one_hot(truth.data, nb_nodes) * jax.nn.log_softmax(pred), axis=-1))) elif truth.type_ == _Type.PERMUTATION_POINTER: # Predictions are NxN logits aiming to represent a doubly stochastic matrix. 
# Compute the cross entropy between doubly stochastic pred and truth_data total_loss = jnp.mean(-jnp.sum(truth.data * pred, axis=-1)) return total_loss def hint_loss_chunked( truth: _DataPoint, pred: _Array, is_first: _Array, nb_nodes: int, ): """Hint loss for time-chunked training.""" loss, mask = _hint_loss( truth_data=truth.data, truth_type=truth.type_, pred=pred, nb_nodes=nb_nodes, ) mask *= (1 - _expand_to(is_first, loss)).astype(jnp.float32) loss = jnp.sum(loss * mask) / jnp.maximum(jnp.sum(mask), EPS) return loss def hint_loss( truth: _DataPoint, preds: List[_Array], lengths: _Array, nb_nodes: int, verbose: bool = False, ): """Hint loss for full-sample training.""" total_loss = 0. verbose_loss = {} length = truth.data.shape[0] - 1 loss, mask = _hint_loss( truth_data=truth.data[1:], truth_type=truth.type_, pred=jnp.stack(preds), nb_nodes=nb_nodes, ) mask *= _is_not_done_broadcast(lengths, jnp.arange(length)[:, None], loss) loss = jnp.sum(loss * mask) / jnp.maximum(jnp.sum(mask), EPS) if verbose: verbose_loss['loss_' + truth.name] = loss else: total_loss += loss return verbose_loss if verbose else total_loss def _hint_loss( truth_data: _Array, truth_type: str, pred: _Array, nb_nodes: int, ) -> Tuple[_Array, _Array]: """Hint loss helper.""" mask = None if truth_type == _Type.SCALAR: loss = (pred - truth_data)**2 elif truth_type == _Type.MASK: loss = (jnp.maximum(pred, 0) - pred * truth_data + jnp.log1p(jnp.exp(-jnp.abs(pred)))) mask = (truth_data != _OutputClass.MASKED).astype(jnp.float32) # pytype: disable=attribute-error # numpy-scalars elif truth_type == _Type.MASK_ONE: loss = -jnp.sum(truth_data * jax.nn.log_softmax(pred), axis=-1, keepdims=True) elif truth_type == _Type.CATEGORICAL: loss = -jnp.sum(truth_data * jax.nn.log_softmax(pred), axis=-1) mask = jnp.any(truth_data == _OutputClass.POSITIVE, axis=-1).astype( jnp.float32) elif truth_type == _Type.POINTER: loss = -jnp.sum( hk.one_hot(truth_data, nb_nodes) * jax.nn.log_softmax(pred), axis=-1) elif truth_type == _Type.PERMUTATION_POINTER: # Predictions are NxN logits aiming to represent a doubly stochastic matrix. # Compute the cross entropy between doubly stochastic pred and truth_data loss = -jnp.sum(truth_data * pred, axis=-1) if mask is None: mask = jnp.ones_like(loss) return loss, mask def _is_not_done_broadcast(lengths, i, tensor): is_not_done = (lengths > i + 1) * 1.0 while len(is_not_done.shape) < len(tensor.shape): # pytype: disable=attribute-error # numpy-scalars is_not_done = jnp.expand_dims(is_not_done, -1) return is_not_done
clrs-master
clrs/_src/losses.py
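A small worked example (not from the repo) of the full-sample output loss for a MASK-typed probe; the probe name 'in_tree' and the 2x2 values are made up.

import jax.numpy as jnp
from clrs._src import losses, probing, specs

truth = probing.DataPoint(name='in_tree', location=specs.Location.EDGE,
                          type_=specs.Type.MASK,
                          data=jnp.array([[1., 0.], [0., 1.]]))
logits = jnp.array([[2.0, -1.5], [-0.5, 3.0]])   # raw predictions, same shape
loss = losses.output_loss(truth=truth, pred=logits, nb_nodes=2)
# Sigmoid cross-entropy, averaged over the entries not flagged as MASKED.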
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `evaluation.py`.""" from absl.testing import absltest from clrs._src import evaluation from clrs._src import probing from clrs._src import specs import jax import jax.numpy as jnp import numpy as np class EvaluationTest(absltest.TestCase): def test_reduce_permutations(self): b = 8 n = 16 pred = jnp.stack([jax.random.permutation(jax.random.PRNGKey(i), n) for i in range(b)]) heads = jax.random.randint(jax.random.PRNGKey(42), (b,), 0, n) perm = probing.DataPoint(name='test', type_=specs.Type.PERMUTATION_POINTER, location=specs.Location.NODE, data=jax.nn.one_hot(pred, n)) mask = probing.DataPoint(name='test_mask', type_=specs.Type.MASK_ONE, location=specs.Location.NODE, data=jax.nn.one_hot(heads, n)) output = evaluation.fuse_perm_and_mask(perm=perm, mask=mask) expected_output = np.array(pred) expected_output[np.arange(b), heads] = heads self.assertEqual(output.name, 'test') self.assertEqual(output.type_, specs.Type.POINTER) self.assertEqual(output.location, specs.Location.NODE) np.testing.assert_allclose(output.data, expected_output) if __name__ == '__main__': absltest.main()
clrs-master
clrs/_src/evaluation_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """JAX implementation of baseline processor networks.""" import abc from typing import Any, Callable, List, Optional, Tuple import chex import haiku as hk import jax import jax.numpy as jnp import numpy as np _Array = chex.Array _Fn = Callable[..., Any] BIG_NUMBER = 1e6 PROCESSOR_TAG = 'clrs_processor' class Processor(hk.Module): """Processor abstract base class.""" def __init__(self, name: str): if not name.endswith(PROCESSOR_TAG): name = name + '_' + PROCESSOR_TAG super().__init__(name=name) @abc.abstractmethod def __call__( self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **kwargs, ) -> Tuple[_Array, Optional[_Array]]: """Processor inference step. Args: node_fts: Node features. edge_fts: Edge features. graph_fts: Graph features. adj_mat: Graph adjacency matrix. hidden: Hidden features. **kwargs: Extra kwargs. Returns: Output of processor inference step as a 2-tuple of (node, edge) embeddings. The edge embeddings can be None. """ pass @property def inf_bias(self): return False @property def inf_bias_edge(self): return False class GAT(Processor): """Graph Attention Network (Velickovic et al., ICLR 2018).""" def __init__( self, out_size: int, nb_heads: int, activation: Optional[_Fn] = jax.nn.relu, residual: bool = True, use_ln: bool = False, name: str = 'gat_aggr', ): super().__init__(name=name) self.out_size = out_size self.nb_heads = nb_heads if out_size % nb_heads != 0: raise ValueError('The number of attention heads must divide the width!') self.head_size = out_size // nb_heads self.activation = activation self.residual = residual self.use_ln = use_ln def __call__( # pytype: disable=signature-mismatch # numpy-scalars self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **unused_kwargs, ) -> _Array: """GAT inference step.""" b, n, _ = node_fts.shape assert edge_fts.shape[:-1] == (b, n, n) assert graph_fts.shape[:-1] == (b,) assert adj_mat.shape == (b, n, n) z = jnp.concatenate([node_fts, hidden], axis=-1) m = hk.Linear(self.out_size) skip = hk.Linear(self.out_size) bias_mat = (adj_mat - 1.0) * 1e9 bias_mat = jnp.tile(bias_mat[..., None], (1, 1, 1, self.nb_heads)) # [B, N, N, H] bias_mat = jnp.transpose(bias_mat, (0, 3, 1, 2)) # [B, H, N, N] a_1 = hk.Linear(self.nb_heads) a_2 = hk.Linear(self.nb_heads) a_e = hk.Linear(self.nb_heads) a_g = hk.Linear(self.nb_heads) values = m(z) # [B, N, H*F] values = jnp.reshape( values, values.shape[:-1] + (self.nb_heads, self.head_size)) # [B, N, H, F] values = jnp.transpose(values, (0, 2, 1, 3)) # [B, H, N, F] att_1 = jnp.expand_dims(a_1(z), axis=-1) att_2 = jnp.expand_dims(a_2(z), axis=-1) att_e = a_e(edge_fts) att_g = jnp.expand_dims(a_g(graph_fts), axis=-1) logits = ( jnp.transpose(att_1, (0, 2, 1, 3)) + # + [B, H, N, 1] jnp.transpose(att_2, (0, 2, 3, 1)) + # + [B, H, 1, N] 
jnp.transpose(att_e, (0, 3, 1, 2)) + # + [B, H, N, N] jnp.expand_dims(att_g, axis=-1) # + [B, H, 1, 1] ) # = [B, H, N, N] coefs = jax.nn.softmax(jax.nn.leaky_relu(logits) + bias_mat, axis=-1) ret = jnp.matmul(coefs, values) # [B, H, N, F] ret = jnp.transpose(ret, (0, 2, 1, 3)) # [B, N, H, F] ret = jnp.reshape(ret, ret.shape[:-2] + (self.out_size,)) # [B, N, H*F] if self.residual: ret += skip(z) if self.activation is not None: ret = self.activation(ret) if self.use_ln: ln = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True) ret = ln(ret) return ret, None # pytype: disable=bad-return-type # numpy-scalars class GATFull(GAT): """Graph Attention Network with full adjacency matrix.""" def __call__(self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **unused_kwargs) -> _Array: adj_mat = jnp.ones_like(adj_mat) return super().__call__(node_fts, edge_fts, graph_fts, adj_mat, hidden) class GATv2(Processor): """Graph Attention Network v2 (Brody et al., ICLR 2022).""" def __init__( self, out_size: int, nb_heads: int, mid_size: Optional[int] = None, activation: Optional[_Fn] = jax.nn.relu, residual: bool = True, use_ln: bool = False, name: str = 'gatv2_aggr', ): super().__init__(name=name) if mid_size is None: self.mid_size = out_size else: self.mid_size = mid_size self.out_size = out_size self.nb_heads = nb_heads if out_size % nb_heads != 0: raise ValueError('The number of attention heads must divide the width!') self.head_size = out_size // nb_heads if self.mid_size % nb_heads != 0: raise ValueError('The number of attention heads must divide the message!') self.mid_head_size = self.mid_size // nb_heads self.activation = activation self.residual = residual self.use_ln = use_ln def __call__( # pytype: disable=signature-mismatch # numpy-scalars self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **unused_kwargs, ) -> _Array: """GATv2 inference step.""" b, n, _ = node_fts.shape assert edge_fts.shape[:-1] == (b, n, n) assert graph_fts.shape[:-1] == (b,) assert adj_mat.shape == (b, n, n) z = jnp.concatenate([node_fts, hidden], axis=-1) m = hk.Linear(self.out_size) skip = hk.Linear(self.out_size) bias_mat = (adj_mat - 1.0) * 1e9 bias_mat = jnp.tile(bias_mat[..., None], (1, 1, 1, self.nb_heads)) # [B, N, N, H] bias_mat = jnp.transpose(bias_mat, (0, 3, 1, 2)) # [B, H, N, N] w_1 = hk.Linear(self.mid_size) w_2 = hk.Linear(self.mid_size) w_e = hk.Linear(self.mid_size) w_g = hk.Linear(self.mid_size) a_heads = [] for _ in range(self.nb_heads): a_heads.append(hk.Linear(1)) values = m(z) # [B, N, H*F] values = jnp.reshape( values, values.shape[:-1] + (self.nb_heads, self.head_size)) # [B, N, H, F] values = jnp.transpose(values, (0, 2, 1, 3)) # [B, H, N, F] pre_att_1 = w_1(z) pre_att_2 = w_2(z) pre_att_e = w_e(edge_fts) pre_att_g = w_g(graph_fts) pre_att = ( jnp.expand_dims(pre_att_1, axis=1) + # + [B, 1, N, H*F] jnp.expand_dims(pre_att_2, axis=2) + # + [B, N, 1, H*F] pre_att_e + # + [B, N, N, H*F] jnp.expand_dims(pre_att_g, axis=(1, 2)) # + [B, 1, 1, H*F] ) # = [B, N, N, H*F] pre_att = jnp.reshape( pre_att, pre_att.shape[:-1] + (self.nb_heads, self.mid_head_size) ) # [B, N, N, H, F] pre_att = jnp.transpose(pre_att, (0, 3, 1, 2, 4)) # [B, H, N, N, F] # This part is not very efficient, but we agree to keep it this way to # enhance readability, assuming `nb_heads` will not be large. 
logit_heads = [] for head in range(self.nb_heads): logit_heads.append( jnp.squeeze( a_heads[head](jax.nn.leaky_relu(pre_att[:, head])), axis=-1) ) # [B, N, N] logits = jnp.stack(logit_heads, axis=1) # [B, H, N, N] coefs = jax.nn.softmax(logits + bias_mat, axis=-1) ret = jnp.matmul(coefs, values) # [B, H, N, F] ret = jnp.transpose(ret, (0, 2, 1, 3)) # [B, N, H, F] ret = jnp.reshape(ret, ret.shape[:-2] + (self.out_size,)) # [B, N, H*F] if self.residual: ret += skip(z) if self.activation is not None: ret = self.activation(ret) if self.use_ln: ln = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True) ret = ln(ret) return ret, None # pytype: disable=bad-return-type # numpy-scalars class GATv2Full(GATv2): """Graph Attention Network v2 with full adjacency matrix.""" def __call__(self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **unused_kwargs) -> _Array: adj_mat = jnp.ones_like(adj_mat) return super().__call__(node_fts, edge_fts, graph_fts, adj_mat, hidden) def get_triplet_msgs(z, edge_fts, graph_fts, nb_triplet_fts): """Triplet messages, as done by Dudzik and Velickovic (2022).""" t_1 = hk.Linear(nb_triplet_fts) t_2 = hk.Linear(nb_triplet_fts) t_3 = hk.Linear(nb_triplet_fts) t_e_1 = hk.Linear(nb_triplet_fts) t_e_2 = hk.Linear(nb_triplet_fts) t_e_3 = hk.Linear(nb_triplet_fts) t_g = hk.Linear(nb_triplet_fts) tri_1 = t_1(z) tri_2 = t_2(z) tri_3 = t_3(z) tri_e_1 = t_e_1(edge_fts) tri_e_2 = t_e_2(edge_fts) tri_e_3 = t_e_3(edge_fts) tri_g = t_g(graph_fts) return ( jnp.expand_dims(tri_1, axis=(2, 3)) + # (B, N, 1, 1, H) jnp.expand_dims(tri_2, axis=(1, 3)) + # + (B, 1, N, 1, H) jnp.expand_dims(tri_3, axis=(1, 2)) + # + (B, 1, 1, N, H) jnp.expand_dims(tri_e_1, axis=3) + # + (B, N, N, 1, H) jnp.expand_dims(tri_e_2, axis=2) + # + (B, N, 1, N, H) jnp.expand_dims(tri_e_3, axis=1) + # + (B, 1, N, N, H) jnp.expand_dims(tri_g, axis=(1, 2, 3)) # + (B, 1, 1, 1, H) ) # = (B, N, N, N, H) class PGN(Processor): """Pointer Graph Networks (Veličković et al., NeurIPS 2020).""" def __init__( self, out_size: int, mid_size: Optional[int] = None, mid_act: Optional[_Fn] = None, activation: Optional[_Fn] = jax.nn.relu, reduction: _Fn = jnp.max, msgs_mlp_sizes: Optional[List[int]] = None, use_ln: bool = False, use_triplets: bool = False, nb_triplet_fts: int = 8, gated: bool = False, name: str = 'mpnn_aggr', ): super().__init__(name=name) if mid_size is None: self.mid_size = out_size else: self.mid_size = mid_size self.out_size = out_size self.mid_act = mid_act self.activation = activation self.reduction = reduction self._msgs_mlp_sizes = msgs_mlp_sizes self.use_ln = use_ln self.use_triplets = use_triplets self.nb_triplet_fts = nb_triplet_fts self.gated = gated def __call__( # pytype: disable=signature-mismatch # numpy-scalars self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **unused_kwargs, ) -> _Array: """MPNN inference step.""" b, n, _ = node_fts.shape assert edge_fts.shape[:-1] == (b, n, n) assert graph_fts.shape[:-1] == (b,) assert adj_mat.shape == (b, n, n) z = jnp.concatenate([node_fts, hidden], axis=-1) m_1 = hk.Linear(self.mid_size) m_2 = hk.Linear(self.mid_size) m_e = hk.Linear(self.mid_size) m_g = hk.Linear(self.mid_size) o1 = hk.Linear(self.out_size) o2 = hk.Linear(self.out_size) msg_1 = m_1(z) msg_2 = m_2(z) msg_e = m_e(edge_fts) msg_g = m_g(graph_fts) tri_msgs = None if self.use_triplets: # Triplet messages, as done by Dudzik and Velickovic (2022) triplets = get_triplet_msgs(z, edge_fts, graph_fts, 
self.nb_triplet_fts) o3 = hk.Linear(self.out_size) tri_msgs = o3(jnp.max(triplets, axis=1)) # (B, N, N, H) if self.activation is not None: tri_msgs = self.activation(tri_msgs) msgs = ( jnp.expand_dims(msg_1, axis=1) + jnp.expand_dims(msg_2, axis=2) + msg_e + jnp.expand_dims(msg_g, axis=(1, 2))) if self._msgs_mlp_sizes is not None: msgs = hk.nets.MLP(self._msgs_mlp_sizes)(jax.nn.relu(msgs)) if self.mid_act is not None: msgs = self.mid_act(msgs) if self.reduction == jnp.mean: msgs = jnp.sum(msgs * jnp.expand_dims(adj_mat, -1), axis=1) msgs = msgs / jnp.sum(adj_mat, axis=-1, keepdims=True) elif self.reduction == jnp.max: maxarg = jnp.where(jnp.expand_dims(adj_mat, -1), msgs, -BIG_NUMBER) msgs = jnp.max(maxarg, axis=1) else: msgs = self.reduction(msgs * jnp.expand_dims(adj_mat, -1), axis=1) h_1 = o1(z) h_2 = o2(msgs) ret = h_1 + h_2 if self.activation is not None: ret = self.activation(ret) if self.use_ln: ln = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True) ret = ln(ret) if self.gated: gate1 = hk.Linear(self.out_size) gate2 = hk.Linear(self.out_size) gate3 = hk.Linear(self.out_size, b_init=hk.initializers.Constant(-3)) gate = jax.nn.sigmoid(gate3(jax.nn.relu(gate1(z) + gate2(msgs)))) ret = ret * gate + hidden * (1-gate) return ret, tri_msgs # pytype: disable=bad-return-type # numpy-scalars class DeepSets(PGN): """Deep Sets (Zaheer et al., NeurIPS 2017).""" def __call__(self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **unused_kwargs) -> _Array: assert adj_mat.ndim == 3 adj_mat = jnp.ones_like(adj_mat) * jnp.eye(adj_mat.shape[-1]) return super().__call__(node_fts, edge_fts, graph_fts, adj_mat, hidden) class MPNN(PGN): """Message-Passing Neural Network (Gilmer et al., ICML 2017).""" def __call__(self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **unused_kwargs) -> _Array: adj_mat = jnp.ones_like(adj_mat) return super().__call__(node_fts, edge_fts, graph_fts, adj_mat, hidden) class PGNMask(PGN): """Masked Pointer Graph Networks (Veličković et al., NeurIPS 2020).""" @property def inf_bias(self): return True @property def inf_bias_edge(self): return True class MemNetMasked(Processor): """Implementation of End-to-End Memory Networks. Inspired by the description in https://arxiv.org/abs/1503.08895. """ def __init__( self, vocab_size: int, sentence_size: int, linear_output_size: int, embedding_size: int = 16, memory_size: Optional[int] = 128, num_hops: int = 1, nonlin: Callable[[Any], Any] = jax.nn.relu, apply_embeddings: bool = True, init_func: hk.initializers.Initializer = jnp.zeros, use_ln: bool = False, name: str = 'memnet') -> None: """Constructor. Args: vocab_size: the number of words in the dictionary (each story, query and answer come contain symbols coming from this dictionary). sentence_size: the dimensionality of each memory. linear_output_size: the dimensionality of the output of the last layer of the model. embedding_size: the dimensionality of the latent space to where all memories are projected. memory_size: the number of memories provided. num_hops: the number of layers in the model. nonlin: non-linear transformation applied at the end of each layer. apply_embeddings: flag whether to aply embeddings. init_func: initialization function for the biases. use_ln: whether to use layer normalisation in the model. name: the name of the model. 
""" super().__init__(name=name) self._vocab_size = vocab_size self._embedding_size = embedding_size self._sentence_size = sentence_size self._memory_size = memory_size self._linear_output_size = linear_output_size self._num_hops = num_hops self._nonlin = nonlin self._apply_embeddings = apply_embeddings self._init_func = init_func self._use_ln = use_ln # Encoding part: i.e. "I" of the paper. self._encodings = _position_encoding(sentence_size, embedding_size) def __call__( # pytype: disable=signature-mismatch # numpy-scalars self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **unused_kwargs, ) -> _Array: """MemNet inference step.""" del hidden node_and_graph_fts = jnp.concatenate([node_fts, graph_fts[:, None]], axis=1) edge_fts_padded = jnp.pad(edge_fts * adj_mat[..., None], ((0, 0), (0, 1), (0, 1), (0, 0))) nxt_hidden = jax.vmap(self._apply, (1), 1)(node_and_graph_fts, edge_fts_padded) # Broadcast hidden state corresponding to graph features across the nodes. nxt_hidden = nxt_hidden[:, :-1] + nxt_hidden[:, -1:] return nxt_hidden, None # pytype: disable=bad-return-type # numpy-scalars def _apply(self, queries: _Array, stories: _Array) -> _Array: """Apply Memory Network to the queries and stories. Args: queries: Tensor of shape [batch_size, sentence_size]. stories: Tensor of shape [batch_size, memory_size, sentence_size]. Returns: Tensor of shape [batch_size, vocab_size]. """ if self._apply_embeddings: query_biases = hk.get_parameter( 'query_biases', shape=[self._vocab_size - 1, self._embedding_size], init=self._init_func) stories_biases = hk.get_parameter( 'stories_biases', shape=[self._vocab_size - 1, self._embedding_size], init=self._init_func) memory_biases = hk.get_parameter( 'memory_contents', shape=[self._memory_size, self._embedding_size], init=self._init_func) output_biases = hk.get_parameter( 'output_biases', shape=[self._vocab_size - 1, self._embedding_size], init=self._init_func) nil_word_slot = jnp.zeros([1, self._embedding_size]) # This is "A" in the paper. if self._apply_embeddings: stories_biases = jnp.concatenate([stories_biases, nil_word_slot], axis=0) memory_embeddings = jnp.take( stories_biases, stories.reshape([-1]).astype(jnp.int32), axis=0).reshape(list(stories.shape) + [self._embedding_size]) memory_embeddings = jnp.pad( memory_embeddings, ((0, 0), (0, self._memory_size - jnp.shape(memory_embeddings)[1]), (0, 0), (0, 0))) memory = jnp.sum(memory_embeddings * self._encodings, 2) + memory_biases else: memory = stories # This is "B" in the paper. Also, when there are no queries (only # sentences), then there these lines are substituted by # query_embeddings = 0.1. if self._apply_embeddings: query_biases = jnp.concatenate([query_biases, nil_word_slot], axis=0) query_embeddings = jnp.take( query_biases, queries.reshape([-1]).astype(jnp.int32), axis=0).reshape(list(queries.shape) + [self._embedding_size]) # This is "u" in the paper. query_input_embedding = jnp.sum(query_embeddings * self._encodings, 1) else: query_input_embedding = queries # This is "C" in the paper. 
if self._apply_embeddings: output_biases = jnp.concatenate([output_biases, nil_word_slot], axis=0) output_embeddings = jnp.take( output_biases, stories.reshape([-1]).astype(jnp.int32), axis=0).reshape(list(stories.shape) + [self._embedding_size]) output_embeddings = jnp.pad( output_embeddings, ((0, 0), (0, self._memory_size - jnp.shape(output_embeddings)[1]), (0, 0), (0, 0))) output = jnp.sum(output_embeddings * self._encodings, 2) else: output = stories intermediate_linear = hk.Linear(self._embedding_size, with_bias=False) # Output_linear is "H". output_linear = hk.Linear(self._linear_output_size, with_bias=False) for hop_number in range(self._num_hops): query_input_embedding_transposed = jnp.transpose( jnp.expand_dims(query_input_embedding, -1), [0, 2, 1]) # Calculate probabilities. probs = jax.nn.softmax( jnp.sum(memory * query_input_embedding_transposed, 2)) # Calculate output of the layer by multiplying by C. transposed_probs = jnp.transpose(jnp.expand_dims(probs, -1), [0, 2, 1]) transposed_output_embeddings = jnp.transpose(output, [0, 2, 1]) # This is "o" in the paper. layer_output = jnp.sum(transposed_output_embeddings * transposed_probs, 2) # Finally the answer if hop_number == self._num_hops - 1: # Please note that in the TF version we apply the final linear layer # in all hops and this results in shape mismatches. output_layer = output_linear(query_input_embedding + layer_output) else: output_layer = intermediate_linear(query_input_embedding + layer_output) query_input_embedding = output_layer if self._nonlin: output_layer = self._nonlin(output_layer) # This linear here is "W". ret = hk.Linear(self._vocab_size, with_bias=False)(output_layer) if self._use_ln: ln = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True) ret = ln(ret) return ret class MemNetFull(MemNetMasked): """Memory Networks with full adjacency matrix.""" def __call__(self, node_fts: _Array, edge_fts: _Array, graph_fts: _Array, adj_mat: _Array, hidden: _Array, **unused_kwargs) -> _Array: adj_mat = jnp.ones_like(adj_mat) return super().__call__(node_fts, edge_fts, graph_fts, adj_mat, hidden) ProcessorFactory = Callable[[int], Processor] def get_processor_factory(kind: str, use_ln: bool, nb_triplet_fts: int, nb_heads: Optional[int] = None) -> ProcessorFactory: """Returns a processor factory. Args: kind: One of the available types of processor. use_ln: Whether the processor passes the output through a layernorm layer. nb_triplet_fts: How many triplet features to compute. nb_heads: Number of attention heads for GAT processors. Returns: A callable that takes an `out_size` parameter (equal to the hidden dimension of the network) and returns a processor instance. 
""" def _factory(out_size: int): if kind == 'deepsets': processor = DeepSets( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=False, nb_triplet_fts=0 ) elif kind == 'gat': processor = GAT( out_size=out_size, nb_heads=nb_heads, use_ln=use_ln, ) elif kind == 'gat_full': processor = GATFull( out_size=out_size, nb_heads=nb_heads, use_ln=use_ln ) elif kind == 'gatv2': processor = GATv2( out_size=out_size, nb_heads=nb_heads, use_ln=use_ln ) elif kind == 'gatv2_full': processor = GATv2Full( out_size=out_size, nb_heads=nb_heads, use_ln=use_ln ) elif kind == 'memnet_full': processor = MemNetFull( vocab_size=out_size, sentence_size=out_size, linear_output_size=out_size, ) elif kind == 'memnet_masked': processor = MemNetMasked( vocab_size=out_size, sentence_size=out_size, linear_output_size=out_size, ) elif kind == 'mpnn': processor = MPNN( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=False, nb_triplet_fts=0, ) elif kind == 'pgn': processor = PGN( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=False, nb_triplet_fts=0, ) elif kind == 'pgn_mask': processor = PGNMask( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=False, nb_triplet_fts=0, ) elif kind == 'triplet_mpnn': processor = MPNN( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=True, nb_triplet_fts=nb_triplet_fts, ) elif kind == 'triplet_pgn': processor = PGN( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=True, nb_triplet_fts=nb_triplet_fts, ) elif kind == 'triplet_pgn_mask': processor = PGNMask( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=True, nb_triplet_fts=nb_triplet_fts, ) elif kind == 'gpgn': processor = PGN( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=False, nb_triplet_fts=nb_triplet_fts, gated=True, ) elif kind == 'gpgn_mask': processor = PGNMask( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=False, nb_triplet_fts=nb_triplet_fts, gated=True, ) elif kind == 'gmpnn': processor = MPNN( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=False, nb_triplet_fts=nb_triplet_fts, gated=True, ) elif kind == 'triplet_gpgn': processor = PGN( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=True, nb_triplet_fts=nb_triplet_fts, gated=True, ) elif kind == 'triplet_gpgn_mask': processor = PGNMask( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=True, nb_triplet_fts=nb_triplet_fts, gated=True, ) elif kind == 'triplet_gmpnn': processor = MPNN( out_size=out_size, msgs_mlp_sizes=[out_size, out_size], use_ln=use_ln, use_triplets=True, nb_triplet_fts=nb_triplet_fts, gated=True, ) else: raise ValueError('Unexpected processor kind ' + kind) return processor return _factory def _position_encoding(sentence_size: int, embedding_size: int) -> np.ndarray: """Position Encoding described in section 4.1 [1].""" encoding = np.ones((embedding_size, sentence_size), dtype=np.float32) ls = sentence_size + 1 le = embedding_size + 1 for i in range(1, le): for j in range(1, ls): encoding[i - 1, j - 1] = (i - (le - 1) / 2) * (j - (ls - 1) / 2) encoding = 1 + 4 * encoding / embedding_size / sentence_size return np.transpose(encoding)
clrs-master
clrs/_src/processors.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `samplers.py`.""" from absl.testing import absltest from absl.testing import parameterized import chex from clrs._src import probing from clrs._src import samplers from clrs._src import specs import jax import numpy as np class SamplersTest(parameterized.TestCase): @parameterized.parameters(*specs.CLRS_30_ALGS) def test_sampler_determinism(self, name): num_samples = 3 num_nodes = 10 sampler, _ = samplers.build_sampler(name, num_samples, num_nodes) np.random.seed(47) # Set seed feedback = sampler.next() expected = feedback.outputs[0].data.copy() np.random.seed(48) # Set a different seed feedback = sampler.next() actual = feedback.outputs[0].data.copy() # Validate that datasets are the same. np.testing.assert_array_equal(expected, actual) @parameterized.parameters(*specs.CLRS_30_ALGS) def test_sampler_batch_determinism(self, name): num_samples = 10 batch_size = 5 num_nodes = 10 seed = 0 sampler_1, _ = samplers.build_sampler( name, num_samples, num_nodes, seed=seed) sampler_2, _ = samplers.build_sampler( name, num_samples, num_nodes, seed=seed) feedback_1 = sampler_1.next(batch_size) feedback_2 = sampler_2.next(batch_size) # Validate that datasets are the same. jax.tree_util.tree_map(np.testing.assert_array_equal, feedback_1, feedback_2) def test_end_to_end(self): num_samples = 7 num_nodes = 3 sampler, _ = samplers.build_sampler("bfs", num_samples, num_nodes) feedback = sampler.next() inputs = feedback.features.inputs self.assertLen(inputs, 4) self.assertEqual(inputs[0].name, "pos") self.assertEqual(inputs[0].data.shape, (num_samples, num_nodes)) outputs = feedback.outputs self.assertLen(outputs, 1) self.assertEqual(outputs[0].name, "pi") self.assertEqual(outputs[0].data.shape, (num_samples, num_nodes)) def test_batch_size(self): num_samples = 7 num_nodes = 3 sampler, _ = samplers.build_sampler("bfs", num_samples, num_nodes) # Full-batch. feedback = sampler.next() for dp in feedback.features.inputs: # [B, ...] self.assertEqual(dp.data.shape[0], num_samples) for dp in feedback.outputs: # [B, ...] self.assertEqual(dp.data.shape[0], num_samples) for dp in feedback.features.hints: # [T, B, ...] self.assertEqual(dp.data.shape[1], num_samples) self.assertLen(feedback.features.lengths, num_samples) # Specified batch. batch_size = 5 feedback = sampler.next(batch_size) for dp in feedback.features.inputs: # [B, ...] self.assertEqual(dp.data.shape[0], batch_size) for dp in feedback.outputs: # [B, ...] self.assertEqual(dp.data.shape[0], batch_size) for dp in feedback.features.hints: # [T, B, ...] 
self.assertEqual(dp.data.shape[1], batch_size) self.assertLen(feedback.features.lengths, batch_size) def test_batch_io(self): sample = [ probing.DataPoint( name="x", location=specs.Location.NODE, type_=specs.Type.SCALAR, data=np.zeros([1, 3]), ), probing.DataPoint( name="y", location=specs.Location.EDGE, type_=specs.Type.MASK, data=np.zeros([1, 3, 3]), ), ] trajectory = [sample.copy(), sample.copy(), sample.copy(), sample.copy()] batched = samplers._batch_io(trajectory) np.testing.assert_array_equal(batched[0].data, np.zeros([4, 3])) np.testing.assert_array_equal(batched[1].data, np.zeros([4, 3, 3])) def test_batch_hint(self): sample0 = [ probing.DataPoint( name="x", location=specs.Location.NODE, type_=specs.Type.MASK, data=np.zeros([2, 1, 3]), ), probing.DataPoint( name="y", location=specs.Location.NODE, type_=specs.Type.POINTER, data=np.zeros([2, 1, 3]), ), ] sample1 = [ probing.DataPoint( name="x", location=specs.Location.NODE, type_=specs.Type.MASK, data=np.zeros([1, 1, 3]), ), probing.DataPoint( name="y", location=specs.Location.NODE, type_=specs.Type.POINTER, data=np.zeros([1, 1, 3]), ), ] trajectory = [sample0, sample1] batched, lengths = samplers._batch_hints(trajectory, 0) np.testing.assert_array_equal(batched[0].data, np.zeros([2, 2, 3])) np.testing.assert_array_equal(batched[1].data, np.zeros([2, 2, 3])) np.testing.assert_array_equal(lengths, np.array([2, 1])) batched, lengths = samplers._batch_hints(trajectory, 5) np.testing.assert_array_equal(batched[0].data, np.zeros([5, 2, 3])) np.testing.assert_array_equal(batched[1].data, np.zeros([5, 2, 3])) np.testing.assert_array_equal(lengths, np.array([2, 1])) def test_padding(self): lens = np.random.choice(10, (10,), replace=True) + 1 trajectory = [] for len_ in lens: trajectory.append([ probing.DataPoint( name="x", location=specs.Location.NODE, type_=specs.Type.MASK, data=np.ones([len_, 1, 3]), ) ]) batched, lengths = samplers._batch_hints(trajectory, 0) np.testing.assert_array_equal(lengths, lens) for i in range(len(lens)): ones = batched[0].data[:lens[i], i, :] zeros = batched[0].data[lens[i]:, i, :] np.testing.assert_array_equal(ones, np.ones_like(ones)) np.testing.assert_array_equal(zeros, np.zeros_like(zeros)) class ProcessRandomPosTest(parameterized.TestCase): @parameterized.parameters(["insertion_sort", "naive_string_matcher"]) def test_random_pos(self, algorithm_name): batch_size, length = 12, 10 def _make_sampler(): sampler, _ = samplers.build_sampler( algorithm_name, seed=0, num_samples=100, length=length, ) while True: yield sampler.next(batch_size) sampler_1 = _make_sampler() sampler_2 = _make_sampler() sampler_2 = samplers.process_random_pos(sampler_2, np.random.RandomState(0)) batch_without_rand_pos = next(sampler_1) batch_with_rand_pos = next(sampler_2) pos_idx = [x.name for x in batch_without_rand_pos.features.inputs].index( "pos") fixed_pos = batch_without_rand_pos.features.inputs[pos_idx] rand_pos = batch_with_rand_pos.features.inputs[pos_idx] self.assertEqual(rand_pos.location, specs.Location.NODE) self.assertEqual(rand_pos.type_, specs.Type.SCALAR) self.assertEqual(rand_pos.data.shape, (batch_size, length)) self.assertEqual(rand_pos.data.shape, fixed_pos.data.shape) self.assertEqual(rand_pos.type_, fixed_pos.type_) self.assertEqual(rand_pos.location, fixed_pos.location) assert (rand_pos.data.std(axis=0) > 1e-3).all() assert (fixed_pos.data.std(axis=0) < 1e-9).all() if "string" in algorithm_name: expected = np.concatenate([np.arange(4*length//5)/(4*length//5), np.arange(length//5)/(length//5)]) else: expected = 
np.arange(length)/length np.testing.assert_array_equal( fixed_pos.data, np.broadcast_to(expected, (batch_size, length))) batch_with_rand_pos.features.inputs[pos_idx] = fixed_pos chex.assert_trees_all_equal(batch_with_rand_pos, batch_without_rand_pos) if __name__ == "__main__": absltest.main()
clrs-master
clrs/_src/samplers_test.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `losses.py`.""" from typing import Generator from absl.testing import absltest from absl.testing import parameterized from clrs._src import dataset from clrs._src import losses from clrs._src import probing from clrs._src import samplers from clrs._src import specs import jax import jax.numpy as jnp import numpy as np _Array = np.ndarray _Location = specs.Location def _make_sampler(algo: str, nb_nodes: int) -> samplers.Sampler: sampler, _ = samplers.build_sampler( algo, seed=samplers.CLRS30['val']['seed'], num_samples=samplers.CLRS30['val']['num_samples'], length=nb_nodes, ) return sampler def _make_iterable_sampler( algo: str, batch_size: int, nb_nodes: int) -> Generator[samplers.Feedback, None, None]: sampler = _make_sampler(algo, nb_nodes) while True: yield sampler.next(batch_size) def _as_pred_data(x, nb_nodes, seed, batch_axis): """Fake a prediction from a data point.""" # Permute along batch axis to make the prediction different. key = jax.random.PRNGKey(seed) data = jax.random.permutation(key, x.data, axis=batch_axis) # Extend to one-hot for pointer types. 
if x.type_ == specs.Type.POINTER: return jax.nn.one_hot(data, nb_nodes) return data def _mask_datapoint(x, seed, t_axis=None): """Add some masking to data.""" key = jax.random.PRNGKey(seed) data = x.data if x.type_ == specs.Type.MASK: # mask some data at random mask_shape = list(data.shape) if t_axis is not None: mask_shape[t_axis] = 1 mask = jax.random.uniform(key, tuple(mask_shape)) < 0.2 data = jnp.where(mask, specs.OutputClass.MASKED, data) elif x.type_ in [specs.Type.CATEGORICAL, specs.Type.MASK_ONE]: # mask some data at random (all categories together) mask_shape = list(data.shape)[:-1] if t_axis is not None: mask_shape[t_axis] = 1 mask = jax.random.uniform(key, tuple(mask_shape)) < 0.2 data = jnp.where(mask[..., None], specs.OutputClass.MASKED, data) return probing.DataPoint(name=x.name, location=x.location, type_=x.type_, data=data) def _rand_diff(seed, shape): return 2.0 * jax.random.uniform(jax.random.PRNGKey(seed), shape) - 1.0 def _rand_mask(seed, shape, p=0.5): return (jax.random.uniform(jax.random.PRNGKey(seed), shape) > p).astype(float) def invert(d): """Dict of lists -> list of dicts.""" if d: return [dict(zip(d, i)) for i in zip(*d.values())] def _create_data(algo, nb_nodes): batch_size = 8 ds = _make_iterable_sampler(algo, batch_size, nb_nodes) full_sample = next(ds) chunk_length = full_sample.features.lengths[0].astype(int) chunked_ds = dataset.chunkify( _make_iterable_sampler(algo, batch_size, nb_nodes), chunk_length) chunk_sample = next(chunked_ds) return full_sample, chunk_sample class FullVsChunkLossesTest(parameterized.TestCase): """Test that the full and chunked versions of the losses match.""" # Test two algorithms with fixed-length, covering all data types @parameterized.parameters('dfs', 'floyd_warshall') def test_output_loss(self, algo): nb_nodes = 16 full_sample, chunk_sample = _create_data(algo, nb_nodes) # Calculate output loss. for truth_full, truth_chunked in zip(full_sample.outputs, chunk_sample.outputs): chunk_output_loss = losses.output_loss_chunked( truth=_mask_datapoint(truth_chunked, seed=0), pred=_as_pred_data(truth_chunked, nb_nodes, 0, 1), is_last=chunk_sample.features.is_last, nb_nodes=nb_nodes, ) full_output_loss = losses.output_loss( truth=_mask_datapoint(truth_full, seed=0), pred=_as_pred_data(truth_full, nb_nodes, 0, 0), nb_nodes=nb_nodes, ) np.testing.assert_allclose(chunk_output_loss, full_output_loss, rtol=1e-4) @parameterized.parameters('dfs', 'floyd_warshall') def test_hint_loss(self, algo): nb_nodes = 16 full_sample, chunk_sample = _create_data(algo, nb_nodes) for truth_full, truth_chunked in zip(full_sample.features.hints, chunk_sample.features.hints): np.testing.assert_array_equal(truth_full.data, truth_chunked.data) pred = _as_pred_data(truth_chunked, nb_nodes, 0, 1) chunk_hint_loss = losses.hint_loss_chunked( truth=_mask_datapoint(truth_chunked, seed=1, t_axis=0), pred=pred, is_first=chunk_sample.features.is_first, nb_nodes=nb_nodes, ) full_preds = pred[1:] full_hint_loss = losses.hint_loss( truth=_mask_datapoint(truth_full, 1, t_axis=0), preds=full_preds, lengths=full_sample.features.lengths, nb_nodes=nb_nodes, ) np.testing.assert_allclose(chunk_hint_loss, full_hint_loss, rtol=1e-4) if __name__ == '__main__': absltest.main()
clrs-master
clrs/_src/losses_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Sampling utilities.""" import abc import collections import inspect import types from typing import Any, Callable, List, Optional, Tuple from absl import logging from clrs._src import algorithms from clrs._src import probing from clrs._src import specs import jax import numpy as np _Array = np.ndarray _DataPoint = probing.DataPoint Trajectory = List[_DataPoint] Trajectories = List[Trajectory] Algorithm = Callable[..., Any] Features = collections.namedtuple('Features', ['inputs', 'hints', 'lengths']) FeaturesChunked = collections.namedtuple( 'Features', ['inputs', 'hints', 'is_first', 'is_last']) Feedback = collections.namedtuple('Feedback', ['features', 'outputs']) # CLRS-30 baseline spec. CLRS30 = types.MappingProxyType({ 'train': { 'num_samples': 1000, 'length': 16, 'seed': 1, }, 'val': { 'num_samples': 32, 'length': 16, 'seed': 2, }, 'test': { 'num_samples': 32, 'length': 64, 'seed': 3, }, }) class Sampler(abc.ABC): """Sampler abstract base class.""" def __init__( self, algorithm: Algorithm, spec: specs.Spec, num_samples: int, *args, seed: Optional[int] = None, **kwargs, ): """Initializes a `Sampler`. Args: algorithm: The algorithm to sample from spec: The algorithm spec. num_samples: Number of algorithm unrolls to sample. If positive, all the samples will be generated in the constructor, and at each call of the `next` method a batch will be randomly selected among them. If -1, samples are generated on the fly with each call to `next`. *args: Algorithm args. seed: RNG seed. **kwargs: Algorithm kwargs. """ # Use `RandomState` to ensure deterministic sampling across Numpy versions. 
self._rng = np.random.RandomState(seed) self._spec = spec self._num_samples = num_samples self._algorithm = algorithm self._args = args self._kwargs = kwargs if num_samples < 0: logging.warning('Sampling dataset on-the-fly, unlimited samples.') # Just get an initial estimate of max hint length self.max_steps = -1 for _ in range(1000): data = self._sample_data(*args, **kwargs) _, probes = algorithm(*data) _, _, hint = probing.split_stages(probes, spec) for dp in hint: assert dp.data.shape[1] == 1 # batching axis if dp.data.shape[0] > self.max_steps: self.max_steps = dp.data.shape[0] else: logging.info('Creating a dataset with %i samples.', num_samples) (self._inputs, self._outputs, self._hints, self._lengths) = self._make_batch(num_samples, spec, 0, algorithm, *args, **kwargs) def _make_batch(self, num_samples: int, spec: specs.Spec, min_length: int, algorithm: Algorithm, *args, **kwargs): """Generate a batch of data.""" inputs = [] outputs = [] hints = [] for _ in range(num_samples): data = self._sample_data(*args, **kwargs) _, probes = algorithm(*data) inp, outp, hint = probing.split_stages(probes, spec) inputs.append(inp) outputs.append(outp) hints.append(hint) if len(hints) % 1000 == 0: logging.info('%i samples created', len(hints)) # Batch and pad trajectories to max(T). inputs = _batch_io(inputs) outputs = _batch_io(outputs) hints, lengths = _batch_hints(hints, min_length) return inputs, outputs, hints, lengths def next(self, batch_size: Optional[int] = None) -> Feedback: """Subsamples trajectories from the pre-generated dataset. Args: batch_size: Optional batch size. If `None`, returns entire dataset. Returns: Subsampled trajectories. """ if batch_size: if self._num_samples < 0: # generate on the fly inputs, outputs, hints, lengths = self._make_batch( batch_size, self._spec, self.max_steps, self._algorithm, *self._args, **self._kwargs) if hints[0].data.shape[0] > self.max_steps: logging.warning('Increasing hint lengh from %i to %i', self.max_steps, hints[0].data.shape[0]) self.max_steps = hints[0].data.shape[0] else: if batch_size > self._num_samples: raise ValueError( f'Batch size {batch_size} > dataset size {self._num_samples}.') # Returns a fixed-size random batch. indices = self._rng.choice(self._num_samples, (batch_size,), replace=True) inputs = _subsample_data(self._inputs, indices, axis=0) outputs = _subsample_data(self._outputs, indices, axis=0) hints = _subsample_data(self._hints, indices, axis=1) lengths = self._lengths[indices] else: # Returns the full dataset. 
assert self._num_samples >= 0 inputs = self._inputs hints = self._hints lengths = self._lengths outputs = self._outputs return Feedback(Features(inputs, hints, lengths), outputs) @abc.abstractmethod def _sample_data(self, length: int, *args, **kwargs) -> List[_Array]: pass def _random_sequence(self, length, low=0.0, high=1.0): """Random sequence.""" return self._rng.uniform(low=low, high=high, size=(length,)) def _random_string(self, length, chars=4): """Random string.""" return self._rng.randint(0, high=chars, size=(length,)) def _random_er_graph(self, nb_nodes, p=0.5, directed=False, acyclic=False, weighted=False, low=0.0, high=1.0): """Random Erdos-Renyi graph.""" mat = self._rng.binomial(1, p, size=(nb_nodes, nb_nodes)) if not directed: mat *= np.transpose(mat) elif acyclic: mat = np.triu(mat, k=1) p = self._rng.permutation(nb_nodes) # To allow nontrivial solutions mat = mat[p, :][:, p] if weighted: weights = self._rng.uniform(low=low, high=high, size=(nb_nodes, nb_nodes)) if not directed: weights *= np.transpose(weights) weights = np.sqrt(weights + 1e-3) # Add epsilon to protect underflow mat = mat.astype(float) * weights return mat def _random_community_graph(self, nb_nodes, k=4, p=0.5, eps=0.01, directed=False, acyclic=False, weighted=False, low=0.0, high=1.0): """Random perturbed k-community graph.""" mat = np.zeros((nb_nodes, nb_nodes)) if k > nb_nodes: raise ValueError(f'Cannot generate graph of too many ({k}) communities.') los, his = [], [] lo = 0 for i in range(k): if i == k - 1: hi = nb_nodes else: hi = lo + nb_nodes // k mat[lo:hi, lo:hi] = self._random_er_graph( hi - lo, p=p, directed=directed, acyclic=acyclic, weighted=weighted, low=low, high=high) los.append(lo) his.append(hi) lo = hi toggle = self._random_er_graph(nb_nodes, p=eps, directed=directed, acyclic=acyclic, weighted=weighted, low=low, high=high) # Prohibit closing new cycles for i in range(k): for j in range(i): toggle[los[i]:his[i], los[j]:his[j]] *= 0 mat = np.where(toggle > 0.0, (1.0 - (mat > 0.0)) * toggle, mat) p = self._rng.permutation(nb_nodes) # To allow nontrivial solutions mat = mat[p, :][:, p] return mat def _random_bipartite_graph(self, n, m, p=0.25): """Random bipartite graph-based flow network.""" nb_nodes = n + m + 2 s = 0 t = n + m + 1 mat = np.zeros((nb_nodes, nb_nodes)) mat[s, 1:n+1] = 1.0 # supersource mat[n+1:n+m+1, t] = 1.0 # supersink mat[1:n+1, n+1:n+m+1] = self._rng.binomial(1, p, size=(n, m)) return mat def build_sampler( name: str, num_samples: int, *args, seed: Optional[int] = None, **kwargs, ) -> Tuple[Sampler, specs.Spec]: """Builds a sampler. See `Sampler` documentation.""" if name not in specs.SPECS or name not in SAMPLERS: raise NotImplementedError(f'No implementation of algorithm {name}.') spec = specs.SPECS[name] algorithm = getattr(algorithms, name) sampler_class = SAMPLERS[name] # Ignore kwargs not accepted by the sampler. sampler_args = inspect.signature(sampler_class._sample_data).parameters # pylint:disable=protected-access clean_kwargs = {k: kwargs[k] for k in kwargs if k in sampler_args} if set(clean_kwargs) != set(kwargs): logging.warning('Ignoring kwargs %s when building sampler class %s', set(kwargs).difference(clean_kwargs), sampler_class) sampler = sampler_class(algorithm, spec, num_samples, seed=seed, *args, **clean_kwargs) return sampler, spec class SortingSampler(Sampler): """Sorting sampler. 
Generates a random sequence of U[0, 1].""" def _sample_data( self, length: int, low: float = 0., high: float = 1., ): arr = self._random_sequence(length=length, low=low, high=high) return [arr] class SearchSampler(Sampler): """Search sampler. Generates a random sequence and target (of U[0, 1]).""" def _sample_data( self, length: int, low: float = 0., high: float = 1., ): arr = self._random_sequence(length=length, low=low, high=high) arr.sort() x = self._rng.uniform(low=low, high=high) return [x, arr] class MaxSubarraySampler(Sampler): """Maximum subarray sampler. Generates a random sequence of U[-1, 1].""" def _sample_data( self, length: int, low: float = -1., high: float = 1., ): arr = self._random_sequence(length=length, low=low, high=high) return [arr] class LCSSampler(Sampler): """Longest Common Subsequence sampler. Generates two random ATCG strings.""" def _sample_data( self, length: int, length_2: Optional[int] = None, chars: int = 4, ): if length_2 is None: # Assume provided length is total length. length_2 = length // 2 length -= length_2 a = self._random_string(length=length, chars=chars) b = self._random_string(length=length_2, chars=chars) return [a, b] class OptimalBSTSampler(Sampler): """Optimal BST sampler. Samples array of probabilities, splits it into two.""" def _sample_data( self, length: int, ): tot_length = length + (length + 1) arr = self._random_sequence(length=tot_length, low=0.0, high=1.0) arr /= np.sum(arr) p = arr[:length] q = arr[length:] return [p, q] class ActivitySampler(Sampler): """Activity sampler. Samples start and finish times from U[0, 1].""" def _sample_data( self, length: int, low: float = 0., high: float = 1., ): arr_1 = self._random_sequence(length=length, low=low, high=high) arr_2 = self._random_sequence(length=length, low=low, high=high) return [np.minimum(arr_1, arr_2), np.maximum(arr_1, arr_2)] class TaskSampler(Sampler): """Task sampler. Samples deadlines (integers) and values (U[0, 1]).""" def _sample_data( self, length: int, max_deadline: Optional[int] = None, low: float = 0., high: float = 1., ): if max_deadline is None: max_deadline = length d = self._random_string(length=length, chars=max_deadline) + 1 w = self._random_sequence(length=length, low=low, high=high) return [d, w] class DfsSampler(Sampler): """DFS sampler.""" def _sample_data( self, length: int, p: Tuple[float, ...] = (0.5,), ): graph = self._random_er_graph( nb_nodes=length, p=self._rng.choice(p), directed=True, acyclic=False, weighted=False) return [graph] class BfsSampler(Sampler): """BFS sampler.""" def _sample_data( self, length: int, p: Tuple[float, ...] = (0.5,), ): graph = self._random_er_graph( nb_nodes=length, p=self._rng.choice(p), directed=False, acyclic=False, weighted=False) source_node = self._rng.choice(length) return [graph, source_node] class TopoSampler(Sampler): """Topological Sorting sampler.""" def _sample_data( self, length: int, p: Tuple[float, ...] = (0.5,), ): graph = self._random_er_graph( nb_nodes=length, p=self._rng.choice(p), directed=True, acyclic=True, weighted=False) return [graph] class ArticulationSampler(Sampler): """Articulation Point sampler.""" def _sample_data( self, length: int, p: Tuple[float, ...] = (0.2,), ): graph = self._random_er_graph( nb_nodes=length, p=self._rng.choice(p), directed=False, acyclic=False, weighted=False) return [graph] class MSTSampler(Sampler): """MST sampler for Kruskal's algorithm.""" def _sample_data( self, length: int, p: Tuple[float, ...] 
= (0.2,), # lower p to account for class imbalance low: float = 0., high: float = 1., ): graph = self._random_er_graph( nb_nodes=length, p=self._rng.choice(p), directed=False, acyclic=False, weighted=True, low=low, high=high) return [graph] class BellmanFordSampler(Sampler): """Bellman-Ford sampler.""" def _sample_data( self, length: int, p: Tuple[float, ...] = (0.5,), low: float = 0., high: float = 1., ): graph = self._random_er_graph( nb_nodes=length, p=self._rng.choice(p), directed=False, acyclic=False, weighted=True, low=low, high=high) source_node = self._rng.choice(length) return [graph, source_node] class DAGPathSampler(Sampler): """Sampler for DAG shortest paths.""" def _sample_data( self, length: int, p: Tuple[float, ...] = (0.5,), low: float = 0., high: float = 1., ): graph = self._random_er_graph( nb_nodes=length, p=self._rng.choice(p), directed=True, acyclic=True, weighted=True, low=low, high=high) source_node = self._rng.choice(length) return [graph, source_node] class FloydWarshallSampler(Sampler): """Sampler for all-pairs shortest paths.""" def _sample_data( self, length: int, p: Tuple[float, ...] = (0.5,), low: float = 0., high: float = 1., ): graph = self._random_er_graph( nb_nodes=length, p=self._rng.choice(p), directed=False, acyclic=False, weighted=True, low=low, high=high) return [graph] class SccSampler(Sampler): """Sampler for strongly connected component (SCC) tasks.""" def _sample_data( self, length: int, k: int = 4, p: Tuple[float, ...] = (0.5,), eps: float = 0.01, ): graph = self._random_community_graph( nb_nodes=length, k=k, p=self._rng.choice(p), eps=eps, directed=True, acyclic=False, weighted=False) return [graph] class BipartiteSampler(Sampler): """Sampler for bipartite matching-based flow networks.""" def _sample_data( self, length: int, length_2: Optional[int] = None, p: Tuple[float, ...] = (0.3,), ): if length_2 is None: # Assume provided length is total length. length_2 = length // 2 length -= length_2 graph = self._random_bipartite_graph(n=length, m=length_2, p=self._rng.choice(p)) return [graph, length, length_2, 0, length + length_2 + 1] class MatcherSampler(Sampler): """String matching sampler; embeds needle in a random haystack.""" def _sample_data( self, length: int, # length of haystack + needle, i.e., total number of nodes length_needle: Optional[int] = None, chars: int = 4, ): if length_needle is None: if length < 5: length_needle = 1 else: length_needle = length // 5 elif length_needle < 0: # randomize needle length length_needle = self._rng.randint(1, high=1 - length_needle) length_haystack = length - length_needle needle = self._random_string(length=length_needle, chars=chars) haystack = self._random_string(length=length_haystack, chars=chars) embed_pos = self._rng.choice(length_haystack - length_needle) haystack[embed_pos:embed_pos + length_needle] = needle return [haystack, needle] class SegmentsSampler(Sampler): """Two-segment sampler of points from (U[0, 1], U[0, 1]).""" def _sample_data(self, length: int, low: float = 0., high: float = 1.): del length # There are exactly four endpoints. 
# Quick CCW check (ignoring collinearity) for rejection sampling def ccw(x_a, y_a, x_b, y_b, x_c, y_c): return (y_c - y_a) * (x_b - x_a) > (y_b - y_a) * (x_c - x_a) def intersect(xs, ys): return ccw(xs[0], ys[0], xs[2], ys[2], xs[3], ys[3]) != ccw( xs[1], ys[1], xs[2], ys[2], xs[3], ys[3]) and ccw( xs[0], ys[0], xs[1], ys[1], xs[2], ys[2]) != ccw( xs[0], ys[0], xs[1], ys[1], xs[3], ys[3]) # Decide (with uniform probability) should this sample intersect coin_flip = self._rng.binomial(1, 0.5) xs = self._random_sequence(length=4, low=low, high=high) ys = self._random_sequence(length=4, low=low, high=high) while intersect(xs, ys) != coin_flip: xs = self._random_sequence(length=4, low=low, high=high) ys = self._random_sequence(length=4, low=low, high=high) return [xs, ys] class ConvexHullSampler(Sampler): """Convex hull sampler of points over a disk of radius r.""" def _sample_data(self, length: int, origin_x: float = 0., origin_y: float = 0., radius: float = 2.): thetas = self._random_sequence(length=length, low=0.0, high=2.0 * np.pi) rs = radius * np.sqrt( self._random_sequence(length=length, low=0.0, high=1.0)) xs = rs * np.cos(thetas) + origin_x ys = rs * np.sin(thetas) + origin_y return [xs, ys] SAMPLERS = { 'insertion_sort': SortingSampler, 'bubble_sort': SortingSampler, 'heapsort': SortingSampler, 'quicksort': SortingSampler, 'quickselect': SortingSampler, 'minimum': SortingSampler, 'binary_search': SearchSampler, 'find_maximum_subarray': MaxSubarraySampler, 'find_maximum_subarray_kadane': MaxSubarraySampler, 'matrix_chain_order': SortingSampler, 'lcs_length': LCSSampler, 'optimal_bst': OptimalBSTSampler, 'activity_selector': ActivitySampler, 'task_scheduling': TaskSampler, 'dfs': DfsSampler, 'topological_sort': TopoSampler, 'strongly_connected_components': SccSampler, 'articulation_points': ArticulationSampler, 'bridges': ArticulationSampler, 'bfs': BfsSampler, 'mst_kruskal': MSTSampler, 'mst_prim': BellmanFordSampler, 'bellman_ford': BellmanFordSampler, 'dag_shortest_paths': DAGPathSampler, 'dijkstra': BellmanFordSampler, 'floyd_warshall': FloydWarshallSampler, 'bipartite_matching': BipartiteSampler, 'naive_string_matcher': MatcherSampler, 'kmp_matcher': MatcherSampler, 'segments_intersect': SegmentsSampler, 'graham_scan': ConvexHullSampler, 'jarvis_march': ConvexHullSampler, } def _batch_io(traj_io: Trajectories) -> Trajectory: """Batches a trajectory of input/output samples along the time axis per probe. Args: traj_io: An i/o trajectory of `DataPoint`s indexed by time then probe. Returns: A |num probes| list of `DataPoint`s with the time axis stacked into `data`. """ assert traj_io # non-empty for sample_io in traj_io: for i, dp in enumerate(sample_io): assert dp.data.shape[0] == 1 # batching axis assert traj_io[0][i].name == dp.name return jax.tree_util.tree_map(lambda *x: np.concatenate(x), *traj_io) def _batch_hints( traj_hints: Trajectories, min_steps: int) -> Tuple[Trajectory, List[int]]: """Batches a trajectory of hints samples along the time axis per probe. Unlike i/o, hints have a variable-length time dimension. Before batching, each trajectory is padded to the maximum trajectory length. Args: traj_hints: A hint trajectory of `DataPoints`s indexed by time then probe min_steps: Hints will be padded at least to this length - if any hint is longer than this, the greater length will be used. Returns: A |num probes| list of `DataPoint`s with the time axis stacked into `data`, and a |sample| list containing the length of each trajectory. 
""" max_steps = min_steps assert traj_hints # non-empty for sample_hint in traj_hints: for dp in sample_hint: assert dp.data.shape[1] == 1 # batching axis if dp.data.shape[0] > max_steps: max_steps = dp.data.shape[0] time_and_batch = (max_steps, len(traj_hints)) # Create zero-filled space for the batched hints, then copy each hint # up to the corresponding length. batched_traj = jax.tree_util.tree_map( lambda x: np.zeros(time_and_batch + x.shape[2:]), traj_hints[0]) hint_lengths = np.zeros(len(traj_hints)) for sample_idx, cur_sample in enumerate(traj_hints): for i in range(len(cur_sample)): assert batched_traj[i].name == cur_sample[i].name cur_data = cur_sample[i].data cur_length = cur_data.shape[0] batched_traj[i].data[:cur_length, sample_idx:sample_idx+1] = cur_data if i > 0: assert hint_lengths[sample_idx] == cur_length else: hint_lengths[sample_idx] = cur_length return batched_traj, hint_lengths def _subsample_data( trajectory: Trajectory, idx: List[int], axis: int = 0, ) -> Trajectory: """New `Trajectory` where each `DataPoint`'s data is subsampled along axis.""" sampled_traj = [] for dp in trajectory: sampled_data = np.take(dp.data, idx, axis=axis) sampled_traj.append( probing.DataPoint(dp.name, dp.location, dp.type_, sampled_data)) return sampled_traj def _preprocess_permutations(probes, enforce_permutations): """Replace should-be permutations with proper permutation pointer + mask.""" output = [] for x in probes: if x.type_ != specs.Type.SHOULD_BE_PERMUTATION: output.append(x) continue assert x.location == specs.Location.NODE if enforce_permutations: new_x, mask = probing.predecessor_to_cyclic_predecessor_and_first(x.data) output.append( probing.DataPoint( name=x.name, location=x.location, type_=specs.Type.PERMUTATION_POINTER, data=new_x)) output.append( probing.DataPoint( name=x.name + '_mask', location=x.location, type_=specs.Type.MASK_ONE, data=mask)) else: output.append(probing.DataPoint(name=x.name, location=x.location, type_=specs.Type.POINTER, data=x.data)) return output def process_permutations(spec, sample_iterator, enforce_permutations): """Replace should-be permutations with proper permutation pointer + mask.""" def _iterate(): while True: feedback = next(sample_iterator) features = feedback.features inputs = _preprocess_permutations(features.inputs, enforce_permutations) hints = _preprocess_permutations(features.hints, enforce_permutations) outputs = _preprocess_permutations(feedback.outputs, enforce_permutations) features = features._replace(inputs=tuple(inputs), hints=tuple(hints)) feedback = feedback._replace(features=features, outputs=outputs) yield feedback new_spec = {} for k in spec: if (spec[k][1] == specs.Location.NODE and spec[k][2] == specs.Type.SHOULD_BE_PERMUTATION): if enforce_permutations: new_spec[k] = (spec[k][0], spec[k][1], specs.Type.PERMUTATION_POINTER) new_spec[k + '_mask'] = (spec[k][0], spec[k][1], specs.Type.MASK_ONE) else: new_spec[k] = (spec[k][0], spec[k][1], specs.Type.POINTER) else: new_spec[k] = spec[k] return new_spec, _iterate() def process_pred_as_input(spec, sample_iterator): """Move pred_h hint to pred input.""" def _iterate(): while True: feedback = next(sample_iterator) features = feedback.features pred_h = [h for h in features.hints if h.name == 'pred_h'] if pred_h: assert len(pred_h) == 1 pred_h = pred_h[0] hints = [h for h in features.hints if h.name != 'pred_h'] for i in range(len(features.lengths)): assert np.sum(np.abs(pred_h.data[1:int(features.lengths[i]), i] - pred_h.data[0, i])) == 0.0 inputs = tuple(features.inputs) + ( 
probing.DataPoint(name='pred', location=pred_h.location, type_=pred_h.type_, data=pred_h.data[0]),) features = features._replace(inputs=tuple(inputs), hints=tuple(hints)) feedback = feedback._replace(features=features) yield feedback new_spec = {} for k in spec: if k == 'pred_h': assert spec[k] == (specs.Stage.HINT, specs.Location.NODE, specs.Type.POINTER) new_spec['pred'] = (specs.Stage.INPUT, specs.Location.NODE, specs.Type.POINTER) else: new_spec[k] = spec[k] return new_spec, _iterate() def process_random_pos(sample_iterator, rng): """Randomize the `pos` input from a sampler. The `pos` input is, by default, a scalar uniformly spaced between 0 and 1 across the nodes. The exception are string algorithms (naive_string_matcher, kmp_string_matcher and lcs_length), where the `pos` sequence is split into needle and haystack (or first and second string, for lcs_length). Here we replace the uniformly spaced `pos` with an ordered sequence of random scalars, or, for string algorithms, two ordered sequences of random scalars. Args: sample_iterator: An iterator producing samples with non-random `pos` inputs. rng: Numpy random generator Returns: An iterator returning the samples with randomized `pos` inputs. """ def _iterate(): while True: feedback = next(sample_iterator) inputs = feedback.features.inputs pos, = [x for x in inputs if x.name == 'pos'] batch_size, num_nodes = pos.data.shape unsorted = rng.uniform(size=(batch_size, num_nodes)) new_pos = [] for i in range(batch_size): # we check one example at a time. # We find if there are splits in the pos sequence, marked by zeros. # We know there will always be at least 1 zero, if there's no split. split, = np.where(pos.data[i] == 0) split = np.concatenate([split, [num_nodes]]) # We construct the randomized pos by sorting the random values in each # split and concatenating them. new_pos.append( np.concatenate([np.sort(unsorted[i, split[j]:split[j+1]]) for j in range(len(split) - 1)])) pos.data = np.array(new_pos) inputs = [(pos if x.name == 'pos' else x) for x in inputs] features = feedback.features._replace(inputs=inputs) feedback = feedback._replace(features=features) yield feedback return _iterate()
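

# ---------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical demo, not an official entry point of
# this module): builds a BFS sampler, draws one batch of feedback, and wraps a
# stream of batches with `process_random_pos` so the `pos` input is
# randomized, mirroring how the unit tests drive this module. The constants
# (16 nodes, 100 samples, batch size 8) are arbitrary demo values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  sampler, _ = build_sampler('bfs', num_samples=100, length=16, seed=0)
  feedback = sampler.next(batch_size=8)
  print([dp.name for dp in feedback.features.inputs])  # e.g. ['pos', ...]
  print(feedback.features.lengths.shape)               # (8,)

  def _batch_stream():
    while True:
      yield sampler.next(batch_size=8)

  randomized = process_random_pos(_batch_stream(), np.random.RandomState(0))
  first_batch = next(randomized)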
clrs-master
clrs/_src/samplers.py
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `decoders.py`.""" from absl.testing import absltest import chex from clrs._src import decoders import jax import jax.numpy as jnp class DecodersTest(absltest.TestCase): def test_log_sinkhorn(self): x = jax.random.normal(jax.random.PRNGKey(42), (10, 10)) y = jnp.exp(decoders.log_sinkhorn(x, steps=10, temperature=1.0, zero_diagonal=False, noise_rng_key=None)) chex.assert_trees_all_close(jnp.sum(y, axis=-1), 1., atol=1e-4) chex.assert_trees_all_close(jnp.sum(y, axis=-2), 1., atol=1e-4) def test_log_sinkhorn_zero_diagonal(self): x = jax.random.normal(jax.random.PRNGKey(42), (10, 10)) y = jnp.exp(decoders.log_sinkhorn(x, steps=10, temperature=1.0, zero_diagonal=True, noise_rng_key=None)) chex.assert_trees_all_close(jnp.sum(y, axis=-1), 1., atol=1e-4) chex.assert_trees_all_close(jnp.sum(y, axis=-2), 1., atol=1e-4) chex.assert_trees_all_close(jnp.sum(y.diagonal()), 0., atol=1e-4) if __name__ == '__main__': absltest.main()
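

# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical helper, not an API exposed by `decoders`):
# the tests above check that the Sinkhorn-normalised matrix has rows and
# columns that each sum to one; a common way to read a discrete permutation
# off such a doubly-stochastic matrix is a row-wise argmax, as sketched here.
# ---------------------------------------------------------------------------
def _greedy_permutation(logits: jnp.ndarray) -> jnp.ndarray:
  """Row-wise argmax of the Sinkhorn-normalised score matrix."""
  y = jnp.exp(decoders.log_sinkhorn(
      logits, steps=10, temperature=1.0,
      zero_diagonal=False, noise_rng_key=None))
  return jnp.argmax(y, axis=-1)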
clrs-master
clrs/_src/decoders_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """JAX implementation of CLRS basic network.""" import functools from typing import Dict, List, Optional, Tuple import chex from clrs._src import decoders from clrs._src import encoders from clrs._src import probing from clrs._src import processors from clrs._src import samplers from clrs._src import specs import haiku as hk import jax import jax.numpy as jnp _Array = chex.Array _DataPoint = probing.DataPoint _Features = samplers.Features _FeaturesChunked = samplers.FeaturesChunked _Location = specs.Location _Spec = specs.Spec _Stage = specs.Stage _Trajectory = samplers.Trajectory _Type = specs.Type @chex.dataclass class _MessagePassingScanState: hint_preds: chex.Array output_preds: chex.Array hiddens: chex.Array lstm_state: Optional[hk.LSTMState] @chex.dataclass class _MessagePassingOutputChunked: hint_preds: chex.Array output_preds: chex.Array @chex.dataclass class MessagePassingStateChunked: inputs: chex.Array hints: chex.Array is_first: chex.Array hint_preds: chex.Array hiddens: chex.Array lstm_state: Optional[hk.LSTMState] class Net(hk.Module): """Building blocks (networks) used to encode and decode messages.""" def __init__( self, spec: List[_Spec], hidden_dim: int, encode_hints: bool, decode_hints: bool, processor_factory: processors.ProcessorFactory, use_lstm: bool, encoder_init: str, dropout_prob: float, hint_teacher_forcing: float, hint_repred_mode='soft', nb_dims=None, nb_msg_passing_steps=1, name: str = 'net', ): """Constructs a `Net`.""" super().__init__(name=name) self._dropout_prob = dropout_prob self._hint_teacher_forcing = hint_teacher_forcing self._hint_repred_mode = hint_repred_mode self.spec = spec self.hidden_dim = hidden_dim self.encode_hints = encode_hints self.decode_hints = decode_hints self.processor_factory = processor_factory self.nb_dims = nb_dims self.use_lstm = use_lstm self.encoder_init = encoder_init self.nb_msg_passing_steps = nb_msg_passing_steps def _msg_passing_step(self, mp_state: _MessagePassingScanState, i: int, hints: List[_DataPoint], repred: bool, lengths: chex.Array, batch_size: int, nb_nodes: int, inputs: _Trajectory, first_step: bool, spec: _Spec, encs: Dict[str, List[hk.Module]], decs: Dict[str, Tuple[hk.Module]], return_hints: bool, return_all_outputs: bool ): if self.decode_hints and not first_step: assert self._hint_repred_mode in ['soft', 'hard', 'hard_on_eval'] hard_postprocess = (self._hint_repred_mode == 'hard' or (self._hint_repred_mode == 'hard_on_eval' and repred)) decoded_hint = decoders.postprocess(spec, mp_state.hint_preds, sinkhorn_temperature=0.1, sinkhorn_steps=25, hard=hard_postprocess) if repred and self.decode_hints and not first_step: cur_hint = [] for hint in decoded_hint: cur_hint.append(decoded_hint[hint]) else: cur_hint = [] needs_noise = (self.decode_hints and not first_step and self._hint_teacher_forcing < 1.0) if needs_noise: # For noisy 
teacher forcing, choose which examples in the batch to force force_mask = jax.random.bernoulli( hk.next_rng_key(), self._hint_teacher_forcing, (batch_size,)) else: force_mask = None for hint in hints: hint_data = jnp.asarray(hint.data)[i] _, loc, typ = spec[hint.name] if needs_noise: if (typ == _Type.POINTER and decoded_hint[hint.name].type_ == _Type.SOFT_POINTER): # When using soft pointers, the decoded hints cannot be summarised # as indices (as would happen in hard postprocessing), so we need # to raise the ground-truth hint (potentially used for teacher # forcing) to its one-hot version. hint_data = hk.one_hot(hint_data, nb_nodes) typ = _Type.SOFT_POINTER hint_data = jnp.where(_expand_to(force_mask, hint_data), hint_data, decoded_hint[hint.name].data) cur_hint.append( probing.DataPoint( name=hint.name, location=loc, type_=typ, data=hint_data)) hiddens, output_preds_cand, hint_preds, lstm_state = self._one_step_pred( inputs, cur_hint, mp_state.hiddens, batch_size, nb_nodes, mp_state.lstm_state, spec, encs, decs, repred) if first_step: output_preds = output_preds_cand else: output_preds = {} for outp in mp_state.output_preds: is_not_done = _is_not_done_broadcast(lengths, i, output_preds_cand[outp]) output_preds[outp] = is_not_done * output_preds_cand[outp] + ( 1.0 - is_not_done) * mp_state.output_preds[outp] new_mp_state = _MessagePassingScanState( # pytype: disable=wrong-arg-types # numpy-scalars hint_preds=hint_preds, output_preds=output_preds, hiddens=hiddens, lstm_state=lstm_state) # Save memory by not stacking unnecessary fields accum_mp_state = _MessagePassingScanState( # pytype: disable=wrong-arg-types # numpy-scalars hint_preds=hint_preds if return_hints else None, output_preds=output_preds if return_all_outputs else None, hiddens=None, lstm_state=None) # Following the jax.scan convention, the first returned value is the state we carry over; # the second value is the output that will be stacked over steps. return new_mp_state, accum_mp_state def __call__(self, features_list: List[_Features], repred: bool, algorithm_index: int, return_hints: bool, return_all_outputs: bool): """Process one batch of data. Args: features_list: A list of _Features objects, each with the inputs, hints and lengths for a batch of data corresponding to one algorithm. The list should have either length 1, at train/evaluation time, or length equal to the number of algorithms this Net is meant to process, at initialization. repred: False during training, when we have access to ground-truth hints. True in validation/test mode, when we have to use our own hint predictions. algorithm_index: Which algorithm is being processed. It can be -1 at initialisation (either because we are initialising the parameters of the module or because we are initialising the message-passing state), meaning that all algorithms should be processed, in which case `features_list` should have length equal to the number of specs of the Net. Otherwise, `algorithm_index` should be between 0 and `len(self.spec) - 1`, meaning only one of the algorithms will be processed, and `features_list` should have length 1. return_hints: Whether to accumulate and return the predicted hints, when they are decoded. return_all_outputs: Whether to return the full sequence of outputs, or just the last step's output. Returns: A 2-tuple with (output predictions, hint predictions) for the selected algorithm.
""" if algorithm_index == -1: algorithm_indices = range(len(features_list)) else: algorithm_indices = [algorithm_index] assert len(algorithm_indices) == len(features_list) self.encoders, self.decoders = self._construct_encoders_decoders() self.processor = self.processor_factory(self.hidden_dim) # Optionally construct LSTM. if self.use_lstm: self.lstm = hk.LSTM( hidden_size=self.hidden_dim, name='processor_lstm') lstm_init = self.lstm.initial_state else: self.lstm = None lstm_init = lambda x: 0 for algorithm_index, features in zip(algorithm_indices, features_list): inputs = features.inputs hints = features.hints lengths = features.lengths batch_size, nb_nodes = _data_dimensions(features) nb_mp_steps = max(1, hints[0].data.shape[0] - 1) hiddens = jnp.zeros((batch_size, nb_nodes, self.hidden_dim)) if self.use_lstm: lstm_state = lstm_init(batch_size * nb_nodes) lstm_state = jax.tree_util.tree_map( lambda x, b=batch_size, n=nb_nodes: jnp.reshape(x, [b, n, -1]), lstm_state) else: lstm_state = None mp_state = _MessagePassingScanState( # pytype: disable=wrong-arg-types # numpy-scalars hint_preds=None, output_preds=None, hiddens=hiddens, lstm_state=lstm_state) # Do the first step outside of the scan because it has a different # computation graph. common_args = dict( hints=hints, repred=repred, inputs=inputs, batch_size=batch_size, nb_nodes=nb_nodes, lengths=lengths, spec=self.spec[algorithm_index], encs=self.encoders[algorithm_index], decs=self.decoders[algorithm_index], return_hints=return_hints, return_all_outputs=return_all_outputs, ) mp_state, lean_mp_state = self._msg_passing_step( mp_state, i=0, first_step=True, **common_args) # Then scan through the rest. scan_fn = functools.partial( self._msg_passing_step, first_step=False, **common_args) output_mp_state, accum_mp_state = hk.scan( scan_fn, mp_state, jnp.arange(nb_mp_steps - 1) + 1, length=nb_mp_steps - 1) # We only return the last algorithm's output. That's because # the output only matters when a single algorithm is processed; the case # `algorithm_index==-1` (meaning all algorithms should be processed) # is used only to init parameters. accum_mp_state = jax.tree_util.tree_map( lambda init, tail: jnp.concatenate([init[None], tail], axis=0), lean_mp_state, accum_mp_state) def invert(d): """Dict of lists -> list of dicts.""" if d: return [dict(zip(d, i)) for i in zip(*d.values())] if return_all_outputs: output_preds = {k: jnp.stack(v) for k, v in accum_mp_state.output_preds.items()} else: output_preds = output_mp_state.output_preds hint_preds = invert(accum_mp_state.hint_preds) return output_preds, hint_preds def _construct_encoders_decoders(self): """Constructs encoders and decoders, separate for each algorithm.""" encoders_ = [] decoders_ = [] enc_algo_idx = None for (algo_idx, spec) in enumerate(self.spec): enc = {} dec = {} for name, (stage, loc, t) in spec.items(): if stage == _Stage.INPUT or ( stage == _Stage.HINT and self.encode_hints): # Build input encoders. if name == specs.ALGO_IDX_INPUT_NAME: if enc_algo_idx is None: enc_algo_idx = [hk.Linear(self.hidden_dim, name=f'{name}_enc_linear')] enc[name] = enc_algo_idx else: enc[name] = encoders.construct_encoders( stage, loc, t, hidden_dim=self.hidden_dim, init=self.encoder_init, name=f'algo_{algo_idx}_{name}') if stage == _Stage.OUTPUT or ( stage == _Stage.HINT and self.decode_hints): # Build output decoders. 
dec[name] = decoders.construct_decoders( loc, t, hidden_dim=self.hidden_dim, nb_dims=self.nb_dims[algo_idx][name], name=f'algo_{algo_idx}_{name}') encoders_.append(enc) decoders_.append(dec) return encoders_, decoders_ def _one_step_pred( self, inputs: _Trajectory, hints: _Trajectory, hidden: _Array, batch_size: int, nb_nodes: int, lstm_state: Optional[hk.LSTMState], spec: _Spec, encs: Dict[str, List[hk.Module]], decs: Dict[str, Tuple[hk.Module]], repred: bool, ): """Generates one-step predictions.""" # Initialise empty node/edge/graph features and adjacency matrix. node_fts = jnp.zeros((batch_size, nb_nodes, self.hidden_dim)) edge_fts = jnp.zeros((batch_size, nb_nodes, nb_nodes, self.hidden_dim)) graph_fts = jnp.zeros((batch_size, self.hidden_dim)) adj_mat = jnp.repeat( jnp.expand_dims(jnp.eye(nb_nodes), 0), batch_size, axis=0) # ENCODE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Encode node/edge/graph features from inputs and (optionally) hints. trajectories = [inputs] if self.encode_hints: trajectories.append(hints) for trajectory in trajectories: for dp in trajectory: try: dp = encoders.preprocess(dp, nb_nodes) assert dp.type_ != _Type.SOFT_POINTER adj_mat = encoders.accum_adj_mat(dp, adj_mat) encoder = encs[dp.name] edge_fts = encoders.accum_edge_fts(encoder, dp, edge_fts) node_fts = encoders.accum_node_fts(encoder, dp, node_fts) graph_fts = encoders.accum_graph_fts(encoder, dp, graph_fts) except Exception as e: raise Exception(f'Failed to process {dp}') from e # PROCESS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nxt_hidden = hidden for _ in range(self.nb_msg_passing_steps): nxt_hidden, nxt_edge = self.processor( node_fts, edge_fts, graph_fts, adj_mat, nxt_hidden, batch_size=batch_size, nb_nodes=nb_nodes, ) if not repred: # dropout only on training nxt_hidden = hk.dropout(hk.next_rng_key(), self._dropout_prob, nxt_hidden) if self.use_lstm: # lstm doesn't accept multiple batch dimensions (in our case, batch and # nodes), so we vmap over the (first) batch dimension. nxt_hidden, nxt_lstm_state = jax.vmap(self.lstm)(nxt_hidden, lstm_state) else: nxt_lstm_state = None h_t = jnp.concatenate([node_fts, hidden, nxt_hidden], axis=-1) if nxt_edge is not None: e_t = jnp.concatenate([edge_fts, nxt_edge], axis=-1) else: e_t = edge_fts # DECODE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Decode features and (optionally) hints. hint_preds, output_preds = decoders.decode_fts( decoders=decs, spec=spec, h_t=h_t, adj_mat=adj_mat, edge_fts=e_t, graph_fts=graph_fts, inf_bias=self.processor.inf_bias, inf_bias_edge=self.processor.inf_bias_edge, repred=repred, ) return nxt_hidden, output_preds, hint_preds, nxt_lstm_state class NetChunked(Net): """A Net that will process time-chunked data instead of full samples.""" def _msg_passing_step(self, mp_state: MessagePassingStateChunked, xs, repred: bool, init_mp_state: bool, batch_size: int, nb_nodes: int, spec: _Spec, encs: Dict[str, List[hk.Module]], decs: Dict[str, Tuple[hk.Module]], ): """Perform one message passing step. This function is unrolled along the time axis to process a data chunk. Args: mp_state: message-passing state. Includes the inputs, hints, beginning-of-sample markers, hint predictions, hidden and lstm state to be used for prediction in the current step. xs: A 3-tuple of with the next timestep's inputs, hints, and beginning-of-sample markers. 
These will replace the contents of the `mp_state` at the output, in readiness for the next unroll step of the chunk (or the first step of the next chunk). Besides, the next timestep's hints are necessary to compute diffs when `decode_diffs` is True. repred: False during training, when we have access to ground-truth hints. True in validation/test mode, when we have to use our own hint predictions. init_mp_state: Indicates if we are calling the method just to initialise the message-passing state, before the beginning of training or validation. batch_size: Size of batch dimension. nb_nodes: Number of nodes in graph. spec: The spec of the algorithm being processed. encs: encoders for the algorithm being processed. decs: decoders for the algorithm being processed. Returns: A 2-tuple with the next mp_state and an output consisting of hint predictions and output predictions. """ def _as_prediction_data(hint): if hint.type_ == _Type.POINTER: return hk.one_hot(hint.data, nb_nodes) return hint.data nxt_inputs, nxt_hints, nxt_is_first = xs inputs = mp_state.inputs is_first = mp_state.is_first hints = mp_state.hints if init_mp_state: prev_hint_preds = {h.name: _as_prediction_data(h) for h in hints} hints_for_pred = hints else: prev_hint_preds = mp_state.hint_preds if self.decode_hints: if repred: force_mask = jnp.zeros(batch_size, dtype=bool) elif self._hint_teacher_forcing == 1.0: force_mask = jnp.ones(batch_size, dtype=bool) else: force_mask = jax.random.bernoulli( hk.next_rng_key(), self._hint_teacher_forcing, (batch_size,)) assert self._hint_repred_mode in ['soft', 'hard', 'hard_on_eval'] hard_postprocess = ( self._hint_repred_mode == 'hard' or (self._hint_repred_mode == 'hard_on_eval' and repred)) decoded_hints = decoders.postprocess(spec, prev_hint_preds, sinkhorn_temperature=0.1, sinkhorn_steps=25, hard=hard_postprocess) hints_for_pred = [] for h in hints: typ = h.type_ hint_data = h.data if (typ == _Type.POINTER and decoded_hints[h.name].type_ == _Type.SOFT_POINTER): hint_data = hk.one_hot(hint_data, nb_nodes) typ = _Type.SOFT_POINTER hints_for_pred.append(probing.DataPoint( name=h.name, location=h.location, type_=typ, data=jnp.where(_expand_to(is_first | force_mask, hint_data), hint_data, decoded_hints[h.name].data))) else: hints_for_pred = hints hiddens = jnp.where(is_first[..., None, None], 0.0, mp_state.hiddens) if self.use_lstm: lstm_state = jax.tree_util.tree_map( lambda x: jnp.where(is_first[..., None, None], 0.0, x), mp_state.lstm_state) else: lstm_state = None hiddens, output_preds, hint_preds, lstm_state = self._one_step_pred( inputs, hints_for_pred, hiddens, batch_size, nb_nodes, lstm_state, spec, encs, decs, repred) new_mp_state = MessagePassingStateChunked( # pytype: disable=wrong-arg-types # numpy-scalars hiddens=hiddens, lstm_state=lstm_state, hint_preds=hint_preds, inputs=nxt_inputs, hints=nxt_hints, is_first=nxt_is_first) mp_output = _MessagePassingOutputChunked( # pytype: disable=wrong-arg-types # numpy-scalars hint_preds=hint_preds, output_preds=output_preds) return new_mp_state, mp_output def __call__(self, features_list: List[_FeaturesChunked], mp_state_list: List[MessagePassingStateChunked], repred: bool, init_mp_state: bool, algorithm_index: int): """Process one chunk of data. Args: features_list: A list of _FeaturesChunked objects, each with the inputs, hints and beginning- and end-of-sample markers for a chunk (i.e., fixed time length) of data corresponding to one algorithm. All features are expected to have dimensions chunk_length x batch_size x ... 
The list should have either length 1, at train/evaluation time, or length equal to the number of algorithms this Net is meant to process, at initialization. mp_state_list: list of message-passing states. Each message-passing state includes the inputs, hints, beginning-of-sample markers, hint predictions, hidden and lstm state from the end of the previous chunk, for one algorithm. The length of the list should be the same as the length of `features_list`. repred: False during training, when we have access to ground-truth hints. True in validation/test mode, when we have to use our own hint predictions. init_mp_state: Indicates if we are calling the network just to initialise the message-passing state, before the beginning of training or validation. If True, `algorithm_index` (see below) must be -1 in order to initialize the message-passing state of all algorithms. algorithm_index: Which algorithm is being processed. It can be -1 at initialisation (either because we are initialising the parameters of the module or because we are initialising the message-passing state), meaning that all algorithms should be processed, in which case `features_list` and `mp_state_list` should have length equal to the number of specs of the Net. Otherwise, `algorithm_index` should be between 0 and `len(self.spec) - 1`, meaning only one of the algorithms will be processed, and `features_list` and `mp_state_list` should have length 1. Returns: A 2-tuple consisting of: - A 2-tuple with (output predictions, hint predictions) for the selected algorithm. Each of these has chunk_length x batch_size x ... data, where the first time slice contains outputs for the mp_state that was passed as input, and the last time slice contains outputs for the next-to-last slice of the input features. The outputs that correspond to the final time slice of the input features will be calculated when the next chunk is processed, using the data in the mp_state returned here (see below). If `init_mp_state` is True, we return None instead of the 2-tuple. - The mp_state (message-passing state) for the next chunk of data of the selected algorithm. If `init_mp_state` is True, we return initial mp states for all the algorithms. """ if algorithm_index == -1: algorithm_indices = range(len(features_list)) else: algorithm_indices = [algorithm_index] assert not init_mp_state # init state only allowed with all algorithms assert len(algorithm_indices) == len(features_list) assert len(algorithm_indices) == len(mp_state_list) self.encoders, self.decoders = self._construct_encoders_decoders() self.processor = self.processor_factory(self.hidden_dim) # Optionally construct LSTM.
if self.use_lstm: self.lstm = hk.LSTM( hidden_size=self.hidden_dim, name='processor_lstm') lstm_init = self.lstm.initial_state else: self.lstm = None lstm_init = lambda x: 0 if init_mp_state: output_mp_states = [] for algorithm_index, features, mp_state in zip( algorithm_indices, features_list, mp_state_list): inputs = features.inputs hints = features.hints batch_size, nb_nodes = _data_dimensions_chunked(features) if self.use_lstm: lstm_state = lstm_init(batch_size * nb_nodes) lstm_state = jax.tree_util.tree_map( lambda x, b=batch_size, n=nb_nodes: jnp.reshape(x, [b, n, -1]), lstm_state) mp_state.lstm_state = lstm_state mp_state.inputs = jax.tree_util.tree_map(lambda x: x[0], inputs) mp_state.hints = jax.tree_util.tree_map(lambda x: x[0], hints) mp_state.is_first = jnp.zeros(batch_size, dtype=int) mp_state.hiddens = jnp.zeros((batch_size, nb_nodes, self.hidden_dim)) next_is_first = jnp.ones(batch_size, dtype=int) mp_state, _ = self._msg_passing_step( mp_state, (mp_state.inputs, mp_state.hints, next_is_first), repred=repred, init_mp_state=True, batch_size=batch_size, nb_nodes=nb_nodes, spec=self.spec[algorithm_index], encs=self.encoders[algorithm_index], decs=self.decoders[algorithm_index], ) output_mp_states.append(mp_state) return None, output_mp_states for algorithm_index, features, mp_state in zip( algorithm_indices, features_list, mp_state_list): inputs = features.inputs hints = features.hints is_first = features.is_first batch_size, nb_nodes = _data_dimensions_chunked(features) scan_fn = functools.partial( self._msg_passing_step, repred=repred, init_mp_state=False, batch_size=batch_size, nb_nodes=nb_nodes, spec=self.spec[algorithm_index], encs=self.encoders[algorithm_index], decs=self.decoders[algorithm_index], ) mp_state, scan_output = hk.scan( scan_fn, mp_state, (inputs, hints, is_first), ) # We only return the last algorithm's output and state. That's because # the output only matters when a single algorithm is processed; the case # `algorithm_index==-1` (meaning all algorithms should be processed) # is used only to init parameters. return (scan_output.output_preds, scan_output.hint_preds), mp_state def _data_dimensions(features: _Features) -> Tuple[int, int]: """Returns (batch_size, nb_nodes).""" for inp in features.inputs: if inp.location in [_Location.NODE, _Location.EDGE]: return inp.data.shape[:2] assert False def _data_dimensions_chunked(features: _FeaturesChunked) -> Tuple[int, int]: """Returns (batch_size, nb_nodes).""" for inp in features.inputs: if inp.location in [_Location.NODE, _Location.EDGE]: return inp.data.shape[1:3] assert False def _expand_to(x: _Array, y: _Array) -> _Array: while len(y.shape) > len(x.shape): x = jnp.expand_dims(x, -1) return x def _is_not_done_broadcast(lengths, i, tensor): is_not_done = (lengths > i + 1) * 1.0 while len(is_not_done.shape) < len(tensor.shape): # pytype: disable=attribute-error # numpy-scalars is_not_done = jnp.expand_dims(is_not_done, -1) return is_not_done
clrs-master
clrs/_src/nets.py
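# Illustrative sketch (not part of nets.py): the teacher-forcing mix in
# `Net._msg_passing_step` above broadcasts a per-example boolean mask up to
# the hint tensor's rank (what `_expand_to` does) so that `jnp.where` can pick
# ground-truth hints for forced examples and decoded predictions for the rest.
# Shapes and values below are toy stand-ins.
import jax.numpy as jnp

def _expand_mask(x, y):
  # Append trailing singleton dims to `x` until it broadcasts against `y`.
  while y.ndim > x.ndim:
    x = jnp.expand_dims(x, -1)
  return x

batch_size, nb_nodes = 4, 3
force_mask = jnp.array([True, False, True, False])   # per-example choice
ground_truth = jnp.ones((batch_size, nb_nodes))      # hint from the data
decoded = jnp.zeros((batch_size, nb_nodes))          # model's own prediction
mixed = jnp.where(_expand_mask(force_mask, ground_truth), ground_truth, decoded)
assert mixed.shape == (batch_size, nb_nodes)
assert float(mixed[0, 0]) == 1.0 and float(mixed[1, 0]) == 0.0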
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `baselines.py`.""" import copy import functools from typing import Generator from absl.testing import absltest from absl.testing import parameterized import chex from clrs._src import baselines from clrs._src import dataset from clrs._src import probing from clrs._src import processors from clrs._src import samplers from clrs._src import specs import haiku as hk import jax import numpy as np _Array = np.ndarray def _error(x, y): return np.sum(np.abs(x-y)) def _make_sampler(algo: str, length: int) -> samplers.Sampler: sampler, _ = samplers.build_sampler( algo, seed=samplers.CLRS30['val']['seed'], num_samples=samplers.CLRS30['val']['num_samples'], length=length, ) return sampler def _without_permutation(feedback): """Replace should-be permutations with pointers.""" outputs = [] for x in feedback.outputs: if x.type_ != specs.Type.SHOULD_BE_PERMUTATION: outputs.append(x) continue assert x.location == specs.Location.NODE outputs.append(probing.DataPoint(name=x.name, location=x.location, type_=specs.Type.POINTER, data=x.data)) return feedback._replace(outputs=outputs) def _make_iterable_sampler( algo: str, batch_size: int, length: int) -> Generator[samplers.Feedback, None, None]: sampler = _make_sampler(algo, length) while True: yield _without_permutation(sampler.next(batch_size)) def _remove_permutation_from_spec(spec): """Modify spec to turn permutation type to pointer.""" new_spec = {} for k in spec: if (spec[k][1] == specs.Location.NODE and spec[k][2] == specs.Type.SHOULD_BE_PERMUTATION): new_spec[k] = (spec[k][0], spec[k][1], specs.Type.POINTER) else: new_spec[k] = spec[k] return new_spec class BaselinesTest(parameterized.TestCase): def test_full_vs_chunked(self): """Test that chunking does not affect gradients.""" batch_size = 4 length = 8 algo = 'insertion_sort' spec = _remove_permutation_from_spec(specs.SPECS[algo]) rng_key = jax.random.PRNGKey(42) full_ds = _make_iterable_sampler(algo, batch_size, length) chunked_ds = dataset.chunkify( _make_iterable_sampler(algo, batch_size, length), length) double_chunked_ds = dataset.chunkify( _make_iterable_sampler(algo, batch_size, length), length * 2) full_batches = [next(full_ds) for _ in range(2)] chunked_batches = [next(chunked_ds) for _ in range(2)] double_chunk_batch = next(double_chunked_ds) with chex.fake_jit(): # jitting makes test longer processor_factory = processors.get_processor_factory( 'mpnn', use_ln=False, nb_triplet_fts=0) common_args = dict(processor_factory=processor_factory, hidden_dim=8, learning_rate=0.01, decode_hints=True, encode_hints=True) b_full = baselines.BaselineModel( spec, dummy_trajectory=full_batches[0], **common_args) b_full.init(full_batches[0].features, seed=42) # pytype: disable=wrong-arg-types # jax-ndarray full_params = b_full.params full_loss_0 = b_full.feedback(rng_key, full_batches[0]) b_full.params = 
full_params full_loss_1 = b_full.feedback(rng_key, full_batches[1]) new_full_params = b_full.params b_chunked = baselines.BaselineModelChunked( spec, dummy_trajectory=chunked_batches[0], **common_args) b_chunked.init([[chunked_batches[0].features]], seed=42) # pytype: disable=wrong-arg-types # jax-ndarray chunked_params = b_chunked.params jax.tree_util.tree_map(np.testing.assert_array_equal, full_params, chunked_params) chunked_loss_0 = b_chunked.feedback(rng_key, chunked_batches[0]) b_chunked.params = chunked_params chunked_loss_1 = b_chunked.feedback(rng_key, chunked_batches[1]) new_chunked_params = b_chunked.params b_chunked.params = chunked_params double_chunked_loss = b_chunked.feedback(rng_key, double_chunk_batch) # Test that losses match np.testing.assert_allclose(full_loss_0, chunked_loss_0, rtol=1e-4) np.testing.assert_allclose(full_loss_1, chunked_loss_1, rtol=1e-4) np.testing.assert_allclose(full_loss_0 + full_loss_1, 2 * double_chunked_loss, rtol=1e-4) # Test that gradients are the same (parameters changed equally). # First check that gradients were not zero, i.e., parameters have changed. param_change, _ = jax.tree_util.tree_flatten( jax.tree_util.tree_map(_error, full_params, new_full_params)) self.assertGreater(np.mean(param_change), 0.1) # Now check that full and chunked gradients are the same. jax.tree_util.tree_map( functools.partial(np.testing.assert_allclose, rtol=1e-4), new_full_params, new_chunked_params) def test_multi_vs_single(self): """Test that multi = single when we only train one of the algorithms.""" batch_size = 4 length = 16 algos = ['insertion_sort', 'activity_selector', 'bfs'] spec = [_remove_permutation_from_spec(specs.SPECS[algo]) for algo in algos] rng_key = jax.random.PRNGKey(42) full_ds = [_make_iterable_sampler(algo, batch_size, length) for algo in algos] full_batches = [next(ds) for ds in full_ds] full_batches_2 = [next(ds) for ds in full_ds] with chex.fake_jit(): # jitting makes test longer processor_factory = processors.get_processor_factory( 'mpnn', use_ln=False, nb_triplet_fts=0) common_args = dict(processor_factory=processor_factory, hidden_dim=8, learning_rate=0.01, decode_hints=True, encode_hints=True) b_single = baselines.BaselineModel( spec[0], dummy_trajectory=full_batches[0], **common_args) b_multi = baselines.BaselineModel( spec, dummy_trajectory=full_batches, **common_args) b_single.init(full_batches[0].features, seed=0) # pytype: disable=wrong-arg-types # jax-ndarray b_multi.init([f.features for f in full_batches], seed=0) # pytype: disable=wrong-arg-types # jax-ndarray single_params = [] single_losses = [] multi_params = [] multi_losses = [] single_params.append(copy.deepcopy(b_single.params)) single_losses.append(b_single.feedback(rng_key, full_batches[0])) single_params.append(copy.deepcopy(b_single.params)) single_losses.append(b_single.feedback(rng_key, full_batches_2[0])) single_params.append(copy.deepcopy(b_single.params)) multi_params.append(copy.deepcopy(b_multi.params)) multi_losses.append(b_multi.feedback(rng_key, full_batches[0], algorithm_index=0)) multi_params.append(copy.deepcopy(b_multi.params)) multi_losses.append(b_multi.feedback(rng_key, full_batches_2[0], algorithm_index=0)) multi_params.append(copy.deepcopy(b_multi.params)) # Test that losses match np.testing.assert_array_equal(single_losses, multi_losses) # Test that loss decreased assert single_losses[1] < single_losses[0] # Test that param changes were the same in single and multi-algorithm for single, multi in zip(single_params, multi_params): assert 
hk.data_structures.is_subset(subset=single, superset=multi) for module_name, params in single.items(): jax.tree_util.tree_map(np.testing.assert_array_equal, params, multi[module_name]) # Test that params change for the trained algorithm, but not the others for module_name, params in multi_params[0].items(): param_changes = jax.tree_util.tree_map(lambda a, b: np.sum(np.abs(a - b)), params, multi_params[1][module_name]) param_change = sum(param_changes.values()) if module_name in single_params[0]: # params of trained algorithm assert param_change > 1e-3 else: # params of non-trained algorithms assert param_change == 0.0 @parameterized.parameters(True, False) def test_multi_algorithm_idx(self, is_chunked): """Test that algorithm selection works as intended.""" batch_size = 4 length = 8 algos = ['insertion_sort', 'activity_selector', 'bfs'] spec = [_remove_permutation_from_spec(specs.SPECS[algo]) for algo in algos] rng_key = jax.random.PRNGKey(42) if is_chunked: ds = [dataset.chunkify(_make_iterable_sampler(algo, batch_size, length), 2 * length) for algo in algos] else: ds = [_make_iterable_sampler(algo, batch_size, length) for algo in algos] batches = [next(d) for d in ds] processor_factory = processors.get_processor_factory( 'mpnn', use_ln=False, nb_triplet_fts=0) common_args = dict(processor_factory=processor_factory, hidden_dim=8, learning_rate=0.01, decode_hints=True, encode_hints=True) if is_chunked: baseline = baselines.BaselineModelChunked( spec, dummy_trajectory=batches, **common_args) baseline.init([[f.features for f in batches]], seed=0) # pytype: disable=wrong-arg-types # jax-ndarray else: baseline = baselines.BaselineModel( spec, dummy_trajectory=batches, **common_args) baseline.init([f.features for f in batches], seed=0) # pytype: disable=wrong-arg-types # jax-ndarray # Find out what parameters change when we train each algorithm def _change(x, y): changes = {} for module_name, params in x.items(): changes[module_name] = sum( jax.tree_util.tree_map( lambda a, b: np.sum(np.abs(a-b)), params, y[module_name] ).values()) return changes param_changes = [] for algo_idx in range(len(algos)): init_params = copy.deepcopy(baseline.params) _ = baseline.feedback( rng_key, batches[algo_idx], algorithm_index=(0, algo_idx) if is_chunked else algo_idx) param_changes.append(_change(init_params, baseline.params)) # Test that non-changing parameters correspond to encoders/decoders # associated with the non-trained algorithms unchanged = [[k for k in pc if pc[k] == 0] for pc in param_changes] def _get_other_algos(algo_idx, modules): return set([k for k in modules if '_construct_encoders_decoders' in k and f'algo_{algo_idx}' not in k]) for algo_idx in range(len(algos)): expected_unchanged = _get_other_algos(algo_idx, baseline.params.keys()) self.assertNotEmpty(expected_unchanged) self.assertSetEqual(expected_unchanged, set(unchanged[algo_idx])) if __name__ == '__main__': absltest.main()
clrs-master
clrs/_src/baselines_test.py
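# Illustrative sketch (not part of the test file): the comparisons above rely
# on `jax.tree_util.tree_map` applying an elementwise check, or an error
# metric like `_error`, across two identically structured parameter trees.
# The nested dict below is a toy stand-in for real Haiku parameters.
import jax
import numpy as np

params_a = {'linear': {'w': np.ones((2, 2)), 'b': np.zeros(2)}}
params_b = {'linear': {'w': np.ones((2, 2)), 'b': np.zeros(2)}}

# Elementwise equality over the whole tree, as in test_full_vs_chunked.
jax.tree_util.tree_map(np.testing.assert_array_equal, params_a, params_b)

# Per-leaf absolute differences, flattened and summed, mirroring `_error`.
diffs, _ = jax.tree_util.tree_flatten(
    jax.tree_util.tree_map(lambda x, y: np.sum(np.abs(x - y)),
                           params_a, params_b))
assert sum(diffs) == 0.0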
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Searching algorithm generators. Currently implements the following: - Minimum - Binary search - Quickselect (Hoare, 1961) See "Introduction to Algorithms" 3ed (CLRS3) for more information. """ # pylint: disable=invalid-name from typing import Tuple, Union import chex from clrs._src import probing from clrs._src import specs import numpy as np _Array = np.ndarray _Numeric = Union[int, float] _Out = Tuple[int, probing.ProbesDict] def minimum(A: _Array) -> _Out: """Minimum.""" chex.assert_rank(A, 1) probes = probing.initialize(specs.SPECS['minimum']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'key': np.copy(A) }) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'min_h': probing.mask_one(0, A.shape[0]), 'i': probing.mask_one(0, A.shape[0]) }) min_ = 0 for i in range(1, A.shape[0]): if A[min_] > A[i]: min_ = i probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'min_h': probing.mask_one(min_, A.shape[0]), 'i': probing.mask_one(i, A.shape[0]) }) probing.push( probes, specs.Stage.OUTPUT, next_probe={'min': probing.mask_one(min_, A.shape[0])}) probing.finalize(probes) return min_, probes def binary_search(x: _Numeric, A: _Array) -> _Out: """Binary search.""" chex.assert_rank(A, 1) probes = probing.initialize(specs.SPECS['binary_search']) T_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(T_pos) * 1.0 / A.shape[0], 'key': np.copy(A), 'target': x }) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(T_pos)), 'low': probing.mask_one(0, A.shape[0]), 'high': probing.mask_one(A.shape[0] - 1, A.shape[0]), 'mid': probing.mask_one((A.shape[0] - 1) // 2, A.shape[0]), }) low = 0 high = A.shape[0] - 1 # make sure return is always in array while low < high: mid = (low + high) // 2 if x <= A[mid]: high = mid else: low = mid + 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(T_pos)), 'low': probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one((low + high) // 2, A.shape[0]), }) probing.push( probes, specs.Stage.OUTPUT, next_probe={'return': probing.mask_one(high, A.shape[0])}) probing.finalize(probes) return high, probes def quickselect( A: _Array, A_pos=None, p=None, r=None, i=None, probes=None, ) -> _Out: """Quickselect (Hoare, 1961).""" chex.assert_rank(A, 1) def partition(A, A_pos, p, r, target, probes): x = A[r] i = p - 1 for j in range(p, r): if A[j] <= x: i += 1 tmp = A[i] A[i] = A[j] A[j] = tmp tmp = A_pos[i] A_pos[i] = A_pos[j] A_pos[j] = tmp probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'p': probing.mask_one(A_pos[p], A.shape[0]), 'r': 
probing.mask_one(A_pos[r], A.shape[0]), 'i': probing.mask_one(A_pos[i + 1], A.shape[0]), 'j': probing.mask_one(A_pos[j], A.shape[0]), 'i_rank': (i + 1) * 1.0 / A.shape[0], 'target': target * 1.0 / A.shape[0] }) tmp = A[i + 1] A[i + 1] = A[r] A[r] = tmp tmp = A_pos[i + 1] A_pos[i + 1] = A_pos[r] A_pos[r] = tmp probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'p': probing.mask_one(A_pos[p], A.shape[0]), 'r': probing.mask_one(A_pos[r], A.shape[0]), 'i': probing.mask_one(A_pos[i + 1], A.shape[0]), 'j': probing.mask_one(A_pos[r], A.shape[0]), 'i_rank': (i + 1 - p) * 1.0 / A.shape[0], 'target': target * 1.0 / A.shape[0] }) return i + 1 if A_pos is None: A_pos = np.arange(A.shape[0]) if p is None: p = 0 if r is None: r = len(A) - 1 if i is None: i = len(A) // 2 if probes is None: probes = probing.initialize(specs.SPECS['quickselect']) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'key': np.copy(A) }) q = partition(A, A_pos, p, r, i, probes) k = q - p if i == k: probing.push( probes, specs.Stage.OUTPUT, next_probe={'median': probing.mask_one(A_pos[q], A.shape[0])}) probing.finalize(probes) return A[q], probes elif i < k: return quickselect(A, A_pos, p, q - 1, i, probes) else: return quickselect(A, A_pos, q + 1, r, i - k - 1, probes)
clrs-master
clrs/_src/algorithms/searching.py
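# Illustrative sketch (not part of searching.py): stripped of the probing
# calls, `binary_search` above reduces to the loop below, which keeps the
# returned index inside the array even when the target exceeds every key.
import numpy as np

def binary_search_plain(x, A):
  """Index of the leftmost element >= x, clamped to the last index."""
  low, high = 0, A.shape[0] - 1
  while low < high:
    mid = (low + high) // 2
    if x <= A[mid]:
      high = mid
    else:
      low = mid + 1
  return high

A = np.array([1, 3, 5, 7, 9])
assert A[binary_search_plain(5, A)] == 5
assert binary_search_plain(100, A) == A.shape[0] - 1  # clamped, as above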
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `searching.py`.""" # pylint: disable=invalid-name from absl.testing import absltest from clrs._src.algorithms import searching import numpy as np EmptyArray = np.asarray([], dtype=np.int32) class SearchingTest(absltest.TestCase): def test_minimum(self): for _ in range(17): A = np.random.randint(0, 100, size=(13,)) idx, _ = searching.minimum(A) self.assertEqual(A.min(), A[idx]) def test_binary_search(self): A = np.random.randint(0, 100, size=(13,)) A.sort() x = np.random.choice(A) idx, _ = searching.binary_search(x, A) self.assertEqual(A[idx], x) def test_quickselect(self): A = np.random.randint(0, 100, size=(13,)) idx, _ = searching.quickselect(A) self.assertEqual(sorted(A)[len(A) // 2], idx) if __name__ == '__main__': absltest.main()
clrs-master
clrs/_src/algorithms/searching_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `graphs.py`.""" # pylint: disable=invalid-name from absl.testing import absltest from clrs._src.algorithms import graphs import numpy as np # Unweighted graphs. DAG = np.array([ [0, 1, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], ]) DIRECTED = np.array([ [0, 1, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1], ]) UNDIRECTED = np.array([ [0, 1, 0, 0, 1], [1, 0, 1, 1, 1], [0, 1, 0, 1, 0], [0, 1, 1, 0, 1], [1, 1, 0, 1, 0], ]) ANOTHER_UNDIRECTED = np.array([ [0, 1, 1, 1, 0], [1, 0, 1, 0, 0], [1, 1, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 1, 0], ]) # Weighted graphs. X = np.iinfo(np.int32).max # not connected WEIGHTED_DAG = np.array([ [X, 9, 3, X, X], [X, X, 6, X, 2], [X, X, X, 1, X], [X, X, X, X, 2], [X, X, X, X, X], ]) WEIGHTED_DIRECTED = np.array([ [X, 9, 3, X, X], [X, X, 6, X, 2], [X, 2, X, 1, X], [X, X, 2, X, 2], [X, X, X, X, X], ]) WEIGHTED_UNDIRECTED = np.array([ [X, 2, 3, X, X], [2, X, 1, 3, 2], [3, 1, X, X, 1], [X, 3, X, X, 5], [X, 2, 1, 5, X], ]) # Bipartite graphs. 
BIPARTITE = np.array([ [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ]) BIPARTITE_2 = np.array([ [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], ]) class GraphsTest(absltest.TestCase): def test_dfs(self): expected_directed = np.array([0, 0, 2, 4, 1, 2]) out, _ = graphs.dfs(DIRECTED) np.testing.assert_array_equal(expected_directed, out) expected_undirected = np.array([0, 0, 1, 2, 3]) out, _ = graphs.dfs(UNDIRECTED) np.testing.assert_array_equal(expected_undirected, out) def test_bfs(self): expected_directed = np.array([0, 0, 2, 0, 1, 5]) out, _ = graphs.bfs(DIRECTED, 0) np.testing.assert_array_equal(expected_directed, out) expected_undirected = np.array([0, 0, 1, 1, 0]) out, _ = graphs.bfs(UNDIRECTED, 0) np.testing.assert_array_equal(expected_undirected, out) def test_topological_sort(self): expected_dag = np.array([3, 4, 0, 1, 4]) out, _ = graphs.topological_sort(DAG) np.testing.assert_array_equal(expected_dag, out) def test_articulation_points(self): expected = np.array([1, 0, 0, 1, 0]) out, _ = graphs.articulation_points(ANOTHER_UNDIRECTED) np.testing.assert_array_equal(expected, out) def test_bridges(self): expected = np.array([ [0, 0, 0, 1, -1], [0, 0, 0, -1, -1], [0, 0, 0, -1, -1], [1, -1, -1, 0, 1], [-1, -1, -1, 1, 0], ]) out, _ = graphs.bridges(ANOTHER_UNDIRECTED) np.testing.assert_array_equal(expected, out) def test_strongly_connected_components(self): expected_directed = np.array([0, 1, 2, 1, 1, 5]) out, _ = graphs.strongly_connected_components(DIRECTED) np.testing.assert_array_equal(expected_directed, out) expected_undirected = np.array([0, 0, 0, 0, 0]) out, _ = graphs.strongly_connected_components(UNDIRECTED) np.testing.assert_array_equal(expected_undirected, out) def test_mst_kruskal(self): expected = np.array([ [0, 1, 0, 0, 0], [1, 0, 1, 1, 0], [0, 1, 0, 0, 1], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], ]) out, _ = graphs.mst_kruskal(WEIGHTED_UNDIRECTED) np.testing.assert_array_equal(expected, out) def test_mst_prim(self): expected = np.array([0, 0, 1, 1, 2]) out, _ = graphs.mst_prim(WEIGHTED_UNDIRECTED, 0) np.testing.assert_array_equal(expected, out) def test_bellman_ford(self): expected = np.array([0, 2, 0, 2, 3]) out, _ = graphs.bellman_ford(WEIGHTED_DIRECTED, 0) np.testing.assert_array_equal(expected, out) def test_dag_shortest_paths(self): expected = np.array([0, 0, 0, 2, 3]) out, _ = graphs.bellman_ford(WEIGHTED_DAG, 0) np.testing.assert_array_equal(expected, out) def test_dijkstra(self): expected = np.array([0, 2, 0, 2, 3]) out, _ = graphs.dijkstra(WEIGHTED_DIRECTED, 0) np.testing.assert_array_equal(expected, out) def test_floyd_warshall(self): expected = np.array([0, 2, 0, 2, 3]) out, _ = graphs.floyd_warshall(WEIGHTED_DIRECTED) np.testing.assert_array_equal(expected, out[0]) def test_bipartite_matching(self): expected = np.array([ [1, 1, 1, 1, 0, 0, -1, -1, -1, -1, -1], [0, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1], [0, -1, 1, -1, -1, -1, 0, -1, 1, -1, -1], [0, -1, -1, 1, -1, -1, -1, 1, 0, 0, -1], [0, -1, -1, -1, 1, -1, -1, -1, 0, -1, -1], [0, -1, -1, -1, -1, 1, -1, -1, 0, -1, -1], [-1, 0, 
0, -1, -1, -1, 1, -1, -1, -1, 1], [-1, -1, -1, 0, -1, -1, -1, 1, -1, -1, 1], [-1, -1, 0, 0, 0, 0, -1, -1, 1, -1, 1], [-1, -1, -1, 0, -1, -1, -1, -1, -1, 1, 0], [-1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 1], ]) out, _ = graphs.bipartite_matching(BIPARTITE, 5, 4, 0, 10) np.testing.assert_array_equal(expected, out) expected_2 = np.array([ [1, 1, 1, 1, -1, -1, -1, -1], [0, 1, -1, -1, 0, 1, -1, -1], [0, -1, 1, -1, 1, -1, 0, -1], [0, -1, -1, 1, -1, -1, 1, -1], [-1, 0, 0, -1, 1, -1, -1, 1], [-1, 0, -1, -1, -1, 1, -1, 1], [-1, -1, 0, 0, -1, -1, 1, 1], [-1, -1, -1, -1, 0, 0, 0, 1], ]) out_2, _ = graphs.bipartite_matching(BIPARTITE_2, 3, 3, 0, 7) np.testing.assert_array_equal(expected_2, out_2) if __name__ == "__main__": absltest.main()
clrs-master
clrs/_src/algorithms/graphs_test.py
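# Illustrative sketch (not part of the test file): in the weighted fixtures
# above, absent edges are encoded with the int32 sentinel
# `X = np.iinfo(np.int32).max` rather than 0, so a genuine zero-weight edge
# stays distinguishable from "no edge". Reading such a matrix back into an
# explicit edge list looks like this (the 3x3 matrix is a toy example):
import numpy as np

X = np.iinfo(np.int32).max  # "not connected", as in the fixtures above
W = np.array([
    [X, 9, 3],
    [X, X, 6],
    [X, X, X],
])
edges = [(i, j, int(W[i, j]))
         for i in range(W.shape[0])
         for j in range(W.shape[1])
         if W[i, j] != X]
assert edges == [(0, 1, 9), (0, 2, 3), (1, 2, 6)]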
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Greedy algorithm generators. Currently implements the following: - Activity selection (Gavril, 1972) - Task scheduling (Lawler, 1985) See "Introduction to Algorithms" 3ed (CLRS3) for more information. """ # pylint: disable=invalid-name from typing import Tuple import chex from clrs._src import probing from clrs._src import specs import numpy as np _Array = np.ndarray _Out = Tuple[_Array, probing.ProbesDict] def activity_selector(s: _Array, f: _Array) -> _Out: """Activity selection (Gavril, 1972).""" chex.assert_rank([s, f], 1) probes = probing.initialize(specs.SPECS['activity_selector']) A_pos = np.arange(s.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A_pos.shape[0], 's': np.copy(s), 'f': np.copy(f) }) A = np.zeros(s.shape[0]) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'selected_h': np.copy(A), 'm': probing.mask_one(0, A_pos.shape[0]), 'k': probing.mask_one(0, A_pos.shape[0]) }) ind = np.argsort(f) A[ind[0]] = 1 k = ind[0] probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'selected_h': np.copy(A), 'm': probing.mask_one(ind[0], A_pos.shape[0]), 'k': probing.mask_one(k, A_pos.shape[0]) }) for m in range(1, s.shape[0]): if s[ind[m]] >= f[k]: A[ind[m]] = 1 k = ind[m] probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'selected_h': np.copy(A), 'm': probing.mask_one(ind[m], A_pos.shape[0]), 'k': probing.mask_one(k, A_pos.shape[0]) }) probing.push(probes, specs.Stage.OUTPUT, next_probe={'selected': np.copy(A)}) probing.finalize(probes) return A, probes def task_scheduling(d: _Array, w: _Array) -> _Out: """Task scheduling (Lawler, 1985).""" chex.assert_rank([d, w], 1) probes = probing.initialize(specs.SPECS['task_scheduling']) A_pos = np.arange(d.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A_pos.shape[0], 'd': np.copy(d), 'w': np.copy(w) }) A = np.zeros(d.shape[0]) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'selected_h': np.copy(A), 'i': probing.mask_one(0, A_pos.shape[0]), 't': 0 }) ind = np.argsort(-w) A[ind[0]] = 1 t = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'selected_h': np.copy(A), 'i': probing.mask_one(ind[0], A_pos.shape[0]), 't': t }) for i in range(1, d.shape[0]): if t < d[ind[i]]: A[ind[i]] = 1 t += 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'selected_h': np.copy(A), 'i': probing.mask_one(ind[i], A_pos.shape[0]), 't': t }) probing.push(probes, specs.Stage.OUTPUT, next_probe={'selected': np.copy(A)}) probing.finalize(probes) return A, probes
clrs-master
clrs/_src/algorithms/greedy.py
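# Illustrative sketch (not part of greedy.py): without the probing calls,
# `activity_selector` above is the classic greedy rule: sort by finish time,
# then take every activity whose start is no earlier than the finish time of
# the last one selected. The arrays below are arbitrary test values.
import numpy as np

def activity_selector_plain(s, f):
  selected = np.zeros(s.shape[0])
  order = np.argsort(f)            # earliest finish time first
  selected[order[0]] = 1
  last_finish = f[order[0]]
  for m in order[1:]:
    if s[m] >= last_finish:        # compatible with everything chosen so far
      selected[m] = 1
      last_finish = f[m]
  return selected

s = np.array([1, 3, 0, 5, 8, 5])
f = np.array([2, 4, 6, 7, 9, 9])
np.testing.assert_array_equal(activity_selector_plain(s, f),
                              np.array([1., 1., 0., 1., 1., 0.]))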
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `sorting.py`.""" # pylint: disable=invalid-name from absl.testing import absltest from absl.testing import parameterized from clrs._src.algorithms import sorting import numpy as np class SortingTest(parameterized.TestCase): @parameterized.named_parameters( ("Insertion sort", sorting.insertion_sort), ("Bubble sort", sorting.bubble_sort), ("Heapsort", sorting.heapsort), ("Quicksort", sorting.quicksort), ) def test_sorted(self, algorithm): for _ in range(17): A = np.random.randint(0, 100, size=(13,)) output, _ = algorithm(A) np.testing.assert_array_equal(sorted(A), output) if __name__ == "__main__": absltest.main()
clrs-master
clrs/_src/algorithms/sorting_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Sorting algorithm generators. Currently implements the following: - Insertion sort - Bubble sort - Heapsort (Williams, 1964) - Quicksort (Hoare, 1962) See "Introduction to Algorithms" 3ed (CLRS3) for more information. """ # pylint: disable=invalid-name from typing import Tuple import chex from clrs._src import probing from clrs._src import specs import numpy as np _Array = np.ndarray _Out = Tuple[_Array, probing.ProbesDict] def insertion_sort(A: _Array) -> _Out: """Insertion sort.""" chex.assert_rank(A, 1) probes = probing.initialize(specs.SPECS['insertion_sort']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'key': np.copy(A) }) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'i': probing.mask_one(0, A.shape[0]), 'j': probing.mask_one(0, A.shape[0]) }) for j in range(1, A.shape[0]): key = A[j] # Insert A[j] into the sorted sequence A[1 .. j - 1] i = j - 1 while i >= 0 and A[i] > key: A[i + 1] = A[i] A_pos[i + 1] = A_pos[i] i -= 1 A[i + 1] = key stor_pos = A_pos[i + 1] A_pos[i + 1] = j probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'i': probing.mask_one(stor_pos, np.copy(A.shape[0])), 'j': probing.mask_one(j, np.copy(A.shape[0])) }) probing.push( probes, specs.Stage.OUTPUT, next_probe={'pred': probing.array(np.copy(A_pos))}) probing.finalize(probes) return A, probes def bubble_sort(A: _Array) -> _Out: """Bubble sort.""" chex.assert_rank(A, 1) probes = probing.initialize(specs.SPECS['bubble_sort']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'key': np.copy(A) }) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'i': probing.mask_one(0, A.shape[0]), 'j': probing.mask_one(0, A.shape[0]) }) for i in range(A.shape[0] - 1): for j in reversed(range(i + 1, A.shape[0])): if A[j] < A[j - 1]: tmp = A[j] A[j] = A[j - 1] A[j - 1] = tmp tmp = A_pos[j] A_pos[j] = A_pos[j - 1] A_pos[j - 1] = tmp probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'i': probing.mask_one(A_pos[i], np.copy(A.shape[0])), 'j': probing.mask_one(A_pos[j], np.copy(A.shape[0])) }) probing.push( probes, specs.Stage.OUTPUT, next_probe={'pred': probing.array(np.copy(A_pos))}, ) probing.finalize(probes) return A, probes def heapsort(A: _Array) -> _Out: """Heapsort (Williams, 1964).""" chex.assert_rank(A, 1) probes = probing.initialize(specs.SPECS['heapsort']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'key': np.copy(A) }) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'parent': 
probing.heap(np.copy(A_pos), A.shape[0]), 'i': probing.mask_one(A.shape[0] - 1, A.shape[0]), 'j': probing.mask_one(A.shape[0] - 1, A.shape[0]), 'largest': probing.mask_one(A.shape[0] - 1, A.shape[0]), 'heap_size': probing.mask_one(A.shape[0] - 1, A.shape[0]), 'phase': probing.mask_one(0, 3) }) def max_heapify(A, i, heap_size, ind, phase): l = 2 * i + 1 r = 2 * i + 2 if l < heap_size and A[l] > A[i]: largest = l else: largest = i if r < heap_size and A[r] > A[largest]: largest = r if largest != i: tmp = A[i] A[i] = A[largest] A[largest] = tmp tmp = A_pos[i] A_pos[i] = A_pos[largest] A_pos[largest] = tmp probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'parent': probing.heap(np.copy(A_pos), heap_size), 'i': probing.mask_one(A_pos[ind], A.shape[0]), 'j': probing.mask_one(A_pos[i], A.shape[0]), 'largest': probing.mask_one(A_pos[largest], A.shape[0]), 'heap_size': probing.mask_one(A_pos[heap_size - 1], A.shape[0]), 'phase': probing.mask_one(phase, 3) }) if largest != i: max_heapify(A, largest, heap_size, ind, phase) def build_max_heap(A): for i in reversed(range(A.shape[0])): max_heapify(A, i, A.shape[0], i, 0) build_max_heap(A) heap_size = A.shape[0] for i in reversed(range(1, A.shape[0])): tmp = A[0] A[0] = A[i] A[i] = tmp tmp = A_pos[0] A_pos[0] = A_pos[i] A_pos[i] = tmp heap_size -= 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'parent': probing.heap(np.copy(A_pos), heap_size), 'i': probing.mask_one(A_pos[0], A.shape[0]), 'j': probing.mask_one(A_pos[i], A.shape[0]), 'largest': probing.mask_one(0, A.shape[0]), # Consider masking 'heap_size': probing.mask_one(A_pos[heap_size - 1], A.shape[0]), 'phase': probing.mask_one(1, 3) }) max_heapify(A, 0, heap_size, i, 2) # reduce heap_size! probing.push( probes, specs.Stage.OUTPUT, next_probe={'pred': probing.array(np.copy(A_pos))}, ) probing.finalize(probes) return A, probes def quicksort(A: _Array, A_pos=None, p=None, r=None, probes=None) -> _Out: """Quicksort (Hoare, 1962).""" chex.assert_rank(A, 1) def partition(A, A_pos, p, r, probes): x = A[r] i = p - 1 for j in range(p, r): if A[j] <= x: i += 1 tmp = A[i] A[i] = A[j] A[j] = tmp tmp = A_pos[i] A_pos[i] = A_pos[j] A_pos[j] = tmp probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'p': probing.mask_one(A_pos[p], A.shape[0]), 'r': probing.mask_one(A_pos[r], A.shape[0]), 'i': probing.mask_one(A_pos[i + 1], A.shape[0]), 'j': probing.mask_one(A_pos[j], A.shape[0]) }) tmp = A[i + 1] A[i + 1] = A[r] A[r] = tmp tmp = A_pos[i + 1] A_pos[i + 1] = A_pos[r] A_pos[r] = tmp probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'p': probing.mask_one(A_pos[p], A.shape[0]), 'r': probing.mask_one(A_pos[r], A.shape[0]), 'i': probing.mask_one(A_pos[i + 1], A.shape[0]), 'j': probing.mask_one(A_pos[r], A.shape[0]) }) return i + 1 if A_pos is None: A_pos = np.arange(A.shape[0]) if p is None: p = 0 if r is None: r = len(A) - 1 if probes is None: probes = probing.initialize(specs.SPECS['quicksort']) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'key': np.copy(A) }) if p < r: q = partition(A, A_pos, p, r, probes) quicksort(A, A_pos, p, q - 1, probes) quicksort(A, A_pos, q + 1, r, probes) if p == 0 and r == len(A) - 1: probing.push( probes, specs.Stage.OUTPUT, next_probe={'pred': probing.array(np.copy(A_pos))}, ) probing.finalize(probes) return A, probes
clrs-master
clrs/_src/algorithms/sorting.py
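# Illustrative sketch (not part of sorting.py): every sorting generator above
# maintains `A_pos`, recording the original index of the element occupying
# each slot; the hint probes and the final 'pred' output are built from it.
# The bare bookkeeping for insertion sort, without any probing:
import numpy as np

def insertion_sort_with_positions(A):
  A = np.copy(A)
  A_pos = np.arange(A.shape[0])
  for j in range(1, A.shape[0]):
    key, key_pos = A[j], A_pos[j]
    i = j - 1
    while i >= 0 and A[i] > key:
      A[i + 1], A_pos[i + 1] = A[i], A_pos[i]
      i -= 1
    A[i + 1], A_pos[i + 1] = key, key_pos
  return A, A_pos

A = np.array([3, 1, 2])
sorted_A, positions = insertion_sort_with_positions(A)
np.testing.assert_array_equal(sorted_A, np.array([1, 2, 3]))
np.testing.assert_array_equal(A[positions], sorted_A)  # positions map back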
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """CLRS algorithm implementations.""" # pylint:disable=g-bad-import-order from clrs._src.algorithms.divide_and_conquer import find_maximum_subarray from clrs._src.algorithms.divide_and_conquer import find_maximum_subarray_kadane from clrs._src.algorithms.dynamic_programming import matrix_chain_order from clrs._src.algorithms.dynamic_programming import lcs_length from clrs._src.algorithms.dynamic_programming import optimal_bst from clrs._src.algorithms.geometry import segments_intersect from clrs._src.algorithms.geometry import graham_scan from clrs._src.algorithms.geometry import jarvis_march from clrs._src.algorithms.graphs import dfs from clrs._src.algorithms.graphs import bfs from clrs._src.algorithms.graphs import topological_sort from clrs._src.algorithms.graphs import articulation_points from clrs._src.algorithms.graphs import bridges from clrs._src.algorithms.graphs import strongly_connected_components from clrs._src.algorithms.graphs import mst_kruskal from clrs._src.algorithms.graphs import mst_prim from clrs._src.algorithms.graphs import bellman_ford from clrs._src.algorithms.graphs import dijkstra from clrs._src.algorithms.graphs import dag_shortest_paths from clrs._src.algorithms.graphs import floyd_warshall from clrs._src.algorithms.graphs import bipartite_matching from clrs._src.algorithms.greedy import activity_selector from clrs._src.algorithms.greedy import task_scheduling from clrs._src.algorithms.searching import minimum from clrs._src.algorithms.searching import binary_search from clrs._src.algorithms.searching import quickselect from clrs._src.algorithms.sorting import insertion_sort from clrs._src.algorithms.sorting import bubble_sort from clrs._src.algorithms.sorting import heapsort from clrs._src.algorithms.sorting import quicksort from clrs._src.algorithms.strings import naive_string_matcher from clrs._src.algorithms.strings import kmp_matcher
clrs-master
clrs/_src/algorithms/__init__.py
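# Illustrative usage sketch: the re-exports above expose each generator from
# one module; every generator takes plain NumPy inputs and returns an
# (output, probes) pair. Array values below are arbitrary.
import numpy as np

from clrs._src.algorithms import insertion_sort, minimum

A = np.random.randint(0, 100, size=(8,))

sorted_A, sort_probes = insertion_sort(np.copy(A))  # copy: sorts in place
min_idx, min_probes = minimum(A)

np.testing.assert_array_equal(sorted_A, np.sort(A))
assert A[min_idx] == A.min()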
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `geometry.py`.""" from absl.testing import absltest from absl.testing import parameterized from clrs._src.algorithms import geometry import numpy as np class GeometryTest(parameterized.TestCase): def test_segments_simple(self): xs_no = np.array([0, 0, 1, 1]) ys_no = np.array([0, 1, 0, 1]) out, _ = geometry.segments_intersect(xs_no, ys_no) self.assertFalse(out) xs_yes = np.array([0, 1, 1, 0]) ys_yes = np.array([0, 1, 0, 1]) out, _ = geometry.segments_intersect(xs_yes, ys_yes) self.assertTrue(out) xs_just = np.array([-3, 5, 5, -4]) ys_just = np.array([-3, 5, 5, -4]) out, _ = geometry.segments_intersect(xs_just, ys_just) self.assertTrue(out) def test_segments_colinear(self): xs_no = np.array([-1, 1, 2, 4]) ys_no = np.array([-1, 1, 2, 4]) out, _ = geometry.segments_intersect(xs_no, ys_no) self.assertFalse(out) xs_yes = np.array([-3, 5, 1, 2]) ys_yes = np.array([-3, 5, 1, 2]) out, _ = geometry.segments_intersect(xs_yes, ys_yes) self.assertTrue(out) xs_just = np.array([-3, 5, 5, 7]) ys_just = np.array([-3, 5, 5, 7]) out, _ = geometry.segments_intersect(xs_just, ys_just) self.assertTrue(out) @parameterized.named_parameters( ("Graham scan convex hull", geometry.graham_scan), ("Jarvis' march convex hull", geometry.jarvis_march), ) def test_convex_hull_simple(self, algorithm): tt = np.linspace(-np.pi, np.pi, 10)[:-1] xs = np.cos(tt) ys = np.sin(tt) in_hull, _ = algorithm(xs, ys) self.assertTrue(np.all(in_hull == 1)) xs = np.append(xs, [0.1]) ys = np.append(ys, [0.1]) in_hull, _ = algorithm(xs, ys) self.assertTrue(np.all(in_hull[:-1] == 1)) self.assertTrue(np.all(in_hull[-1:] == 0)) @parameterized.named_parameters( ("Graham scan convex hull", geometry.graham_scan), ("Jarvis' march convex hull", geometry.jarvis_march), ) def test_convex_hull_points(self, algorithm): xs = np.array([0, 15, 20, 30, 50, 50, 55, 70]) ys = np.array([30, 25, 0, 60, 40, 10, 20, 30]) expected = np.array([1, 0, 1, 1, 0, 1, 0, 1]) out, _ = algorithm(xs, ys) np.testing.assert_array_equal(expected, out) if __name__ == "__main__": absltest.main()
clrs-master
clrs/_src/algorithms/geometry_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Unit tests for `strings.py`."""

from absl.testing import absltest
from absl.testing import parameterized

from clrs._src.algorithms import strings
import numpy as np


class StringsTest(parameterized.TestCase):

  @parameterized.named_parameters(
      ("Naive string matching", strings.naive_string_matcher),
      ("KMP string matching", strings.kmp_matcher),
  )
  def test_string_matching(self, algorithm):
    offset, _ = algorithm(np.array([1, 2, 3]), np.array([1, 2, 3]))
    self.assertEqual(offset, 0)

    offset, _ = algorithm(np.array([1, 2, 3, 1, 2]), np.array([1, 2, 3]))
    self.assertEqual(offset, 0)

    offset, _ = algorithm(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3]))
    self.assertEqual(offset, 0)

    offset, _ = algorithm(np.array([1, 2, 1, 2, 3]), np.array([1, 2, 3]))
    self.assertEqual(offset, 2)

    offset, _ = algorithm(np.array([3, 2, 1]), np.array([1, 2, 3]))
    self.assertEqual(offset, 3)

    offset, _ = algorithm(
        np.array([3, 2, 2, 1, 2, 1, 2, 3, 0, 0, 2, 3, 0, 0, 1, 0]),
        np.array([2, 1, 2, 3]))
    self.assertEqual(offset, 4)


if __name__ == "__main__":
  absltest.main()
clrs-master
clrs/_src/algorithms/strings_test.py
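The `[3, 2, 1]` case above suggests the matchers return `len(T)` when the pattern does not occur in the text. A minimal probing-free reference with that convention, for comparison only; `naive_match` is a hypothetical helper and not part of the library.

import numpy as np


def naive_match(T: np.ndarray, P: np.ndarray) -> int:
  """First offset of P in T, or len(T) if P never occurs."""
  n, m = T.shape[0], P.shape[0]
  for s in range(n - m + 1):
    if np.array_equal(T[s:s + m], P):
      return s
  return n


# Mirrors two of the cases exercised in the test above.
assert naive_match(np.array([1, 2, 1, 2, 3]), np.array([1, 2, 3])) == 2
assert naive_match(np.array([3, 2, 1]), np.array([1, 2, 3])) == 3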
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Geometry algorithm generators.

Currently implements the following:
- Segment intersection
- Graham scan convex hull (Graham, 1972)
- Jarvis' march convex hull (Jarvis, 1973)

See "Introduction to Algorithms" 3ed (CLRS3) for more information.
"""
# pylint: disable=invalid-name

import math
from typing import Any, Tuple

import chex
from clrs._src import probing
from clrs._src import specs
import numpy as np


_Array = np.ndarray
_Out = Tuple[Any, probing.ProbesDict]


def segments_intersect(xs: _Array, ys: _Array) -> _Out:
  """Segment intersection."""

  assert xs.shape == (4,)
  assert ys.shape == (4,)

  probes = probing.initialize(specs.SPECS['segments_intersect'])

  A_pos = np.arange(xs.shape[0])

  dirs = np.zeros(xs.shape[0])
  on_seg = np.zeros(xs.shape[0])

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'pos': np.copy(A_pos) * 1.0 / A_pos.shape[0],
          'x': np.copy(xs),
          'y': np.copy(ys)
      })

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'i': probing.mask_one(0, xs.shape[0]),
          'j': probing.mask_one(0, xs.shape[0]),
          'k': probing.mask_one(0, xs.shape[0]),
          'dir': np.copy(dirs),
          'on_seg': np.copy(on_seg)
      })

  def cross_product(x1, y1, x2, y2):
    return x1 * y2 - x2 * y1

  def direction(xs, ys, i, j, k):
    return cross_product(xs[k] - xs[i], ys[k] - ys[i], xs[j] - xs[i],
                         ys[j] - ys[i])

  def on_segment(xs, ys, i, j, k):
    if min(xs[i], xs[j]) <= xs[k] and xs[k] <= max(xs[i], xs[j]):
      if min(ys[i], ys[j]) <= ys[k] and ys[k] <= max(ys[i], ys[j]):
        return 1
    return 0

  dirs[0] = direction(xs, ys, 2, 3, 0)
  on_seg[0] = on_segment(xs, ys, 2, 3, 0)
  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'i': probing.mask_one(2, xs.shape[0]),
          'j': probing.mask_one(3, xs.shape[0]),
          'k': probing.mask_one(0, xs.shape[0]),
          'dir': np.copy(dirs),
          'on_seg': np.copy(on_seg)
      })

  dirs[1] = direction(xs, ys, 2, 3, 1)
  on_seg[1] = on_segment(xs, ys, 2, 3, 1)
  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'i': probing.mask_one(2, xs.shape[0]),
          'j': probing.mask_one(3, xs.shape[0]),
          'k': probing.mask_one(1, xs.shape[0]),
          'dir': np.copy(dirs),
          'on_seg': np.copy(on_seg)
      })

  dirs[2] = direction(xs, ys, 0, 1, 2)
  on_seg[2] = on_segment(xs, ys, 0, 1, 2)
  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'i': probing.mask_one(0, xs.shape[0]),
          'j': probing.mask_one(1, xs.shape[0]),
          'k': probing.mask_one(2, xs.shape[0]),
          'dir': np.copy(dirs),
          'on_seg': np.copy(on_seg)
      })

  dirs[3] = direction(xs, ys, 0, 1, 3)
  on_seg[3] = on_segment(xs, ys, 0, 1, 3)
  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'i': probing.mask_one(0, xs.shape[0]),
          'j': probing.mask_one(1, xs.shape[0]),
          'k': probing.mask_one(3, xs.shape[0]),
          'dir': np.copy(dirs),
          'on_seg': np.copy(on_seg)
      })

  ret = 0

  if ((dirs[0] > 0 and dirs[1] < 0) or (dirs[0] < 0 and dirs[1] > 0)) and (
      (dirs[2] > 0 and dirs[3] < 0) or (dirs[2] < 0 and dirs[3] > 0)):
    ret = 1
  elif dirs[0] == 0 and on_seg[0]:
    ret = 1
  elif dirs[1] == 0 and on_seg[1]:
    ret = 1
  elif dirs[2] == 0 and on_seg[2]:
    ret = 1
  elif dirs[3] == 0 and on_seg[3]:
    ret = 1

  probing.push(probes, specs.Stage.OUTPUT, next_probe={'intersect': ret})
  probing.finalize(probes)

  return ret, probes


def graham_scan(xs: _Array, ys: _Array) -> _Out:
  """Graham scan convex hull (Graham, 1972)."""

  chex.assert_rank([xs, ys], 1)
  probes = probing.initialize(specs.SPECS['graham_scan'])

  A_pos = np.arange(xs.shape[0])

  in_hull = np.zeros(xs.shape[0])
  stack_prev = np.arange(xs.shape[0])
  atans = np.zeros(xs.shape[0])

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'pos': np.copy(A_pos) * 1.0 / A_pos.shape[0],
          'x': np.copy(xs),
          'y': np.copy(ys)
      })

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'best': probing.mask_one(0, xs.shape[0]),
          'atans': np.copy(atans),
          'in_hull_h': np.copy(in_hull),
          'stack_prev': np.copy(stack_prev),
          'last_stack': probing.mask_one(0, xs.shape[0]),
          'i': probing.mask_one(0, xs.shape[0]),
          'phase': probing.mask_one(0, 5)
      })

  def counter_clockwise(xs, ys, i, j, k):
    return ((xs[j] - xs[i]) * (ys[k] - ys[i]) -
            (ys[j] - ys[i]) * (xs[k] - xs[i])) <= 0

  best = 0
  for i in range(xs.shape[0]):
    if ys[i] < ys[best] or (ys[i] == ys[best] and xs[i] < xs[best]):
      best = i

  in_hull[best] = 1
  last_stack = best

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'best': probing.mask_one(best, xs.shape[0]),
          'atans': np.copy(atans),
          'in_hull_h': np.copy(in_hull),
          'stack_prev': np.copy(stack_prev),
          'last_stack': probing.mask_one(last_stack, xs.shape[0]),
          'i': probing.mask_one(best, xs.shape[0]),
          'phase': probing.mask_one(1, 5)
      })

  for i in range(xs.shape[0]):
    if i != best:
      atans[i] = math.atan2(ys[i] - ys[best], xs[i] - xs[best])
  atans[best] = -123456789
  ind = np.argsort(atans)
  atans[best] = 0

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'best': probing.mask_one(best, xs.shape[0]),
          'atans': np.copy(atans),
          'in_hull_h': np.copy(in_hull),
          'stack_prev': np.copy(stack_prev),
          'last_stack': probing.mask_one(last_stack, xs.shape[0]),
          'i': probing.mask_one(best, xs.shape[0]),
          'phase': probing.mask_one(2, 5)
      })

  for i in range(1, xs.shape[0]):
    if i >= 3:
      while counter_clockwise(xs, ys, stack_prev[last_stack], last_stack,
                              ind[i]):
        prev_last = last_stack
        last_stack = stack_prev[last_stack]
        stack_prev[prev_last] = prev_last
        in_hull[prev_last] = 0

        probing.push(
            probes,
            specs.Stage.HINT,
            next_probe={
                'best': probing.mask_one(best, xs.shape[0]),
                'atans': np.copy(atans),
                'in_hull_h': np.copy(in_hull),
                'stack_prev': np.copy(stack_prev),
                'last_stack': probing.mask_one(last_stack, xs.shape[0]),
                'i': probing.mask_one(A_pos[ind[i]], xs.shape[0]),
                'phase': probing.mask_one(3, 5)
            })

    in_hull[ind[i]] = 1
    stack_prev[ind[i]] = last_stack
    last_stack = ind[i]

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'best': probing.mask_one(best, xs.shape[0]),
            'atans': np.copy(atans),
            'in_hull_h': np.copy(in_hull),
            'stack_prev': np.copy(stack_prev),
            'last_stack': probing.mask_one(last_stack, xs.shape[0]),
            'i': probing.mask_one(A_pos[ind[i]], xs.shape[0]),
            'phase': probing.mask_one(4, 5)
        })

  probing.push(
      probes,
      specs.Stage.OUTPUT,
      next_probe={'in_hull': np.copy(in_hull)},
  )

  probing.finalize(probes)

  return in_hull, probes


def jarvis_march(xs: _Array, ys: _Array) -> _Out:
  """Jarvis' march convex hull (Jarvis, 1973)."""

  chex.assert_rank([xs, ys], 1)
  probes = probing.initialize(specs.SPECS['jarvis_march'])

  A_pos = np.arange(xs.shape[0])

  in_hull = np.zeros(xs.shape[0])

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'pos': np.copy(A_pos) * 1.0 / A_pos.shape[0],
          'x': np.copy(xs),
          'y': np.copy(ys)
      })

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'pred_h': probing.array(np.copy(A_pos)),
          'in_hull_h': np.copy(in_hull),
          'best': probing.mask_one(0, xs.shape[0]),
          'last_point': probing.mask_one(0, xs.shape[0]),
          'endpoint': probing.mask_one(0, xs.shape[0]),
          'i': probing.mask_one(0, xs.shape[0]),
          'phase': probing.mask_one(0, 2)
      })

  def counter_clockwise(xs, ys, i, j, k):
    if (k == i) or (k == j):
      return False
    return ((xs[j] - xs[i]) * (ys[k] - ys[i]) -
            (ys[j] - ys[i]) * (xs[k] - xs[i])) <= 0

  best = 0
  for i in range(xs.shape[0]):
    if ys[i] < ys[best] or (ys[i] == ys[best] and xs[i] < xs[best]):
      best = i

  in_hull[best] = 1
  last_point = best
  endpoint = 0

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'pred_h': probing.array(np.copy(A_pos)),
          'in_hull_h': np.copy(in_hull),
          'best': probing.mask_one(best, xs.shape[0]),
          'last_point': probing.mask_one(last_point, xs.shape[0]),
          'endpoint': probing.mask_one(endpoint, xs.shape[0]),
          'i': probing.mask_one(0, xs.shape[0]),
          'phase': probing.mask_one(1, 2)
      })

  while True:
    for i in range(xs.shape[0]):
      if endpoint == last_point or counter_clockwise(xs, ys, last_point,
                                                     endpoint, i):
        endpoint = i

      probing.push(
          probes,
          specs.Stage.HINT,
          next_probe={
              'pred_h': probing.array(np.copy(A_pos)),
              'in_hull_h': np.copy(in_hull),
              'best': probing.mask_one(best, xs.shape[0]),
              'last_point': probing.mask_one(last_point, xs.shape[0]),
              'endpoint': probing.mask_one(endpoint, xs.shape[0]),
              'i': probing.mask_one(i, xs.shape[0]),
              'phase': probing.mask_one(1, 2)
          })

    if in_hull[endpoint] > 0:
      break

    in_hull[endpoint] = 1
    last_point = endpoint
    endpoint = 0

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'pred_h': probing.array(np.copy(A_pos)),
            'in_hull_h': np.copy(in_hull),
            'best': probing.mask_one(best, xs.shape[0]),
            'last_point': probing.mask_one(last_point, xs.shape[0]),
            'endpoint': probing.mask_one(endpoint, xs.shape[0]),
            'i': probing.mask_one(0, xs.shape[0]),
            'phase': probing.mask_one(1, 2)
        })

  probing.push(
      probes,
      specs.Stage.OUTPUT,
      next_probe={'in_hull': np.copy(in_hull)})

  probing.finalize(probes)

  return in_hull, probes
clrs-master
clrs/_src/algorithms/geometry.py
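A short usage sketch for the module above; the point sets are illustrative. Each generator returns its answer plus the recorded hint trajectory.

import numpy as np

from clrs._src.algorithms import geometry

# Two crossing segments: (0, 0)-(1, 1) and (1, 0)-(0, 1).
xs = np.array([0, 1, 1, 0])
ys = np.array([0, 1, 0, 1])
intersect, probes = geometry.segments_intersect(xs, ys)
print(bool(intersect))  # True

# Convex hull indicators: four corners of a square plus one interior point.
xs = np.array([0.0, 2.0, 2.0, 0.0, 1.0])
ys = np.array([0.0, 0.0, 2.0, 2.0, 1.0])
in_hull, _ = geometry.graham_scan(xs, ys)
print(in_hull)  # the interior point (1, 1) is marked 0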
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Unit tests for `dynamic_programming.py`."""

from absl.testing import absltest

from clrs._src.algorithms import dynamic_programming
import numpy as np


class DynamicProgrammingTest(absltest.TestCase):

  def test_matrix_chain_order_1(self):
    expected = np.array([
        [0, 1, 1, 3, 3, 3],
        [0, 0, 2, 3, 3, 3],
        [0, 0, 0, 3, 3, 3],
        [0, 0, 0, 0, 4, 5],
        [0, 0, 0, 0, 0, 5],
        [0, 0, 0, 0, 0, 0],
    ])

    for shift in [0, 1, 2]:
      for scale in [1, 3, 17]:
        ps = shift + scale * np.array([30, 35, 15, 5, 10, 20, 25])
        order, _ = dynamic_programming.matrix_chain_order(ps)
        np.testing.assert_array_equal(expected, order)

  def test_matrix_chain_order_2(self):
    expected = np.array([
        [0, 1, 2, 2, 4, 2],
        [0, 0, 2, 2, 2, 2],
        [0, 0, 0, 3, 4, 4],
        [0, 0, 0, 0, 4, 4],
        [0, 0, 0, 0, 0, 5],
        [0, 0, 0, 0, 0, 0],
    ])

    for shift in [0, 1]:
      for scale in [1, 3, 17]:
        ps = shift + scale * np.array([5, 10, 3, 12, 5, 50, 6])
        order, _ = dynamic_programming.matrix_chain_order(ps)
        np.testing.assert_array_equal(expected, order)

  def test_lcs_length(self):
    xs = np.array([0, 1, 2, 1, 3, 0, 1])
    ys = np.array([1, 3, 2, 0, 1, 0])

    expected = np.array([
        [1, 1, 1, 0, 2, 0],
        [0, 2, 2, 1, 0, 2],
        [1, 1, 0, 2, 1, 1],
        [0, 1, 1, 1, 0, 2],
        [1, 0, 1, 1, 1, 1],
        [1, 1, 1, 0, 1, 0],
        [0, 1, 1, 1, 0, 1],
    ])

    out, _ = dynamic_programming.lcs_length(xs, ys)
    np.testing.assert_array_equal(expected, out)

  def test_optimal_bst(self):
    p = np.array([0.15, 0.10, 0.05, 0.10, 0.2])
    q = np.array([0.05, 0.10, 0.05, 0.05, 0.05, 0.10])
    assert p.sum() + q.sum() == 1.

    expected = np.array([
        [0, 0, 0, 1, 1, 1],
        [0, 0, 1, 1, 1, 3],
        [0, 0, 0, 2, 3, 4],
        [0, 0, 0, 0, 3, 4],
        [0, 0, 0, 0, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ])

    out, _ = dynamic_programming.optimal_bst(p, q)
    np.testing.assert_array_equal(expected, out)


if __name__ == "__main__":
  absltest.main()
clrs-master
clrs/_src/algorithms/dynamic_programming_test.py
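The first expected table above is the classic CLRS matrix-chain instance p = (30, 35, 15, 5, 10, 20, 25). For orientation, a sketch of the textbook recurrence m[i, j] = min_k (m[i, k] + m[k+1, j] + p[i-1] * p[k] * p[j]) that produces such a split table; `matrix_chain_splits` is a hypothetical standalone helper, separate from the probing generator, and its indexing convention may differ from the generator's output.

import numpy as np


def matrix_chain_splits(p):
  """CLRS split table s[i, j] (1-indexed) for chain dimensions p."""
  n = len(p) - 1
  m = np.zeros((n + 1, n + 1))        # m[i, i] = 0: single matrix costs nothing
  s = np.zeros((n + 1, n + 1), dtype=int)
  for length in range(2, n + 1):      # chain length
    for i in range(1, n - length + 2):
      j = i + length - 1
      m[i, j] = np.inf
      for k in range(i, j):           # split point: (A_i..A_k)(A_{k+1}..A_j)
        cost = m[i, k] + m[k + 1, j] + p[i - 1] * p[k] * p[j]
        if cost < m[i, j]:
          m[i, j] = cost
          s[i, j] = k
  return s


s = matrix_chain_splits([30, 35, 15, 5, 10, 20, 25])
print(s[1:, 1:])  # e.g. s[1, 6] == 3: split A1..A6 as (A1 A2 A3)(A4 A5 A6)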
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Unit tests for `divide_and_conquer.py`."""
# pylint: disable=invalid-name

from absl.testing import absltest
from absl.testing import parameterized

from clrs._src.algorithms import divide_and_conquer
import numpy as np


class DivideAndConquerTest(parameterized.TestCase):

  @parameterized.named_parameters(
      ("Maximum subarray", divide_and_conquer.find_maximum_subarray),
      ("Kadane's variant", divide_and_conquer.find_maximum_subarray_kadane),
  )
  def test_find_maximum_subarray_pos(self, algorithm):
    A = np.random.randint(0, 100, size=(13,))
    (low, high, sum_), _ = algorithm(A)
    self.assertEqual(low, 0)
    self.assertEqual(high, len(A) - 1)
    self.assertEqual(sum_, np.sum(A))

  @parameterized.named_parameters(
      ("Maximum subarray", divide_and_conquer.find_maximum_subarray),
      ("Kadane's variant", divide_and_conquer.find_maximum_subarray_kadane),
  )
  def test_find_maximum_subarray(self, algorithm):
    A = np.random.randint(-100, 100, size=(13,))
    (low, high, sum_), _ = algorithm(A.copy())

    # Brute force solution.
    best = (0, len(A) - 1)
    best_sum = np.sum(A)
    for start in range(len(A)):
      for stop in range(start, len(A)):
        range_sum = np.sum(A[start:stop + 1])
        if range_sum > best_sum:
          best = (start, stop)
          best_sum = range_sum

    self.assertEqual(low, best[0])
    self.assertEqual(high, best[1])
    self.assertEqual(sum_, best_sum)


if __name__ == "__main__":
  absltest.main()
clrs-master
clrs/_src/algorithms/divide_and_conquer_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Divide and conquer algorithm generators. Currently implements the following: - Maximum subarray - Kadane's variant of Maximum subarray (Bentley, 1984) See "Introduction to Algorithms" 3ed (CLRS3) for more information. """ # pylint: disable=invalid-name from typing import Any, Union import chex from clrs._src import probing from clrs._src import specs import numpy as np _Array = np.ndarray _Numeric = Union[int, float] _Out = Any def find_maximum_subarray( A: _Array, A_pos=None, low=None, high=None, probes=None, ) -> _Out: """Maximum subarray.""" chex.assert_rank(A, 1) def find_max_crossing_subarray(A, A_pos, low, mid, high, left_ctx, right_ctx, probes): (left_low, left_high, l_ctx_sum) = left_ctx (right_low, right_high, r_ctx_sum) = right_ctx left_sum = A[mid] - 0.1 sum_ = 0 probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'low': probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one(mid, A.shape[0]), 'left_low': probing.mask_one(left_low, A.shape[0]), 'left_high': probing.mask_one(left_high, A.shape[0]), 'left_sum': l_ctx_sum, 'right_low': probing.mask_one(right_low, A.shape[0]), 'right_high': probing.mask_one(right_high, A.shape[0]), 'right_sum': r_ctx_sum, 'cross_low': probing.mask_one(mid, A.shape[0]), 'cross_high': probing.mask_one(mid + 1, A.shape[0]), 'cross_sum': A[mid] + A[mid + 1] - 0.2, 'ret_low': probing.mask_one(low, A.shape[0]), 'ret_high': probing.mask_one(high, A.shape[0]), 'ret_sum': 0.0, 'i': probing.mask_one(mid, A.shape[0]), 'j': probing.mask_one(mid + 1, A.shape[0]), 'sum': 0.0, 'left_x_sum': A[mid] - 0.1, 'right_x_sum': A[mid + 1] - 0.1, 'phase': probing.mask_one(1, 3) }) for i in range(mid, low - 1, -1): sum_ += A[i] if sum_ > left_sum: left_sum = sum_ max_left = i probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'low': probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one(mid, A.shape[0]), 'left_low': probing.mask_one(left_low, A.shape[0]), 'left_high': probing.mask_one(left_high, A.shape[0]), 'left_sum': l_ctx_sum, 'right_low': probing.mask_one(right_low, A.shape[0]), 'right_high': probing.mask_one(right_high, A.shape[0]), 'right_sum': r_ctx_sum, 'cross_low': probing.mask_one(max_left, A.shape[0]), 'cross_high': probing.mask_one(mid + 1, A.shape[0]), 'cross_sum': left_sum + A[mid + 1] - 0.1, 'ret_low': probing.mask_one(low, A.shape[0]), 'ret_high': probing.mask_one(high, A.shape[0]), 'ret_sum': 0.0, 'i': probing.mask_one(i, A.shape[0]), 'j': probing.mask_one(mid + 1, A.shape[0]), 'sum': sum_, 'left_x_sum': left_sum, 'right_x_sum': A[mid + 1] - 0.1, 'phase': probing.mask_one(1, 3) }) right_sum = A[mid + 1] - 0.1 sum_ = 0 probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': 
probing.array(np.copy(A_pos)), 'low': probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one(mid, A.shape[0]), 'left_low': probing.mask_one(left_low, A.shape[0]), 'left_high': probing.mask_one(left_high, A.shape[0]), 'left_sum': left_sum, 'right_low': probing.mask_one(right_low, A.shape[0]), 'right_high': probing.mask_one(right_high, A.shape[0]), 'right_sum': right_sum, 'cross_low': probing.mask_one(max_left, A.shape[0]), 'cross_high': probing.mask_one(mid + 1, A.shape[0]), 'cross_sum': left_sum + right_sum, 'ret_low': probing.mask_one(low, A.shape[0]), 'ret_high': probing.mask_one(high, A.shape[0]), 'ret_sum': 0.0, 'i': probing.mask_one(i, A.shape[0]), 'j': probing.mask_one(mid + 1, A.shape[0]), 'sum': 0.0, 'left_x_sum': left_sum, 'right_x_sum': A[mid + 1] - 0.1, 'phase': probing.mask_one(2, 3) }) for j in range(mid + 1, high + 1): sum_ += A[j] if sum_ > right_sum: right_sum = sum_ max_right = j probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'low': probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one(mid, A.shape[0]), 'left_low': probing.mask_one(left_low, A.shape[0]), 'left_high': probing.mask_one(left_high, A.shape[0]), 'left_sum': left_sum, 'right_low': probing.mask_one(right_low, A.shape[0]), 'right_high': probing.mask_one(right_high, A.shape[0]), 'right_sum': right_sum, 'cross_low': probing.mask_one(max_left, A.shape[0]), 'cross_high': probing.mask_one(max_right, A.shape[0]), 'cross_sum': left_sum + right_sum, 'ret_low': probing.mask_one(low, A.shape[0]), 'ret_high': probing.mask_one(high, A.shape[0]), 'ret_sum': 0.0, 'i': probing.mask_one(i, A.shape[0]), 'j': probing.mask_one(j, A.shape[0]), 'sum': sum_, 'left_x_sum': left_sum, 'right_x_sum': right_sum, 'phase': probing.mask_one(2, 3) }) return (max_left, max_right, left_sum + right_sum), (sum_, left_sum, right_sum) if A_pos is None: A_pos = np.arange(A.shape[0]) if low is None: low = 0 if high is None: high = A.shape[0] - 1 if probes is None: probes = probing.initialize(specs.SPECS['find_maximum_subarray']) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A_pos.shape[0], 'key': np.copy(A) }) mid = (low + high) // 2 if high == low: if A.shape[0] == 1: probing.push( probes, specs.Stage.OUTPUT, next_probe={ 'start': probing.mask_one(low, A.shape[0]), 'end': probing.mask_one(high, A.shape[0]) }) probing.finalize(probes) return (low, high, A[low]), probes else: probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'low': probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one(mid, A.shape[0]), 'left_low': probing.mask_one(low, A.shape[0]), 'left_high': probing.mask_one(high, A.shape[0]), 'left_sum': 0.0, 'right_low': probing.mask_one(low, A.shape[0]), 'right_high': probing.mask_one(high, A.shape[0]), 'right_sum': 0.0, 'cross_low': probing.mask_one(low, A.shape[0]), 'cross_high': probing.mask_one(high, A.shape[0]), 'cross_sum': 0.0, 'ret_low': probing.mask_one(low, A.shape[0]), 'ret_high': probing.mask_one(high, A.shape[0]), 'ret_sum': A[low], 'i': probing.mask_one(low, A.shape[0]), 'j': probing.mask_one(high, A.shape[0]), 'sum': 0.0, 'left_x_sum': A[low] - 0.1, 'right_x_sum': A[high] - 0.1, 'phase': probing.mask_one(0, 3) }) return (low, high, A[low]) else: probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'low': 
probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one(mid, A.shape[0]), 'left_low': probing.mask_one(low, A.shape[0]), 'left_high': probing.mask_one(mid, A.shape[0]), 'left_sum': 0.0, 'right_low': probing.mask_one(mid + 1, A.shape[0]), 'right_high': probing.mask_one(high, A.shape[0]), 'right_sum': 0.0, 'cross_low': probing.mask_one(mid, A.shape[0]), 'cross_high': probing.mask_one(mid + 1, A.shape[0]), 'cross_sum': A[mid] + A[mid + 1] - 0.2, 'ret_low': probing.mask_one(low, A.shape[0]), 'ret_high': probing.mask_one(high, A.shape[0]), 'ret_sum': 0.0, 'i': probing.mask_one(mid, A.shape[0]), 'j': probing.mask_one(mid + 1, A.shape[0]), 'sum': 0.0, 'left_x_sum': A[mid] - 0.1, 'right_x_sum': A[mid + 1] - 0.1, 'phase': probing.mask_one(0, 3) }) (left_low, left_high, # pylint: disable=unbalanced-tuple-unpacking left_sum) = find_maximum_subarray(A, A_pos, low, mid, probes) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'low': probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one(mid, A.shape[0]), 'left_low': probing.mask_one(left_low, A.shape[0]), 'left_high': probing.mask_one(left_high, A.shape[0]), 'left_sum': left_sum, 'right_low': probing.mask_one(mid + 1, A.shape[0]), 'right_high': probing.mask_one(high, A.shape[0]), 'right_sum': 0.0, 'cross_low': probing.mask_one(mid, A.shape[0]), 'cross_high': probing.mask_one(mid + 1, A.shape[0]), 'cross_sum': A[mid] + A[mid + 1] - 0.2, 'ret_low': probing.mask_one(low, A.shape[0]), 'ret_high': probing.mask_one(high, A.shape[0]), 'ret_sum': 0.0, 'i': probing.mask_one(mid, A.shape[0]), 'j': probing.mask_one(mid + 1, A.shape[0]), 'sum': 0.0, 'left_x_sum': A[mid] - 0.1, 'right_x_sum': A[mid + 1] - 0.1, 'phase': probing.mask_one(0, 3) }) (right_low, right_high, # pylint: disable=unbalanced-tuple-unpacking right_sum) = find_maximum_subarray(A, A_pos, mid + 1, high, probes) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'low': probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one(mid, A.shape[0]), 'left_low': probing.mask_one(left_low, A.shape[0]), 'left_high': probing.mask_one(left_high, A.shape[0]), 'left_sum': left_sum, 'right_low': probing.mask_one(right_low, A.shape[0]), 'right_high': probing.mask_one(right_high, A.shape[0]), 'right_sum': right_sum, 'cross_low': probing.mask_one(mid, A.shape[0]), 'cross_high': probing.mask_one(mid + 1, A.shape[0]), 'cross_sum': A[mid] + A[mid + 1] - 0.2, 'ret_low': probing.mask_one(low, A.shape[0]), 'ret_high': probing.mask_one(high, A.shape[0]), 'ret_sum': 0.0, 'i': probing.mask_one(mid, A.shape[0]), 'j': probing.mask_one(mid + 1, A.shape[0]), 'sum': 0.0, 'left_x_sum': A[mid] - 0.1, 'right_x_sum': A[mid + 1] - 0.1, 'phase': probing.mask_one(0, 3) }) (cross_low, cross_high, cross_sum), (x_sum, x_left, x_right) = find_max_crossing_subarray( A, A_pos, low, mid, high, (left_low, left_high, left_sum), (right_low, right_high, right_sum), probes) if left_sum >= right_sum and left_sum >= cross_sum: best = (left_low, left_high, left_sum) elif right_sum >= left_sum and right_sum >= cross_sum: best = (right_low, right_high, right_sum) else: best = (cross_low, cross_high, cross_sum) if low == 0 and high == A.shape[0] - 1: probing.push( probes, specs.Stage.OUTPUT, next_probe={ 'start': probing.mask_one(best[0], A.shape[0]), 'end': probing.mask_one(best[1], A.shape[0]) }) probing.finalize(probes) return 
best, probes else: probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'low': probing.mask_one(low, A.shape[0]), 'high': probing.mask_one(high, A.shape[0]), 'mid': probing.mask_one(mid, A.shape[0]), 'left_low': probing.mask_one(left_low, A.shape[0]), 'left_high': probing.mask_one(left_high, A.shape[0]), 'left_sum': left_sum, 'right_low': probing.mask_one(right_low, A.shape[0]), 'right_high': probing.mask_one(right_high, A.shape[0]), 'right_sum': right_sum, 'cross_low': probing.mask_one(cross_low, A.shape[0]), 'cross_high': probing.mask_one(cross_high, A.shape[0]), 'cross_sum': cross_sum, 'ret_low': probing.mask_one(best[0], A.shape[0]), 'ret_high': probing.mask_one(best[1], A.shape[0]), 'ret_sum': best[2], 'i': probing.mask_one(low, A.shape[0]), 'j': probing.mask_one(high, A.shape[0]), 'sum': x_sum, 'left_x_sum': x_left, 'right_x_sum': x_right, 'phase': probing.mask_one(0, 3) }) return best def find_maximum_subarray_kadane(A: _Array) -> _Out: """Kadane's variant of Maximum subarray (Bentley, 1984).""" chex.assert_rank(A, 1) probes = probing.initialize(specs.SPECS['find_maximum_subarray_kadane']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A_pos.shape[0], 'key': np.copy(A) }) probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'best_low': probing.mask_one(0, A.shape[0]), 'best_high': probing.mask_one(0, A.shape[0]), 'best_sum': A[0], 'i': probing.mask_one(0, A.shape[0]), 'j': probing.mask_one(0, A.shape[0]), 'sum': A[0] }) best_low = 0 best_high = 0 best_sum = A[0] i = 0 sum_ = A[0] for j in range(1, A.shape[0]): x = A[j] if sum_ + x >= x: sum_ += x else: i = j sum_ = x if sum_ > best_sum: best_low = i best_high = j best_sum = sum_ probing.push( probes, specs.Stage.HINT, next_probe={ 'pred_h': probing.array(np.copy(A_pos)), 'best_low': probing.mask_one(best_low, A.shape[0]), 'best_high': probing.mask_one(best_high, A.shape[0]), 'best_sum': best_sum, 'i': probing.mask_one(i, A.shape[0]), 'j': probing.mask_one(j, A.shape[0]), 'sum': sum_ }) probing.push( probes, specs.Stage.OUTPUT, next_probe={ 'start': probing.mask_one(best_low, A.shape[0]), 'end': probing.mask_one(best_high, A.shape[0]) }) probing.finalize(probes) return (best_low, best_high, best_sum), probes
clrs-master
clrs/_src/algorithms/divide_and_conquer.py
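Stripped of the probing pushes, the Kadane variant above reduces to the familiar linear scan. A minimal sketch of that core loop; `kadane` is a hypothetical helper that mirrors the hint-free logic of `find_maximum_subarray_kadane`.

import numpy as np


def kadane(A: np.ndarray):
  """Plain Kadane scan: returns (best_low, best_high, best_sum)."""
  best_low = best_high = i = 0
  best_sum = sum_ = A[0]
  for j in range(1, A.shape[0]):
    x = A[j]
    if sum_ + x >= x:    # extend the current run
      sum_ += x
    else:                # start a new run at j
      i, sum_ = j, x
    if sum_ > best_sum:
      best_low, best_high, best_sum = i, j, sum_
  return best_low, best_high, best_sum


assert kadane(np.array([1, -3, 2, 1, -1])) == (2, 3, 3)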
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Graph algorithm generators. Currently implements the following: - Depth-first search (Moore, 1959) - Breadth-first search (Moore, 1959) - Topological sorting (Knuth, 1973) - Articulation points - Bridges - Kosaraju's strongly-connected components (Aho et al., 1974) - Kruskal's minimum spanning tree (Kruskal, 1956) - Prim's minimum spanning tree (Prim, 1957) - Bellman-Ford's single-source shortest path (Bellman, 1958) - Dijkstra's single-source shortest path (Dijkstra, 1959) - DAG shortest path - Floyd-Warshall's all-pairs shortest paths (Floyd, 1962) - Edmonds-Karp bipartite matching (Edmund & Karp, 1972) See "Introduction to Algorithms" 3ed (CLRS3) for more information. """ # pylint: disable=invalid-name from typing import Tuple import chex from clrs._src import probing from clrs._src import specs import numpy as np _Array = np.ndarray _Out = Tuple[_Array, probing.ProbesDict] _OutputClass = specs.OutputClass def dfs(A: _Array) -> _Out: """Depth-first search (Moore, 1959).""" chex.assert_rank(A, 2) probes = probing.initialize(specs.SPECS['dfs']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'A': np.copy(A), 'adj': probing.graph(np.copy(A)) }) color = np.zeros(A.shape[0], dtype=np.int32) pi = np.arange(A.shape[0]) d = np.zeros(A.shape[0]) f = np.zeros(A.shape[0]) s_prev = np.arange(A.shape[0]) time = 0 for s in range(A.shape[0]): if color[s] == 0: s_last = s u = s v = s probing.push( probes, specs.Stage.HINT, next_probe={ 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) while True: if color[u] == 0 or d[u] == 0.0: time += 0.01 d[u] = time color[u] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) for v in range(A.shape[0]): if A[u, v] != 0: if color[v] == 0: pi[v] = u color[v] = 1 s_prev[v] = s_last s_last = v probing.push( probes, specs.Stage.HINT, next_probe={ 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) break if s_last == u: color[u] = 2 time += 0.01 f[u] = time probing.push( probes, specs.Stage.HINT, next_probe={ 'pi_h': np.copy(pi), 
'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) if s_prev[u] == u: assert s_prev[s_last] == s_last break pr = s_prev[s_last] s_prev[s_last] = s_last s_last = pr u = s_last probing.push(probes, specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)}) probing.finalize(probes) return pi, probes def bfs(A: _Array, s: int) -> _Out: """Breadth-first search (Moore, 1959).""" chex.assert_rank(A, 2) probes = probing.initialize(specs.SPECS['bfs']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 's': probing.mask_one(s, A.shape[0]), 'A': np.copy(A), 'adj': probing.graph(np.copy(A)) }) reach = np.zeros(A.shape[0]) pi = np.arange(A.shape[0]) reach[s] = 1 while True: prev_reach = np.copy(reach) probing.push( probes, specs.Stage.HINT, next_probe={ 'reach_h': np.copy(prev_reach), 'pi_h': np.copy(pi) }) for i in range(A.shape[0]): for j in range(A.shape[0]): if A[i, j] > 0 and prev_reach[i] == 1: if pi[j] == j and j != s: pi[j] = i reach[j] = 1 if np.all(reach == prev_reach): break probing.push(probes, specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)}) probing.finalize(probes) return pi, probes def topological_sort(A: _Array) -> _Out: """Topological sorting (Knuth, 1973).""" chex.assert_rank(A, 2) probes = probing.initialize(specs.SPECS['topological_sort']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'A': np.copy(A), 'adj': probing.graph(np.copy(A)) }) color = np.zeros(A.shape[0], dtype=np.int32) topo = np.arange(A.shape[0]) s_prev = np.arange(A.shape[0]) topo_head = 0 for s in range(A.shape[0]): if color[s] == 0: s_last = s u = s v = s probing.push( probes, specs.Stage.HINT, next_probe={ 'topo_h': np.copy(topo), 'topo_head_h': probing.mask_one(topo_head, A.shape[0]), 'color': probing.array_cat(color, 3), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]) }) while True: if color[u] == 0: color[u] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'topo_h': np.copy(topo), 'topo_head_h': probing.mask_one(topo_head, A.shape[0]), 'color': probing.array_cat(color, 3), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]) }) for v in range(A.shape[0]): if A[u, v] != 0: if color[v] == 0: color[v] = 1 s_prev[v] = s_last s_last = v probing.push( probes, specs.Stage.HINT, next_probe={ 'topo_h': np.copy(topo), 'topo_head_h': probing.mask_one(topo_head, A.shape[0]), 'color': probing.array_cat(color, 3), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]) }) break if s_last == u: color[u] = 2 if color[topo_head] == 2: topo[u] = topo_head topo_head = u probing.push( probes, specs.Stage.HINT, next_probe={ 'topo_h': np.copy(topo), 'topo_head_h': probing.mask_one(topo_head, A.shape[0]), 'color': probing.array_cat(color, 3), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': 
probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]) }) if s_prev[u] == u: assert s_prev[s_last] == s_last break pr = s_prev[s_last] s_prev[s_last] = s_last s_last = pr u = s_last probing.push( probes, specs.Stage.OUTPUT, next_probe={ 'topo': np.copy(topo), 'topo_head': probing.mask_one(topo_head, A.shape[0]) }) probing.finalize(probes) return topo, probes def articulation_points(A: _Array) -> _Out: """Articulation points.""" chex.assert_rank(A, 2) probes = probing.initialize(specs.SPECS['articulation_points']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'A': np.copy(A), 'adj': probing.graph(np.copy(A)) }) color = np.zeros(A.shape[0], dtype=np.int32) pi = np.arange(A.shape[0]) d = np.zeros(A.shape[0]) f = np.zeros(A.shape[0]) s_prev = np.arange(A.shape[0]) time = 0 low = np.zeros(A.shape[0]) child_cnt = np.zeros(A.shape[0]) is_cut = np.zeros(A.shape[0]) for s in range(A.shape[0]): if color[s] == 0: s_last = s u = s v = s probing.push( probes, specs.Stage.HINT, next_probe={ 'is_cut_h': np.copy(is_cut), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 'child_cnt': np.copy(child_cnt), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) while True: if color[u] == 0 or d[u] == 0.0: time += 0.01 d[u] = time low[u] = time color[u] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'is_cut_h': np.copy(is_cut), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 'child_cnt': np.copy(child_cnt), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) for v in range(A.shape[0]): if A[u, v] != 0: if color[v] == 0: pi[v] = u color[v] = 1 s_prev[v] = s_last s_last = v child_cnt[u] += 0.01 probing.push( probes, specs.Stage.HINT, next_probe={ 'is_cut_h': np.copy(is_cut), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 'child_cnt': np.copy(child_cnt), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) break elif v != pi[u]: low[u] = min(low[u], d[v]) probing.push( probes, specs.Stage.HINT, next_probe={ 'is_cut_h': np.copy(is_cut), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 'child_cnt': np.copy(child_cnt), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) if s_last == u: color[u] = 2 time += 0.01 f[u] = time for v in range(A.shape[0]): if pi[v] == u: low[u] = min(low[u], low[v]) if pi[u] != u and low[v] >= d[u]: is_cut[u] = 1 if pi[u] == u and child_cnt[u] > 0.01: is_cut[u] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'is_cut_h': np.copy(is_cut), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 'child_cnt': np.copy(child_cnt), 's_prev': np.copy(s_prev), 's': 
probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) if s_prev[u] == u: assert s_prev[s_last] == s_last break pr = s_prev[s_last] s_prev[s_last] = s_last s_last = pr u = s_last probing.push( probes, specs.Stage.OUTPUT, next_probe={'is_cut': np.copy(is_cut)}, ) probing.finalize(probes) return is_cut, probes def bridges(A: _Array) -> _Out: """Bridges.""" chex.assert_rank(A, 2) probes = probing.initialize(specs.SPECS['bridges']) A_pos = np.arange(A.shape[0]) adj = probing.graph(np.copy(A)) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'A': np.copy(A), 'adj': adj }) color = np.zeros(A.shape[0], dtype=np.int32) pi = np.arange(A.shape[0]) d = np.zeros(A.shape[0]) f = np.zeros(A.shape[0]) s_prev = np.arange(A.shape[0]) time = 0 low = np.zeros(A.shape[0]) is_bridge = ( np.zeros((A.shape[0], A.shape[0])) + _OutputClass.MASKED + adj) for s in range(A.shape[0]): if color[s] == 0: s_last = s u = s v = s probing.push( probes, specs.Stage.HINT, next_probe={ 'is_bridge_h': np.copy(is_bridge), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) while True: if color[u] == 0 or d[u] == 0.0: time += 0.01 d[u] = time low[u] = time color[u] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'is_bridge_h': np.copy(is_bridge), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) for v in range(A.shape[0]): if A[u, v] != 0: if color[v] == 0: pi[v] = u color[v] = 1 s_prev[v] = s_last s_last = v probing.push( probes, specs.Stage.HINT, next_probe={ 'is_bridge_h': np.copy(is_bridge), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) break elif v != pi[u]: low[u] = min(low[u], d[v]) probing.push( probes, specs.Stage.HINT, next_probe={ 'is_bridge_h': np.copy(is_bridge), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time }) if s_last == u: color[u] = 2 time += 0.01 f[u] = time for v in range(A.shape[0]): if pi[v] == u: low[u] = min(low[u], low[v]) if low[v] > d[u]: is_bridge[u, v] = 1 is_bridge[v, u] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'is_bridge_h': np.copy(is_bridge), 'pi_h': np.copy(pi), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 'low': np.copy(low), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time 
}) if s_prev[u] == u: assert s_prev[s_last] == s_last break pr = s_prev[s_last] s_prev[s_last] = s_last s_last = pr u = s_last probing.push( probes, specs.Stage.OUTPUT, next_probe={'is_bridge': np.copy(is_bridge)}, ) probing.finalize(probes) return is_bridge, probes def strongly_connected_components(A: _Array) -> _Out: """Kosaraju's strongly-connected components (Aho et al., 1974).""" chex.assert_rank(A, 2) probes = probing.initialize( specs.SPECS['strongly_connected_components']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'A': np.copy(A), 'adj': probing.graph(np.copy(A)) }) scc_id = np.arange(A.shape[0]) color = np.zeros(A.shape[0], dtype=np.int32) d = np.zeros(A.shape[0]) f = np.zeros(A.shape[0]) s_prev = np.arange(A.shape[0]) time = 0 A_t = np.transpose(A) for s in range(A.shape[0]): if color[s] == 0: s_last = s u = s v = s probing.push( probes, specs.Stage.HINT, next_probe={ 'scc_id_h': np.copy(scc_id), 'A_t': probing.graph(np.copy(A_t)), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time, 'phase': 0 }) while True: if color[u] == 0 or d[u] == 0.0: time += 0.01 d[u] = time color[u] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'scc_id_h': np.copy(scc_id), 'A_t': probing.graph(np.copy(A_t)), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time, 'phase': 0 }) for v in range(A.shape[0]): if A[u, v] != 0: if color[v] == 0: color[v] = 1 s_prev[v] = s_last s_last = v probing.push( probes, specs.Stage.HINT, next_probe={ 'scc_id_h': np.copy(scc_id), 'A_t': probing.graph(np.copy(A_t)), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time, 'phase': 0 }) break if s_last == u: color[u] = 2 time += 0.01 f[u] = time probing.push( probes, specs.Stage.HINT, next_probe={ 'scc_id_h': np.copy(scc_id), 'A_t': probing.graph(np.copy(A_t)), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time, 'phase': 0 }) if s_prev[u] == u: assert s_prev[s_last] == s_last break pr = s_prev[s_last] s_prev[s_last] = s_last s_last = pr u = s_last color = np.zeros(A.shape[0], dtype=np.int32) s_prev = np.arange(A.shape[0]) for s in np.argsort(-f): if color[s] == 0: s_last = s u = s v = s probing.push( probes, specs.Stage.HINT, next_probe={ 'scc_id_h': np.copy(scc_id), 'A_t': probing.graph(np.copy(A_t)), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time, 'phase': 1 }) while True: scc_id[u] = s if color[u] == 0 or d[u] == 0.0: time += 0.01 d[u] = 
time color[u] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'scc_id_h': np.copy(scc_id), 'A_t': probing.graph(np.copy(A_t)), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time, 'phase': 1 }) for v in range(A.shape[0]): if A_t[u, v] != 0: if color[v] == 0: color[v] = 1 s_prev[v] = s_last s_last = v probing.push( probes, specs.Stage.HINT, next_probe={ 'scc_id_h': np.copy(scc_id), 'A_t': probing.graph(np.copy(A_t)), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time, 'phase': 1 }) break if s_last == u: color[u] = 2 time += 0.01 f[u] = time probing.push( probes, specs.Stage.HINT, next_probe={ 'scc_id_h': np.copy(scc_id), 'A_t': probing.graph(np.copy(A_t)), 'color': probing.array_cat(color, 3), 'd': np.copy(d), 'f': np.copy(f), 's_prev': np.copy(s_prev), 's': probing.mask_one(s, A.shape[0]), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 's_last': probing.mask_one(s_last, A.shape[0]), 'time': time, 'phase': 1 }) if s_prev[u] == u: assert s_prev[s_last] == s_last break pr = s_prev[s_last] s_prev[s_last] = s_last s_last = pr u = s_last probing.push( probes, specs.Stage.OUTPUT, next_probe={'scc_id': np.copy(scc_id)}, ) probing.finalize(probes) return scc_id, probes def mst_kruskal(A: _Array) -> _Out: """Kruskal's minimum spanning tree (Kruskal, 1956).""" chex.assert_rank(A, 2) probes = probing.initialize(specs.SPECS['mst_kruskal']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 'A': np.copy(A), 'adj': probing.graph(np.copy(A)) }) pi = np.arange(A.shape[0]) def mst_union(u, v, in_mst, probes): root_u = u root_v = v mask_u = np.zeros(in_mst.shape[0]) mask_v = np.zeros(in_mst.shape[0]) mask_u[u] = 1 mask_v[v] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'in_mst_h': np.copy(in_mst), 'pi': np.copy(pi), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 'root_u': probing.mask_one(root_u, A.shape[0]), 'root_v': probing.mask_one(root_v, A.shape[0]), 'mask_u': np.copy(mask_u), 'mask_v': np.copy(mask_v), 'phase': probing.mask_one(1, 3) }) while pi[root_u] != root_u: root_u = pi[root_u] for i in range(mask_u.shape[0]): if mask_u[i] == 1: pi[i] = root_u mask_u[root_u] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'in_mst_h': np.copy(in_mst), 'pi': np.copy(pi), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 'root_u': probing.mask_one(root_u, A.shape[0]), 'root_v': probing.mask_one(root_v, A.shape[0]), 'mask_u': np.copy(mask_u), 'mask_v': np.copy(mask_v), 'phase': probing.mask_one(1, 3) }) while pi[root_v] != root_v: root_v = pi[root_v] for i in range(mask_v.shape[0]): if mask_v[i] == 1: pi[i] = root_v mask_v[root_v] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'in_mst_h': np.copy(in_mst), 'pi': np.copy(pi), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 'root_u': probing.mask_one(root_u, A.shape[0]), 'root_v': probing.mask_one(root_v, A.shape[0]), 'mask_u': np.copy(mask_u), 'mask_v': np.copy(mask_v), 'phase': probing.mask_one(2, 3) }) if 
root_u < root_v: in_mst[u, v] = 1 in_mst[v, u] = 1 pi[root_u] = root_v elif root_u > root_v: in_mst[u, v] = 1 in_mst[v, u] = 1 pi[root_v] = root_u probing.push( probes, specs.Stage.HINT, next_probe={ 'in_mst_h': np.copy(in_mst), 'pi': np.copy(pi), 'u': probing.mask_one(u, A.shape[0]), 'v': probing.mask_one(v, A.shape[0]), 'root_u': probing.mask_one(root_u, A.shape[0]), 'root_v': probing.mask_one(root_v, A.shape[0]), 'mask_u': np.copy(mask_u), 'mask_v': np.copy(mask_v), 'phase': probing.mask_one(0, 3) }) in_mst = np.zeros((A.shape[0], A.shape[0])) # Prep to sort edge array lx = [] ly = [] wts = [] for i in range(A.shape[0]): for j in range(i + 1, A.shape[0]): if A[i, j] > 0: lx.append(i) ly.append(j) wts.append(A[i, j]) probing.push( probes, specs.Stage.HINT, next_probe={ 'in_mst_h': np.copy(in_mst), 'pi': np.copy(pi), 'u': probing.mask_one(0, A.shape[0]), 'v': probing.mask_one(0, A.shape[0]), 'root_u': probing.mask_one(0, A.shape[0]), 'root_v': probing.mask_one(0, A.shape[0]), 'mask_u': np.zeros(A.shape[0]), 'mask_v': np.zeros(A.shape[0]), 'phase': probing.mask_one(0, 3) }) for ind in np.argsort(wts): u = lx[ind] v = ly[ind] mst_union(u, v, in_mst, probes) probing.push( probes, specs.Stage.OUTPUT, next_probe={'in_mst': np.copy(in_mst)}, ) probing.finalize(probes) return in_mst, probes def mst_prim(A: _Array, s: int) -> _Out: """Prim's minimum spanning tree (Prim, 1957).""" chex.assert_rank(A, 2) probes = probing.initialize(specs.SPECS['mst_prim']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 's': probing.mask_one(s, A.shape[0]), 'A': np.copy(A), 'adj': probing.graph(np.copy(A)) }) key = np.zeros(A.shape[0]) mark = np.zeros(A.shape[0]) in_queue = np.zeros(A.shape[0]) pi = np.arange(A.shape[0]) key[s] = 0 in_queue[s] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'pi_h': np.copy(pi), 'key': np.copy(key), 'mark': np.copy(mark), 'in_queue': np.copy(in_queue), 'u': probing.mask_one(s, A.shape[0]) }) for _ in range(A.shape[0]): u = np.argsort(key + (1.0 - in_queue) * 1e9)[0] # drop-in for extract-min if in_queue[u] == 0: break mark[u] = 1 in_queue[u] = 0 for v in range(A.shape[0]): if A[u, v] != 0: if mark[v] == 0 and (in_queue[v] == 0 or A[u, v] < key[v]): pi[v] = u key[v] = A[u, v] in_queue[v] = 1 probing.push( probes, specs.Stage.HINT, next_probe={ 'pi_h': np.copy(pi), 'key': np.copy(key), 'mark': np.copy(mark), 'in_queue': np.copy(in_queue), 'u': probing.mask_one(u, A.shape[0]) }) probing.push(probes, specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)}) probing.finalize(probes) return pi, probes def bellman_ford(A: _Array, s: int) -> _Out: """Bellman-Ford's single-source shortest path (Bellman, 1958).""" chex.assert_rank(A, 2) probes = probing.initialize(specs.SPECS['bellman_ford']) A_pos = np.arange(A.shape[0]) probing.push( probes, specs.Stage.INPUT, next_probe={ 'pos': np.copy(A_pos) * 1.0 / A.shape[0], 's': probing.mask_one(s, A.shape[0]), 'A': np.copy(A), 'adj': probing.graph(np.copy(A)) }) d = np.zeros(A.shape[0]) pi = np.arange(A.shape[0]) msk = np.zeros(A.shape[0]) d[s] = 0 msk[s] = 1 while True: prev_d = np.copy(d) prev_msk = np.copy(msk) probing.push( probes, specs.Stage.HINT, next_probe={ 'pi_h': np.copy(pi), 'd': np.copy(prev_d), 'msk': np.copy(prev_msk) }) for u in range(A.shape[0]): for v in range(A.shape[0]): if prev_msk[u] == 1 and A[u, v] != 0: if msk[v] == 0 or prev_d[u] + A[u, v] < d[v]: d[v] = prev_d[u] + A[u, v] pi[v] = u msk[v] = 1 if np.all(d == prev_d): break probing.push(probes, 
      specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)})
  probing.finalize(probes)

  return pi, probes


def dijkstra(A: _Array, s: int) -> _Out:
  """Dijkstra's single-source shortest path (Dijkstra, 1959)."""

  chex.assert_rank(A, 2)
  probes = probing.initialize(specs.SPECS['dijkstra'])

  A_pos = np.arange(A.shape[0])

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'pos': np.copy(A_pos) * 1.0 / A.shape[0],
          's': probing.mask_one(s, A.shape[0]),
          'A': np.copy(A),
          'adj': probing.graph(np.copy(A))
      })

  d = np.zeros(A.shape[0])
  mark = np.zeros(A.shape[0])
  in_queue = np.zeros(A.shape[0])
  pi = np.arange(A.shape[0])
  d[s] = 0
  in_queue[s] = 1

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'pi_h': np.copy(pi),
          'd': np.copy(d),
          'mark': np.copy(mark),
          'in_queue': np.copy(in_queue),
          'u': probing.mask_one(s, A.shape[0])
      })

  for _ in range(A.shape[0]):
    u = np.argsort(d + (1.0 - in_queue) * 1e9)[0]  # drop-in for extract-min
    if in_queue[u] == 0:
      break
    mark[u] = 1
    in_queue[u] = 0
    for v in range(A.shape[0]):
      if A[u, v] != 0:
        if mark[v] == 0 and (in_queue[v] == 0 or d[u] + A[u, v] < d[v]):
          pi[v] = u
          d[v] = d[u] + A[u, v]
          in_queue[v] = 1

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'pi_h': np.copy(pi),
            'd': np.copy(d),
            'mark': np.copy(mark),
            'in_queue': np.copy(in_queue),
            'u': probing.mask_one(u, A.shape[0])
        })

  probing.push(probes, specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)})
  probing.finalize(probes)

  return pi, probes


def dag_shortest_paths(A: _Array, s: int) -> _Out:
  """DAG shortest path."""

  chex.assert_rank(A, 2)
  probes = probing.initialize(specs.SPECS['dag_shortest_paths'])

  A_pos = np.arange(A.shape[0])

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'pos': np.copy(A_pos) * 1.0 / A.shape[0],
          's': probing.mask_one(s, A.shape[0]),
          'A': np.copy(A),
          'adj': probing.graph(np.copy(A))
      })

  pi = np.arange(A.shape[0])
  d = np.zeros(A.shape[0])
  mark = np.zeros(A.shape[0])
  color = np.zeros(A.shape[0], dtype=np.int32)
  topo = np.arange(A.shape[0])
  s_prev = np.arange(A.shape[0])
  topo_head = 0
  s_last = s
  u = s
  v = s

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'pi_h': np.copy(pi),
          'd': np.copy(d),
          'mark': np.copy(mark),
          'topo_h': np.copy(topo),
          'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
          'color': probing.array_cat(color, 3),
          's_prev': np.copy(s_prev),
          's': probing.mask_one(s, A.shape[0]),
          'u': probing.mask_one(u, A.shape[0]),
          'v': probing.mask_one(v, A.shape[0]),
          's_last': probing.mask_one(s_last, A.shape[0]),
          'phase': 0
      })

  # Phase 0: depth-first search from s to build a topological order.
  while True:
    if color[u] == 0:
      color[u] = 1
      probing.push(
          probes,
          specs.Stage.HINT,
          next_probe={
              'pi_h': np.copy(pi),
              'd': np.copy(d),
              'mark': np.copy(mark),
              'topo_h': np.copy(topo),
              'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
              'color': probing.array_cat(color, 3),
              's_prev': np.copy(s_prev),
              's': probing.mask_one(s, A.shape[0]),
              'u': probing.mask_one(u, A.shape[0]),
              'v': probing.mask_one(v, A.shape[0]),
              's_last': probing.mask_one(s_last, A.shape[0]),
              'phase': 0
          })

    for v in range(A.shape[0]):
      if A[u, v] != 0:
        if color[v] == 0:
          color[v] = 1
          s_prev[v] = s_last
          s_last = v

          probing.push(
              probes,
              specs.Stage.HINT,
              next_probe={
                  'pi_h': np.copy(pi),
                  'd': np.copy(d),
                  'mark': np.copy(mark),
                  'topo_h': np.copy(topo),
                  'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
                  'color': probing.array_cat(color, 3),
                  's_prev': np.copy(s_prev),
                  's': probing.mask_one(s, A.shape[0]),
                  'u': probing.mask_one(u, A.shape[0]),
                  'v': probing.mask_one(v, A.shape[0]),
                  's_last': probing.mask_one(s_last, A.shape[0]),
                  'phase': 0
              })
          break

    if s_last == u:
      color[u] = 2
      if color[topo_head] == 2:
        topo[u] = topo_head
      topo_head = u

      probing.push(
          probes,
          specs.Stage.HINT,
          next_probe={
              'pi_h': np.copy(pi),
              'd': np.copy(d),
              'mark': np.copy(mark),
              'topo_h': np.copy(topo),
              'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
              'color': probing.array_cat(color, 3),
              's_prev': np.copy(s_prev),
              's': probing.mask_one(s, A.shape[0]),
              'u': probing.mask_one(u, A.shape[0]),
              'v': probing.mask_one(v, A.shape[0]),
              's_last': probing.mask_one(s_last, A.shape[0]),
              'phase': 0
          })

      if s_prev[u] == u:
        assert s_prev[s_last] == s_last
        break
      pr = s_prev[s_last]
      s_prev[s_last] = s_last
      s_last = pr

    u = s_last

  # Phase 1: relax edges in topological order.
  assert topo_head == s
  d[topo_head] = 0
  mark[topo_head] = 1

  while topo[topo_head] != topo_head:
    i = topo_head
    mark[topo_head] = 1

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'pi_h': np.copy(pi),
            'd': np.copy(d),
            'mark': np.copy(mark),
            'topo_h': np.copy(topo),
            'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
            'color': probing.array_cat(color, 3),
            's_prev': np.copy(s_prev),
            's': probing.mask_one(s, A.shape[0]),
            'u': probing.mask_one(u, A.shape[0]),
            'v': probing.mask_one(v, A.shape[0]),
            's_last': probing.mask_one(s_last, A.shape[0]),
            'phase': 1
        })

    for j in range(A.shape[0]):
      if A[i, j] != 0.0:
        if mark[j] == 0 or d[i] + A[i, j] < d[j]:
          d[j] = d[i] + A[i, j]
          pi[j] = i
          mark[j] = 1
    topo_head = topo[topo_head]

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'pi_h': np.copy(pi),
          'd': np.copy(d),
          'mark': np.copy(mark),
          'topo_h': np.copy(topo),
          'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
          'color': probing.array_cat(color, 3),
          's_prev': np.copy(s_prev),
          's': probing.mask_one(s, A.shape[0]),
          'u': probing.mask_one(u, A.shape[0]),
          'v': probing.mask_one(v, A.shape[0]),
          's_last': probing.mask_one(s_last, A.shape[0]),
          'phase': 1
      })

  probing.push(probes, specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)})
  probing.finalize(probes)

  return pi, probes


def floyd_warshall(A: _Array) -> _Out:
  """Floyd-Warshall's all-pairs shortest paths (Floyd, 1962)."""

  chex.assert_rank(A, 2)
  probes = probing.initialize(specs.SPECS['floyd_warshall'])

  A_pos = np.arange(A.shape[0])

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'pos': np.copy(A_pos) / A.shape[0],
          'A': np.copy(A),
          'adj': probing.graph(np.copy(A))
      })

  D = np.copy(A)
  Pi = np.zeros((A.shape[0], A.shape[0]))
  msk = probing.graph(np.copy(A))

  for i in range(A.shape[0]):
    for j in range(A.shape[0]):
      Pi[i, j] = i

  for k in range(A.shape[0]):
    prev_D = np.copy(D)
    prev_msk = np.copy(msk)

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'Pi_h': np.copy(Pi),
            'D': np.copy(prev_D),
            'msk': np.copy(prev_msk),
            'k': probing.mask_one(k, A.shape[0])
        })

    for i in range(A.shape[0]):
      for j in range(A.shape[0]):
        if prev_msk[i, k] > 0 and prev_msk[k, j] > 0:
          if msk[i, j] == 0 or prev_D[i, k] + prev_D[k, j] < D[i, j]:
            D[i, j] = prev_D[i, k] + prev_D[k, j]
            Pi[i, j] = Pi[k, j]
          else:
            D[i, j] = prev_D[i, j]
          msk[i, j] = 1

  probing.push(probes, specs.Stage.OUTPUT, next_probe={'Pi': np.copy(Pi)})
  probing.finalize(probes)

  return Pi, probes


def bipartite_matching(A: _Array, n: int, m: int, s: int, t: int) -> _Out:
  """Edmonds-Karp bipartite matching (Edmonds & Karp, 1972)."""

  chex.assert_rank(A, 2)
  assert A.shape[0] == n + m + 2  # add source and sink vertices
  assert s == 0 and t == n + m + 1  # ensure for consistency

  probes = probing.initialize(specs.SPECS['bipartite_matching'])

  A_pos = np.arange(A.shape[0])
  adj = probing.graph(np.copy(A))

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'pos': np.copy(A_pos) * 1.0 / A.shape[0],
          'A': np.copy(A),
          'adj': adj,
          's': probing.mask_one(s, A.shape[0]),
          't': probing.mask_one(t, A.shape[0])
      })

  in_matching = (
      np.zeros((A.shape[0], A.shape[1])) + _OutputClass.MASKED + adj + adj.T)
  u = t
  while True:
    # Phase 0: search for an augmenting path in the residual graph.
    mask = np.zeros(A.shape[0])
    d = np.zeros(A.shape[0])
    pi = np.arange(A.shape[0])
    d[s] = 0
    mask[s] = 1
    while True:
      prev_d = np.copy(d)
      prev_mask = np.copy(mask)

      probing.push(
          probes,
          specs.Stage.HINT,
          next_probe={
              'in_matching_h': np.copy(in_matching),
              'A_h': np.copy(A),
              'adj_h': probing.graph(np.copy(A)),
              'd': np.copy(prev_d),
              'msk': np.copy(prev_mask),
              'pi': np.copy(pi),
              'u': probing.mask_one(u, A.shape[0]),
              'phase': 0
          })

      for u in range(A.shape[0]):
        for v in range(A.shape[0]):
          if A[u, v] != 0:
            if prev_mask[u] == 1 and (
                mask[v] == 0 or prev_d[u] + A[u, v] < d[v]):
              d[v] = prev_d[u] + A[u, v]
              pi[v] = u
              mask[v] = 1
      if np.all(d == prev_d):
        # The search has converged without reaching t: no augmenting path.
        probing.push(
            probes,
            specs.Stage.OUTPUT,
            next_probe={'in_matching': np.copy(in_matching)},
        )
        probing.finalize(probes)
        return in_matching, probes
      elif pi[t] != t:
        break

    # Phase 1: walk back along the augmenting path, flipping residual edges
    # and updating the matching.
    u = t

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'in_matching_h': np.copy(in_matching),
            'A_h': np.copy(A),
            'adj_h': probing.graph(np.copy(A)),
            'd': np.copy(prev_d),
            'msk': np.copy(prev_mask),
            'pi': np.copy(pi),
            'u': probing.mask_one(u, A.shape[0]),
            'phase': 1
        })

    while pi[u] != u:
      if pi[u] < u:
        in_matching[pi[u], u] = 1
      else:
        in_matching[u, pi[u]] = 0
      A[pi[u], u] = 0
      A[u, pi[u]] = 1
      u = pi[u]

      probing.push(
          probes,
          specs.Stage.HINT,
          next_probe={
              'in_matching_h': np.copy(in_matching),
              'A_h': np.copy(A),
              'adj_h': probing.graph(np.copy(A)),
              'd': np.copy(prev_d),
              'msk': np.copy(prev_mask),
              'pi': np.copy(pi),
              'u': probing.mask_one(u, A.shape[0]),
              'phase': 1
          })
clrs-master
clrs/_src/algorithms/graphs.py
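The graph generators above return both the algorithm's output and a trace of probes. Below is a minimal usage sketch, assuming the package layout given by the file path above (`clrs._src.algorithms.graphs`) and the adjacency-matrix convention used throughout the file (entry `A[u, v]` is the edge weight, with `0` meaning no edge); the example graph itself is made up for illustration.

import numpy as np

from clrs._src.algorithms import graphs

# A small symmetric weighted graph; zeros denote missing edges.
A = np.array([
    [0.0, 1.0, 4.0, 0.0],
    [1.0, 0.0, 2.0, 6.0],
    [4.0, 2.0, 0.0, 3.0],
    [0.0, 6.0, 3.0, 0.0],
])

pi, probes = graphs.dijkstra(A, s=0)
print(pi)  # parent of each node in the shortest-path tree rooted at node 0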
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Unit tests for `greedy.py`."""

from absl.testing import absltest

from clrs._src.algorithms import greedy
import numpy as np


class GreedyTest(absltest.TestCase):

  def test_greedy_activity_selector(self):
    s = np.array([1, 3, 0, 5, 3, 5, 6, 8, 8, 2, 12])
    f = np.array([4, 5, 6, 7, 9, 9, 10, 11, 12, 14, 16])

    expected = np.array([1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
    out, _ = greedy.activity_selector(s, f)
    np.testing.assert_array_equal(expected, out)

  def test_task_scheduling(self):
    d = np.array([4, 2, 4, 3, 1, 4, 6])
    w = np.array([70, 60, 50, 40, 30, 20, 10])

    expected = np.array([1, 1, 1, 0, 0, 1, 1])
    out, _ = greedy.task_scheduling(d, w)
    np.testing.assert_array_equal(expected, out)


if __name__ == "__main__":
  absltest.main()
clrs-master
clrs/_src/algorithms/greedy_test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Strings algorithm generators.

Currently implements the following:
- Naive string matching
- Knuth-Morris-Pratt string matching (Knuth et al., 1977)

See "Introduction to Algorithms" 3ed (CLRS3) for more information.
"""
# pylint: disable=invalid-name

from typing import Tuple

import chex
from clrs._src import probing
from clrs._src import specs
import numpy as np

_Array = np.ndarray
_Out = Tuple[int, probing.ProbesDict]

_ALPHABET_SIZE = 4


def naive_string_matcher(T: _Array, P: _Array) -> _Out:
  """Naive string matching."""

  chex.assert_rank([T, P], 1)
  probes = probing.initialize(specs.SPECS['naive_string_matcher'])

  T_pos = np.arange(T.shape[0])
  P_pos = np.arange(P.shape[0])

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'string': probing.strings_id(T_pos, P_pos),
          'pos': probing.strings_pos(T_pos, P_pos),
          'key': probing.array_cat(
              np.concatenate([np.copy(T), np.copy(P)]), _ALPHABET_SIZE),
      })

  s = 0
  while s <= T.shape[0] - P.shape[0]:
    i = s
    j = 0

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'pred_h': probing.strings_pred(T_pos, P_pos),
            's': probing.mask_one(s, T.shape[0] + P.shape[0]),
            'i': probing.mask_one(i, T.shape[0] + P.shape[0]),
            'j': probing.mask_one(T.shape[0] + j, T.shape[0] + P.shape[0])
        })

    while True:
      if T[i] != P[j]:
        break
      elif j == P.shape[0] - 1:
        probing.push(
            probes,
            specs.Stage.OUTPUT,
            next_probe={'match': probing.mask_one(s, T.shape[0] + P.shape[0])})
        probing.finalize(probes)
        return s, probes
      else:
        i += 1
        j += 1

        probing.push(
            probes,
            specs.Stage.HINT,
            next_probe={
                'pred_h': probing.strings_pred(T_pos, P_pos),
                's': probing.mask_one(s, T.shape[0] + P.shape[0]),
                'i': probing.mask_one(i, T.shape[0] + P.shape[0]),
                'j': probing.mask_one(T.shape[0] + j, T.shape[0] + P.shape[0])
            })

    s += 1

  # By convention, set probe to head of needle if no match is found
  probing.push(
      probes,
      specs.Stage.OUTPUT,
      next_probe={
          'match': probing.mask_one(T.shape[0], T.shape[0] + P.shape[0])
      })
  probing.finalize(probes)

  return T.shape[0], probes


def kmp_matcher(T: _Array, P: _Array) -> _Out:
  """Knuth-Morris-Pratt string matching (Knuth et al., 1977)."""

  chex.assert_rank([T, P], 1)
  probes = probing.initialize(specs.SPECS['kmp_matcher'])

  T_pos = np.arange(T.shape[0])
  P_pos = np.arange(P.shape[0])

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'string': probing.strings_id(T_pos, P_pos),
          'pos': probing.strings_pos(T_pos, P_pos),
          'key': probing.array_cat(
              np.concatenate([np.copy(T), np.copy(P)]), _ALPHABET_SIZE),
      })

  pi = np.arange(P.shape[0])
  is_reset = np.zeros(P.shape[0])

  k = 0
  k_reset = 1
  is_reset[0] = 1

  # Cover the edge case where |P| = 1, and the first half is not executed.
  delta = 1 if P.shape[0] > 1 else 0

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'pred_h': probing.strings_pred(T_pos, P_pos),
          'pi': probing.strings_pi(T_pos, P_pos, pi),
          'is_reset': np.concatenate(
              [np.zeros(T.shape[0]), np.copy(is_reset)]),
          'k': probing.mask_one(T.shape[0], T.shape[0] + P.shape[0]),
          'k_reset': k_reset,
          'q': probing.mask_one(T.shape[0] + delta, T.shape[0] + P.shape[0]),
          'q_reset': 1,
          's': probing.mask_one(0, T.shape[0] + P.shape[0]),
          'i': probing.mask_one(0, T.shape[0] + P.shape[0]),
          'phase': 0
      })

  for q in range(1, P.shape[0]):
    while k_reset == 0 and P[k + 1] != P[q]:
      if is_reset[k] == 1:
        k_reset = 1
        k = 0
      else:
        k = pi[k]

      probing.push(
          probes,
          specs.Stage.HINT,
          next_probe={
              'pred_h': probing.strings_pred(T_pos, P_pos),
              'pi': probing.strings_pi(T_pos, P_pos, pi),
              'is_reset': np.concatenate(
                  [np.zeros(T.shape[0]), np.copy(is_reset)]),
              'k': probing.mask_one(T.shape[0] + k, T.shape[0] + P.shape[0]),
              'k_reset': k_reset,
              'q': probing.mask_one(T.shape[0] + q, T.shape[0] + P.shape[0]),
              'q_reset': 1,
              's': probing.mask_one(0, T.shape[0] + P.shape[0]),
              'i': probing.mask_one(0, T.shape[0] + P.shape[0]),
              'phase': 0
          })

    if k_reset == 1:
      k_reset = 0
      k = -1
    if P[k + 1] == P[q]:
      k += 1
    if k == -1:
      k = 0
      k_reset = 1
      is_reset[q] = 1
    pi[q] = k

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'pred_h': probing.strings_pred(T_pos, P_pos),
            'pi': probing.strings_pi(T_pos, P_pos, pi),
            'is_reset': np.concatenate(
                [np.zeros(T.shape[0]), np.copy(is_reset)]),
            'k': probing.mask_one(T.shape[0] + k, T.shape[0] + P.shape[0]),
            'k_reset': k_reset,
            'q': probing.mask_one(T.shape[0] + q, T.shape[0] + P.shape[0]),
            'q_reset': 1,
            's': probing.mask_one(0, T.shape[0] + P.shape[0]),
            'i': probing.mask_one(0, T.shape[0] + P.shape[0]),
            'phase': 0
        })

  q = 0
  q_reset = 1
  s = 0

  for i in range(T.shape[0]):
    if i >= P.shape[0]:
      s += 1

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'pred_h': probing.strings_pred(T_pos, P_pos),
            'pi': probing.strings_pi(T_pos, P_pos, pi),
            'is_reset': np.concatenate(
                [np.zeros(T.shape[0]), np.copy(is_reset)]),
            'k': probing.mask_one(T.shape[0] + k, T.shape[0] + P.shape[0]),
            'k_reset': k_reset,
            'q': probing.mask_one(T.shape[0] + q, T.shape[0] + P.shape[0]),
            'q_reset': q_reset,
            's': probing.mask_one(s, T.shape[0] + P.shape[0]),
            'i': probing.mask_one(i, T.shape[0] + P.shape[0]),
            'phase': 1
        })

    while q_reset == 0 and P[q + 1] != T[i]:
      if is_reset[q] == 1:
        q = 0
        q_reset = 1
      else:
        q = pi[q]

      probing.push(
          probes,
          specs.Stage.HINT,
          next_probe={
              'pred_h': probing.strings_pred(T_pos, P_pos),
              'pi': probing.strings_pi(T_pos, P_pos, pi),
              'is_reset': np.concatenate(
                  [np.zeros(T.shape[0]), np.copy(is_reset)]),
              'k': probing.mask_one(T.shape[0] + k, T.shape[0] + P.shape[0]),
              'k_reset': k_reset,
              'q': probing.mask_one(T.shape[0] + q, T.shape[0] + P.shape[0]),
              'q_reset': q_reset,
              's': probing.mask_one(s, T.shape[0] + P.shape[0]),
              'i': probing.mask_one(i, T.shape[0] + P.shape[0]),
              'phase': 1
          })

    if q_reset == 1:
      q = -1
      q_reset = 0
    if P[q + 1] == T[i]:
      if q == P.shape[0] - 2:
        probing.push(
            probes,
            specs.Stage.OUTPUT,
            next_probe={'match': probing.mask_one(s, T.shape[0] + P.shape[0])})
        probing.finalize(probes)
        return s, probes
      q += 1
    if q == -1:
      q_reset = 1
      q = 0

  # By convention, set probe to head of needle if no match is found
  probing.push(
      probes,
      specs.Stage.OUTPUT,
      next_probe={
          'match': probing.mask_one(T.shape[0], T.shape[0] + P.shape[0])
      })
  probing.finalize(probes)

  return T.shape[0], probes
clrs-master
clrs/_src/algorithms/strings.py
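Both matchers operate on integer-encoded strings whose symbols lie in {0, ..., _ALPHABET_SIZE - 1} (here 0-3), since the key probe is encoded with `probing.array_cat(..., _ALPHABET_SIZE)`. A minimal usage sketch follows, assuming the module path shown above; the arrays are illustrative only.

import numpy as np

from clrs._src.algorithms import strings

T = np.array([0, 1, 2, 3, 1, 2, 1, 2, 3])  # text ("haystack")
P = np.array([1, 2, 3])                    # pattern ("needle")

match_naive, _ = strings.naive_string_matcher(T, P)
match_kmp, _ = strings.kmp_matcher(T, P)

# Both matchers report the leftmost occurrence, which starts at index 1.
assert match_naive == match_kmp == 1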
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Dynamic programming algorithm generators.

Currently implements the following:
- Matrix-chain multiplication
- Longest common subsequence
- Optimal binary search tree (Aho et al., 1974)

See "Introduction to Algorithms" 3ed (CLRS3) for more information.
"""
# pylint: disable=invalid-name

from typing import Tuple

import chex
from clrs._src import probing
from clrs._src import specs
import numpy as np

_Array = np.ndarray
_Out = Tuple[_Array, probing.ProbesDict]


def matrix_chain_order(p: _Array) -> _Out:
  """Matrix-chain multiplication."""

  chex.assert_rank(p, 1)
  probes = probing.initialize(specs.SPECS['matrix_chain_order'])
  A_pos = np.arange(p.shape[0])

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'pos': np.copy(A_pos) * 1.0 / A_pos.shape[0],
          'p': np.copy(p)
      })

  m = np.zeros((p.shape[0], p.shape[0]))
  s = np.zeros((p.shape[0], p.shape[0]))
  msk = np.zeros((p.shape[0], p.shape[0]))
  for i in range(1, p.shape[0]):
    m[i, i] = 0
    msk[i, i] = 1
  while True:
    prev_m = np.copy(m)
    prev_msk = np.copy(msk)

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'pred_h': probing.array(np.copy(A_pos)),
            'm': np.copy(prev_m),
            's_h': np.copy(s),
            'msk': np.copy(msk)
        })

    for i in range(1, p.shape[0]):
      for j in range(i + 1, p.shape[0]):
        flag = prev_msk[i, j]
        for k in range(i, j):
          if prev_msk[i, k] == 1 and prev_msk[k + 1, j] == 1:
            msk[i, j] = 1
            q = prev_m[i, k] + prev_m[k + 1, j] + p[i - 1] * p[k] * p[j]
            if flag == 0 or q < m[i, j]:
              m[i, j] = q
              s[i, j] = k
              flag = 1
    if np.all(prev_m == m):
      break

  probing.push(probes, specs.Stage.OUTPUT, next_probe={'s': np.copy(s)})
  probing.finalize(probes)

  return s[1:, 1:], probes


def lcs_length(x: _Array, y: _Array) -> _Out:
  """Longest common subsequence."""

  chex.assert_rank([x, y], 1)
  probes = probing.initialize(specs.SPECS['lcs_length'])

  x_pos = np.arange(x.shape[0])
  y_pos = np.arange(y.shape[0])
  b = np.zeros((x.shape[0], y.shape[0]))
  c = np.zeros((x.shape[0], y.shape[0]))

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'string': probing.strings_id(x_pos, y_pos),
          'pos': probing.strings_pos(x_pos, y_pos),
          'key': probing.array_cat(np.concatenate([np.copy(x), np.copy(y)]), 4)
      })

  for i in range(x.shape[0]):
    if x[i] == y[0]:
      c[i, 0] = 1
      b[i, 0] = 0
    elif i > 0 and c[i - 1, 0] == 1:
      c[i, 0] = 1
      b[i, 0] = 1
    else:
      c[i, 0] = 0
      b[i, 0] = 1
  for j in range(y.shape[0]):
    if x[0] == y[j]:
      c[0, j] = 1
      b[0, j] = 0
    elif j > 0 and c[0, j - 1] == 1:
      c[0, j] = 1
      b[0, j] = 2
    else:
      c[0, j] = 0
      b[0, j] = 1

  while True:
    prev_c = np.copy(c)

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'pred_h': probing.strings_pred(x_pos, y_pos),
            'b_h': probing.strings_pair_cat(np.copy(b), 3),
            'c': probing.strings_pair(prev_c)
        })

    for i in range(1, x.shape[0]):
      for j in range(1, y.shape[0]):
        if x[i] == y[j]:
          c[i, j] = prev_c[i - 1, j - 1] + 1
          b[i, j] = 0
        elif prev_c[i - 1, j] >= prev_c[i, j - 1]:
          c[i, j] = prev_c[i - 1, j]
          b[i, j] = 1
        else:
          c[i, j] = prev_c[i, j - 1]
          b[i, j] = 2
    if np.all(prev_c == c):
      break

  probing.push(
      probes,
      specs.Stage.OUTPUT,
      next_probe={'b': probing.strings_pair_cat(np.copy(b), 3)})
  probing.finalize(probes)

  return b, probes


def optimal_bst(p: _Array, q: _Array) -> _Out:
  """Optimal binary search tree (Aho et al., 1974)."""

  chex.assert_rank([p, q], 1)
  probes = probing.initialize(specs.SPECS['optimal_bst'])

  A_pos = np.arange(q.shape[0])
  p_cpy = np.zeros(q.shape[0])
  p_cpy[:-1] = np.copy(p)

  probing.push(
      probes,
      specs.Stage.INPUT,
      next_probe={
          'pos': np.copy(A_pos) * 1.0 / q.shape[0],
          'p': np.copy(p_cpy),
          'q': np.copy(q)
      })

  e = np.zeros((q.shape[0], q.shape[0]))
  w = np.zeros((q.shape[0], q.shape[0]))
  root = np.zeros((q.shape[0], q.shape[0]))
  msks = np.zeros((q.shape[0], q.shape[0]))

  for i in range(q.shape[0]):
    e[i, i] = q[i]
    w[i, i] = q[i]
    msks[i, i] = 1

  probing.push(
      probes,
      specs.Stage.HINT,
      next_probe={
          'pred_h': probing.array(np.copy(A_pos)),
          'root_h': np.copy(root),
          'e': np.copy(e),
          'w': np.copy(w),
          'msk': np.copy(msks)
      })

  for l in range(1, p.shape[0] + 1):
    for i in range(p.shape[0] - l + 1):
      j = i + l
      e[i, j] = 1e9
      w[i, j] = w[i, j - 1] + p[j - 1] + q[j]
      for r in range(i, j):
        t = e[i, r] + e[r + 1, j] + w[i, j]
        if t < e[i, j]:
          e[i, j] = t
          root[i, j] = r
      msks[i, j] = 1

    probing.push(
        probes,
        specs.Stage.HINT,
        next_probe={
            'pred_h': probing.array(np.copy(A_pos)),
            'root_h': np.copy(root),
            'e': np.copy(e),
            'w': np.copy(w),
            'msk': np.copy(msks)
        })

  probing.push(probes, specs.Stage.OUTPUT, next_probe={'root': np.copy(root)})
  probing.finalize(probes)

  return root, probes
clrs-master
clrs/_src/algorithms/dynamic_programming.py
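As a usage sketch for the dynamic-programming generators, the call below runs `matrix_chain_order` on the dimension vector from the classic matrix-chain example in CLRS3 (Section 15.2). The module path is assumed from the file path above, and note that the returned table drops the padding row and column (`s[1:, 1:]`).

import numpy as np

from clrs._src.algorithms import dynamic_programming as dp

# p[i - 1] x p[i] are the dimensions of the i-th matrix in the chain.
p = np.array([30, 35, 15, 5, 10, 20, 25])

split, probes = dp.matrix_chain_order(p)
print(split)  # table of optimal split points for each sub-chain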