import torch
import re
import pandas as pd
import numpy as np
import h5py
from omegaconf import OmegaConf
from esm.tokenization.sequence_tokenizer import EsmSequenceTokenizer
from Bio.PDB import PDBParser, is_aa

device = torch.device("cuda:0")

# Map 3-letter residue names to 1-letter codes
three_to_one = {
    'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D',
    'CYS': 'C', 'GLN': 'Q', 'GLU': 'E', 'GLY': 'G',
    'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
    'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',
    'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V',
    'SEC': 'U', 'PYL': 'O', 'ASX': 'B', 'GLX': 'Z',
    'XLE': 'J', 'UNK': 'X'
}
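# Note: Bio.SeqUtils.seq1 provides an equivalent three-to-one conversion; the
# explicit table above keeps the nonstandard codes (SEC, PYL, ASX, GLX, XLE, UNK) visible.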

def get_backbone_coords_from_local_pdb(pdb_path, chain_id='A', sequence_length=None, target="data", device=device):
    """
    Load backbone coordinates and residue types from a local PDB file.

    Returns:
        coords_tensor: torch.Tensor of shape (1, N, 3, 3)
        residue_types: list of one-letter residue codes
    """
    parser = PDBParser(QUIET=True)
    structure = parser.get_structure("local_structure", pdb_path)
    coords = []
    residue_types = []
    model = structure[0]
    if chain_id not in model:
        raise ValueError(f"Chain {chain_id} not found in {pdb_path}")
    chain = model[chain_id]
    for residue in chain:
        if sequence_length is not None and len(coords) >= sequence_length:
            break
        if not is_aa(residue):
            continue
        try:
            n = residue['N'].get_coord()
            ca = residue['CA'].get_coord()
            c = residue['C'].get_coord()
            coords.append([n, ca, c])
            resname = residue.get_resname().upper()
            residue_types.append(three_to_one.get(resname, 'X'))  # default to 'X' if unknown
        except KeyError:
            # Skip residues with missing backbone atoms
            continue
    if not coords:
        raise ValueError("No residues with complete backbone atoms found.")
    # Infinity-padding at the BOS/EOS positions so coords line up with the tokenized sequence
    pad = [[float('inf')]*3, [float('inf')]*3, [float('inf')]*3]
    coords.insert(0, pad)
    coords.append(pad)
    # Target-specific extra padding to align the PDB structure with the dataset's residue numbering
    if target == "ParD2":
        coords = [pad, pad] + coords + [pad, pad]
    elif target == "ParD3":
        coords = [pad]*2 + coords + [pad]*6
    elif target == "TrpB4":
        coords = [pad] + coords
    coords_tensor = torch.tensor(coords, device=device).unsqueeze(0)  # (1, N, 3, 3)
    return coords_tensor, residue_types
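
# Minimal usage sketch (hypothetical file name; GB1's B1 domain is 56 residues):
#
#   coords, restypes = get_backbone_coords_from_local_pdb(
#       "GB1.pdb", chain_id="A", sequence_length=56)
#   coords.shape  # torch.Size([1, 58, 3, 3]): 56 residues + inf-padded BOS/EOS slots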

num_replicates = 10
campaign_number = 1  # set to the campaign of interest
dataset_size = 96    # set to the dataset size of interest
sequence_tokenizer = EsmSequenceTokenizer()
datasets = ["GB1", "TrpB4"]
data_root_path = "/global/cfs/projectdirs/m4235/sebastian/data"
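# Directory layout assumed by the paths used below:
#   {data_root_path}/GB1/scale2max/GB1.csv, GB1.fasta, GB1.pdb
#   {data_root_path}/TrpB/scale2max/TrpB4.csv, TrpB.fasta, TrpB.pdb  (shared by all TrpB variants)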

for data in datasets:
    print(data)
    for i in range(num_replicates):
        cfg = OmegaConf.load("./config.yaml")
        sampling_temperature = 1
        OmegaConf.update(cfg, "train.lightning_model_args.sampling_temperature", sampling_temperature)
        mask_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["mask"]
        bos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["bos"]
        eos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["eos"]
        pad_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["pad"]
        # All TrpB variants share one data directory and parent FASTA
        if not data.startswith("TrpB"):
            df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
            with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
                parent_sequence_decoded = file.readlines()[1].strip()
        else:
            df = pd.read_csv(f"{data_root_path}/TrpB/scale2max/{data}.csv")
            with open(f"{data_root_path}/TrpB/TrpB.fasta", "r") as file:
                parent_sequence_decoded = file.readlines()[1].strip()
        # Read the mutated positions from one row's "muts" string (for GB1, a row
        # deep in the table is used)
        if data != "GB1":
            muts = df["muts"].iloc[0]
        else:
            muts = df["muts"].iloc[100000]
        # Positions are 1-based; they are converted to 0-based inline ("-1") below
        numbers = re.findall(r'\d+', muts)
        mask_indices = list(map(int, numbers))
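        # Illustration (mutation-string format assumed): for GB1 a muts entry such as
        # "V39A:D40G:G41L:V54A" would yield mask_indices == [39, 40, 41, 54]; only the
        # digits are used, the wild-type/mutant letters are ignored.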
        fitness_scores = []
        # Load generations from base_model_{dataset_size}
        trpb_base = torch.load(f"./{data}/base_model_{dataset_size}/trpb_post_rd_{campaign_number-1}_{i}.pt")
        all_unmasked_sequences_decoded_base = trpb_base["all_unmasked_sequences_decoded"]
        all_unmasked_sequences_base = trpb_base["all_unmasked_sequences"]
        all_masked_sequences_base = trpb_base["all_masked_sequences"]
        all_unmasked_sequences_base = all_unmasked_sequences_base.reshape(-1, all_unmasked_sequences_base.shape[-1])
        all_logps_base = trpb_base["all_logps"]
        for unmasked_sequence_decoded, unmasked_sequence in zip(all_unmasked_sequences_decoded_base, all_unmasked_sequences_base):
            index_residue_0 = unmasked_sequence_decoded[mask_indices[0]-1]
            index_residue_1 = unmasked_sequence_decoded[mask_indices[1]-1]
            index_residue_2 = unmasked_sequence_decoded[mask_indices[2]-1]
            try:
                # Four-site libraries (e.g. GB1) have a fourth mutated position
                index_residue_3 = unmasked_sequence_decoded[mask_indices[3]-1]
                mutations = [index_residue_0, index_residue_1, index_residue_2, index_residue_3]
            except IndexError:
                # Three-site libraries only list three positions
                mutations = [index_residue_0, index_residue_1, index_residue_2]
            muts = ''.join(mutations)
            df_filtered = df[df["AAs"] == muts]
            if len(df_filtered) == 0:
                # Distinguish sequences containing non-amino-acid tokens from combinations
                # simply absent from the dataset; both get a floor fitness of -2
                if torch.any((unmasked_sequence[1:-1] > 23) | (unmasked_sequence[1:-1] < 4)):
                    print(f"Invalid sequence {muts}")
                else:
                    print(f"Sequence {muts} not found in dataset")
                fitness_score = -2
            else:
                fitness_score = df_filtered["fitness"].values[0]
            fitness_scores.append(fitness_score)
        # Load generations from aligned_{campaign_number-1}_{dataset_size} and score
        # them with the same lookup as above
        trpb_aligned = torch.load(f"./{data}/aligned_{campaign_number-1}_{dataset_size}_{i}/trpb_{i}.pt")
        all_unmasked_sequences_decoded_aligned_0 = trpb_aligned["all_unmasked_sequences_decoded"]
        all_unmasked_sequences_aligned_0 = trpb_aligned["all_unmasked_sequences"]
        all_masked_sequences_aligned_0 = trpb_aligned["all_masked_sequences"]
        all_unmasked_sequences_aligned_0 = all_unmasked_sequences_aligned_0.reshape(-1, all_unmasked_sequences_aligned_0.shape[-1])
        all_logps_aligned_0 = trpb_aligned["all_logps"]
        for unmasked_sequence_decoded, unmasked_sequence in zip(all_unmasked_sequences_decoded_aligned_0, all_unmasked_sequences_aligned_0):
            index_residue_0 = unmasked_sequence_decoded[mask_indices[0]-1]
            index_residue_1 = unmasked_sequence_decoded[mask_indices[1]-1]
            index_residue_2 = unmasked_sequence_decoded[mask_indices[2]-1]
            try:
                index_residue_3 = unmasked_sequence_decoded[mask_indices[3]-1]
                mutations = [index_residue_0, index_residue_1, index_residue_2, index_residue_3]
            except IndexError:
                mutations = [index_residue_0, index_residue_1, index_residue_2]
            muts = ''.join(mutations)
            df_filtered = df[df["AAs"] == muts]
            if len(df_filtered) == 0:
                if torch.any((unmasked_sequence[1:-1] > 23) | (unmasked_sequence[1:-1] < 4)):
                    print(f"Invalid sequence {muts}")
                else:
                    print(f"Sequence {muts} not found in dataset")
                fitness_score = -2
            else:
                fitness_score = df_filtered["fitness"].values[0]
            fitness_scores.append(fitness_score)
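        # The two scoring loops above are identical; a possible shared helper (sketch,
        # hypothetical name) that also avoids the try/except by iterating over however
        # many positions mask_indices holds (token-validity logging omitted for brevity):
        #
        #   def score_sequences(decoded_seqs, df, mask_indices):
        #       scores = []
        #       for decoded in decoded_seqs:
        #           aas = ''.join(decoded[p - 1] for p in mask_indices)
        #           hit = df[df["AAs"] == aas]
        #           scores.append(hit["fitness"].values[0] if len(hit) else -2)
        #       return scores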
        # Concatenate the sequences and logps from both models
        all_unmasked_sequences = torch.cat((all_unmasked_sequences_base, all_unmasked_sequences_aligned_0), dim=0)
        all_masked_sequences = torch.cat((all_masked_sequences_base, all_masked_sequences_aligned_0), dim=0)
        print(all_logps_base.shape, all_logps_aligned_0.shape)
        all_logps = torch.cat((all_logps_base, all_logps_aligned_0), dim=0)
        all_fitness_scores = fitness_scores
        # Check for duplicates in all_unmasked_sequences
        unique_sequences, counts = torch.unique(all_unmasked_sequences, dim=0, return_counts=True)
        num_duplicates = torch.sum(counts > 1).item()
        print(f"Number of duplicate sequences: {num_duplicates}")
        # Convert fitness to an energy: E = -log(fitness), with a large penalty
        # energy of 10 for non-positive (or invalid) fitness values
        all_fitness_scores = np.array(all_fitness_scores)
        with np.errstate(divide='ignore', invalid='ignore'):
            all_fitness_scores = np.where(all_fitness_scores > 0, -np.log(all_fitness_scores), 10)
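        # Worked example: fitness 1.0 -> energy 0.0; fitness e**-1 (~0.37) -> energy 1.0;
        # the -2 placeholder for invalid/missing sequences -> penalty energy 10.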
        sampling_temperature = 1  # hard-code a sampling temperature of 1 for mixed-temperature alignment
        sequence_length = all_unmasked_sequences.shape[1]
        # Constant sequence_id of 1 for every position of every sequence
        sequence_id = torch.ones((all_unmasked_sequences.shape[0], sequence_length), device=device).long()
        # Placeholder structure tokens: 4096 (the ESM3 structure mask id) everywhere,
        # with BOS (4098) and EOS (4097) at the ends
        structure_tokens = torch.ones((1, sequence_length), device=device).long() * 4096
        structure_tokens[:, 0] = 4098
        structure_tokens[:, -1] = 4097
        # Load backbone coordinates for the parent structure
        if not data.startswith("TrpB"):
            pdb_path = f"{data_root_path}/{data}/{data}.pdb"
        else:
            pdb_path = f"{data_root_path}/TrpB/TrpB.pdb"
        coords, residue_types = get_backbone_coords_from_local_pdb(pdb_path, chain_id='A', sequence_length=sequence_length-2, target=data)
        # Parent-sequence sanity check: compare the PDB residues against the FASTA
        # parent sequence at all non-padding positions
        coords_trimmed = coords[:, 1:-1]  # shape: (1, N-2, 3, 3)
        # Mask of non-padding residues (i.e., residues whose coords contain no inf)
        valid_mask = ~(torch.isinf(coords_trimmed).view(-1, 9).any(dim=1))  # shape: (N-2,)
        residues_to_compare = [r for r, valid in zip(list(parent_sequence_decoded), valid_mask) if valid]
        if residue_types != residues_to_compare:
            print("Residue mismatch detected!")
            # Use "pos", not "i", so the replicate index above is not clobbered
            for pos, (ref, pdb) in enumerate(zip(residues_to_compare, residue_types)):
                if ref != pdb:
                    print(f"Position {pos}: expected {ref}, got {pdb}")
        else:
            print("Residues match.")
        print(coords.shape)
        assert coords.shape[1] == sequence_length, f"Coords length {coords.shape[1]} does not match sequence length {sequence_length}"
        # Fixed (uninformative) values for the remaining ESM3 tracks
        average_plddt = torch.ones((1), device=device)
        per_res_plddt = torch.zeros((1, sequence_length), device=device)
        ss8_tokens = torch.zeros((1, sequence_length), device=device).long()
        sasa_tokens = torch.zeros((1, sequence_length), device=device).long()
        function_tokens = torch.zeros((1, sequence_length, 8), device=device).long()
        residue_annotation_tokens = torch.zeros((1, sequence_length, 16), device=device).long()
        # Write the alignment dataset to HDF5; single-prompt tracks are stored once
        # and flagged as fixed via file attributes
        with h5py.File(f"./{data}/alignment_dataset_{campaign_number}_{dataset_size}_from_ESM3_{i}.hdf5", "w") as f:
            masked_sequence_tokens = f.create_dataset("masked_sequence_tokens", data=all_masked_sequences.cpu().numpy())
            unmasked_sequence_tokens = f.create_dataset("unmasked_sequence_tokens", data=all_unmasked_sequences.cpu().numpy())
            sequence_id = f.create_dataset("sequence_id", data=sequence_id.cpu().numpy())
            structure_tokens = f.create_dataset("structural_tokens", data=structure_tokens.cpu().numpy())
            coords = f.create_dataset("bb_coords", data=coords.cpu().numpy())
            average_plddt = f.create_dataset("average_plddt", data=average_plddt.cpu().numpy())
            per_res_plddt = f.create_dataset("per_res_plddt", data=per_res_plddt.cpu().numpy())
            ss8_tokens = f.create_dataset("ss8_tokens", data=ss8_tokens.cpu().numpy())
            sasa_tokens = f.create_dataset("sasa_tokens", data=sasa_tokens.cpu().numpy())
            function_tokens = f.create_dataset("function_tokens", data=function_tokens.cpu().numpy())
            residue_annotation_tokens = f.create_dataset("residue_annotation_tokens", data=residue_annotation_tokens.cpu().numpy())
            ref_logps = f.create_dataset("ref_logps", data=all_logps.cpu().numpy())
            energies = f.create_dataset("energies", data=all_fitness_scores)
            f.attrs["num_prompts"] = 1
            f.attrs["num_examples_per_prompt"] = masked_sequence_tokens.shape[0]
            f.attrs["fixed_bb_coords"] = True
            f.attrs["fixed_average_plddt"] = True
            f.attrs["fixed_per_res_plddt"] = True
            f.attrs["fixed_ss8_tokens"] = True
            f.attrs["fixed_sasa_tokens"] = True
            f.attrs["fixed_function_tokens"] = True
            f.attrs["fixed_residue_annotation_tokens"] = True
            f.attrs["fixed_structural_tokens"] = True
            f.attrs["sampling_temperature"] = sampling_temperature