era-directed-evolution/iterative_alignment_experiment_structure/create_alignment_dataset_second_round.py
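"""
Build the second-round alignment dataset for the iterative alignment experiments.

For each target dataset and replicate, this script:
  1. loads sequences sampled from the base model and from the previous round's
     aligned model,
  2. looks up each sampled variant's fitness in the experimental landscape CSV
     (unmeasured or invalid variants get a sentinel score of -2),
  3. converts fitness values to energies via -log(fitness),
  4. packages sequences, backbone coordinates, reference log-probabilities, and
     energies into an HDF5 alignment dataset for the next round of training.
"""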
import torch
import re
import pandas as pd
import numpy as np
import h5py
from omegaconf import OmegaConf
from esm.tokenization.sequence_tokenizer import EsmSequenceTokenizer
from Bio.PDB import PDBParser, is_aa

device = torch.device("cuda:0")
# Map 3-letter residue names to 1-letter codes (including non-standard/ambiguous codes)
three_to_one = {
    'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D',
    'CYS': 'C', 'GLN': 'Q', 'GLU': 'E', 'GLY': 'G',
    'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
    'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',
    'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V',
    'SEC': 'U', 'PYL': 'O', 'ASX': 'B', 'GLX': 'Z',
    'XLE': 'J', 'UNK': 'X'
}
def get_backbone_coords_from_local_pdb(pdb_path, chain_id='A', sequence_length=None, target="data", device=device):
    """
    Load backbone (N, CA, C) coordinates and residue types from a local PDB file.

    Returns:
        coords_tensor: torch.Tensor of shape (1, N, 3, 3)
        residue_types: list of one-letter residue codes
    """
    parser = PDBParser(QUIET=True)
    structure = parser.get_structure("local_structure", pdb_path)
    coords = []
    residue_types = []
    model = structure[0]
    if chain_id not in model:
        raise ValueError(f"Chain {chain_id} not found in {pdb_path}")
    chain = model[chain_id]
    for residue in chain:
        if sequence_length is not None and len(coords) >= sequence_length:
            break
        if not is_aa(residue):
            continue
        try:
            n = residue['N'].get_coord()
            ca = residue['CA'].get_coord()
            c = residue['C'].get_coord()
            coords.append([n, ca, c])
            resname = residue.get_resname().upper()
            residue_types.append(three_to_one.get(resname, 'X'))  # default to 'X' if unknown
        except KeyError:
            # Skip residues with incomplete backbone atoms
            continue
    if not coords:
        raise ValueError("No residues with complete backbone atoms found.")
    # Add infinity-padding before and after (these align with the BOS/EOS token positions)
    pad = [[float('inf')] * 3, [float('inf')] * 3, [float('inf')] * 3]
    coords.insert(0, pad)
    coords.append(pad)
    # Target-specific extra padding so the structure lines up with the tokenized sequence
    if target == "ParD2":
        coords = [pad, pad] + coords + [pad, pad]
    elif target == "ParD3":
        coords = [pad] * 2 + coords + [pad] * 6
    elif target == "TrpB4":
        coords = [pad] + coords
    coords_tensor = torch.tensor(coords, device=device).unsqueeze(0)  # (1, N, 3, 3)
    return coords_tensor, residue_types
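# Example call (hypothetical path; trims to the tokenized length minus BOS/EOS):
#   coords, residues = get_backbone_coords_from_local_pdb(
#       "/path/to/GB1.pdb", chain_id='A', sequence_length=seq_len - 2, target="GB1")
#   coords.shape  # (1, seq_len, 3, 3) after the inf-padding is added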
num_replicates = 10
campaign_number = 1  # change this according to the campaign of interest
dataset_size = 96  # change this according to the dataset size of interest
sequence_tokenizer = EsmSequenceTokenizer()
datasets = ["GB1", "TrpB4"]
data_root_path = "/global/cfs/projectdirs/m4235/sebastian/data"
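# Expected layout under data_root_path (inferred from the paths used below):
#   {data_root_path}/{data}/scale2max/{data}.csv   fitness landscape ("AAs", "muts", "fitness" columns)
#   {data_root_path}/{data}/{data}.fasta           parent sequence
#   {data_root_path}/{data}/{data}.pdb             parent structure
# TrpB* datasets share the {data_root_path}/TrpB/ directory.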
for data in datasets:
    print(data)
    for i in range(num_replicates):
        cfg_filename = "./config.yaml"
        cfg = OmegaConf.load(cfg_filename)
        sampling_temperature = 1
        OmegaConf.update(cfg, "train.lightning_model_args.sampling_temperature", sampling_temperature)
        mask_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["mask"]
        bos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["bos"]
        eos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["eos"]
        pad_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["pad"]
        if not data.startswith("TrpB"):
            df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
            with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
                parent_sequence_decoded = file.readlines()[1].strip()
        else:
            # All TrpB variants share the TrpB data directory
            df = pd.read_csv(f"{data_root_path}/TrpB/scale2max/{data}.csv")
            with open(f"{data_root_path}/TrpB/TrpB.fasta", "r") as file:
                parent_sequence_decoded = file.readlines()[1].strip()
        # Grab a mutation string to recover the mutated positions; for GB1 a row
        # deeper in the table is used.
        if data != "GB1":
            muts = df["muts"].iloc[0]
        else:
            muts = df["muts"].iloc[100000]
        numbers = re.findall(r'\d+', muts)
        mask_indices = list(map(int, numbers))
        # mask_indices = [i-1 for i in mask_indices]  # convert to 0-based indexing
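        # Example (hypothetical mutation string): re.findall(r'\d+', "V39D40G41V54")
        # yields ['39', '40', '41', '54'], so mask_indices = [39, 40, 41, 54].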
        fitness_scores = []
        # Load sequences sampled from the base model (base_model_{dataset_size})
        trpb_base = torch.load(f"./{data}/base_model_{dataset_size}/trpb_post_rd_{campaign_number-1}_{i}.pt")
        all_unmasked_sequences_decoded_base = trpb_base["all_unmasked_sequences_decoded"]
        all_unmasked_sequences_base = trpb_base["all_unmasked_sequences"]
        all_masked_sequences_base = trpb_base["all_masked_sequences"]
        all_unmasked_sequences_base = all_unmasked_sequences_base.reshape(-1, all_unmasked_sequences_base.shape[-1])
        all_logps_base = trpb_base["all_logps"]
        for unmasked_sequence_decoded, unmasked_sequence in zip(all_unmasked_sequences_decoded_base, all_unmasked_sequences_base):
            # Read out the residues at the mutated positions (1-based indices)
            index_residue_0 = unmasked_sequence_decoded[mask_indices[0] - 1]
            index_residue_1 = unmasked_sequence_decoded[mask_indices[1] - 1]
            index_residue_2 = unmasked_sequence_decoded[mask_indices[2] - 1]
            try:
                # Four-site landscapes have a fourth mutated position
                index_residue_3 = unmasked_sequence_decoded[mask_indices[3] - 1]
                mutations = [index_residue_0, index_residue_1, index_residue_2, index_residue_3]
            except IndexError:
                # Three-site landscapes only have three
                mutations = [index_residue_0, index_residue_1, index_residue_2]
            muts = ''.join(mutations)
            df_filtered = df[df["AAs"] == muts]
            if len(df_filtered) == 0:
                # Not in the landscape: either the sequence contains invalid tokens or the
                # variant was never measured; both cases get a sentinel score.
                print(f"Invalid sequence {muts}")
                fitness_score = -2
            else:
                fitness_score = df_filtered["fitness"].values[0]
            fitness_scores.append(fitness_score)
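        # Repeat the scoring for sequences sampled from the previous round's aligned
        # model; base and aligned samples are pooled into one mixed dataset below.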
        trpb_aligned = torch.load(f"./{data}/aligned_{campaign_number-1}_{dataset_size}_{i}/trpb_{i}.pt")
        all_unmasked_sequences_decoded_aligned_0 = trpb_aligned["all_unmasked_sequences_decoded"]
        all_unmasked_sequences_aligned_0 = trpb_aligned["all_unmasked_sequences"]
        all_masked_sequences_aligned_0 = trpb_aligned["all_masked_sequences"]
        all_unmasked_sequences_aligned_0 = all_unmasked_sequences_aligned_0.reshape(-1, all_unmasked_sequences_aligned_0.shape[-1])
        all_logps_aligned_0 = trpb_aligned["all_logps"]
        for unmasked_sequence_decoded, unmasked_sequence in zip(all_unmasked_sequences_decoded_aligned_0, all_unmasked_sequences_aligned_0):
            index_residue_0 = unmasked_sequence_decoded[mask_indices[0] - 1]
            index_residue_1 = unmasked_sequence_decoded[mask_indices[1] - 1]
            index_residue_2 = unmasked_sequence_decoded[mask_indices[2] - 1]
            try:
                index_residue_3 = unmasked_sequence_decoded[mask_indices[3] - 1]
                mutations = [index_residue_0, index_residue_1, index_residue_2, index_residue_3]
            except IndexError:
                mutations = [index_residue_0, index_residue_1, index_residue_2]
            muts = ''.join(mutations)
            df_filtered = df[df["AAs"] == muts]
            if len(df_filtered) == 0:
                print(f"Invalid sequence {muts}")
                fitness_score = -2
            else:
                fitness_score = df_filtered["fitness"].values[0]
            fitness_scores.append(fitness_score)
        # Concatenate the sequences and logps from both models
        all_unmasked_sequences = torch.cat((all_unmasked_sequences_base, all_unmasked_sequences_aligned_0), dim=0)
        all_masked_sequences = torch.cat((all_masked_sequences_base, all_masked_sequences_aligned_0), dim=0)
        print(all_logps_base.shape, all_logps_aligned_0.shape)
        all_logps = torch.cat((all_logps_base, all_logps_aligned_0), dim=0)
        all_fitness_scores = fitness_scores
        # Check for duplicates in all_unmasked_sequences
        unique_sequences, counts = torch.unique(all_unmasked_sequences, dim=0, return_counts=True)
        num_duplicates = torch.sum(counts > 1).item()
        print(f"Number of duplicate sequences: {num_duplicates}")
        all_fitness_scores = np.array(all_fitness_scores)
        all_fitness_scores = np.where(all_fitness_scores > 0, -np.log(all_fitness_scores), 10)
        sampling_temperature = 1  # hard-code a sampling temperature of 1 for mixed-temperature alignment
        sequence_length = all_unmasked_sequences.shape[1]
        sequence_id = torch.ones((all_unmasked_sequences.shape[0], sequence_length), device=device).long()
        # Structure track: mask token (4096) everywhere, with BOS (4098) and EOS (4097)
        # at the ends (ESM3's structure-token vocabulary).
        structure_tokens = torch.ones((1, sequence_length), device=device).long() * 4096
        structure_tokens[:, 0] = 4098
        structure_tokens[:, -1] = 4097
        if not data.startswith("TrpB"):
            coords, residue_types = get_backbone_coords_from_local_pdb(
                f"{data_root_path}/{data}/{data}.pdb", chain_id='A',
                sequence_length=sequence_length - 2, target=data,
            )
        else:
            coords, residue_types = get_backbone_coords_from_local_pdb(
                f"{data_root_path}/TrpB/TrpB.pdb", chain_id='A',
                sequence_length=sequence_length - 2, target=data,
            )
        # Parent-sequence sanity check: compare PDB residues with the FASTA parent sequence
        coords_trimmed = coords[:, 1:-1]  # shape: (1, N-2, 3, 3)
        # Mask of non-padding residues (i.e., not all coords are inf)
        valid_mask = ~(torch.isinf(coords_trimmed).view(-1, 9).any(dim=1))  # shape: (N-2,)
        residues_to_compare = [r for r, valid in zip(list(parent_sequence_decoded), valid_mask) if valid]
        if residue_types != residues_to_compare:
            print("Residue mismatch detected!")
            # Use a distinct loop variable here: reusing `i` would clobber the replicate index
            for pos, (ref, pdb) in enumerate(zip(residues_to_compare, residue_types)):
                if ref != pdb:
                    print(f"Position {pos}: expected {ref}, got {pdb}")
        else:
            print("Residues match.")
        print(coords.shape)
        assert coords.shape[1] == sequence_length, f"Coords length {coords.shape[1]} does not match sequence length {sequence_length}"
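        # The remaining ESM3 input tracks are filled with constant placeholders; the
        # "fixed_*" HDF5 attributes written below appear to record that these tracks
        # are shared across all examples in the file.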
        average_plddt = torch.ones((1), device=device)
        per_res_plddt = torch.zeros((1, sequence_length), device=device)
        ss8_tokens = torch.zeros((1, sequence_length), device=device).long()
        sasa_tokens = torch.zeros((1, sequence_length), device=device).long()
        function_tokens = torch.zeros((1, sequence_length, 8), device=device).long()
        residue_annotation_tokens = torch.zeros((1, sequence_length, 16), device=device).long()
        with h5py.File(f"./{data}/alignment_dataset_{campaign_number}_{dataset_size}_from_ESM3_{i}.hdf5", "w") as f:
            masked_sequence_tokens = f.create_dataset("masked_sequence_tokens", data=all_masked_sequences.cpu().numpy())
            unmasked_sequence_tokens = f.create_dataset("unmasked_sequence_tokens", data=all_unmasked_sequences.cpu().numpy())
            sequence_id = f.create_dataset("sequence_id", data=sequence_id.cpu().numpy())
            structure_tokens = f.create_dataset("structural_tokens", data=structure_tokens.cpu().numpy())
            coords = f.create_dataset("bb_coords", data=coords.cpu().numpy())
            average_plddt = f.create_dataset("average_plddt", data=average_plddt.cpu().numpy())
            per_res_plddt = f.create_dataset("per_res_plddt", data=per_res_plddt.cpu().numpy())
            ss8_tokens = f.create_dataset("ss8_tokens", data=ss8_tokens.cpu().numpy())
            sasa_tokens = f.create_dataset("sasa_tokens", data=sasa_tokens.cpu().numpy())
            function_tokens = f.create_dataset("function_tokens", data=function_tokens.cpu().numpy())
            residue_annotation_tokens = f.create_dataset("residue_annotation_tokens", data=residue_annotation_tokens.cpu().numpy())
            ref_logps = f.create_dataset("ref_logps", data=all_logps.cpu().numpy())
            energies = f.create_dataset("energies", data=all_fitness_scores)
            f.attrs["num_prompts"] = 1
            f.attrs["num_examples_per_prompt"] = masked_sequence_tokens.shape[0]
            f.attrs["fixed_bb_coords"] = True
            f.attrs["fixed_average_plddt"] = True
            f.attrs["fixed_per_res_plddt"] = True
            f.attrs["fixed_ss8_tokens"] = True
            f.attrs["fixed_sasa_tokens"] = True
            f.attrs["fixed_function_tokens"] = True
            f.attrs["fixed_residue_annotation_tokens"] = True
            f.attrs["fixed_structural_tokens"] = True
            f.attrs["sampling_temperature"] = sampling_temperature