import os
import re
import argparse

import torch
import numpy as np
import pandas as pd
import h5py
from omegaconf import OmegaConf
import lightning as L
from pera.nn import BidirectionalModel, sample_components_from_bidirectional_transformer, sample_perturbations, sample_embedding_perturbations
from esm.tokenization.sequence_tokenizer import EsmSequenceTokenizer
from Bio.Seq import Seq
from Bio.PDB import PDBList, PDBParser, is_aa


device = torch.device("cuda:0")

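# Three-letter to one-letter amino-acid code mapping (including non-standard residues).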
three_to_one = {
    'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D',
    'CYS': 'C', 'GLN': 'Q', 'GLU': 'E', 'GLY': 'G',
    'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
    'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',
    'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V',
    'SEC': 'U', 'PYL': 'O', 'ASX': 'B', 'GLX': 'Z',
    'XLE': 'J', 'UNK': 'X'
}

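# Parse backbone N/CA/C coordinates and residue identities from one chain of a PDB file,
# padding both ends (and, for some targets, extra positions) with inf rows so the
# coordinates line up with the tokenized parent sequence.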
def get_backbone_coords_from_local_pdb(pdb_path, chain_id='A', sequence_length=None, target="data", device=device):
    """
    Load backbone coordinates and residue types from a local PDB file.

    Returns:
        coords_tensor: torch.Tensor of shape (1, N, 3, 3)
        residue_types: List of one-letter residue codes
    """
    parser = PDBParser(QUIET=True)
    structure = parser.get_structure("local_structure", pdb_path)

    coords = []
    residue_types = []
    model = structure[0]

    if chain_id not in model:
        raise ValueError(f"Chain {chain_id} not found in {pdb_path}")

    chain = model[chain_id]

    for residue in chain:
        if sequence_length is not None and len(coords) >= sequence_length:
            break
        if not is_aa(residue):
            continue
        try:
            n = residue['N'].get_coord()
            ca = residue['CA'].get_coord()
            c = residue['C'].get_coord()
            coords.append([n, ca, c])
            resname = residue.get_resname().upper()
            residue_types.append(three_to_one.get(resname, 'X'))
        except KeyError:
            # Skip residues with missing backbone atoms.
            continue

    if not coords:
        raise ValueError("No residues with complete backbone atoms found.")

    # Pad both ends with inf rows; these positions correspond to the BOS/EOS tokens of the
    # tokenized sequence and are treated as invalid coordinates downstream.
    pad = [[float('inf')] * 3, [float('inf')] * 3, [float('inf')] * 3]
    coords.insert(0, pad)
    coords.append(pad)

    # Extra padding for specific targets, presumably where the PDB covers fewer residues
    # than the FASTA sequence.
    if target == "ParD2":
        coords = [pad, pad] + coords + [pad, pad]
    elif target == "ParD3":
        coords = [pad] * 2 + coords + [pad] * 6
    elif target == "TrpB4":
        coords = [pad] + coords

    coords_tensor = torch.tensor(coords, device=device).unsqueeze(0)

    return coords_tensor, residue_types

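# Tokenizer used to encode the parent sequence and decode sampled sequences.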
sequence_tokenizer = EsmSequenceTokenizer()

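# Command-line options: target dataset, number of unique samples to generate,
# alignment round, model version, and replicate.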
parser = argparse.ArgumentParser(description="Calculating the log-likelihood of a sequence")
parser.add_argument('--target', type=str, required=True, help='Dataset as a string')
parser.add_argument('--num_samples', type=int, required=False, default=384, help='Number of samples to process (default: 384)')
parser.add_argument('--alignment_round', type=int, required=False, default=1, help='Alignment round as an integer')
parser.add_argument('--version_number', type=str, required=False, default="1", help='Version number as a string')
parser.add_argument('--replicate', type=int, required=False, default=1, help='Replicate number as an integer')
args = parser.parse_args()

target = args.target
alignment_round = args.alignment_round
version_number = args.version_number
num_samples = args.num_samples
replicate = args.replicate

cfg_filename = f"{target}/lightning_logs/{version_number}/config.yaml"
network_filename = f"{target}/lightning_logs/{version_number}/checkpoints/best_model.ckpt"
save_folder_name = f"{target}/aligned_{alignment_round}_{num_samples}_{replicate}"

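# Load the training config, set the sampling temperature, and restore the fine-tuned
# bidirectional model from its checkpoint; special-token ids come from the config.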
cfg = OmegaConf.load(cfg_filename)
sampling_temperature = 1
OmegaConf.update(cfg, "train.lightning_model_args.sampling_temperature", sampling_temperature)
esm_model = BidirectionalModel(cfg["nn"]["model"],
                               cfg["nn"]["model_args"],
                               **cfg["train"]["lightning_model_args"]).to(device)
esm_model.load_model_from_ckpt(network_filename)
esm_model.eval()

mask_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["mask"]
bos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["bos"]
eos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["eos"]
pad_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["pad"]

os.makedirs(save_folder_name, exist_ok=True)

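# Sequences generated by the base model and by all earlier alignment rounds for this
# replicate; new samples are checked against these to avoid duplicates.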
past_generations = [f"{target}/base_model_{num_samples}"]
for i in range(alignment_round):
    past_generations.append(f"{target}/aligned_{i}_{num_samples}_{replicate}")

previous_unmasked_sequences_decoded = []

for round_dir in past_generations:
    trpb = torch.load(f"{round_dir}/trpb_{replicate}.pt")
    previous_unmasked_sequences_decoded.extend(trpb['all_unmasked_sequences_decoded'])

assert len(previous_unmasked_sequences_decoded) == len(set(previous_unmasked_sequences_decoded)), "There are duplicate sequences in previous_unmasked_sequences_decoded"
print("All elements in previous_unmasked_sequences_decoded are unique.")

data = target
data_root_path = "/global/cfs/cdirs/m4235/sebastian/data"

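# Read the dataset CSV and the parent sequence from the target's FASTA (the DHFR FASTA
# holds a nucleotide sequence, so it is translated to amino acids first).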
if data.startswith("TrpB"):
    df = pd.read_csv(f"{data_root_path}/TrpB/scale2max/{data}.csv")
    with open(f"{data_root_path}/TrpB/TrpB.fasta", "r") as file:
        parent_sequence_decoded = file.readlines()[1].strip()

elif data == "DHFR":
    df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
    with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
        nucleotide_seq = file.readlines()[1].strip()
    nucleotide_seq = Seq(nucleotide_seq)
    parent_sequence_decoded = str(nucleotide_seq.translate())

else:
    df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
    with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
        parent_sequence_decoded = file.readlines()[1].strip()

if data != "GB1":
    muts = df["muts"].iloc[0]
else:
    muts = df["muts"].iloc[100000]

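# Mask positions are the mutated sites parsed from the 'muts' string; they index directly
# into the tokenized sequence, which carries a BOS token at position 0.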
numbers = re.findall(r'\d+', muts)
mask_indices = list(map(int, numbers))
num_masks_per_sequence = num_samples // 4
num_to_generate_per_mask = 4

parent_sequence = torch.tensor(sequence_tokenizer.encode(parent_sequence_decoded,
                                                         add_special_tokens=True), device=device).unsqueeze(0).long()
sequence_length = parent_sequence.shape[1]

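# Accumulators for the masked inputs, the sampled sequences (token ids and decoded strings),
# and their summed log-probabilities.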
all_masked_sequences = []
all_unmasked_sequences_decoded = []
all_unmasked_sequences = []
all_logps = []

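# Sample batches of num_to_generate_per_mask sequences until num_samples unique,
# previously unseen sequences have been collected.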
while len(all_unmasked_sequences_decoded) < num_samples:

    print(len(all_unmasked_sequences_decoded))

    # Copy the parent sequence and mask the mutated positions in every copy.
    masked_sequences = parent_sequence.clone().repeat(num_to_generate_per_mask, 1)
    masked_sequences[:, mask_indices] = mask_token_sequence

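    # sequence_id marks every position as valid; structure tokens are filled with 4096
    # and bounded by 4098/4097 (presumably the structure-track mask/BOS/EOS ids).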
    sequence_id = torch.ones((num_to_generate_per_mask, sequence_length), device=device).long()

    structure_tokens = torch.ones((num_to_generate_per_mask, sequence_length), device=device).long() * 4096
    structure_tokens[:, 0] = 4098
    structure_tokens[:, -1] = 4097

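    # Backbone coordinates for the target structure (all TrpB variants share one PDB);
    # sequence_length - 2 excludes the BOS/EOS positions added during tokenization.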
    pdb_path = f"{data_root_path}/TrpB/TrpB.pdb" if data.startswith("TrpB") else f"{data_root_path}/{data}/{data}.pdb"
    coords, residue_types = get_backbone_coords_from_local_pdb(pdb_path, chain_id='A', sequence_length=sequence_length - 2, target=data)

    # Drop the first/last padding rows; any remaining inf rows (extra target-specific padding)
    # are filtered out via valid_mask.
    coords_trimmed = coords[:, 1:-1]

    valid_mask = ~(torch.isinf(coords_trimmed).view(-1, 9).any(dim=1))
    residues_to_compare = [r for r, valid in zip(list(parent_sequence_decoded), valid_mask) if valid]

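    # Sanity check: residues parsed from the PDB should match the parent sequence at
    # positions with finite (non-padding) coordinates.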
    if residue_types != residues_to_compare:
        print("Residue mismatch detected!")
        for i, (ref, pdb) in enumerate(zip(residues_to_compare, residue_types)):
            if ref != pdb:
                print(f"Position {i}: expected {ref}, got {pdb}")
    else:
        print("Residues match.")
    print(coords.shape)

    assert coords.shape[1] == sequence_length, f"Coords length {coords.shape[1]} does not match sequence length {sequence_length}"

    # Broadcast the single structure to the batch and build constant conditioning tracks:
    # uniform pLDDT plus all-zero per-residue pLDDT, SS8, SASA, function, and residue-annotation tokens.
    coords = coords.repeat(num_to_generate_per_mask, 1, 1, 1)

    average_plddt = torch.ones((num_to_generate_per_mask), device=device)

    per_res_plddt = torch.zeros((num_to_generate_per_mask, sequence_length), device=device)
    ss8_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device).long()
    sasa_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device).long()

    function_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 8), device=device).long()
    residue_annotation_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 16), device=device).long()

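    # Fill the masked positions by sampling from the bidirectional transformer, conditioned
    # on the backbone coordinates and the constant tracks above.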
    with torch.no_grad():
        unmasked_sequences = sample_components_from_bidirectional_transformer(
            transformer_model=esm_model,
            masked_sequence_tokens=masked_sequences,
            structure_tokens=structure_tokens,
            average_plddt=average_plddt,
            per_res_plddt=per_res_plddt,
            ss8_tokens=ss8_tokens,
            sasa_tokens=sasa_tokens,
            function_tokens=function_tokens,
            residue_annotation_tokens=residue_annotation_tokens,
            bb_coords=coords,
            sequence_id=sequence_id,
            mask_token_sequence=mask_token_sequence,
            bos_token_sequence=bos_token_sequence,
            eos_token_sequence=eos_token_sequence,
            pad_token_sequence=pad_token_sequence,
            inference_batch_size=1)

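    # Score each sample: a single forward pass on the masked input gives per-position logits;
    # a sequence's log-probability is the sum of its token log-probs at the masked positions.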
    masked_indices = (masked_sequences == mask_token_sequence).float()
    logits = esm_model.nn(sequence_tokens=masked_sequences,
                          structure_tokens=structure_tokens,
                          average_plddt=average_plddt,
                          per_res_plddt=per_res_plddt,
                          ss8_tokens=ss8_tokens,
                          sasa_tokens=sasa_tokens,
                          function_tokens=function_tokens,
                          residue_annotation_tokens=residue_annotation_tokens,
                          sequence_id=sequence_id,
                          bb_coords=coords)["sequence_logits"].detach()
    logps = torch.nn.functional.log_softmax(logits / sampling_temperature, dim=-1)
    logps = torch.gather(logps, dim=-1, index=unmasked_sequences.unsqueeze(-1)).squeeze(-1)
    logps = (logps * masked_indices).sum(-1).detach()

    # Keep only sequences not already collected in this run and not seen in any previous round.
    decoded_seqs = [sequence.replace(" ", "") for sequence in sequence_tokenizer.batch_decode(unmasked_sequences[:, 1:-1])]
    for seq, logp, masked_seq, unmasked_seq in zip(decoded_seqs, logps, masked_sequences, unmasked_sequences):
        if seq in all_unmasked_sequences_decoded or seq in previous_unmasked_sequences_decoded:
            continue
        all_unmasked_sequences_decoded.append(seq)
        all_logps.append(logp)
        all_masked_sequences.append(masked_seq)
        all_unmasked_sequences.append(unmasked_seq)

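# Truncate to exactly num_samples sequences and stack the tensor results.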
all_unmasked_sequences_decoded = all_unmasked_sequences_decoded[:num_samples]
all_masked_sequences = all_masked_sequences[:num_samples]
all_unmasked_sequences = all_unmasked_sequences[:num_samples]
all_logps = all_logps[:num_samples]

all_masked_sequences = torch.stack(all_masked_sequences, dim=0)
all_unmasked_sequences = torch.stack(all_unmasked_sequences, dim=0)
all_logps = torch.stack(all_logps, dim=0)

# Save the sampled sequences and their log-probabilities; later rounds reload
# 'all_unmasked_sequences_decoded' from this file to avoid duplicates.
to_save = {"parent_sequence": parent_sequence,
           "all_masked_sequences": all_masked_sequences,
           "all_unmasked_sequences": all_unmasked_sequences,
           "all_unmasked_sequences_decoded": all_unmasked_sequences_decoded,
           "all_logps": all_logps}
torch.save(to_save, f"{save_folder_name}/trpb_{replicate}.pt")