ProtEnrich
Collection
ProtEnrich models and dataset • 9 items • Updated
from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM
import torch
# Load the ProGen2 tokenizer and base causal-LM encoder, plus the ProtEnrich
# head (all fetched from the Hugging Face Hub; trust_remote_code is required
# because both repos ship custom model code).
tokenizer = AutoTokenizer.from_pretrained('hugohrban/progen2-base', trust_remote_code=True)
encoder = AutoModelForCausalLM.from_pretrained("hugohrban/progen2-base", trust_remote_code=True)
protenrich = AutoModel.from_pretrained("SaeedLab/ProtEnrich-ProGen2", trust_remote_code=True)

# Example input: one protein sequence in one-letter amino-acid codes.
seqs = ["MKTFFVLLL"]
inputs = tokenizer(seqs, return_tensors="pt")

# Inference only — disable autograd while running the encoder.
# NOTE: the original snippet lost its indentation here; the two statements
# below must be inside the no_grad context for the script to parse at all.
with torch.no_grad():
    outputs = encoder(**inputs, output_hidden_states=True)
    # Mean-pool the last hidden layer over the token dimension (dim=0 after
    # selecting batch item 0) to get a single fixed-size sequence embedding.
    pooled = outputs.hidden_states[-1][0].mean(dim=0)

# Run the ProtEnrich head on the pooled embedding; pooled was produced under
# no_grad, so no gradients are tracked here either.
enriched = protenrich(pooled)

# The ProtEnrich output exposes enrichment/anchor/alignment embeddings plus
# structure and dynamics predictions as named attributes.
print('H enrich:', enriched.h_enrich)
print('H anchor:', enriched.h_anchor)
print('H algn:', enriched.h_algn)
print('Structure:', enriched.struct)
print('Dynamics:', enriched.dyn)