"""Tokenization classes for ESM.""" |
|
import os
from typing import List, Optional

from huggingface_hub import hf_hub_download
from transformers import EsmTokenizer, PreTrainedTokenizer

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
|

def load_vocab_file(vocab_file: str) -> List[str]:
    """Read a vocabulary file into a list of stripped token strings."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]


class IsoformerTokenizer(PreTrainedTokenizer):
    """
    Constructs an Isoformer tokenizer, which wraps three `EsmTokenizer`
    instances: one each for the DNA, RNA, and protein vocabularies.
    """

    def __init__(self, **kwargs):
        # `from_pretrained` forwards the checkpoint id/path as `name_or_path`,
        # which is used here to locate the three vocabulary files.
        model_id = kwargs.get("name_or_path", None)

        if model_id:
            try:
                dna_vocab_path = hf_hub_download(repo_id=model_id, filename="dna_vocab_list.txt")
                rna_vocab_path = hf_hub_download(repo_id=model_id, filename="rna_vocab_list.txt")
                protein_vocab_path = hf_hub_download(repo_id=model_id, filename="protein_vocab_list.txt")
            except Exception as e:
                # `model_id` may be a local directory rather than a Hub repo id.
                print(f"Warning: failed to resolve model files via hf_hub_download; attempting local fallback. Error: {e}")
                dna_vocab_path = os.path.join(model_id, "dna_vocab_list.txt")
                rna_vocab_path = os.path.join(model_id, "rna_vocab_list.txt")
                protein_vocab_path = os.path.join(model_id, "protein_vocab_list.txt")
        else:
            print("Warning: could not determine model_id from kwargs; falling back to relative paths.")
            dna_vocab_path = "dna_vocab_list.txt"
            rna_vocab_path = "rna_vocab_list.txt"
            protein_vocab_path = "protein_vocab_list.txt"

        # DNA: long-context tokenizer (196,608 tokens) with BOS and EOS disabled.
        dna_hf_tokenizer = EsmTokenizer(dna_vocab_path, model_max_length=196608)
        dna_hf_tokenizer.eos_token = None
        dna_hf_tokenizer.init_kwargs["eos_token"] = None
        dna_hf_tokenizer.bos_token = None
        dna_hf_tokenizer.init_kwargs["bos_token"] = None

        # RNA: EOS disabled, BOS kept.
        rna_hf_tokenizer = EsmTokenizer(rna_vocab_path, model_max_length=1024)
        rna_hf_tokenizer.eos_token = None
        rna_hf_tokenizer.init_kwargs["eos_token"] = None

        protein_hf_tokenizer = EsmTokenizer(protein_vocab_path, model_max_length=1024)

        self.dna_tokenizer = dna_hf_tokenizer
        self.rna_tokenizer = rna_hf_tokenizer
        self.protein_tokenizer = protein_hf_tokenizer

        # Keep the raw token lists so `save_vocabulary` can round-trip them.
        self.dna_tokens = load_vocab_file(dna_vocab_path)
        self.rna_tokens = load_vocab_file(rna_vocab_path)
        self.protein_tokens = load_vocab_file(protein_vocab_path)

        super().__init__(**kwargs)

    def __call__(self, dna_input, rna_input, protein_input):
        # DNA is tokenized as-is; RNA and protein inputs are padded to their
        # fixed 1024-token context.
        dna_output = self.dna_tokenizer(dna_input)
        rna_output = self.rna_tokenizer(rna_input, max_length=1024, padding="max_length")
        protein_output = self.protein_tokenizer(protein_input, max_length=1024, padding="max_length")
        return dna_output, rna_output, protein_output

    def _add_tokens(self, *args, **kwargs):
        # Intentional no-op: this composite tokenizer has no vocabulary of its
        # own, so the base class must not add tokens here; the three
        # sub-tokenizers each manage their own vocabulary.
        pass

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None):
        prefix = filename_prefix + "-" if filename_prefix else ""
        vocab_file_dna = os.path.join(save_directory, prefix + "dna_vocab_list.txt")
        vocab_file_rna = os.path.join(save_directory, prefix + "rna_vocab_list.txt")
        vocab_file_protein = os.path.join(save_directory, prefix + "protein_vocab_list.txt")

        with open(vocab_file_dna, "w") as f:
            f.write("\n".join(self.dna_tokens))
        with open(vocab_file_rna, "w") as f:
            f.write("\n".join(self.rna_tokens))
        with open(vocab_file_protein, "w") as f:
            f.write("\n".join(self.protein_tokens))
        return (vocab_file_dna, vocab_file_rna, vocab_file_protein)
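

# ---------------------------------------------------------------------------
# Minimal usage sketch. This is illustrative only: the repo id below and the
# toy sequences are hypothetical placeholders, and it assumes the checkpoint
# ships the three *_vocab_list.txt files that __init__ expects.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # `from_pretrained` passes the checkpoint id through kwargs as
    # "name_or_path", which __init__ uses to fetch the vocabulary files.
    tokenizer = IsoformerTokenizer.from_pretrained("InstaDeepAI/isoformer")  # hypothetical repo id

    dna_out, rna_out, protein_out = tokenizer(
        dna_input="ATTCCGATTCCG",      # toy DNA sequence (placeholder)
        rna_input="AUUCCGAUUCCG",      # toy RNA sequence (placeholder)
        protein_input="MLKNVHVLAST",   # toy protein sequence (placeholder)
    )
    print(dna_out["input_ids"], rna_out["input_ids"])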