Upload model.py with huggingface_hub
model.py
ADDED
@@ -0,0 +1,132 @@
from transformers import PretrainedConfig, PreTrainedModel, AutoModel, AutoConfig
import torch
import os
import json
from huggingface_hub import snapshot_download


class IndicASRConfig(PretrainedConfig):
    model_type = "iasr"

    def __init__(self, ts_folder: str = "path", BLANK_ID: int = 256, RNNT_MAX_SYMBOLS: int = 10,
                 PRED_RNN_LAYERS: int = 2, PRED_RNN_HIDDEN_DIM: int = 640, SOS: int = 5632, **kwargs):
        super().__init__(**kwargs)
        self.ts_folder = ts_folder
        self.BLANK_ID = BLANK_ID
        self.RNNT_MAX_SYMBOLS = RNNT_MAX_SYMBOLS
        self.PRED_RNN_LAYERS = PRED_RNN_LAYERS
        self.PRED_RNN_HIDDEN_DIM = PRED_RNN_HIDDEN_DIM
        self.SOS = SOS


class IndicASRModel(PreTrainedModel):
    config_class = IndicASRConfig

    def __init__(self, config):
        super().__init__(config)
        # self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Load the TorchScript model components
        self.models = {}
        names = ['preprocessor', 'encoder', 'ctc_decoder', 'rnnt_decoder', 'joint_enc', 'joint_pred', 'joint_pre_net'] + \
                [f'joint_post_net_{z}' for z in ['as', 'bn', 'brx', 'doi', 'gu', 'hi', 'kn', 'kok', 'ks', 'mai', 'ml', 'mni', 'mr', 'ne', 'or', 'pa', 'sa', 'sat', 'sd', 'ta', 'te', 'ur']]

        for n in names:
            component_name = f'{config.ts_folder}/assets/{n}.ts'
            if os.path.exists(component_name):
                self.models[n] = torch.jit.load(component_name)
            else:
                self.models[n] = None
                print(f'Failed to load {component_name}')

        # Load vocab and language masks
        with open(f'{config.ts_folder}/assets/vocab.json') as reader:
            self.vocab = json.load(reader)

        with open(f'{config.ts_folder}/assets/language_masks.json') as reader:
            self.language_masks = json.load(reader)

    def forward(self, wav, lang, decoding='ctc'):
        encoder_outputs, encoded_lengths = self.encode(wav)
        if decoding == 'ctc':
            return self._ctc_decode(encoder_outputs, encoded_lengths, lang)
        if decoding == 'rnnt':
            return self._rnnt_decode(encoder_outputs, encoded_lengths, lang)
        raise ValueError(f'Unsupported decoding strategy: {decoding}')

    def encode(self, wav):
        audio_signal, length = self.models['preprocessor'](input_signal=wav, length=torch.tensor([wav.shape[-1]]))
        outputs, encoded_lengths = self.models['encoder'](audio_signal=audio_signal, length=length)
        return outputs, encoded_lengths

    def _ctc_decode(self, encoder_outputs, encoded_lengths, lang):
        # Greedy CTC decoding over the language-specific slice of the vocabulary
        logprobs = self.models['ctc_decoder'](encoder_output=encoder_outputs)
        logprobs = logprobs[:, :, self.language_masks[lang]].log_softmax(dim=-1)
        indices = torch.argmax(logprobs[0], dim=-1)
        collapsed_indices = torch.unique_consecutive(indices, dim=-1)
        return ''.join([self.vocab[lang][x] for x in collapsed_indices if x != self.config.BLANK_ID]).replace('▁', ' ').strip()

    def _rnnt_decode(self, encoder_outputs, encoded_lengths, lang):
        # Greedy RNN-T decoding: for each encoder frame, emit symbols until blank
        # or until RNNT_MAX_SYMBOLS symbols have been added
        joint_enc = self.models['joint_enc'](encoder_outputs.transpose(1, 2))
        hyp = [self.config.SOS]
        prev_dec_state = (torch.zeros(self.config.PRED_RNN_LAYERS, 1, self.config.PRED_RNN_HIDDEN_DIM),
                          torch.zeros(self.config.PRED_RNN_LAYERS, 1, self.config.PRED_RNN_HIDDEN_DIM))

        for t in range(joint_enc.size(1)):
            f = joint_enc[:, t, :].unsqueeze(1)
            not_blank = True
            symbols_added = 0

            while not_blank and ((self.config.RNNT_MAX_SYMBOLS is None) or (symbols_added < self.config.RNNT_MAX_SYMBOLS)):
                g, _, dec_state = self.models['rnnt_decoder'](targets=torch.tensor([[hyp[-1]]]).long(), target_length=torch.tensor([1]), states=prev_dec_state)
                g = self.models['joint_pred'](g.transpose(1, 2))
                joint_out = f + g
                joint_out = self.models['joint_pre_net'](joint_out)
                logits = self.models[f'joint_post_net_{lang}'](joint_out)
                log_probs = logits.log_softmax(dim=-1)
                pred_token = log_probs.argmax(dim=-1).item()

                if pred_token == self.config.BLANK_ID:
                    not_blank = False
                else:
                    hyp.append(pred_token)
                    prev_dec_state = dec_state
                symbols_added += 1

        return ''.join([self.vocab[lang][x] for x in hyp if x != self.config.SOS]).replace('▁', ' ').strip()

    def _save_pretrained(self, save_directory) -> None:
        # Serialize the TorchScript components
        os.makedirs(f'{save_directory}/assets', exist_ok=True)
        for m_name, m in self.models.items():
            if m is not None:
                m.save(os.path.join(save_directory, 'assets', m_name + '.ts'))

        # Save the vocab
        with open(f'{save_directory}/assets/vocab.json', 'w') as writer:
            json.dump(self.vocab, writer)

        # Save the language masks
        with open(f'{save_directory}/assets/language_masks.json', 'w') as writer:
            json.dump(self.language_masks, writer)

    @classmethod
    def from_pretrained(cls,
                        pretrained_model_name_or_path,
                        *,
                        force_download=False,
                        resume_download=None,
                        proxies=None,
                        token=None,
                        cache_dir=None,
                        local_files_only=False,
                        revision=None, **kwargs):
        # Download the repo snapshot (TorchScript assets + JSON files) and point the config at it
        loc = snapshot_download(repo_id=pretrained_model_name_or_path,
                                revision=revision,
                                cache_dir=cache_dir,
                                force_download=force_download,
                                proxies=proxies,
                                token=token,
                                local_files_only=local_files_only)
        return cls(IndicASRConfig(ts_folder=loc))


if __name__ == '__main__':
    # Register the model so it can be used with AutoModel
    AutoConfig.register("iasr", IndicASRConfig)
    AutoModel.register(IndicASRConfig, IndicASRModel)
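For reference, a minimal usage sketch (not part of the uploaded file): it assumes this model.py is importable locally, that the TorchScript assets and JSON files live in a Hub repo (the repo id and audio path below are placeholders), and that torchaudio is available for loading and resampling audio to the 16 kHz mono waveform the preprocessor expects.

# Usage sketch under the assumptions stated above; repo id and file path are hypothetical.
import torch
import torchaudio
from model import IndicASRModel

model = IndicASRModel.from_pretrained("<username>/<indic-asr-repo>")
model.eval()

wav, sr = torchaudio.load("sample.wav")            # (channels, samples)
wav = wav.mean(dim=0, keepdim=True)                # mix down to mono -> (1, samples)
if sr != 16000:
    wav = torchaudio.functional.resample(wav, sr, 16000)

with torch.inference_mode():
    print(model(wav, "hi", decoding="ctc"))        # greedy CTC hypothesis for Hindi
    print(model(wav, "hi", decoding="rnnt"))       # greedy RNN-T hypothesis for Hindi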