Upload model_ts.py with huggingface_hub
model_ts.py +106 -0
model_ts.py
ADDED
@@ -0,0 +1,106 @@
from transformers import PretrainedConfig, PreTrainedModel, AutoModel, AutoConfig
import torch
import os
import json
from huggingface_hub import snapshot_download


class IndicASRConfig(PretrainedConfig):
    """Configuration for the TorchScript-based IndicASR model.

    ts_folder points at the downloaded snapshot containing the exported
    TorchScript components under assets/; the remaining fields are greedy
    RNN-T decoding hyperparameters.
    """
    model_type = "iasr"

    def __init__(self, ts_folder: str = "path", BLANK_ID: int = 256, RNNT_MAX_SYMBOLS: int = 10,
                 PRED_RNN_LAYERS: int = 2, PRED_RNN_HIDDEN_DIM: int = 640, SOS: int = 256,
                 device: str = "cpu", **kwargs):
        super().__init__(**kwargs)
        self.ts_folder = ts_folder
        self.BLANK_ID = BLANK_ID
        self.RNNT_MAX_SYMBOLS = RNNT_MAX_SYMBOLS
        self.PRED_RNN_LAYERS = PRED_RNN_LAYERS
        self.PRED_RNN_HIDDEN_DIM = PRED_RNN_HIDDEN_DIM
        self.SOS = SOS
        # Device used for the TorchScript modules; defaults to CPU when no
        # device is supplied via the config.
        self.device = device


class IndicASRModel(PreTrainedModel):
    config_class = IndicASRConfig

    def __init__(self, config):
        super().__init__(config)

        # Load model components (TorchScript modules exported under assets/)
        self.models = {}
        component_names = ['preprocessor', 'encoder', 'ctc_decoder', 'rnnt_decoder',
                           'joint_enc', 'joint_pred', 'joint_pre_net']
        component_names += [f'joint_post_net_{lang}' for lang in [
            'as', 'bn', 'brx', 'doi', 'gu', 'hi', 'kn', 'kok', 'ks', 'mai', 'ml',
            'mni', 'mr', 'ne', 'or', 'pa', 'sa', 'sat', 'sd', 'ta', 'te', 'ur']]

        for name in component_names:
            component_path = os.path.join(config.ts_folder, 'assets', f'{name}.ts')
            if os.path.exists(component_path):
                self.models[name] = torch.jit.load(component_path, map_location=torch.device(self.config.device))
            else:
                self.models[name] = None
                print(f'Warning: {component_path} not found')

        # Load vocab and language masks
        with open(os.path.join(config.ts_folder, 'assets', 'vocab.json')) as f:
            self.vocab = json.load(f)

        with open(os.path.join(config.ts_folder, 'assets', 'language_masks.json')) as f:
            self.language_masks = json.load(f)

    def forward(self, wav, lang, decoding='ctc'):
        """Transcribe a waveform for the given language using greedy CTC or RNN-T decoding."""
        encoder_outputs, encoded_lengths = self.encode(wav)
        if decoding == 'ctc':
            return self._ctc_decode(encoder_outputs, encoded_lengths, lang)
        if decoding == 'rnnt':
            return self._rnnt_decode(encoder_outputs, encoded_lengths, lang)
        raise ValueError(f"Unknown decoding mode {decoding!r}; expected 'ctc' or 'rnnt'")

    def encode(self, wav):
        # Feature extraction followed by the Conformer encoder.
        audio_signal, length = self.models['preprocessor'](
            wav.to(torch.device(self.config.device)),
            torch.tensor([wav.shape[-1]]).to(torch.device(self.config.device)))
        outputs, encoded_lengths = self.models['encoder'](audio_signal, length)
        return outputs, encoded_lengths

    def _ctc_decode(self, encoder_outputs, encoded_lengths, lang):
        # Greedy CTC decoding: argmax over the language-restricted vocabulary,
        # collapse repeated tokens, then drop blanks.
        logprobs = self.models['ctc_decoder'](encoder_outputs)
        logprobs = logprobs[:, :, self.language_masks[lang]].log_softmax(dim=-1)
        indices = torch.argmax(logprobs[0], dim=-1)
        collapsed_indices = torch.unique_consecutive(indices, dim=-1)
        return ''.join([self.vocab[lang][x] for x in collapsed_indices if x != self.config.BLANK_ID]).replace('▁', ' ').strip()

    def _rnnt_decode(self, encoder_outputs, encoded_lengths, lang):
        # Greedy RNN-T decoding: for each encoder frame, keep emitting symbols
        # until the joint network predicts blank or RNNT_MAX_SYMBOLS is reached.
        joint_enc = self.models['joint_enc'](encoder_outputs.transpose(1, 2))
        hyp = [self.config.SOS]
        prev_dec_state = (torch.zeros(self.config.PRED_RNN_LAYERS, 1, self.config.PRED_RNN_HIDDEN_DIM).to(torch.device(self.config.device)),
                          torch.zeros(self.config.PRED_RNN_LAYERS, 1, self.config.PRED_RNN_HIDDEN_DIM).to(torch.device(self.config.device)))

        for t in range(joint_enc.size(1)):
            f = joint_enc[:, t, :].unsqueeze(1)
            not_blank = True
            symbols_added = 0

            while not_blank and (self.config.RNNT_MAX_SYMBOLS is None or symbols_added < self.config.RNNT_MAX_SYMBOLS):
                g, _, dec_state = self.models['rnnt_decoder'](
                    torch.tensor([[hyp[-1]]], dtype=torch.long).to(torch.device(self.config.device)),
                    torch.tensor([1]).to(torch.device(self.config.device)),
                    states=prev_dec_state)
                g = self.models['joint_pred'](g.transpose(1, 2))
                joint_out = f + g
                joint_out = self.models['joint_pre_net'](joint_out)
                logits = self.models[f'joint_post_net_{lang}'](joint_out)
                log_probs = logits.log_softmax(dim=-1)
                pred_token = log_probs.argmax(dim=-1).item()

                if pred_token == self.config.BLANK_ID:
                    not_blank = False
                else:
                    hyp.append(pred_token)
                    prev_dec_state = dec_state
                    symbols_added += 1

        return ''.join([self.vocab[lang][x] for x in hyp if x != self.config.SOS]).replace('▁', ' ').strip()

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # Download the repository snapshot (TorchScript assets included) and
        # build the model around it.
        loc = snapshot_download(repo_id=pretrained_model_name_or_path)
        return cls(IndicASRConfig(ts_folder=loc, **kwargs))


# Register model classes so AutoConfig/AutoModel can resolve model_type "iasr"
AutoConfig.register("iasr", IndicASRConfig)
AutoModel.register(IndicASRConfig, IndicASRModel)

if __name__ == '__main__':
    model_path = "ai4bharat/indic-conformer-600m-multilingual"
    model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
    print(f"Loaded model: {type(model)}")
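
For reference, a minimal usage sketch (not part of this commit): it assumes a recording at the hypothetical path audio.wav loaded with torchaudio, assumes the preprocessor expects 16 kHz mono audio, picks the Hindi language code 'hi' from the supported set above, and calls the model through the forward(wav, lang, decoding) signature defined in model_ts.py.

import torch
import torchaudio
from transformers import AutoModel

# Load audio, downmix to mono, and resample to 16 kHz (assumed input rate).
wav, sr = torchaudio.load("audio.wav")
wav = wav.mean(dim=0, keepdim=True)
if sr != 16000:
    wav = torchaudio.functional.resample(wav, sr, 16000)

model = AutoModel.from_pretrained(
    "ai4bharat/indic-conformer-600m-multilingual", trust_remote_code=True
)

with torch.inference_mode():
    ctc_text = model(wav, "hi", "ctc")    # greedy CTC decoding
    rnnt_text = model(wav, "hi", "rnnt")  # greedy RNN-T decoding

print("CTC:", ctc_text)
print("RNNT:", rnnt_text)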