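"""Gradio Space: Transcribe Mongolian Whisper.

Takes audio from a YouTube link, a microphone recording, or an uploaded file;
splits it into speaker turns with pyannote speaker diarization; further splits
long turns with pyannote voice-activity detection (VAD); transcribes each
segment with a Mongolian fine-tuned Whisper model; and streams the result as
numbered SubRip (.srt) cues.
"""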
import gradio as gr
import pytubefix as pt
import os, librosa, torch
from pyannote.audio import Pipeline
from transformers import pipeline, AutoProcessor, AutoModelForSpeechSeq2Seq
import spaces


def second_to_timecode(x: float) -> str:
    """Float x second to HH:MM:SS.DDD format."""
    hour, x = divmod(x, 3600)
    minute, x = divmod(x, 60)
    second, x = divmod(x, 1)
    millisecond = int(x * 1000.)

    return '%.2d:%.2d:%.2d,%.3d' % (hour, minute, second, millisecond)
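
# Example: second_to_timecode(3725.5) -> '01:02:05,500'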


def download_from_youtube(youtube_link: str) -> str:
    yt = pt.YouTube(youtube_link)
    available_streams = yt.streams.filter(only_audio=True)
    print('available streams:')
    print(available_streams)
    stream = available_streams.first()
    # The stream keeps its native container (typically mp4/webm); the .wav name
    # is only a label, and librosa can still decode the file later.
    stream.download(filename="audio.wav")
    return "audio.wav"


MODEL_NAME = 'bayartsogt/whisper-large-v2-mn-13'
# MODEL_NAME = 'Dorjzodovsuren/whisper-large-v3-turbo-mn-2'
lang = 'mn'  # Whisper language code for Mongolian

chunk_length_s = 9                  # ASR chunk length (seconds)
vad_activation_min_duration = 9     # run VAD on speaker turns longer than this (seconds)
device = 0 if torch.cuda.is_available() else "cpu"
SAMPLE_RATE = 16_000                # Whisper models expect 16 kHz mono audio

######## LOAD MODELS FROM HUB ########
# The pyannote models are gated: a Hugging Face token with access must be
# provided via the TOKEN environment variable.
dia_model = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=os.environ['TOKEN'])
vad_model = Pipeline.from_pretrained("pyannote/voice-activity-detection", use_auth_token=os.environ['TOKEN'])

# 'bayartsogt/whisper-large-v2-mn-13' ships its own processor; for the turbo
# checkpoint, fall back to the base openai/whisper-large-v3-turbo processor.
if MODEL_NAME == 'bayartsogt/whisper-large-v2-mn-13':
    processor = AutoProcessor.from_pretrained(MODEL_NAME)
else:
    processor = AutoProcessor.from_pretrained("openai/whisper-large-v3-turbo")

model = AutoModelForSpeechSeq2Seq.from_pretrained(MODEL_NAME)

asr_pipeline = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    chunk_length_s=chunk_length_s,
    device_map="auto",
)

# Always transcribe in Mongolian instead of relying on language auto-detection.
asr_pipeline.model.config.forced_decoder_ids = asr_pipeline.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
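
# Quick sanity check of the configured pipeline (hypothetical local file, not
# part of the app flow):
#   audio, _ = librosa.load("sample_mn.wav", sr=SAMPLE_RATE, mono=True)
#   print(asr_pipeline(audio)["text"])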

print("----------> Loaded models <-----------")

gpu_timeout = int(os.getenv("GPU_TIMEOUT", 60))
@spaces.GPU(duration=gpu_timeout)
def generator(youtube_link, microphone, file_upload, num_speakers, max_duration, history):

    if int(youtube_link != '') + int(microphone is not None) + int(file_upload is not None) != 1:
        raise Exception(f"Exactly one audio source must be provided; got youtube_link={youtube_link!r}, microphone={microphone!r}, file_upload={file_upload!r}")

    history = history or ""
    
    if microphone:
        path = microphone
    elif file_upload:
        path = file_upload
    elif youtube_link:
        path = download_from_youtube(youtube_link)
    
    waveform, sampling_rate = librosa.load(path, sr=SAMPLE_RATE, mono=True, duration=max_duration)

    print(waveform.shape, sampling_rate)
    waveform_tensor = torch.unsqueeze(torch.tensor(waveform), 0).to(device)

    # Who speaks when: run speaker diarization over the full waveform.
    dia_result = dia_model({
        "waveform": waveform_tensor,
        "sample_rate": sampling_rate,
    }, num_speakers=num_speakers)

    counter = 1
    
    # Iterate over diarized speaker turns; slice the waveform sample range for each.
    for speech_turn, track, speaker in dia_result.itertracks(yield_label=True):
        print(f"{speech_turn.start:4.1f} {speech_turn.end:4.1f} {speaker}")
        _start = int(sampling_rate * speech_turn.start)
        _end = int(sampling_rate * speech_turn.end)
        data = waveform[_start: _end]

        if speech_turn.end - speech_turn.start > vad_activation_min_duration:
            # Long turn: split it into speech regions with VAD and transcribe each region.
            print(f'audio duration {speech_turn.end - speech_turn.start} sec ----> activating VAD')
            vad_output = vad_model({
                'waveform': waveform_tensor[:, _start:_end],
                'sample_rate': sampling_rate})
            for vad_turn in vad_output.get_timeline().support():
                vad_start = _start + int(sampling_rate * vad_turn.start)
                vad_end = _start + int(sampling_rate * vad_turn.end)
                prediction = asr_pipeline(waveform[vad_start: vad_end])['text']
                # (Alternative cue body with speaker label: f">> {speaker}: {prediction}\n\n")
                history += f"{counter}\n" + \
                           f"{second_to_timecode(speech_turn.start + vad_turn.start)} --> {second_to_timecode(speech_turn.start + vad_turn.end)}\n" + \
                           f"{prediction}\n\n"
                yield history, history, None
                counter += 1

        else:
            # Short turn: transcribe the sliced segment directly.
            prediction = asr_pipeline(data)['text']
            # (Alternative cue body with speaker label: f">> {speaker}: {prediction}\n\n")
            history += f"{counter}\n" + \
                       f"{second_to_timecode(speech_turn.start)} --> {second_to_timecode(speech_turn.end)}\n" + \
                       f"{prediction}\n\n"
            counter += 1
            yield history, history, None
    
    # https://support.google.com/youtube/answer/2734698?hl=en#zippy=%2Cbasic-file-formats%2Csubrip-srt-example%2Csubviewer-sbv-example
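    # Example cue layout (illustrative timestamps):
    #   1
    #   00:00:01,280 --> 00:00:05,940
    #   <transcribed Mongolian text>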
    file_name = 'transcript.srt'
    with open(file_name, 'w', encoding='utf-8') as fp:
        fp.write(history)
    
    yield history, history, file_name


demo = gr.Interface(
    generator, 
    inputs=[
        gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
        gr.Audio(type="filepath"),
        gr.Audio(type="filepath"),
        gr.Number(value=1, label="Number of Speakers"),
        gr.Number(value=120, label="Maximum Duration (Seconds)"),
        'state',
    ],
    outputs=['text', 'state', 'file'],
    theme="huggingface",
    title="Transcribe Mongolian Whisper πŸ‡²πŸ‡³",
    description=(
        "Transcribe a YouTube video, a microphone recording, or an uploaded file with a Mongolian Whisper model." + \
        " | You can upload the resulting SubRip file (`.srt`) [to your YouTube video](https://support.google.com/youtube/answer/2734698?hl=en#zippy=%2Cbasic-file-formats)." + \
        " | Please REFRESH 🔄 the page after each transcription!" + \
        " | 🐦 [@_tsogoo_](https://twitter.com/_tsogoo_)" + \
        " | 🤗 [@bayartsogt](https://huggingface.co/bayartsogt)" + \
        ""
    ),
    allow_flagging="never",
)

# Enable the queue - required because the handler is a generator that streams output.
demo.queue()

demo.launch(debug=True)