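# Mongolian Whisper transcription Space:
# pyannote speaker diarization + VAD to segment the audio, a fine-tuned Whisper
# model for Mongolian ASR, and a Gradio Blocks UI that streams out an SRT transcript.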
import gradio as gr
import numpy as np
import pytubefix as pt
import os, time, librosa, torch
from pyannote.audio import Pipeline
from transformers import pipeline
import spaces

def second_to_timecode(x: float) -> str:
    """Convert seconds (float) to an SRT timecode, HH:MM:SS,mmm (e.g. 75.5 -> '00:01:15,500')."""
    hour, x = divmod(x, 3600)
    minute, x = divmod(x, 60)
    second, x = divmod(x, 1)
    millisecond = int(x * 1000.)
    return '%.2d:%.2d:%.2d,%.3d' % (hour, minute, second, millisecond)

def download_from_youtube(youtube_link: str) -> str:
    yt = pt.YouTube(youtube_link)
    available_streams = yt.streams.filter(only_audio=True)
    print('available streams:')
    print(available_streams)
    stream = available_streams.first()
    # , audio_codec='wav'
    # Note: the stream keeps its native codec; it is only saved under a .wav filename.
    stream.download(filename="audio.wav")
    return "audio.wav"
MODEL_NAME = 'Dorjzodovsuren/whisper-large-v2-mn'
#MODEL_NAME = 'Dorjzodovsuren/whisper-large-v3-turbo-mn-2'
lang = 'mn'
chunk_length_s = 9
vad_activation_min_duration = 9 # sec
device = 0 if torch.cuda.is_available() else "cpu"
SAMPLE_RATE = 16_000
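
# The pyannote pipelines below are gated on the Hugging Face Hub; a valid access
# token must be supplied through the TOKEN environment variable.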
######## LOAD MODELS FROM HUB ########
dia_model = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=os.environ['TOKEN'])
vad_model = Pipeline.from_pretrained("pyannote/voice-activity-detection", use_auth_token=os.environ['TOKEN'])
# Move the pyannote pipelines to GPU when one is available; otherwise they stay on CPU.
if torch.cuda.is_available():
    dia_model = dia_model.to(torch.device('cuda'))
    vad_model = vad_model.to(torch.device('cuda'))

from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq

if MODEL_NAME == 'Dorjzodovsuren/whisper-large-v2-mn':
    processor = AutoProcessor.from_pretrained(MODEL_NAME)
else:
    processor = AutoProcessor.from_pretrained("openai/whisper-large-v3-turbo")
model = AutoModelForSpeechSeq2Seq.from_pretrained(MODEL_NAME)

asr_pipeline = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    chunk_length_s=chunk_length_s,
    device_map="auto"
)
# Force Mongolian transcription instead of relying on language auto-detection.
asr_pipeline.model.config.forced_decoder_ids = asr_pipeline.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
print("----------> Loaded models <-----------")
gpu_timeout = int(os.getenv("GPU_TIMEOUT", 60))
@spaces.GPU(duration=gpu_timeout)
def generator(file_upload, num_speakers, max_duration, history):
    history = history or ""
    path = file_upload
    waveform, sampling_rate = librosa.load(path, sr=SAMPLE_RATE, mono=True, duration=max_duration)
    print(waveform.shape, sampling_rate)
    waveform_tensor = torch.unsqueeze(torch.tensor(waveform), 0).to(device)

    # Speaker diarization over the (optionally truncated) recording.
    dia_result = dia_model({
        "waveform": waveform_tensor,
        "sample_rate": sampling_rate,
    }, num_speakers=num_speakers)

    counter = 1
    for speech_turn, track, speaker in dia_result.itertracks(yield_label=True):
        print(f"{speech_turn.start:4.1f} {speech_turn.end:4.1f} {speaker}")
        _start = int(sampling_rate * speech_turn.start)
        _end = int(sampling_rate * speech_turn.end)
        data = waveform[_start: _end]

        if speech_turn.end - speech_turn.start > vad_activation_min_duration:
            # Long turns are split further with VAD so each ASR chunk stays short.
            print(f'audio duration {speech_turn.end - speech_turn.start} sec ----> activating VAD')
            vad_output = vad_model({
                'waveform': waveform_tensor[:, _start:_end],
                'sample_rate': sampling_rate})
            for vad_turn in vad_output.get_timeline().support():
                vad_start = _start + int(sampling_rate * vad_turn.start)
                vad_end = _start + int(sampling_rate * vad_turn.end)
                prediction = asr_pipeline(waveform[vad_start: vad_end])['text']
                history += f"{counter}\n" + \
                           f"{second_to_timecode(speech_turn.start + vad_turn.start)} --> {second_to_timecode(speech_turn.start + vad_turn.end)}\n" + \
                           f"{prediction}\n\n"
                # f">> {speaker}: {prediction}\n\n"
                yield history, history, None
                counter += 1
        else:
            prediction = asr_pipeline(data)['text']
            history += f"{counter}\n" + \
                       f"{second_to_timecode(speech_turn.start)} --> {second_to_timecode(speech_turn.end)}\n" + \
                       f"{prediction}\n\n"
            # f">> {speaker}: {prediction}\n\n"
            counter += 1
            yield history, history, None

    # SRT format reference:
    # https://support.google.com/youtube/answer/2734698?hl=en#zippy=%2Cbasic-file-formats%2Csubrip-srt-example%2Csubviewer-sbv-example
    file_name = 'transcript.srt'
    with open(file_name, 'w') as fp:
        fp.write(history)
    yield history, history, file_name
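
# Earlier gr.Interface definition, kept commented out for reference; the Blocks
# layout further below is the active UI.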
# demo = gr.Interface(
# generator,
# inputs=[
# gr.Audio(type="filepath"),
# gr.Number(value=1, label="Number of Speakers"),
# gr.Number(value=120, label="Maximum Duration (Seconds)"),
# 'state',
# ],
# outputs=['text', 'state', 'file'],
# title="Mongolian Whisper 🇲🇳",
# description=(
# "Transcribe Microphone / Uploaded File in Mongolian Whisper Model."
# )
# )

with gr.Blocks(title="Mongolian Whisper 🇲🇳") as demo:
    with gr.Column():  # everything in one vertical stack
        # ---- inputs ----
        audio_input = gr.Audio(type="filepath", label="Дуу")  # "Audio"
        with gr.Accordion("Нэмэлт тохиргоо", open=False):  # "Additional settings"
            speakers_input = gr.Number(value=1, label="Яриж буй нийт хүний тоо хэд вэ?")  # "How many speakers are there in total?"
            duration_input = gr.Slider(
                minimum=0,
                maximum=300,
                step=1,
                value=120,
                label="Дууны хамгийн урт хэмжээ (Seconds) хэд вэ?"  # "Maximum audio length (seconds)?"
            )
        state_input = gr.State()  # hidden, but part of the workflow

        # ---- outputs ----
        text_output = gr.Textbox(label="Текст хөрвүүлгэ")  # "Transcript"
        state_output = gr.State()  # hidden, passed through
        file_output = gr.File(label="Үр дүнгийн файл")  # "Result file"

    # hook it all up
    audio_input.change(
        fn=generator,
        inputs=[audio_input, speakers_input, duration_input, state_input],
        outputs=[text_output, state_output, file_output],
    )

# define queue - required for generators
demo.queue()
demo.launch(debug=True)