Muhammad Anas Akhtar committed · verified
Commit 9ad8324 · Parent(s): 3d80114

Create app.py

Files changed (1)
  1. app.py +39 -0
app.py ADDED
@@ -0,0 +1,39 @@
+import torch
+import gradio as gr
+from PIL import Image
+import scipy.io.wavfile as wavfile
+
+# Use a pipeline as a high-level helper
+from transformers import pipeline
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+
+caption_image = pipeline("image-to-text",
+                         model="Salesforce/blip-image-captioning-large", device=device)
+
+narrator = pipeline("text-to-speech",
+                    model="kakao-enterprise/vits-ljs")
+
+
+def generate_audio(text):
+    # Generate speech from the caption text
+    narrated_text = narrator(text)
+
+    # Save the audio to a WAV file
+    wavfile.write("output.wav", rate=narrated_text["sampling_rate"],
+                  data=narrated_text["audio"][0])
+    # Return the path to the saved audio file
+    return "output.wav"
+
+
+def caption_my_image(pil_image):
+    semantics = caption_image(images=pil_image)[0]["generated_text"]
+    return generate_audio(semantics)
+
+
+demo = gr.Interface(fn=caption_my_image,
+                    inputs=[gr.Image(label="Select Image", type="pil")],
+                    outputs=[gr.Audio(label="Image Caption")],
+                    title="@GenAILearniverse Project 8: Image Captioning",
+                    description="This application will be used to caption the image.")
+demo.launch()
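
For context, here is a minimal standalone sketch (not part of this commit) of what the two pipelines return, assuming the same checkpoints as app.py; "example.jpg" is a placeholder for any local test image. It is run separately rather than by importing app.py, since importing would execute demo.launch() and block.

    from transformers import pipeline
    from PIL import Image

    # Same checkpoints as app.py (assumption: these are downloaded on first use)
    captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
    tts = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")

    # The image-to-text pipeline returns a list of dicts with "generated_text"
    caption = captioner(images=Image.open("example.jpg"))[0]["generated_text"]
    print(caption)

    # The text-to-speech pipeline returns a dict with a batched audio array and
    # its sample rate, which is why app.py writes narrated_text["audio"][0]
    speech = tts(caption)
    print(speech["audio"].shape, speech["sampling_rate"])

With torch, gradio, transformers, scipy, and Pillow installed, running python app.py starts the Gradio interface locally; demo.launch() serves it until the process is stopped.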