import os

import gradio as gr
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import export_to_video

# Ensure dependencies are installed (a Hugging Face Space normally installs
# requirements.txt automatically, so this is just a safety net)
os.system("pip install -r requirements.txt")

# Model load (CPU compatible)
model_id = "stabilityai/stable-video-diffusion-img2vid"
pipe = StableVideoDiffusionPipeline.from_pretrained(
    model_id, torch_dtype=torch.float32  # use float32 on CPU
)
pipe.to("cpu")  # ensure the model runs on CPU


# Function to generate a video. Note: stable-video-diffusion-img2vid is an
# image-to-video model, so it is conditioned on an input image, not a text prompt.
def generate_video(image):
    image = image.resize((1024, 576))  # resolution the model was trained on
    # decode_chunk_size keeps VAE decoding memory-friendly on CPU
    frames = pipe(image, num_inference_steps=30, decode_chunk_size=2).frames[0]
    video_path = "generated_video.mp4"
    export_to_video(frames, video_path, fps=7)
    return video_path


# Gradio interface: image in, video out
iface = gr.Interface(fn=generate_video, inputs=gr.Image(type="pil"), outputs="video")

# Launch the Gradio server on the Hugging Face Space
iface.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)))
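
# Optional: a minimal standalone sketch of the same diffusers calls, handy for
# testing the pipeline locally without launching the Gradio server. The file
# names "sample.jpg" and "sample_video.mp4" are illustrative only.
#
#   from diffusers.utils import load_image
#   image = load_image("sample.jpg").resize((1024, 576))
#   frames = pipe(image, num_inference_steps=30, decode_chunk_size=2).frames[0]
#   export_to_video(frames, "sample_video.mp4", fps=7)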