salomonsky committed on
Commit
8ae0bf1
·
verified ·
1 Parent(s): 829982b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -60
app.py CHANGED
@@ -10,49 +10,15 @@ from PIL import Image
10
  import uuid
11
  import random
12
  from huggingface_hub import hf_hub_download
13
- import spaces
14
- from tqdm import tqdm
15
 
16
  max_64_bit_int = 2**63 - 1
17
 
18
# Load the Stable Video Diffusion image-to-video pipeline.
# Weights are fetched in their fp16 variant to halve download size and memory.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "vdo/stable-video-diffusion-img2vid-xt-1-1",
    torch_dtype=torch.float16,
    variant="fp16",
)
# Park the model on CPU until a GPU worker is granted by @spaces.GPU.
# NOTE(review): fp16 inference on CPU is unsupported/very slow — confirm the
# pipeline is moved to "cuda" (pipe.to("cuda")) inside the GPU-decorated handler.
pipe.to("cpu")
20
@spaces.GPU(duration=120)
def sample(
    image: Image,
    seed: Optional[int] = 42,
    randomize_seed: bool = True,
    motion_bucket_id: int = 127,
    fps_id: int = 6,
    version: str = "svd_xt",
    cond_aug: float = 0.02,
    decoding_t: int = 3,
    device: str = "cuda",
    output_folder: str = "outputs",
    progress: gr.Progress = gr.Progress(),
):
    """Generate a 25-frame video from a single still image with SVD.

    Args:
        image: Input PIL image; RGBA inputs are flattened to RGB.
        seed: Base RNG seed; ignored when ``randomize_seed`` is True.
        randomize_seed: Draw a fresh random seed for this run.
        motion_bucket_id: SVD motion-intensity conditioning value.
        fps_id: Frame rate of the exported MP4.
        version: Unused here; kept for interface compatibility.
        cond_aug: Unused here; kept for interface compatibility.
        decoding_t: Frames decoded per VAE chunk (trades speed for memory).
        device: Unused here; kept for interface compatibility.
        output_folder: Directory where the MP4 is written.
        progress: Injected by Gradio — ``gr.Progress`` must be a trailing
            default parameter, never wired through ``inputs`` (the original
            first-positional placement could not be bound by Gradio).

    Returns:
        Tuple of (video file path, list of generated frames, seed used).
    """
    if image.mode == "RGBA":
        image = image.convert("RGB")

    if randomize_seed:
        seed = random.randint(0, max_64_bit_int)
    generator = torch.manual_seed(seed)

    os.makedirs(output_folder, exist_ok=True)
    base_count = len(glob(os.path.join(output_folder, "*.mp4")))
    video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")

    frames = []
    for i in tqdm(range(25), desc="Generando frames"):
        frame = pipe(
            image,
            decode_chunk_size=decoding_t,
            generator=generator,
            motion_bucket_id=motion_bucket_id,
            noise_aug_strength=0.1,
            num_frames=1,
        ).frames[0]
        frames.extend(frame)
        # gr.Progress instances are callable; ``.update()`` is not part of
        # its API (the original ``progress.update(i/25)`` raised at runtime).
        progress((i + 1) / 25)

    export_to_video(frames, video_path, fps=fps_id)
    # Re-seed so the global RNG state is reproducible after the run.
    torch.manual_seed(seed)

    return video_path, frames, seed
56
 
57
 
58
  def resize_image(image, output_size=(1024, 576)):
@@ -80,24 +46,55 @@ def resize_image(image, output_size=(1024, 576)):
80
  return cropped_image
81
 
82
 
83
- with gr.Blocks() as demo:
84
- with gr.Row():
85
- with gr.Column():
86
- image = gr.Image(label="Upload your image", type="pil")
87
- with gr.Accordion("Advanced options", open=False):
88
- seed = gr.Slider(label="Seed", value=42, randomize=True, minimum=0, maximum=max_64_bit_int, step=1)
89
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
90
- motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
91
- fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
92
- generate_btn = gr.Button(value="Animate", variant="primary")
93
- with gr.Column():
94
- video = gr.Video(label="Generated video")
95
- gallery = gr.Gallery(label="Generated frames")
96
- progress = gr.Progress(label="Progress")
97
-
98
- image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
99
- generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id, "svd_xt", 0.02, 3, "cuda", "outputs", progress], outputs=[video, gallery, seed, progress], api_name="video")
100
-
101
-
102
- if __name__ == "__main__":
103
- demo.launch(share=True, show_api=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  import uuid
11
  import random
12
  from huggingface_hub import hf_hub_download
13
+
 
14
 
15
  max_64_bit_int = 2**63 - 1
16
 
17
+
18
# Load the Stable Video Diffusion image-to-video pipeline (fp16 weight variant
# to halve download size and memory footprint).
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "vdo/stable-video-diffusion-img2vid-xt-1-1",
    torch_dtype=torch.float16,
    variant="fp16",
)
# NOTE(review): fp16 inference on CPU is unsupported/very slow — confirm the
# pipeline is moved to a CUDA device before generation, or load in float32.
pipe.to("cpu")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
 
24
  def resize_image(image, output_size=(1024, 576)):
 
46
  return cropped_image
47
 
48
 
49
# gr.Blocks is a context manager, not a decorator: ``@gr.Blocks()`` leaves
# ``demo`` as a plain function, so ``demo.launch()`` raises AttributeError.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Upload your image", type="pil")
            with gr.Accordion("Advanced options", open=False):
                seed = gr.Slider(label="Seed", value=42, randomize=True, minimum=0, maximum=max_64_bit_int, step=1)
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
                fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
            generate_btn = gr.Button(value="Animate", variant="primary")
        with gr.Column():
            video = gr.Video(label="Generated video")
            gallery = gr.Gallery(label="Generated frames")

    def resize_and_animate(image, seed, randomize_seed, motion_bucket_id, fps_id, progress=gr.Progress()):
        """Resize the input image, then generate a 25-frame SVD video.

        ``progress`` is injected by Gradio because it is a trailing
        ``gr.Progress()`` default — it is not a UI component and must not
        appear in the ``inputs``/``outputs`` lists of an event handler.

        Returns:
            (video file path, list of generated frames, seed used).
        """
        image = resize_image(image)
        if image.mode == "RGBA":
            image = image.convert("RGB")

        if randomize_seed:
            seed = random.randint(0, max_64_bit_int)
        generator = torch.manual_seed(seed)

        os.makedirs("outputs", exist_ok=True)
        base_count = len(glob(os.path.join("outputs", "*.mp4")))
        video_path = os.path.join("outputs", f"{base_count:06d}.mp4")

        frames = []
        for i in range(25):
            frame = pipe(
                image,
                decode_chunk_size=3,
                generator=generator,
                motion_bucket_id=motion_bucket_id,
                noise_aug_strength=0.1,
                num_frames=1,
            ).frames[0]
            frames.extend(frame)
            # gr.Progress instances are callable; ``.update()`` is not its API.
            progress((i + 1) / 25)

        export_to_video(frames, video_path, fps=fps_id)
        torch.manual_seed(seed)
        return video_path, frames, seed

    image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
    # The third return value (seed) must be routed to the seed slider; the
    # original wired it to a gr.Progress object, which is not an output component.
    generate_btn.click(
        fn=resize_and_animate,
        inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id],
        outputs=[video, gallery, seed],
    )


if __name__ == "__main__":
    demo.launch()