fix version

Files changed:
- app.py +59 -19
- app_old.py +176 -0
- requirements.txt +0 -1
app.py
CHANGED
@@ -1,29 +1,69 @@
-
 import spaces
 import os
-import gradio as gr
 import json
 import torch
+import requests
+import subprocess
+import gradio as gr
+from omegaconf import OmegaConf
+
+# ================== Added download logic ==================
+MODEL_REPO = "pandaphd/generative_photography"
+BRANCH = "main"
+LOCAL_DIR = "ckpts"
+
+
+def download_hf_folder():
+    os.makedirs(LOCAL_DIR, exist_ok=True)
+
+    def get_file_list(path=""):
+        api_url = f"https://huggingface.co/api/spaces/{MODEL_REPO}/tree/{BRANCH}/{path}"
+        try:
+            response = requests.get(api_url, timeout=10)
+            response.raise_for_status()
+            return response.json()
+        except Exception as e:
+            raise RuntimeError(f"Failed to get file list: {str(e)}")
+
+    def download_file(remote_path):
+        url = f"https://huggingface.co/spaces/{MODEL_REPO}/resolve/{BRANCH}/{remote_path}"
+        local_path = os.path.join(LOCAL_DIR, remote_path)
+        os.makedirs(os.path.dirname(local_path), exist_ok=True)
+
+        # Download with wget (supports resuming interrupted transfers)
+        cmd = [
+            "wget", "-c", "-q", "--show-progress",
+            "-O", local_path, url
+        ]
+        try:
+            subprocess.run(cmd, check=True)
+            print(f"Downloaded: {remote_path}")
+        except subprocess.CalledProcessError:
+            print(f"Failed to download: {remote_path}")
+            raise
+
+    print("Downloading models from Hugging Face...")
+
+    # Recursive download function
+    def download_recursive(path=""):
+        for item in get_file_list(path):
+            item_path = os.path.join(path, item["path"])
+            if item["type"] == "file":
+                download_file(item_path)
+            elif item["type"] == "directory":
+                download_recursive(item_path)
 
-from huggingface_hub import snapshot_download
-
-from inference_bokehK import load_models as load_bokeh_models, run_inference as run_bokeh_inference, OmegaConf
-from inference_focal_length import load_models as load_focal_models, run_inference as run_focal_inference
-from inference_shutter_speed import load_models as load_shutter_models, run_inference as run_shutter_inference
-from inference_color_temperature import load_models as load_color_models, run_inference as run_color_inference
-
-
-
-
-model_path = "ckpts"
-os.makedirs(model_path, exist_ok=True)
-
-
-print("Downloading models from Hugging Face...")
-snapshot_download(repo_id="pandaphd/generative_photography", local_dir=model_path)
-
+    try:
+        download_recursive()
+        print("All files downloaded successfully!")
+    except Exception as e:
+        print(f"Critical error during download: {str(e)}")
+        exit(1)
 
 
+# ================== Run the download ==================
+if not os.path.exists(os.path.join(LOCAL_DIR, "models")):
+    download_hf_folder()
 
 
 torch.manual_seed(42)
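A note on the new download logic: the tree endpoint that get_file_list calls appears to return entries of the form {"type": "file" or "directory", "path": ..., "size": ..., "oid": ...}, where "path" is already relative to the repo root. If so, joining the parent path onto item["path"] again, as download_recursive does, can duplicate directory segments for nested folders (e.g. models/models/...). A minimal sketch of a walker that relies on the repo-relative paths directly; walk_space_files is a hypothetical name, but the endpoint is the same one the commit uses:

import requests

MODEL_REPO = "pandaphd/generative_photography"
BRANCH = "main"

def walk_space_files(path=""):
    # Yield repo-relative file paths by walking the HF tree API.
    api_url = f"https://huggingface.co/api/spaces/{MODEL_REPO}/tree/{BRANCH}/{path}"
    response = requests.get(api_url, timeout=10)
    response.raise_for_status()
    for item in response.json():
        if item["type"] == "file":
            yield item["path"]                         # already repo-relative
        elif item["type"] == "directory":
            yield from walk_space_files(item["path"])  # pass as-is, no extra join

for f in walk_space_files():
    print(f)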
app_old.py
ADDED
@@ -0,0 +1,176 @@
+
+import spaces
+import os
+import gradio as gr
+import json
+import torch
+
+from huggingface_hub import snapshot_download
+
+from inference_bokehK import load_models as load_bokeh_models, run_inference as run_bokeh_inference, OmegaConf
+from inference_focal_length import load_models as load_focal_models, run_inference as run_focal_inference
+from inference_shutter_speed import load_models as load_shutter_models, run_inference as run_shutter_inference
+from inference_color_temperature import load_models as load_color_models, run_inference as run_color_inference
+
+
+
+
+model_path = "ckpts"
+os.makedirs(model_path, exist_ok=True)
+
+
+print("Downloading models from Hugging Face...")
+snapshot_download(repo_id="pandaphd/generative_photography", local_dir=model_path)
+
+
+
+
+
+torch.manual_seed(42)
+
+bokeh_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_bokehK.yaml")
+bokeh_pipeline, bokeh_device = load_bokeh_models(bokeh_cfg)
+
+focal_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_focal_length.yaml")
+focal_pipeline, focal_device = load_focal_models(focal_cfg)
+
+shutter_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_shutter_speed.yaml")
+shutter_pipeline, shutter_device = load_shutter_models(shutter_cfg)
+
+color_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_color_temperature.yaml")
+color_pipeline, color_device = load_color_models(color_cfg)
+
+@spaces.GPU(duration=30)
+def generate_bokeh_video(base_scene, bokehK_list):
+    try:
+        torch.manual_seed(42)
+        if len(json.loads(bokehK_list)) != 5:
+            raise ValueError("Exactly 5 Bokeh K values required")
+        return run_bokeh_inference(
+            pipeline=bokeh_pipeline, tokenizer=bokeh_pipeline.tokenizer,
+            text_encoder=bokeh_pipeline.text_encoder, base_scene=base_scene,
+            bokehK_list=bokehK_list, device=bokeh_device
+        )
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+@spaces.GPU(duration=30)
+def generate_focal_video(base_scene, focal_length_list):
+    try:
+        torch.manual_seed(42)
+        if len(json.loads(focal_length_list)) != 5:
+            raise ValueError("Exactly 5 focal length values required")
+        return run_focal_inference(
+            pipeline=focal_pipeline, tokenizer=focal_pipeline.tokenizer,
+            text_encoder=focal_pipeline.text_encoder, base_scene=base_scene,
+            focal_length_list=focal_length_list, device=focal_device
+        )
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+@spaces.GPU(duration=30)
+def generate_shutter_video(base_scene, shutter_speed_list):
+    try:
+        torch.manual_seed(42)
+        if len(json.loads(shutter_speed_list)) != 5:
+            raise ValueError("Exactly 5 shutter speed values required")
+        return run_shutter_inference(
+            pipeline=shutter_pipeline, tokenizer=shutter_pipeline.tokenizer,
+            text_encoder=shutter_pipeline.text_encoder, base_scene=base_scene,
+            shutter_speed_list=shutter_speed_list, device=shutter_device
+        )
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+@spaces.GPU(duration=30)
+def generate_color_video(base_scene, color_temperature_list):
+    try:
+        torch.manual_seed(42)
+        if len(json.loads(color_temperature_list)) != 5:
+            raise ValueError("Exactly 5 color temperature values required")
+        return run_color_inference(
+            pipeline=color_pipeline, tokenizer=color_pipeline.tokenizer,
+            text_encoder=color_pipeline.text_encoder, base_scene=base_scene,
+            color_temperature_list=color_temperature_list, device=color_device
+        )
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+
+bokeh_examples = [
+    ["A variety of potted plants are displayed on a window sill, with some of them placed in yellow and white cups. The plants are arranged in different sizes and shapes, creating a visually appealing display.", "[18.0, 14.0, 10.0, 6.0, 2.0]"],
+    ["A colorful backpack with a floral pattern is sitting on a table next to a computer monitor.", "[2.3, 5.8, 10.2, 14.8, 24.9]"]
+]
+
+focal_examples = [
+    ["A small office cubicle with a desk.", "[25.1, 36.1, 47.1, 58.1, 69.1]"],
+    ["A large white couch in a living room.", "[55.0, 46.0, 37.0, 28.0, 25.0]"]
+]
+
+shutter_examples = [
+    ["A brown and orange leather handbag.", "[0.11, 0.22, 0.33, 0.44, 0.55]"],
+    ["A variety of potted plants.", "[0.2, 0.49, 0.69, 0.75, 0.89]"]
+]
+
+color_examples = [
+    ["A blue sky with mountains.", "[5455.0, 5155.0, 5555.0, 6555.0, 7555.0]"],
+    ["A red couch in front of a window.", "[3500.0, 5500.0, 6500.0, 7500.0, 8500.0]"]
+]
+
+
+with gr.Blocks(title="Generative Photography") as demo:
+    gr.Markdown("# **Generative Photography: Scene-Consistent Camera Control for Realistic Text-to-Image Synthesis** ")
+
+    with gr.Tabs():
+        with gr.Tab("BokehK Effect"):
+            gr.Markdown("### Generate Frames with Bokeh Blur Effect")
+            with gr.Row():
+                with gr.Column():
+                    scene_input_bokeh = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
+                    bokeh_input = gr.Textbox(label="Bokeh Blur Values", placeholder="Enter 5 comma-separated values from 1-30, e.g., [2.44, 8.3, 10.1, 17.2, 24.0]")
+                    submit_bokeh = gr.Button("Generate Video")
+                with gr.Column():
+                    video_output_bokeh = gr.Video(label="Generated Video")
+            gr.Examples(bokeh_examples, [scene_input_bokeh, bokeh_input], [video_output_bokeh], generate_bokeh_video)
+            submit_bokeh.click(generate_bokeh_video, [scene_input_bokeh, bokeh_input], [video_output_bokeh])
+
+        with gr.Tab("Focal Length Effect"):
+            gr.Markdown("### Generate Frames with Focal Length Effect")
+            with gr.Row():
+                with gr.Column():
+                    scene_input_focal = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
+                    focal_input = gr.Textbox(label="Focal Length Values", placeholder="Enter 5 comma-separated values from 24-70, e.g., [25.1, 30.2, 33.3, 40.8, 54.0]")
+                    submit_focal = gr.Button("Generate Video")
+                with gr.Column():
+                    video_output_focal = gr.Video(label="Generated Video")
+            gr.Examples(focal_examples, [scene_input_focal, focal_input], [video_output_focal], generate_focal_video)
+            submit_focal.click(generate_focal_video, [scene_input_focal, focal_input], [video_output_focal])
+
+        with gr.Tab("Shutter Speed Effect"):
+            gr.Markdown("### Generate Frames with Shutter Speed Effect")
+            with gr.Row():
+                with gr.Column():
+                    scene_input_shutter = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
+                    shutter_input = gr.Textbox(label="Shutter Speed Values", placeholder="Enter 5 comma-separated values from 0.1-1.0, e.g., [0.15, 0.32, 0.53, 0.62, 0.82]")
+                    submit_shutter = gr.Button("Generate Video")
+                with gr.Column():
+                    video_output_shutter = gr.Video(label="Generated Video")
+            gr.Examples(shutter_examples, [scene_input_shutter, shutter_input], [video_output_shutter], generate_shutter_video)
+            submit_shutter.click(generate_shutter_video, [scene_input_shutter, shutter_input], [video_output_shutter])
+
+        with gr.Tab("Color Temperature Effect"):
+            gr.Markdown("### Generate Frames with Color Temperature Effect")
+            with gr.Row():
+                with gr.Column():
+                    scene_input_color = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
+                    color_input = gr.Textbox(label="Color Temperature Values", placeholder="Enter 5 comma-separated values from 2000-10000, e.g., [3001.3, 4000.2, 4400.34, 5488.23, 8888.82]")
+                    submit_color = gr.Button("Generate Video")
+                with gr.Column():
+                    video_output_color = gr.Video(label="Generated Video")
+            gr.Examples(color_examples, [scene_input_color, color_input], [video_output_color], generate_color_video)
+            submit_color.click(generate_color_video, [scene_input_color, color_input], [video_output_color])
+
+if __name__ == "__main__":
+    demo.launch(share=True)
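app_old.py preserves the original single-call snapshot_download flow together with the full Gradio UI. For completeness, a Space built this way can also be driven programmatically; a minimal sketch using gradio_client, where the api_name value is an assumption (Gradio derives default endpoint names from the handler functions, but the exact names depend on the Gradio version):

from gradio_client import Client

client = Client("pandaphd/generative_photography")

# Inputs mirror the UI: a scene description and a JSON list of exactly 5 values.
result = client.predict(
    "A small office cubicle with a desk.",
    "[25.1, 36.1, 47.1, 58.1, 69.1]",
    api_name="/generate_focal_video",  # assumed default endpoint name
)
print(result)  # path to the generated video on success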
requirements.txt
CHANGED
@@ -7,7 +7,6 @@ imageio-ffmpeg
 numpy==1.24.4
 transformers==4.39.3
 accelerate==0.30.0
-huggingface_hub==0.25.2
 opencv-python
 gdown
 einops
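The dropped pin matches the app.py change: huggingface_hub is no longer imported directly, and it still arrives transitively as a dependency of transformers at whatever version the resolver picks; presumably the pinned 0.25.2 is the conflict the commit title "fix version" refers to. If the one-call download were ever restored, a sketch of the equivalent call follows; repo_type="space" is an assumption based on the new wget URLs pointing at huggingface.co/spaces/...:

from huggingface_hub import snapshot_download

# One call replaces the manual tree walk; resume and caching are handled by the hub client.
snapshot_download(
    repo_id="pandaphd/generative_photography",
    repo_type="space",   # assumption: the files live in the Space repo itself
    local_dir="ckpts",
)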