Commit b8da6bf
Parent(s): 331cf26
ipadapter & Florence-2-base one time init
app.py CHANGED
@@ -6,7 +6,7 @@ import torch
 import gradio as gr
 import spaces
 from comfy import model_management
-
+import subprocess
 
 from huggingface_hub import hf_hub_download
 from huggingface_hub import snapshot_download
@@ -46,16 +46,23 @@ print("CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors")
 hf_hub_download(repo_id="h94/IP-Adapter", filename="models/image_encoder/model.safetensors", local_dir="models/clip_vision/")
 # rename
 try:
-
-
-
-
+    source_file = "models/clip_vision/models/image_encoder/model.safetensors"
+    destination_file = "models/clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
+
+    result = subprocess.run(["mv", source_file, destination_file], check=True, capture_output=True, text=True)
+
+    # check=True raises a CalledProcessError if the command fails (returns a non-zero exit code)
+    # capture_output=True captures stdout and stderr. text=True decodes to string
+    print(f"Command executed successfully. Return code: {result.returncode}")
+    print(f"Standard output: {result.stdout}")
+    print(f"Standard error: {result.stderr}")
+
+except subprocess.CalledProcessError as e:
+    print(f"Command failed with error code: {e.returncode}")
+    print(f"Standard output: {e.stdout}")
+    print(f"Standard error: {e.stderr}")
 except FileNotFoundError:
-    print(
-except OSError as e:
-    print(f"An OS error occurred: {e}")
-except Exception as e:
-    print(f"An unexpected error occurred: {e}")
+    print("Error: The 'mv' command was not found in your system's PATH.")
 
 print("CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors done")
 
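Note on the rename above: hf_hub_download writes the file to local_dir plus the repo-relative filename, so the encoder lands at models/clip_vision/models/image_encoder/model.safetensors and is then moved to the flat name the rest of app.py expects. Shelling out to mv works on a Linux Space, but the same move can be done in-process with the standard library. A minimal sketch under the same paths (shutil/pathlib are additions, not part of the commit):

# Sketch: the same rename without spawning mv; paths copied from the hunk above.
import shutil
from pathlib import Path

source_file = Path("models/clip_vision/models/image_encoder/model.safetensors")
destination_file = Path("models/clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors")

try:
    destination_file.parent.mkdir(parents=True, exist_ok=True)
    # shutil.move works across filesystems, unlike a bare os.rename
    shutil.move(str(source_file), str(destination_file))
    print(f"Moved {source_file} -> {destination_file}")
except FileNotFoundError:
    print(f"Source file not found: {source_file}")
except OSError as e:
    print(f"An OS error occurred: {e}")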
@@ -71,6 +78,25 @@ hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter-plus_sd15.
 hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter-plus-face_sd15.safetensors", local_dir="models/ipadapter/")
 hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter-full-face_sd15.safetensors", local_dir="models/ipadapter/")
 hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter_sd15_vit-G.safetensors", local_dir="models/ipadapter/")
+# rename
+try:
+    source_file = "models/ipadapter/models/*"
+    destination_file = "models/ipadapter/"
+
+    result = subprocess.run(["mv", source_file, destination_file], check=True, capture_output=True, text=True)
+
+    # check=True raises a CalledProcessError if the command fails (returns a non-zero exit code)
+    # capture_output=True captures stdout and stderr. text=True decodes to string
+    print(f"Command executed successfully. Return code: {result.returncode}")
+    print(f"Standard output: {result.stdout}")
+    print(f"Standard error: {result.stderr}")
+
+except subprocess.CalledProcessError as e:
+    print(f"Command failed with error code: {e.returncode}")
+    print(f"Standard output: {e.stdout}")
+    print(f"Standard error: {e.stderr}")
+except FileNotFoundError:
+    print("Error: The 'mv' command was not found in your system's PATH.")
 print("ipadapter done")
 
 
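A caveat with the ipadapter rename: subprocess.run is given an argument list and no shell, so the asterisk in "models/ipadapter/models/*" is not glob-expanded — mv receives the literal string and fails, and the failure only surfaces through the CalledProcessError handler's prints. A sketch of the likely intent, expanding the pattern in Python (glob/shutil are additions, not part of the commit):

# Sketch: expand the wildcard in Python; subprocess.run(["mv", ...]) without
# shell=True performs no glob expansion. Paths copied from the hunk above.
import glob
import shutil

for path in glob.glob("models/ipadapter/models/*"):
    shutil.move(path, "models/ipadapter/")
    print(f"Moved {path} -> models/ipadapter/")

Passing a single command string with shell=True would also expand the pattern, at the cost of invoking a shell.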
@@ -222,6 +248,9 @@ print("load done: control_v11p_sd15_openpose.pth")
 
 loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
 
+florence2modelloader = NODE_CLASS_MAPPINGS["Florence2ModelLoader"]()
+florence2run = NODE_CLASS_MAPPINGS["Florence2Run"]()
+florence2modelloader_204 = None
 
 
 
@@ -317,15 +346,14 @@ print("model_management.load_models_gpu(model_loaders) done")
 
 @spaces.GPU(duration=60)
 def generate_image(model_image, hairstyle_template_image):
-
-
-
-
-
-
-
-
-    florence2run = NODE_CLASS_MAPPINGS["Florence2Run"]()
+    if florence2modelloader_204 == None:
+        florence2modelloader_204 = florence2modelloader.loadmodel(
+            model="Florence-2-base",
+            precision="fp16",
+            attention="sdpa",
+            convert_to_safetensors=False,
+        )
+        print("load done: Florence-2-base")
 
     with torch.inference_mode():
         cliptextencode_52 = cliptextencode.encode(
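A scoping note on the lazy init above: generate_image assigns to florence2modelloader_204, so Python treats the name as local to the function, and the first read in "if florence2modelloader_204 == None" raises UnboundLocalError instead of seeing the module-level None set in the earlier hunk. The one-time init needs a global declaration (and "is None" is the idiomatic test). A minimal sketch with only that change; the loadmodel arguments are copied from the commit and the rest of the function body is unchanged:

# Sketch: declare the cache variable global so the first call loads Florence-2-base
# and later calls reuse it; loader arguments copied from the hunk above.
@spaces.GPU(duration=60)
def generate_image(model_image, hairstyle_template_image):
    global florence2modelloader_204
    if florence2modelloader_204 is None:
        florence2modelloader_204 = florence2modelloader.loadmodel(
            model="Florence-2-base",
            precision="fp16",
            attention="sdpa",
            convert_to_safetensors=False,
        )
        print("load done: Florence-2-base")

    with torch.inference_mode():
        ...  # remainder of the original pipeline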