Update app.py
app.py CHANGED
@@ -21,6 +21,8 @@ from PIL import Image
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
+current_lora_scale = 1.0
+
 
 parser = argparse.ArgumentParser()
 parser.add_argument("--port", type=int, default=7860, help="Port for the Gradio app")
@@ -31,8 +33,9 @@ parser.add_argument("--enable-model-cpu-offload", action="store_true", help="Ena
 args = parser.parse_args()
 
 pipe = FluxFillPipeline.from_pretrained(args.flux_path, torch_dtype=torch.bfloat16)
-pipe.load_lora_weights(args.lora_path)
-
+pipe.load_lora_weights(args.lora_path, adapter_name="icedit")
+pipe.set_adapters("icedit", 1.0)
+
 if args.enable_model_cpu_offload:
     pipe.enable_model_cpu_offload()
 else:
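The change above replaces an anonymous `load_lora_weights(args.lora_path)` call with a named adapter, which is what makes the per-request scale changes later in this diff possible: once the LoRA is registered under a name, `set_adapters` can re-weight it without reloading anything. A minimal sketch of that lifecycle, assuming a diffusers build with PEFT support; the paths below are placeholders rather than the app's real arguments:

```python
import torch
from diffusers import FluxFillPipeline

# Placeholder checkpoint locations (the app takes these from argparse).
FLUX_PATH = "black-forest-labs/FLUX.1-Fill-dev"
LORA_PATH = "path/to/icedit-lora"

pipe = FluxFillPipeline.from_pretrained(FLUX_PATH, torch_dtype=torch.bfloat16)

# Register the LoRA under a name so it can be addressed later.
pipe.load_lora_weights(LORA_PATH, adapter_name="icedit")

# Re-weighting a named adapter is cheap: no weights are reloaded.
pipe.set_adapters("icedit", 1.0)  # full strength
pipe.set_adapters("icedit", 0.5)  # half strength
```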
@@ -47,9 +50,19 @@ def infer(edit_images,
           height=1024,
           guidance_scale=50,
           num_inference_steps=28,
+          lora_scale=1.0,
           progress=gr.Progress(track_tqdm=True)
           ):
 
+
+    global current_lora_scale
+
+    if lora_scale != current_lora_scale:
+        print(f"\033[93m[INFO] LoRA scale changed from {current_lora_scale} to {lora_scale}, reloading LoRA weights\033[0m")
+        pipe.set_adapters("icedit", lora_scale)
+        current_lora_scale = lora_scale
+
+
     image = edit_images
 
     if image.size[0] != 512:
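The block added to `infer` is a small change-detection cache: `set_adapters` is only invoked when the slider value differs from the last one applied, so repeated runs at the same scale skip the adapter update entirely. A self-contained sketch of the pattern, with the pipeline call stubbed out:

```python
# Change-detection sketch; apply_scale() stands in for pipe.set_adapters().
current_scale = 1.0

def apply_scale(scale: float) -> None:
    print(f"[INFO] applying LoRA scale {scale}")

def infer(prompt: str, lora_scale: float = 1.0) -> str:
    global current_scale
    if lora_scale != current_scale:  # only touch the adapter on a real change
        apply_scale(lora_scale)
        current_scale = lora_scale
    return f"edited({prompt!r}) @ scale {current_scale}"

print(infer("make it a sketch"))       # 1.0 == 1.0 -> no adapter update
print(infer("make it a sketch", 0.5))  # changed -> update once
print(infer("make it a sketch", 0.5))  # unchanged -> skipped again
```

The trade-off is that module-level state is shared across requests, so concurrent users with different slider values could interleave updates; for a single-user demo the saved work is likely worth it.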
@@ -93,13 +106,14 @@ def infer(edit_images,
     index = len(os.listdir(args.output_dir))
     output_image.save(f"{args.output_dir}/result_{index}.png")
 
-    return (image, output_image), seed
+    return (image, output_image), seed, lora_scale
 
 # Newly added examples (tuples converted to lists)
 new_examples = [
-    ['assets/
-    ['assets/
-    ['assets/
+    ['assets/girl_3.jpg', 'Make it looks like a watercolor painting.', 0, 0.5],
+    ['assets/girl.png', 'Make her hair dark green and her clothes checked.', 42, 1.0],
+    ['assets/boy.png', 'Change the sunglasses to a Christmas hat.', 27440001, 1.0],
+    ['assets/kaori.jpg', 'Make it a sketch.', 329918865, 1.0]
 ]
 
 css = """
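Returning `lora_scale` as a third value keeps `infer` in step with the widened `outputs=[result, seed, lora_scale]` lists further down: Gradio maps a handler's return tuple onto `outputs` positionally, one value per component. A toy illustration of that contract, with made-up components:

```python
import gradio as gr

def handler(scale: float):
    # Three returned values fill the three `outputs` components, in order.
    return scale * 2, 42, f"scale={scale}"

with gr.Blocks() as demo:
    scale_in = gr.Slider(minimum=0, maximum=1.0, value=0.5, label="Scale")
    run = gr.Button("Run")
    doubled = gr.Number(label="Doubled")
    seed = gr.Number(label="Seed")
    summary = gr.Textbox(label="Summary")
    run.click(fn=handler, inputs=[scale_in], outputs=[doubled, seed, summary])

demo.launch()
```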
@@ -113,9 +127,11 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""# IC-Edit
-**Image Editing is worth a single LoRA!** A demo for [IC-Edit](https://
+**Image Editing is worth a single LoRA!** A demo for [IC-Edit](https://river-zhang.github.io/ICEdit-gh-pages/).
 More **open-source**, with **lower costs**, **faster speed** (it takes about 9 seconds to process one image), and **powerful performance**.
-For more details, check out our [Github Repository](https://github.com/River-Zhang/ICEdit) and [
+For more details, check out our [Github Repository](https://github.com/River-Zhang/ICEdit) and [arxiv paper](https://arxiv.org/pdf/2504.20690). If our project resonates with you or proves useful, we'd be truly grateful if you could spare a moment to give it a star.
+\n**👑 Feel free to share your results in this [Gallery](https://github.com/River-Zhang/ICEdit/discussions/21)!**
+\n🔥 New feature: Try **different LoRA scale**!
 """)
         with gr.Row():
             with gr.Column():
@@ -187,11 +203,17 @@ For more details, check out our [Github Repository](https://github.com/River-Zha
                     value=28,
                 )
 
-
+                lora_scale = gr.Slider(
+                    label="LoRA Scale",
+                    minimum=0,
+                    maximum=1.0,
+                    step=0.01,
+                    value=1.0,
+                )
         gr.Examples(
             examples=new_examples,
-            inputs=[edit_image, prompt, seed],
-            outputs=[result, seed],
+            inputs=[edit_image, prompt, seed, lora_scale],
+            outputs=[result, seed, lora_scale],
             fn=infer,
             cache_examples=False
         )
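Example rows bind to `inputs` purely by position, which is why every row in `new_examples` gained a fourth column once the slider joined the list. A self-contained sketch of the wiring, with placeholder component and file names:

```python
import gradio as gr

# Placeholder names throughout; only the wiring pattern matters.
with gr.Blocks() as demo:
    image = gr.Image(type="filepath")
    prompt = gr.Textbox(label="Prompt")
    seed = gr.Number(label="Seed", precision=0)
    lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1.0, step=0.01, value=1.0)
    # Each row lines up with `inputs` left to right:
    # [image, prompt, seed, lora_scale] -> clicking a row fills all four.
    gr.Examples(
        examples=[["assets/example.png", "Make it a sketch.", 42, 0.5]],
        inputs=[image, prompt, seed, lora_scale],
    )

demo.launch()
```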
@@ -199,8 +221,8 @@ For more details, check out our [Github Repository](https://github.com/River-Zha
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn=infer,
-        inputs=[edit_image, prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs=[result, seed]
+        inputs=[edit_image, prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, lora_scale],
+        outputs=[result, seed, lora_scale]
     )
 
 demo.launch(server_port=args.port)
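`gr.on` attaches one handler to several triggers at once, so the same `infer` call fires whether the user clicks the button or presses Enter in the prompt box. A minimal standalone version of the pattern (names are illustrative):

```python
import gradio as gr

def greet(name: str) -> str:
    return f"Hello, {name}!"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result")
    # One handler, two triggers: clicking the button or submitting the textbox.
    gr.on(
        triggers=[run_button.click, name.submit],
        fn=greet,
        inputs=[name],
        outputs=[result],
    )

demo.launch()
```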