Hugging Face Spaces — status: Runtime error
Commit: "Update app.py" (Browse files)
File: app.py — CHANGED
@@ -68,7 +68,7 @@ base_model = "sayakpaul/FLUX.1-merged"
|
|
68 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
69 |
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
|
70 |
# Adjust the scaling factor for the base model's output
|
71 |
-
scaling_factor =
|
72 |
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
|
73 |
|
74 |
MAX_SEED = 2**32 - 1
|
@@ -433,7 +433,7 @@ def generate_image(prompt, steps, seed, cfg_scale, width, height, progress):
|
|
433 |
width=width,
|
434 |
height=height,
|
435 |
generator=generator,
|
436 |
-
joint_attention_kwargs={"scale": 1.
|
437 |
output_type="pil",
|
438 |
good_vae=good_vae,
|
439 |
):
|
@@ -479,15 +479,12 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
|
|
479 |
prompt_mash = " ".join(prepends + [prompt] + appends)
|
480 |
|
481 |
# Print formatted log
|
482 |
-
print("\n" + "=" * 50)
|
483 |
-
print("=" * 50)
|
484 |
print(f"📝 Prompt: {prompt}")
|
485 |
print(f"🔗 Selected LoRAs:\n{lora_details}")
|
486 |
print(f"🎛️ CFG Scale: {cfg_scale} | Steps: {steps}")
|
487 |
print(f"🎲 Seed: {seed}")
|
488 |
print(f"🖼️ Image Size: {width} x {height}")
|
489 |
print("\n" + "=" * 50 + "\n")
|
490 |
-
print(f"\n📝 Final Prompt: {prompt_mash}")
|
491 |
|
492 |
# Unload previous LoRA weights
|
493 |
with calculateDuration("Unloading LoRA"):
|
@@ -523,7 +520,6 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
|
|
523 |
yield image, seed, gr.update(value=progress_bar, visible=True)
|
524 |
|
525 |
print("✅ Image Generation Complete!")
|
526 |
-
print("=" * 50 + "\n")
|
527 |
|
528 |
run_lora.zerogpu = False
|
529 |
|
|
|
68 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
69 |
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
|
70 |
# Adjust the scaling factor for the base model's output
|
71 |
+
scaling_factor = 1.15 # You can adjust this value as needed
|
72 |
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
|
73 |
|
74 |
MAX_SEED = 2**32 - 1
|
|
|
433 |
width=width,
|
434 |
height=height,
|
435 |
generator=generator,
|
436 |
+
joint_attention_kwargs={"scale": 1.15},
|
437 |
output_type="pil",
|
438 |
good_vae=good_vae,
|
439 |
):
|
|
|
479 |
prompt_mash = " ".join(prepends + [prompt] + appends)
|
480 |
|
481 |
# Print formatted log
|
|
|
|
|
482 |
print(f"📝 Prompt: {prompt}")
|
483 |
print(f"🔗 Selected LoRAs:\n{lora_details}")
|
484 |
print(f"🎛️ CFG Scale: {cfg_scale} | Steps: {steps}")
|
485 |
print(f"🎲 Seed: {seed}")
|
486 |
print(f"🖼️ Image Size: {width} x {height}")
|
487 |
print("\n" + "=" * 50 + "\n")
|
|
|
488 |
|
489 |
# Unload previous LoRA weights
|
490 |
with calculateDuration("Unloading LoRA"):
|
|
|
520 |
yield image, seed, gr.update(value=progress_bar, visible=True)
|
521 |
|
522 |
print("✅ Image Generation Complete!")
|
|
|
523 |
|
524 |
run_lora.zerogpu = False
|
525 |
|