#!/usr/bin/env python
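# Gradio demo for the InterDiffusion-4.0 Stable Diffusion XL checkpoint with an
# attached LoRA adapter: it loads the pipeline, defines a single generation
# entry point, and wires up a small Blocks UI.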
import os
import random
import uuid
from typing import Tuple

import gradio as gr
import numpy as np
from PIL import Image
import spaces
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
DESCRIPTION = """# InterDiffusion-4.0
### [https://huggingface.co/cutycat2000x/InterDiffusion-4.0](https://huggingface.co/cutycat2000x/InterDiffusion-4.0)"""
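# Seed values are bounded by int32; "(LoRA)" is the only style preset defined below.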
MAX_SEED = np.iinfo(np.int32).max
DEFAULT_STYLE_NAME = "(LoRA)"
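# Write a PIL image to a uniquely named PNG in the working directory and return the filename.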
def save_image(img):
    filename = str(uuid.uuid4()) + ".png"
    img.save(filename)
    return filename
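# Return a fresh random seed when randomization is requested, otherwise the user-supplied seed.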
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    return random.randint(0, MAX_SEED) if randomize_seed else seed
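# Prompt and negative-prompt templates per style; only the default LoRA style is defined.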
style_list = [
    {
        "name": DEFAULT_STYLE_NAME,
        "prompt": "{prompt}",
        "negative_prompt": "",
    },
]

styles = {s["name"]: (s["prompt"], s["negative_prompt"]) for s in style_list}
STYLE_NAMES = list(styles.keys())
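# Substitute the user prompt into the selected style template, falling back to the default style.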
def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive), n + negative
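# Load the SDXL pipeline, swap in an Euler-ancestral scheduler, and attach the LoRA
# adapter. The demo assumes GPU hardware; without CUDA, `pipe` is never created.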
if torch.cuda.is_available():
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "cutycat2000x/InterDiffusion-4.0",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.load_lora_weights("cutycat2000x/LoRA2", weight_name="lora.safetensors", adapter_name="adapt")
    pipe.set_adapters("adapt")
    pipe.to("cuda")
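# Assumption: on a ZeroGPU Space the inference function is wrapped with @spaces.GPU so a
# GPU is allocated per call; the `spaces` import above is otherwise unused.
@spaces.GPU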
def generate(prompt, negative_prompt, style, use_negative_prompt, num_inference_steps,
             num_images_per_prompt, seed, width, height, guidance_scale, randomize_seed,
             progress=gr.Progress(track_tqdm=True)):
    seed = randomize_seed_fn(seed, randomize_seed)
    if not use_negative_prompt:
        negative_prompt = ""
    prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=num_images_per_prompt,
        cross_attention_kwargs={"scale": 0.65},
        output_type="pil",
    )
    return result.images, seed
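# Example prompts surfaced in the UI.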
examples = [
    'a smiling girl with sparkles in her eyes, walking in a garden, in the morning --style anime',
    'firewatch landscape, Graphic Novel, Pastel Art...',
    'Cat on a tree sitting in between parrots.',
    'cat, 4k, hyperrealistic, Cinematic, unreal engine 5',
    'cinematic closeup of burning skull',
    'frozen elsa',
    'A rainbow tree, anime style, tree in focus',
    'A cat holding a sign that reads "Hello World"',
    'Odette the butterfly goddess wondering in the cosmos',
]
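# Minimal CSS: constrain the app width, center the heading, and hide the Gradio footer.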
css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer { visibility: hidden }
'''
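# Assemble the Blocks UI: prompt box, result gallery, and advanced generation controls.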
with gr.Blocks(css=css, theme="xiaobaiyuan/theme_brief") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Group():
        with gr.Row():
            prompt = gr.Textbox(
                label="Prompt", placeholder="Enter your prompt", lines=1
            )
            run_button = gr.Button("Run")
        result = gr.Gallery(label="Result", columns=1, preview=True)
    with gr.Accordion("Advanced options", open=False):
        use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
        negative_prompt = gr.Textbox(label="Negative prompt", lines=1, visible=True)
        num_inference_steps = gr.Slider(label="Steps", minimum=10, maximum=60, step=1, value=30)
        num_images_per_prompt = gr.Slider(label="Images", minimum=1, maximum=5, step=1, value=2)
        seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        width = gr.Slider(label="Width", minimum=512, maximum=2048, step=8, value=1024)
        height = gr.Slider(label="Height", minimum=512, maximum=2048, step=8, value=1024)
        guidance_scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=20.0, step=0.1, value=6.0)
        style_selection = gr.Radio(label="Image Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)

    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed],
        fn=generate,
        cache_examples=False,
    )
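    # Show or hide the negative prompt box based on the checkbox state.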
    use_negative_prompt.change(
        lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
    )
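    # Trigger generation either by submitting the prompt box or by clicking Run.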
    prompt.submit(
        fn=generate,
        inputs=[prompt, negative_prompt, style_selection, use_negative_prompt,
                num_inference_steps, num_images_per_prompt, seed,
                width, height, guidance_scale, randomize_seed],
        outputs=[result, seed],
    )
    run_button.click(
        fn=generate,
        inputs=[prompt, negative_prompt, style_selection, use_negative_prompt,
                num_inference_steps, num_images_per_prompt, seed,
                width, height, guidance_scale, randomize_seed],
        outputs=[result, seed],
    )
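# Queue incoming requests (up to 20) and launch the demo without the API page.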
if __name__ == "__main__":
    demo.queue(max_size=20).launch(show_api=False, debug=False)