import gradio as gr
from all_models import models
#from _prompt import thePrompt, howManyModelsToUse
from externalmod import gr_Interface_load, save_image, randomize_seed
import asyncio
import os
from threading import RLock
from datetime import datetime
# Anything other than huggingface_hub==0.26.2 results in a token error.
from huggingface_hub import HfApi, whoami
howManyModelsToUse = 20
thePrompt ="group of 3boys kissing in bathtub while interracial daddy gives a boy a handjob"
preSetPrompt = thePrompt
negPreSetPrompt = "[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness, asian, african, collage, composite, combined image"
lock = RLock()
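# The lock above serializes save_image() calls from concurrent inference threads.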
HF_TOKEN = os.environ.get("HF_TOKEN")  # If private or gated models aren't used, setting this is unnecessary.
# --- Authenticate and fetch the models owned by this user
api = HfApi()
user_info = whoami(token=HF_TOKEN)
username = user_info["name"]
# Get all models owned by the user
mymodels = list(api.list_models(author=username, token=HF_TOKEN))
model_ids = [m.modelId for m in mymodels]
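# Plain repo ids (e.g. "author/model-name") for the checkbox list below.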
if not model_ids:
raise ValueError(f"No models found for user '{username}'")
# --- Build the Gradio UI
def handle_model_selection(selected_models):
if not selected_models:
return "No models selected."
return "✅ Selected models:\n" + "\n".join(selected_models)
def get_current_time():
now = datetime.now()
current_time = now.strftime("%y-%m-%d %H:%M:%S")
return current_time
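# Build the model-name -> inference-interface map once at startup.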
def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                print(error)
                # Model failed to load; use a dummy interface so the UI slot still exists.
                m = gr.Interface(lambda _: None, ['text'], ['image'])
            models_load[model] = m
load_fn(models)
num_models = howManyModelsToUse
max_images = howManyModelsToUse
inference_timeout = 60  # seconds per model call
default_models = models[:num_models]
MAX_SEED = 2**32-1
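# Pad the selection to exactly num_models entries; 'NA' marks an unused slot whose image box is hidden.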
def extend_choices(choices):
return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
def update_imgbox(choices):
choices_plus = extend_choices(choices[:num_models])
return [gr.Image(None, label=m, visible=(m!='NA')) for m in choices_plus]
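# Note: random.choices samples with replacement, so the same model can appear more than once.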
def random_choices():
import random
random.seed()
return random.choices(models, k=num_models)
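# Run one model's blocking inference call in a worker thread, enforce a timeout,
# and save the resulting PNG (under the lock) before handing it back to Gradio.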
async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
kwargs = {}
if height > 0: kwargs["height"] = height
if width > 0: kwargs["width"] = width
if steps > 0: kwargs["num_inference_steps"] = steps
    if cfg > 0: kwargs["guidance_scale"] = cfg
if seed == -1:
theSeed = randomize_seed()
else:
theSeed = seed
kwargs["seed"] = theSeed
task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
await asyncio.sleep(0)
try:
result = await asyncio.wait_for(task, timeout=timeout)
except asyncio.TimeoutError as e:
print(e)
print(f"infer: Task timed out: {model_str}")
if not task.done(): task.cancel()
result = None
raise Exception(f"Task timed out: {model_str}") from e
except Exception as e:
print(e)
print(f"infer: exception: {model_str}")
if not task.done(): task.cancel()
result = None
        raise Exception(f"Task failed: {model_str}") from e
if task.done() and result is not None and not isinstance(result, tuple):
with lock:
png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, theSeed)
return image
return None
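# Synchronous wrapper invoked by Gradio; each call runs the async infer() on its own short-lived event loop.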
def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
    # Create the loop before the try block so loop.close() in finally can't hit an unbound name.
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                               height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"gen_fn: Task aborted: {model_str}")
        result = None
        raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
    finally:
        loop.close()
    return result
with gr.Blocks(fill_width=True) as demo:
with gr.Row():
gr.Markdown(f"# ({username}) you are logged in")
    model_selector = gr.CheckboxGroup(choices=model_ids, value=model_ids, label="Your models", interactive=True)
output_box = gr.Textbox(lines=10, label="Selected Models")
model_selector.change(fn=handle_model_selection, inputs=model_selector, outputs=output_box)
with gr.Tab(str(num_models) + ' Models'):
with gr.Column(scale=2):
with gr.Group():
txt_input = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
with gr.Accordion("Advanced", open=False, visible=True):
with gr.Row():
neg_input = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
with gr.Row():
width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
with gr.Row():
steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
seed_rand.click(randomize_seed, None, [seed], queue=False)
with gr.Row():
gen_button = gr.Button(f'Generate up to {int(num_models)} images', variant='primary', scale=3, elem_classes=["butt"])
                random_button = gr.Button('Randomize Models', variant='secondary', scale=1)
with gr.Column(scale=1):
with gr.Group():
with gr.Row():
output = [gr.Image(label=m, show_download_button=True, elem_classes=["image-monitor"],
interactive=False, width=112, height=112, show_share_button=False, format="png",
visible=True) for m in default_models]
current_models = [gr.Textbox(m, visible=False) for m in default_models]
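        # One generate event per image slot; the hidden textbox supplies that slot's model name to gen_fn.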
for m, o in zip(current_models, output):
gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
concurrency_limit=None, queue=False)
with gr.Column(scale=4):
with gr.Accordion('Model selection'):
                model_choice = gr.CheckboxGroup(models, label=f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
model_choice.change(update_imgbox, model_choice, output)
model_choice.change(extend_choices, model_choice, current_models)
random_button.click(random_choices, None, model_choice)
with gr.Tab('Single model'):
with gr.Column(scale=2):
model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
with gr.Group():
                txt_input2 = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
with gr.Accordion("Advanced", open=False, visible=True):
with gr.Row():
neg_input2 = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
with gr.Row():
width2 = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
height2 = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
with gr.Row():
steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
seed_rand2 = gr.Button("Randomize Seed", size="sm", variant="secondary")
seed_rand2.click(randomize_seed, None, [seed2], queue=False)
num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
with gr.Row():
                gen_button2 = gr.Button('Let the machine hallucinate', variant='primary', scale=2, elem_classes=["butt"])
with gr.Column(scale=1):
with gr.Group():
with gr.Row():
output2 = [gr.Image(label='', show_download_button=True,
interactive=False, width=112, height=112, visible=True, format="png",
show_share_button=False, show_label=False) for _ in range(max_images)]
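        # Each slot has a hidden index: the slider toggles its visibility, and generation is skipped when the index exceeds the requested image count.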
for i, o in enumerate(output2):
img_i = gr.Number(i, visible=False)
            num_images.change(lambda i, n: gr.update(visible=(i < n)), [img_i, num_images], o, queue=False)
gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
height2, width2, steps2, cfg2, seed2], outputs=[o],
concurrency_limit=None, queue=False)
demo.launch(show_api=False, max_threads=400)