Update app.py

app.py CHANGED
@@ -1,22 +1,21 @@
 import gradio as gr
+from handle_models import load_fn, infer, gen_fn
 from all_models import models
-#from _prompt import thePrompt, howManyModelsToUse
 from externalmod import gr_Interface_load, save_image, randomize_seed
 import asyncio
 import os
 from threading import RLock
 from datetime import datetime
-
+from handlemodelradio import extend_choices, update_imgbox, random_choices
 # anything but huggingface_hub==0.26.2 will result in token error
 from huggingface_hub import HfApi, whoami
 howManyModelsToUse = 20
+num_models = howManyModelsToUse
+max_images = howManyModelsToUse
+inference_timeout = 60
+default_models = models[:num_models]
+MAX_SEED = 2**32 - 1
 thePrompt = "[prompt redacted]"
-
-
-
-
-
-
 preSetPrompt = thePrompt
 negPreSetPrompt = "[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness, asian, african, collage, composite, combined image"
 lock = RLock()
@@ -29,7 +28,6 @@ username = user_info["name"]
 #models = api.list_models(author=username, token=HF_TOKEN)
 mymodels = list(api.list_models(author=username, token=HF_TOKEN))
 model_ids = [m.modelId for m in mymodels]
-
 if not model_ids:
     raise ValueError(f"No models found for user '{username}'")
 # --- Step 3: Build Gradio UI
@@ -43,94 +41,8 @@ def get_current_time():
     now = datetime.now()
     current_time = now.strftime("%y-%m-%d %H:%M:%S")
     return current_time
-
-def load_fn(models):
-    global models_load
-    models_load = {}
-    for model in models:
-        if model not in models_load.keys():
-            try:
-                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
-            except Exception as error:
-                print(error)
-                m = gr.Interface(lambda: None, ['text'], ['image'])
-            models_load.update({model: m})
-
-
 load_fn(models)
 
-num_models = howManyModelsToUse
-max_images = howManyModelsToUse
-inference_timeout = 60
-default_models = models[:num_models]
-MAX_SEED = 2**32 - 1
-
-
-def extend_choices(choices):
-    return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
-
-
-def update_imgbox(choices):
-    choices_plus = extend_choices(choices[:num_models])
-    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
-
-
-def random_choices():
-    import random
-    random.seed()
-    return random.choices(models, k=num_models)
-
-
-async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
-    kwargs = {}
-    if height > 0: kwargs["height"] = height
-    if width > 0: kwargs["width"] = width
-    if steps > 0: kwargs["num_inference_steps"] = steps
-    if cfg > 0: kwargs["guidance_scale"] = cfg
-
-    if seed == -1:
-        theSeed = randomize_seed()
-    else:
-        theSeed = seed
-    kwargs["seed"] = theSeed
-
-    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
-    await asyncio.sleep(0)
-    try:
-        result = await asyncio.wait_for(task, timeout=timeout)
-    except asyncio.TimeoutError as e:
-        print(e)
-        print(f"infer: Task timed out: {model_str}")
-        if not task.done(): task.cancel()
-        result = None
-        raise Exception(f"Task timed out: {model_str}") from e
-    except Exception as e:
-        print(e)
-        print(f"infer: exception: {model_str}")
-        if not task.done(): task.cancel()
-        result = None
-        raise Exception() from e
-    if task.done() and result is not None and not isinstance(result, tuple):
-        with lock:
-            png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
-            image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, theSeed)
-            return image
-    return None
-
-def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
-    try:
-        loop = asyncio.new_event_loop()
-        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
-                                               height, width, steps, cfg, seed, inference_timeout))
-    except (Exception, asyncio.CancelledError) as e:
-        print(e)
-        print(f"gen_fn: Task aborted: {model_str}")
-        result = None
-        raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
-    finally:
-        loop.close()
-    return result
-
 
 '''
 
@@ -160,7 +72,6 @@ with gr.Blocks(fill_width=True) as demo:
             with gr.Row():
                 gen_button = gr.Button(f'Generate up to {int(num_models)} images', variant='primary', scale=3, elem_classes=["butt"])
                 random_button = gr.Button('Randomize Models', variant='secondary', scale=1)
-
         with gr.Column(scale=1):
             with gr.Group():
                 with gr.Row():
@@ -168,56 +79,17 @@ with gr.Blocks(fill_width=True) as demo:
                                        interactive=False, width=112, height=112, show_share_button=False, format="png",
                                        visible=True) for m in default_models]
             current_models = [gr.Textbox(m, visible=False) for m in default_models]
-
             for m, o in zip(current_models, output):
                 gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
                                   inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
                                   concurrency_limit=None, queue=False)
-
-
         with gr.Column(scale=4):
             with gr.Accordion('Model selection'):
                 model_choice = gr.CheckboxGroup(models, label=f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
                 model_choice.change(update_imgbox, model_choice, output)
                 model_choice.change(extend_choices, model_choice, current_models)
                 random_button.click(random_choices, None, model_choice)
-
-        with gr.Tab('Single model'):
-            with gr.Column(scale=2):
-                model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
-                with gr.Group():
-                    txt_input2 = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
-                    with gr.Accordion("Advanced", open=False, visible=True):
-                        with gr.Row():
-                            neg_input2 = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
-                        with gr.Row():
-                            width2 = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-                            height2 = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-                        with gr.Row():
-                            steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
-                            cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
-                            seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
-                            seed_rand2 = gr.Button("Randomize Seed", size="sm", variant="secondary")
-                            seed_rand2.click(randomize_seed, None, [seed2], queue=False)
-                num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
-                with gr.Row():
-                    gen_button2 = gr.Button('Let the machine hallucinate', variant='primary', scale=2, elem_classes=["butt"])
-
-            with gr.Column(scale=1):
-                with gr.Group():
-                    with gr.Row():
-                        output2 = [gr.Image(label='', show_download_button=True,
-                                            interactive=False, width=112, height=112, visible=True, format="png",
-                                            show_share_button=False, show_label=False) for _ in range(max_images)]
-
-            for i, o in enumerate(output2):
-                img_i = gr.Number(i, visible=False)
-                num_images.change(lambda i, n: gr.update(visible=(i < n)), [img_i, num_images], o, queue=False)
-                gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
-                                   fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
-                                   inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
-                                           height2, width2, steps2, cfg2, seed2], outputs=[o],
-                                   concurrency_limit=None, queue=False)
+
 
 
 demo.launch(show_api=False, max_threads=400)
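This commit is a refactor: the helper and inference functions deleted above are now imported from two new modules, handle_models and handlemodelradio, whose contents are not shown in this diff. Below is a minimal sketch of what handlemodelradio presumably exports, reassembled from the deleted helpers; the num_models constant and the all_models import are assumptions about how the module gets its configuration.

# handlemodelradio.py - speculative sketch; the actual file is not in this diff.
import random

import gradio as gr

from all_models import models  # assumption: the same model list app.py uses

num_models = 20  # assumption: mirrors howManyModelsToUse in app.py


def extend_choices(choices):
    # Pad the selection to exactly num_models entries with 'NA' placeholders.
    return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']


def update_imgbox(choices):
    # One image box per slot; boxes backed by an 'NA' placeholder are hidden.
    choices_plus = extend_choices(choices[:num_models])
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]


def random_choices():
    # Re-seed from the OS, then draw a fresh random sample of models.
    random.seed()
    return random.choices(models, k=num_models)

Note that random_choices keeps its zero-argument signature: app.py wires it as random_button.click(random_choices, None, model_choice), passing no inputs, so the module must reach the model list itself.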
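handle_models presumably carries the load_fn, infer, and gen_fn bodies deleted above. The sketch below is a condensed version under that assumption; reading HF_TOKEN from the environment and the simplified PNG filename (app.py's version also embedded a timestamp) are assumptions, everything else follows the deleted code.

# handle_models.py - speculative sketch; the actual file is not in this diff.
import asyncio
import os
from threading import RLock

import gradio as gr

from externalmod import gr_Interface_load, save_image, randomize_seed

HF_TOKEN = os.environ.get("HF_TOKEN")  # assumption: token comes from the env
lock = RLock()
models_load = {}


def load_fn(models):
    # Wrap each repo as a loaded Gradio interface; fall back to a no-op stub
    # so one broken model does not take down the whole grid.
    for model in models:
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                print(error)
                m = gr.Interface(lambda: None, ['text'], ['image'])
            models_load[model] = m


async def infer(model_str, prompt, nprompt="", height=0, width=0,
                steps=0, cfg=0, seed=-1, timeout=60):
    # Zero means "use the model default", so only forward non-zero values.
    kwargs = {}
    if height > 0: kwargs["height"] = height
    if width > 0: kwargs["width"] = width
    if steps > 0: kwargs["num_inference_steps"] = steps
    if cfg > 0: kwargs["guidance_scale"] = cfg
    theSeed = randomize_seed() if seed == -1 else seed
    kwargs["seed"] = theSeed
    # Run the blocking model call on a worker thread with a hard timeout.
    task = asyncio.create_task(asyncio.to_thread(
        models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt,
        **kwargs, token=HF_TOKEN))
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except Exception as e:
        if not task.done():
            task.cancel()
        raise Exception(f"infer failed: {model_str}") from e
    if result is not None and not isinstance(result, tuple):
        with lock:
            png_path = model_str.replace("/", "_") + "_" + str(theSeed) + ".png"
            return save_image(result, png_path, model_str, prompt, nprompt,
                              height, width, steps, cfg, theSeed)
    return None


def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
    # Gradio callbacks are synchronous, so drive the coroutine on its own loop.
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(
            infer(model_str, prompt, nprompt, height, width, steps, cfg, seed))
    except (Exception, asyncio.CancelledError) as e:
        raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
    finally:
        loop.close()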