"""Gradio app: authenticate a Hugging Face user, list their models, and build a
multi-model image-generation UI.

Module-level side effects (intentional — this is a launch script):
  * calls ``whoami``/``list_models`` against the HF Hub at import time,
  * raises ``ValueError`` if the user owns no models,
  * builds the Blocks UI and calls ``demo.launch(...)`` at the bottom.
"""
import asyncio
import os
from datetime import datetime
from threading import RLock

import gradio as gr
import pandas as pd
from huggingface_hub import HfApi, whoami

from all_models import models
from config import (
    howManyModelsToUse,
    num_models,
    max_images,
    inference_timeout,
    MAX_SEED,
    thePrompt,
    preSetPrompt,
    negPreSetPrompt,
)
from handle_models import load_fn, infer, gen_fn
from externalmod import gr_Interface_load, save_image, randomize_seed
from handlemodelradio import extend_choices, update_imgbox, random_choices

# NOTE(review): a pinned huggingface_hub version matters here — anything but
# huggingface_hub==0.26.2 reportedly results in a token error.

default_models = models[:num_models]

lock = RLock()

# If private or gated models aren't used, setting HF_TOKEN in the env is
# unnecessary.  ``or None`` collapses an empty-string value to None, matching
# the original conditional while reading the environment only once.
HF_TOKEN = os.environ.get("HF_TOKEN") or None

# --- Authenticate and fetch the logged-in user's models ---------------------
api = HfApi()
user_info = whoami(token=HF_TOKEN)
username = user_info["name"]

mymodels = list(api.list_models(author=username, token=HF_TOKEN))
model_ids = [m.modelId for m in mymodels]
if not model_ids:
    raise ValueError(f"No models found for user '{username}'")


def handle_model_selection(selected_models):
    """Return a human-readable summary of the checked model ids.

    Args:
        selected_models: list of model-id strings from the CheckboxGroup
            (may be empty when nothing is checked).

    Returns:
        A status string listing the selection, or a placeholder message
        when the list is empty.
    """
    if not selected_models:
        return "No models selected."
    return "✅ Selected models:\n" + "\n".join(selected_models)


def get_current_time():
    """Return the current local time formatted as ``yy-mm-dd HH:MM:SS``."""
    now = datetime.now()
    current_time = now.strftime("%y-%m-%d %H:%M:%S")
    return current_time


# Pre-load the inference endpoints for every known model.
load_fn(models)

# --- Build the Gradio UI ----------------------------------------------------
# NOTE(review): the nesting below is reconstructed from a whitespace-mangled
# source; it follows the conventional layout of this family of multi-model
# spaces — confirm against the original file if available.
with gr.Blocks(fill_width=True) as demo:
    with gr.Row():
        gr.Markdown(f"# ({username}) you are logged in")
        model_selector = gr.CheckboxGroup(
            choices=model_ids,
            value=model_ids,
            label="your models",
            interactive=True,
        )
        output_box = gr.Textbox(lines=10, label="Selected Models")
        model_selector.change(
            fn=handle_model_selection,
            inputs=model_selector,
            outputs=output_box,
        )

    with gr.Tab(str(num_models) + ' Models'):
        with gr.Column(scale=2):
            with gr.Group():
                txt_input = gr.Textbox(
                    label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1
                )
                with gr.Accordion("Advanced", open=False, visible=True):
                    with gr.Row():
                        neg_input = gr.Textbox(
                            label='Negative prompt:', value=negPreSetPrompt, lines=1
                        )
                    with gr.Row():
                        # 0 means "use the model's default" for every slider below.
                        width = gr.Slider(
                            label="Width",
                            info="If 0, the default value is used.",
                            maximum=1216, step=32, value=0,
                        )
                        height = gr.Slider(
                            label="Height",
                            info="If 0, the default value is used.",
                            maximum=1216, step=32, value=0,
                        )
                    with gr.Row():
                        steps = gr.Slider(
                            label="Number of inference steps",
                            info="If 0, the default value is used.",
                            maximum=100, step=1, value=0,
                        )
                        cfg = gr.Slider(
                            label="Guidance scale",
                            info="If 0, the default value is used.",
                            maximum=30.0, step=0.1, value=0,
                        )
                        seed = gr.Slider(
                            label="Seed",
                            info="Randomize Seed if -1.",
                            minimum=-1, maximum=MAX_SEED, step=1, value=-1,
                        )
                        seed_rand = gr.Button(
                            "Randomize Seed 🎲", size="sm", variant="secondary"
                        )
                        seed_rand.click(randomize_seed, None, [seed], queue=False)
            with gr.Row():
                gen_button = gr.Button(
                    f'Generate up to {int(num_models)} images',
                    variant='primary', scale=3, elem_classes=["butt"],
                )
                random_button = gr.Button(
                    f'Randomize Models', variant='secondary', scale=1
                )

        with gr.Column(scale=1):
            with gr.Group():
                with gr.Row():
                    # One thumbnail per default model; labels double as model ids.
                    output = [
                        gr.Image(
                            label=m,
                            show_download_button=True,
                            elem_classes=["image-monitor"],
                            interactive=False,
                            width=112,
                            height=112,
                            show_share_button=False,
                            format="png",
                            visible=True,
                        )
                        for m in default_models
                    ]
                    # Hidden textboxes carry the model id paired with each image.
                    current_models = [
                        gr.Textbox(m, visible=False) for m in default_models
                    ]
            # Wire each (model, image) pair to the generate button / prompt submit.
            for m, o in zip(current_models, output):
                gen_event = gr.on(
                    triggers=[gen_button.click, txt_input.submit],
                    fn=gen_fn,
                    inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed],
                    outputs=[o],
                    concurrency_limit=None,
                    queue=False,
                )

        with gr.Column(scale=4):
            with gr.Accordion('Model selection'):
                model_choice = gr.CheckboxGroup(
                    models,
                    label=f'Choose up to {int(num_models)} different models from the {len(models)} available!',
                    value=default_models,
                    interactive=True,
                )
                model_choice.change(update_imgbox, model_choice, output)
                model_choice.change(extend_choices, model_choice, current_models)
            random_button.click(random_choices, None, model_choice)

demo.launch(show_api=False, max_threads=400)

# The block below is DISABLED: it sits inside a string literal (a no-op
# expression), exactly as in the original source.  It sketches a two-pane
# "browse my Spaces / list a Space's files" UI that was never wired into
# ``demo`` above.  Preserved verbatim for reference.
'''
# --- Step 2: Fetch user's Spaces
spaces = list(api.list_spaces(author=username, token=HF_TOKEN))
space_df = pd.DataFrame([{"Space Name": f"{space.id.split('/')[-1]}", "Last Modified": space.lastModified,} for space in spaces])
def load_space_files(evt: gr.SelectData):
    clicked_html = evt.value
    space_id = clicked_html.split("data-space='")[1].split("'")[0]
    files = api.list_repo_files(repo_id=space_id, repo_type="space", token=HF_TOKEN)
    file_df = pd.DataFrame([{ "File": f"{file}" } for file in files])
    return file_df
# --- Step 4: Build Gradio interface
gr.Markdown(f"# Hugging Face Spaces for `{username}`")
with gr.Row():
    left_df = gr.Dataframe(value=space_df, label="Your Spaces (click a name)", interactive=False, datatype="str", max_rows=len(space_df), wrap=True )
    right_df = gr.Dataframe( value=pd.DataFrame(columns=["File"]), label="Files in Selected Space", interactive=False, wrap=True )
    left_df.select(fn=load_space_files, outputs=right_df)
'''