import gradio as gr
import os
from huggingface_hub import InferenceClient
from diffusers import DiffusionPipeline
import torch
import traceback

# Load the Hugging Face API token from the environment
HF_TOKEN = os.getenv("ohgoddamn")
USE_LOCAL = False  # default mode
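# One InferenceClient per model, created lazily in generate() and reused across requests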
client_cache = {}

# Models to test (replace with your own)
all_models = [
    "stabilityai/stable-diffusion-xl-base-1.0",
    "runwayml/stable-diffusion-v1-5",
    "Uthar/John6666_epicrealism-xl-v8kiss-sdxl"
]

# Local model loading (simplified, for demo; the pipeline is reloaded on every request)
def load_local_pipeline(model_id):
    # DiffusionPipeline reads the checkpoint's model_index.json and instantiates the
    # matching pipeline class, so both the SD 1.5 and SDXL entries in all_models load.
    pipe = DiffusionPipeline.from_pretrained(
        model_id, torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
    )
    return pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# Main generation logic
def generate(model_id, prompt, use_local):
    debug_log = ""
    try:
        if use_local:
            debug_log += f"🔧 Using local pipeline for: {model_id}\n"
            pipe = load_local_pipeline(model_id)
            image = pipe(prompt).images[0]
        else:
            debug_log += f"🌐 Using InferenceClient for: {model_id}\n"
            if model_id not in client_cache:
                client_cache[model_id] = InferenceClient(model=model_id, token=HF_TOKEN)
            image = client_cache[model_id].text_to_image(prompt)
        return image, debug_log + "\n✅ Success."
    except Exception as e:
        error_msg = traceback.format_exc()
        return None, debug_log + f"\n❌ Error:\n{error_msg}"

# Model API self-check: runs one real text_to_image request per model (slow and uses Inference API quota)
def check_model_access(models):
    results = ""
    for model in models:
        try:
            client = InferenceClient(model=model, token=HF_TOKEN)
            _ = client.text_to_image("test prompt")
            results += f"✅ {model} is working.\n"
        except Exception as e:
            results += f"❌ {model} failed: {str(e).splitlines()[0]}\n"
    return results

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# 🧪 Stable Diffusion API Tester")
    with gr.Row():
        model = gr.Dropdown(choices=all_models, label="Model", value=all_models[0])
        use_local = gr.Checkbox(label="Use Local Diffusers Instead of API", value=USE_LOCAL)
    prompt = gr.Textbox(label="Prompt", value="a cyberpunk cat playing guitar in Tokyo")
    generate_btn = gr.Button("Generate")
    image_out = gr.Image(label="Generated Image")
    debug_out = gr.Textbox(label="Debug Output", lines=10)
    with gr.Accordion("Self-Check: API Model Access", open=False):
        check_btn = gr.Button("Check All Models")
        check_results = gr.Textbox(label="Model API Status", lines=10)

    generate_btn.click(generate, inputs=[model, prompt, use_local], outputs=[image_out, debug_out])
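    # gr.State(all_models) passes the static model list to check_model_access as a plain Python list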
    check_btn.click(check_model_access, inputs=[gr.State(all_models)], outputs=[check_results])

demo.launch()