##########################################################
# 0. Environment setup and library imports
##########################################################

import os
import cv2
import numpy as np
import torch
import gradio as gr
import spaces  # Hugging Face Spaces helper; provides the @spaces.GPU decorator

from glob import glob
from typing import Tuple

from PIL import Image
from gradio_imageslider import ImageSlider
from torchvision import transforms
import requests
from io import BytesIO
import zipfile

# Transformers
from transformers import (
    AutoConfig,
    AutoModelForImageSegmentation,
)
# Hugging Face Hub
from huggingface_hub import hf_hub_download


##########################################################
# 1. Config and from_config() initialization
##########################################################

# 1) Load only the config first
config = AutoConfig.from_pretrained(
    "zhengpeng7/BiRefNet",  # 예시
    trust_remote_code=True
)

# 2) Attach a dummy get_text_config method to the config (reports tie_word_embeddings=False)
def dummy_get_text_config(decoder=True):
    return type("DummyTextConfig", (), {"tie_word_embeddings": False})()

config.get_text_config = dummy_get_text_config
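# Why this works (our reading; transformers internals vary by version):
# during model init, tie_weights() consults config.get_text_config() for
# tie_word_embeddings. BiRefNet's custom config lacks get_text_config, which
# crashes; the stub above reports tie_word_embeddings=False so tying is skipped.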

# 3) Build only the model architecture (weights are still randomly initialized here)
birefnet = AutoModelForImageSegmentation.from_config(config, trust_remote_code=True)
birefnet.eval()
device = "cuda" if torch.cuda.is_available() else "cpu"
birefnet.to(device)
birefnet.half()

##########################################################
# 2. Download & load the model weights
##########################################################

# Download the safetensors or bin file from huggingface_hub
# (change repo_id, filename, etc. to match your actual setup; note that
# hf_hub_download does not take a trust_remote_code argument)
weights_path = hf_hub_download(
    repo_id="zhengpeng7/BiRefNet",       # example
    filename="model.safetensors",        # or "pytorch_model.bin"
)
print("Downloaded weights to:", weights_path)

# Load the state_dict (note: torch.load cannot read .safetensors files)
print("Loading BiRefNet weights from HF Hub file:", weights_path)
if weights_path.endswith(".safetensors"):
    from safetensors.torch import load_file
    state_dict = load_file(weights_path, device="cpu")
else:
    state_dict = torch.load(weights_path, map_location="cpu")
missing, unexpected = birefnet.load_state_dict(state_dict, strict=False)
print("[Info] Missing keys:", missing)
print("[Info] Unexpected keys:", unexpected)
torch.cuda.empty_cache()
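# strict=False tolerates benign key mismatches, but a long "missing" list
# usually means the wrong checkpoint was downloaded. Optional smoke test
# (a sketch; 256 is an arbitrary size divisible by the model's stride of 32):
# with torch.inference_mode():
#     _probe = torch.zeros(1, 3, 256, 256, device=device)
#     _ = birefnet(_probe.half() if device == "cuda" else _probe)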


##########################################################
# 3. Image post-processing functions
##########################################################
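# The FB_blur_fusion_* helpers below estimate clean foreground colors from a
# predicted alpha mask using iterated box blurs. To our reading this follows
# the blur-fusion approximation of fast foreground color estimation (cf.
# Germer et al., "Fast Multi-Level Foreground Estimation", 2020); the
# attribution is our assumption, not stated in the original code.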

def refine_foreground(image, mask, r=90):
    if mask.size != image.size:
        mask = mask.resize(image.size)
    image_np = np.array(image) / 255.0
    mask_np = np.array(mask) / 255.0
    estimated_foreground = FB_blur_fusion_foreground_estimator_2(image_np, mask_np, r=r)
    image_masked = Image.fromarray((estimated_foreground * 255.0).astype(np.uint8))
    return image_masked

def FB_blur_fusion_foreground_estimator_2(image, alpha, r=90):
    # Two-pass estimation: a coarse pass with a large radius, then a refinement pass (r=6)
    alpha = alpha[:, :, None]
    F, blur_B = FB_blur_fusion_foreground_estimator(image, image, image, alpha, r)
    return FB_blur_fusion_foreground_estimator(image, F, blur_B, alpha, r=6)[0]

def FB_blur_fusion_foreground_estimator(image, F, B, alpha, r=90):
    if isinstance(image, Image.Image):
        image = np.array(image) / 255.0
    blurred_alpha = cv2.blur(alpha, (r, r))[:, :, None]
    blurred_FA = cv2.blur(F * alpha, (r, r))
    blurred_F = blurred_FA / (blurred_alpha + 1e-5)  # local mean foreground color
    blurred_B1A = cv2.blur(B * (1 - alpha), (r, r))
    blurred_B = blurred_B1A / ((1 - blurred_alpha) + 1e-5)  # local mean background color
    # Correct the local means toward the observed image, then clamp to [0, 1]
    F = blurred_F + alpha * (image - alpha * blurred_F - (1 - alpha) * blurred_B)
    F = np.clip(F, 0, 1)
    return F, blurred_B

class ImagePreprocessor:
    def __init__(self, resolution: Tuple[int, int] = (1024, 1024)) -> None:
        self.transform_image = transforms.Compose([
            transforms.Resize(resolution),  # expects (height, width)
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),  # ImageNet stats
        ])

    def proc(self, image: Image.Image) -> torch.Tensor:
        return self.transform_image(image)
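# Usage sketch (hypothetical sizes): turn a PIL image into a normalized
# (1, 3, H, W) batch ready for the model:
#   batch = ImagePreprocessor((1024, 1024)).proc(img).unsqueeze(0).to(device)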


##########################################################
# 4. Example settings and misc
##########################################################

usage_to_weights_file = {
    'General': 'BiRefNet',
    'General-HR': 'BiRefNet_HR',
    'General-Lite': 'BiRefNet_lite',
    'General-Lite-2K': 'BiRefNet_lite-2K',
    'Matting': 'BiRefNet-matting',
    'Portrait': 'BiRefNet-portrait',
    'DIS': 'BiRefNet-DIS5K',
    'HRSOD': 'BiRefNet-HRSOD',
    'COD': 'BiRefNet-COD',
    'DIS-TR_TEs': 'BiRefNet-DIS5K-TR_TEs',
    'General-legacy': 'BiRefNet-legacy'
}

examples_image = [[path, "1024x1024", "General"] for path in glob('examples/*')]
examples_text = [[url, "1024x1024", "General"] for url in [
    "https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg"
]]
examples_batch = [[file, "1024x1024", "General"] for file in glob('examples/*')]

descriptions = (
    "Upload a picture and our model will extract a highly accurate segmentation of the subject in it.\n"
    "The resolution used in our training was `1024x1024`, which is suggested for good results! "
    "`2048x2048` is suggested for BiRefNet_HR.\n"
    "Our code can be found at https://github.com/ZhengPeng7/BiRefNet.\n"
    "We also maintain the HF model of BiRefNet at https://huggingface.co/ZhengPeng7/BiRefNet for easier access."
)

##########################################################
# 5. Inference function (reuses the already-loaded birefnet model)
##########################################################

@spaces.GPU
def predict(images, resolution, weights_file):
    # weights_file is ignored here; the birefnet loaded above is always used
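    # To actually honor the selection, the weights could be re-fetched per
    # request — a sketch, assuming the sibling repos share this file layout:
    #   repo = f"ZhengPeng7/{usage_to_weights_file[weights_file]}"
    #   path = hf_hub_download(repo_id=repo, filename="model.safetensors")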
    assert images is not None, 'Images cannot be None.'

    # Parse "WxH" and snap both sides down to multiples of 32 (the model's stride)
    try:
        w, h = map(int, resolution.strip().split('x'))
        w, h = w // 32 * 32, h // 32 * 32
    except (AttributeError, ValueError):
        w, h = 1024, 1024
    resolution_tuple = (h, w)  # torchvision's Resize expects (height, width)
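    # e.g. "1000x700" -> w = 1000 // 32 * 32 = 992, h = 700 // 32 * 32 = 672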

    # Batch mode if the input is a list
    if isinstance(images, list):
        is_batch = True
        outputs, save_paths = [], []
        save_dir = 'preds-BiRefNet'
        os.makedirs(save_dir, exist_ok=True)
    else:
        images = [images]  # wrap a single image so one loop handles both cases
        is_batch = False

    for idx, image_src in enumerate(images):
        # File path or URL
        if isinstance(image_src, str):
            if os.path.isfile(image_src):
                image_ori = Image.open(image_src)
            else:
                resp = requests.get(image_src, timeout=30)  # timeout guards against hung downloads
                image_ori = Image.open(BytesIO(resp.content))
        # numpy array → PIL
        elif isinstance(image_src, np.ndarray):
            image_ori = Image.fromarray(image_src)
        else:
            image_ori = image_src.convert('RGB')

        # Preprocessing
        preproc = ImagePreprocessor(resolution_tuple)
        image_proc = preproc.proc(image_ori.convert('RGB')).unsqueeze(0).to(device)
        if device == "cuda":
            image_proc = image_proc.half()  # match the model's half precision

        # Inference: the model returns multi-scale predictions; keep the last (finest)
        with torch.inference_mode():
            preds = birefnet(image_proc)[-1].sigmoid().cpu()
        pred_mask = preds[0].squeeze()

        # Post-processing: refine foreground colors, then attach the mask as alpha
        pred_pil = transforms.ToPILImage()(pred_mask)
        image_masked = refine_foreground(image_ori, pred_pil)
        image_masked.putalpha(pred_pil.resize(image_ori.size))

        if is_batch:
            fbase = (os.path.splitext(os.path.basename(image_src))[0] if isinstance(image_src, str) else f"img_{idx}")
            outpath = os.path.join(save_dir, f"{fbase}.png")
            image_masked.save(outpath)
            save_paths.append(outpath)
            outputs.append(image_masked)
        else:
            outputs = [image_masked, image_ori]  # (result, original) pair for the ImageSlider

        torch.cuda.empty_cache()

    if is_batch:
        zippath = os.path.join(save_dir, f"{save_dir}.zip")
        with zipfile.ZipFile(zippath, 'w') as zipf:
            for fpath in save_paths:
                zipf.write(fpath, os.path.basename(fpath))
        return outputs, zippath
    else:
        return outputs

##########################################################
# 6. Gradio UI
##########################################################

css = """
body {
    background: linear-gradient(135deg, #667eea, #764ba2);
    font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
    color: #333;
    margin: 0;
    padding: 0;
}
.gradio-container {
    background: rgba(255, 255, 255, 0.95);
    border-radius: 15px;
    padding: 30px 40px;
    box-shadow: 0 8px 30px rgba(0, 0, 0, 0.3);
    margin: 40px auto;
    max-width: 1200px;
}
.gradio-container h1 {
    color: #333;
    text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.2);
}
.fillable { 
    width: 95% !important; 
    max-width: unset !important;
}
#examples_container {
    margin: auto;
    width: 90%;
}
#examples_row {
    justify-content: center;
}
.sidebar {
    background: rgba(255, 255, 255, 0.98);
    border-radius: 10px;
    padding: 20px;
    box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
}
button, .btn {
    background: linear-gradient(90deg, #ff8a00, #e52e71);
    border: none;
    color: #fff;
    padding: 12px 24px;
    text-transform: uppercase;
    font-weight: bold;
    letter-spacing: 1px;
    border-radius: 5px;
    cursor: pointer;
    transition: transform 0.2s ease-in-out;
}
button:hover, .btn:hover {
    transform: scale(1.05);
}
"""

title_html = """
<h1 align="center" style="margin-bottom: 0.2em;">BiRefNet Demo (No Tie-Weights Crash)</h1>
<p align="center" style="font-size:1.1em; color:#555;">
    Using <code>from_config()</code> + local <code>state_dict</code> or <code>hf_hub_download</code> to bypass tie_weights issues
</p>
"""

with gr.Blocks(css=css, title="BiRefNet Demo") as demo:
    gr.Markdown(title_html)
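    gr.Markdown(descriptions)  # usage notes defined in section 4 (assumed intended for display here)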
    with gr.Tabs():
        with gr.Tab("Image"):
            with gr.Row():
                with gr.Column(scale=1):
                    image_input = gr.Image(type='pil', label='Upload an Image')
                    resolution_input = gr.Textbox(lines=1, placeholder="e.g., 1024x1024", label="Resolution")
                    weights_radio = gr.Radio(list(usage_to_weights_file.keys()), value="General", label="Weights")
                    predict_btn = gr.Button("Predict")
                with gr.Column(scale=2):
                    output_slider = ImageSlider(label="Result", type="pil")
            gr.Examples(examples=examples_image, inputs=[image_input, resolution_input, weights_radio], label="Examples")

        with gr.Tab("Text"):
            with gr.Row():
                with gr.Column(scale=1):
                    image_url = gr.Textbox(label="Paste an Image URL")
                    resolution_input_text = gr.Textbox(lines=1, placeholder="e.g., 1024x1024", label="Resolution")
                    weights_radio_text = gr.Radio(list(usage_to_weights_file.keys()), value="General", label="Weights")
                    predict_btn_text = gr.Button("Predict")
                with gr.Column(scale=2):
                    output_slider_text = ImageSlider(label="Result", type="pil")
            gr.Examples(examples=examples_text, inputs=[image_url, resolution_input_text, weights_radio_text], label="Examples")

        with gr.Tab("Batch"):
            with gr.Row():
                with gr.Column(scale=1):
                    file_input = gr.File(label="Upload Multiple Images", type="filepath", file_count="multiple")
                    resolution_input_batch = gr.Textbox(lines=1, placeholder="e.g., 1024x1024", label="Resolution")
                    weights_radio_batch = gr.Radio(list(usage_to_weights_file.keys()), value="General", label="Weights")
                    predict_btn_batch = gr.Button("Predict")
                with gr.Column(scale=2):
                    output_gallery = gr.Gallery(label="Results", scale=1)
                    zip_output = gr.File(label="Zip Download")
            gr.Examples(examples=examples_batch, inputs=[file_input, resolution_input_batch, weights_radio_batch], label="Examples")

    gr.Markdown("<p align='center'>Model by <a href='https://huggingface.co/ZhengPeng7/BiRefNet'>ZhengPeng7/BiRefNet</a></p>")

    # Wire up events
    predict_btn.click(
        fn=predict,
        inputs=[image_input, resolution_input, weights_radio],
        outputs=output_slider
    )
    predict_btn_text.click(
        fn=predict,
        inputs=[image_url, resolution_input_text, weights_radio_text],
        outputs=output_slider_text
    )
    predict_btn_batch.click(
        fn=predict,
        inputs=[file_input, resolution_input_batch, weights_radio_batch],
        outputs=[output_gallery, zip_output]
    )

if __name__ == "__main__":
    demo.launch(share=False, debug=True)