import numpy as np
import tensorflow as tf

import gradio as gr
from huggingface_hub import from_pretrained_keras

# Load the pretrained GauGAN generator from the Hugging Face Hub.
model = from_pretrained_keras("keras-io/GauGAN-Image-generation")


def predict(image_file, segmentation_png, bitmap_img):
    # Decode the photo and the segmentation map; decode_image auto-detects
    # PNG/JPEG, so either upload format works. Pixel values are rescaled
    # from [0, 255] to [-1, 1], the range the generator was trained on.
    image = tf.io.decode_image(
        tf.io.read_file(image_file), channels=3, expand_animations=False
    )
    image = tf.cast(image, tf.float32) / 127.5 - 1

    segmentation_file = tf.io.decode_image(
        tf.io.read_file(segmentation_png), channels=3, expand_animations=False
    )
    segmentation_file = tf.cast(segmentation_file, tf.float32) / 127.5 - 1

    # The label bitmap stores one integer class index per pixel; drop the
    # channel dimension so it can be one-hot encoded below.
    label_file = tf.io.decode_image(tf.io.read_file(bitmap_img))
    label_file = tf.squeeze(label_file)

    image_list = [segmentation_file, image, label_file]

    # Take a random 256x256 crop of all three inputs, matching the
    # resolution the generator expects; inputs are assumed to be at
    # least 256 pixels on each side.
    crop_size = tf.convert_to_tensor((256, 256))
    image_shape = tf.shape(image)[:2]
    margins = image_shape - crop_size

    # maxval is exclusive, so add 1 to allow a zero offset when an input
    # dimension is exactly 256 pixels.
    y1 = tf.random.uniform(shape=(), maxval=margins[0] + 1, dtype=tf.int32)
    x1 = tf.random.uniform(shape=(), maxval=margins[1] + 1, dtype=tf.int32)
    y2 = y1 + crop_size[0]
    x2 = x1 + crop_size[1]

    cropped_images = [img[y1:y2, x1:x2] for img in image_list]

    # Add a batch dimension, and one-hot encode the label map into the
    # 12 classes the model was trained on.
    final_img_list = [
        tf.expand_dims(cropped_images[0], axis=0),
        tf.expand_dims(cropped_images[1], axis=0),
        tf.expand_dims(tf.one_hot(cropped_images[2], 12), axis=0),
    ]

    # Sample a latent vector; stddev=2.0 follows the Keras GauGAN example.
    latent_vector = tf.random.normal(shape=(1, 256), mean=0.0, stddev=2.0)

    # Generate a fake image. Only the latent vector and the one-hot label
    # map condition the generator; the photo and segmentation crops are
    # kept for parity with the training pipeline.
    fake_image = model.predict([latent_vector, final_img_list[2]])
    fake_img = tf.squeeze(fake_image, axis=0)

    # Map the generator output from [-1, 1] back to [0, 1] for display.
    return np.array((fake_img + 1) / 2)


# Input components; type="filepath" hands each upload to predict() as a path
ground_truth = gr.Image(type="filepath", label="Ground Truth - Real Image (jpg)")
segmentation = gr.Image(type="filepath", label="Corresponding Segmentation (png)")
bitmap = gr.Image(
    type="filepath", label="Corresponding bitmap image (bmp)", image_mode="L"
)
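# image_mode="L" asks Gradio to keep the label bitmap single-channel, so
# each pixel value is a class index rather than an RGB triple.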

examples = [
    [
        "facades_data/cmp_b0010.jpg",
        "facades_data/cmp_b0010.png",
        "facades_data/cmp_b0010.bmp",
    ],
    [
        "facades_data/cmp_b0020.jpg",
        "facades_data/cmp_b0020.png",
        "facades_data/cmp_b0020.bmp",
    ],
    [
        "facades_data/cmp_b0030.jpg",
        "facades_data/cmp_b0030.png",
        "facades_data/cmp_b0030.bmp",
    ],
    [
        "facades_data/cmp_b0040.jpg",
        "facades_data/cmp_b0040.png",
        "facades_data/cmp_b0040.bmp",
    ],
    [
        "facades_data/cmp_b0050.jpg",
        "facades_data/cmp_b0050.png",
        "facades_data/cmp_b0050.bmp",
    ],
]
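# Example triplets from the CMP Facades dataset; the relative paths assume
# a facades_data/ folder ships alongside this script.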

title = "GauGAN For Conditional Image Generation"
description = "Upload an Image or take one from examples to generate realistic images that are conditioned on cue images and segmentation maps"

# Wire the inputs, model, and examples into a Gradio Interface
demo = gr.Interface(
    fn=predict,
    inputs=[ground_truth, segmentation, bitmap],
    outputs=gr.Image(type="numpy", label="Generated - Conditioned Images"),
    examples=examples,
    flagging_mode="never",
    analytics_enabled=False,
    title=title,
    description=description,
    article="<center>Space By: <u><a href='https://github.com/robotjellyzone'><b>Kavya Bisht</b></a></u> \n Based on <a href='https://keras.io/examples/generative/gaugan/'><b>this notebook</b></a></center>",
)

if __name__ == "__main__":
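    # Queue requests so concurrent users are served one at a time, and
    # surface tracebacks in the console with debug=True.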
    demo.queue()
    demo.launch(debug=True)