import gradio as gr

from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    T5ForConditionalGeneration,
    T5Tokenizer,
    pipeline,
)

# 1. GENERATE SUMMARY
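# Load the BART summarization checkpoint once at startup; generate_summary reuses it for every request.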
tokenizer = AutoTokenizer.from_pretrained("suriya7/bart-finetuned-text-summarization")
model = AutoModelForSeq2SeqLM.from_pretrained("suriya7/bart-finetuned-text-summarization")

def generate_summary(text):
    # Tokenize the input (truncated to the model's 1024-token limit) and
    # generate an up-to-100-token summary with greedy decoding.
    inputs = tokenizer([text], max_length=1024, return_tensors='pt', truncation=True)
    summary_ids = model.generate(inputs['input_ids'], max_new_tokens=100, do_sample=False)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary



# 2. TRANSLATE FUNCTION
t5_tokenizer = T5Tokenizer.from_pretrained('t5-small')
t5_model = T5ForConditionalGeneration.from_pretrained('t5-small')

def translate_text(text_to_translate, original_language, destination_language):
    # T5 expects a task prefix such as "translate English to German: <text>".
    input_text = "translate " + original_language + " to " + destination_language + ": " + text_to_translate

    input_ids = t5_tokenizer.encode(input_text, return_tensors='pt')

    # Allow longer outputs than the default generation length so translations are not cut off.
    outputs = t5_model.generate(input_ids, max_new_tokens=256)
    output_text = t5_tokenizer.decode(outputs[0], skip_special_tokens=True)

    return output_text
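

# 3. FILL MASK FUNCTION
# Sketch: the UI below has a "Sentence fill mask" accordion but the original script
# ships no backing function. This uses the generic Hugging Face fill-mask pipeline
# with an assumed checkpoint ("bert-base-uncased"); swap in any masked-language model.
fill_mask_pipeline = pipeline("fill-mask", model="bert-base-uncased")

def fill_sentence(sentence_with_mask):
    # The input must contain the model's mask token (for BERT: "[MASK]").
    # The pipeline returns candidates ranked by score; keep the top completion.
    predictions = fill_mask_pipeline(sentence_with_mask)
    return predictions[0]['sequence']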



# 4. QUESTION ANSWERING FUNCTION
# Load the extractive QA pipeline once at startup instead of rebuilding it on every call.
qa_model = pipeline("question-answering", model="timpal0l/mdeberta-v3-base-squad2")

def question_answering(question, context):
    solution = qa_model(question=question, context=context)
    return solution['answer']



# 5. PARAPHRASING FUNCTION
paraphrasing_tokenizer = AutoTokenizer.from_pretrained("vngrs-ai/VBART-Large-Paraphrasing", model_input_names=['input_ids', 'attention_mask'])
paraphrasing_model = AutoModelForSeq2SeqLM.from_pretrained("vngrs-ai/VBART-Large-Paraphrasing")

def paraphrasing(text):
    # Use the VBART paraphrasing tokenizer/model (not the BART summarization ones above)
    # and strip special tokens from the decoded output.
    token_input = paraphrasing_tokenizer(text, return_tensors="pt")  # .to('cuda') if a GPU is available
    outputs = paraphrasing_model.generate(**token_input)
    return paraphrasing_tokenizer.decode(outputs[0], skip_special_tokens=True)




 
with gr.Blocks() as demo:
    gr.Markdown("My AI interface")
    with gr.Tab("Single models"):
        # 1. GENERATE SUMMARY
        with gr.Accordion("Text summarization"):
            gr.Markdown("Single model summarization using BART model")
            text_to_summarize = gr.Textbox(label="Text to summarize")
            summary_output = gr.Textbox(label="Summary")
            summarize_btn = gr.Button("Summarize")


        # 2. TRANSLATE FUNCTION
        with gr.Accordion("Text translation", open=False):
            gr.Markdown("Single model translation using GOOGLE T5 Base model")
            text_to_translate = gr.Textbox(label="Text to translate")
            original_language = gr.Textbox(label="Original language (write the English name, e.g. English)")
            destination_language = gr.Textbox(label="Destination language (write the English name, e.g. German)")
            translate_output = gr.Textbox(label="Translation")
            translate_btn = gr.Button("Translate")


        # 3. FILL MASK
        with gr.Accordion("Sentence fill mask", open=False):
            gr.Markdown("Single model fill-mask using a BERT masked language model")
            sentence_to_fill = gr.Textbox(label="Sentence containing a [MASK] token")
            filled_sentence = gr.Textbox(label="Filled sentence")
            fill_button = gr.Button("Fill sentence")


        # 4. QUESTION ANSWERING 
        with gr.Accordion("Question answering", open=False):
            gr.Markdown("Single model question answering using GOOGLE mdeberta model")
            question = gr.Textbox(label="Question")
            context = gr.Textbox(label="Context for question")
            answer = gr.Textbox(label="Answer to question")
            ask_question_button = gr.Button("Ask question")


        # 5. PARAPHRASING
        with gr.Accordion("Paraphrasing", open=False):
            gr.Markdown("Single model paraphrasing using the VBART model")
            sentence_to_rephrase = gr.Textbox(label="Text to rephrase")
            rephrased_sentence = gr.Textbox(label="Rephrased sentence")
            paraphrase_button = gr.Button("Rephrase sentence")


    with gr.Tab("Multi models"):
        with gr.Row():
            print("No multi models yet..")
 

    # Button listeners
    summarize_btn.click(generate_summary, inputs=text_to_summarize, outputs=summary_output) # 1. GENERATE SUMMARY
    translate_btn.click(translate_text, inputs=[text_to_translate, original_language, destination_language], outputs=translate_output) # 2. TRANSLATE FUNCTION
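    # 3. FILL MASK (wired to the sketched fill_sentence helper above; assumes a single [MASK] token)
    fill_button.click(fill_sentence, inputs=sentence_to_fill, outputs=filled_sentence)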

    ask_question_button.click(question_answering, inputs=[question,context], outputs=answer) # 4. QUESTION ANSWERING
    paraphrase_button.click(paraphrasing, inputs=sentence_to_rephrase, outputs=rephrased_sentence) # 5. PARAPHRASING


 
demo.launch()