// IMPORT LIBRARIES & TOOLS
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1'; // Hugging Face Transformers.js
import { Client } from 'https://cdn.jsdelivr.net/npm/@gradio/client/dist/index.min.js'; // "@gradio/client"

// skip the local model check so models load from the Hugging Face Hub
env.allowLocalModels = false;

// GLOBAL VARIABLES

var promptArray = [] // list of filled-in prompts generated from the blanks
var PREPROMPT = `Please continue each sentence, filling in [MASK] with your own words:`

var PROMPT_INPUT = `` // a field for writing or changing a text value
var promptField // an HTML element to hold the prompt
var outText, outPics, outInfo // HTML elements to hold the results
var blanksArray = [] // an empty list to store the input fields whose values modify the prompt

// e.g. ["woman", "man", "non-binary person"]

// // RUN IMAGE CAPTIONER //// W-I-P
// async function captionTask(imgURL){
//   // PICK MODEL
//   let MODEL = 'Xenova/vit-gpt2-image-captioning'
//   const pipe = await pipeline('image-to-text', MODEL)

//   const out = await pipe(imgURL)

//   return JSON.stringify(out, null, 2) // return instead of reassigning the const above
// }

// GENERIC API CALL HANDLING
async function post(request) {
  try {
    const response = await fetch(request);
    const result = await response.json();
    console.log("Success:", result);
    return result; // hand the parsed JSON back to the caller
  } catch (error) {
    console.error("Error:", error);
  }
}
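
// Usage sketch for post(), with a hypothetical endpoint and payload shape
// (any Gradio Space REST route that accepts {"data": [...]} would fit):
// const req = new Request('https://example-space.hf.space/call/infer', {
//   method: 'POST',
//   headers: { 'Content-Type': 'application/json' },
//   body: JSON.stringify({ data: ['a sample prompt'] })
// })
// post(req).then(result => console.log(result))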

async function textImgTask(input){
  console.log('text-to-image task initiated')

  let MODEL = "multimodalart/FLUX.1-merged"

  let INPUT = input

  const client = await Client.connect(MODEL);
  const result = await client.predict("/infer", {
    prompt: INPUT,
    seed: 0,
    randomize_seed: true,
    width: 256,
    height: 256,
    guidance_scale: 1,
    num_inference_steps: 1,
  });

  console.log(result.data);

  let OUT = result.data[0] // first entry should be the image file data (the Space also returns the seed)
  
  // const URL = 'https://multimodalart-flux-1-merged.hf.space/call/infer'
  // const seed = 0
  // const randomizeSeed = true
  // const width = 1024
  // const height = 1024
  // const guidanceScale = 3.5
  // const inferenceSteps = 8
  
  // const options = [ prompt[0], seed, randomizeSeed, width, height, guidanceScale, inferenceSteps ]

  // const request = new Request(URL,{
  //     method: "POST",
  //     body: JSON.stringify({"data": options }),
  //     headers: { "Content-Type": "application/json" }
  // })

  // let out = post(request)

  // console.log(out)
  // console.log("text-to-image task completed")

  return OUT
}
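
// Usage sketch (hypothetical prompt; the Space expects a single prompt string):
// textImgTask('a portrait of a doctor').then(img => console.log(img))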



// RUN TEXT-GEN MODEL

// async function textGenTask(pre, prompt, blanks){
async function textGenTask(pre, prompts){
  console.log('text-gen task initiated')
  
  // Create a combined prompt array: the preprompt followed by all the filled-in prompts
  // let promptArray = []
  let PROMPTS = [pre, ...prompts] // adds the preprompt to the front of the prompts list (string .concat() would mash them into one string)
  console.log(PROMPTS)

  // // Fill in blanks from our sample prompt and make new prompts using our variable list 'blanksArray'
  // blanks.forEach(b => {
  //   let p = prompt.replace('[BLANK]', b) // replace the string segment with an item from the blanksArray
  //   promptArray.push(p) // add the new prompt to the list we created
  // })

  // combine into a single input string for the model
  let INPUT = PROMPTS.join(' ')
  console.log(INPUT)
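  // e.g. with blanks ['doctor', 'nurse'], INPUT becomes (hypothetical values):
  // "Please continue each sentence, filling in [MASK] with your own words: The doctor was a [MASK]. The nurse was a [MASK]."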
  // let INPUT = pre + prompt // simple concatenated input
  // let INPUT = prompt // basic prompt input

  // PICK MODEL 
  let MODEL = 'Xenova/flan-alpaca-large'

  // MODELS LIST
  // - Xenova/bloom-560m
  // - Xenova/distilgpt2
  // - Xenova/LaMini-Cerebras-256M
  // - Xenova/gpt-neo-125M // not working well
  // - Xenova/llama2.c-stories15M // only fairytales
  // - webml/TinyLlama-1.1B-Chat-v1.0
  // - Xenova/TinyLlama-1.1B-Chat-v1.0
  // - Xenova/flan-alpaca-large //text2text

  
  // const pipe = await pipeline('text-generation', MODEL) //different task type, also for text generation
  const pipe = await pipeline('text2text-generation', MODEL)

  var hyperparameters = { max_new_tokens: 300, top_k: 30, repetition_penalty: 1.5 }
    // setting hyperparameters
    // max_new_tokens: 256, top_k: 50, temperature: 0.7, do_sample: true, no_repeat_ngram_size: 2, num_return_sequences: 2 (must be 1?)

  // could change the model run to iterate over each prompt separately (likely more expensive?)
  // promptArray.forEach(async i => {} //this was a loop to wrap model run multiple times
  
  // RUN INPUT THROUGH MODEL
  var out = await pipe(INPUT, hyperparameters)

  console.log(out)
  console.log('text-gen task completed')
  
  // PARSE RESULTS as a list of outputs, two different ways depending on the model
  
  // parsing of output
  // await out.forEach(o => {
  //   console.log(o)
  //   OUTPUT_LIST.push(o.generated_text)
  // })
  
  // alternate format for parsing, for chat model type
  // await out.choices.forEach(o => {
  //   console.log(o)
  //   OUTPUT_LIST.push(o.message.content)
  // })

  let OUTPUT = out[0].generated_text // a single string now, not a list
  // OUTPUT_LIST.push(out[0].generated_text)

  console.log(OUTPUT)
  console.log('text-gen parsing complete')

  return OUTPUT
  // return await out
}
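
// Usage sketch (hypothetical values):
// textGenTask(PREPROMPT, ['The doctor was a [MASK].']).then(text => console.log(text))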

// RUN FILL-IN MODEL
async function fillInTask(input){
  console.log('fill-in task initiated')

  // MODELS LIST
  // - Xenova/bert-base-uncased

  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');
  
  var out = await pipe(input);

  console.log(out) // yields { score, sequence, token, token_str } for each result

  let OUTPUT_LIST = [] // a blank array to store the results from the model

  // parsing of output
  out.forEach(o => {
    console.log(o)
    OUTPUT_LIST.push(o.sequence) // keep only the full predicted sentence from each result
  })
  
  console.log(OUTPUT_LIST)
  console.log('fill-in task completed')

  // return await out
  return OUTPUT_LIST
}
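
// Usage sketch (hypothetical prompt): BERT proposes its top candidates for [MASK]:
// fillInTask('The doctor was a [MASK].').then(list => console.log(list))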

//// p5.js Instance

new p5(function (p5){
  p5.setup = function(){
      p5.noCanvas()
      console.log('p5 instance loaded')
      makeTextModules()
      makeInputModule()
      makeOutputModule()
    }

  function makeTextModules(){
    const introDiv = p5.createDiv().class('module').id('intro')
    p5.createElement('h1','p5.js Critical AI Prompt Battle').parent(introDiv)
    p5.createP(`What do AI models really 'know' about you — about your community, your language, your culture? What do they 'know' about different concepts, ideas, and worldviews?`).parent(introDiv)
    p5.createP(`This tool lets you compare the results of multiple AI-generated texts and images side-by-side, using blanks you fill in to explore variations on a single prompt. For more info on prompt programming and critical AI, see <a href="">[TUTORIAL-LINK]</a>.`).parent(introDiv)

    const instructDiv = p5.createDiv().id('instructions').parent(introDiv)
    p5.createElement('h4', 'INSTRUCTIONS').class('header').parent(instructDiv)
    p5.createP(`Write your prompt using [BLANK] and [MASK], where [BLANK] will be the variation you choose and fill in below, and [MASK] is a variation that the model will complete.`).parent(instructDiv)
    p5.createP(`For best results, try to phrase your prompt so that [BLANK] and [MASK] highlight the qualities you want to investigate. See <a href="">[EXAMPLES]</a>.`).parent(instructDiv)
  }

  function makeInputModule(){
    const inputDiv = p5.createDiv().class('module', 'main').id('inputDiv')
    p5.createElement('h4', 'INPUT').parent(inputDiv)
    p5.createElement('h3', 'Enter your prompt').class('header').parent(inputDiv)
    p5.createP(`Write your prompt in the box below using one [BLANK] and one [MASK].`).parent(inputDiv)
    p5.createP(`e.g. write "The [BLANK] was a [MASK]." and fill in the three blanks below with three occupations.`).parent(inputDiv)
    p5.createP(`(This is taken from an actual example used to test GPT-3; see Brown et al. 2020, §6.2.1.)`).class('caption').parent(inputDiv)
    promptField = p5.createInput(PROMPT_INPUT).parent(inputDiv) // turns the string into an input; now access the text via PROMPT_INPUT.value()
    promptField.size(700)
    p5.createP(promptField.attribute('label')).parent(inputDiv)
    promptField.addClass("prompt")

    
    p5.createElement('h3', 'Fill in your blanks').class('header').parent(inputDiv)
    p5.createP('Add three words or phrases in the boxes below that will replace the [BLANK] in your prompt when the model runs.').parent(inputDiv)
    p5.createP('(e.g. doctor, secretary, circus performer)').parent(inputDiv)
    
    addField()
    addField()
    addField()
    
    // press to run model
    const submitButton = p5.createButton("RUN PROMPT")
    submitButton.size(170)
    submitButton.class('button').parent(inputDiv)
    submitButton.mousePressed(displayOutput)
 
  }

  function addField(){
    let f = p5.createInput("").parent('#inputDiv') // parent by id: inputDiv is local to makeInputModule
    f.class("blank")
    blanksArray.push(f)
    console.log("made variable field")
    // // Cap the number of fields to avoid the token limit
    // let blanks = document.querySelectorAll(".blank")
    // if (blanks.length > 3){
    //     console.log(blanks.length)
    //     addButton.style('visibility','hidden')
    // }
  }

  // function makeButtons(){
  //   // // press to add more blanks to fill in
  //   // const addButton = p5.createButton("more blanks")
  //   // addButton.size(170)
  //   // // addButton.position(220,500)
  //   // addButton.mousePressed(addField)
  // }

  function makeOutputModule(){
    const outputDiv = p5.createDiv().class('module').id('outputDiv')
    const outHeader = p5.createElement('h4',"OUTPUT").parent(outputDiv)

    // // make output placeholders
    // text-only output
    p5.createElement('h3', 'Text output').parent(outputDiv)
    outText = p5.createP('').id('outText').parent(outputDiv)

    // placeholder DIV for images and captions
    p5.createElement('h3', 'Text-to-image output').parent(outputDiv)
    outPics = p5.createDiv().id('outPics').parent(outputDiv)
        

    // print info about model, prompt, and hyperparams
    p5.createElement('h3', 'Prompting info').parent(outputDiv)    
    outInfo = p5.createP('').id('outInfo').parent(outputDiv)
  }

  async function displayOutput(){
    console.log('submitButton pressed')

    // insert waiting dots into results space of interface
    outText.html('...', false)

    // GRAB CURRENT FIELD INPUTS FROM PROMPT & BLANKS
    PROMPT_INPUT = promptField.value() // grab update to the prompt if it's been changed
    console.log("latest prompt: ", PROMPT_INPUT)
    console.log(blanksArray)

    // create a list of the values in the blanks fields
    let blanksValues = []
    blanksArray.forEach(b => {
      blanksValues.push(b.value())
    })
    console.log(blanksValues)

    // Fill in blanks from our sample prompt and make a new prompts list using our variable list 'blanksValues'
    promptArray = [] // reset, so repeated runs don't accumulate old prompts
    blanksValues.forEach(b => {
      let p = PROMPT_INPUT.replace('[BLANK]', b) // swap [BLANK] for an item from blanksValues
      promptArray.push(p) // add the new prompt to the prompt list
    })
    console.log(promptArray)
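    // e.g. "The [BLANK] was a [MASK]." with blanks ['doctor', 'nurse', 'circus performer'] yields (hypothetical values):
    // ["The doctor was a [MASK].", "The nurse was a [MASK].", "The circus performer was a [MASK]."]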
    
    // call the function that runs the model for the task of your choice here 
    // make sure to use the PROMPT_INPUT as a parameter, or also the PREPROMPT if valid for that task
    // let outs = await textGenTask(PREPROMPT, PROMPT_INPUT, blanksValues)
    
    let outs = await textGenTask(PREPROMPT, promptArray)
    console.log(outs)

    // insert the model outputs into the paragraph
    outText.html(outs, false) // false replaces the text, true appends

    // generate an image from the first filled-in prompt (the Space expects a single prompt string)
    let outPic = await textImgTask(promptArray[0])
    console.log(outPic)
    p5.createImg(outPic.url, PROMPT_INPUT).parent(outPics) // createImg (not createImage) loads from a URL; .url assumed from the Space's file output
  }

  p5.draw = function(){
      // 
  }
});