import functools

import tensorflow
import torch
from transformers import pipeline


@functools.lru_cache(maxsize=8)
def _get_pipeline(model_name: str):
    """Build (once) and cache a text2text-generation pipeline for *model_name*.

    Constructing a pipeline loads the model weights, which is expensive;
    caching by model name means repeated calls to llm_predict with the
    same model reuse the loaded model instead of reloading it every call.
    """
    return pipeline("text2text-generation", model=model_name)


def llm_predict(input_text: str, model_name: str) -> str:
    """Generate text from *input_text* using the given seq2seq model.

    Args:
        input_text: The prompt / source text fed to the model.
        model_name: Hugging Face model identifier or local path for a
            text2text-generation (seq2seq) model.

    Returns:
        The generated text of the first (and only) returned sequence.
    """
    pipe = _get_pipeline(model_name)
    # max_new_tokens bounds only the generated continuation, not the prompt.
    generated_text = pipe(input_text, max_new_tokens=1000)
    return generated_text[0]['generated_text']