"""Streamlit app: classify user-entered text with a trained MDFEND model.

Loads the tokenizer and model once (cached across reruns), reads free text
from a text area, and displays the argmax class index of the model logits.
"""

import streamlit as st
import torch  # required for torch.no_grad(); was missing in the original
from transformers import RobertaTokenizer

from your_model_file import MDFEND  # project-local model class

# Checkpoint directory (or hub id) for both tokenizer and model.
# NOTE(review): the original passed "prediction_sinhala.ipynb" — a notebook
# file, which from_pretrained cannot load. Point this at the directory
# produced by save_pretrained() (or a hub model id) — TODO confirm path.
MODEL_PATH = "prediction_sinhala_model"


@st.cache_resource  # st.cache(allow_output_mutation=True) is deprecated/removed
def load_model():
    """Load and cache the tokenizer and model (one load per server process).

    Returns:
        tuple: (model, tokenizer) ready for inference.
    """
    tokenizer = RobertaTokenizer.from_pretrained(MODEL_PATH)
    model = MDFEND.from_pretrained(MODEL_PATH)
    model.eval()  # inference mode: disables dropout / batch-norm updates
    return model, tokenizer


model, tokenizer = load_model()

# User input
text_input = st.text_area("Enter text here:")

# Prediction
if st.button("Predict"):
    if not text_input.strip():
        # Guard: tokenizing/predicting on empty input is meaningless.
        st.warning("Please enter some text first.")
    else:
        inputs = tokenizer(text_input, return_tensors="pt", truncation=True)
        with torch.no_grad():  # no gradients needed at inference
            outputs = model(**inputs)
        prediction = outputs.logits.argmax(-1).item()
        st.write(f"Prediction: {prediction}")