import streamlit as st
from transformers import T5ForConditionalGeneration, T5Tokenizer
import torch

# Load model and tokenizer from Hugging Face
@st.cache_resource  # cache so the model is loaded only once, not on every rerun
def load_model():
    model_name = "pszemraj/flan-t5-large-grammar-synthesis"
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)
    return tokenizer, model

tokenizer, model = load_model()

# Function to generate corrected sentence
def correct_sentence(sentence):
    input_text = "gec: " + sentence
    input_ids = tokenizer.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
    outputs = model.generate(input_ids, max_length=512, num_beams=4, early_stopping=True)
    corrected = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return corrected

# Streamlit UI
st.title("📝 Advanced Grammar Correction Assistant")
st.write("Enter a sentence. I'll correct it and explain the changes.")

user_input = st.text_area("Your Sentence", height=150)

if st.button("Correct & Explain"):
    if user_input.strip() == "":
        st.warning("Please enter a sentence.")
    else:
        corrected = correct_sentence(user_input)

        st.markdown("### ✅ Correction:")
        st.success(corrected)
st.markdown("### π Explanation:") | |
st.info(f""" | |
*Original:* {user_input} | |
*Corrected:* {corrected} | |
Here's what changed: | |
- I used an AI model trained to correct grammar and sentence structure. | |
- To give detailed explanations for each mistake (like verb tense, subject-verb agreement, or punctuation), the app can be extended using another model or logic to compare the two sentences line by line. | |
""") | |

st.caption("Model used: pszemraj/flan-t5-large-grammar-synthesis")
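
# --- Optional extension (sketch) ---
# The explanation text shown in the UI above says the app "can be extended with
# another model or with logic that compares the two sentences". A minimal sketch
# of the comparison idea is below, using only the standard-library difflib module.
# The function name explain_changes is hypothetical and is not wired into the UI.
import difflib

def explain_changes(original: str, corrected: str) -> list:
    """Return a crude word-level list of removals and additions between the two sentences."""
    changes = []
    for token in difflib.ndiff(original.split(), corrected.split()):
        # ndiff prefixes removed words with "- " and added words with "+ ";
        # unchanged words ("  ") and intraline hints ("? ") are ignored here.
        if token.startswith("- "):
            changes.append(f"removed '{token[2:]}'")
        elif token.startswith("+ "):
            changes.append(f"added '{token[2:]}'")
    return changes

# Example:
#   explain_changes("She go to school", "She goes to school")
#   -> ["removed 'go'", "added 'goes'"]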