import gradio as gr
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load T5 model and tokenizer
model_name = "t5-base"  # mid-sized checkpoint; swap in "t5-small" for faster inference
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)

# Use GPU if available
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Grammar correction function
def correct_grammar(text):
    # NOTE: the "correct:" task prefix assumes a checkpoint fine-tuned for grammar
    # correction; the stock t5-base weights were not trained on this prefix, so
    # results may be rough until a grammar-tuned checkpoint is substituted.
    input_text = f"correct: {text}"
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)

    # Generate corrected text with beam search
    output_ids = model.generate(input_ids, max_length=512, num_beams=5, early_stopping=True)
    corrected_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return corrected_text

# Gradio interface function
def correct_grammar_interface(text):
    corrected_text = correct_grammar(text)
    return corrected_text

# Gradio interface (the widgets below are a minimal reconstruction of the
# truncated Blocks layout: one input box, one output box, and a button)
with gr.Blocks() as grammar_app:
    gr.Markdown("## Grammar Correction with T5")
    input_box = gr.Textbox(label="Input text", lines=4, placeholder="Enter text to correct...")
    output_box = gr.Textbox(label="Corrected text", lines=4)
    correct_button = gr.Button("Correct Grammar")
    correct_button.click(fn=correct_grammar_interface, inputs=input_box, outputs=output_box)

grammar_app.launch()
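
# To exercise the model without launching the UI, call correct_grammar directly,
# e.g. correct_grammar("She go to the market yesterday."); the example sentence
# is illustrative, and output quality depends on the checkpoint loaded above.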