from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Load model and tokenizer
model_name = "hassaanik/grammar-correction-model"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Use GPU if available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)

# Use FP16 for faster inference on GPU
if torch.cuda.is_available():
    model.half()

# Function to correct grammar for a single text input
def correct_grammar(text):
    # Tokenize input and move it to the correct device (CPU/GPU)
    inputs = tokenizer.encode(text, return_tensors="pt", max_length=512, truncation=True).to(device)

    # Generate corrected output with beam search
    outputs = model.generate(inputs, max_length=512, num_beams=5, early_stopping=True)

    # Decode output and return corrected text
    corrected_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return corrected_text

# Example usage of the grammar correction function
if __name__ == "__main__":
    sample_text = "He go to the market yesturday."
    corrected_text = correct_grammar(sample_text)
    print("Original Text:", sample_text)
    print("Corrected Text:", corrected_text)
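
# Optional: batch correction for multiple sentences. This is a minimal sketch,
# not part of the original snippet; it assumes the tokenizer and model loaded
# above and uses standard padded batch encoding so a list of inputs can be
# corrected in one model.generate call instead of a Python loop.
def correct_grammar_batch(texts):
    # Tokenize all inputs at once; padding aligns them into a single tensor
    inputs = tokenizer(texts, return_tensors="pt", padding=True,
                       max_length=512, truncation=True).to(device)

    # Beam-search decode the whole batch in a single forward pass
    outputs = model.generate(**inputs, max_length=512, num_beams=5, early_stopping=True)

    # Decode each corrected sequence back to text
    return [tokenizer.decode(o, skip_special_tokens=True) for o in outputs]

# Example usage:
#   correct_grammar_batch(["He go to school.", "She have two cat."])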