# Required imports and the spaCy pipeline shared by all helpers below
import re

import spacy
from nltk.corpus import wordnet

nlp = spacy.load("en_core_web_sm")


# Remove redundant/filler words
def remove_redundant_words(text):
    doc = nlp(text)
    # Note: the multi-word fillers ("kind of", "sort of", "you know") never match a
    # single spaCy token, so they would need phrase-level handling to take effect.
    meaningless_words = {
        "actually", "basically", "literally", "really", "very", "just",
        "quite", "rather", "simply", "that", "kind of", "sort of",
        "you know", "honestly", "seriously",
    }
    filtered_text = [token.text for token in doc if token.text.lower() not in meaningless_words]
    return ' '.join(filtered_text)


# Capitalize sentence starts and proper nouns
def capitalize_sentences_and_nouns(text):
    doc = nlp(text)
    corrected_text = []
    for sent in doc.sents:
        sentence = []
        for token in sent:
            if token.i == sent.start or token.pos_ == "PROPN":
                sentence.append(token.text.capitalize())
            else:
                sentence.append(token.text.lower())
        corrected_text.append(' '.join(sentence))
    return ' '.join(corrected_text)


# Correct tense/verb-form errors by reducing auxiliary verbs to their base form
def correct_tense_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        # spaCy tags auxiliaries as AUX rather than VERB, so check both POS values
        if token.pos_ in {"VERB", "AUX"} and token.dep_ in {"aux", "auxpass"}:
            lemma = wordnet.morphy(token.text, wordnet.VERB) or token.text
            corrected_text.append(lemma)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)


# Enforce basic subject-verb agreement (singular noun -> VBZ form, plural noun -> base form)
def ensure_subject_verb_agreement(text):
    doc = nlp(text)
    # First pass: decide the corrected form for each verb that has an nsubj dependent
    corrections = {}
    for token in doc:
        if token.dep_ == "nsubj" and token.head.pos_ == "VERB":
            if token.tag_ == "NN" and token.head.tag_ != "VBZ":
                corrections[token.head.i] = token.head.lemma_ + "s"
            elif token.tag_ == "NNS" and token.head.tag_ == "VBZ":
                corrections[token.head.i] = token.head.lemma_
    # Second pass: emit every token, swapping in the corrected verb forms where needed
    corrected_text = [corrections.get(token.i, token.text) for token in doc]
    return ' '.join(corrected_text)


# Ensure proper apostrophe usage and possessives
def correct_apostrophes(text):
    # Heuristic pattern (assumed completion, not from a verified source): naively
    # rewrites a trailing "s" as a possessive "'s", so it will over-correct ordinary plurals.
    text = re.sub(r"\b(\w+)s\b(?<!'s)", r"\1's", text)
    return text
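

# Example usage (illustrative sketch): the correct_text helper, the chaining order,
# and the sample sentence below are assumptions added for demonstration; they are
# not part of the functions above. The heuristics are rough, so the result is only
# an approximate cleanup rather than a fully corrected sentence.
def correct_text(text):
    text = remove_redundant_words(text)
    text = capitalize_sentences_and_nouns(text)
    text = correct_tense_errors(text)
    text = ensure_subject_verb_agreement(text)
    text = correct_apostrophes(text)
    return text


if __name__ == "__main__":
    sample = "the students really writes reports for john every week"
    print(correct_text(sample))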