Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -25,40 +25,34 @@ except OSError:
|
|
25 |
subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
|
26 |
nlp = spacy.load("en_core_web_sm")
|
27 |
|
28 |
-
# Function to
|
29 |
-
def
|
30 |
-
doc = nlp(text)
|
31 |
-
corrected_text = []
|
32 |
-
|
33 |
-
for sent in doc.sents:
|
34 |
-
sentence = []
|
35 |
-
for token in sent:
|
36 |
-
if token.i == sent.start: # First word of the sentence
|
37 |
-
sentence.append(token.text.capitalize())
|
38 |
-
elif token.pos_ == "PROPN": # Proper noun
|
39 |
-
sentence.append(token.text.capitalize())
|
40 |
-
else:
|
41 |
-
sentence.append(token.text)
|
42 |
-
corrected_text.append(' '.join(sentence))
|
43 |
-
|
44 |
-
return ' '.join(corrected_text)
|
45 |
-
|
46 |
-
# Function to get synonyms using NLTK WordNet and keep the same grammatical form
|
47 |
-
def get_synonym(word, pos_tag):
|
48 |
synsets = wordnet.synsets(word)
|
49 |
if not synsets:
|
50 |
return word
|
51 |
-
|
52 |
for synset in synsets:
|
53 |
if synset.pos() == pos_tag: # Match the part of speech
|
54 |
-
synonym = synset.lemmas()[0].name()
|
55 |
-
|
56 |
-
|
57 |
-
|
|
|
|
|
|
|
58 |
else:
|
59 |
-
return synonym
|
|
|
60 |
return word
|
61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
62 |
# Function to rephrase text and replace words with their synonyms while maintaining form
|
63 |
def rephrase_with_synonyms(text):
|
64 |
doc = nlp(text)
|
@@ -77,12 +71,7 @@ def rephrase_with_synonyms(text):
|
|
77 |
pos_tag = wordnet.ADV
|
78 |
|
79 |
if pos_tag:
|
80 |
-
synonym = get_synonym(token.text, pos_tag)
|
81 |
-
# Ensure that the verb/noun/plural/singular is kept intact
|
82 |
-
if token.pos_ == "VERB":
|
83 |
-
synonym = token.lemma_ if token.morph.get("Tense") == "Past" else synonym
|
84 |
-
elif token.pos_ == "NOUN" and token.tag_ == "NNS": # Plural nouns
|
85 |
-
synonym += 's' if not synonym.endswith('s') else ""
|
86 |
rephrased_text.append(synonym)
|
87 |
else:
|
88 |
rephrased_text.append(token.text)
|
@@ -103,62 +92,6 @@ def paraphrase_and_correct(text):
|
|
103 |
|
104 |
return paraphrased_text
|
105 |
|
106 |
-
# Function to correct tense errors in a sentence (Tense Correction)
def correct_tense_errors(text):
    """Normalize auxiliary verbs in *text* to their WordNet base form.

    Tokens spaCy tags as VERB with dependency 'aux'/'auxpass' are replaced
    by ``wordnet.morphy``'s lemma (falling back to the surface form when
    morphy knows no base form); every other token passes through unchanged.
    Returns the rebuilt sentence as a space-joined string.
    """
    rebuilt = [
        # morphy returns None when it finds no base form — keep the original then
        (wordnet.morphy(tok.text, wordnet.VERB) or tok.text)
        if tok.pos_ == "VERB" and tok.dep_ in {"aux", "auxpass"}
        else tok.text
        for tok in nlp(text)
    ]
    return ' '.join(rebuilt)
|
119 |
-
|
120 |
-
# Function to correct singular/plural errors (Singular/Plural Correction)
def correct_singular_plural_errors(text):
    """Heuristically repair noun number using sibling determiners.

    A singular noun (tag NN) whose head has a child among
    'many'/'several'/'few' is pluralized as ``lemma + 's'``; a plural noun
    (tag NNS) whose head has a child 'a'/'one' is reduced to its lemma.
    All other tokens are kept verbatim.  Returns a space-joined string.
    """
    words = []
    for tok in nlp(text):
        word = tok.text
        if tok.pos_ == "NOUN":
            # determiner-like siblings live under the noun's head in the parse
            siblings = {child.text.lower() for child in tok.head.children}
            if tok.tag_ == "NN" and siblings & {'many', 'several', 'few'}:
                word = tok.lemma_ + 's'       # plural cue next to a singular noun
            elif tok.tag_ == "NNS" and siblings & {'a', 'one'}:
                word = tok.lemma_             # singular cue next to a plural noun
        words.append(word)
    return ' '.join(words)
|
144 |
-
|
145 |
-
# Function to check and correct article errors
def correct_article_errors(text):
    """Fix 'a' vs 'an' based on the first letter of the following token.

    'a' before a vowel-initial word becomes 'an' and vice versa; every
    other token is appended unchanged.  Returns a space-joined string.

    Bug fix: the original called ``token.nbor(1)`` unconditionally, which
    raises IndexError when 'a'/'an' is the final token of the doc — guard
    on the token index so a trailing article is kept as-is.
    """
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        # only inspect the neighbour when one actually exists
        if token.text in ['a', 'an'] and token.i + 1 < len(doc):
            next_token = token.nbor(1)
            if token.text == "a" and next_token.text[0].lower() in "aeiou":
                corrected_text.append("an")
            elif token.text == "an" and next_token.text[0].lower() not in "aeiou":
                corrected_text.append("a")
            else:
                corrected_text.append(token.text)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)
|
161 |
-
|
162 |
# Gradio app setup with two tabs
|
163 |
with gr.Blocks() as demo:
|
164 |
with gr.Tab("AI Detection"):
|
|
|
25 |
subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
|
26 |
nlp = spacy.load("en_core_web_sm")
|
27 |
|
28 |
+
# Function to get synonyms using NLTK WordNet and maintain original verb form
def get_synonym(word, pos_tag, original_token):
    """Return a WordNet synonym for *word* whose synset POS matches *pos_tag*,
    re-inflected toward *original_token*'s Penn tag.

    Takes the first lemma of the first matching synset.  VBG/VBN tokens are
    routed through ``spacy_token_form`` to restore the participle ending;
    VBZ gets a plain '-s'.  Falls back to *word* itself when no synset (or
    no POS match) is found.

    Bug fix: WordNet joins multi-word lemmas with underscores
    ("look_into") — replace them with spaces so the synonym reads
    naturally in the rephrased text.
    """
    synsets = wordnet.synsets(word)
    if not synsets:
        return word

    for synset in synsets:
        if synset.pos() != pos_tag:  # match the part of speech
            continue
        synonym = synset.lemmas()[0].name().replace('_', ' ')

        # Preserve the original verb form
        if original_token.tag_ in ("VBG", "VBN"):  # present or past participle
            return spacy_token_form(synonym, original_token.tag_)
        if original_token.tag_ == "VBZ":  # 3rd person singular present
            return synonym + "s"
        return synonym

    return word
|
47 |
|
48 |
+
# Function to conjugate the synonym to the correct form based on the original token's tag
def spacy_token_form(synonym, tag):
    """Inflect *synonym* toward the Penn Treebank tag *tag*.

    Handles VBG (gerund/present participle) and VBN (past participle) via
    simple suffix rules; any other tag returns the word unchanged.

    Bug fix: the original blindly appended 'ing'/'ed', yielding "makeing"
    and "bakeed".  Drop a silent trailing 'e' before 'ing' (but keep 'ee',
    as in "seeing"), and append only 'd' after a trailing 'e' for VBN.
    """
    if tag == "VBG":  # gerund or present participle
        if synonym.endswith("ing"):
            return synonym
        if synonym.endswith("e") and not synonym.endswith("ee"):
            return synonym[:-1] + "ing"   # make -> making
        return synonym + "ing"
    if tag == "VBN":  # past participle
        if synonym.endswith("ed"):
            return synonym
        if synonym.endswith("e"):
            return synonym + "d"          # bake -> baked
        return synonym + "ed"
    return synonym
|
55 |
+
|
56 |
# Function to rephrase text and replace words with their synonyms while maintaining form
|
57 |
def rephrase_with_synonyms(text):
|
58 |
doc = nlp(text)
|
|
|
71 |
pos_tag = wordnet.ADV
|
72 |
|
73 |
if pos_tag:
|
74 |
+
synonym = get_synonym(token.text, pos_tag, token)
|
|
|
|
|
|
|
|
|
|
|
75 |
rephrased_text.append(synonym)
|
76 |
else:
|
77 |
rephrased_text.append(token.text)
|
|
|
92 |
|
93 |
return paraphrased_text
|
94 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
95 |
# Gradio app setup with two tabs
|
96 |
with gr.Blocks() as demo:
|
97 |
with gr.Tab("AI Detection"):
|