sashtech committed on
Commit a4c0f0e · verified · 1 Parent(s): da0253a

Update app.py

Files changed (1): app.py (+5, -72)
app.py CHANGED
@@ -17,9 +17,6 @@ def predict_en(text):
 # Ensure necessary NLTK data is downloaded for Humanifier
 nltk.download('wordnet')
 nltk.download('omw-1.4')
-nltk.download('punkt')
-nltk.download('averaged_perceptron_tagger')
-
 
 # Ensure the SpaCy model is installed for Humanifier
 try:
@@ -28,59 +25,6 @@ except OSError:
     subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
     nlp = spacy.load("en_core_web_sm")
 
-# Grammar, Tense, and Singular/Plural Correction Functions
-
-# Correct article errors (e.g., "a apple" -> "an apple")
-def check_article_error(text):
-    tokens = nltk.pos_tag(nltk.word_tokenize(text))
-    corrected_tokens = []
-
-    for i, token in enumerate(tokens):
-        word, pos = token
-        if word.lower() == 'a' and i < len(tokens) - 1 and tokens[i + 1][1] == 'NN':
-            corrected_tokens.append('an' if tokens[i + 1][0][0] in 'aeiou' else 'a')
-        else:
-            corrected_tokens.append(word)
-
-    return ' '.join(corrected_tokens)
-
-# Correct tense errors (e.g., "She has go out" -> "She has gone out")
-def check_tense_error(text):
-    tokens = nltk.pos_tag(nltk.word_tokenize(text))
-    corrected_tokens = []
-
-    for word, pos in tokens:
-        if word == "go" and pos == "VB":
-            corrected_tokens.append("gone")
-        elif word == "know" and pos == "VB":
-            corrected_tokens.append("known")
-        else:
-            corrected_tokens.append(word)
-
-    return ' '.join(corrected_tokens)
-
-# Correct singular/plural errors (e.g., "There are many chocolate" -> "There are many chocolates")
-def check_pluralization_error(text):
-    tokens = nltk.pos_tag(nltk.word_tokenize(text))
-    corrected_tokens = []
-
-    for word, pos in tokens:
-        if word == "chocolate" and pos == "NN":
-            corrected_tokens.append("chocolates")
-        elif word == "kids" and pos == "NNS":
-            corrected_tokens.append("kid")
-        else:
-            corrected_tokens.append(word)
-
-    return ' '.join(corrected_tokens)
-
-# Combined function to correct grammar, tense, and singular/plural errors
-def correct_grammar_tense_plural(text):
-    text = check_article_error(text)
-    text = check_tense_error(text)
-    text = check_pluralization_error(text)
-    return text
-
 # Function to get synonyms using NLTK WordNet (Humanifier)
 def get_synonyms_nltk(word, pos):
     synsets = wordnet.synsets(word, pos=pos)
@@ -140,20 +84,17 @@ def paraphrase_with_spacy_nltk(text):
 
     return corrected_text
 
-# Combined function: Paraphrase -> Capitalization -> Grammar Correction
+# Combined function: Paraphrase -> Capitalization (Humanifier)
 def paraphrase_and_correct(text):
     # Step 1: Paraphrase the text
     paraphrased_text = paraphrase_with_spacy_nltk(text)
 
     # Step 2: Capitalize sentences and proper nouns
-    capitalized_text = capitalize_sentences_and_nouns(paraphrased_text)
-
-    # Step 3: Correct grammar, tense, and pluralization
-    final_text = correct_grammar_tense_plural(capitalized_text)
+    final_text = capitalize_sentences_and_nouns(paraphrased_text)
 
     return final_text
 
-# Gradio app setup with three tabs
+# Gradio app setup with two tabs
 with gr.Blocks() as demo:
     with gr.Tab("AI Detection"):
         t1 = gr.Textbox(lines=5, label='Text')
@@ -171,14 +112,6 @@ with gr.Blocks() as demo:
 
         # Connect the paraphrasing function to the button
         paraphrase_button.click(paraphrase_and_correct, inputs=text_input, outputs=output_text)
-
-    with gr.Tab("Grammar Correction"):
-        grammar_input = gr.Textbox(lines=5, label="Input Text")
-        grammar_button = gr.Button("Correct Grammar")
-        grammar_output = gr.Textbox(label="Corrected Text")
-
-        # Connect the custom grammar, tense, and plural correction function to the button
-        grammar_button.click(correct_grammar_tense_plural, inputs=grammar_input, outputs=grammar_output)
 
-# Launch the app with all functionalities
-demo.launch(share=True) # Enables public link sharing and could bypass local threading issues
+# Launch the app with the remaining functionalities
+demo.launch()
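The install-on-first-run pattern for the spaCy model appears only in fragments across the hunks above (the try:, except OSError:, subprocess.run, and spacy.load lines). A minimal self-contained sketch of that pattern, not the exact contents of app.py; the use of sys.executable and check=True are additions for robustness, not lines from the file:

import subprocess
import sys

import spacy

# Try to load the small English model; if it is missing, download it once and retry.
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # sys.executable is used instead of the bare "python" shown in the diff so the
    # sketch also works inside virtual environments (an assumption, not app.py's code).
    subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
    nlp = spacy.load("en_core_web_sm")

print(nlp("This is a quick check that the pipeline loaded.")[0].pos_)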
 
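For reference, the Gradio wiring the remaining hunks describe (gr.Blocks, tabs, a button bound with .click, and the bare demo.launch()) looks like this in isolation. The stub below stands in for app.py's paraphrase_and_correct, and the second tab's label and the widget labels are assumptions; the diff only shows the identifiers and the .click call:

import gradio as gr

def paraphrase_and_correct(text: str) -> str:
    # Stand-in for app.py's paraphrase -> capitalization pipeline.
    return text

with gr.Blocks() as demo:
    with gr.Tab("AI Detection"):
        t1 = gr.Textbox(lines=5, label='Text')
        # (the detection model, button, and output from app.py are omitted here)
    with gr.Tab("Humanifier"):  # assumed tab label; not shown in the diff
        text_input = gr.Textbox(lines=5, label="Input Text")        # assumed label
        paraphrase_button = gr.Button("Paraphrase & Correct")       # assumed label
        output_text = gr.Textbox(label="Paraphrased Text")          # assumed label

        # Connect the paraphrasing function to the button, as in the diff
        paraphrase_button.click(paraphrase_and_correct, inputs=text_input, outputs=output_text)

# Launch without share=True, matching the new demo.launch()
demo.launch()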