import os
import sys
import gradio as gr
from transformers import pipeline
import spacy
import subprocess
import nltk
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from spellchecker import SpellChecker
import re
import string
import random

# Download necessary NLTK data
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('averaged_perceptron_tagger_eng')
nltk.download('wordnet')
nltk.download('omw-1.4')
nltk.download('punkt_tab')

# Initialize stopwords
stop_words = set(stopwords.words("english"))

# POS tags and specific words we never want to replace
exclude_tags = {'PRP', 'PRP$', 'MD', 'VBZ', 'VBP', 'VBD', 'VBG', 'VBN', 'TO', 'IN', 'DT', 'CC'}
exclude_words = {'is', 'am', 'are', 'was', 'were', 'have', 'has', 'do', 'does', 'did', 'will', 'shall', 'should', 'would', 'could', 'can', 'may', 'might'}

# Initialize the English text classification pipeline for AI detection
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")

# Initialize the spell checker
spell = SpellChecker()

# Ensure the SpaCy model is installed
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
    nlp = spacy.load("en_core_web_sm")

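# paraphrase_and_correct (further below) calls several helper functions that are
# not defined anywhere in this file. The spelling- and punctuation-related ones
# are sketched here as minimal, assumed implementations; the original helpers
# may behave differently.

def correct_spelling(text):
    # Sketch: replace words the spell checker does not recognize with its top
    # suggestion, keeping the original word when no suggestion is available.
    corrected = []
    for word in text.split():
        if word.lower() in spell.unknown([word.lower()]):
            suggestion = spell.correction(word.lower())
            corrected.append(suggestion if suggestion else word)
        else:
            corrected.append(word)
    return " ".join(corrected)

def fix_possessives(text):
    # Sketch: re-attach a possessive "'s" that tokenization may have split off.
    return re.sub(r"\s+'s\b", "'s", text)

def fix_punctuation_spacing(text):
    # Sketch: remove spaces before punctuation and ensure a single space after it.
    text = re.sub(r"\s+([,.!?;:])", r"\1", text)
    text = re.sub(r"([,.!?;:])(?=[A-Za-z])", r"\1 ", text)
    return text
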
def plagiarism_removal(text):
    def plagiarism_remover(word):
        if word.lower() in stop_words or word.lower() in exclude_words or word in string.punctuation:
            return word
        
        # Find synonyms
        synonyms = set()
        for syn in wordnet.synsets(word):
            for lemma in syn.lemmas():
                if "_" not in lemma.name() and lemma.name().isalpha() and lemma.name().lower() != word.lower():
                    synonyms.add(lemma.name())

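        # POS-tag the word in isolation and leave it alone if its tag is one we never replace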
        pos_tag_word = nltk.pos_tag([word])[0]

        if pos_tag_word[1] in exclude_tags:
            return word
        
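        # Keep only synonyms whose standalone POS tag matches the original word's tag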
        filtered_synonyms = [syn for syn in synonyms if nltk.pos_tag([syn])[0][1] == pos_tag_word[1]]

        if not filtered_synonyms:
            return word

        synonym_choice = random.choice(filtered_synonyms)

        if word.istitle():
            return synonym_choice.title()
        return synonym_choice

    para_split = word_tokenize(text)
    final_text = [plagiarism_remover(word) for word in para_split]
    
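    # Re-attach punctuation tokens to the preceding word so the final join
    # leaves no space before commas, periods, and other punctuation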
    corrected_text = []
    for i in range(len(final_text)):
        if final_text[i] in string.punctuation and i > 0:
            corrected_text[-1] += final_text[i]  
        else:
            corrected_text.append(final_text[i])

    return " ".join(corrected_text)

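# The pipeline in paraphrase_and_correct below also relies on several grammar
# helpers that are not defined in this file. The versions here are minimal,
# assumed sketches; only the function names come from the calls below, the
# bodies are illustrative.

def remove_redundant_words(text):
    # Sketch: collapse immediately repeated words ("the the" -> "the").
    words = text.split()
    cleaned = [w for i, w in enumerate(words) if i == 0 or w.lower() != words[i - 1].lower()]
    return " ".join(cleaned)

def capitalize_sentences_and_nouns(text):
    # Sketch: capitalize the first letter of every sentence. (The original helper
    # presumably also handles proper nouns; that is not reproduced here.)
    sentences = nltk.sent_tokenize(text)
    return " ".join(s[0].upper() + s[1:] if s else s for s in sentences)

def force_first_letter_capital(text):
    # Sketch: make sure the very first character of the text is uppercase.
    return text[0].upper() + text[1:] if text else text

def correct_article_errors(text):
    # Sketch: switch "a"/"an" according to whether the next word starts with a vowel.
    words = text.split()
    for i in range(len(words) - 1):
        nxt = words[i + 1]
        if words[i].lower() == "a" and nxt[:1].lower() in "aeiou":
            words[i] = "An" if words[i][0].isupper() else "an"
        elif words[i].lower() == "an" and nxt[:1].lower() not in "aeiou":
            words[i] = "A" if words[i][0].isupper() else "a"
    return " ".join(words)

def correct_tense_errors(text):
    # Placeholder: real tense correction needs deeper parsing; pass the text through.
    return text

def ensure_subject_verb_agreement(text):
    # Placeholder: pass the text through unchanged.
    return text
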
def paraphrase_and_correct(text):
    paragraphs = text.split("\n\n")  # Split by paragraphs

    # Process each paragraph separately
    processed_paragraphs = []
    for paragraph in paragraphs:
        cleaned_text = remove_redundant_words(paragraph)
        plag_removed = plagiarism_removal(cleaned_text)
        paraphrased_text = capitalize_sentences_and_nouns(plag_removed)
        paraphrased_text = force_first_letter_capital(paraphrased_text)
        paraphrased_text = correct_article_errors(paraphrased_text)
        paraphrased_text = correct_tense_errors(paraphrased_text)
        paraphrased_text = ensure_subject_verb_agreement(paraphrased_text)
        paraphrased_text = fix_possessives(paraphrased_text)
        paraphrased_text = correct_spelling(paraphrased_text)
        paraphrased_text = fix_punctuation_spacing(paraphrased_text)
        processed_paragraphs.append(paraphrased_text)

    return "\n\n".join(processed_paragraphs)  # Reassemble the text with paragraphs

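# predict_en is wired to the "AI Detection" tab below but is not defined in this
# file. A minimal sketch, assuming the standard text-classification pipeline
# output (a list with one {'label': ..., 'score': ...} dict):
def predict_en(text):
    res = pipeline_en(text)[0]
    return res['label'], res['score']
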
# Gradio app setup
with gr.Blocks() as demo:
    with gr.Tab("AI Detection"):
        t1 = gr.Textbox(lines=5, label='Text')
        button1 = gr.Button("πŸ€– Predict!")
        label1 = gr.Textbox(lines=1, label='Predicted Label πŸŽƒ')
        score1 = gr.Textbox(lines=1, label='Probability')

        button1.click(fn=predict_en, inputs=t1, outputs=[label1, score1])

    with gr.Tab("Paraphrasing & Grammar Correction"):
        t2 = gr.Textbox(lines=5, label='Enter text for paraphrasing and grammar correction')
        button2 = gr.Button("πŸ”„ Paraphrase and Correct")
        result2 = gr.Textbox(lines=5, label='Corrected Text')

        button2.click(fn=paraphrase_and_correct, inputs=t2, outputs=result2)

demo.launch(share=True)