Update app.py
app.py CHANGED
@@ -11,7 +11,7 @@ import contractions
 import tensorflow as tf
 from nltk.stem import WordNetLemmatizer
 from nltk.tokenize import word_tokenize
-from nltk.corpus import
+from nltk.corpus import wordnet
 from tensorflow.keras.layers import Layer
 from tensorflow.keras import backend as K
 from tensorflow.keras.preprocessing.sequence import pad_sequences
@@ -101,13 +101,6 @@ def cleaning(text):
 
     return mapped_tags
 
-def remove_stopwords(text):
-    stop_words = set(stopwords.words('english'))
-    tokens = word_tokenize(text)
-    filtered_text = [word for word in tokens if word.lower() not in stop_words]
-    return ' '.join(filtered_text)
-text = remove_stopwords(text)
-
 def pos_tag_and_lemmatize(text):
     tokens = word_tokenize(text)
     pos_tags = nltk.pos_tag(tokens)
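Two things change in this commit: the truncated `from nltk.corpus import` line is completed to import `wordnet`, and the `remove_stopwords` helper plus its call site are deleted. The deleted helper referenced `stopwords`, which the visible import block never brings in, which may be why it was dropped. Below is a minimal sketch of how the newly imported `wordnet` is typically combined with `WordNetLemmatizer` in a helper like `pos_tag_and_lemmatize`; the diff cuts off after `nltk.pos_tag(tokens)`, so the tag-mapping body and the `get_wordnet_pos` helper name are assumptions for illustration, not code from this commit.

# A minimal sketch (not this repo's code) of the usual wordnet +
# WordNetLemmatizer pattern the fixed import enables. Assumes the NLTK
# data packages punkt, averaged_perceptron_tagger, and wordnet are
# downloaded; get_wordnet_pos is a hypothetical helper name.
import nltk
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

def get_wordnet_pos(treebank_tag):
    # nltk.pos_tag emits Penn Treebank tags ('JJ', 'VB', ...), while
    # lemmatize() expects wordnet POS constants ('a', 'v', 'n', 'r').
    if treebank_tag.startswith('J'):
        return wordnet.ADJ
    if treebank_tag.startswith('V'):
        return wordnet.VERB
    if treebank_tag.startswith('R'):
        return wordnet.ADV
    return wordnet.NOUN  # default, matching lemmatize()'s own default

def pos_tag_and_lemmatize(text):
    tokens = word_tokenize(text)
    pos_tags = nltk.pos_tag(tokens)
    lemmatizer = WordNetLemmatizer()
    return ' '.join(lemmatizer.lemmatize(word, get_wordnet_pos(tag))
                    for word, tag in pos_tags)

With the tag mapping in place, verbs lemmatize correctly: "are" becomes "be" and "hanging" becomes "hang", whereas treating every token as a noun would leave both unchanged.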