import pickle
import re
import string

import nltk
import pandas as pd
import spacy
from nltk.corpus import stopwords

# Make sure the NLTK stop word corpus is available, then load the English list
nltk.download("stopwords", quiet=True)
nltk_stop_words = stopwords.words("english")

# Load the spaCy model for English
nlp = spacy.load("en_core_web_sm")

def process_text(text):
    """
    Process text by:
    1. Lowercasing
    2. Removing punctuation and non-alphanumeric characters
    3. Removing stop words
    4. Lemmatization
    """
    # Step 1: Tokenization & processing with spaCy
    doc = nlp(text.lower())

    # Step 2: Filter out stop words and punctuation, lemmatize, and strip any
    # remaining non-alphanumeric characters from each token
    processed_tokens = [
        re.sub(r'[^a-zA-Z0-9]', '', token.lemma_)
        for token in doc
        if token.text not in nltk_stop_words and token.text not in string.punctuation
    ]

    # Drop empty strings left behind by the regex replacement and join the tokens
    processed_tokens = " ".join([word for word in processed_tokens if word])
    return processed_tokens

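
# A minimal usage sketch for process_text. The sample sentence is illustrative,
# and the exact output depends on the spaCy model's lemmatizer:
#   process_text("The cats are running quickly!")  ->  roughly "cat run quickly"
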
def predict(input_df: pd.DataFrame, tfidf_path: str, model_path: str, text_column: str = "quote"):
    """
    Predict the output using a saved TF-IDF vectorizer and Random Forest model.

    Parameters:
        input_df (pd.DataFrame): Input dataframe containing the text data.
        tfidf_path (str): Path to the saved TF-IDF vectorizer pickle file.
        model_path (str): Path to the saved Random Forest model pickle file.
        text_column (str): The name of the column in the dataframe containing the text data.

    Returns:
        np.ndarray: Predictions for each row in the input dataframe.
    """
    # Load the TF-IDF vectorizer
    with open(tfidf_path, "rb") as tfidf_file:
        tfidf_vectorizer = pickle.load(tfidf_file)

    # Load the Random Forest model
    with open(model_path, "rb") as model_file:
        model = pickle.load(model_file)

    # Transform the text from the requested column using the TF-IDF vectorizer
    text_data = input_df[text_column]
    text_features = tfidf_vectorizer.transform(text_data)

    # Make predictions using the loaded model
    predictions = model.predict(text_features)
    return predictions
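

if __name__ == "__main__":
    # Minimal sketch of how the two functions above might be used together.
    # The pickle file names and the sample quote are illustrative assumptions,
    # not artifacts guaranteed to exist in this repository.
    sample_df = pd.DataFrame(
        {"quote": ["The planet is warming faster than the models predicted."]}
    )

    # If the vectorizer was fit on preprocessed text, clean the input the same way
    sample_df["quote"] = sample_df["quote"].apply(process_text)

    preds = predict(
        sample_df,
        tfidf_path="tfidf_vectorizer.pkl",  # assumed artifact name
        model_path="rf_model.pkl",          # assumed artifact name
        text_column="quote",
    )
    print(preds)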