# NOTE(review): removed "Spaces: / Sleeping" lines — HuggingFace Spaces UI text
# accidentally captured in the paste; it is not Python and broke parsing.
import pickle
import re
import string

import nltk
import pandas as pd
import spacy
from nltk.corpus import stopwords
# English stop words from NLTK as a list (requires the 'stopwords' corpus:
# nltk.download('stopwords')).
nltk_stop_words = stopwords.words('english')

# spaCy English pipeline used for tokenization and lemmatization
# (requires: python -m spacy download en_core_web_sm).
nlp = spacy.load("en_core_web_sm")
def process_text(text):
    """Normalize a raw text string for downstream TF-IDF feature extraction.

    Steps:
        1. Lowercase the input.
        2. Tokenize and lemmatize with spaCy (module-level ``nlp`` pipeline).
        3. Drop NLTK stop words and punctuation tokens.
        4. Strip non-alphanumeric characters from each lemma.

    Parameters:
        text (str): Raw input text.

    Returns:
        str: Space-joined processed tokens (empty string if nothing survives).
    """
    # Membership tests on a set are O(1); the module-level NLTK stop-word
    # object is a list, which would cost O(n) per token.
    stop_words = set(nltk_stop_words)

    # Step 1: lowercase, then tokenize/lemmatize with spaCy.
    doc = nlp(text.lower())

    # Step 2: filter stop words and punctuation, lemmatize, and strip any
    # non-alphanumeric characters from each lemma.
    processed_tokens = [
        re.sub(r'[^a-zA-Z0-9]', '', token.lemma_)  # keep alphanumerics only
        for token in doc
        if token.text not in stop_words and token.text not in string.punctuation
    ]

    # Tokens can become empty after the regex strip (e.g. a lemma that was all
    # symbols); drop those before joining.
    return " ".join(word for word in processed_tokens if word)
def predict(input_df: pd.DataFrame, tfidf_path: str, model_path: str, text_column: str = "quote"):
    """Predict labels using a saved TF-IDF vectorizer and classifier.

    Parameters:
        input_df (pd.DataFrame): Input dataframe containing the text data.
            A frame exposing ``.to_pandas()`` (e.g. a polars DataFrame) is
            also accepted and converted first.
        tfidf_path (str): Path to the saved TF-IDF vectorizer pickle file.
        model_path (str): Path to the saved Random Forest model pickle file.
        text_column (str): Name of the column containing the text data.

    Returns:
        Predictions (array-like) for each row in the input dataframe.
    """
    # SECURITY: pickle.load executes arbitrary code during deserialization —
    # only load vectorizer/model files from a trusted source.
    with open(tfidf_path, "rb") as tfidf_file:
        tfidf_vectorizer = pickle.load(tfidf_file)
    with open(model_path, "rb") as model_file:
        model = pickle.load(model_file)

    # A pandas DataFrame has no .to_pandas(); only convert when the input is
    # a foreign frame type (e.g. polars) that provides it.
    if hasattr(input_df, "to_pandas"):
        input_df = input_df.to_pandas()

    # Bug fix: honor the text_column parameter instead of the hard-coded
    # "quote" column name.
    text_data = input_df[text_column]

    # Vectorize the text and run the loaded model on the features.
    text_features = tfidf_vectorizer.transform(text_data)
    predictions = model.predict(text_features)
    return predictions