import re
import langdetect
from stopwordsiso import stopwords, has_lang
from sklearn.feature_extraction.text import TfidfVectorizer
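
# Optional, added here for reproducibility (not part of the original snippet):
# langdetect can return different results for short or ambiguous texts across
# runs; seeding the detector factory makes detect_language deterministic.
langdetect.DetectorFactory.seed = 0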


def detect_language(text):
    """
    Detect language using langdetect; returns a language code (e.g. 'en', 'de', 'es').
    If detection fails (langdetect raises LangDetectException), fall back to 'en'.
    """
    try:
        return langdetect.detect(text)
    except langdetect.LangDetectException:
        return 'en'  # fall back when langdetect cannot extract features from the text


def get_stopwords_for_language(lang_code):
    """
    Retrieve stopwords from stopwordsiso for a given language code.
    If the language is not available in stopwordsiso, fall back to an empty set.
    """
    lang_code = lang_code.lower()
    if has_lang(lang_code):
        return stopwords(lang_code)
    else:
        return set()  # fall back to an empty set


def extract_top_keywords(text, top_n=5):
    """
    Extract top_n keywords from 'text' using a simple TF-IDF approach with
    language detection and language-specific stopwords.
    """
    # Clean the text (remove punctuation, lower the case, etc.)
    cleaned_text = re.sub(r"[^\w\s]", " ", text.lower())

    # Detect language
    lang_code = detect_language(cleaned_text)

    # Get the relevant stopwords
    language_stopwords = get_stopwords_for_language(lang_code)

    # Initialize TF-IDF with the custom language stop words (converted to a list,
    # which is what scikit-learn's stop_words parameter expects; an empty list
    # simply means no stop words are removed)
    vectorizer = TfidfVectorizer(stop_words=list(language_stopwords))

    # We pass in a list of one "document" to TF-IDF; with a single document the
    # IDF term is constant, so the scores effectively rank terms by frequency
    tfidf_matrix = vectorizer.fit_transform([cleaned_text])

    feature_names = vectorizer.get_feature_names_out()
    scores = tfidf_matrix.toarray()[0]  # row 0 since we only have one doc

    # Pair (word, score), then sort descending by score
    word_score_pairs = list(zip(feature_names, scores))
    word_score_pairs.sort(key=lambda x: x[1], reverse=True)

    # Return just the top_n words
    top_keywords = [word for (word, score) in word_score_pairs[:top_n]]
    return top_keywords
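

# --- Usage sketch (illustrative addition, not part of the original snippet) ---
# A minimal demonstration of the pipeline on two made-up sample sentences; the
# sample texts and the printed output format are assumptions for this example only.
if __name__ == "__main__":
    samples = [
        "The quick brown fox jumps over the lazy dog near the river bank.",
        "Der schnelle braune Fuchs springt über den faulen Hund am Flussufer.",
    ]
    for sample in samples:
        lang = detect_language(sample)
        keywords = extract_top_keywords(sample, top_n=3)
        print(f"Detected '{lang}' -> top keywords: {keywords}")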