Update app.py
app.py CHANGED
@@ -1,29 +1,27 @@
 import streamlit as st
 from transformers import pipeline
-import matplotlib.pyplot as plt
 import json
 import langdetect
 from keybert import KeyBERT

-# Load
+# Load Pretrained Models
 @st.cache_resource
 def load_models():
     return {
         "emotion": pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True),
-        "sentiment": pipeline("
-        "summarization": pipeline("summarization"),
-        "ner": pipeline("ner", grouped_entities=True),
+        "sentiment": pipeline("text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment"),
+        "summarization": pipeline("summarization", model="facebook/bart-large-cnn"),
+        "ner": pipeline("ner", model="dbmdz/bert-large-cased-finetuned-conll03-english", grouped_entities=True),
         "toxicity": pipeline("text-classification", model="unitary/unbiased-toxic-roberta"),
         "keyword_extraction": KeyBERT()
     }

-models=load_models()
+models = load_models()

 # Function: Emotion Detection
 def analyze_emotions(text):
     results = models["emotion"](text)
-    ...
-    return emotions
+    return {r['label']: round(r['score'], 2) for r in results[0]}

 # Function: Sentiment Analysis
 def analyze_sentiment(text):
@@ -32,23 +30,21 @@ def analyze_sentiment(text):

 # Function: Text Summarization
 def summarize_text(text):
-    ...
-    return summary
+    return models["summarization"](text[:1024])[0]['summary_text']

 # Function: Keyword Extraction
 def extract_keywords(text):
-    return models["keyword_extraction"].extract_keywords(text,
+    return models["keyword_extraction"].extract_keywords(text, ngram_range=(1, 2), stop_words='english')

 # Function: Named Entity Recognition (NER)
 def analyze_ner(text):
     entities = models["ner"](text)
     return {entity["word"]: entity["entity_group"] for entity in entities}

-# Function: Language Detection
+# Function: Language Detection
 def detect_language(text):
     try:
-        lang = langdetect.detect(text)
-        return lang
+        return langdetect.detect(text)
     except:
         return "Error detecting language"

@@ -57,7 +53,6 @@ def detect_toxicity(text):
     results = models["toxicity"](text)
     return {results[0]['label']: round(results[0]['score'], 2)}

-
 # Streamlit UI
 st.title("🚀 AI-Powered Text Intelligence App")
 st.markdown("Analyze text with multiple NLP features: Emotion Detection, Sentiment Analysis, Summarization, NER, Keywords, Language Detection, and more!")
@@ -68,46 +63,36 @@ text_input = st.text_area("Enter text to analyze:", "")
 if st.button("Analyze Text"):
     if text_input.strip():
         st.subheader("🔹 Emotion Detection")
-        emotions = analyze_emotions(text_input)
-        st.json(emotions)
+        st.json(analyze_emotions(text_input))

         st.subheader("🔹 Sentiment Analysis")
-        sentiment = analyze_sentiment(text_input)
-        st.json(sentiment)
+        st.json(analyze_sentiment(text_input))

         st.subheader("🔹 Text Summarization")
-        summary = summarize_text(text_input)
-        st.write(summary)
+        st.write(summarize_text(text_input))

         st.subheader("🔹 Keyword Extraction")
-        keywords = extract_keywords(text_input)
-        st.json(keywords)
+        st.json(extract_keywords(text_input))

         st.subheader("🔹 Named Entity Recognition (NER)")
-        ner_data = analyze_ner(text_input)
-        st.json(ner_data)
+        st.json(analyze_ner(text_input))

         st.subheader("🔹 Language Detection")
-        lang = detect_language(text_input)
-        st.write(f"Detected Language: `{lang}`")
+        st.write(f"Detected Language: `{detect_language(text_input)}`")

         st.subheader("🔹 Toxicity Detection")
-        toxicity = detect_toxicity(text_input)
-        st.json(toxicity)
-
-        ...
-        ...
-        result_data = {
-            "emotion": emotions,
-            "sentiment": sentiment,
-            "summary": summary,
-            "keywords": keywords,
-            "ner": ner_data,
-            "language": lang,
-            "toxicity": toxicity
+        st.json(detect_toxicity(text_input))
+
+        # Save results to JSON
+        results = {
+            "emotion": analyze_emotions(text_input),
+            "sentiment": analyze_sentiment(text_input),
+            "summary": summarize_text(text_input),
+            "keywords": extract_keywords(text_input),
+            "ner": analyze_ner(text_input),
+            "language": detect_language(text_input),
+            "toxicity": detect_toxicity(text_input)
         }
-
-        json_result = json.dumps(result_data, indent=2)
-        st.download_button("Download Analysis Report", data=json_result, file_name="text_analysis.json", mime="application/json")
+        st.download_button("Download JSON Report", json.dumps(results, indent=2), "text_analysis.json", "application/json")
     else:
-        st.warning("⚠️ Please enter some text to analyze")
+        st.warning("⚠️ Please enter some text to analyze.")
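A quick way to sanity-check the newly pinned models and the rewritten helpers is to exercise them outside Streamlit before the Space rebuilds. The sketch below is illustrative only: the sample sentence and variable names are mine, the emotion model ID is one of those pinned in the diff, and it assumes transformers (with a PyTorch backend), keybert, and langdetect are installed, as the imports imply. One detail worth double-checking: as far as I can tell, KeyBERT's extract_keywords() takes keyphrase_ngram_range rather than ngram_range, so the call in the new extract_keywords() may need that keyword renamed.

# Minimal sanity-check sketch (not part of app.py). The emotion model ID comes
# from the diff above; the sample sentence is made up for illustration.
from transformers import pipeline
from keybert import KeyBERT
import langdetect

sample = "Hugging Face Spaces make it easy to share small NLP demos built with Streamlit."

# Same pipeline and post-processing as load_models() / analyze_emotions() in the app.
emotion = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    return_all_scores=True,
)
print({r["label"]: round(r["score"], 2) for r in emotion(sample)[0]})

# KeyBERT's keyword argument is keyphrase_ngram_range, not ngram_range.
kw_model = KeyBERT()
print(kw_model.extract_keywords(sample, keyphrase_ngram_range=(1, 2), stop_words="english"))

# langdetect returns an ISO 639-1 code such as "en".
print(langdetect.detect(sample))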