Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from transformers import pipeline
|
3 |
+
import matplotlib.pyplot as plt
|
4 |
+
import json
|
5 |
+
import langdetect
|
6 |
+
from keybert import KeyBERT
|
7 |
+
|
8 |
+
# Load models with caching: one registry dict shared by all analysis helpers.
@st.cache_resource
def load_models():
    """Build and cache every model the app uses.

    Returns a dict mapping feature name -> ready-to-call model object.
    Wrapped in ``st.cache_resource`` so Streamlit reruns reuse the same
    (expensive) model instances instead of reloading them.
    """
    registry = {}
    registry["emotion"] = pipeline(
        "text-classification",
        model="j-hartmann/emotion-english-distilroberta-base",
        return_all_scores=True,
    )
    registry["sentiment"] = pipeline("sentiment-analysis")
    registry["summarization"] = pipeline("summarization")
    registry["ner"] = pipeline("ner", grouped_entities=True)
    registry["toxicity"] = pipeline(
        "text-classification", model="unitary/unbiased-toxic-roberta"
    )
    registry["keyword_extraction"] = KeyBERT()
    return registry
|
19 |
+
|
20 |
+
# Instantiate (or fetch the cached copies of) all models at import time.
models = load_models()
|
21 |
+
|
22 |
+
# Function: Emotion Detection
def analyze_emotions(text):
    """Score *text* against every label of the emotion classifier.

    Returns a dict of emotion label -> probability rounded to 2 decimals.
    """
    scores = models["emotion"](text)[0]
    return {entry["label"]: round(entry["score"], 2) for entry in scores}
|
27 |
+
|
28 |
+
# Function: Sentiment Analysis
def analyze_sentiment(text):
    """Classify overall sentiment of *text*.

    Returns ``{label: score}`` for the top prediction, score rounded
    to 2 decimals.
    """
    top = models["sentiment"](text)[0]
    label, score = top["label"], top["score"]
    return {label: round(score, 2)}
|
32 |
+
|
33 |
+
# Function: Text Summarization
def summarize_text(text):
    """Summarize *text* with the summarization pipeline.

    Bug fix: the original called ``models["summarization"](text["1024"])``,
    which indexes a string with a string key and raises
    ``TypeError: string indices must be integers``. The inline comment shows
    the intent was to limit the input length, so slice the first 1024
    characters instead.
    NOTE(review): this truncates *characters*, not tokens — confirm whether
    token-level truncation is required for the chosen model.
    """
    summary = models["summarization"](text[:1024])[0]["summary_text"]
    return summary
|
37 |
+
|
38 |
+
# Function: Keyword Extraction
def extract_keywords(text):
    """Extract 1-2 word keyphrases from *text* with KeyBERT.

    Returns KeyBERT's list of (keyphrase, relevance score) tuples.

    Bug fix: the original wrote ``keyphrase_ngram_range(1, 2)``, calling an
    undefined name as a function (NameError at runtime).
    ``KeyBERT.extract_keywords`` takes it as a keyword argument:
    ``keyphrase_ngram_range=(1, 2)``.
    """
    return models["keyword_extraction"].extract_keywords(
        text, keyphrase_ngram_range=(1, 2), stop_words='english'
    )
|
41 |
+
|
42 |
+
# Function: Named Entity Recognition (NER)
def analyze_ner(text):
    """Run grouped NER over *text*.

    Returns a dict mapping each recognized entity's surface text to its
    entity group label.
    """
    return {
        item["word"]: item["entity_group"]
        for item in models["ner"](text)
    }
|
46 |
+
|
47 |
+
# Function: Language Detection
def detect_language(text):
    """Return the language code langdetect infers for *text*.

    On detection failure, returns the sentinel string
    ``"Error detecting language"`` rather than raising.

    Bug fix: the original used a bare ``except:``, which also swallows
    ``KeyboardInterrupt``/``SystemExit`` and hides real bugs. Catch
    langdetect's own ``LangDetectException`` (raised e.g. for empty or
    feature-less input) instead.
    """
    try:
        return langdetect.detect(text)
    except langdetect.LangDetectException:
        return "Error detecting language"
|
54 |
+
|
55 |
+
# Function: Toxicity Detection
def detect_toxicity(text):
    """Classify *text* for toxicity.

    Returns ``{label: score}`` for the top prediction, score rounded
    to 2 decimals.
    """
    top = models["toxicity"](text)[0]
    return {top["label"]: round(top["score"], 2)}
|
59 |
+
|
60 |
+
|
61 |
+
# ---- Streamlit UI ----
st.title("🚀 AI-Powered Text Intelligence App")
st.markdown("Analyze text with multiple NLP features: Emotion Detection, Sentiment Analysis, Summarization, NER, Keywords, Language Detection, and more!")

# User input
text_input = st.text_area("Enter text to analyze:", "")

if st.button("Analyze Text"):
    if not text_input.strip():
        # Guard clause: nothing (or only whitespace) was entered.
        st.warning("⚠️ Please enter some text to analyze")
    else:
        # Run each analysis in turn, rendering results as we go and
        # accumulating everything into one report dict so the download
        # button at the bottom can export a single JSON document.
        report = {}

        st.subheader("🔹 Emotion Detection")
        report["emotion"] = analyze_emotions(text_input)
        st.json(report["emotion"])

        st.subheader("🔹 Sentiment Analysis")
        report["sentiment"] = analyze_sentiment(text_input)
        st.json(report["sentiment"])

        st.subheader("🔹 Text Summarization")
        report["summary"] = summarize_text(text_input)
        st.write(report["summary"])

        st.subheader("🔹 Keyword Extraction")
        report["keywords"] = extract_keywords(text_input)
        st.json(report["keywords"])

        st.subheader("🔹 Named Entity Recognition (NER)")
        report["ner"] = analyze_ner(text_input)
        st.json(report["ner"])

        st.subheader("🔹 Language Detection")
        report["language"] = detect_language(text_input)
        st.write(f"Detected Language: `{report['language']}`")

        st.subheader("🔹 Toxicity Detection")
        report["toxicity"] = detect_toxicity(text_input)
        st.json(report["toxicity"])

        # Offer the collected analysis as a downloadable JSON report.
        json_result = json.dumps(report, indent=2)
        st.download_button("Download Analysis Report", data=json_result, file_name="text_analysis.json", mime="application/json")
|