Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -4,36 +4,26 @@ import spaces
 import wbgtopic
 import plotly.graph_objects as go
 import plotly.express as px
-import plotly.figure_factory as ff
-import nltk
 import numpy as np
 import pandas as pd
-
-from scipy import stats
-import torch
-from wordcloud import WordCloud
-from topic_translator import translate_topics
 from nltk.tokenize import sent_tokenize, word_tokenize
 from nltk.sentiment import SentimentIntensityAnalyzer
 from sklearn.cluster import KMeans

-# GPU setup
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

-#
 try:
     nltk.download('punkt', quiet=True)
     nltk.download('vader_lexicon', quiet=True)
-except
-

-SAMPLE_TEXT = """
-The three reportedly discussed the Stargate Project, a large-scale AI initiative led by OpenAI, SoftBank, and U.S. software giant Oracle. The project aims to invest $500 billion over the next four years in building new AI infrastructure in the U.S. The U.S. government has shown a strong commitment to the initiative, with President Donald Trump personally announcing it at the White House the day after his inauguration last month. If Samsung participates, the project will lead to a Korea-U.S.-Japan AI alliance.
-The AI sector requires massive investments and extensive resources, including advanced models, high-performance AI chips to power the models, and large-scale data centers to operate them. Nvidia and TSMC currently dominate the AI sector, but a partnership between Samsung, SoftBank, and OpenAI could pave the way for a competitive alternative.
-"""
-
-# Specify the device when initializing WBGDocTopic
-clf = wbgtopic.WBGDocTopic(device=device)

 def safe_process(func):
     def wrapper(*args, **kwargs):
@@ -44,144 +34,196 @@ def safe_process(func):
         return None
     return wrapper

 @safe_process
 def analyze_text_sections(text):
     sentences = sent_tokenize(text)
     sections = [' '.join(sentences[i:i+3]) for i in range(0, len(sentences), 3)]
-    section_topics = []

     for section in sections:
-
-

     return section_topics

 @safe_process
-def calculate_topic_correlations(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        print(f"Correlation calculation error: {e}")
-        return np.array([[1]]), ['Error']

-@spaces.GPU()
-def process_all_analysis(text):
-    try:
-        # Basic topic analysis
-        raw_results = clf.suggest_topics(text)
-        topics = process_results(raw_results)
-
-        # Additional analyses
-        section_topics = analyze_text_sections(text)
-        corr_matrix, labels = calculate_topic_correlations(topics)
-        sentiments = perform_sentiment_analysis(text)
-        clusters = create_topic_clusters(topics)
-
-        # Create charts
-        bar_chart, radar_chart = create_main_charts(topics)
-        heatmap = create_correlation_heatmap(corr_matrix, labels)
-        evolution_chart = create_topic_evolution(section_topics)
-        gauge_chart = create_confidence_gauge(topics)
-
-        results = {
-            'topics': topics,
-            'bar_chart': bar_chart,
-            'radar_chart': radar_chart,
-            'heatmap': heatmap,
-            'evolution': evolution_chart,
-            'gauge': gauge_chart,
-            'sentiments': sentiments.to_dict() if sentiments is not None else {},
-            'clusters': clusters.tolist() if clusters is not None else []
-        }
-
-        # Return individual results
-        return (
-            results,          # JSON output
-            bar_chart,        # plot1
-            radar_chart,      # plot2
-            heatmap,          # plot3
-            evolution_chart,  # plot4
-            gauge_chart,      # plot5
-            go.Figure()       # plot6 (empty sentiment chart)
-        )
-    except Exception as e:
-        print(f"Analysis error: {str(e)}")
-        empty_fig = go.Figure()
-        return (
-            {'error': str(e), 'topics': []},
-            empty_fig,
-            empty_fig,
-            empty_fig,
-            empty_fig,
-            empty_fig,
-            empty_fig
-        )

 @safe_process
 def perform_sentiment_analysis(text):
     sia = SentimentIntensityAnalyzer()
-
-
-    return pd.DataFrame(

 @safe_process
-def create_topic_clusters(
-    if len(
-        return

-    X = np.array(
-
     clusters = kmeans.fit_predict(X)
-    return clusters

 @safe_process
-def create_main_charts(
-
-
-
-
-
-
-
     bar_fig.update_layout(
         title='주제 분석 결과',
-        height=500,
         xaxis_title='주제',
         yaxis_title='관련도 (%)',
-        template='plotly_white'
     )

     radar_fig = go.Figure()
     radar_fig.add_trace(go.Scatterpolar(
-        r=
-        theta=
         fill='toself',
         name='주제 분포'
     ))
     radar_fig.update_layout(
         title='주제 레이더 차트',
         height=500,
-
     )
-
     return bar_fig, radar_fig

 @safe_process
 def create_correlation_heatmap(corr_matrix, labels):
     fig = go.Figure(data=go.Heatmap(
         z=corr_matrix,
         x=labels,
@@ -195,85 +237,155 @@ def create_correlation_heatmap(corr_matrix, labels):
     )
     return fig

 @safe_process
 def create_topic_evolution(section_topics):
-
-
-
     fig = go.Figure()
-
-
-
-
-
-
-
-
-
-
-
-
-

     fig.update_layout(
         title='주제 변화 추이',
         xaxis_title='섹션',
-        yaxis_title='
         height=500,
         template='plotly_white'
     )
     return fig

 @safe_process
-def create_confidence_gauge(
     fig = go.Figure()
-
     fig.add_trace(go.Indicator(
         mode="gauge+number",
-        value=
-        title={'text':
-        domain={'row': 0, 'column': i
     ))
     fig.update_layout(
-        grid={'rows': 1, 'columns':
         height=400,
         template='plotly_white'
     )
     return fig

-
-
-
-
-
-
-
-
-
-
-
-
-
-
     }
-

-


 with gr.Blocks(title="고급 문서 주제 분석기") as demo:
     gr.Markdown("## 📊 고급 문서 주제 분석기")
-    gr.Markdown("문서를 입력하면 다양한 분석 결과를 시각화하여 보여줍니다.")

     with gr.Row():
-
             value=SAMPLE_TEXT,
             label="분석할 텍스트",
-            placeholder="여기에 분석할 텍스트를 입력하세요",
             lines=8
         )
-
     with gr.Row():
         submit_btn = gr.Button("분석 시작", variant="primary")
@@ -282,25 +394,22 @@ with gr.Blocks(title="고급 문서 주제 분석기") as demo:
         with gr.Row():
             plot1 = gr.Plot(label="주제 분포")
             plot2 = gr.Plot(label="레이더 차트")
-
         with gr.TabItem("상세 분석"):
             with gr.Row():
                 plot3 = gr.Plot(label="상관관계 히트맵")
                 plot4 = gr.Plot(label="주제 변화 추이")
-
         with gr.TabItem("신뢰도 분석"):
             plot5 = gr.Plot(label="신뢰도 게이지")
-
         with gr.TabItem("감성 분석"):
             plot6 = gr.Plot(label="감성 분석 결과")
-
     with gr.Row():
-

     submit_btn.click(
         fn=process_all_analysis,
-        inputs=[
-        outputs=[
     )

 if __name__ == "__main__":
@@ -308,6 +417,6 @@ if __name__ == "__main__":
     demo.launch(
         server_name="0.0.0.0",
         server_port=7860,
-        share=
         debug=True
-    )
After the change, app.py (new side of the diff):

 import wbgtopic
 import plotly.graph_objects as go
 import plotly.express as px
 import numpy as np
 import pandas as pd
+import nltk
 from nltk.tokenize import sent_tokenize, word_tokenize
 from nltk.sentiment import SentimentIntensityAnalyzer
 from sklearn.cluster import KMeans
+import torch

 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

+# Initialize WBGDocTopic
+clf = wbgtopic.WBGDocTopic(device=device)
+
 try:
     nltk.download('punkt', quiet=True)
     nltk.download('vader_lexicon', quiet=True)
+except Exception:
+    pass

+SAMPLE_TEXT = """Your sample text here ..."""

 def safe_process(func):
     def wrapper(*args, **kwargs):
         return None
     return wrapper

+
+################################################################
+# 1) Convert Raw Results into a Consistent Format              #
+################################################################
+
+@safe_process
+def parse_wbg_results(raw_output):
+    """
+    Example: raw_output might be something like:
+    [
+      { 'Innovation and Entrepreneurship': 0.32,
+        'Digital Development': 0.27,
+        ... }
+    ]
+    or it might be [ [ {...}, {...} ] ].
+
+    Adjust the logic so we end up with a list of dicts:
+    [
+      {'label': 'Innovation and Entrepreneurship', 'score_mean': 0.32, 'score_std': 0.0},
+      {'label': 'Digital Development', 'score_mean': 0.27, 'score_std': 0.0},
+      ...
+    ]
+    """
+    if not raw_output:
+        return []
+
+    # The library may return a list with a single dictionary:
+    # raw_output[0] might be a dict of {topic: score},
+    # or a list of dicts that already carry 'label'/'score_mean' keys.
+    first_item = raw_output[0]
+
+    # Already a list of dicts with 'label', 'score_mean', etc.
+    if isinstance(first_item, dict) and 'label' in first_item:
+        return raw_output
+
+    # A dict of {topic_label: numeric_score}: convert it
+    if isinstance(first_item, dict):
+        parsed_list = []
+        for label, val in first_item.items():
+            parsed_list.append({
+                'label': label,
+                'score_mean': float(val),
+                'score_std': 0.0  # default when no std is given
+            })
+        return parsed_list
+
+    # Any other shape is unsupported
+    return []
+
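To make the contract concrete, here is a minimal round-trip sketch; the topic names and scores are invented, and it assumes `safe_process` passes results through:

```python
# Hypothetical illustration, not part of the commit
raw = [{'Innovation and Entrepreneurship': 0.32, 'Digital Development': 0.27}]
print(parse_wbg_results(raw))
# -> [{'label': 'Innovation and Entrepreneurship', 'score_mean': 0.32, 'score_std': 0.0},
#     {'label': 'Digital Development', 'score_mean': 0.27, 'score_std': 0.0}]
```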
+
+################################################################
+# 2) Section-based Analysis                                    #
+################################################################
+
 @safe_process
 def analyze_text_sections(text):
+    """
+    Splits text into sections, calls clf.suggest_topics on each,
+    and returns a list of lists:
+    section_topics = [
+      [ {'label': '...', 'score_mean': ...}, {...} ],
+      [ {'label': '...', 'score_mean': ...}, {...} ],
+      ...
+    ]
+    """
     sentences = sent_tokenize(text)
+    # e.g. group 3 sentences per section
     sections = [' '.join(sentences[i:i+3]) for i in range(0, len(sentences), 3)]

+    section_topics = []
     for section in sections:
+        raw_sec = clf.suggest_topics(section)
+        parsed_sec = parse_wbg_results(raw_sec)
+        section_topics.append(parsed_sec)

     return section_topics
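The 3-sentence grouping is easy to check in isolation; a hypothetical snippet that assumes the punkt data is available and skips the model call:

```python
# Hypothetical illustration, not part of the commit
from nltk.tokenize import sent_tokenize

text = "One. Two. Three. Four. Five. Six. Seven."
sentences = sent_tokenize(text)
sections = [' '.join(sentences[i:i+3]) for i in range(0, len(sentences), 3)]
print(sections)  # ['One. Two. Three.', 'Four. Five. Six.', 'Seven.']
```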
+
+################################################################
+# 3) Basic Summaries (Correlation, Sentiment, Clusters, etc.)  #
+################################################################
+
 @safe_process
+def calculate_topic_correlations(topic_dicts):
+    """
+    If we only want a single dimension of correlation (like score_mean),
+    we can do a simple correlation across the different topics.
+    But typically you'd want multiple texts or a multi-dimensional approach.
+    """
+    if len(topic_dicts) < 2:
+        # Not enough topics to correlate
+        return np.array([[1.0]]), ["Insufficient topics"]
+
+    labels = [d['label'] for d in topic_dicts]
+    scores = [d['score_mean'] for d in topic_dicts]  # single dimension
+
+    if len(scores) < 2:
+        return np.array([[1.0]]), ["Insufficient topics"]
+
+    corr_matrix = np.corrcoef(scores)
+    return corr_matrix, labels
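As the docstring hints, np.corrcoef over a single score per topic degenerates to a scalar, which the heatmap code below guards against. With several documents per topic you would get a real matrix; a hypothetical sketch:

```python
# Hypothetical illustration, not part of the commit: invented scores
import numpy as np

# rows = topics, columns = documents
scores_per_doc = np.array([
    [0.32, 0.29, 0.41],  # 'Innovation and Entrepreneurship'
    [0.27, 0.31, 0.22],  # 'Digital Development'
    [0.11, 0.09, 0.15],  # 'Trade'
])
corr = np.corrcoef(scores_per_doc)  # shape (3, 3), one row/column per topic
print(corr.round(2))
```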

 @safe_process
 def perform_sentiment_analysis(text):
     sia = SentimentIntensityAnalyzer()
+    sents = sent_tokenize(text)
+    results = [sia.polarity_scores(s) for s in sents]
+    return pd.DataFrame(results)
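Each row of the returned DataFrame is VADER's score dict for one sentence, giving neg/neu/pos/compound columns. A quick sketch (assumes the vader_lexicon data is installed):

```python
# Hypothetical illustration, not part of the commit
from nltk.sentiment import SentimentIntensityAnalyzer

sia = SentimentIntensityAnalyzer()
scores = sia.polarity_scores("The project aims to invest $500 billion.")
print(sorted(scores))  # ['compound', 'neg', 'neu', 'pos']
```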

 @safe_process
+def create_topic_clusters(topic_dicts):
+    if len(topic_dicts) < 3:
+        return [0] * len(topic_dicts)  # trivial single cluster
+
+    # Features: 'score_mean' plus 'score_std' (defaulting to 0.0)
+    X = []
+    for t in topic_dicts:
+        X.append([t['score_mean'], t.get('score_std', 0.0)])

+    X = np.array(X)
+    if X.shape[0] < 3:
+        return [0] * X.shape[0]
+
+    kmeans = KMeans(n_clusters=min(3, X.shape[0]), random_state=42)
     clusters = kmeans.fit_predict(X)
+    return clusters.tolist()  # safe to JSON-encode
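A hypothetical call, again assuming `safe_process` passes results through; with three topics KMeans assigns up to three clusters on the (score_mean, score_std) features:

```python
# Hypothetical illustration, not part of the commit: invented numbers
example_topics = [
    {'label': 'Trade', 'score_mean': 0.11, 'score_std': 0.02},
    {'label': 'Digital Development', 'score_mean': 0.27, 'score_std': 0.05},
    {'label': 'Innovation and Entrepreneurship', 'score_mean': 0.32, 'score_std': 0.04},
]
print(create_topic_clusters(example_topics))  # e.g. [0, 1, 2]; label order is arbitrary
```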
+
+################################################################
+# 4) Charts (Bar, Radar, Correlation Heatmap, etc.)            #
+################################################################

 @safe_process
+def create_main_charts(topic_dicts):
+    """
+    Expects a list of dicts with keys 'label', 'score_mean', ...
+    We'll just use 'score_mean' (or a scaled version).
+    """
+    if not topic_dicts:
+        return go.Figure(), go.Figure()
+
+    # Bar chart
+    labels = [t['label'] for t in topic_dicts]
+    scores = [t['score_mean'] * 100 for t in topic_dicts]  # convert to %
+
+    bar_fig = go.Figure(
+        data=[go.Bar(x=labels, y=scores, marker_color='rgb(55, 83, 109)')]
+    )
     bar_fig.update_layout(
         title='주제 분석 결과',
         xaxis_title='주제',
         yaxis_title='관련도 (%)',
+        template='plotly_white',
+        height=500,
     )

+    # Radar chart
     radar_fig = go.Figure()
     radar_fig.add_trace(go.Scatterpolar(
+        r=scores,
+        theta=labels,
         fill='toself',
         name='주제 분포'
     ))
     radar_fig.update_layout(
         title='주제 레이더 차트',
+        template='plotly_white',
         height=500,
+        polar=dict(radialaxis=dict(visible=True)),
+        showlegend=False
     )
     return bar_fig, radar_fig

+
 @safe_process
 def create_correlation_heatmap(corr_matrix, labels):
+    if corr_matrix.ndim == 0:
+        # It's a scalar (shape ()), so wrap it into a 1x1 matrix
+        corr_matrix = np.array([[corr_matrix]])
+
+    if corr_matrix.shape == (1, 1):
+        # Usually means there was not enough data for a correlation
+        fig = go.Figure()
+        fig.add_annotation(text="Not enough topics for correlation", showarrow=False)
+        return fig
+
     fig = go.Figure(data=go.Heatmap(
         z=corr_matrix,
         x=labels,
     )
     return fig

+
 @safe_process
 def create_topic_evolution(section_topics):
+    """
+    section_topics: list of [ {'label': ..., 'score_mean': ...}, ... ],
+    one element per section.
+    """
     fig = go.Figure()
+    if not section_topics:
+        return fig
+
+    # Take the first section's topic list as the reference
+    if not section_topics[0]:
+        return fig
+
+    # For each topic in the first section, gather its evolution across sections
+    for topic_dict in section_topics[0]:
+        label = topic_dict['label']
+        score_list = []
+        for sec_list in section_topics:
+            # find the matching label in this section
+            match = next((d for d in sec_list if d['label'] == label), None)
+            score_list.append(match['score_mean'] if match else 0.0)
+
+        fig.add_trace(go.Scatter(
+            x=list(range(len(section_topics))),
+            y=score_list,
+            name=label,
+            mode='lines+markers'
+        ))

     fig.update_layout(
         title='주제 변화 추이',
         xaxis_title='섹션',
+        yaxis_title='score_mean',
         height=500,
         template='plotly_white'
     )
     return fig

+
 @safe_process
+def create_confidence_gauge(topic_dicts):
+    """
+    If your data doesn't have a separate confidence measure, you may skip
+    or adapt this. For example, you might define confidence = (1 - score_std) * 100.
+    """
+    if not topic_dicts:
+        return go.Figure()
+
     fig = go.Figure()
+    num_topics = len(topic_dicts)
+
+    for i, t in enumerate(topic_dicts):
+        confidence_val = 100.0 * (1.0 - t.get('score_std', 0.0))  # an example measure
         fig.add_trace(go.Indicator(
             mode="gauge+number",
+            value=confidence_val,
+            title={'text': t['label']},
+            domain={'row': 0, 'column': i}
         ))
+
     fig.update_layout(
+        grid={'rows': 1, 'columns': num_topics},
         height=400,
         template='plotly_white'
     )
     return fig
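The (1 - score_std) * 100 heuristic is easy to sanity-check by hand:

```python
# Hypothetical illustration, not part of the commit: invented numbers
t = {'label': 'Digital Development', 'score_mean': 0.27, 'score_std': 0.25}
print(100.0 * (1.0 - t.get('score_std', 0.0)))  # 75.0
```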

+
+################################################################
+# 5) Putting Everything into `process_all_analysis`            #
+################################################################
+
+@spaces.GPU()
+def process_all_analysis(text):
+    try:
+        # 1) Suggest topics on the entire text
+        raw_results = clf.suggest_topics(text)
+        all_topics = parse_wbg_results(raw_results)  # keep the full list of dicts
+
+        # 2) Top 5, sorted by score_mean descending, to highlight
+        sorted_topics = sorted(all_topics, key=lambda x: x['score_mean'], reverse=True)
+        top_topics = sorted_topics[:5]
+
+        # 3) Section-based topics (a list of lists)
+        section_topics = analyze_text_sections(text)
+
+        # 4) Extra analyses
+        corr_matrix, corr_labels = calculate_topic_correlations(all_topics)
+        sentiments_df = perform_sentiment_analysis(text)
+        clusters = create_topic_clusters(all_topics)
+
+        # 5) Build charts
+        bar_chart, radar_chart = create_main_charts(top_topics)  # show the top 5
+        heatmap = create_correlation_heatmap(corr_matrix, corr_labels)
+        evolution_chart = create_topic_evolution(section_topics)
+        gauge_chart = create_confidence_gauge(top_topics)
+
+        # 6) Prepare output for the JSON field; everything must be
+        # JSON-serializable with string keys
+        results = {
+            "top_topics": top_topics,    # list of dicts
+            "clusters": clusters,        # list of ints
+            "sentiments": sentiments_df.to_dict(orient="records"),
         }
+
+        return (
+            results,          # JSON output
+            bar_chart,        # plot1
+            radar_chart,      # plot2
+            heatmap,          # plot3
+            evolution_chart,  # plot4
+            gauge_chart,      # plot5
+            go.Figure()       # plot6 (placeholder for a sentiment plot)
+        )

+    except Exception as e:
+        print(f"Analysis error: {str(e)}")
+        empty_fig = go.Figure()
+        return (
+            {"error": str(e), "topics": []},
+            empty_fig,
+            empty_fig,
+            empty_fig,
+            empty_fig,
+            empty_fig,
+            empty_fig
+        )

+
+################################################################
+# 6) Gradio UI                                                 #
+################################################################

 with gr.Blocks(title="고급 문서 주제 분석기") as demo:
     gr.Markdown("## 📊 고급 문서 주제 분석기")

     with gr.Row():
+        text_input = gr.Textbox(
             value=SAMPLE_TEXT,
             label="분석할 텍스트",
             lines=8
         )
     with gr.Row():
         submit_btn = gr.Button("분석 시작", variant="primary")

         with gr.Row():
             plot1 = gr.Plot(label="주제 분포")
             plot2 = gr.Plot(label="레이더 차트")
         with gr.TabItem("상세 분석"):
             with gr.Row():
                 plot3 = gr.Plot(label="상관관계 히트맵")
                 plot4 = gr.Plot(label="주제 변화 추이")
         with gr.TabItem("신뢰도 분석"):
             plot5 = gr.Plot(label="신뢰도 게이지")
         with gr.TabItem("감성 분석"):
             plot6 = gr.Plot(label="감성 분석 결과")
+
     with gr.Row():
+        output_json = gr.JSON(label="상세 분석 결과")

     submit_btn.click(
         fn=process_all_analysis,
+        inputs=[text_input],
+        outputs=[output_json, plot1, plot2, plot3, plot4, plot5, plot6]
     )

 if __name__ == "__main__":
     demo.launch(
         server_name="0.0.0.0",
         server_port=7860,
+        share=False,
         debug=True
+    )
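A minimal local smoke test of the wiring, as a sketch; it assumes wbgtopic, the NLTK data, and a Spaces-compatible environment are available, and it only imports the module (the Blocks UI is constructed, but launch stays behind the __main__ guard):

```python
# Hypothetical illustration, not part of the commit
from app import process_all_analysis, SAMPLE_TEXT

outputs = process_all_analysis(SAMPLE_TEXT)
results, figures = outputs[0], outputs[1:]
print(results.get("top_topics", results))  # top-5 topics, or an error dict
print(len(figures))                        # expected: 6 plotly figures
```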