import functools
import json
import gradio as gr
import spaces
import wbgtopic
import plotly.graph_objects as go
import plotly.express as px
import numpy as np
import pandas as pd
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.sentiment import SentimentIntensityAnalyzer
from sklearn.cluster import KMeans
import torch
# Use the GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Initialize the WBGDocTopic classifier
clf = wbgtopic.WBGDocTopic(device=device)
# Download NLTK data if needed
try:
    nltk.download('punkt', quiet=True)
    nltk.download('punkt_tab', quiet=True)  # newer NLTK releases tokenize with punkt_tab
    nltk.download('vader_lexicon', quiet=True)
except Exception as e:
    print(f"NLTK data download error: {e}")
# Sample text for demonstration
SAMPLE_TEXT = """
The three reportedly discussed the Stargate Project, a large-scale AI initiative led by OpenAI, SoftBank, and U.S. software giant Oracle. The project aims to invest $500 billion over the next four years in building new AI infrastructure in the U.S. The U.S. government has shown a strong commitment to the initiative, with President Donald Trump personally announcing it at the White House the day after his inauguration last month. If Samsung participates, the project will lead to a Korea-U.S.-Japan AI alliance.
The AI sector requires massive investments and extensive resources, including advanced models, high-performance AI chips to power the models, and large-scale data centers to operate them. Nvidia and TSMC currently dominate the AI sector, but a partnership between Samsung, SoftBank, and OpenAI could pave the way for a competitive alternative.
"""
def safe_process(func):
    """
    A decorator that catches and logs exceptions raised inside a function,
    returning None if an error occurs. This keeps the Gradio interface
    from crashing on unexpected exceptions.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            print(f"Error in {func.__name__}: {str(e)}")
            return None
    return wrapper
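# Example: a decorated function returns None instead of raising, so callers
# must be prepared to handle None:
#
#     @safe_process
#     def might_fail():
#         raise ValueError("boom")
#
#     assert might_fail() is None  # the error is logged, not raised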
@safe_process
def parse_wbg_results(raw_output):
    """
    Convert the raw output from WBGDocTopic into a list of dictionaries with
    'label', 'score_mean', and 'score_std'. Adjust this logic to match the
    actual structure of raw_output.
    """
    if not raw_output:
        return []
    # Example: raw_output may look like
    # [ { "Innovation and Entrepreneurship": 0.74, "Digital Development": 0.65, ... } ]
    first_item = raw_output[0]
    # If the first item is already a dict with a 'label' key, assume it is
    # already in the target format.
    if isinstance(first_item, dict) and "label" in first_item:
        return raw_output
    # If it is a dict mapping topic -> score, flatten it.
    if isinstance(first_item, dict):
        parsed_list = []
        for label, val in first_item.items():
            parsed_list.append({
                "label": label,
                "score_mean": float(val),
                "score_std": 0.0  # default to 0 when std is not provided
            })
        return parsed_list
    return []
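# Illustrative transformation (assuming the topic->score dict shape above):
#
#     parse_wbg_results([{"Innovation and Entrepreneurship": 0.74}])
#     # -> [{"label": "Innovation and Entrepreneurship",
#     #      "score_mean": 0.74, "score_std": 0.0}]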
@safe_process
def analyze_text_sections(text):
    """
    Splits the text into sections and calls clf.suggest_topics for each section.
    Returns a list of topic lists, where each element is the parsed WBG result
    for that section.
    """
    sentences = sent_tokenize(text)
    # Group every 3 sentences into one section
    sections = [' '.join(sentences[i:i+3]) for i in range(0, len(sentences), 3)]
    section_topics = []
    for section in sections:
        raw_sec = clf.suggest_topics(section)
        parsed_sec = parse_wbg_results(raw_sec)
        section_topics.append(parsed_sec)
    return section_topics
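# Sectioning example: a 7-sentence text yields sections built from sentences
# [0:3], [3:6], and [6:7] -- the last section may hold fewer than 3 sentences.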
@safe_process
def calculate_topic_correlations(topic_dicts):
    """
    Calculates a correlation over the topics' 'score_mean' values.
    Note that np.corrcoef applied to a single 1-D vector returns a scalar,
    so with only one score per topic this is conceptually limited; it is
    kept here as an example (see the per-section sketch below for an
    alternative). Returns (corr_matrix, labels).
    """
    if len(topic_dicts) < 2:
        return np.array([[1.0]]), ["Insufficient topics"]
    labels = [d['label'] for d in topic_dicts]
    scores = [d['score_mean'] for d in topic_dicts]
    corr_matrix = np.corrcoef(scores)
    return corr_matrix, labels
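# A sketch of a more meaningful correlation, assuming the output shape of
# analyze_text_sections(): build a (sections x topics) score matrix and
# correlate topic columns. Illustrative only -- not wired into the pipeline.
# Topics whose scores are constant across sections produce NaN entries.
@safe_process
def section_topic_correlations(section_topics):
    """Correlate topic scores across sections; returns (corr_matrix, labels)."""
    if not section_topics or not section_topics[0] or len(section_topics) < 2:
        return np.array([[1.0]]), ["Insufficient sections"]
    labels = [d['label'] for d in section_topics[0]]
    if len(labels) < 2:
        return np.array([[1.0]]), ["Insufficient topics"]
    # rows = sections, columns = topics; a topic missing from a section scores 0.0
    matrix = np.array([
        [next((d['score_mean'] for d in sec if d['label'] == lab), 0.0)
         for lab in labels]
        for sec in section_topics
    ])
    return np.corrcoef(matrix.T), labels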
@safe_process
def perform_sentiment_analysis(text):
    """
    Uses NLTK's VADER sentiment analyzer to produce sentiment scores
    (neg, neu, pos, compound) for each sentence in the text.
    Returns a pandas DataFrame of results, one row per sentence.
    """
    sia = SentimentIntensityAnalyzer()
    sents = sent_tokenize(text)
    results = [sia.polarity_scores(s) for s in sents]
    return pd.DataFrame(results)
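# Example: perform_sentiment_analysis("Great news. Terrible outcome.")
# returns a DataFrame with one row per sentence and the columns
# ['neg', 'neu', 'pos', 'compound'] produced by VADER's polarity_scores().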
@safe_process
def create_topic_clusters(topic_dicts):
    """
    Applies KMeans clustering on (score_mean, score_std).
    If there are fewer than 3 topics, returns trivial cluster assignments.
    """
    if len(topic_dicts) < 3:
        return [0] * len(topic_dicts)
    X = np.array([[t['score_mean'], t.get('score_std', 0.0)] for t in topic_dicts])
    kmeans = KMeans(n_clusters=min(3, X.shape[0]), random_state=42)
    clusters = kmeans.fit_predict(X)
    return clusters.tolist()
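# Note: depending on the installed scikit-learn version, KMeans may emit a
# FutureWarning about the default n_init; passing n_init=10 explicitly
# reproduces the classic default and silences the warning.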
@safe_process
def create_main_charts(topic_dicts):
    """
    Creates a bar chart and a radar chart for the given list of topics.
    Uses 'score_mean' (scaled to a percentage) as the base score.
    """
    if not topic_dicts:
        return go.Figure(), go.Figure()
    labels = [t['label'] for t in topic_dicts]
    scores = [t['score_mean'] * 100 for t in topic_dicts]  # scale to %
    # Bar chart
    bar_fig = go.Figure(
        data=[go.Bar(x=labels, y=scores, marker_color='rgb(55, 83, 109)')]
    )
    bar_fig.update_layout(
        title='Topic Analysis Results',
        xaxis_title='Topics',
        yaxis_title='Relevance (%)',
        template='plotly_white',
        height=500,
    )
    # Radar chart
    radar_fig = go.Figure()
    radar_fig.add_trace(go.Scatterpolar(
        r=scores,
        theta=labels,
        fill='toself',
        name='Topic Distribution'
    ))
    radar_fig.update_layout(
        title='Topic Radar Chart',
        template='plotly_white',
        height=500,
        polar=dict(radialaxis=dict(visible=True)),
        showlegend=False
    )
    return bar_fig, radar_fig
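# Usage sketch:
#     bar_fig, radar_fig = create_main_charts(top_topics)
# Both figures share the same 0-100% scale, so they give directly
# comparable views of the top-topic distribution.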
@safe_process
def create_correlation_heatmap(corr_matrix, labels):
    """
    Creates a heatmap figure of the provided correlation matrix.
    If there is insufficient data, shows a placeholder message.
    """
    if corr_matrix.ndim == 0:
        # np.corrcoef returned a scalar (0-d array) => wrap it
        corr_matrix = np.array([[corr_matrix]])
    if corr_matrix.shape == (1, 1):
        # Not enough data for a correlation matrix
        fig = go.Figure()
        fig.add_annotation(text="Not enough topics for correlation", showarrow=False)
        return fig
    fig = go.Figure(data=go.Heatmap(
        z=corr_matrix,
        x=labels,
        y=labels,
        colorscale='Viridis'
    ))
    fig.update_layout(
        title='Topic Correlation Heatmap',
        height=500,
        template='plotly_white'
    )
    return fig
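# With the default pipeline, calculate_topic_correlations() hands this
# function a scalar, so users see the placeholder branch above; feeding it
# the section_topic_correlations() sketch instead yields a real heatmap.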
@safe_process
def create_topic_evolution(section_topics):
    """
    Plots topic evolution across sections.
    section_topics: list of lists, where each inner list
    is a list of dicts [{'label': ..., 'score_mean': ...}, ...]
    """
    fig = go.Figure()
    if not section_topics or not section_topics[0]:
        return fig
    # For each topic in the first section, track its score across all sections
    for topic_dict in section_topics[0]:
        label = topic_dict['label']
        score_list = []
        for sec_list in section_topics:
            match = next((d for d in sec_list if d['label'] == label), None)
            score_list.append(match['score_mean'] if match else 0.0)
        fig.add_trace(go.Scatter(
            x=list(range(len(section_topics))),
            y=score_list,
            name=label,
            mode='lines+markers'
        ))
    fig.update_layout(
        title='Topic Evolution Across Sections',
        xaxis_title='Section',
        yaxis_title='Score Mean',
        height=500,
        template='plotly_white'
    )
    return fig
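# Known limitation: only topics present in the first section are tracked,
# so a topic that first appears in a later section never gets a trace.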
@safe_process
def create_confidence_gauge(topic_dicts):
    """
    Creates individual gauge indicators for each topic's confidence.
    A simple heuristic: confidence = (1 - score_std) * 100.
    """
    if not topic_dicts:
        return go.Figure()
    fig = go.Figure()
    num_topics = len(topic_dicts)
    for i, t in enumerate(topic_dicts):
        # If score_std is absent, default to 0 => confidence = 100%
        conf_val = 100.0 * (1.0 - t.get("score_std", 0.0))
        fig.add_trace(go.Indicator(
            mode="gauge+number",
            value=conf_val,
            title={'text': t['label']},
            domain={'row': 0, 'column': i}
        ))
    fig.update_layout(
        grid={'rows': 1, 'columns': num_topics},
        height=400,
        template='plotly_white'
    )
    return fig
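# Note: because parse_wbg_results() defaults score_std to 0.0, every gauge
# reads 100% unless the model supplies a real std; the heuristic only
# becomes informative when score_std is populated.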
@spaces.GPU()
def process_all_analysis(text):
    """
    Main function that calls all analysis steps and returns
    structured JSON plus various Plotly figures.
    """
    try:
        # 1) Suggest topics for the entire text
        raw_results = clf.suggest_topics(text)
        all_topics = parse_wbg_results(raw_results) or []
        # 2) Sort by 'score_mean' descending and keep the top 5
        sorted_topics = sorted(all_topics, key=lambda x: x['score_mean'], reverse=True)
        top_topics = sorted_topics[:5]
        # 3) Analyze by sections
        section_topics = analyze_text_sections(text)
        # 4) Extra analyses
        corr_matrix, corr_labels = calculate_topic_correlations(all_topics)
        sentiments_df = perform_sentiment_analysis(text)
        clusters = create_topic_clusters(all_topics)
        # 5) Build charts
        bar_chart, radar_chart = create_main_charts(top_topics)
        heatmap = create_correlation_heatmap(corr_matrix, corr_labels)
        evolution_chart = create_topic_evolution(section_topics)
        gauge_chart = create_confidence_gauge(top_topics)
        # 6) Prepare JSON output (valid JSON with string keys); guard against
        #    None returned by @safe_process-wrapped helpers
        results = {
            "top_topics": top_topics,  # list of dicts
            "clusters": clusters if clusters is not None else [],
            "sentiments": sentiments_df.to_dict(orient="records")
                          if sentiments_df is not None else []
        }
        # Return JSON + figures
        return (
            results,          # JSON output
            bar_chart,        # plot1
            radar_chart,      # plot2
            heatmap,          # plot3
            evolution_chart,  # plot4
            gauge_chart,      # plot5
            go.Figure()       # plot6 placeholder (see the sentiment-chart sketch below)
        )
    except Exception as e:
        print(f"Analysis error: {str(e)}")
        empty_fig = go.Figure()
        return (
            {"error": str(e), "topics": []},
            empty_fig,
            empty_fig,
            empty_fig,
            empty_fig,
            empty_fig,
            empty_fig
        )
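# Optional sketch (not part of the original pipeline): a chart that could fill
# the plot6 placeholder above, assuming the DataFrame columns produced by
# perform_sentiment_analysis(). To wire it in, return
# create_sentiment_chart(sentiments_df) instead of go.Figure() for plot6.
@safe_process
def create_sentiment_chart(sentiments_df):
    """Plot per-sentence VADER compound sentiment as a bar chart."""
    fig = go.Figure()
    if sentiments_df is None or sentiments_df.empty:
        return fig
    fig.add_trace(go.Bar(
        x=list(range(len(sentiments_df))),  # sentence index
        y=sentiments_df['compound'],        # -1 (negative) .. +1 (positive)
        marker_color='rgb(26, 118, 255)'
    ))
    fig.update_layout(
        title='Per-Sentence Sentiment (VADER compound)',
        xaxis_title='Sentence',
        yaxis_title='Compound score',
        template='plotly_white',
        height=400
    )
    return fig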
######################################################
#               Gradio UI Definition                 #
######################################################
with gr.Blocks(title="Advanced Document Topic Analyzer") as demo:
    gr.Markdown("## 📝 Advanced Document Topic Analyzer")
    gr.Markdown(
        "Enter text, then click 'Start Analysis' to see topic analysis, correlation, "
        "confidence gauges, sentiment, and more."
    )
    with gr.Row():
        text_input = gr.Textbox(
            value=SAMPLE_TEXT,
            label="Text to Analyze",
            lines=8
        )
    with gr.Row():
        submit_btn = gr.Button("Start Analysis", variant="primary")
    with gr.Tabs():
        with gr.TabItem("Main Analysis"):
            with gr.Row():
                plot1 = gr.Plot(label="Topic Distribution")
                plot2 = gr.Plot(label="Radar Chart")
        with gr.TabItem("Detailed Analysis"):
            with gr.Row():
                plot3 = gr.Plot(label="Correlation Heatmap")
                plot4 = gr.Plot(label="Topic Evolution")
        with gr.TabItem("Confidence Analysis"):
            plot5 = gr.Plot(label="Confidence Gauge")
        with gr.TabItem("Sentiment Analysis"):
            plot6 = gr.Plot(label="Sentiment Results")
    with gr.Row():
        output_json = gr.JSON(label="Detailed Analysis Output")
    submit_btn.click(
        fn=process_all_analysis,
        inputs=[text_input],
        outputs=[output_json, plot1, plot2, plot3, plot4, plot5, plot6]
    )
if __name__ == "__main__":
    demo.queue(max_size=1)
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,  # set True for a public share link
        debug=True
    )