import json
import gradio as gr
import spaces
import wbgtopic
import plotly.graph_objects as go
import plotly.express as px
import numpy as np
import pandas as pd
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.sentiment import SentimentIntensityAnalyzer
from sklearn.cluster import KMeans
import torch
# Set GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Initialize WBGDocTopic
clf = wbgtopic.WBGDocTopic(device=device)
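
# Note: `spaces` is imported for Hugging Face ZeroGPU ("Running on Zero") Spaces.
# There, the GPU-bound entry point is normally decorated with @spaces.GPU;
# this script imports the package but does not apply the decorator.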
# Download NLTK data if needed
try:
    nltk.download('punkt', quiet=True)
    nltk.download('punkt_tab', quiet=True)  # required by sent_tokenize on newer NLTK releases
    nltk.download('vader_lexicon', quiet=True)
except Exception as e:
    print(f"NLTK data download error: {e}")
# Sample text for demonstration
SAMPLE_TEXT = """
The three reportedly discussed the Stargate Project, a large-scale AI initiative led by OpenAI, SoftBank, and U.S. software giant Oracle. The project aims to invest $500 billion over the next four years in building new AI infrastructure in the U.S. The U.S. government has shown a strong commitment to the initiative, with President Donald Trump personally announcing it at the White House the day after his inauguration last month. If Samsung participates, the project will lead to a Korea-U.S.-Japan AI alliance.
The AI sector requires massive investments and extensive resources, including advanced models, high-performance AI chips to power the models, and large-scale data centers to operate them. Nvidia and TSMC currently dominate the AI sector, but a partnership between Samsung, SoftBank, and OpenAI could pave the way for a competitive alternative.
"""
def safe_process(func):
    """
    A decorator that catches and logs exceptions inside a function,
    returning None if an error occurs. This helps ensure that
    the Gradio interface does not crash from unexpected exceptions.
    """
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            print(f"Error in {func.__name__}: {str(e)}")
            return None
    return wrapper
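
# `safe_process` is defined but not applied below. A hypothetical usage, if you
# want an analysis step to fail soft instead of raising, would look like:
#
#     @safe_process
#     def optional_metric(text):
#         ...  # any step that may raise
#
# The wrapped call then returns None on error instead of crashing the app.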
def parse_wbg_results(raw_output):
    """
    Convert the raw output from WBGDocTopic into a list of dictionaries with
    'label', 'score_mean', and 'score_std'. Adjust the logic according to the
    actual structure of raw_output.
    """
    if not raw_output:
        return []

    # Example logic: if raw_output looks like
    #   [ { "Innovation and Entrepreneurship": 0.74, "Digital Development": 0.65, ... } ]
    # we parse it accordingly.
    first_item = raw_output[0]

    # If the first item is already a dict with a 'label' key, it may already be in the right format
    if isinstance(first_item, dict) and "label" in first_item:
        return raw_output

    # If it's a dict mapping topic -> score
    if isinstance(first_item, dict):
        parsed_list = []
        for label, val in first_item.items():
            parsed_list.append({
                "label": label,
                "score_mean": float(val),
                "score_std": 0.0  # if std is not provided, default to 0
            })
        return parsed_list

    return []
def analyze_text_sections(text):
    """
    Splits the text into sections and calls clf.suggest_topics for each section.
    Returns a list of topic lists, where each element is the parsed WBG result
    for that section.
    """
    sentences = sent_tokenize(text)
    # Group every 3 sentences into one section
    sections = [' '.join(sentences[i:i + 3]) for i in range(0, len(sentences), 3)]

    section_topics = []
    for section in sections:
        raw_sec = clf.suggest_topics(section)
        parsed_sec = parse_wbg_results(raw_sec)
        section_topics.append(parsed_sec)
    return section_topics
def calculate_topic_correlations(topic_dicts):
    """
    Calculates correlation between topics based on 'score_mean'.
    Note: with a single score per topic there is only one observation per
    variable, so np.corrcoef collapses to a scalar and the heatmap falls back
    to its "not enough topics" placeholder. A section-level variant that
    produces a full matrix is sketched below.
    Returns (corr_matrix, labels).
    """
    if len(topic_dicts) < 2:
        return np.array([[1.0]]), ["Insufficient topics"]

    labels = [d['label'] for d in topic_dicts]
    scores = [d['score_mean'] for d in topic_dicts]
    if len(scores) < 2:
        return np.array([[1.0]]), ["Insufficient topics"]

    corr_matrix = np.corrcoef(scores)
    return corr_matrix, labels
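
# A minimal sketch (not wired into the app) of how a full topic-by-topic
# correlation matrix could be computed instead: build a topics x sections
# score matrix from analyze_text_sections() output and correlate the rows.
# The function name below is illustrative and not part of the original interface.
def calculate_section_topic_correlations(section_topics):
    """
    Returns (corr_matrix, labels), correlating each topic's score trajectory
    across sections. Requires at least two sections to be meaningful.
    """
    if len(section_topics) < 2 or not section_topics[0]:
        return np.array([[1.0]]), ["Insufficient sections"]

    labels = [d['label'] for d in section_topics[0]]
    matrix = []
    for label in labels:
        row = []
        for sec in section_topics:
            match = next((d for d in sec if d['label'] == label), None)
            row.append(match['score_mean'] if match else 0.0)
        matrix.append(row)
    # Rows are variables (topics), columns are observations (sections)
    return np.corrcoef(np.array(matrix)), labels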
def perform_sentiment_analysis(text):
    """
    Uses NLTK's VADER sentiment analyzer to produce sentiment scores
    (neg, neu, pos, compound) for each sentence in the text.
    Returns a pandas DataFrame of results.
    """
    sia = SentimentIntensityAnalyzer()
    sents = sent_tokenize(text)
    results = [sia.polarity_scores(s) for s in sents]
    return pd.DataFrame(results)
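
# A minimal sketch of a figure that could back the "Sentiment Results" tab,
# which process_all_analysis() currently fills with an empty placeholder.
# The function name is illustrative; it is not called anywhere by default.
def create_sentiment_chart(sentiments_df):
    """
    Plots the VADER compound score per sentence as a bar chart.
    Expects the DataFrame returned by perform_sentiment_analysis().
    """
    if sentiments_df is None or sentiments_df.empty:
        return go.Figure()

    fig = go.Figure(
        data=[go.Bar(
            x=list(range(len(sentiments_df))),
            y=sentiments_df['compound'],
            marker_color='rgb(26, 118, 255)'
        )]
    )
    fig.update_layout(
        title='Sentence-level Sentiment (VADER compound)',
        xaxis_title='Sentence index',
        yaxis_title='Compound score',
        template='plotly_white',
        height=400
    )
    return fig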
def create_topic_clusters(topic_dicts):
    """
    Applies KMeans clustering on (score_mean, score_std).
    If there are fewer than 3 topics, returns trivial cluster assignments.
    """
    if len(topic_dicts) < 3:
        return [0] * len(topic_dicts)

    X = []
    for t in topic_dicts:
        X.append([t['score_mean'], t.get('score_std', 0.0)])
    X = np.array(X)

    if X.shape[0] < 3:
        return [0] * X.shape[0]

    kmeans = KMeans(n_clusters=min(3, X.shape[0]), random_state=42)
    clusters = kmeans.fit_predict(X)
    return clusters.tolist()
def create_main_charts(topic_dicts):
    """
    Creates a bar chart and a radar chart for the given list of topics.
    Uses 'score_mean' as the base score.
    """
    if not topic_dicts:
        return go.Figure(), go.Figure()

    labels = [t['label'] for t in topic_dicts]
    scores = [t['score_mean'] * 100 for t in topic_dicts]  # scale to %

    # Bar chart
    bar_fig = go.Figure(
        data=[go.Bar(x=labels, y=scores, marker_color='rgb(55, 83, 109)')]
    )
    bar_fig.update_layout(
        title='Topic Analysis Results',
        xaxis_title='Topics',
        yaxis_title='Relevance (%)',
        template='plotly_white',
        height=500,
    )

    # Radar chart
    radar_fig = go.Figure()
    radar_fig.add_trace(go.Scatterpolar(
        r=scores,
        theta=labels,
        fill='toself',
        name='Topic Distribution'
    ))
    radar_fig.update_layout(
        title='Topic Radar Chart',
        template='plotly_white',
        height=500,
        polar=dict(radialaxis=dict(visible=True)),
        showlegend=False
    )
    return bar_fig, radar_fig
def create_correlation_heatmap(corr_matrix, labels):
    """
    Creates a heatmap figure of the provided correlation matrix.
    If there's insufficient data, shows a placeholder message.
    """
    if corr_matrix.ndim == 0:
        # It's a scalar (shape ()) => wrap it into a 1x1 matrix
        corr_matrix = np.array([[corr_matrix]])

    if corr_matrix.shape == (1, 1):
        # Not enough data for a correlation matrix
        fig = go.Figure()
        fig.add_annotation(text="Not enough topics for correlation", showarrow=False)
        return fig

    fig = go.Figure(data=go.Heatmap(
        z=corr_matrix,
        x=labels,
        y=labels,
        colorscale='Viridis'
    ))
    fig.update_layout(
        title='Topic Correlation Heatmap',
        height=500,
        template='plotly_white'
    )
    return fig
def create_topic_evolution(section_topics):
    """
    Plots topic evolution across sections.
    section_topics: list of lists, where each inner list
    is a list of dicts [{'label': ..., 'score_mean': ...}, ...]
    """
    fig = go.Figure()
    if not section_topics or not section_topics[0]:
        return fig

    # For each topic in the first section, track its score across all sections
    for topic_dict in section_topics[0]:
        label = topic_dict['label']
        score_list = []
        for sec_list in section_topics:
            match = next((d for d in sec_list if d['label'] == label), None)
            score_list.append(match['score_mean'] if match else 0.0)
        fig.add_trace(go.Scatter(
            x=list(range(len(section_topics))),
            y=score_list,
            name=label,
            mode='lines+markers'
        ))

    fig.update_layout(
        title='Topic Evolution Across Sections',
        xaxis_title='Section',
        yaxis_title='Score Mean',
        height=500,
        template='plotly_white'
    )
    return fig
def create_confidence_gauge(topic_dicts):
    """
    Creates individual gauge indicators for each topic's confidence.
    A simple heuristic: confidence = (1 - score_std) * 100.
    """
    if not topic_dicts:
        return go.Figure()

    fig = go.Figure()
    num_topics = len(topic_dicts)
    for i, t in enumerate(topic_dicts):
        # If score_std is not present, default to 0 => confidence = 100%
        conf_val = 100.0 * (1.0 - t.get("score_std", 0.0))
        fig.add_trace(go.Indicator(
            mode="gauge+number",
            value=conf_val,
            title={'text': t['label']},
            domain={'row': 0, 'column': i}
        ))

    fig.update_layout(
        grid={'rows': 1, 'columns': num_topics},
        height=400,
        template='plotly_white'
    )
    return fig
def process_all_analysis(text):
    """
    Main function that calls all analysis steps and returns
    structured JSON plus various Plotly figures.
    """
    try:
        # 1) Suggest topics for the entire text
        raw_results = clf.suggest_topics(text)
        all_topics = parse_wbg_results(raw_results)

        # 2) Sort by 'score_mean' descending to get the top 5
        sorted_topics = sorted(all_topics, key=lambda x: x['score_mean'], reverse=True)
        top_topics = sorted_topics[:5]

        # 3) Analyze section by section
        section_topics = analyze_text_sections(text)

        # 4) Extra analyses
        corr_matrix, corr_labels = calculate_topic_correlations(all_topics)
        sentiments_df = perform_sentiment_analysis(text)
        clusters = create_topic_clusters(all_topics)

        # 5) Build charts
        bar_chart, radar_chart = create_main_charts(top_topics)
        heatmap = create_correlation_heatmap(corr_matrix, corr_labels)
        evolution_chart = create_topic_evolution(section_topics)
        gauge_chart = create_confidence_gauge(top_topics)

        # 6) Prepare JSON output (ensure valid JSON with string keys)
        results = {
            "top_topics": top_topics,                              # list of dicts
            "clusters": clusters,                                  # list of ints
            "sentiments": sentiments_df.to_dict(orient="records")
        }

        # Return JSON + figures
        return (
            results,          # JSON output
            bar_chart,        # plot1
            radar_chart,      # plot2
            heatmap,          # plot3
            evolution_chart,  # plot4
            gauge_chart,      # plot5
            go.Figure()       # plot6 (placeholder for a sentiment plot, if desired)
        )
    except Exception as e:
        print(f"Analysis error: {str(e)}")
        empty_fig = go.Figure()
        return (
            {"error": str(e), "topics": []},
            empty_fig,
            empty_fig,
            empty_fig,
            empty_fig,
            empty_fig,
            empty_fig
        )
######################################################
#               Gradio UI Definition                 #
######################################################

with gr.Blocks(title="Advanced Document Topic Analyzer") as demo:
    gr.Markdown("## 📝 Advanced Document Topic Analyzer")
    gr.Markdown(
        "Enter text, then click 'Start Analysis' to see topic analysis, correlation, "
        "confidence gauges, sentiment, and more."
    )

    with gr.Row():
        text_input = gr.Textbox(
            value=SAMPLE_TEXT,
            label="Text to Analyze",
            lines=8
        )
    with gr.Row():
        submit_btn = gr.Button("Start Analysis", variant="primary")

    with gr.Tabs():
        with gr.TabItem("Main Analysis"):
            with gr.Row():
                plot1 = gr.Plot(label="Topic Distribution")
                plot2 = gr.Plot(label="Radar Chart")
        with gr.TabItem("Detailed Analysis"):
            with gr.Row():
                plot3 = gr.Plot(label="Correlation Heatmap")
                plot4 = gr.Plot(label="Topic Evolution")
        with gr.TabItem("Confidence Analysis"):
            plot5 = gr.Plot(label="Confidence Gauge")
        with gr.TabItem("Sentiment Analysis"):
            plot6 = gr.Plot(label="Sentiment Results")

    with gr.Row():
        output_json = gr.JSON(label="Detailed Analysis Output")

    submit_btn.click(
        fn=process_all_analysis,
        inputs=[text_input],
        outputs=[output_json, plot1, plot2, plot3, plot4, plot5, plot6]
    )
if __name__ == "__main__":
    demo.queue(max_size=1)
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,  # set True if you want a public share link
        debug=True
    )