import json import gradio as gr import spaces import wbgtopic import plotly.graph_objects as go import plotly.express as px import numpy as np import pandas as pd import nltk from nltk.tokenize import sent_tokenize, word_tokenize from nltk.sentiment import SentimentIntensityAnalyzer from sklearn.cluster import KMeans import torch # Set up GPU if available device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # Initialize WBGDocTopic clf = wbgtopic.WBGDocTopic(device=device) # Download NLTK data if necessary try: nltk.download('punkt', quiet=True) nltk.download('vader_lexicon', quiet=True) except Exception as e: print(f"NLTK data download error: {e}") # Sample Text SAMPLE_TEXT = """ The three reportedly discussed the Stargate Project, a large-scale AI initiative led by OpenAI, SoftBank, and U.S. software giant Oracle. The project aims to invest $500 billion over the next four years in building new AI infrastructure in the U.S. The U.S. government has shown a strong commitment to the initiative, with President Donald Trump personally announcing it at the White House the day after his inauguration last month. If Samsung participates, the project will lead to a Korea-U.S.-Japan AI alliance. The AI sector requires massive investments and extensive resources, including advanced models, high-performance AI chips to power the models, and large-scale data centers to operate them. Nvidia and TSMC currently dominate the AI sector, but a partnership between Samsung, SoftBank, and OpenAI could pave the way for a competitive alternative. """ def safe_process(func): """ Decorator to log exceptions and return None to prevent Gradio interface crashes. """ def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: print(f"Error in {func.__name__}: {str(e)}") return None return wrapper @safe_process def parse_wbg_results(raw_output): """ Standardize the output of wbgtopic.WBGDocTopic's suggest_topics() into a list of dictionaries with keys: 'label', 'score_mean', and 'score_std'. Example return structure: [ { "label": "Agriculture", "score_mean": 0.32, "score_std": 0.05 }, ... ] """ print(">>> DEBUG: raw_output =", raw_output) # If raw_output is a list of lists, flatten it. if isinstance(raw_output, list) and len(raw_output) > 0 and isinstance(raw_output[0], list): raw_output = raw_output[0] # If raw_output is a dict (instead of a list), convert it to the expected list format. if isinstance(raw_output, dict): parsed_list = [] for k, v in raw_output.items(): parsed_list.append({ "label": k, "score_mean": float(v) if v is not None else 0.0, "score_std": 0.0 }) return parsed_list # If the result is empty, return an empty list. if not raw_output: return [] # Assume raw_output is a list; get the first item. first_item = raw_output[0] # Case 1: Already in the form of a dictionary with a 'label' key. if isinstance(first_item, dict) and ("label" in first_item): parsed_list = [] for item in raw_output: label = item.get("label", "") score_mean = item.get("score_mean", None) score_std = item.get("score_std", None) # If only 'score' exists, use it as score_mean. if score_mean is None and "score" in item: score_mean = float(item["score"]) if score_mean is None: score_mean = 0.0 if score_std is None: score_std = 0.0 parsed_list.append({ "label": label, "score_mean": float(score_mean), "score_std": float(score_std) }) return parsed_list # Case 2: Dictionary with topic names as keys and scores as values. if isinstance(first_item, dict): merged = {} for d in raw_output: for k, v in d.items(): merged[k] = v # Overwrite duplicates with the last occurrence. parsed_list = [] for label, val in merged.items(): parsed_list.append({ "label": label, "score_mean": float(val), "score_std": 0.0 }) return parsed_list # If the structure is unexpected, return an empty list. return [] @safe_process def analyze_text_sections(text): """ Split the text into multiple sections (e.g., every 3 sentences) and analyze topics for each section using suggest_topics(). Returns a list of topic lists for each section. """ sentences = sent_tokenize(text) # Group every 3 sentences into one section. sections = [' '.join(sentences[i:i+3]) for i in range(0, len(sentences), 3)] section_topics = [] for section in sections: raw_sec = clf.suggest_topics(section) parsed_sec = parse_wbg_results(raw_sec) section_topics.append(parsed_sec) return section_topics @safe_process def calculate_topic_correlations(topic_dicts): """ Calculate correlation among topic score_means from a list of topic dictionaries. Note: Ideally, correlations should be calculated across different documents, but here we use topics from a single text. """ if len(topic_dicts) < 2: return np.array([[1.0]]), ["Insufficient topics"] labels = [d['label'] for d in topic_dicts] scores = [d['score_mean'] for d in topic_dicts] if len(scores) < 2: return np.array([[1.0]]), ["Insufficient topics"] corr_matrix = np.corrcoef(scores) return corr_matrix, labels @safe_process def perform_sentiment_analysis(text): """ Perform sentiment analysis on each sentence using NLTK's VADER. Returns a pandas DataFrame of sentiment scores. """ sia = SentimentIntensityAnalyzer() sents = sent_tokenize(text) results = [sia.polarity_scores(s) for s in sents] return pd.DataFrame(results) @safe_process def create_topic_clusters(topic_dicts): """ Perform KMeans clustering on topics based on score_mean and score_std. If there are fewer than 3 topics, assign all to cluster 0. """ if len(topic_dicts) < 3: return [0] * len(topic_dicts) X = [] for t in topic_dicts: X.append([t['score_mean'], t.get('score_std', 0.0)]) X = np.array(X) if X.shape[0] < 3: return [0] * X.shape[0] kmeans = KMeans(n_clusters=min(3, X.shape[0]), random_state=42) clusters = kmeans.fit_predict(X) return clusters.tolist() @safe_process def create_main_charts(topic_dicts): """ Generate a bar chart and radar chart. 'score_mean' is assumed to be in the range 0-1 and is converted to a percentage. """ if not topic_dicts: return go.Figure(), go.Figure() labels = [t['label'] for t in topic_dicts] scores = [t['score_mean'] * 100 for t in topic_dicts] # Bar chart bar_fig = go.Figure( data=[go.Bar(x=labels, y=scores, marker_color='rgb(55, 83, 109)')] ) bar_fig.update_layout( title='Topic Analysis Results', xaxis_title='Topics', yaxis_title='Relevance (%)', template='plotly_white', height=500, ) # Radar chart radar_fig = go.Figure() radar_fig.add_trace(go.Scatterpolar( r=scores, theta=labels, fill='toself', name='Topic Distribution' )) radar_fig.update_layout( title='Topic Radar Chart', template='plotly_white', height=500, polar=dict(radialaxis=dict(visible=True)), showlegend=False ) return bar_fig, radar_fig @safe_process def create_correlation_heatmap(corr_matrix, labels): """ Visualize the correlation matrix as a heatmap. If data is insufficient, display a message. """ if corr_matrix.ndim == 0: corr_matrix = np.array([[corr_matrix]]) if corr_matrix.shape == (1, 1): fig = go.Figure() fig.add_annotation(text="Not enough topics for correlation", showarrow=False) return fig fig = go.Figure(data=go.Heatmap( z=corr_matrix, x=labels, y=labels, colorscale='Viridis' )) fig.update_layout( title='Topic Correlation Heatmap', height=500, template='plotly_white' ) return fig @safe_process def create_topic_evolution(section_topics): """ Create a line chart showing topic score evolution across different sections. section_topics: List of lists containing topic dictionaries for each section. """ fig = go.Figure() if not section_topics or len(section_topics) == 0: return fig if not section_topics[0]: return fig # Use topics from the first section as reference. for topic_dict in section_topics[0]: label = topic_dict['label'] score_list = [] for sec_list in section_topics: match = next((d for d in sec_list if d['label'] == label), None) if match: score_list.append(match['score_mean']) else: score_list.append(0.0) fig.add_trace(go.Scatter( x=list(range(len(section_topics))), y=score_list, name=label, mode='lines+markers' )) fig.update_layout( title='Section-wise Topic Evolution', xaxis_title='Section', yaxis_title='Score Mean', height=500, template='plotly_white' ) return fig @safe_process def create_confidence_gauge(topic_dicts): """ Display each topic's confidence as a gauge. Confidence is calculated using a simple formula: (1 - score_std) * 100. """ if not topic_dicts: return go.Figure() fig = go.Figure() num_topics = len(topic_dicts) for i, t in enumerate(topic_dicts): conf_val = 100.0 * (1.0 - t.get("score_std", 0.0)) fig.add_trace(go.Indicator( mode="gauge+number", value=conf_val, title={'text': t['label']}, domain={'row': 0, 'column': i} )) fig.update_layout( grid={'rows': 1, 'columns': num_topics}, height=400, template='plotly_white' ) return fig @spaces.GPU() def process_all_analysis(text): """ Perform comprehensive analysis on the input text, including topic analysis, section analysis, correlation, sentiment analysis, clustering, and generate corresponding JSON results and Plotly charts. """ try: # 1) Analyze topics for the entire text. raw_results = clf.suggest_topics(text) all_topics = parse_wbg_results(raw_results) # 2) Sort topics by score_mean in descending order and take the top 5. sorted_topics = sorted(all_topics, key=lambda x: x['score_mean'], reverse=True) top_topics = sorted_topics[:5] # 3) Analyze topics by sections. section_topics = analyze_text_sections(text) # 4) Additional analyses (correlation, sentiment, clustering). corr_matrix, corr_labels = calculate_topic_correlations(all_topics) sentiments_df = perform_sentiment_analysis(text) clusters = create_topic_clusters(all_topics) # 5) Generate charts. bar_chart, radar_chart = create_main_charts(top_topics) heatmap = create_correlation_heatmap(corr_matrix, corr_labels) evolution_chart = create_topic_evolution(section_topics) gauge_chart = create_confidence_gauge(top_topics) # 6) Return results as JSON and charts. results = { "top_topics": top_topics, # Top 5 topics. "clusters": clusters, # Cluster results. "sentiments": sentiments_df.to_dict(orient="records") # Sentiment analysis results. } return ( results, # JSON output. bar_chart, # Plot 1: Topic Distribution (Bar Chart). radar_chart, # Plot 2: Radar Chart. heatmap, # Plot 3: Correlation Heatmap. evolution_chart, # Plot 4: Section Topic Evolution. gauge_chart, # Plot 5: Confidence Gauge. go.Figure() # Plot 6: (Placeholder for additional sentiment analysis chart). ) except Exception as e: print(f"Analysis error: {str(e)}") empty_fig = go.Figure() return ( {"error": str(e), "topics": []}, empty_fig, empty_fig, empty_fig, empty_fig, empty_fig, empty_fig ) ###################################################### # Gradio UI Definition # ###################################################### with gr.Blocks(title="Advanced Document Topic Analyzer") as demo: gr.Markdown("## Advanced Document Topic Analyzer") gr.Markdown( "Enter the text below and click **Start Analysis**. " "The tool will analyze key topics, correlations, confidence gauges, sentiment analysis, and more." ) with gr.Row(): text_input = gr.Textbox( value=SAMPLE_TEXT, label="Enter Text for Analysis", lines=8 ) with gr.Row(): submit_btn = gr.Button("Start Analysis", variant="primary") with gr.Tabs(): with gr.TabItem("Main Analysis"): with gr.Row(): plot1 = gr.Plot(label="Topic Distribution (Bar Chart)") plot2 = gr.Plot(label="Radar Chart") with gr.TabItem("Detailed Analysis"): with gr.Row(): plot3 = gr.Plot(label="Correlation Heatmap") plot4 = gr.Plot(label="Section Topic Evolution") with gr.TabItem("Confidence Analysis"): plot5 = gr.Plot(label="Confidence Gauge") with gr.TabItem("Sentiment Analysis"): plot6 = gr.Plot(label="Sentiment Analysis Result") with gr.Row(): output_json = gr.JSON(label="Detailed Analysis Result (JSON)") submit_btn.click( fn=process_all_analysis, inputs=[text_input], outputs=[output_json, plot1, plot2, plot3, plot4, plot5, plot6] ) if __name__ == "__main__": demo.queue(max_size=1) demo.launch( server_name="0.0.0.0", server_port=7860, share=False, # To create a public link, set share=True. debug=True )