import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
import numpy as np
import gradio as gr
import matplotlib
matplotlib.use('Agg')  # Use a non-interactive backend for Matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import io
import base64
import time
# --- Model and Tokenizer Setup ---
DEFAULT_MODEL_NAME = "EleutherAI/gpt-neo-1.3B"
FALLBACK_MODEL_NAME = "gpt2"

model_loaded_successfully = False
tokenizer = None
model = None
device = None
MODEL_CONTEXT_WINDOW = 1024
def load_model_and_tokenizer():
    global tokenizer, model, device, MODEL_CONTEXT_WINDOW, model_loaded_successfully
    # This function runs once when the script starts.
    # Subsequent calls to the Gradio function use these global variables.
    if model_loaded_successfully:  # Avoid reloading if already done
        return
    try:
        print(f"Attempting to load model: {DEFAULT_MODEL_NAME}")
        tokenizer = AutoTokenizer.from_pretrained(DEFAULT_MODEL_NAME)
        model = AutoModelForCausalLM.from_pretrained(DEFAULT_MODEL_NAME)
        print(f"Successfully loaded model: {DEFAULT_MODEL_NAME}")
    except OSError as e:
        print(f"Error loading model {DEFAULT_MODEL_NAME}. Error: {e}")
        print(f"Falling back to {FALLBACK_MODEL_NAME}.")
        try:
            tokenizer = AutoTokenizer.from_pretrained(FALLBACK_MODEL_NAME)
            model = AutoModelForCausalLM.from_pretrained(FALLBACK_MODEL_NAME)
            print(f"Successfully loaded fallback model: {FALLBACK_MODEL_NAME}")
        except OSError as e2:
            print(f"FATAL: Could not load fallback model {FALLBACK_MODEL_NAME}. Error: {e2}")
            # No gr.Error here because Gradio is not running yet;
            # run_eal_dual_unfolding checks model_loaded_successfully instead.
            return  # Exit if the fallback also fails

    if model and tokenizer:
        model.eval()
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model.to(device)
        print(f"Using device: {device}")
        if hasattr(tokenizer, 'model_max_length') and tokenizer.model_max_length is not None:
            MODEL_CONTEXT_WINDOW = tokenizer.model_max_length
        else:
            MODEL_CONTEXT_WINDOW = getattr(model.config, 'max_position_embeddings', 1024)
        print(f"Model context window: {MODEL_CONTEXT_WINDOW} tokens.")
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
            model.config.pad_token_id = model.config.eos_token_id  # Ensure the model config is also aware
            print("Set tokenizer.pad_token and model.config.pad_token_id to eos_token.")
        model_loaded_successfully = True
    else:
        print("Model or tokenizer failed to initialize.")


load_model_and_tokenizer()  # Load on script start
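# The model is loaded once at import time so every Gradio request reuses the same
# global tokenizer/model/device. A quick smoke test of the loaded objects could look
# like the sketch below (commented out so it does not run on startup; the prompt
# string is arbitrary and purely illustrative):
#   ids = tokenizer("A thinking process begins.", return_tensors="pt").to(device)
#   out = model.generate(**ids, max_length=ids.input_ids.size(1) + 20, pad_token_id=tokenizer.pad_token_id)
#   print(tokenizer.decode(out[0], skip_special_tokens=True))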
# --- Configuration ---
# Reserve space for the generation itself and special/system tokens.
# This caps the content passed to tokenizer.encode, not the final prompt length.
PROMPT_TRIM_MAX_TOKENS = min(MODEL_CONTEXT_WINDOW - 300, 1700)
MAX_GEN_LENGTH = 100  # Keep generated segments relatively concise for iteration
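# A worked example of the prompt budget, assuming the two models' documented context
# sizes (illustrative figures, not measured here):
#   gpt-neo-1.3B: MODEL_CONTEXT_WINDOW = 2048 -> min(2048 - 300, 1700) = 1700
#   gpt2 fallback: MODEL_CONTEXT_WINDOW = 1024 -> min(1024 - 300, 1700) = 724
# So even with the fallback model, trimmed content plus the instruction wrapper
# (roughly 50-70 tokens, see below) plus 100 generated tokens stays inside the window.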
# --- Debug Logging ---
debug_log_accumulator = []

def debug(msg):
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    full_msg = f"[{timestamp}] {msg}"
    print(full_msg)
    debug_log_accumulator.append(full_msg)
# --- Core Functions ---
def trim_prompt_if_needed(prompt_text, max_tokens_for_trimming=PROMPT_TRIM_MAX_TOKENS):
    if not model_loaded_successfully:
        return "[Model not loaded]"
    # This trims the *content part* of the prompt before instructions are added.
    tokens = tokenizer.encode(prompt_text, add_special_tokens=False)  # Encode only the content
    if len(tokens) > max_tokens_for_trimming:
        original_length = len(tokens)
        # Trim from the beginning of the content to keep the most recent part
        tokens = tokens[-max_tokens_for_trimming:]
        trimmed_text = tokenizer.decode(tokens)
        debug(f"[!] Content trimming: original content {original_length} tokens, "
              f"trimmed to {len(tokens)} for prompt construction.")
        return trimmed_text
    return prompt_text
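# Usage sketch for the trimming behaviour above (commented out so it does not run
# at import time; the token counts are hypothetical):
#   long_basis = "word " * 5000                 # encodes to far more than PROMPT_TRIM_MAX_TOKENS
#   recent_tail = trim_prompt_if_needed(long_basis)
#   # recent_tail keeps only the *last* PROMPT_TRIM_MAX_TOKENS tokens' worth of text,
#   # so the most recent part of the trace survives and the oldest content is dropped.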
def generate_text_response(constructed_prompt, generation_length=MAX_GEN_LENGTH):
    if not model_loaded_successfully:
        return "[Model not loaded, cannot generate]"
    # constructed_prompt is the final string sent to the tokenizer.
    debug(f"Attempting to generate response for prompt (approx. {len(constructed_prompt.split())} words):\n"
          f"'{constructed_prompt[:350].replace(chr(10), ' ')}...'")
    inputs = tokenizer(constructed_prompt, return_tensors="pt", truncation=False).to(device)  # Do not truncate here; max_length handles it
    input_token_length = inputs.input_ids.size(1)

    # max_length for model.generate is the total length (prompt + new tokens).
    max_length_for_generate = min(input_token_length + generation_length, MODEL_CONTEXT_WINDOW)
    if max_length_for_generate <= input_token_length:
        debug(f"[!!!] Warning: prompt length ({input_token_length}) plus desired generation length ({generation_length}) "
              f"meets or exceeds the model context window ({MODEL_CONTEXT_WINDOW}). Attempting to generate fewer tokens or failing. "
              f"Prompt starts: '{constructed_prompt[:100].replace(chr(10), ' ')}...'")
        # Try to generate at least a few tokens if there is any space at all.
        generation_length = max(0, MODEL_CONTEXT_WINDOW - input_token_length - 5)  # Reserve 5 tokens for safety
        if generation_length <= 0:
            return "[Prompt filled context window; cannot generate new tokens]"
        max_length_for_generate = input_token_length + generation_length

    try:
        outputs = model.generate(
            input_ids=inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=max_length_for_generate,
            pad_token_id=tokenizer.pad_token_id,
            do_sample=True,
            temperature=0.75,        # Slightly more focused sampling
            top_p=0.9,               # Keep some diversity
            repetition_penalty=1.2,  # Discourage direct repetition
            no_repeat_ngram_size=3,  # Avoid simple phrase repetitions
        )
        # Decode only the newly generated part.
        generated_tokens = outputs[0][input_token_length:]
        result_text = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
        debug(f"Generated response text ({len(result_text.split())} words, {len(generated_tokens)} tokens):\n"
              f"'{result_text[:350].replace(chr(10), ' ')}...'")
        return result_text if result_text else "[Empty Response]"
    except Exception as e:
        debug(f"[!!!] Error during text generation: {e}\n"
              f"Final prompt sent was (approx. {input_token_length} tokens): {constructed_prompt[:200].replace(chr(10), ' ')}...")
        return f"[Generation Error: {str(e)[:100]}]"
def calculate_similarity(text_a, text_b):
    if not model_loaded_successfully:
        return 0.0
    problematic_markers = ["[Empty Response]", "[Generation Error]", "[Prompt too long", "[Model not loaded"]
    # Check that the texts are valid, non-empty strings before comparing.
    text_a_is_valid = text_a and isinstance(text_a, str) and text_a.strip() and not any(marker in text_a for marker in problematic_markers)
    text_b_is_valid = text_b and isinstance(text_b, str) and text_b.strip() and not any(marker in text_b for marker in problematic_markers)
    if not text_a_is_valid or not text_b_is_valid:
        debug(f"Similarity calculation skipped for invalid/empty texts: A_valid={text_a_is_valid}, B_valid={text_b_is_valid} "
              f"(A='{str(text_a)[:30]}...', B='{str(text_b)[:30]}...')")
        return 0.0

    embedding_layer = model.get_input_embeddings()
    with torch.no_grad():
        tokens_a = tokenizer(text_a, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)
        tokens_b = tokenizer(text_b, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)
        if tokens_a.input_ids.size(1) == 0 or tokens_b.input_ids.size(1) == 0:
            debug(f"Similarity calculation skipped: tokenization produced empty input_ids. "
                  f"A='{str(text_a)[:30]}...', B='{str(text_b)[:30]}...'")
            return 0.0
        emb_a = embedding_layer(tokens_a.input_ids).mean(dim=1)
        emb_b = embedding_layer(tokens_b.input_ids).mean(dim=1)
    score = float(cosine_similarity(emb_a.cpu().numpy(), emb_b.cpu().numpy())[0][0])
    debug(f"Similarity A vs B: {score:.4f} (A='{str(text_a)[:30].replace(chr(10), ' ')}...', B='{str(text_b)[:30].replace(chr(10), ' ')}...')")
    return score
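# Note on the ΔS metric: this is cosine similarity between mean-pooled *static* input
# embeddings (no contextualisation from the transformer layers), i.e. a cheap proxy
# for semantic similarity rather than a dedicated sentence-embedding model. A minimal
# usage sketch (commented out so it does not run at import time; strings are arbitrary):
#   s = calculate_similarity("The system stabilises.", "The system becomes stable.")
#   # s is a float; heavily overlapping token distributions push it toward 1.0.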
def generate_similarity_heatmap(texts_list, custom_labels, title="Semantic Similarity Heatmap"):
    if not model_loaded_successfully:
        return "Heatmap generation skipped: Model not loaded."
    valid_items = [(text, label) for text, label in zip(texts_list, custom_labels)
                   if text and isinstance(text, str) and text.strip()
                   and not any(m in text for m in ["[Empty", "[Generation Error", "[Prompt too long"])]
    if len(valid_items) < 2:
        debug("Not enough valid texts to generate a heatmap.")
        return "Not enough valid data for heatmap."

    valid_texts = [item[0] for item in valid_items]
    valid_labels = [item[1] for item in valid_items]
    num_valid_texts = len(valid_texts)
    sim_matrix = np.full((num_valid_texts, num_valid_texts), np.nan)
    min_sim_val = 1.0  # Track the actual minimum for better colour scaling
    max_sim_val = 0.0  # Track the actual maximum
    for i in range(num_valid_texts):
        for j in range(num_valid_texts):
            if i == j:
                sim_matrix[i, j] = 1.0
            elif np.isnan(sim_matrix[j, i]):
                sim = calculate_similarity(valid_texts[i], valid_texts[j])
                sim_matrix[i, j] = sim
                sim_matrix[j, i] = sim
                min_sim_val = min(min_sim_val, sim)
                max_sim_val = max(max_sim_val, sim)
            else:
                sim_matrix[i, j] = sim_matrix[j, i]

    # Adjust vmin so the heatmap shows contrast even when all values are high;
    # default to 0.7 when the observed similarities are lower.
    heatmap_vmin = min(0.9, min_sim_val - 0.01) if min_sim_val > 0.8 else 0.7
    heatmap_vmax = 1.0
    try:
        fig_width = max(8, num_valid_texts * 1.0)
        fig_height = max(7, num_valid_texts * 0.9)
        fig, ax = plt.subplots(figsize=(fig_width, fig_height))
        mask = np.isnan(sim_matrix)
        sns.heatmap(sim_matrix, annot=True, cmap="plasma", fmt=".2f", ax=ax,
                    xticklabels=valid_labels, yticklabels=valid_labels,
                    annot_kws={"size": 7}, mask=mask, vmin=heatmap_vmin, vmax=heatmap_vmax)
        ax.set_title(title, fontsize=14, pad=20)
        plt.xticks(rotation=45, ha="right", fontsize=9)
        plt.yticks(rotation=0, fontsize=9)
        plt.tight_layout(pad=2.5)
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        plt.close(fig)
        buf.seek(0)
        img_base64 = base64.b64encode(buf.read()).decode('utf-8')
        return (f"<img src='data:image/png;base64,{img_base64}' alt='{title}' "
                f"style='max-width:95%; height:auto; border: 1px solid #ccc; margin: 10px auto; "
                f"display:block; box-shadow: 0 0 10px rgba(0,0,0,0.1);'/>")
    except Exception as e:
        debug(f"[!!!] Error generating heatmap: {e}")
        return f"Error generating heatmap: {str(e)[:200]}"
def perform_text_clustering(texts_list, custom_labels, num_clusters=2):
    if not model_loaded_successfully:
        return {label: "N/A (Model)" for label in custom_labels}
    valid_items = [(text, label) for text, label in zip(texts_list, custom_labels)
                   if text and isinstance(text, str) and text.strip()
                   and not any(m in text for m in ["[Empty", "[Generation Error", "[Prompt too long"])]
    if len(valid_items) < num_clusters:
        debug(f"Not enough valid texts ({len(valid_items)}) for {num_clusters}-means clustering.")
        valid_labels_only = [item[1] for item in valid_items]
        return ({item[1]: f"N/A (Samples<{num_clusters})" for item in valid_items}
                | {label: "N/A" for label in custom_labels if label not in valid_labels_only})

    valid_texts = [item[0] for item in valid_items]
    valid_original_labels = [item[1] for item in valid_items]
    embedding_layer = model.get_input_embeddings()
    embeddings_for_clustering = []
    with torch.no_grad():
        for text_item in valid_texts:
            # Important: ensure input_ids are not empty before calling the embedding layer.
            tokens = tokenizer(text_item, return_tensors="pt", truncation=True,
                               max_length=MODEL_CONTEXT_WINDOW, padding=True).to(device)
            if tokens.input_ids.size(1) == 0:
                debug(f"Skipping text for embedding in clustering due to empty tokenization: '{text_item[:30]}...'")
                continue
            emb = embedding_layer(tokens.input_ids).mean(dim=1)
            embeddings_for_clustering.append(emb.cpu().numpy().squeeze())

    if not embeddings_for_clustering or len(embeddings_for_clustering) < num_clusters:
        debug(f"Not enough valid texts were successfully embedded for clustering ({len(embeddings_for_clustering)} found).")
        return {label: "N/A (Embed Fail)" for label in custom_labels}

    embeddings_np = np.array(embeddings_for_clustering)
    # Ensure embeddings are 2D for KMeans.
    if embeddings_np.ndim == 1:
        if len(embeddings_for_clustering) == 1:  # Only one sample
            embeddings_np = embeddings_np.reshape(1, -1)
        else:  # Should not happen when multiple samples were embedded
            debug("Embedding array is 1D but multiple samples exist. This is unexpected.")
            return {label: "N/A (Embed Dim Error)" for label in custom_labels}

    cluster_results_map = {label: "N/A" for label in custom_labels}
    try:
        actual_num_clusters = min(num_clusters, len(embeddings_for_clustering))
        if actual_num_clusters < 2:
            debug(f"Clustering: only {len(embeddings_for_clustering)} valid sample(s); assigning all to Cluster 0.")
            predicted_labels = [0] * len(embeddings_for_clustering)
        else:
            kmeans = KMeans(n_clusters=actual_num_clusters, random_state=42, n_init=10)  # Explicit n_init
            predicted_labels = kmeans.fit_predict(embeddings_np)
        for i, original_label in enumerate(valid_original_labels):
            cluster_results_map[original_label] = f"C{predicted_labels[i]}"
        return cluster_results_map
    except Exception as e:
        debug(f"[!!!] Error during clustering: {e}")
        return {label: "N/A (Clustering Error)" for label in custom_labels}
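# The clustering reuses the same mean-pooled input embeddings as the ΔS metric and
# runs k-means with k=2 over them. A hypothetical result for 3 iterations might look
# like {"I0": "C0", "I1": "C0", "I2": "C0", "¬I0": "C1", "¬I1": "C1", "¬I2": "C1"},
# i.e. the two traces separate cleanly; such a split is not guaranteed in practice.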
# --- Main EAL Unfolding Logic ---
def run_eal_dual_unfolding(num_iterations, progress=gr.Progress(track_tqdm=True)):
    if not model_loaded_successfully:
        error_msg = "CRITICAL: Model not loaded. Please check the server logs and restart the Space if necessary."
        debug(error_msg)
        gr.Warning(error_msg)
        return error_msg, error_msg, error_msg, error_msg, "<p style='color:red; text-align:center; font-weight:bold;'>Model not loaded. Cannot run analysis.</p>"

    num_iterations = int(num_iterations)  # Defensive cast: slider values may arrive as floats
    I_trace_texts, not_I_trace_texts = [None] * num_iterations, [None] * num_iterations
    delta_S_I_values, delta_S_not_I_values, delta_S_cross_values = [None] * num_iterations, [None] * num_iterations, [None] * num_iterations
    debug_log_accumulator.clear()
    debug("EAL Dual Unfolding Process Started.")

    # A truly open-ended initial prompt so the system defines itself;
    # the LLM completes this to generate I0.
    initial_seed_prompt_for_I = "A thinking process begins. The first thought is:"
    progress(0, desc="Starting EAL Iterations...")
    for i in range(num_iterations):
        iteration_log_header = f"\n\n{'='*15} Iteration {i} {'='*15}"
        debug(iteration_log_header)
        progress(i / num_iterations, desc=f"Iteration {i+1}/{num_iterations} - I-Trace")

        # === I-Trace (self-coherence / development) ===
        if i == 0:
            prompt_for_I_trace = initial_seed_prompt_for_I
        else:
            # The basis is the *actual text* of the previous I-Trace output.
            basis_for_I_elaboration = I_trace_texts[i-1]
            if not basis_for_I_elaboration or any(m in basis_for_I_elaboration for m in ["[Empty", "[Generation Error", "[Prompt too long"]):
                basis_for_I_elaboration = "The previous thought was not clearly formed. Let's try a new line of thought:"
                debug(f"[!] Using fallback basis for I-Trace at iter {i}.")
            # Trim the basis content if it is too long before adding instructions.
            trimmed_basis_I = trim_prompt_if_needed(basis_for_I_elaboration, PROMPT_TRIM_MAX_TOKENS - 50)  # Reserve 50 tokens for the instruction
            prompt_for_I_trace = (f"The thought process previously generated: \"{trimmed_basis_I}\"\n\n"
                                  f"Task: Continue this line of thought. What logically follows or develops from this statement?")

        generated_I_text = generate_text_response(prompt_for_I_trace)
        I_trace_texts[i] = generated_I_text
        progress((i + 0.5) / num_iterations, desc=f"Iteration {i+1}/{num_iterations} - ¬I-Trace (Alternative Perspective)")

        # === ¬I-Trace (alternative perspectives / potential antithesis) ===
        # ¬I always reacts to the *current* I-Trace output for this iteration.
        statement_to_consider_for_not_I = I_trace_texts[i]
        if not statement_to_consider_for_not_I or any(m in statement_to_consider_for_not_I for m in ["[Empty", "[Generation Error", "[Prompt too long"]):
            statement_to_consider_for_not_I = "The primary thought was not clearly formed. Consider a general alternative to how systems might evolve."
            debug(f"[!] Using fallback statement for ¬I-Trace at iter {i}.")
        # Trim the statement if it is too long before adding instructions.
        trimmed_basis_not_I = trim_prompt_if_needed(statement_to_consider_for_not_I, PROMPT_TRIM_MAX_TOKENS - 70)  # Reserve 70 tokens for the instruction
        prompt_for_not_I_trace = (f"Consider the statement: \"{trimmed_basis_not_I}\"\n\n"
                                  f"Task: Explore alternative perspectives or potential issues related to this statement. "
                                  f"What might be a contrasting viewpoint or an overlooked aspect?")
        generated_not_I_text = generate_text_response(prompt_for_not_I_trace)
        not_I_trace_texts[i] = generated_not_I_text

        # === ΔS (similarity) calculations ===
        debug(f"--- Calculating Similarities for Iteration {i} ---")
        if i > 0:
            delta_S_I_values[i] = calculate_similarity(I_trace_texts[i-1], I_trace_texts[i])
            delta_S_not_I_values[i] = calculate_similarity(not_I_trace_texts[i-1], not_I_trace_texts[i])
        # For i == 0 the intra-trace deltas remain None.
        delta_S_cross_values[i] = calculate_similarity(I_trace_texts[i], not_I_trace_texts[i])
        debug(f"--- End of Similarity Calculations for Iteration {i} ---")
    progress(1, desc="Generating Analysis and Visualizations...")
    debug("\n\n=== Post-loop Analysis ===")

    # --- Post-loop analysis & output formatting ---
    all_generated_texts = I_trace_texts + not_I_trace_texts
    text_labels_for_analysis = [f"I{k}" for k in range(num_iterations)] + \
                               [f"¬I{k}" for k in range(num_iterations)]
    cluster_assignments_map = perform_text_clustering(all_generated_texts, text_labels_for_analysis, num_clusters=2)
    debug(f"Clustering results: {cluster_assignments_map}")

    I_out_formatted_lines = []
    for k in range(num_iterations):
        cluster_label_I = cluster_assignments_map.get(f"I{k}", "N/A")
        I_out_formatted_lines.append(f"**I{k} [{cluster_label_I}]**:\n{I_trace_texts[k]}")
    I_out_formatted = "\n\n---\n\n".join(I_out_formatted_lines)

    not_I_out_formatted_lines = []
    for k in range(num_iterations):
        cluster_label_not_I = cluster_assignments_map.get(f"¬I{k}", "N/A")
        not_I_out_formatted_lines.append(f"**¬I{k} [{cluster_label_not_I}]**:\n{not_I_trace_texts[k]}")
    not_I_out_formatted = "\n\n---\n\n".join(not_I_out_formatted_lines)

    delta_S_summary_lines = ["| Iter | ΔS(I_prev↔I_curr) | ΔS(¬I_prev↔¬I_curr) | ΔS_Cross(I_curr↔¬I_curr) |",
                             "|:----:|:-----------------:|:-------------------:|:-------------------------:|"]
    for k in range(num_iterations):
        ds_i_str = f"{delta_S_I_values[k]:.4f}" if delta_S_I_values[k] is not None else "N/A (Iter 0)"
        ds_not_i_str = f"{delta_S_not_I_values[k]:.4f}" if delta_S_not_I_values[k] is not None else "N/A (Iter 0)"
        ds_cross_str = f"{delta_S_cross_values[k]:.4f}" if delta_S_cross_values[k] is not None else "N/A"
        delta_S_summary_lines.append(f"| {k:^2} | {ds_i_str:^15} | {ds_not_i_str:^17} | {ds_cross_str:^23} |")
    delta_S_summary_output = "\n".join(delta_S_summary_lines)

    debug_log_output = "\n".join(debug_log_accumulator)
    heatmap_html_output = generate_similarity_heatmap(all_generated_texts,
                                                      custom_labels=text_labels_for_analysis,
                                                      title=f"Similarity Matrix (All Texts - {num_iterations} Iterations)")
    debug("EAL Dual Unfolding Process Completed.")
    return I_out_formatted, not_I_out_formatted, delta_S_summary_output, debug_log_output, heatmap_html_output
# --- Gradio Interface Definition ---
with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan", neutral_hue="slate")) as eal_interface:
    gr.Markdown("## EAL LLM Emergent Discourse Analyzer")
    gr.Markdown(
        "This application explores how a Large Language Model (LLM) develops textual traces when prompted iteratively. It runs two parallel traces:\n"
        "1. **I-Trace (Coherent Elaboration):** Starting with a neutral seed completed by the LLM, each subsequent step asks the LLM to develop its *own previous statement* from this trace.\n"
        "2. **¬I-Trace (Alternative Perspectives):** In parallel, this trace asks the LLM to explore alternative perspectives or issues related to the *current statement generated in the I-Trace*.\n\n"
        "The goal is to observe whether stable, coherent, and potentially distinct semantic trajectories emerge, inspired by Entropic Attractor Logic (EAL) concepts of stability and divergence."
    )
    with gr.Row():
        iterations_slider = gr.Slider(minimum=1, maximum=7, value=3, step=1,  # Capped at 7 for performance
                                      label="Number of Iterations",
                                      info="Higher numbers significantly increase processing time.")
        run_button = gr.Button("🚀 Analyze Emergent Traces", variant="primary", scale=0)

    with gr.Accordion("ℹ️ Interpreting Outputs", open=False):
        gr.Markdown(
            "- **I-Trace & ¬I-Trace Texts:** Observe the content. Does the I-Trace show coherent development? Does the ¬I-Trace offer genuinely different angles, or does it merely paraphrase or agree with the I-Trace statement it comments on?\n"
            "- **ΔS Values (Cosine Similarity):**\n"
            "  - `ΔS(I_prev↔I_curr)`: Similarity between I<sub>k-1</sub> and I<sub>k</sub>. High values (near 1.0) mean the I-Trace is very similar to its previous step (stable, possibly repetitive).\n"
            "  - `ΔS(¬I_prev↔¬I_curr)`: Similarity between ¬I<sub>k-1</sub> and ¬I<sub>k</sub>. High values mean the ¬I-Trace is also internally consistent.\n"
            "  - `ΔS_Cross(I_curr↔¬I_curr)`: Similarity between I<sub>k</sub> and ¬I<sub>k</sub> at the same iteration. **Low values are interesting here**, since they suggest the ¬I-Trace is semantically distinct from the I-Trace. High values suggest the model struggles to create a true alternative.\n"
            "- **Clustering [Cx]:** Texts are assigned to one of two clusters (C0 or C1). Ideally, I-Trace texts would fall into one cluster and ¬I-Trace texts into another if they are semantically distinct.\n"
            "- **Heatmap:** Visualizes all pairwise similarities. Look for blocks: high similarity within I-texts, high within ¬I-texts, and (ideally) lower similarity between the I and ¬I blocks."
        )

    with gr.Tabs():
        with gr.TabItem("📜 Text Traces (I and ¬I)"):
            with gr.Row(equal_height=False):  # Allow different column heights
                with gr.Column(scale=1):
                    i_trace_output = gr.Markdown(label="I-Trace (Coherent Elaboration with Cluster)", elem_id="i-trace-box")
                with gr.Column(scale=1):
                    not_i_trace_output = gr.Markdown(label="¬I-Trace (Alternative Perspectives with Cluster)", elem_id="not-i-trace-box")
        with gr.TabItem("📊 ΔS Similarity & Heatmap"):
            delta_s_output = gr.Markdown(label="ΔS Similarity Trace Summary (Table)", elem_id="delta-s-box")
            heatmap_output = gr.HTML(label="Overall Semantic Similarity Heatmap")
            gr.Markdown("*Heatmap values closer to 1.0 (brighter yellow in the 'plasma' colormap) indicate higher similarity. The color scale is adjusted to the min/max observed similarities to highlight variation.*")
        with gr.TabItem("⚙️ Debug Log"):
            debug_log_output_box = gr.Textbox(label="Detailed Debug Log (Prompts, Responses, Errors, Similarities)", lines=25, interactive=False, show_copy_button=True, max_lines=200)

    run_button.click(
        fn=run_eal_dual_unfolding,
        inputs=iterations_slider,
        outputs=[i_trace_output, not_i_trace_output, delta_s_output, debug_log_output_box, heatmap_output],
        api_name="run_eal_analysis"
    )
    gr.Markdown("--- \n*EAL LLM Emergent Discourse Analyzer v0.4 - User & ℧ Collaboration*")
if __name__ == "__main__":
    if not model_loaded_successfully:
        print("CRITICAL ERROR: Model failed to load. The Gradio app will likely not function correctly.")
        # Fall back to a minimal Gradio app that displays an error.
        with gr.Blocks() as error_interface:
            gr.Markdown("# Application Error")
            gr.Markdown("## CRITICAL: Language Model Failed to Load!")
            gr.Markdown("The application cannot start because the required language model (either EleutherAI/gpt-neo-1.3B or the fallback gpt2) could not be loaded. Please check the server console logs for specific error messages from the `transformers` library. This might be due to network issues, an incorrect model name, or insufficient resources.")
        error_interface.launch()
    else:
        print("Starting Gradio App...")
        eal_interface.launch()