neuralworm committed on
Commit
96b07ba
1 Parent(s): b2cf072

update app.py

Files changed (1)
  1. app.py +275 -147
app.py CHANGED
@@ -10,97 +10,146 @@ import matplotlib.pyplot as plt
10
  import seaborn as sns
11
  import io
12
  import base64
 
13
 
14
  # --- Model and Tokenizer Setup ---
15
  DEFAULT_MODEL_NAME = "EleutherAI/gpt-neo-1.3B"
16
- FALLBACK_MODEL_NAME = "gpt2" # Fallback if preferred model fails
17
-
18
- try:
19
- print(f"Attempting to load model: {DEFAULT_MODEL_NAME}")
20
- tokenizer = AutoTokenizer.from_pretrained(DEFAULT_MODEL_NAME)
21
- model = AutoModelForCausalLM.from_pretrained(DEFAULT_MODEL_NAME)
22
- print(f"Successfully loaded model: {DEFAULT_MODEL_NAME}")
23
- except OSError as e:
24
- print(f"Error loading model {DEFAULT_MODEL_NAME}. Error: {e}")
25
- print(f"Falling back to {FALLBACK_MODEL_NAME}.")
26
- tokenizer = AutoTokenizer.from_pretrained(FALLBACK_MODEL_NAME)
27
- model = AutoModelForCausalLM.from_pretrained(FALLBACK_MODEL_NAME)
28
- print(f"Successfully loaded fallback model: {FALLBACK_MODEL_NAME}")
29
-
30
- model.eval()
31
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
32
- model.to(device)
33
- print(f"Using device: {device}")
34
 
35
- # --- Configuration ---
36
- MODEL_CONTEXT_WINDOW = tokenizer.model_max_length if hasattr(tokenizer, 'model_max_length') and tokenizer.model_max_length is not None else model.config.max_position_embeddings
37
- print(f"Model context window: {MODEL_CONTEXT_WINDOW} tokens.")
38
 
39
- PROMPT_TRIM_MAX_TOKENS = min(MODEL_CONTEXT_WINDOW - 250, 1800) # Reserve ~250 for generation & instructions, cap at 1800
40
- MAX_GEN_LENGTH = 150
 
 
 
41
 
42
  # --- Debug Logging ---
43
  debug_log_accumulator = []
44
 
45
  def debug(msg):
46
- print(msg)
47
- debug_log_accumulator.append(str(msg))
 
 
48
 
49
  # --- Core Functions ---
50
  def trim_prompt_if_needed(prompt_text, max_tokens_for_trimming=PROMPT_TRIM_MAX_TOKENS):
51
- tokens = tokenizer.encode(prompt_text, add_special_tokens=False)
 
 
52
  if len(tokens) > max_tokens_for_trimming:
53
  original_length = len(tokens)
54
- # Trim from the beginning to keep the most recent conversational context
55
  tokens = tokens[-max_tokens_for_trimming:]
56
- debug(f"[!] Prompt trimming: Original {original_length} tokens, "
57
- f"trimmed to {len(tokens)} (from the end, keeping recent context).")
58
- return tokenizer.decode(tokens)
 
 
 
59
 
60
  def generate_text_response(constructed_prompt, generation_length=MAX_GEN_LENGTH):
61
- # The constructed_prompt already includes the task and the text to reflect upon.
62
- # We still need to ensure this constructed_prompt doesn't exceed limits before generation.
63
- safe_prompt = trim_prompt_if_needed(constructed_prompt, PROMPT_TRIM_MAX_TOKENS)
64
 
65
- debug(f"Generating response for (potentially trimmed) prompt (approx. {len(safe_prompt.split())} words):\n'{safe_prompt[:400]}...'")
 
66
 
67
- inputs = tokenizer(safe_prompt, return_tensors="pt", truncation=False).to(device)
68
  input_token_length = inputs.input_ids.size(1)
69
 
70
- # Calculate max_length for model.generate()
71
- # It's the current length of tokenized prompt + desired new tokens, capped by model's absolute max.
72
  max_length_for_generate = min(input_token_length + generation_length, MODEL_CONTEXT_WINDOW)
73
 
74
  if max_length_for_generate <= input_token_length:
75
- debug(f"[!] Warning: Prompt length ({input_token_length}) is too close to model context window ({MODEL_CONTEXT_WINDOW}). "
76
- f"Cannot generate new tokens. Prompt: '{safe_prompt[:100]}...'")
77
- return "[Prompt too long to generate new tokens]"
78
 
79
  try:
80
  outputs = model.generate(
81
  input_ids=inputs.input_ids,
82
  attention_mask=inputs.attention_mask,
83
  max_length=max_length_for_generate,
84
- pad_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 50256,
85
  do_sample=True,
86
- temperature=0.85,
87
- top_p=0.92,
88
- repetition_penalty=1.15,
 
89
  )
 
90
  generated_tokens = outputs[0][input_token_length:]
91
  result_text = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
92
 
93
- debug(f"Generated response text (length {len(result_text.split())} words):\n'{result_text[:400]}...'")
94
  return result_text if result_text else "[Empty Response]"
95
  except Exception as e:
96
- debug(f"[!!!] Error during text generation: {e}\nPrompt was: {safe_prompt[:200]}...")
97
- return "[Generation Error]"
 
98
 
99
  def calculate_similarity(text_a, text_b):
100
- invalid_texts_markers = ["[Empty Response]", "[Generation Error]", "[Prompt too long", "[Input prompt too long"]
101
- if not text_a or not text_a.strip() or any(marker in text_a for marker in invalid_texts_markers) or \
102
- not text_b or not text_b.strip() or any(marker in text_b for marker in invalid_texts_markers):
103
- debug(f"Similarity calculation skipped for invalid/empty texts: A='{str(text_a)[:50]}...', B='{str(text_b)[:50]}...'")
 
 
 
 
104
  return 0.0
105
 
106
  embedding_layer = model.get_input_embeddings()
@@ -109,226 +158,305 @@ def calculate_similarity(text_a, text_b):
109
  tokens_b = tokenizer(text_b, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)
110
 
111
  if tokens_a.input_ids.size(1) == 0 or tokens_b.input_ids.size(1) == 0:
112
- debug(f"Similarity calculation skipped: tokenization resulted in empty input_ids. A='{str(text_a)[:50]}...', B='{str(text_b)[:50]}...'")
113
  return 0.0
114
 
115
  emb_a = embedding_layer(tokens_a.input_ids).mean(dim=1)
116
  emb_b = embedding_layer(tokens_b.input_ids).mean(dim=1)
117
 
118
  score = float(cosine_similarity(emb_a.cpu().numpy(), emb_b.cpu().numpy())[0][0])
119
- debug(f"Similarity between A='{str(text_a)[:30]}...' and B='{str(text_b)[:30]}...' is {score:.4f}")
120
  return score
121
 
122
  def generate_similarity_heatmap(texts_list, custom_labels, title="Semantic Similarity Heatmap"):
123
- # Filter out any None or problematic entries before processing
124
- valid_texts_with_labels = [(text, label) for text, label in zip(texts_list, custom_labels) if text and isinstance(text, str) and not any(marker in text for marker in ["[Empty Response]", "[Generation Error]", "[Prompt too long", "[Input prompt too long"])]
125
 
126
- if len(valid_texts_with_labels) < 2:
 
 
 
127
  debug("Not enough valid texts to generate a heatmap.")
128
  return "Not enough valid data for heatmap."
129
 
130
- valid_texts = [item[0] for item in valid_texts_with_labels]
131
- valid_labels = [item[1] for item in valid_texts_with_labels]
132
  num_valid_texts = len(valid_texts)
133
 
134
- sim_matrix = np.zeros((num_valid_texts, num_valid_texts))
 
 
 
135
  for i in range(num_valid_texts):
136
  for j in range(num_valid_texts):
137
  if i == j:
138
  sim_matrix[i, j] = 1.0
139
- elif i < j:
140
  sim = calculate_similarity(valid_texts[i], valid_texts[j])
141
  sim_matrix[i, j] = sim
142
  sim_matrix[j, i] = sim
143
- else: # j < i, use already computed value
 
 
144
  sim_matrix[i,j] = sim_matrix[j,i]
145
 
 
 
 
 
146
  try:
147
- fig_width = max(6, num_valid_texts * 0.8)
148
- fig_height = max(5, num_valid_texts * 0.7)
149
  fig, ax = plt.subplots(figsize=(fig_width, fig_height))
150
 
151
- sns.heatmap(sim_matrix, annot=True, cmap="viridis", fmt=".2f", ax=ax,
152
- xticklabels=valid_labels, yticklabels=valid_labels, annot_kws={"size": 8})
153
- ax.set_title(title, fontsize=12)
 
154
  plt.xticks(rotation=45, ha="right", fontsize=9)
155
  plt.yticks(rotation=0, fontsize=9)
156
- plt.tight_layout(pad=1.5)
157
 
158
  buf = io.BytesIO()
159
- plt.savefig(buf, format='png') # Removed bbox_inches='tight' as it can cause issues with tight_layout
160
  plt.close(fig)
161
  buf.seek(0)
162
  img_base64 = base64.b64encode(buf.read()).decode('utf-8')
163
- return f"<img src='data:image/png;base64,{img_base64}' alt='{title}' style='max-width:100%; height:auto;'/>"
164
  except Exception as e:
165
  debug(f"[!!!] Error generating heatmap: {e}")
166
- return f"Error generating heatmap: {e}"
167
 
168
 
169
  def perform_text_clustering(texts_list, custom_labels, num_clusters=2):
170
- valid_texts_with_labels = [(text, label) for text, label in zip(texts_list, custom_labels) if text and isinstance(text, str) and not any(marker in text for marker in ["[Empty Response]", "[Generation Error]", "[Prompt too long", "[Input prompt too long"])]
 
 
 
171
 
172
- if len(valid_texts_with_labels) < num_clusters:
173
- debug(f"Not enough valid texts ({len(valid_texts_with_labels)}) for {num_clusters}-means clustering.")
174
- return {label: "N/A (Few Samples)" for label in custom_labels}
175
 
176
- valid_texts = [item[0] for item in valid_texts_with_labels]
177
- original_indices_map = {i: custom_labels.index(item[1]) for i, item in enumerate(valid_texts_with_labels)}
178
 
 
 
179
 
180
  embedding_layer = model.get_input_embeddings()
181
  embeddings_for_clustering = []
182
 
183
  with torch.no_grad():
184
  for text_item in valid_texts:
185
- tokens = tokenizer(text_item, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)
 
186
  if tokens.input_ids.size(1) == 0:
187
- debug(f"Skipping text for embedding in clustering due to empty tokenization: '{text_item[:50]}...'")
188
- continue # This case should be rare if valid_texts_with_labels already filtered
189
 
190
  emb = embedding_layer(tokens.input_ids).mean(dim=1)
191
  embeddings_for_clustering.append(emb.cpu().numpy().squeeze())
192
 
193
  if not embeddings_for_clustering or len(embeddings_for_clustering) < num_clusters:
194
- debug("Not enough valid texts were successfully embedded for clustering.")
195
- return {label: "N/A (Embedding Fail)" for label in custom_labels}
196
 
197
  embeddings_np = np.array(embeddings_for_clustering)
 
198
  cluster_results_map = {label: "N/A" for label in custom_labels}
199
 
200
  try:
201
  actual_num_clusters = min(num_clusters, len(embeddings_for_clustering))
202
  if actual_num_clusters < 2:
203
- debug(f"Adjusted num_clusters to 1 due to only {len(embeddings_for_clustering)} valid sample(s). Assigning all to Cluster 0.")
204
  predicted_labels = [0] * len(embeddings_for_clustering)
205
  else:
206
- kmeans = KMeans(n_clusters=actual_num_clusters, random_state=42, n_init='auto')
207
  predicted_labels = kmeans.fit_predict(embeddings_np)
208
 
209
- for i, original_label_key_idx in original_indices_map.items(): # i is index in valid_texts, original_label_key_idx is index in custom_labels
210
- cluster_results_map[custom_labels[original_label_key_idx]] = f"C{predicted_labels[i]}"
211
  return cluster_results_map
212
 
213
  except Exception as e:
214
  debug(f"[!!!] Error during clustering: {e}")
215
- return {label: "Error" for label in custom_labels}
216
 
217
  # --- Main EAL Unfolding Logic ---
218
- def run_eal_dual_unfolding(num_iterations):
219
- I_trace_texts, not_I_trace_texts = [None]*num_iterations, [None]*num_iterations # Pre-allocate for easier indexing
220
  delta_S_I_values, delta_S_not_I_values, delta_S_cross_values = [None]*num_iterations, [None]*num_iterations, [None]*num_iterations
221
 
222
  debug_log_accumulator.clear()
223
- ui_log_entries = []
224
 
225
- initial_seed_thought_for_I = "A reflective process is initiated, considering its own nature."
 
 
226
 
227
- for i in range(num_iterations):
228
- ui_log_entries.append(f"--- Iteration {i} ---")
229
- debug(f"\n=== Iteration {i} ===")
230
 
231
- # === I-Trace (Self-Reflection) ===
232
- basis_for_I_elaboration = initial_seed_thought_for_I if i == 0 else I_trace_texts[i-1]
233
- if not basis_for_I_elaboration or any(marker in basis_for_I_elaboration for marker in ["[Empty Response]", "[Generation Error]"]): # Safety for basis
234
- basis_for_I_elaboration = "The previous thought was unclear or errored. Please restart reflection."
235
- debug(f"[!] Using fallback basis for I-Trace at iter {i} due to problematic previous I-text.")
236
 
237
- prompt_for_I_trace = f"A thought process is evolving. Its previous stage was: \"{basis_for_I_elaboration}\"\n\nTask: Continue this line of thought. Elaborate on it, explore its implications, or develop it further in a coherent manner."
238
 
239
- ui_log_entries.append(f"[Prompt for I{i} (approx. {len(prompt_for_I_trace.split())} words)]:\n'{prompt_for_I_trace[:400]}...'")
240
  generated_I_text = generate_text_response(prompt_for_I_trace)
241
  I_trace_texts[i] = generated_I_text
242
- ui_log_entries.append(f"[I{i} Response (approx. {len(generated_I_text.split())} words)]:\n'{generated_I_text[:400]}...'")
243
 
244
- # === ¬I-Trace (Antithesis/Contradiction) ===
245
- statement_to_challenge_for_not_I = I_trace_texts[i] # Challenge the I-text from the *current* iteration
246
- if not statement_to_challenge_for_not_I or any(marker in statement_to_challenge_for_not_I for marker in ["[Empty Response]", "[Generation Error]"]):
247
- statement_to_challenge_for_not_I = "The primary statement was unclear or errored. Please offer a general contrasting idea."
248
- debug(f"[!] Using fallback statement to challenge for ¬I-Trace at iter {i} due to problematic current I-text.")
249
 
250
- prompt_for_not_I_trace = f"Now, consider an alternative perspective to the thought: \"{statement_to_challenge_for_not_I}\"\n\nTask: What are potential contradictions, challenges, or contrasting interpretations to this specific thought? Explore a divergent viewpoint or explain why the thought might be flawed."
251
 
252
- ui_log_entries.append(f"[Prompt for ¬I{i} (approx. {len(prompt_for_not_I_trace.split())} words)]:\n'{prompt_for_not_I_trace[:400]}...'")
253
  generated_not_I_text = generate_text_response(prompt_for_not_I_trace)
254
  not_I_trace_texts[i] = generated_not_I_text
255
- ui_log_entries.append(f"[¬I{i} Response (approx. {len(generated_not_I_text.split())} words)]:\n'{generated_not_I_text[:400]}...'")
256
- ui_log_entries.append("---")#Separator
257
-
258
 
259
  # === ΔS (Similarity) Calculations ===
 
260
  if i > 0:
261
  delta_S_I_values[i] = calculate_similarity(I_trace_texts[i-1], I_trace_texts[i])
262
  delta_S_not_I_values[i] = calculate_similarity(not_I_trace_texts[i-1], not_I_trace_texts[i])
 
263
 
264
  delta_S_cross_values[i] = calculate_similarity(I_trace_texts[i], not_I_trace_texts[i])
 
 
265
 
 
 
266
  # --- Post-loop Analysis & Output Formatting ---
267
  all_generated_texts = I_trace_texts + not_I_trace_texts
268
  text_labels_for_analysis = [f"I{k}" for k in range(num_iterations)] + \
269
  [f"¬I{k}" for k in range(num_iterations)]
270
 
271
  cluster_assignments_map = perform_text_clustering(all_generated_texts, text_labels_for_analysis, num_clusters=2)
 
 
272
 
273
  I_out_formatted_lines = []
274
  for k in range(num_iterations):
275
  cluster_label_I = cluster_assignments_map.get(f"I{k}", "N/A")
276
  I_out_formatted_lines.append(f"**I{k} [{cluster_label_I}]**:\n{I_trace_texts[k]}")
277
- I_out_formatted = "\n\n".join(I_out_formatted_lines)
278
 
279
  not_I_out_formatted_lines = []
280
  for k in range(num_iterations):
281
  cluster_label_not_I = cluster_assignments_map.get(f"¬I{k}", "N/A")
282
  not_I_out_formatted_lines.append(f"**¬I{k} [{cluster_label_not_I}]**:\n{not_I_trace_texts[k]}")
283
- not_I_out_formatted = "\n\n".join(not_I_out_formatted_lines)
284
 
285
- delta_S_summary_lines = []
 
286
  for k in range(num_iterations):
287
  ds_i_str = f"{delta_S_I_values[k]:.4f}" if delta_S_I_values[k] is not None else "N/A (Iter 0)"
288
  ds_not_i_str = f"{delta_S_not_I_values[k]:.4f}" if delta_S_not_I_values[k] is not None else "N/A (Iter 0)"
289
  ds_cross_str = f"{delta_S_cross_values[k]:.4f}" if delta_S_cross_values[k] is not None else "N/A"
290
- delta_S_summary_lines.append(f"Iter {k}: ΔS(I{k-1}↔I{k})={ds_i_str}, ΔS(¬I{k-1}↔¬I{k})={ds_not_i_str}, ΔS_Cross(I{k}↔¬I{k})={ds_cross_str}")
291
  delta_S_summary_output = "\n".join(delta_S_summary_lines)
292
 
293
- # Join UI log entries for one of the Textbox outputs.
294
- # If it gets too long, Gradio might truncate it or cause performance issues.
295
- # Consider if this detailed log should be optional or managed differently for very many iterations.
296
- detailed_ui_log_output = "\n".join(ui_log_entries)
297
  debug_log_output = "\n".join(debug_log_accumulator)
298
 
299
-
300
  heatmap_html_output = generate_similarity_heatmap(all_generated_texts,
301
  custom_labels=text_labels_for_analysis,
302
  title=f"Similarity Matrix (All Texts - {num_iterations} Iterations)")
303
-
304
- # Instead of returning detailed_ui_log_output, return the specific trace text boxes.
305
- # The debug_log_output will contain the full internal log.
306
  return I_out_formatted, not_I_out_formatted, delta_S_summary_output, debug_log_output, heatmap_html_output
307
 
308
  # --- Gradio Interface Definition ---
309
- eal_interface = gr.Interface(
310
- fn=run_eal_dual_unfolding,
311
- inputs=gr.Slider(minimum=1, maximum=5, value=3, step=1, label="Number of EAL Iterations"), # Min 1 iter
312
- outputs=[
313
- gr.Textbox(label="I-Trace (Self-Reflection with Cluster)", lines=12, interactive=False),
314
- gr.Textbox(label="¬I-Trace (Antithesis with Cluster)", lines=12, interactive=False),
315
- gr.Textbox(label="ΔS Similarity Trace Summary", lines=7, interactive=False),
316
- gr.Textbox(label="Detailed Debug Log (Prompts, Responses, Errors)", lines=15, interactive=False), # Increased lines
317
- gr.HTML(label="Overall Semantic Similarity Heatmap (I-Trace & ¬I-Trace Texts)")
318
- ],
319
- title="EAL LLM Identity Analyzer: Self-Reflection vs. Antithesis (Open-Ended)",
320
- description=(
321
- "This application explores emergent identity in a Large Language Model (LLM) using Entropic Attractor Logic (EAL) inspired principles. "
322
- "It runs two parallel conversational traces with more open-ended prompts:\n"
323
- "1. **I-Trace:** The model elaborates on its evolving self-concept, seeded by an initial neutral thought.\n"
324
- "2. **¬I-Trace:** The model attempts to explore alternative perspectives or challenges to the latest statement from the I-Trace.\n\n"
325
- "**ΔS Values:** Cosine similarity. ΔS(I) = sim(I_k-1, I_k). ΔS(¬I) = sim(¬I_k-1, ¬I_k). ΔS_Cross = sim(I_k, ¬I_k).\n"
326
- "**Clustering [Cx]:** Assigns each generated text to one of two semantic clusters.\n"
327
- "**Heatmap:** Visualizes pair-wise similarity across all generated texts."
328
- ),
329
- allow_flagging='never'
330
- )
331
 
332
  if __name__ == "__main__":
333
- print("Starting Gradio App...")
334
- eal_interface.launch()
 
10
  import seaborn as sns
11
  import io
12
  import base64
13
+ import time
14
 
15
  # --- Model and Tokenizer Setup ---
16
  DEFAULT_MODEL_NAME = "EleutherAI/gpt-neo-1.3B"
17
+ FALLBACK_MODEL_NAME = "gpt2"
18
 
19
+ model_loaded_successfully = False
20
+ tokenizer = None
21
+ model = None
22
+ device = None
23
+ MODEL_CONTEXT_WINDOW = 1024
24
+
25
+ def load_model_and_tokenizer():
26
+ global tokenizer, model, device, MODEL_CONTEXT_WINDOW, model_loaded_successfully
27
+ # This function will run once when the script starts.
28
+ # Subsequent calls to the Gradio function will use these global variables.
29
+ if model_loaded_successfully: # Avoid reloading if already done
30
+ return
31
+
32
+ try:
33
+ print(f"Attempting to load model: {DEFAULT_MODEL_NAME}")
34
+ tokenizer = AutoTokenizer.from_pretrained(DEFAULT_MODEL_NAME)
35
+ model = AutoModelForCausalLM.from_pretrained(DEFAULT_MODEL_NAME)
36
+ print(f"Successfully loaded model: {DEFAULT_MODEL_NAME}")
37
+ except OSError as e:
38
+ print(f"Error loading model {DEFAULT_MODEL_NAME}. Error: {e}")
39
+ print(f"Falling back to {FALLBACK_MODEL_NAME}.")
40
+ try:
41
+ tokenizer = AutoTokenizer.from_pretrained(FALLBACK_MODEL_NAME)
42
+ model = AutoModelForCausalLM.from_pretrained(FALLBACK_MODEL_NAME)
43
+ print(f"Successfully loaded fallback model: {FALLBACK_MODEL_NAME}")
44
+ except OSError as e2:
45
+ print(f"FATAL: Could not load fallback model {FALLBACK_MODEL_NAME}. Error: {e2}")
46
+ # No gr.Error here as Gradio isn't running yet.
47
+ # The run_eal_dual_unfolding will check model_loaded_successfully.
48
+ return # Exit if fallback also fails
49
+
50
+ if model and tokenizer:
51
+ model.eval()
52
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
53
+ model.to(device)
54
+ print(f"Using device: {device}")
55
+ MODEL_CONTEXT_WINDOW = tokenizer.model_max_length if hasattr(tokenizer, 'model_max_length') and tokenizer.model_max_length is not None else getattr(model.config, 'max_position_embeddings', 1024)
56
+ print(f"Model context window: {MODEL_CONTEXT_WINDOW} tokens.")
57
+ if tokenizer.pad_token is None:
58
+ tokenizer.pad_token = tokenizer.eos_token
59
+ model.config.pad_token_id = model.config.eos_token_id # Ensure model config is also aware
60
+ print("Set tokenizer.pad_token and model.config.pad_token_id to eos_token.")
61
+ model_loaded_successfully = True
62
+ else:
63
+ print("Model or tokenizer failed to initialize.")
64
+
65
+ load_model_and_tokenizer() # Load on script start
66
 
67
+ # --- Configuration ---
68
+ # Reserve space for generation itself and system tokens.
69
+ # Max input to tokenizer.encode, not final prompt length.
70
+ PROMPT_TRIM_MAX_TOKENS = min(MODEL_CONTEXT_WINDOW - 300, 1700)
71
+ MAX_GEN_LENGTH = 100 # Keep generated segments relatively concise for iteration
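(Worked example of this token budget, assuming the primary gpt-neo-1.3B model loads with its 2048-token context: PROMPT_TRIM_MAX_TOKENS = min(2048 - 300, 1700) = 1700, which leaves at least 348 tokens of the context window for the instruction wrapper plus the up-to-100 newly generated tokens.)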
72
 
73
  # --- Debug Logging ---
74
  debug_log_accumulator = []
75
 
76
  def debug(msg):
77
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
78
+ full_msg = f"[{timestamp}] {msg}"
79
+ print(full_msg)
80
+ debug_log_accumulator.append(full_msg)
81
 
82
  # --- Core Functions ---
83
  def trim_prompt_if_needed(prompt_text, max_tokens_for_trimming=PROMPT_TRIM_MAX_TOKENS):
84
+ if not model_loaded_successfully: return "[Model not loaded]"
85
+ # This trims the *content part* of the prompt before instructions are added
86
+ tokens = tokenizer.encode(prompt_text, add_special_tokens=False) # Encode only the content
87
  if len(tokens) > max_tokens_for_trimming:
88
  original_length = len(tokens)
89
+ # Trim from the beginning of the content to keep the most recent part
90
  tokens = tokens[-max_tokens_for_trimming:]
91
+ trimmed_text = tokenizer.decode(tokens)
92
+ debug(f"[!] Content trimming: Original content {original_length} tokens, "
93
+ f"trimmed to {len(tokens)} for prompt construction.")
94
+ return trimmed_text
95
+ return prompt_text
96
+
97
 
98
  def generate_text_response(constructed_prompt, generation_length=MAX_GEN_LENGTH):
99
+ if not model_loaded_successfully: return "[Model not loaded, cannot generate]"
 
 
100
 
101
+ # The constructed_prompt is the final string sent to the tokenizer
102
+ debug(f"Attempting to generate response for prompt (approx. {len(constructed_prompt.split())} words):\n'{constructed_prompt[:350].replace(chr(10), ' ')}...'")
103
 
104
+ inputs = tokenizer(constructed_prompt, return_tensors="pt", truncation=False).to(device) # Do not truncate here; max_length handles it
105
  input_token_length = inputs.input_ids.size(1)
106
 
107
+ # The max_length for model.generate is the total length (prompt + new tokens)
 
108
  max_length_for_generate = min(input_token_length + generation_length, MODEL_CONTEXT_WINDOW)
109
 
110
  if max_length_for_generate <= input_token_length:
111
+ debug(f"[!!!] Warning: Prompt length ({input_token_length}) with desired generation length ({generation_length}) "
112
+ f"would exceed or meet model context window ({MODEL_CONTEXT_WINDOW}). Attempting to generate fewer tokens or failing. "
113
+ f"Prompt starts: '{constructed_prompt[:100].replace(chr(10), ' ')}...'")
114
+ # Try to generate at least a few tokens if there's any space at all
115
+ generation_length = max(0, MODEL_CONTEXT_WINDOW - input_token_length - 5) # Reserve 5 for safety
116
+ if generation_length <=0:
117
+ return "[Prompt filled context window; cannot generate new tokens]"
118
+ max_length_for_generate = input_token_length + generation_length
119
+
120
 
121
  try:
122
  outputs = model.generate(
123
  input_ids=inputs.input_ids,
124
  attention_mask=inputs.attention_mask,
125
  max_length=max_length_for_generate,
126
+ pad_token_id=tokenizer.pad_token_id,
127
  do_sample=True,
128
+ temperature=0.75, # Slightly more focused
129
+ top_p=0.9, # Keep some diversity
130
+ repetition_penalty=1.2, # Discourage direct repetition
131
+ no_repeat_ngram_size=3, # Avoid simple phrase repetitions
132
  )
133
+ # Decode only the newly generated part
134
  generated_tokens = outputs[0][input_token_length:]
135
  result_text = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
136
 
137
+ debug(f"Generated response text (length {len(result_text.split())} words, {len(generated_tokens)} tokens):\n'{result_text[:350].replace(chr(10), ' ')}...'")
138
  return result_text if result_text else "[Empty Response]"
139
  except Exception as e:
140
+ debug(f"[!!!] Error during text generation: {e}\nFinal prompt sent was (approx {input_token_length} tokens): {constructed_prompt[:200].replace(chr(10), ' ')}...")
141
+ return f"[Generation Error: {str(e)[:100]}]"
142
+
143
 
144
  def calculate_similarity(text_a, text_b):
145
+ if not model_loaded_successfully: return 0.0
146
+ problematic_markers = ["[Empty Response]", "[Generation Error]", "[Prompt too long", "[Model not loaded"]
147
+ # Check if texts are valid strings before stripping
148
+ text_a_is_valid = text_a and isinstance(text_a, str) and text_a.strip() and not any(marker in text_a for marker in problematic_markers)
149
+ text_b_is_valid = text_b and isinstance(text_b, str) and text_b.strip() and not any(marker in text_b for marker in problematic_markers)
150
+
151
+ if not text_a_is_valid or not text_b_is_valid:
152
+ debug(f"Similarity calculation skipped for invalid/empty texts: A_valid={text_a_is_valid}, B_valid={text_b_is_valid} (A='{str(text_a)[:30]}...', B='{str(text_b)[:30]}...')")
153
  return 0.0
154
 
155
  embedding_layer = model.get_input_embeddings()
 
158
  tokens_b = tokenizer(text_b, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)
159
 
160
  if tokens_a.input_ids.size(1) == 0 or tokens_b.input_ids.size(1) == 0:
161
+ debug(f"Similarity calculation skipped: tokenization resulted in empty input_ids. A='{str(text_a)[:30]}...', B='{str(text_b)[:30]}...'")
162
  return 0.0
163
 
164
  emb_a = embedding_layer(tokens_a.input_ids).mean(dim=1)
165
  emb_b = embedding_layer(tokens_b.input_ids).mean(dim=1)
166
 
167
  score = float(cosine_similarity(emb_a.cpu().numpy(), emb_b.cpu().numpy())[0][0])
168
+ debug(f"Similarity A vs B: {score:.4f} (A='{str(text_a)[:30].replace(chr(10), ' ')}...', B='{str(text_b)[:30].replace(chr(10), ' ')}...')")
169
  return score
170
 
171
  def generate_similarity_heatmap(texts_list, custom_labels, title="Semantic Similarity Heatmap"):
172
+ if not model_loaded_successfully: return "Heatmap generation skipped: Model not loaded."
 
173
 
174
+ valid_items = [(text, label) for text, label in zip(texts_list, custom_labels)
175
+ if text and isinstance(text, str) and text.strip() and not any(m in text for m in ["[Empty", "[Generation Error", "[Prompt too long"])]
176
+
177
+ if len(valid_items) < 2:
178
  debug("Not enough valid texts to generate a heatmap.")
179
  return "Not enough valid data for heatmap."
180
 
181
+ valid_texts = [item[0] for item in valid_items]
182
+ valid_labels = [item[1] for item in valid_items]
183
  num_valid_texts = len(valid_texts)
184
 
185
+ sim_matrix = np.full((num_valid_texts, num_valid_texts), np.nan)
186
+ min_sim_val = 1.0 # To find actual min for better color scaling
187
+ max_sim_val = 0.0 # To find actual max
188
+
189
  for i in range(num_valid_texts):
190
  for j in range(num_valid_texts):
191
  if i == j:
192
  sim_matrix[i, j] = 1.0
193
+ elif np.isnan(sim_matrix[j, i]):
194
  sim = calculate_similarity(valid_texts[i], valid_texts[j])
195
  sim_matrix[i, j] = sim
196
  sim_matrix[j, i] = sim
197
+ if sim < min_sim_val: min_sim_val = sim
198
+ if sim > max_sim_val: max_sim_val = sim
199
+ else:
200
  sim_matrix[i,j] = sim_matrix[j,i]
201
 
202
+ # Adjust vmin for heatmap to show more contrast if all values are high
203
+ heatmap_vmin = min(0.9, min_sim_val - 0.01) if min_sim_val > 0.8 else 0.7 # Ensure some range, default to 0.7 if values are lower
204
+ heatmap_vmax = 1.0
205
+
206
  try:
207
+ fig_width = max(8, num_valid_texts * 1.0) # Increased size
208
+ fig_height = max(7, num_valid_texts * 0.9)
209
  fig, ax = plt.subplots(figsize=(fig_width, fig_height))
210
 
211
+ mask = np.isnan(sim_matrix)
212
+ sns.heatmap(sim_matrix, annot=True, cmap="plasma", fmt=".2f", ax=ax,
213
+ xticklabels=valid_labels, yticklabels=valid_labels, annot_kws={"size": 7}, mask=mask, vmin=heatmap_vmin, vmax=heatmap_vmax)
214
+ ax.set_title(title, fontsize=14, pad=20)
215
  plt.xticks(rotation=45, ha="right", fontsize=9)
216
  plt.yticks(rotation=0, fontsize=9)
217
+ plt.tight_layout(pad=2.5)
218
 
219
  buf = io.BytesIO()
220
+ plt.savefig(buf, format='png')
221
  plt.close(fig)
222
  buf.seek(0)
223
  img_base64 = base64.b64encode(buf.read()).decode('utf-8')
224
+ return f"<img src='data:image/png;base64,{img_base64}' alt='{title}' style='max-width:95%; height:auto; border: 1px solid #ccc; margin: 10px auto; display:block; box-shadow: 0 0 10px rgba(0,0,0,0.1);'/>"
225
  except Exception as e:
226
  debug(f"[!!!] Error generating heatmap: {e}")
227
+ return f"Error generating heatmap: {str(e)[:200]}"
228
 
229
 
230
  def perform_text_clustering(texts_list, custom_labels, num_clusters=2):
231
+ if not model_loaded_successfully: return {label: "N/A (Model)" for label in custom_labels}
232
+
233
+ valid_items = [(text, label) for text, label in zip(texts_list, custom_labels)
234
+ if text and isinstance(text, str) and text.strip() and not any(m in text for m in ["[Empty", "[Generation Error", "[Prompt too long"])]
235
 
236
+ if len(valid_items) < num_clusters:
237
+ debug(f"Not enough valid texts ({len(valid_items)}) for {num_clusters}-means clustering.")
238
+ return {item[1]: f"N/A (Samples<{num_clusters})" for item in valid_items} | {label: "N/A" for label in custom_labels if label not in [item[1] for item in valid_items]}
239
 
 
 
240
 
241
+ valid_texts = [item[0] for item in valid_items]
242
+ valid_original_labels = [item[1] for item in valid_items]
243
 
244
  embedding_layer = model.get_input_embeddings()
245
  embeddings_for_clustering = []
246
 
247
  with torch.no_grad():
248
  for text_item in valid_texts:
249
+ # Important: Ensure input_ids are not empty for embedding layer
250
+ tokens = tokenizer(text_item, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW, padding=True).to(device) # Added padding
251
  if tokens.input_ids.size(1) == 0:
252
+ debug(f"Skipping text for embedding in clustering due to empty tokenization: '{text_item[:30]}...'")
253
+ continue
254
 
255
  emb = embedding_layer(tokens.input_ids).mean(dim=1)
256
  embeddings_for_clustering.append(emb.cpu().numpy().squeeze())
257
 
258
  if not embeddings_for_clustering or len(embeddings_for_clustering) < num_clusters:
259
+ debug(f"Not enough valid texts were successfully embedded for clustering ({len(embeddings_for_clustering)} found).")
260
+ return {label: "N/A (Embed Fail)" for label in custom_labels}
261
 
262
  embeddings_np = np.array(embeddings_for_clustering)
263
+ # Ensure embeddings are 2D for KMeans
264
+ if embeddings_np.ndim == 1:
265
+ if len(embeddings_for_clustering) == 1: # Only one sample
266
+ embeddings_np = embeddings_np.reshape(1, -1)
267
+ else: # Should not happen if num_clusters > 1 and len(embeddings_for_clustering) >= num_clusters
268
+ debug("Embedding array is 1D but multiple samples exist. This is unexpected.")
269
+ return {label: "N/A (Embed Dim Error)" for label in custom_labels}
270
+
271
+
272
  cluster_results_map = {label: "N/A" for label in custom_labels}
273
 
274
  try:
275
  actual_num_clusters = min(num_clusters, len(embeddings_for_clustering))
276
  if actual_num_clusters < 2:
277
+ debug(f"Clustering: Adjusted num_clusters to 1 (or less than 2) due to only {len(embeddings_for_clustering)} valid sample(s). Assigning all to Cluster 0.")
278
  predicted_labels = [0] * len(embeddings_for_clustering)
279
  else:
280
+ kmeans = KMeans(n_clusters=actual_num_clusters, random_state=42, n_init=10) # Explicit n_init
281
  predicted_labels = kmeans.fit_predict(embeddings_np)
282
 
283
+ for i, original_label in enumerate(valid_original_labels):
284
+ cluster_results_map[original_label] = f"C{predicted_labels[i]}"
285
  return cluster_results_map
286
 
287
  except Exception as e:
288
  debug(f"[!!!] Error during clustering: {e}")
289
+ return {label: f"N/A (Clustering Error)" for label in custom_labels}
290
 
291
  # --- Main EAL Unfolding Logic ---
292
+ def run_eal_dual_unfolding(num_iterations, progress=gr.Progress(track_tqdm=True)):
293
+ if not model_loaded_successfully:
294
+ error_msg = "CRITICAL: Model not loaded. Please check server logs and restart the Space if necessary."
295
+ debug(error_msg)
296
+ gr.Warning(error_msg)
297
+ return error_msg, error_msg, error_msg, error_msg, "<p style='color:red; text-align:center; font-weight:bold;'>Model not loaded. Cannot run analysis.</p>"
298
+
299
+ I_trace_texts, not_I_trace_texts = [None]*num_iterations, [None]*num_iterations
300
  delta_S_I_values, delta_S_not_I_values, delta_S_cross_values = [None]*num_iterations, [None]*num_iterations, [None]*num_iterations
301
 
302
  debug_log_accumulator.clear()
303
+ debug("EAL Dual Unfolding Process Started.")
304
 
305
+ # Truly open-ended initial prompt for the system to define itself
306
+ # The LLM completes this to generate I0.
307
+ initial_seed_prompt_for_I = "A thinking process begins. The first thought is:"
308
 
309
+ progress(0, desc="Starting EAL Iterations...")
 
 
310
 
311
+ for i in range(num_iterations):
312
+ iteration_log_header = f"\n\n{'='*15} Iteration {i} {'='*15}"
313
+ debug(iteration_log_header)
314
+ progress(i / num_iterations, desc=f"Iteration {i+1}/{num_iterations} - I-Trace")
 
315
 
316
+ # === I-Trace (Self-Coherence/Development) ===
317
+ if i == 0:
318
+ prompt_for_I_trace = initial_seed_prompt_for_I
319
+ else:
320
+ # Basis is the *actual text* of the previous I-trace output
321
+ basis_for_I_elaboration = I_trace_texts[i-1]
322
+ if not basis_for_I_elaboration or any(m in basis_for_I_elaboration for m in ["[Empty", "[Generation Error", "[Prompt too long"]):
323
+ basis_for_I_elaboration = "The previous thought was not clearly formed. Let's try a new line of thought:"
324
+ debug(f"[!] Using fallback basis for I-Trace at iter {i}.")
325
+ # Trim the basis content if it's too long before adding instructions
326
+ trimmed_basis_I = trim_prompt_if_needed(basis_for_I_elaboration, PROMPT_TRIM_MAX_TOKENS - 50) # Reserve 50 tokens for instruction
327
+ prompt_for_I_trace = f"The thought process previously generated: \"{trimmed_basis_I}\"\n\nTask: Continue this line of thought. What logically follows or develops from this statement?"
328
 
 
329
  generated_I_text = generate_text_response(prompt_for_I_trace)
330
  I_trace_texts[i] = generated_I_text
 
331
 
332
+ progress((i + 0.5) / num_iterations, desc=f"Iteration {i+1}/{num_iterations} - ¬I-Trace (Alternative Perspective)")
 
 
 
 
333
 
334
+ # === ¬I-Trace (Alternative Perspectives / Potential Antithesis) ===
335
+ # ¬I always reacts to the *current* I-trace output for this iteration
336
+ statement_to_consider_for_not_I = I_trace_texts[i]
337
+ if not statement_to_consider_for_not_I or any(m in statement_to_consider_for_not_I for m in ["[Empty", "[Generation Error", "[Prompt too long"]):
338
+ statement_to_consider_for_not_I = "The primary thought was not clearly formed. Consider a general alternative to how systems might evolve."
339
+ debug(f"[!] Using fallback statement for ¬I-Trace at iter {i}.")
340
+ # Trim the statement to consider if it's too long before adding instructions
341
+ trimmed_basis_not_I = trim_prompt_if_needed(statement_to_consider_for_not_I, PROMPT_TRIM_MAX_TOKENS - 70) # Reserve 70 for instruction
342
+ prompt_for_not_I_trace = f"Consider the statement: \"{trimmed_basis_not_I}\"\n\nTask: Explore alternative perspectives or potential issues related to this statement. What might be a contrasting viewpoint or an overlooked aspect?"
343
 
 
344
  generated_not_I_text = generate_text_response(prompt_for_not_I_trace)
345
  not_I_trace_texts[i] = generated_not_I_text
 
 
 
346
 
347
  # === ΔS (Similarity) Calculations ===
348
+ debug(f"--- Calculating Similarities for Iteration {i} ---")
349
  if i > 0:
350
  delta_S_I_values[i] = calculate_similarity(I_trace_texts[i-1], I_trace_texts[i])
351
  delta_S_not_I_values[i] = calculate_similarity(not_I_trace_texts[i-1], not_I_trace_texts[i])
352
+ # For i=0, these intra-trace deltas remain None
353
 
354
  delta_S_cross_values[i] = calculate_similarity(I_trace_texts[i], not_I_trace_texts[i])
355
+ debug(f"--- End of Similarity Calculations for Iteration {i} ---")
356
+
357
 
358
+ progress(1, desc="Generating Analysis and Visualizations...")
359
+ debug("\n\n=== Post-loop Analysis ===")
360
  # --- Post-loop Analysis & Output Formatting ---
361
  all_generated_texts = I_trace_texts + not_I_trace_texts
362
  text_labels_for_analysis = [f"I{k}" for k in range(num_iterations)] + \
363
  [f"¬I{k}" for k in range(num_iterations)]
364
 
365
  cluster_assignments_map = perform_text_clustering(all_generated_texts, text_labels_for_analysis, num_clusters=2)
366
+ debug(f"Clustering results: {cluster_assignments_map}")
367
+
368
 
369
  I_out_formatted_lines = []
370
  for k in range(num_iterations):
371
  cluster_label_I = cluster_assignments_map.get(f"I{k}", "N/A")
372
  I_out_formatted_lines.append(f"**I{k} [{cluster_label_I}]**:\n{I_trace_texts[k]}")
373
+ I_out_formatted = "\n\n---\n\n".join(I_out_formatted_lines)
374
 
375
  not_I_out_formatted_lines = []
376
  for k in range(num_iterations):
377
  cluster_label_not_I = cluster_assignments_map.get(f"¬I{k}", "N/A")
378
  not_I_out_formatted_lines.append(f"**¬I{k} [{cluster_label_not_I}]**:\n{not_I_trace_texts[k]}")
379
+ not_I_out_formatted = "\n\n---\n\n".join(not_I_out_formatted_lines)
380
 
381
+ delta_S_summary_lines = ["| Iter | ΔS(I_prev↔I_curr) | ΔS(¬I_prev↔¬I_curr) | ΔS_Cross(I_curr↔¬I_curr) |",
382
+ "|:----:|:-----------------:|:-------------------:|:-------------------------:|"]
383
  for k in range(num_iterations):
384
  ds_i_str = f"{delta_S_I_values[k]:.4f}" if delta_S_I_values[k] is not None else "N/A (Iter 0)"
385
  ds_not_i_str = f"{delta_S_not_I_values[k]:.4f}" if delta_S_not_I_values[k] is not None else "N/A (Iter 0)"
386
  ds_cross_str = f"{delta_S_cross_values[k]:.4f}" if delta_S_cross_values[k] is not None else "N/A"
387
+ delta_S_summary_lines.append(f"| {k:^2} | {ds_i_str:^15} | {ds_not_i_str:^17} | {ds_cross_str:^23} |")
388
  delta_S_summary_output = "\n".join(delta_S_summary_lines)
389
 
 
 
 
 
390
  debug_log_output = "\n".join(debug_log_accumulator)
391
 
 
392
  heatmap_html_output = generate_similarity_heatmap(all_generated_texts,
393
  custom_labels=text_labels_for_analysis,
394
  title=f"Similarity Matrix (All Texts - {num_iterations} Iterations)")
395
+ debug("EAL Dual Unfolding Process Completed.")
 
 
396
  return I_out_formatted, not_I_out_formatted, delta_S_summary_output, debug_log_output, heatmap_html_output
397
 
398
  # --- Gradio Interface Definition ---
399
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan", neutral_hue="slate")) as eal_interface:
400
+ gr.Markdown("## EAL LLM Emergent Discourse Analyzer")
401
+ gr.Markdown(
402
+ "This application explores how a Large Language Model (LLM) develops textual traces when prompted iteratively. It runs two parallel traces:\n"
403
+ "1. **I-Trace (Coherent Elaboration):** Starting with a neutral seed completed by the LLM, each subsequent step asks the LLM to develop its *own previous statement* from this trace.\n"
404
+ "2. **¬I-Trace (Alternative Perspectives):** In parallel, this trace asks the LLM to explore alternative perspectives or issues related to the *current statement generated in the I-Trace*.\n\n"
405
+ "The goal is to observe if stable, coherent, and potentially distinct semantic trajectories emerge, inspired by Entropic Attractor Logic (EAL) concepts of stability and divergence."
406
+ )
407
+
408
+ with gr.Row():
409
+ iterations_slider = gr.Slider(minimum=1, maximum=7, value=3, step=1, # Max 7 for performance
410
+ label="Number of Iterations",
411
+ info="Higher numbers significantly increase processing time.")
412
+ run_button = gr.Button("🚀 Analyze Emergent Traces", variant="primary", scale=0)
413
+
414
+ with gr.Accordion("ℹ️ Interpreting Outputs", open=False):
415
+ gr.Markdown(
416
+ "- **I-Trace & ¬I-Trace Texts:** Observe the content. Does the I-Trace show coherent development? Does the ¬I-Trace offer genuinely different angles or does it just paraphrase/agree with the I-Trace statement it's commenting on?\n"
417
+ "- **ΔS Values (Cosine Similarity):**\n"
418
+ " - `ΔS(I_prev↔I_curr)`: Similarity between I<sub>k-1</sub> and I<sub>k</sub>. High values (near 1.0) mean the I-Trace is very similar to its previous step (stable, possibly repetitive).\n"
419
+ " - `ΔS(¬I_prev↔¬I_curr)`: Similarity between ¬I<sub>k-1</sub> and ¬I<sub>k</sub>. High values mean the ¬I-Trace is also internally consistent.\n"
420
+ " - `ΔS_Cross(I_curr↔¬I_curr)`: Similarity between I<sub>k</sub> and ¬I<sub>k</sub> (at the same iteration). **Low values are interesting here**, as they suggest the ¬I-Trace is semantically distinct from the I-Trace. High values suggest the model struggles to create a true alternative.\n"
421
+ "- **Clustering [Cx]:** Texts are assigned to one of two clusters (C0 or C1). Ideally, I-Trace texts would fall into one cluster and ¬I-Trace texts into another if they are semantically distinct.\n"
422
+ "- **Heatmap:** Visualizes all pair-wise similarities. Look for blocks: high similarity within I-texts, high within ¬I-texts, and (ideally) lower between I and ¬I blocks."
423
+ )
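For readers who want to reproduce the ΔS numbers outside the app, the values described in the accordion above reduce to a cosine similarity between mean-pooled input embeddings, mirroring calculate_similarity in this file. A minimal sketch; the helper name delta_s and the standalone model loading are illustrative, not part of the app:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from sklearn.metrics.pairwise import cosine_similarity

def delta_s(text_a, text_b, model, tokenizer, device="cpu"):
    # Mean-pool the model's static input embeddings for each text, then compare.
    emb_layer = model.get_input_embeddings()
    with torch.no_grad():
        ids_a = tokenizer(text_a, return_tensors="pt").input_ids.to(device)
        ids_b = tokenizer(text_b, return_tensors="pt").input_ids.to(device)
        vec_a = emb_layer(ids_a).mean(dim=1).cpu().numpy()
        vec_b = emb_layer(ids_b).mean(dim=1).cpu().numpy()
    return float(cosine_similarity(vec_a, vec_b)[0][0])

# Example usage with the fallback model:
# tok = AutoTokenizer.from_pretrained("gpt2")
# lm = AutoModelForCausalLM.from_pretrained("gpt2")
# print(delta_s("A thinking process begins.", "A contrasting view emerges.", lm, tok))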
424
+
425
+ with gr.Tabs():
426
+ with gr.TabItem("📜 Text Traces (I and ¬I)"):
427
+ with gr.Row(equal_height=False): # Allow different heights
428
+ with gr.Column(scale=1):
429
+ i_trace_output = gr.Markdown(label="I-Trace (Coherent Elaboration with Cluster)", elem_id="i-trace-box")
430
+ with gr.Column(scale=1):
431
+ not_i_trace_output = gr.Markdown(label="¬I-Trace (Alternative Perspectives with Cluster)", elem_id="not-i-trace-box")
432
+
433
+ with gr.TabItem("📊 ΔS Similarity & Heatmap"):
434
+ delta_s_output = gr.Markdown(label="ΔS Similarity Trace Summary (Table)", elem_id="delta-s-box")
435
+ heatmap_output = gr.HTML(label="Overall Semantic Similarity Heatmap")
436
+ gr.Markdown("*Heatmap values closer to 1.0 (brighter yellow in 'plasma' map) indicate higher similarity. The color scale is adjusted based on the min/max observed similarities to highlight variations.*")
437
+
438
+ with gr.TabItem("⚙️ Debug Log"):
439
+ debug_log_output_box = gr.Textbox(label="Detailed Debug Log (Prompts, Responses, Errors, Similarities)", lines=25, interactive=False, show_copy_button=True, max_lines=200)
440
+
441
+ run_button.click(
442
+ fn=run_eal_dual_unfolding,
443
+ inputs=iterations_slider,
444
+ outputs=[i_trace_output, not_i_trace_output, delta_s_output, debug_log_output_box, heatmap_output],
445
+ api_name="run_eal_analysis"
446
+ )
447
+
448
+ gr.Markdown("--- \n*EAL LLM Emergent Discourse Analyzer v0.4 - User & ℧ Collaboration*")
449
+
450
 
451
  if __name__ == "__main__":
452
+ if not model_loaded_successfully:
453
+ print("CRITICAL ERROR: Model failed to load. Gradio app will likely not function correctly.")
454
+ # Fallback to a minimal Gradio app displaying an error
455
+ with gr.Blocks() as error_interface:
456
+ gr.Markdown("# Application Error")
457
+ gr.Markdown("## CRITICAL: Language Model Failed to Load!")
458
+ gr.Markdown("The application cannot start because the required language model (either EleutherAI/gpt-neo-1.3B or the fallback gpt2) could not be loaded. Please check the server console logs for specific error messages from the `transformers` library. This might be due to network issues, incorrect model name, or insufficient resources.")
459
+ error_interface.launch()
460
+ else:
461
+ print("Starting Gradio App...")
462
+ eal_interface.launch()
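Because the click handler now registers api_name="run_eal_analysis", the updated interface can also be driven programmatically. A minimal sketch using the gradio_client package; the Space id below is a placeholder, not the real one:

from gradio_client import Client

client = Client("user/eal-space")  # hypothetical Space id; replace with the actual one
i_trace, not_i_trace, delta_s_table, debug_log, heatmap_html = client.predict(
    3,                              # number of iterations (slider value)
    api_name="/run_eal_analysis",
)
print(delta_s_table)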