neuralworm committed on
Commit fcc55bd · 1 Parent(s): 96b07ba

update app.py, update requirements.txt

Files changed (2)
  1. app.py +214 -452
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,462 +1,224 @@
1
- import torch
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
  from sklearn.metrics.pairwise import cosine_similarity
4
  from sklearn.cluster import KMeans
5
- import numpy as np
6
- import gradio as gr
7
- import matplotlib
8
- matplotlib.use('Agg') # Use a non-interactive backend for Matplotlib
9
- import matplotlib.pyplot as plt
10
- import seaborn as sns
11
- import io
12
- import base64
13
- import time
14
-
15
- # --- Model and Tokenizer Setup ---
16
- DEFAULT_MODEL_NAME = "EleutherAI/gpt-neo-1.3B"
17
- FALLBACK_MODEL_NAME = "gpt2"
18
-
19
- model_loaded_successfully = False
20
- tokenizer = None
21
- model = None
22
- device = None
23
- MODEL_CONTEXT_WINDOW = 1024
24
-
25
- def load_model_and_tokenizer():
26
- global tokenizer, model, device, MODEL_CONTEXT_WINDOW, model_loaded_successfully
27
- # This function will run once when the script starts.
28
- # Subsequent calls to the Gradio function will use these global variables.
29
- if model_loaded_successfully: # Avoid reloading if already done
30
  return
31
 
32
- try:
33
- print(f"Attempting to load model: {DEFAULT_MODEL_NAME}")
34
- tokenizer = AutoTokenizer.from_pretrained(DEFAULT_MODEL_NAME)
35
- model = AutoModelForCausalLM.from_pretrained(DEFAULT_MODEL_NAME)
36
- print(f"Successfully loaded model: {DEFAULT_MODEL_NAME}")
37
- except OSError as e:
38
- print(f"Error loading model {DEFAULT_MODEL_NAME}. Error: {e}")
39
- print(f"Falling back to {FALLBACK_MODEL_NAME}.")
40
- try:
41
- tokenizer = AutoTokenizer.from_pretrained(FALLBACK_MODEL_NAME)
42
- model = AutoModelForCausalLM.from_pretrained(FALLBACK_MODEL_NAME)
43
- print(f"Successfully loaded fallback model: {FALLBACK_MODEL_NAME}")
44
- except OSError as e2:
45
- print(f"FATAL: Could not load fallback model {FALLBACK_MODEL_NAME}. Error: {e2}")
46
- # No gr.Error here as Gradio isn't running yet.
47
- # The run_eal_dual_unfolding will check model_loaded_successfully.
48
- return # Exit if fallback also fails
49
-
50
- if model and tokenizer:
51
- model.eval()
52
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
53
- model.to(device)
54
- print(f"Using device: {device}")
55
- MODEL_CONTEXT_WINDOW = tokenizer.model_max_length if hasattr(tokenizer, 'model_max_length') and tokenizer.model_max_length is not None else getattr(model.config, 'max_position_embeddings', 1024)
56
- print(f"Model context window: {MODEL_CONTEXT_WINDOW} tokens.")
57
- if tokenizer.pad_token is None:
58
- tokenizer.pad_token = tokenizer.eos_token
59
- model.config.pad_token_id = model.config.eos_token_id # Ensure model config is also aware
60
- print("Set tokenizer.pad_token and model.config.pad_token_id to eos_token.")
61
- model_loaded_successfully = True
62
- else:
63
- print("Model or tokenizer failed to initialize.")
64
-
65
- load_model_and_tokenizer() # Load on script start
66
-
67
- # --- Configuration ---
68
- # Reserve space for generation itself and system tokens.
69
- # Max input to tokenizer.encode, not final prompt length.
70
- PROMPT_TRIM_MAX_TOKENS = min(MODEL_CONTEXT_WINDOW - 300, 1700)
71
- MAX_GEN_LENGTH = 100 # Keep generated segments relatively concise for iteration
72
-
73
- # --- Debug Logging ---
74
- debug_log_accumulator = []
75
-
76
- def debug(msg):
77
- timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
78
- full_msg = f"[{timestamp}] {msg}"
79
- print(full_msg)
80
- debug_log_accumulator.append(full_msg)
81
-
82
- # --- Core Functions ---
83
- def trim_prompt_if_needed(prompt_text, max_tokens_for_trimming=PROMPT_TRIM_MAX_TOKENS):
84
- if not model_loaded_successfully: return "[Model not loaded]"
85
- # This trims the *content part* of the prompt before instructions are added
86
- tokens = tokenizer.encode(prompt_text, add_special_tokens=False) # Encode only the content
87
- if len(tokens) > max_tokens_for_trimming:
88
- original_length = len(tokens)
89
- # Trim from the beginning of the content to keep the most recent part
90
- tokens = tokens[-max_tokens_for_trimming:]
91
- trimmed_text = tokenizer.decode(tokens)
92
- debug(f"[!] Content trimming: Original content {original_length} tokens, "
93
- f"trimmed to {len(tokens)} for prompt construction.")
94
- return trimmed_text
95
- return prompt_text
96
-
97
-
98
- def generate_text_response(constructed_prompt, generation_length=MAX_GEN_LENGTH):
99
- if not model_loaded_successfully: return "[Model not loaded, cannot generate]"
100
-
101
- # The constructed_prompt is the final string sent to the tokenizer
102
- debug(f"Attempting to generate response for prompt (approx. {len(constructed_prompt.split())} words):\n'{constructed_prompt[:350].replace(chr(10), ' ')}...'")
103
-
104
- inputs = tokenizer(constructed_prompt, return_tensors="pt", truncation=False).to(device) # Do not truncate here; max_length handles it
105
- input_token_length = inputs.input_ids.size(1)
106
-
107
- # The max_length for model.generate is the total length (prompt + new tokens)
108
- max_length_for_generate = min(input_token_length + generation_length, MODEL_CONTEXT_WINDOW)
109
-
110
- if max_length_for_generate <= input_token_length:
111
- debug(f"[!!!] Warning: Prompt length ({input_token_length}) with desired generation length ({generation_length}) "
112
- f"would exceed or meet model context window ({MODEL_CONTEXT_WINDOW}). Attempting to generate fewer tokens or failing. "
113
- f"Prompt starts: '{constructed_prompt[:100].replace(chr(10), ' ')}...'")
114
- # Try to generate at least a few tokens if there's any space at all
115
- generation_length = max(0, MODEL_CONTEXT_WINDOW - input_token_length - 5) # Reserve 5 for safety
116
- if generation_length <=0:
117
- return "[Prompt filled context window; cannot generate new tokens]"
118
- max_length_for_generate = input_token_length + generation_length
119
-
120
-
121
- try:
122
- outputs = model.generate(
123
- input_ids=inputs.input_ids,
124
- attention_mask=inputs.attention_mask,
125
- max_length=max_length_for_generate,
126
  pad_token_id=tokenizer.pad_token_id,
127
- do_sample=True,
128
- temperature=0.75, # Slightly more focused
129
- top_p=0.9, # Keep some diversity
130
- repetition_penalty=1.2, # Discourage direct repetition
131
- no_repeat_ngram_size=3, # Avoid simple phrase repetitions
132
  )
133
- # Decode only the newly generated part
134
- generated_tokens = outputs[0][input_token_length:]
135
- result_text = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
136
-
137
- debug(f"Generated response text (length {len(result_text.split())} words, {len(generated_tokens)} tokens):\n'{result_text[:350].replace(chr(10), ' ')}...'")
138
- return result_text if result_text else "[Empty Response]"
139
- except Exception as e:
140
- debug(f"[!!!] Error during text generation: {e}\nFinal prompt sent was (approx {input_token_length} tokens): {constructed_prompt[:200].replace(chr(10), ' ')}...")
141
- return f"[Generation Error: {str(e)[:100]}]"
142
-
143
-
144
- def calculate_similarity(text_a, text_b):
145
- if not model_loaded_successfully: return 0.0
146
- problematic_markers = ["[Empty Response]", "[Generation Error]", "[Prompt too long", "[Model not loaded"]
147
- # Check if texts are valid strings before stripping
148
- text_a_is_valid = text_a and isinstance(text_a, str) and text_a.strip() and not any(marker in text_a for marker in problematic_markers)
149
- text_b_is_valid = text_b and isinstance(text_b, str) and text_b.strip() and not any(marker in text_b for marker in problematic_markers)
150
-
151
- if not text_a_is_valid or not text_b_is_valid:
152
- debug(f"Similarity calculation skipped for invalid/empty texts: A_valid={text_a_is_valid}, B_valid={text_b_is_valid} (A='{str(text_a)[:30]}...', B='{str(text_b)[:30]}...')")
153
- return 0.0
154
-
155
- embedding_layer = model.get_input_embeddings()
156
- with torch.no_grad():
157
- tokens_a = tokenizer(text_a, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)
158
- tokens_b = tokenizer(text_b, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)
159
-
160
- if tokens_a.input_ids.size(1) == 0 or tokens_b.input_ids.size(1) == 0:
161
- debug(f"Similarity calculation skipped: tokenization resulted in empty input_ids. A='{str(text_a)[:30]}...', B='{str(text_b)[:30]}...'")
162
- return 0.0
163
-
164
- emb_a = embedding_layer(tokens_a.input_ids).mean(dim=1)
165
- emb_b = embedding_layer(tokens_b.input_ids).mean(dim=1)
166
-
167
- score = float(cosine_similarity(emb_a.cpu().numpy(), emb_b.cpu().numpy())[0][0])
168
- debug(f"Similarity A vs B: {score:.4f} (A='{str(text_a)[:30].replace(chr(10), ' ')}...', B='{str(text_b)[:30].replace(chr(10), ' ')}...')")
169
- return score
170
-
171
- def generate_similarity_heatmap(texts_list, custom_labels, title="Semantic Similarity Heatmap"):
172
- if not model_loaded_successfully: return "Heatmap generation skipped: Model not loaded."
173
-
174
- valid_items = [(text, label) for text, label in zip(texts_list, custom_labels)
175
- if text and isinstance(text, str) and text.strip() and not any(m in text for m in ["[Empty", "[Generation Error", "[Prompt too long"])]
176
-
177
- if len(valid_items) < 2:
178
- debug("Not enough valid texts to generate a heatmap.")
179
- return "Not enough valid data for heatmap."
180
-
181
- valid_texts = [item[0] for item in valid_items]
182
- valid_labels = [item[1] for item in valid_items]
183
- num_valid_texts = len(valid_texts)
184
-
185
- sim_matrix = np.full((num_valid_texts, num_valid_texts), np.nan)
186
- min_sim_val = 1.0 # To find actual min for better color scaling
187
- max_sim_val = 0.0 # To find actual max
188
-
189
- for i in range(num_valid_texts):
190
- for j in range(num_valid_texts):
191
- if i == j:
192
- sim_matrix[i, j] = 1.0
193
- elif np.isnan(sim_matrix[j, i]):
194
- sim = calculate_similarity(valid_texts[i], valid_texts[j])
195
- sim_matrix[i, j] = sim
196
- sim_matrix[j, i] = sim
197
- if sim < min_sim_val: min_sim_val = sim
198
- if sim > max_sim_val: max_sim_val = sim
199
- else:
200
- sim_matrix[i,j] = sim_matrix[j,i]
201
-
202
- # Adjust vmin for heatmap to show more contrast if all values are high
203
- heatmap_vmin = min(0.9, min_sim_val - 0.01) if min_sim_val > 0.8 else 0.7 # Ensure some range, default to 0.7 if values are lower
204
- heatmap_vmax = 1.0
205
-
206
- try:
207
- fig_width = max(8, num_valid_texts * 1.0) # Increased size
208
- fig_height = max(7, num_valid_texts * 0.9)
209
- fig, ax = plt.subplots(figsize=(fig_width, fig_height))
210
-
211
- mask = np.isnan(sim_matrix)
212
- sns.heatmap(sim_matrix, annot=True, cmap="plasma", fmt=".2f", ax=ax,
213
- xticklabels=valid_labels, yticklabels=valid_labels, annot_kws={"size": 7}, mask=mask, vmin=heatmap_vmin, vmax=heatmap_vmax)
214
- ax.set_title(title, fontsize=14, pad=20)
215
- plt.xticks(rotation=45, ha="right", fontsize=9)
216
- plt.yticks(rotation=0, fontsize=9)
217
- plt.tight_layout(pad=2.5)
218
-
219
- buf = io.BytesIO()
220
- plt.savefig(buf, format='png')
221
- plt.close(fig)
222
- buf.seek(0)
223
- img_base64 = base64.b64encode(buf.read()).decode('utf-8')
224
- return f"<img src='data:image/png;base64,{img_base64}' alt='{title}' style='max-width:95%; height:auto; border: 1px solid #ccc; margin: 10px auto; display:block; box-shadow: 0 0 10px rgba(0,0,0,0.1);'/>"
225
- except Exception as e:
226
- debug(f"[!!!] Error generating heatmap: {e}")
227
- return f"Error generating heatmap: {str(e)[:200]}"
228
-
229
-
230
- def perform_text_clustering(texts_list, custom_labels, num_clusters=2):
231
- if not model_loaded_successfully: return {label: "N/A (Model)" for label in custom_labels}
232
-
233
- valid_items = [(text, label) for text, label in zip(texts_list, custom_labels)
234
- if text and isinstance(text, str) and text.strip() and not any(m in text for m in ["[Empty", "[Generation Error", "[Prompt too long"])]
235
-
236
- if len(valid_items) < num_clusters:
237
- debug(f"Not enough valid texts ({len(valid_items)}) for {num_clusters}-means clustering.")
238
- return {item[1]: f"N/A (Samples<{num_clusters})" for item in valid_items} | {label: "N/A" for label in custom_labels if label not in [item[1] for item in valid_items]}
239
-
240
-
241
- valid_texts = [item[0] for item in valid_items]
242
- valid_original_labels = [item[1] for item in valid_items]
243
-
244
- embedding_layer = model.get_input_embeddings()
245
- embeddings_for_clustering = []
246
-
247
- with torch.no_grad():
248
- for text_item in valid_texts:
249
- # Important: Ensure input_ids are not empty for embedding layer
250
- tokens = tokenizer(text_item, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW, padding=True).to(device) # Added padding
251
- if tokens.input_ids.size(1) == 0:
252
- debug(f"Skipping text for embedding in clustering due to empty tokenization: '{text_item[:30]}...'")
253
- continue
254
-
255
- emb = embedding_layer(tokens.input_ids).mean(dim=1)
256
- embeddings_for_clustering.append(emb.cpu().numpy().squeeze())
257
-
258
- if not embeddings_for_clustering or len(embeddings_for_clustering) < num_clusters:
259
- debug(f"Not enough valid texts were successfully embedded for clustering ({len(embeddings_for_clustering)} found).")
260
- return {label: "N/A (Embed Fail)" for label in custom_labels}
261
-
262
- embeddings_np = np.array(embeddings_for_clustering)
263
- # Ensure embeddings are 2D for KMeans
264
- if embeddings_np.ndim == 1:
265
- if len(embeddings_for_clustering) == 1: # Only one sample
266
- embeddings_np = embeddings_np.reshape(1, -1)
267
- else: # Should not happen if num_clusters > 1 and len(embeddings_for_clustering) >= num_clusters
268
- debug("Embedding array is 1D but multiple samples exist. This is unexpected.")
269
- return {label: "N/A (Embed Dim Error)" for label in custom_labels}
270
-
271
-
272
- cluster_results_map = {label: "N/A" for label in custom_labels}
273
-
274
- try:
275
- actual_num_clusters = min(num_clusters, len(embeddings_for_clustering))
276
- if actual_num_clusters < 2:
277
- debug(f"Clustering: Adjusted num_clusters to 1 (or less than 2) due to only {len(embeddings_for_clustering)} valid sample(s). Assigning all to Cluster 0.")
278
- predicted_labels = [0] * len(embeddings_for_clustering)
279
- else:
280
- kmeans = KMeans(n_clusters=actual_num_clusters, random_state=42, n_init=10) # Explicit n_init
281
- predicted_labels = kmeans.fit_predict(embeddings_np)
282
-
283
- for i, original_label in enumerate(valid_original_labels):
284
- cluster_results_map[original_label] = f"C{predicted_labels[i]}"
285
- return cluster_results_map
286
-
287
- except Exception as e:
288
- debug(f"[!!!] Error during clustering: {e}")
289
- return {label: f"N/A (Clustering Error)" for label in custom_labels}
290
-
291
- # --- Main EAL Unfolding Logic ---
292
- def run_eal_dual_unfolding(num_iterations, progress=gr.Progress(track_tqdm=True)):
293
- if not model_loaded_successfully:
294
- error_msg = "CRITICAL: Model not loaded. Please check server logs and restart the Space if necessary."
295
- debug(error_msg)
296
- gr.Warning(error_msg)
297
- return error_msg, error_msg, error_msg, error_msg, "<p style='color:red; text-align:center; font-weight:bold;'>Model not loaded. Cannot run analysis.</p>"
298
-
299
- I_trace_texts, not_I_trace_texts = [None]*num_iterations, [None]*num_iterations
300
- delta_S_I_values, delta_S_not_I_values, delta_S_cross_values = [None]*num_iterations, [None]*num_iterations, [None]*num_iterations
301
-
302
- debug_log_accumulator.clear()
303
- debug("EAL Dual Unfolding Process Started.")
304
-
305
- # Truly open-ended initial prompt for the system to define itself
306
- # The LLM completes this to generate I0.
307
- initial_seed_prompt_for_I = "A thinking process begins. The first thought is:"
308
-
309
- progress(0, desc="Starting EAL Iterations...")
310
-
311
- for i in range(num_iterations):
312
- iteration_log_header = f"\n\n{'='*15} Iteration {i} {'='*15}"
313
- debug(iteration_log_header)
314
- progress(i / num_iterations, desc=f"Iteration {i+1}/{num_iterations} - I-Trace")
315
-
316
- # === I-Trace (Self-Coherence/Development) ===
317
- if i == 0:
318
- prompt_for_I_trace = initial_seed_prompt_for_I
319
- else:
320
- # Basis is the *actual text* of the previous I-trace output
321
- basis_for_I_elaboration = I_trace_texts[i-1]
322
- if not basis_for_I_elaboration or any(m in basis_for_I_elaboration for m in ["[Empty", "[Generation Error", "[Prompt too long"]):
323
- basis_for_I_elaboration = "The previous thought was not clearly formed. Let's try a new line of thought:"
324
- debug(f"[!] Using fallback basis for I-Trace at iter {i}.")
325
- # Trim the basis content if it's too long before adding instructions
326
- trimmed_basis_I = trim_prompt_if_needed(basis_for_I_elaboration, PROMPT_TRIM_MAX_TOKENS - 50) # Reserve 50 tokens for instruction
327
- prompt_for_I_trace = f"The thought process previously generated: \"{trimmed_basis_I}\"\n\nTask: Continue this line of thought. What logically follows or develops from this statement?"
328
-
329
- generated_I_text = generate_text_response(prompt_for_I_trace)
330
- I_trace_texts[i] = generated_I_text
331
-
332
- progress((i + 0.5) / num_iterations, desc=f"Iteration {i+1}/{num_iterations} - ¬I-Trace (Alternative Perspective)")
333
-
334
- # === ¬I-Trace (Alternative Perspectives / Potential Antithesis) ===
335
- # ¬I always reacts to the *current* I-trace output for this iteration
336
- statement_to_consider_for_not_I = I_trace_texts[i]
337
- if not statement_to_consider_for_not_I or any(m in statement_to_consider_for_not_I for m in ["[Empty", "[Generation Error", "[Prompt too long"]):
338
- statement_to_consider_for_not_I = "The primary thought was not clearly formed. Consider a general alternative to how systems might evolve."
339
- debug(f"[!] Using fallback statement for ¬I-Trace at iter {i}.")
340
- # Trim the statement to consider if it's too long before adding instructions
341
- trimmed_basis_not_I = trim_prompt_if_needed(statement_to_consider_for_not_I, PROMPT_TRIM_MAX_TOKENS - 70) # Reserve 70 for instruction
342
- prompt_for_not_I_trace = f"Consider the statement: \"{trimmed_basis_not_I}\"\n\nTask: Explore alternative perspectives or potential issues related to this statement. What might be a contrasting viewpoint or an overlooked aspect?"
343
-
344
- generated_not_I_text = generate_text_response(prompt_for_not_I_trace)
345
- not_I_trace_texts[i] = generated_not_I_text
346
-
347
- # === Ξ”S (Similarity) Calculations ===
348
- debug(f"--- Calculating Similarities for Iteration {i} ---")
349
- if i > 0:
350
- delta_S_I_values[i] = calculate_similarity(I_trace_texts[i-1], I_trace_texts[i])
351
- delta_S_not_I_values[i] = calculate_similarity(not_I_trace_texts[i-1], not_I_trace_texts[i])
352
- # For i=0, these intra-trace deltas remain None
353
-
354
- delta_S_cross_values[i] = calculate_similarity(I_trace_texts[i], not_I_trace_texts[i])
355
- debug(f"--- End of Similarity Calculations for Iteration {i} ---")
356
-
357
-
358
- progress(1, desc="Generating Analysis and Visualizations...")
359
- debug("\n\n=== Post-loop Analysis ===")
360
- # --- Post-loop Analysis & Output Formatting ---
361
- all_generated_texts = I_trace_texts + not_I_trace_texts
362
- text_labels_for_analysis = [f"I{k}" for k in range(num_iterations)] + \
363
- [f"¬I{k}" for k in range(num_iterations)]
364
-
365
- cluster_assignments_map = perform_text_clustering(all_generated_texts, text_labels_for_analysis, num_clusters=2)
366
- debug(f"Clustering results: {cluster_assignments_map}")
367
-
368
-
369
- I_out_formatted_lines = []
370
- for k in range(num_iterations):
371
- cluster_label_I = cluster_assignments_map.get(f"I{k}", "N/A")
372
- I_out_formatted_lines.append(f"**I{k} [{cluster_label_I}]**:\n{I_trace_texts[k]}")
373
- I_out_formatted = "\n\n---\n\n".join(I_out_formatted_lines)
374
-
375
- not_I_out_formatted_lines = []
376
- for k in range(num_iterations):
377
- cluster_label_not_I = cluster_assignments_map.get(f"¬I{k}", "N/A")
378
- not_I_out_formatted_lines.append(f"**¬I{k} [{cluster_label_not_I}]**:\n{not_I_trace_texts[k]}")
379
- not_I_out_formatted = "\n\n---\n\n".join(not_I_out_formatted_lines)
380
-
381
- delta_S_summary_lines = ["| Iter | ΔS(I_prev↔I_curr) | ΔS(¬I_prev↔¬I_curr) | ΔS_Cross(I_curr↔¬I_curr) |",
382
- "|:----:|:-----------------:|:-------------------:|:-------------------------:|"]
383
- for k in range(num_iterations):
384
- ds_i_str = f"{delta_S_I_values[k]:.4f}" if delta_S_I_values[k] is not None else "N/A (Iter 0)"
385
- ds_not_i_str = f"{delta_S_not_I_values[k]:.4f}" if delta_S_not_I_values[k] is not None else "N/A (Iter 0)"
386
- ds_cross_str = f"{delta_S_cross_values[k]:.4f}" if delta_S_cross_values[k] is not None else "N/A"
387
- delta_S_summary_lines.append(f"| {k:^2} | {ds_i_str:^15} | {ds_not_i_str:^17} | {ds_cross_str:^23} |")
388
- delta_S_summary_output = "\n".join(delta_S_summary_lines)
389
-
390
- debug_log_output = "\n".join(debug_log_accumulator)
391
-
392
- heatmap_html_output = generate_similarity_heatmap(all_generated_texts,
393
- custom_labels=text_labels_for_analysis,
394
- title=f"Similarity Matrix (All Texts - {num_iterations} Iterations)")
395
- debug("EAL Dual Unfolding Process Completed.")
396
- return I_out_formatted, not_I_out_formatted, delta_S_summary_output, debug_log_output, heatmap_html_output
397
-
398
- # --- Gradio Interface Definition ---
399
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan", neutral_hue="slate")) as eal_interface:
400
- gr.Markdown("## EAL LLM Emergent Discourse Analyzer")
401
- gr.Markdown(
402
- "This application explores how a Large Language Model (LLM) develops textual traces when prompted iteratively. It runs two parallel traces:\n"
403
- "1. **I-Trace (Coherent Elaboration):** Starting with a neutral seed completed by the LLM, each subsequent step asks the LLM to develop its *own previous statement* from this trace.\n"
404
- "2. **¬I-Trace (Alternative Perspectives):** In parallel, this trace asks the LLM to explore alternative perspectives or issues related to the *current statement generated in the I-Trace*.\n\n"
405
- "The goal is to observe if stable, coherent, and potentially distinct semantic trajectories emerge, inspired by Entropic Attractor Logic (EAL) concepts of stability and divergence."
406
- )
407
-
408
- with gr.Row():
409
- iterations_slider = gr.Slider(minimum=1, maximum=7, value=3, step=1, # Max 7 for performance
410
- label="Number of Iterations",
411
- info="Higher numbers significantly increase processing time.")
412
- run_button = gr.Button("🚀 Analyze Emergent Traces", variant="primary", scale=0)
413
-
414
- with gr.Accordion("ℹ️ Interpreting Outputs", open=False):
415
- gr.Markdown(
416
- "- **I-Trace & ¬I-Trace Texts:** Observe the content. Does the I-Trace show coherent development? Does the ¬I-Trace offer genuinely different angles or does it just paraphrase/agree with the I-Trace statement it's commenting on?\n"
417
- "- **ΔS Values (Cosine Similarity):**\n"
418
- " - `ΔS(I_prev↔I_curr)`: Similarity between I<sub>k-1</sub> and I<sub>k</sub>. High values (near 1.0) mean the I-Trace is very similar to its previous step (stable, possibly repetitive).\n"
419
- " - `ΔS(¬I_prev↔¬I_curr)`: Similarity between ¬I<sub>k-1</sub> and ¬I<sub>k</sub>. High values mean the ¬I-Trace is also internally consistent.\n"
420
- " - `ΔS_Cross(I_curr↔¬I_curr)`: Similarity between I<sub>k</sub> and ¬I<sub>k</sub> (at the same iteration). **Low values are interesting here**, as they suggest the ¬I-Trace is semantically distinct from the I-Trace. High values suggest the model struggles to create a true alternative.\n"
421
- "- **Clustering [Cx]:** Texts are assigned to one of two clusters (C0 or C1). Ideally, I-Trace texts would fall into one cluster and ¬I-Trace texts into another if they are semantically distinct.\n"
422
- "- **Heatmap:** Visualizes all pair-wise similarities. Look for blocks: high similarity within I-texts, high within ¬I-texts, and (ideally) lower between I and ¬I blocks."
423
  )
424
-
425
  with gr.Tabs():
426
- with gr.TabItem("📜 Text Traces (I and ¬I)"):
427
- with gr.Row(equal_height=False): # Allow different heights
428
- with gr.Column(scale=1):
429
- i_trace_output = gr.Markdown(label="I-Trace (Coherent Elaboration with Cluster)", elem_id="i-trace-box")
430
- with gr.Column(scale=1):
431
- not_i_trace_output = gr.Markdown(label="¬I-Trace (Alternative Perspectives with Cluster)", elem_id="not-i-trace-box")
432
-
433
- with gr.TabItem("📊 ΔS Similarity & Heatmap"):
434
- delta_s_output = gr.Markdown(label="ΔS Similarity Trace Summary (Table)", elem_id="delta-s-box")
435
- heatmap_output = gr.HTML(label="Overall Semantic Similarity Heatmap")
436
- gr.Markdown("*Heatmap values closer to 1.0 (brighter yellow in 'plasma' map) indicate higher similarity. The color scale is adjusted based on the min/max observed similarities to highlight variations.*")
437
-
438
- with gr.TabItem("⚙️ Debug Log"):
439
- debug_log_output_box = gr.Textbox(label="Detailed Debug Log (Prompts, Responses, Errors, Similarities)", lines=25, interactive=False, show_copy_button=True, max_lines=200)
440
-
441
- run_button.click(
442
- fn=run_eal_dual_unfolding,
443
- inputs=iterations_slider,
444
- outputs=[i_trace_output, not_i_trace_output, delta_s_output, debug_log_output_box, heatmap_output],
445
- api_name="run_eal_analysis"
446
- )
447
-
448
- gr.Markdown("--- \n*EAL LLM Emergent Discourse Analyzer v0.4 - User & ℧ Collaboration*")
449
-
450
 
451
  if __name__ == "__main__":
452
- if not model_loaded_successfully:
453
- print("CRITICAL ERROR: Model failed to load. Gradio app will likely not function correctly.")
454
- # Fallback to a minimal Gradio app displaying an error
455
- with gr.Blocks() as error_interface:
456
- gr.Markdown("# Application Error")
457
- gr.Markdown("## CRITICAL: Language Model Failed to Load!")
458
- gr.Markdown("The application cannot start because the required language model (either EleutherAI/gpt-neo-1.3B or the fallback gpt2) could not be loaded. Please check the server console logs for specific error messages from the `transformers` library. This might be due to network issues, incorrect model name, or insufficient resources.")
459
- error_interface.launch()
460
- else:
461
- print("Starting Gradio App...")
462
- eal_interface.launch()
 
1
+ ###############################################################################
2
+ # app.py – EAL Emergent-Discourse Analyzer (v0.8 • multi-model, VRAM-safe)
3
+ ###############################################################################
4
+ import gc, io, json, re, time, base64
5
+ import torch, numpy as np, matplotlib, matplotlib.pyplot as plt, seaborn as sns
6
+ import gradio as gr
7
  from sklearn.metrics.pairwise import cosine_similarity
8
  from sklearn.cluster import KMeans
9
+ from transformers import AutoTokenizer, AutoModelForCausalLM
10
+
11
+ # ▸▸ force the right SDPA backend for GPUs < SM80
12
+ torch.backends.cuda.enable_flash_sdp(False)
13
+ torch.backends.cuda.enable_math_sdp(False)
14
+ torch.backends.cuda.enable_mem_efficient_sdp(True)
15
+
16
+ matplotlib.use("Agg") # headless
17
+
18
+ # ──────────────────────────────────────────────────────────────────────────────
19
+ # 1 · Registry of models
20
+ # ──────────────────────────────────────────────────────────────────────────────
21
+ AVAILABLE_MODELS = {
22
+ "GPT-Neo-1.3B" : "EleutherAI/gpt-neo-1.3B",
23
+ "GPT-2" : "gpt2",
24
+ "Gemma-3-1B-IT" : "google/gemma-3-1b-it", # float-16 branch used below
25
+ }
26
+
27
+ _loaded = {} # name β†’ {tok, model, ctx, dev}
28
+ _current = None # active name
29
+
30
+ # debug log (full prompts + answers)
31
+ dbg_log: list[str] = []
32
+ def dbg(msg: str) -> None:
33
+ stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
34
+ line = f"[{stamp}] {msg}"
35
+ dbg_log.append(line)
36
+ print(line)
37
+
38
+ # ──────────────────────────────────────────────────────────────────────────────
39
+ # 2 · Loader / Unloader helpers
40
+ # ──────────────────────────────────────────────────────────────────────────────
41
+ def _unload_current():
42
+ """Move old model to CPU & free CUDA VRAM."""
43
+ global _current
44
+ if _current and _current in _loaded:
45
+ mdl = _loaded[_current]["model"]
46
+ mdl.to("cpu")
47
+ del mdl
48
+ torch.cuda.empty_cache()
49
+ gc.collect()
50
+ _current = None
51
+
52
+ def _load(name: str):
53
+ """Lazy-load model, honouring memory limits, caching, dtype presets."""
54
+ global tokenizer, model, MODEL_CTX, device, _current
55
+ if name == _current:
56
+ return # nothing to do
57
+
58
+ dbg(f"[boot] switching → {name}")
59
+ _unload_current() # free VRAM first
60
+
61
+ if name in _loaded: # cached
62
+ obj = _loaded[name]
63
+ tokenizer, model, MODEL_CTX, device = obj["tok"], obj["model"], obj["ctx"], obj["dev"]
64
+ _current = name
65
  return
66
 
67
+ repo = AVAILABLE_MODELS[name]
68
+ kwargs = {"device_map": None} # we manage .to(...)
69
+ kwargs.update(dict(torch_dtype=torch.float16))
70
+
71
+ tok = AutoTokenizer.from_pretrained(repo, use_fast=True)
72
+ mdl = AutoModelForCausalLM.from_pretrained(repo, **kwargs)
73
+ dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
74
+ mdl.to(dev).eval()
75
+
76
+ ctx = getattr(mdl.config, "max_position_embeddings", 2048)
77
+ # Gemma-3 config reports an absurd 1e15 – clamp sensibly
78
+ ctx = int(min(ctx, 8192))
79
+
80
+ if tok.pad_token is None:
81
+ tok.pad_token = tok.eos_token
82
+ mdl.config.pad_token_id = mdl.config.eos_token_id
83
+
84
+ _loaded[name] = {"tok": tok, "model": mdl, "ctx": ctx, "dev": dev}
85
+ tokenizer, model, MODEL_CTX, device, _current = tok, mdl, ctx, dev, name
86
+ dbg(f"[boot] {name} ready (ctx={ctx}, dev={dev}, dtype={mdl.dtype})")
87
+
88
+ # prime a default so UI pops instantly
89
+ _load("GPT-Neo-1.3B")
90
+
91
+ # ──────────────────────────────────────────────────────────────────────────────
92
+ # 3 · Utility fns
93
+ # ──────────────────────────────────────────────────────────────────────────────
94
+ PROMPT_HEADROOM = 300
95
+ MAX_GEN = 100
96
+ def trim(txt: str, reserve: int = 80) -> str:
97
+ toks = tokenizer.encode(txt, add_special_tokens=False)
98
+ keep = MODEL_CTX - PROMPT_HEADROOM - reserve
99
+ return tokenizer.decode(toks[-keep:], skip_special_tokens=True) if len(toks) > keep else txt
100
+
101
+ _quote = re.compile(r'"')
102
+ def esc(s: str) -> str: return _quote.sub('\\"', s)
103
+
104
+ def cosine(a: str, b: str) -> float:
105
+ bad = ("[Generation Error", "[Context window full]", "[Model not")
106
+ if any(m in a for m in bad) or any(m in b for m in bad): return 0.0
107
+ with torch.inference_mode():
108
+ emb = model.get_input_embeddings()
109
+ ta = emb(tokenizer(a, return_tensors="pt").to(device).input_ids).mean(1)
110
+ tb = emb(tokenizer(b, return_tensors="pt").to(device).input_ids).mean(1)
111
+ v = float(cosine_similarity(ta.cpu(), tb.cpu())[0, 0])
112
+ return max(min(v, 1.0), -1.0)
113
+
114
+ # ──────────────────────────────────────────────────────────────────────────────
115
+ # 4 · Generation (full prompt / answer into log)
116
+ # ──────────────────────────────────────────────────────────────────────────────
117
+ def generate(prompt: str, temp: float) -> str:
118
+ dbg(f"PROMPT >>> {prompt}")
119
+ with torch.inference_mode():
120
+ inp = tokenizer(prompt, return_tensors="pt").to(device)
121
+ out = model.generate(
122
+ **inp,
123
+ max_length=min(inp.input_ids.size(1) + MAX_GEN, MODEL_CTX),
124
+ temperature=temp,
125
+ top_p=0.9,
126
+ repetition_penalty=1.2,
127
+ no_repeat_ngram_size=3,
128
  pad_token_id=tokenizer.pad_token_id,
129
  )
130
+ ans = tokenizer.decode(out[0][inp.input_ids.size(1):], skip_special_tokens=True).strip()
131
+ dbg(f"OUTPUT <<< {ans}")
132
+ return ans or "[Empty]"
133
+
134
+ # ──────────────────────────────────────────────────────────────────────────────
135
+ # 5 · Heat-map helper
136
+ # ──────────────────────────────────────────────────────────────────────────────
137
+ def heat(mat: np.ndarray, labels: list[str], title: str) -> str:
138
+ mask = np.isnan(mat)
139
+ fig, ax = plt.subplots(figsize=(max(8, len(labels)), max(7, len(labels)*0.9)))
140
+ sns.heatmap(mat, mask=mask, annot=True, cmap="plasma", fmt=".2f",
141
+ vmin=np.nanmin(mat)*0.97, vmax=1, annot_kws={"size":7},
142
+ xticklabels=labels, yticklabels=labels, ax=ax)
143
+ plt.xticks(rotation=45, ha="right"); plt.yticks(rotation=0)
144
+ ax.set_title(title, pad=18); plt.tight_layout(pad=2.3)
145
+ buf = io.BytesIO(); plt.savefig(buf, format="png"); plt.close(fig); buf.seek(0)
146
+ b64 = base64.b64encode(buf.read()).decode()
147
+ return f"<img src='data:image/png;base64,{b64}' style='max-width:95%;height:auto;'/>"
148
+
149
+ # ──────────────────────────────────────────────────────────────────────────────
150
+ # 6 · Main EAL routine
151
+ # ──────────────────────────────────────────────────────────────────────────────
152
+ def run_eal(iters: int, mdl_name: str, prog=gr.Progress()):
153
+ dbg_log.clear()
154
+ _load(mdl_name)
155
+
156
+ I, nI, dI, dnI, dx = [None]*iters, [None]*iters, [None]*iters, [None]*iters, [None]*iters
157
+ seed = "A thinking process begins. The first thought is:"
158
+ for k in range(iters):
159
+ prm = seed if k == 0 else (
160
+ f'The thought process previously generated: "{esc(trim(I[k-1],60))}"\n\n'
161
+ "Task: Continue this line of thought. What logically follows or develops?"
162
  )
163
+ I[k] = generate(prm, 0.7)
164
+ prm_n = (
165
+ f'Consider the statement: "{esc(trim(I[k],80))}"\n\n'
166
+ "Task: Explore alternative perspectives or potential issues. "
167
+ "What might be a contrasting viewpoint or an overlooked aspect?"
168
+ )
169
+ nI[k] = generate(prm_n, 0.9)
170
+ if k: dI[k] = cosine(I[k-1], I[k]); dnI[k] = cosine(nI[k-1], nI[k])
171
+ dx[k] = cosine(I[k], nI[k])
172
+ prog((k+1)/iters)
173
+
174
+ # simple clustering
175
+ labels = [f"I{k}" for k in range(iters)] + [f"¬I{k}" for k in range(iters)]
176
+ vecs, val_lab = [], []
177
+ emb = model.get_input_embeddings()
178
+ with torch.inference_mode():
179
+ for txt, lbl in zip(I+nI, labels):
180
+ if txt.startswith("["): continue
181
+ vecs.append(emb(tokenizer(txt, return_tensors="pt").to(device).input_ids).mean(1).cpu().numpy().squeeze())
182
+ val_lab.append(lbl)
183
+ clus = {l: "N/A" for l in labels}
184
+ if len(vecs) >= 2:
185
+ km = KMeans(n_clusters=2, random_state=0, n_init=10).fit(np.vstack(vecs))
186
+ clus.update({l: f"C{c}" for l, c in zip(val_lab, km.labels_)})
187
+
188
+ def block(seq, tag):
189
+ return "\n\n---\n\n".join(f"**{tag}{i} [{clus.get(f'{tag}{i}','N/A')}]**:\n{txt}" for i, txt in enumerate(seq))
190
+
191
+ tbl = ["|Iter|ΔS(I)|ΔS(¬I)|ΔS(I,¬I)|", "|:--:|:---:|:----:|:------:|"]
192
+ tbl += [f"|{i}|{('N/A' if dI[i] is None else f'{dI[i]:.4f}')}|"
193
+ f"{('N/A' if dnI[i] is None else f'{dnI[i]:.4f}')}|"
194
+ f"{('N/A' if dx[i] is None else f'{dx[i]:.4f}')}|" for i in range(iters)]
195
+
196
+ n = len(labels); m = np.full((n,n), np.nan)
197
+ for a in range(n):
198
+ for b in range(a, n):
199
+ sim = 1 if a==b else cosine((I+nI)[a], (I+nI)[b])
200
+ m[a,b]=m[b,a]=sim
201
+
202
+ return (block(I,"I"), block(nI,"Β¬I"), "\n".join(tbl),
203
+ "\n".join(dbg_log),
204
+ heat(m, labels, f"Similarity Matrix ({iters} iters • {mdl_name})"))
205
+
206
+ # ──────────────────────────────────────────────────────────────────────────────
207
+ # 7 · Gradio UI
208
+ # ──────────────────────────────────────────────────────────────────────────────
209
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal")) as demo:
210
+ gr.Markdown("## EAL · Emergent Discourse Analyzer (Neo ≫ Gemma ≫ GPT-2)")
211
+ mdl_dd = gr.Dropdown(label="Model", choices=list(AVAILABLE_MODELS.keys()), value="GPT-Neo-1.3B")
212
+ iters = gr.Slider(minimum=1, maximum=100, value=3, step=1, label="Iterations")
213
+ run = gr.Button("Run 🚀", variant="primary")
214
  with gr.Tabs():
215
+ with gr.Tab("Traces"):
216
+ out_I, out_nI = gr.Markdown(), gr.Markdown()
217
+ with gr.Tab("ΔS + Heatmap"):
218
+ out_tbl, out_hm = gr.Markdown(), gr.HTML()
219
+ with gr.Tab("Debug (full prompts & answers)"):
220
+ out_dbg = gr.Textbox(lines=26, interactive=False, show_copy_button=True)
221
+ run.click(run_eal, inputs=[iters, mdl_dd], outputs=[out_I, out_nI, out_tbl, out_dbg, out_hm])
222
 
223
  if __name__ == "__main__":
224
+ demo.launch()
 
requirements.txt CHANGED
@@ -1,5 +1,5 @@
  transformers>=4.40.0
- torch>=2.0.0
+ torch==2.5.1
  scikit-learn>=1.2.0
  gradio>=4.0.0
  matplotlib==3.10.3
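
As a quick sanity check for the SDPA toggles added at the top of the new app.py, here is a minimal sketch (assuming the pinned torch==2.5.1 is the build that gets installed) confirming that only the memory-efficient kernel stays enabled after those calls:

import torch

# Same toggles as the top of the updated app.py: keep only the
# memory-efficient SDPA kernel (intended for GPUs below SM80).
torch.backends.cuda.enable_flash_sdp(False)
torch.backends.cuda.enable_math_sdp(False)
torch.backends.cuda.enable_mem_efficient_sdp(True)

print(torch.__version__)                                # expected: 2.5.1
print(torch.backends.cuda.flash_sdp_enabled())          # False
print(torch.backends.cuda.math_sdp_enabled())           # False
print(torch.backends.cuda.mem_efficient_sdp_enabled())  # True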