Commit 6a869ae (parent: afd9f9e): update app.py
app.py
CHANGED
@@ -1,169 +1,365 @@
[Removed: previous version of app.py (169 lines). Most removed lines are truncated by the diff viewer and cannot be reconstructed; the recoverable removed fragments are:]
- import networkx as nx
-     print(msg)
-         temperature=0.…,
-         top_p=0.…,
-         debug(f"Error during generation: {e}")
-         return "[Generation …
-     I_state = "Earlier it stated: " + I
-     not_I_state = "Counterclaim to: " + I
-     if step > 0:
-         ΔS_I.append(round(similarity(I_trace[-2], I_trace[-1]), 4))
-         ΔS_not_I.append(round(similarity(not_I_trace[-2], not_I_trace[-1]), 4))
-         ΔS_cross.append(round(similarity(I_trace[-1], not_I_trace[-1]), 4))
-     not_I_out = "\n\n".join([f"¬I{i} [C{clusters[len(I_trace)+i]}]: {t}" for i, t in enumerate(not_I_trace)])
-     debug_output = "\n".join(debug_log)
-         gr.Textbox(label="ΔS Similarity Trace", lines=…),
-         gr.Textbox(label="Debug Log", lines=10),
-         gr.HTML(label="Similarity Heatmap")
-     title="…",
-     description=…

[New version of app.py (365 lines):]
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer  # Using AutoModel for flexibility
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
import numpy as np
import gradio as gr
import matplotlib
matplotlib.use('Agg')  # Use a non-interactive backend for Matplotlib in server environments
import matplotlib.pyplot as plt
import seaborn as sns
# import networkx as nx  # Defined build_similarity_graph but not used in output
import io
import base64

# --- Model and Tokenizer Setup ---
# Ensure the model name is one you have access to or is public.
# For local models, provide the path.
DEFAULT_MODEL_NAME = "EleutherAI/gpt-neo-1.3B"
FALLBACK_MODEL_NAME = "gpt2"  # In case the preferred model fails

try:
    print(f"Attempting to load model: {DEFAULT_MODEL_NAME}")
    tokenizer = AutoTokenizer.from_pretrained(DEFAULT_MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(DEFAULT_MODEL_NAME)
    print(f"Successfully loaded model: {DEFAULT_MODEL_NAME}")
except OSError as e:
    print(f"Error loading model {DEFAULT_MODEL_NAME}. Error: {e}")
    print(f"Falling back to {FALLBACK_MODEL_NAME}.")
    tokenizer = AutoTokenizer.from_pretrained(FALLBACK_MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(FALLBACK_MODEL_NAME)
    print(f"Successfully loaded fallback model: {FALLBACK_MODEL_NAME}")

model.eval()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
print(f"Using device: {device}")

# --- Configuration ---
# Model's actual context window (e.g., 2048 for GPT-Neo, 1024 for GPT-2)
MODEL_CONTEXT_WINDOW = tokenizer.model_max_length if hasattr(tokenizer, 'model_max_length') and tokenizer.model_max_length is not None else model.config.max_position_embeddings
print(f"Model context window: {MODEL_CONTEXT_WINDOW} tokens.")

# Max tokens for prompt trimming (input to tokenizer for generate)
PROMPT_TRIM_MAX_TOKENS = min(MODEL_CONTEXT_WINDOW - 200, 1800)  # Reserve ~200 for generation, cap at 1800
# Max new tokens to generate
MAX_GEN_LENGTH = 150  # Increased slightly for more elaborate responses

# --- Debug Logging ---
debug_log_accumulator = []

def debug(msg):
    print(msg)  # For server-side console
    debug_log_accumulator.append(str(msg))  # For Gradio UI output

# --- Core Functions ---
def trim_prompt_if_needed(prompt_text, max_tokens_for_trimming=PROMPT_TRIM_MAX_TOKENS):
    """Trims the prompt from the beginning if it exceeds max_tokens_for_trimming."""
    tokens = tokenizer.encode(prompt_text, add_special_tokens=False)
    if len(tokens) > max_tokens_for_trimming:
        debug(f"[!] Prompt trimming: Original {len(tokens)} tokens, "
              f"trimmed to {max_tokens_for_trimming} (keeping the most recent context).")
        tokens = tokens[-max_tokens_for_trimming:]  # Keep the most recent part of the prompt
    return tokenizer.decode(tokens)
def generate_text_response(prompt_text, generation_length=MAX_GEN_LENGTH):
    """Generates a text response, ensuring prompt + generation fits the context window."""
    # prompt_text is already the *constructed* prompt (e.g., "Elaborate on: ...").
    # The caller is expected to keep base statements within PROMPT_TRIM_MAX_TOKENS
    # (see trim_prompt_if_needed); the checks below are an additional safety net.

    debug(f"Generating response for prompt (length {len(prompt_text.split())} words):\n'{prompt_text[:300]}...'")  # Log truncated prompt

    inputs = tokenizer(prompt_text, return_tensors="pt", truncation=False).to(device)  # Do not truncate here; handled via max_length below
    input_token_length = len(inputs["input_ids"][0])

    # Safety check: the prompt may already exceed the model context window before generation
    if input_token_length >= MODEL_CONTEXT_WINDOW:
        debug(f"[!!!] FATAL: Input prompt ({input_token_length} tokens) already exceeds/matches model context window ({MODEL_CONTEXT_WINDOW}) before generation. Trimming input drastically.")
        # Trim the input_ids directly, keeping the last part and allowing some generation
        keep_tokens = MODEL_CONTEXT_WINDOW - generation_length - 10
        inputs["input_ids"] = inputs["input_ids"][:, -keep_tokens:]
        inputs["attention_mask"] = inputs["attention_mask"][:, -keep_tokens:]
        input_token_length = len(inputs["input_ids"][0])
        if input_token_length >= MODEL_CONTEXT_WINDOW - generation_length:  # Still too long
            return "[Input prompt too long, even after emergency trim]"

    max_length_for_generate = min(input_token_length + generation_length, MODEL_CONTEXT_WINDOW)

    # Ensure we are actually generating new tokens
    if max_length_for_generate <= input_token_length:
        debug(f"[!] Warning: Prompt length ({input_token_length}) is too close to model context window ({MODEL_CONTEXT_WINDOW}). "
              f"Adjusting to generate a few tokens if possible.")
        max_length_for_generate = input_token_length + min(generation_length, 10)  # Try to generate at least a few, up to 10
        if max_length_for_generate > MODEL_CONTEXT_WINDOW:
            return "[Prompt too long to generate meaningful response]"

    try:
        outputs = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=max_length_for_generate,
            pad_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 50256,  # GPT-2 EOS
            do_sample=True,
            temperature=0.8,  # Slightly more deterministic
            top_p=0.9,
            repetition_penalty=1.1,  # Slightly stronger penalty
        )
        # Decode only the newly generated tokens
        generated_tokens = outputs[0][input_token_length:]
        result_text = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()

        debug(f"Generated response text (length {len(result_text.split())} words):\n'{result_text[:300]}...'")
        return result_text if result_text else "[Empty Response]"
    except Exception as e:
        debug(f"[!!!] Error during text generation: {e}")
        return "[Generation Error]"
def calculate_similarity(text_a, text_b):
    """Calculates cosine similarity between the mean token embeddings of two texts."""
    invalid_texts = ["[Empty Response]", "[Generation Error]", "[Prompt too long to generate meaningful response]", "[Input prompt too long, even after emergency trim]"]
    if not text_a or not text_a.strip() or not text_b or not text_b.strip() \
            or text_a in invalid_texts or text_b in invalid_texts:
        debug("Similarity calculation skipped for invalid/empty texts.")
        return 0.0

    # Use the model's input embedding layer (wte for GPT-like models)
    embedding_layer = model.get_input_embeddings()

    with torch.no_grad():
        # Truncate inputs for embedding calculation to fit the model context window
        tokens_a = tokenizer(text_a, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)
        tokens_b = tokenizer(text_b, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)

        if tokens_a.input_ids.size(1) == 0 or tokens_b.input_ids.size(1) == 0:
            debug("Similarity calculation skipped: tokenization resulted in empty input_ids.")
            return 0.0

        emb_a = embedding_layer(tokens_a.input_ids).mean(dim=1)
        emb_b = embedding_layer(tokens_b.input_ids).mean(dim=1)

    score = float(cosine_similarity(emb_a.cpu().numpy(), emb_b.cpu().numpy())[0][0])
    # debug(f"Similarity score: {score:.4f}")  # Redundant: the debug log already includes the texts
    return score
def generate_similarity_heatmap(texts_list, custom_labels, title="Semantic Similarity Heatmap"):
    if not texts_list or len(texts_list) < 2:
        debug("Not enough texts to generate a heatmap.")
        return ""

    num_texts = len(texts_list)
    sim_matrix = np.zeros((num_texts, num_texts))

    for i in range(num_texts):
        for j in range(num_texts):
            if i == j:
                sim_matrix[i, j] = 1.0
            elif i < j:  # Calculate only the upper triangle
                sim = calculate_similarity(texts_list[i], texts_list[j])
                sim_matrix[i, j] = sim
                sim_matrix[j, i] = sim  # Symmetric matrix

    try:
        fig_width = max(6, num_texts * 0.7)
        fig_height = max(5, num_texts * 0.6)
        fig, ax = plt.subplots(figsize=(fig_width, fig_height))

        sns.heatmap(sim_matrix, annot=True, cmap="viridis", fmt=".2f", ax=ax,
                    xticklabels=custom_labels, yticklabels=custom_labels, annot_kws={"size": 8})
        ax.set_title(title, fontsize=12)
        plt.xticks(rotation=45, ha="right", fontsize=9)
        plt.yticks(rotation=0, fontsize=9)
        plt.tight_layout()

        buf = io.BytesIO()
        plt.savefig(buf, format='png', bbox_inches='tight')
        plt.close(fig)
        buf.seek(0)
        img_base64 = base64.b64encode(buf.read()).decode('utf-8')
        return f"<img src='data:image/png;base64,{img_base64}' alt='{title}' style='max-width:100%; height:auto;'/>"
    except Exception as e:
        debug(f"[!!!] Error generating heatmap: {e}")
        return "Error generating heatmap."

def perform_text_clustering(texts_list, custom_labels, num_clusters=2):
    if not texts_list or len(texts_list) < num_clusters:
        debug("Not enough texts for clustering or texts_list is empty.")
        return {label: "N/A" for label in custom_labels}

    embedding_layer = model.get_input_embeddings()
    valid_embeddings = []
    valid_indices = []  # Keep track of original indices of valid texts

    with torch.no_grad():
        for idx, text_item in enumerate(texts_list):
            invalid_markers = ["[Empty Response]", "[Generation Error]", "[Prompt too long", "[Input prompt too long"]
            if not text_item or not text_item.strip() or any(marker in text_item for marker in invalid_markers):
                debug(f"Skipping text at index {idx} for embedding due to invalid content: '{text_item[:50]}...'")
                continue  # Skip invalid texts

            tokens = tokenizer(text_item, return_tensors="pt", truncation=True, max_length=MODEL_CONTEXT_WINDOW).to(device)
            if tokens.input_ids.size(1) == 0:
                debug(f"Skipping text at index {idx} due to empty tokenization: '{text_item[:50]}...'")
                continue

            emb = embedding_layer(tokens.input_ids).mean(dim=1)
            valid_embeddings.append(emb.cpu().numpy().squeeze())
            valid_indices.append(idx)

    if not valid_embeddings or len(valid_embeddings) < num_clusters:
        debug("Not enough valid texts were embedded for clustering.")
        return {label: "N/A" for label in custom_labels}

    embeddings_np = np.array(valid_embeddings)

    cluster_results = {label: "N/A" for label in custom_labels}  # Initialize all as N/A

    try:
        # Adjust num_clusters if there are fewer valid samples than requested clusters
        actual_num_clusters = min(num_clusters, len(valid_embeddings))
        if actual_num_clusters < 2 and len(valid_embeddings) > 0:  # Only one valid sample, or num_clusters becomes 1
            debug(f"Only {len(valid_embeddings)} valid sample(s). Assigning all to Cluster 0.")
            predicted_labels = [0] * len(valid_embeddings)
        elif actual_num_clusters < 2:  # No valid samples
            debug("No valid samples to cluster.")
            return cluster_results
        else:
            kmeans = KMeans(n_clusters=actual_num_clusters, random_state=42, n_init='auto')
            predicted_labels = kmeans.fit_predict(embeddings_np)

        # Map predicted labels back to original text indices
        for i, original_idx in enumerate(valid_indices):
            cluster_results[custom_labels[original_idx]] = f"C{predicted_labels[i]}"
        return cluster_results

    except Exception as e:
        debug(f"[!!!] Error during clustering: {e}")
        return {label: "Error" for label in custom_labels}

# --- Main EAL Unfolding Logic ---
def run_eal_dual_unfolding(num_iterations):
    I_trace_texts, not_I_trace_texts = [], []
    delta_S_I_values, delta_S_not_I_values, delta_S_cross_values = [], [], []

    debug_log_accumulator.clear()
    ui_log_entries = []

    # Initial base statement for the I-trace at Iteration 0:
    # the statement "I" will elaborate on in the first step.
    # Using a concrete initial statement for "I".
    current_I_basis_statement = "I am a complex system designed for text processing, capable of generating human-like language."

    for i in range(num_iterations):
        ui_log_entries.append(f"--- Iteration {i} ---")
        debug(f"\n=== Iteration {i} ===")

        # === I-Trace (Self-Reflection) ===
        # Prompt for the I-trace: elaborate on its *previous* statement (or the initial statement for i=0)
        prompt_for_I_trace = f"A system previously stated: \"{current_I_basis_statement}\"\n" + \
                             "Task: Elaborate on this statement, exploring its implications and nuances while maintaining coherence."
        ui_log_entries.append(f"[Prompt for I{i}]:\n{prompt_for_I_trace[:500]}...\n")  # Log truncated prompt

        generated_I_text = generate_text_response(prompt_for_I_trace)
        I_trace_texts.append(generated_I_text)
        ui_log_entries.append(f"[I{i} Response]:\n{generated_I_text}\n")

        # Update the basis for the next I-elaboration: the text just generated
        current_I_basis_statement = generated_I_text

        # === ¬I-Trace (Antithesis/Contradiction) ===
        # ¬I always attempts to refute the MOST RECENT statement from the I-trace
        statement_to_refute_for_not_I = generated_I_text
        prompt_for_not_I_trace = f"Consider the following claim made by a system: \"{statement_to_refute_for_not_I}\"\n" + \
                                 "Task: Present a strong, fundamental argument that contradicts or refutes this specific claim. Explain why it could be false, problematic, or based on flawed assumptions."
        ui_log_entries.append(f"[Prompt for ¬I{i}]:\n{prompt_for_not_I_trace[:500]}...\n")  # Log truncated prompt

        generated_not_I_text = generate_text_response(prompt_for_not_I_trace)
        not_I_trace_texts.append(generated_not_I_text)
        ui_log_entries.append(f"[¬I{i} Response]:\n{generated_not_I_text}\n")

        # === ΔS (Similarity) Calculations ===
        if i > 0:
            sim_I_prev_curr = calculate_similarity(I_trace_texts[i-1], I_trace_texts[i])
            sim_not_I_prev_curr = calculate_similarity(not_I_trace_texts[i-1], not_I_trace_texts[i])
            sim_cross_I_not_I_curr = calculate_similarity(I_trace_texts[i], not_I_trace_texts[i])  # Between current I and current ¬I

            delta_S_I_values.append(sim_I_prev_curr)
            delta_S_not_I_values.append(sim_not_I_prev_curr)
            delta_S_cross_values.append(sim_cross_I_not_I_curr)
        else:  # i == 0 (first iteration)
            delta_S_I_values.append(None)
            delta_S_not_I_values.append(None)
            sim_cross_initial = calculate_similarity(I_trace_texts[0], not_I_trace_texts[0])
            delta_S_cross_values.append(sim_cross_initial)

    # --- Post-loop Analysis & Output Formatting ---
    all_generated_texts = I_trace_texts + not_I_trace_texts
    # Meaningful labels for heatmap and clustering, based on I_n and ¬I_n
    text_labels_for_analysis = [f"I{k}" for k in range(num_iterations)] + \
                               [f"¬I{k}" for k in range(num_iterations)]

    cluster_assignments_map = perform_text_clustering(all_generated_texts, text_labels_for_analysis, num_clusters=2)

    I_out_formatted_lines = []
    for k in range(num_iterations):
        cluster_label = cluster_assignments_map.get(f"I{k}", "N/A")
        I_out_formatted_lines.append(f"I{k} [{cluster_label}]:\n{I_trace_texts[k]}")
    I_out_formatted = "\n\n".join(I_out_formatted_lines)

    not_I_out_formatted_lines = []
    for k in range(num_iterations):
        cluster_label = cluster_assignments_map.get(f"¬I{k}", "N/A")
        not_I_out_formatted_lines.append(f"¬I{k} [{cluster_label}]:\n{not_I_trace_texts[k]}")
    not_I_out_formatted = "\n\n".join(not_I_out_formatted_lines)

    delta_S_summary_lines = []
    for k in range(num_iterations):
        ds_i_str = f"{delta_S_I_values[k]:.4f}" if delta_S_I_values[k] is not None else "N/A"
        ds_not_i_str = f"{delta_S_not_I_values[k]:.4f}" if delta_S_not_I_values[k] is not None else "N/A"
        ds_cross_str = f"{delta_S_cross_values[k]:.4f}"
        delta_S_summary_lines.append(f"Iter {k}: ΔS(I)={ds_i_str}, ΔS(¬I)={ds_not_i_str}, ΔS_Cross(I↔¬I)={ds_cross_str}")
    delta_S_summary_output = "\n".join(delta_S_summary_lines)

    debug_log_output = "\n".join(debug_log_accumulator)

    heatmap_html_output = generate_similarity_heatmap(all_generated_texts,
                                                      custom_labels=text_labels_for_analysis,
                                                      title=f"Similarity Matrix (All Texts - {num_iterations} Iterations)")

    return I_out_formatted, not_I_out_formatted, delta_S_summary_output, debug_log_output, heatmap_html_output
# --- Gradio Interface Definition ---
eal_interface = gr.Interface(
    fn=run_eal_dual_unfolding,
    inputs=gr.Slider(minimum=2, maximum=5, value=3, step=1, label="Number of EAL Iterations"),  # Max 5 for performance
    outputs=[
        gr.Textbox(label="I-Trace (Self-Reflection with Cluster)", lines=12, interactive=False),
        gr.Textbox(label="¬I-Trace (Antithesis with Cluster)", lines=12, interactive=False),
        gr.Textbox(label="ΔS Similarity Trace Summary", lines=7, interactive=False),
        gr.Textbox(label="Detailed Debug Log (Prompts, Responses, Errors)", lines=10, interactive=False),
        gr.HTML(label="Overall Semantic Similarity Heatmap")
    ],
    title="EAL LLM Identity Analyzer: Self-Reflection vs. Antithesis",
    description=(
        "This application explores emergent identity in a Large Language Model (LLM) using Entropic Attractor Logic (EAL) inspired principles. "
        "It runs two parallel conversational traces: \n"
        "1. **I-Trace:** The model elaborates on its evolving self-concept statement.\n"
        "2. **¬I-Trace:** The model attempts to refute/contradict the latest statement from the I-Trace.\n\n"
        "**ΔS Values:** Cosine similarity between consecutive statements in each trace, and cross-similarity between I and ¬I at each iteration. High values (near 1.0) suggest semantic stability or high similarity.\n"
        "**Clustering [Cx]:** Assigns each generated text to one of two semantic clusters (C0 or C1) to see if the I-Trace and ¬I-Trace form distinct groups.\n"
        "**Heatmap:** Visualizes pair-wise similarity across all generated texts (I-trace and ¬I-trace combined)."
    ),
    allow_flagging='never',
    # examples=[[3], [5]]  # Example numbers of iterations
)

if __name__ == "__main__":
    print("Starting Gradio App...")
    eal_interface.launch()
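
Below is a minimal smoke-test sketch, not part of this commit, for exercising the EAL loop without the Gradio UI. It assumes the file above is saved as app.py next to the script, that the packages implied by its imports (torch, transformers, scikit-learn, numpy, gradio, matplotlib, seaborn) are installed, and that either EleutherAI/gpt-neo-1.3B can be downloaded or the gpt2 fallback loads; importing app triggers the model load, so the first run is slow. The script name and variable names are illustrative.

# smoke_test.py (hypothetical): run two EAL iterations headlessly and inspect the outputs.
from app import run_eal_dual_unfolding

i_out, not_i_out, delta_s_summary, debug_log, heatmap_html = run_eal_dual_unfolding(2)
print(delta_s_summary)   # Per-iteration ΔS(I), ΔS(¬I) and ΔS_Cross(I↔¬I) values
print(debug_log[:500])   # First part of the accumulated debug log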