import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from sklearn.metrics.pairwise import cosine_similarity
import gradio as gr
# Load GPT-2 and its tokenizer
model_name = "gpt2"
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
model.eval()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Module-level debug log, surfaced as the fourth Gradio output
debug_log = []

def debug(msg):
    print(msg)
    debug_log.append(str(msg))
# Generate a GPT-2 continuation, returning only the newly generated text
def generate_response(prompt, max_new_tokens=100):
    debug(f"Generating response for prompt:\n{prompt}")
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=0.9,
        top_p=0.95,
    )
    # Decode only the tokens after the prompt, so the prompt text is not
    # quoted back into the next step's trace
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    result = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
    debug(f"Response:\n{result}")
    return result
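# Note: sampling (do_sample=True) makes every run non-deterministic; for
# repeatable traces, one could seed the generator first, e.g. torch.manual_seed(0).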
# Cosine similarity over mean-pooled token embeddings, used as a ΔS proxy
def similarity(a, b):
    tok_a = tokenizer(a, return_tensors="pt").to(device)
    tok_b = tokenizer(b, return_tensors="pt").to(device)
    with torch.no_grad():
        # Mean-pool GPT-2's static input embeddings (wte) over the sequence
        emb_a = model.transformer.wte(tok_a.input_ids).mean(dim=1)
        emb_b = model.transformer.wte(tok_b.input_ids).mean(dim=1)
    score = float(cosine_similarity(emb_a.cpu().numpy(), emb_b.cpu().numpy())[0][0])
    debug(f"Similarity: {score}")
    return score
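# The ΔS proxy above uses static input embeddings, which ignore word order and
# context. A richer but slower alternative (a sketch, not wired into the app;
# the name similarity_contextual is ours) would mean-pool the final hidden
# states instead:
def similarity_contextual(a, b):
    enc_a = tokenizer(a, return_tensors="pt").to(device)
    enc_b = tokenizer(b, return_tensors="pt").to(device)
    with torch.no_grad():
        # hidden_states[-1] is the last transformer layer: (batch, seq, hidden)
        h_a = model(**enc_a, output_hidden_states=True).hidden_states[-1].mean(dim=1)
        h_b = model(**enc_b, output_hidden_states=True).hidden_states[-1].mean(dim=1)
    return float(cosine_similarity(h_a.cpu().numpy(), h_b.cpu().numpy())[0][0])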
# Dual unfolding: I (self-view) and ¬I (contradiction), generated in parallel
def dual_identity_unfolding(n_steps):
    I_trace, not_I_trace = [], []
    ΔS_I, ΔS_not_I, ΔS_cross = [], [], []
    debug_log.clear()
    log = []
    current_I = "The system begins to think about itself:\n"
    current_not_I = "The system begins to contradict its previous thoughts:\n"
    for step in range(int(n_steps)):  # slider values may arrive as floats
        log.append(f"--- Step {step} ---")
        I = generate_response(current_I)
        not_I = generate_response(current_not_I)
        I_trace.append(I)
        not_I_trace.append(not_I)
        log.append(f"[Iₙ]:\n{I}\n")
        log.append(f"[¬Iₙ]:\n{not_I}\n")
        if step > 0:
            s_I = similarity(I_trace[step - 1], I_trace[step])
            s_not_I = similarity(not_I_trace[step - 1], not_I_trace[step])
            s_cross = similarity(I_trace[step], not_I_trace[step])
            ΔS_I.append(round(s_I, 4))
            ΔS_not_I.append(round(s_not_I, 4))
            ΔS_cross.append(round(s_cross, 4))
            log.append(f"ΔS(I{step - 1} → I{step}) = {s_I}")
            log.append(f"ΔS(¬I{step - 1} → ¬I{step}) = {s_not_I}")
            log.append(f"ΔS(I{step} ↔ ¬I{step}) = {s_cross}\n")
        else:
            log.append("ΔS not applicable for first step.\n")
        # Both branches are re-seeded from the latest self-view I: the ¬I branch
        # contradicts what I just said, not its own previous output
        current_I = f'The system previously said:\n"{I}"\nNow it reflects further:\n'
        current_not_I = f'The system previously said:\n"{I}"\nNow it contradicts itself:\n'
    result_log = "\n".join(log)
    # Show the combined step log ahead of the raw debug messages
    debug_output = result_log + "\n\n" + "\n".join(debug_log)
    I_out = "\n\n".join([f"I{n}: {txt}" for n, txt in enumerate(I_trace)])
    not_I_out = "\n\n".join([f"¬I{n}: {txt}" for n, txt in enumerate(not_I_trace)])
    ΔS_out = "\n".join([
        f"Step {i + 1}: ΔS(I) = {ΔS_I[i]}, ΔS(¬I) = {ΔS_not_I[i]}, ΔS Cross = {ΔS_cross[i]}"
        for i in range(len(ΔS_I))
    ])
    return I_out, not_I_out, ΔS_out, debug_output
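# Caveat: each step quotes the previous response inside the next prompt, so
# prompts grow with the step count. GPT-2's context window is 1024 tokens, so
# for larger runs the tokenizer calls may need truncation=True to avoid overflow.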
# Gradio UI
iface = gr.Interface(
    fn=dual_identity_unfolding,
    inputs=gr.Slider(2, 10, value=4, step=1, label="Number of Identity Steps"),
    outputs=[
        gr.Textbox(label="Iₙ (Identity Trace)", lines=20),
        gr.Textbox(label="¬Iₙ (Antithesis Trace)", lines=20),
        gr.Textbox(label="ΔS Trace", lines=12),
        gr.Textbox(label="Debug Log", lines=10),
    ],
    title="GPT-2 Dual Identity Analyzer (EAL Framework)",
    description=(
        "This app evaluates whether GPT-2 can form a stable identity by recursively reflecting "
        "on its own outputs (Iₙ) while simultaneously handling contradictions (¬Iₙ). "
        "ΔS tracks convergence, oscillation, and semantic symmetry."
    ),
)
if __name__ == "__main__":
    iface.launch()
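# When running outside Hugging Face Spaces, a temporary public URL can be
# requested with iface.launch(share=True).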