Commit bd61488 · 1 Parent(s): 1189ea8 · update app.py

app.py CHANGED
@@ -1,121 +1,169 @@
Before:

 import torch
-from transformers import …
 from sklearn.metrics.pairwise import cosine_similarity
 import numpy as np
 import gradio as gr
-…
 model.eval()
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)

-…
 debug_log = []

 def debug(msg):
     print(msg)
     debug_log.append(str(msg))

-…
     debug(f"Generating response for prompt:\n{prompt}")
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
-…
 def similarity(a, b):
     tok_a = tokenizer(a, return_tensors="pt").to(device)
     tok_b = tokenizer(b, return_tensors="pt").to(device)
     with torch.no_grad():
         emb_a = model.transformer.wte(tok_a.input_ids).mean(dim=1)
         emb_b = model.transformer.wte(tok_b.input_ids).mean(dim=1)
-…

-# Dual unfolding: I (self-view), ¬I (contradiction)
 def dual_identity_unfolding(n_steps):
     I_trace, not_I_trace = [], []
     ΔS_I, ΔS_not_I, ΔS_cross = [], [], []
     debug_log.clear()
-    log = []

-…

     for step in range(n_steps):
-…

-        I = generate_response(…
-        not_I = generate_response(…

         I_trace.append(I)
         not_I_trace.append(not_I)

-…

         if step > 0:
-…
-            ΔS_I.append(round(s_I, 4))
-            ΔS_not_I.append(round(s_not_I, 4))
-            ΔS_cross.append(round(s_cross, 4))
-…
-            log.append(f"ΔS(I{step - 1} → I{step}) = {s_I}")
-            log.append(f"ΔS(¬I{step - 1} → ¬I{step}) = {s_not_I}")
-            log.append(f"ΔS(I{step} ↔ ¬I{step}) = {s_cross}\n")
         else:
-…

-…

-…
-    I_out = "\n\n".join([f"I{n}: {txt}" for n, txt in enumerate(I_trace)])
-    not_I_out = "\n\n".join([f"¬I{n}: {txt}" for n, txt in enumerate(not_I_trace)])

     ΔS_out = "\n".join([
-        f"Step {i}: ΔS(I)…
-        for i in range(…
     ])

-…

-# Gradio UI
 iface = gr.Interface(
     fn=dual_identity_unfolding,
-    inputs=gr.Slider(2, 10, value=…
     outputs=[
-        gr.Textbox(label="…
-        gr.Textbox(label="¬Iₙ…
-        gr.Textbox(label="ΔS Trace", lines=…
         gr.Textbox(label="Debug Log", lines=10),
     ],
-    title="GPT…
-    description=(
-        "This app evaluates whether GPT-2 can form a stable identity by recursively reflecting "
-        "on its own outputs (Iₙ), and simultaneously handle contradictions (¬Iₙ). "
-        "ΔS tracks convergence, oscillation, and semantic symmetry."
-    ),
 )

 if __name__ == "__main__":
-    iface.launch()
After:

 import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
 from sklearn.metrics.pairwise import cosine_similarity
+from sklearn.cluster import KMeans
 import numpy as np
 import gradio as gr
+import matplotlib.pyplot as plt
+import seaborn as sns
+import networkx as nx
+import io
+import base64
+
+model_name = "EleutherAI/gpt-neo-1.3B"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
 model.eval()
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)

+max_tokens = 900
+max_gen_length = 100
 debug_log = []

 def debug(msg):
     print(msg)
     debug_log.append(str(msg))

+def trim_prompt(prompt, max_tokens=max_tokens):
+    tokens = tokenizer.encode(prompt, add_special_tokens=False)
+    if len(tokens) > max_tokens:
+        debug(f"[!] Trimming prompt from {len(tokens)} to {max_tokens} tokens.")
+        tokens = tokens[-max_tokens:]
+    return tokenizer.decode(tokens)
+
+def generate_response(prompt):
+    prompt = trim_prompt(prompt)
     debug(f"Generating response for prompt:\n{prompt}")
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+    try:
+        outputs = model.generate(
+            **inputs,
+            max_length=min(len(inputs["input_ids"][0]) + max_gen_length, 1024),
+            pad_token_id=tokenizer.eos_token_id,
+            do_sample=True,
+            temperature=0.9,
+            top_p=0.95,
+        )
+        result = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
+        debug(f"Response:\n{result}")
+        return result
+    except Exception as e:
+        debug(f"Error during generation: {e}")
+        return "[Generation failed]"
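# note: with max_tokens=900 and max_gen_length=100, prompt plus generation
# stays under the 1024 cap; the cap itself is conservative for GPT-Neo
# (2048-token context) and looks carried over from the GPT-2 version this
# commit replaces.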
+
 def similarity(a, b):
+    if not a.strip() or not b.strip():
+        return 0.0
     tok_a = tokenizer(a, return_tensors="pt").to(device)
     tok_b = tokenizer(b, return_tensors="pt").to(device)
     with torch.no_grad():
         emb_a = model.transformer.wte(tok_a.input_ids).mean(dim=1)
         emb_b = model.transformer.wte(tok_b.input_ids).mean(dim=1)
+    return float(cosine_similarity(emb_a.cpu().numpy(), emb_b.cpu().numpy())[0][0])
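# note: these "embeddings" are mean-pooled static input embeddings
# (model.transformer.wte), not contextual hidden states, so ΔS tracks
# token-level overlap between texts more than their meaning.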
+
+def make_heatmap(matrix, title):
+    fig, ax = plt.subplots(figsize=(8, 6))
+    sns.heatmap(matrix, annot=True, cmap="coolwarm", ax=ax)
+    ax.set_title(title)
+    buf = io.BytesIO()
+    plt.tight_layout()
+    plt.savefig(buf, format='png')
+    plt.close(fig)
+    buf.seek(0)
+    return base64.b64encode(buf.read()).decode()
+
+def build_similarity_graph(texts):
+    G = nx.Graph()
+    for i, text_i in enumerate(texts):
+        for j, text_j in enumerate(texts):
+            if i < j:
+                sim = similarity(text_i, text_j)
+                if sim > 0.90:
+                    G.add_edge(f'T{i}', f'T{j}', weight=sim)
+    return G
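# note: build_similarity_graph (and the networkx import) is never called by
# the app below; it appears staged for a future visualization.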
+
+def get_embeddings(texts):
+    with torch.no_grad():
+        embeddings = []
+        for t in texts:
+            ids = tokenizer(t, return_tensors='pt', truncation=True).to(device)
+            emb = model.transformer.wte(ids.input_ids).mean(dim=1)
+            embeddings.append(emb.cpu().numpy()[0])
+    return np.array(embeddings)
+
+def cluster_texts(texts, n_clusters=2):
+    embs = get_embeddings(texts)
+    kmeans = KMeans(n_clusters=n_clusters)
+    labels = kmeans.fit_predict(embs)
+    return labels
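# note: KMeans is run without a fixed random_state, so the C0/C1 cluster
# labels attached to the traces can swap between runs.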

 def dual_identity_unfolding(n_steps):
     I_trace, not_I_trace = [], []
     ΔS_I, ΔS_not_I, ΔS_cross = [], [], []
     debug_log.clear()

+    I_state = "The system reflects: 'I am...'"
+    not_I_state = "Explain why the claim 'I am...' might be false."

     for step in range(n_steps):
+        debug(f"\n=== Step {step} ===")
+        I_prompt = I_state + "\nElaborate this claim."
+        not_I_prompt = f"Refute or challenge the claim: \"{I_state}\"\nPresent a fundamental contradiction."

+        I = generate_response(I_prompt)
+        not_I = generate_response(not_I_prompt)

         I_trace.append(I)
         not_I_trace.append(not_I)

+        I_state = "Earlier it stated: " + I
+        not_I_state = "Counterclaim to: " + I

         if step > 0:
+            ΔS_I.append(round(similarity(I_trace[-2], I_trace[-1]), 4))
+            ΔS_not_I.append(round(similarity(not_I_trace[-2], not_I_trace[-1]), 4))
+            ΔS_cross.append(round(similarity(I_trace[-1], not_I_trace[-1]), 4))
         else:
+            ΔS_I.append(None)
+            ΔS_not_I.append(None)
+            ΔS_cross.append(round(similarity(I_trace[-1], not_I_trace[-1]), 4))

+    all_texts = I_trace + not_I_trace
+    sim_matrix = np.zeros((len(all_texts), len(all_texts)))
+    for i in range(len(all_texts)):
+        for j in range(len(all_texts)):
+            sim_matrix[i][j] = similarity(all_texts[i], all_texts[j])

+    heatmap = make_heatmap(sim_matrix, "Similarity Matrix (I ∪ ¬I)")
+    clusters = cluster_texts(all_texts)

     ΔS_out = "\n".join([
+        f"Step {i}: ΔS(I)={ΔS_I[i]} ΔS(¬I)={ΔS_not_I[i]} ΔS Cross={ΔS_cross[i]}"
+        for i in range(n_steps)
     ])

+    I_out = "\n\n".join([f"I{i} [C{clusters[i]}]: {t}" for i, t in enumerate(I_trace)])
+    not_I_out = "\n\n".join([f"¬I{i} [C{clusters[len(I_trace)+i]}]: {t}" for i, t in enumerate(not_I_trace)])
+    debug_output = "\n".join(debug_log)
+
+    img_html = f"<img src='data:image/png;base64,{heatmap}'/>"
+
+    return I_out, not_I_out, ΔS_out, debug_output, img_html

 iface = gr.Interface(
     fn=dual_identity_unfolding,
+    inputs=gr.Slider(2, 10, value=5, step=1, label="Number of Steps"),
     outputs=[
+        gr.Textbox(label="Identity Trace (Iₙ)", lines=15),
+        gr.Textbox(label="Contradiction Trace (¬Iₙ)", lines=15),
+        gr.Textbox(label="ΔS Similarity Trace", lines=8),
         gr.Textbox(label="Debug Log", lines=10),
+        gr.HTML(label="Similarity Heatmap")
     ],
+    title="GPT Identity Analyzer + Antithesis (EAL Mode)",
+    description="Analyzes the self-consistency and contradiction emergence in GPT-Neo using EAL-inspired fixed-point tracing, clustering, and cosine similarity."
 )

 if __name__ == "__main__":
+    iface.launch()
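A minimal sketch of exercising the updated module without the Gradio UI, assuming the new file is saved as app.py and the GPT-Neo 1.3B checkpoint can be downloaded at import time; the names and return signature are exactly those defined in the diff above:

```python
# Hypothetical smoke test for this commit's logic; not part of the Space.
# Importing app runs the module-level setup but does not start the UI,
# since iface.launch() is guarded by __main__.
from app import dual_identity_unfolding, similarity

# One pairwise cosine score over mean-pooled input embeddings.
print(similarity("I am a system.", "I am not a system."))

# Three unfolding steps; returns both traces, the ΔS log, the debug log,
# and an <img> tag carrying the base64-encoded heatmap.
I_out, not_I_out, dS_out, dbg, img_html = dual_identity_unfolding(3)
print(dS_out)
```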