Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -16,10 +16,10 @@ model.to(device)
|
|
16 |
debug_log = []
|
17 |
|
18 |
def debug(msg):
|
19 |
-
print(msg)
|
20 |
-
debug_log.append(str(msg))
|
21 |
|
22 |
-
# Generate
|
23 |
def generate_response(prompt, max_length=100):
|
24 |
debug(f"Generating response for prompt:\n{prompt}")
|
25 |
inputs = tokenizer(prompt, return_tensors="pt").to(device)
|
@@ -32,10 +32,10 @@ def generate_response(prompt, max_length=100):
|
|
32 |
top_p=0.95,
|
33 |
)
|
34 |
result = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
|
35 |
-
debug(f"
|
36 |
return result
|
37 |
|
38 |
-
#
|
39 |
def similarity(a, b):
|
40 |
tok_a = tokenizer(a, return_tensors="pt").to(device)
|
41 |
tok_b = tokenizer(b, return_tensors="pt").to(device)
|
@@ -43,60 +43,77 @@ def similarity(a, b):
|
|
43 |
emb_a = model.transformer.wte(tok_a.input_ids).mean(dim=1)
|
44 |
emb_b = model.transformer.wte(tok_b.input_ids).mean(dim=1)
|
45 |
score = float(cosine_similarity(emb_a.cpu().numpy(), emb_b.cpu().numpy())[0][0])
|
46 |
-
debug(f"Similarity
|
47 |
return score
|
48 |
|
49 |
-
#
|
50 |
-
def
|
51 |
-
|
52 |
-
Δ
|
53 |
-
log = []
|
54 |
debug_log.clear()
|
|
|
55 |
|
56 |
-
|
|
|
57 |
|
58 |
for step in range(n_steps):
|
59 |
log.append(f"--- Step {step} ---")
|
60 |
-
log.append(f"[Prompt to GPT-2]:\n{current_prompt}")
|
61 |
|
62 |
-
|
63 |
-
|
|
|
|
|
|
|
64 |
|
65 |
-
log.append(f"[
|
|
|
66 |
|
67 |
if step > 0:
|
68 |
-
|
69 |
-
|
70 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
71 |
else:
|
72 |
log.append("ΔS not applicable for first step.\n")
|
73 |
|
74 |
-
|
75 |
-
|
76 |
-
"Now it continues thinking about what that implies:\n"
|
77 |
-
)
|
78 |
|
79 |
-
|
80 |
-
trace_summary = "\n".join(
|
81 |
-
[f"ΔS({i} → {i+1}) = {ΔS_trace[i]}" for i in range(len(ΔS_trace))]
|
82 |
-
)
|
83 |
debug_output = "\n".join(debug_log)
|
84 |
-
return summary, trace_summary, debug_output
|
85 |
|
86 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
87 |
iface = gr.Interface(
|
88 |
-
fn=
|
89 |
-
inputs=gr.Slider(2, 10, value=
|
90 |
outputs=[
|
91 |
-
gr.Textbox(label="
|
92 |
-
gr.Textbox(label="
|
|
|
93 |
gr.Textbox(label="Debug Log", lines=10),
|
94 |
],
|
95 |
-
title="GPT-2 Identity
|
96 |
description=(
|
97 |
-
"This app
|
98 |
-
"
|
99 |
-
"
|
100 |
),
|
101 |
)
|
102 |
|
|
|
16 |
# Shared buffer that accumulates every message passed to debug(),
# later joined into the "Debug Log" pane of the UI.
debug_log = []


def debug(msg):
    """Echo *msg* to stdout and record its string form in debug_log."""
    text = str(msg)
    print(msg)
    debug_log.append(text)
|
21 |
|
22 |
+
# Generate GPT-2 response
|
23 |
def generate_response(prompt, max_length=100):
|
24 |
debug(f"Generating response for prompt:\n{prompt}")
|
25 |
inputs = tokenizer(prompt, return_tensors="pt").to(device)
|
|
|
32 |
top_p=0.95,
|
33 |
)
|
34 |
result = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
|
35 |
+
debug(f"Response:\n{result}")
|
36 |
return result
|
37 |
|
38 |
+
# Cosine similarity as ΔS proxy
|
39 |
def similarity(a, b):
|
40 |
tok_a = tokenizer(a, return_tensors="pt").to(device)
|
41 |
tok_b = tokenizer(b, return_tensors="pt").to(device)
|
|
|
43 |
emb_a = model.transformer.wte(tok_a.input_ids).mean(dim=1)
|
44 |
emb_b = model.transformer.wte(tok_b.input_ids).mean(dim=1)
|
45 |
score = float(cosine_similarity(emb_a.cpu().numpy(), emb_b.cpu().numpy())[0][0])
|
46 |
+
debug(f"Similarity: {score}")
|
47 |
return score
|
48 |
|
49 |
+
# Dual unfolding: I (self-view), ¬I (contradiction)
def dual_identity_unfolding(n_steps):
    """Run *n_steps* of dual identity unfolding with GPT-2.

    Each step generates a self-reflection (I) and a contradiction (¬I) via
    generate_response(), then — from the second step on — scores embedding
    similarity (the ΔS proxy) between consecutive members of each chain and
    between the two chains at the current step.

    Args:
        n_steps: number of unfolding steps to run.

    Returns:
        Tuple of four display strings:
        (identity trace, antithesis trace, ΔS trace, debug log).
    """
    I_trace, not_I_trace = [], []
    ΔS_I, ΔS_not_I, ΔS_cross = [], [], []
    debug_log.clear()
    # Step-by-step narrative. NOTE(review): this log is built but never
    # returned (the old `result_log = "\n".join(log)` was dead code and has
    # been removed) — either surface it in the UI or drop the appends.
    log = []

    current_I = "The system begins to think about itself:\n"
    current_not_I = "The system begins to contradict its previous thoughts:\n"

    for step in range(n_steps):
        log.append(f"--- Step {step} ---")

        I = generate_response(current_I)
        not_I = generate_response(current_not_I)

        I_trace.append(I)
        not_I_trace.append(not_I)

        log.append(f"[Iₙ]:\n{I}\n")
        log.append(f"[¬Iₙ]:\n{not_I}\n")

        if step > 0:
            # ΔS within each chain (previous vs. current) plus the
            # cross-chain similarity at this step.
            s_I = similarity(I_trace[step - 1], I_trace[step])
            s_not_I = similarity(not_I_trace[step - 1], not_I_trace[step])
            s_cross = similarity(I_trace[step], not_I_trace[step])

            ΔS_I.append(round(s_I, 4))
            ΔS_not_I.append(round(s_not_I, 4))
            ΔS_cross.append(round(s_cross, 4))

            log.append(f"ΔS(I{step - 1} → I{step}) = {s_I}")
            log.append(f"ΔS(¬I{step - 1} → ¬I{step}) = {s_not_I}")
            log.append(f"ΔS(I{step} ↔ ¬I{step}) = {s_cross}\n")
        else:
            log.append("ΔS not applicable for first step.\n")

        # Seed the next step's prompts from this step's output.
        # NOTE(review): both prompts embed {I}. If the ¬I chain is meant to
        # contradict its OWN previous output, the second line should embed
        # {not_I} instead — confirm intent before changing.
        current_I = f'The system previously said:\n"{I}"\nNow it reflects further:\n'
        current_not_I = f'The system previously said:\n"{I}"\nNow it contradicts itself:\n'

    debug_output = "\n".join(debug_log)

    I_out = "\n\n".join([f"I{n}: {txt}" for n, txt in enumerate(I_trace)])
    not_I_out = "\n\n".join([f"¬I{n}: {txt}" for n, txt in enumerate(not_I_trace)])

    ΔS_out = "\n".join([
        f"Step {i}: ΔS(I) = {ΔS_I[i]}, ΔS(¬I) = {ΔS_not_I[i]}, ΔS Cross = {ΔS_cross[i]}"
        for i in range(len(ΔS_I))
    ])

    return I_out, not_I_out, ΔS_out, debug_output
|
101 |
+
|
102 |
+
# Gradio UI: one slider in, four read-only text panes out.
iface = gr.Interface(
    fn=dual_identity_unfolding,
    # Step count for the unfolding loop (min 2 — presumably so at least one
    # ΔS comparison exists, since step 0 has nothing to compare; confirm).
    inputs=gr.Slider(2, 10, value=4, step=1, label="Number of Identity Steps"),
    # Order matches the 4-tuple returned by dual_identity_unfolding:
    # (identity trace, antithesis trace, ΔS trace, debug log).
    outputs=[
        gr.Textbox(label="Iₙ (Identity Trace)", lines=20),
        gr.Textbox(label="¬Iₙ (Antithesis Trace)", lines=20),
        gr.Textbox(label="ΔS Trace", lines=12),
        gr.Textbox(label="Debug Log", lines=10),
    ],
    title="GPT-2 Dual Identity Analyzer (EAL Framework)",
    description=(
        "This app evaluates whether GPT-2 can form a stable identity by recursively reflecting "
        "on its own outputs (Iₙ), and simultaneously handle contradictions (¬Iₙ). "
        "ΔS tracks convergence, oscillation, and semantic symmetry."
    ),
)
|
119 |
|