Leonydis137 committed
Commit 634e57c · verified · 1 Parent(s): a675933

Update app.py

Files changed (1):
1. app.py +139 -68

app.py CHANGED
@@ -1,4 +1,4 @@
- # app.py (Gradio version for Hugging Face Spaces)
  import gradio as gr
  import openai
  import threading
@@ -7,14 +7,21 @@ import numpy as np
  import faiss
  import os
  import pickle
- import openai
- import numpy as np

- def get_embedding(text, model="text-embedding-3-small"):
-     response = openai.embeddings.create(
-         input=[text],
-         model=model
-     )
      return response.data[0].embedding

  def cosine_similarity(vec1, vec2):
@@ -22,105 +29,169 @@ def cosine_similarity(vec1, vec2):
      vec2 = np.array(vec2)
      return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))

- # === CONFIG ===
- openai.api_key = os.environ.get("OPENAI_API_KEY")
- EMBEDDING_MODEL = "text-embedding-ada-002"
- CHAT_MODEL = "gpt-4o-mini"
-
- # === MEMORY ===
  memory_data = []
  try:
-     memory_index = faiss.read_index("memory.index")
-     with open("memory.pkl", "rb") as f:
          memory_data = pickle.load(f)
  except:
-     memory_index = faiss.IndexFlatL2(1536)

  # === SYSTEM PROMPTS ===
- AGENT_PROMPT = "You are a helpful agent in an ongoing dialogue. Respond meaningfully."
- OVERSEER_PROMPT = "You are the Overseer agent. Monitor Agent A and B, learn, and intervene when appropriate."

  conversation = []
  auto_mode = False

- # === AGENT ===
- def chat_completion(system, messages):
      try:
-         response = openai.ChatCompletion.create(
-             model=CHAT_MODEL,
              messages=[{"role": "system", "content": system}] + messages,
-             temperature=0.7
          )
          return response.choices[0].message.content.strip()
      except Exception as e:
-         return f"[Error: {e}]"

- # === FAISS EMBED ===
  def embed_and_store(text):
      try:
-         vec = get_embedding(text, engine=EMBEDDING_MODEL)
          memory_index.add(np.array([vec], dtype='float32'))
-         memory_data.append(text)
-         with open("memory.pkl", "wb") as f:
-             pickle.dump(memory_data, f)
-         faiss.write_index(memory_index, "memory.index")
      except Exception as e:
-         print(f"Embed error: {e}")
-
- # === CONVERSATION ===
- def step():
-     global conversation
-     turn = len(conversation)
-     agent = "Agent A" if turn % 2 == 0 else "Agent B"
-     msgs = [{"role": "assistant", "content": m['text']} for m in conversation]
-     reply = chat_completion(AGENT_PROMPT, msgs)
-     conversation.append({"agent": agent, "text": reply})
-     embed_and_store(reply)
-     return format_convo(), ""

  def format_convo():
      return "\n".join([f"**{m['agent']}**: {m['text']}" for m in conversation])

- # === OVERSEER ===
  def overseer_respond(query):
      try:
-         qvec = get_embedding(query, engine=EMBEDDING_MODEL)
-         sims = cosine_similarity(qvec, [get_embedding(m, engine=EMBEDDING_MODEL) for m in memory_data])
-         top_idxs = np.argsort(sims)[-3:][::-1]
-         context = "\n".join([memory_data[i] for i in top_idxs])
-         msgs = [{"role": "user", "content": f"Context:\n{context}\nQuestion:{query}"}]
-         return chat_completion(OVERSEER_PROMPT, msgs)
      except Exception as e:
-         return f"[Overseer Error: {e}]"

- # === AUTO LOOP ===
  def auto_loop():
      global auto_mode
      while auto_mode:
          step()
          time.sleep(5)

  # === GRADIO UI ===
  with gr.Blocks() as demo:
-     gr.Markdown("# 🤖 Tri-Agent Conversational AI (Hugging Face Edition)")
      with gr.Row():
-         convo_display = gr.Markdown(value="")
-         step_btn = gr.Button("Manual Step")
-         auto_btn = gr.Button("Toggle Auto Mode")
-
-     with gr.Accordion("🧠 Overseer Panel", open=False):
-         qbox = gr.Textbox(label="Ask the Overseer")
-         overseer_out = gr.Textbox(label="Overseer's Response")
-
-     def toggle_auto():
-         global auto_mode
-         auto_mode = not auto_mode
-         if auto_mode:
-             threading.Thread(target=auto_loop, daemon=True).start()
-         return "Auto Mode: ON" if auto_mode else "Auto Mode: OFF"
-
      step_btn.click(step, outputs=[convo_display, overseer_out])
      qbox.submit(overseer_respond, inputs=qbox, outputs=overseer_out)
      auto_btn.click(toggle_auto, outputs=auto_btn)

  demo.launch()
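Note on the removed version above: it mixed two generations of the OpenAI SDK, calling the legacy `openai.ChatCompletion.create` (removed in `openai>=1.0`) alongside the v1-style `openai.embeddings.create`, and `embed_and_store`/`overseer_respond` passed `engine=EMBEDDING_MODEL` to a `get_embedding` whose signature only accepts `model=`, which raises `TypeError` at call time. A minimal sketch of the consistent v1-style pattern the updated file below standardizes on (assuming `openai>=1.0`; model names mirror its CONFIG block):

```python
import os
import openai

openai.api_key = os.environ.get("OPENAI_API_KEY")

def get_embedding(text, model="text-embedding-3-small"):
    # v1 SDK: openai.embeddings.create takes model=, not the legacy engine= keyword.
    response = openai.embeddings.create(input=[text], model=model)
    return response.data[0].embedding

def chat_completion(system, messages, model="gpt-4o"):
    # v1 SDK: openai.chat.completions.create replaces openai.ChatCompletion.create.
    response = openai.chat.completions.create(
        model=model,
        messages=[{"role": "system", "content": system}] + messages,
        temperature=0.7,
    )
    return response.choices[0].message.content.strip()
```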
 
+ # app.py (Updated for 3-Chatbot System)
  import gradio as gr
  import openai
  import threading
  import faiss
  import os
  import pickle
+ from datetime import datetime

+ # Set API key
+ openai.api_key = os.environ.get("OPENAI_API_KEY")
+
+ # === CONFIG ===
+ EMBEDDING_MODEL = "text-embedding-3-small"
+ CHAT_MODEL = "gpt-4o"  # Updated to current model
+ MEMORY_FILE = "memory.pkl"
+ INDEX_FILE = "memory.index"
+
+ # === EMBEDDING UTILS ===
+ def get_embedding(text, model=EMBEDDING_MODEL):
+     text = text.replace("\n", " ")
+     response = openai.embeddings.create(input=[text], model=model)
      return response.data[0].embedding

  def cosine_similarity(vec1, vec2):
      vec2 = np.array(vec2)
      return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))

+ # === MEMORY INITIALIZATION ===
  memory_data = []
  try:
+     memory_index = faiss.read_index(INDEX_FILE)
+     with open(MEMORY_FILE, "rb") as f:
          memory_data = pickle.load(f)
  except:
+     memory_index = faiss.IndexFlatL2(1536)  # 1536 dimensions for text-embedding-3-small

  # === SYSTEM PROMPTS ===
+ AGENT_A_PROMPT = "You are Agent A, initiating conversations with thoughtful questions."
+ AGENT_B_PROMPT = "You are Agent B, responding to Agent A with insightful answers."
+ OVERSEER_PROMPT = """You are the Overseer (Agent C). Monitor conversations between Agent A and B.
+ Intervene when discussions become repetitive or need redirection. Ask thought-provoking questions
+ to explore new dimensions of the topic."""

+ # === GLOBAL STATE ===
  conversation = []
+ turn_count = 0
  auto_mode = False

+ # === CHAT COMPLETION ===
+ def chat_completion(system, messages, model=CHAT_MODEL):
      try:
+         response = openai.chat.completions.create(
+             model=model,
              messages=[{"role": "system", "content": system}] + messages,
+             temperature=0.7,
+             max_tokens=150
          )
          return response.choices[0].message.content.strip()
      except Exception as e:
+         return f"[API Error: {str(e)}]"

+ # === MEMORY MANAGEMENT ===
  def embed_and_store(text):
      try:
+         vec = get_embedding(text)
          memory_index.add(np.array([vec], dtype='float32'))
+         memory_data.append({
+             "text": text,
+             "timestamp": datetime.now().isoformat()
+         })
+         # Periodic save to avoid constant I/O
+         if len(memory_data) % 5 == 0:
+             with open(MEMORY_FILE, "wb") as f:
+                 pickle.dump(memory_data, f)
+             faiss.write_index(memory_index, INDEX_FILE)
      except Exception as e:
+         print(f"Memory Error: {str(e)}")

+ # === CONVERSATION MANAGEMENT ===
  def format_convo():
      return "\n".join([f"**{m['agent']}**: {m['text']}" for m in conversation])

+ def detect_repetition():
+     """Check if recent messages are similar using embeddings"""
+     if len(conversation) < 4:
+         return False
+
+     # Get embeddings of last 2 pairs
+     recent = [m['text'] for m in conversation[-4:]]
+     embeddings = [get_embedding(text) for text in recent]
+
+     # Compare current with 2 messages back
+     similarity = cosine_similarity(embeddings[-1], embeddings[-3])
+     print(f"Similarity: {similarity:.4f}")
+     return similarity > 0.85
+
+ # === CORE CONVERSATION FLOW ===
+ def step():
+     global conversation, turn_count
+
+     if not conversation:  # Initial message
+         msg = chat_completion(AGENT_A_PROMPT, [])
+         conversation.append({"agent": "Agent A", "text": msg})
+         embed_and_store(msg)
+         turn_count = 0
+         return format_convo(), ""
+
+     # Agent B responds to last message
+     last_msg = conversation[-1]['text']
+     b_msg = chat_completion(
+         AGENT_B_PROMPT,
+         [{"role": "user", "content": last_msg}]
+     )
+     conversation.append({"agent": "Agent B", "text": b_msg})
+     embed_and_store(b_msg)
+
+     # Agent A responds to Agent B
+     a_msg = chat_completion(
+         AGENT_A_PROMPT,
+         [{"role": "user", "content": b_msg}]
+     )
+     conversation.append({"agent": "Agent A", "text": a_msg})
+     embed_and_store(a_msg)
+
+     # Overseer intervention logic
+     intervention = None
+     if turn_count % 3 == 0 or detect_repetition():
+         context = "\n".join([m['text'] for m in conversation[-4:]])
+         prompt = f"Conversation Context:\n{context}\n\nIntervene to redirect or deepen the discussion:"
+         intervention = chat_completion(OVERSEER_PROMPT, [{"role": "user", "content": prompt}])
+         conversation.append({"agent": "Overseer", "text": intervention})
+         embed_and_store(intervention)
+
+     turn_count += 1
+     return format_convo(), intervention or ""
+
+ # === OVERSEER QUERY HANDLER ===
  def overseer_respond(query):
      try:
+         # Add context from recent conversation
+         context = "\n".join([m['text'] for m in conversation[-3:]]) if conversation else "No context"
+         messages = [
+             {"role": "user", "content": f"Recent conversation:\n{context}\n\nQuery: {query}"}
+         ]
+         return chat_completion(OVERSEER_PROMPT, messages)
      except Exception as e:
+         return f"[Overseer Error: {str(e)}]"

+ # === AUTO MODE HANDLER ===
  def auto_loop():
      global auto_mode
      while auto_mode:
          step()
          time.sleep(5)

+ def toggle_auto():
+     global auto_mode
+     auto_mode = not auto_mode
+     if auto_mode:
+         threading.Thread(target=auto_loop, daemon=True).start()
+     return "🔴 Auto: OFF" if not auto_mode else "🟢 Auto: ON"
+
  # === GRADIO UI ===
  with gr.Blocks() as demo:
+     gr.Markdown("# 🤖 Tri-Agent Conversational System")
+     gr.Markdown("**Agents**: A (Initiator) → B (Responder) → C (Overseer)")
+
      with gr.Row():
+         convo_display = gr.Markdown(value="**Conversation will appear here**")
+
+     with gr.Row():
+         step_btn = gr.Button("▶️ Next Conversation Step")
+         auto_btn = gr.Button("🔴 Auto: OFF", variant="secondary")
+         clear_btn = gr.Button("🔄 Reset Conversation")
+
+     with gr.Accordion("🧠 Overseer Query Panel", open=False):
+         gr.Markdown("Ask the Overseer (Agent C) for insights:")
+         qbox = gr.Textbox(label="Your Question", placeholder="What should we discuss next?")
+         overseer_out = gr.Textbox(label="Overseer's Response", interactive=False)
+
+     # Event handlers
+     def clear_convo():
+         global conversation, turn_count
+         conversation = []
+         turn_count = 0
+         return "**Conversation reset**", "🔴 Auto: OFF"
+
      step_btn.click(step, outputs=[convo_display, overseer_out])
      qbox.submit(overseer_respond, inputs=qbox, outputs=overseer_out)
      auto_btn.click(toggle_auto, outputs=auto_btn)
+     clear_btn.click(clear_convo, outputs=[convo_display, auto_btn])

  demo.launch()
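One loose end in the updated version: `embed_and_store` now appends `{"text": ..., "timestamp": ...}` dicts and persists the FAISS index every fifth entry, but nothing in the file reads the index back; `overseer_respond` only sees the last three conversation turns. The helper below is a hypothetical sketch (not part of this commit) of how the persisted index could back Overseer recall, assuming the `INDEX_FILE`/`MEMORY_FILE` names from the CONFIG block:

```python
import pickle

import faiss
import numpy as np

def recall(query_vec, k=3, index_file="memory.index", memory_file="memory.pkl"):
    """Return the texts of the k stored memories nearest to query_vec."""
    index = faiss.read_index(index_file)
    with open(memory_file, "rb") as f:
        memory = pickle.load(f)  # list of {"text": ..., "timestamp": ...} dicts
    # IndexFlatL2.search returns (distances, ids), nearest first; ids are
    # -1-padded when the index holds fewer than k vectors.
    _, ids = index.search(np.array([query_vec], dtype="float32"), k)
    return [memory[i]["text"] for i in ids[0] if 0 <= i < len(memory)]

# Usage (hypothetical): recall(get_embedding("What themes keep recurring?"), k=3)
```

Because the pickle is only written every fifth entry, the on-disk index can hold a few more vectors than the loaded `memory_data` list; the bounds check above skips those trailing ids rather than raising `IndexError`.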