gtani committed on
Commit
adec707
·
1 Parent(s): e4f94d7

Refactor chat function to streamline message handling and improve clarity

Browse files
Files changed (1) hide show
  1. app.py +10 -19
app.py CHANGED
@@ -15,31 +15,22 @@ SYSTEM_PROMPT = (
15
  )
16
 
17
def chat(user_message, history):
    """Stream a Claude reply for *user_message*, yielding the growing UI history.

    Generator suited to a Gradio chat callback: each yield is the full list of
    role/content message dicts to display.
    """
    # Guard: skip blank or missing input.
    if not (user_message and user_message.strip()):
        return

    # What Gradio shows: prior turns plus the new user message.
    shown = history + [{"role":"user","content":user_message}]

    # Flatten the conversation into one text prompt for Claude.
    transcript = [SYSTEM_PROMPT]
    transcript.extend(
        f"{turn['role'].capitalize()}: {turn['content']}" for turn in history
    )
    transcript.append(f"User: {user_message}")
    transcript.append("Assistant:")  # Claude will continue from here
    prompt = "\n".join(transcript)

    # Stream tokens, re-yielding the accumulated assistant reply each time.
    partial = ""
    for piece in claude_llm.stream(prompt):
        partial += piece
        yield shown + [{"role":"assistant","content": partial}]

    # Final state: assistant turn appended permanently.
    shown.append({"role":"assistant","content": partial})
    yield shown
 
15
  )
16
 
17
def chat(user_message, history):
    """Stream a Claude reply, yielding the updated chat history for Gradio.

    Parameters:
        user_message: the new user input (may be None or blank — ignored).
        history: prior turns as a list of {"role", "content"} dicts.

    Yields:
        The full message list to display, updated as tokens stream in.
    """
    # Guard against None as well as blank input; calling .strip() on None
    # would raise AttributeError (the pre-refactor guard handled this).
    if not user_message or not user_message.strip():
        return

    ui_history = history + [{"role":"user","content":user_message}]

    # Build a proper messages array instead of a raw prompt string.
    llm_messages = (
        [{"role":"system","content":SYSTEM_PROMPT}]
        + history
        + [{"role":"user","content":user_message}]
    )

    full = ""
    for token in claude_llm.stream(llm_messages):
        full += token
        # FIX: include the prior history while streaming. Yielding only the
        # assistant message wiped the visible conversation mid-stream.
        yield ui_history + [{"role":"assistant","content":full}]

    # Final yield with the assistant turn appended permanently.
    ui_history.append({"role":"assistant","content":full})
    yield ui_history