gtani committed on
Commit
a3dead0
·
1 Parent(s): 85bd2b6

Refactor chat function to improve message handling and LLM interaction

Browse files
Files changed (1) hide show
  1. app.py +35 -15
app.py CHANGED
@@ -5,22 +5,42 @@ from utils import load_users
5
  AUTHS = load_users('user.csv')
6
 
7
 
 
 
 
 
 
 
 
 
 
8
def chat(user_message, history):
    """Stream an assistant reply, yielding updated history snapshots for the UI.

    Parameters
    ----------
    user_message : str
        The latest message typed by the user.
    history : list[dict]
        Prior conversation in Gradio "messages" format
        ({"role": ..., "content": ...} dicts).

    Yields
    ------
    list[dict]
        The conversation history including the partially streamed
        assistant reply, re-yielded on every token so the UI updates live.
    """
    # Record the user's turn in the history shown by the UI.
    messages = history + [{"role": "user", "content": user_message}]

    # BUG FIX: the original called llm.stream(user_message), sending only the
    # newest message and silently discarding every prior turn — the model had
    # no conversational context. Flatten the whole conversation into one text
    # prompt instead.
    prompt = "\n".join(
        f"{m['role'].capitalize()}: {m['content']}" for m in messages
    )

    # NOTE(review): claude_llm is assumed to be a LangChain-style LLM whose
    # .stream() yields text chunks — confirm against its definition elsewhere
    # in this file.
    llm = claude_llm

    full = ""
    # Stream tokens as they arrive, yielding a fresh snapshot each time.
    for token in llm.stream(prompt):
        full += token
        yield messages + [{"role": "assistant", "content": full}]

    # Final history with the complete assistant reply appended.
    messages.append({"role": "assistant", "content": full})
    yield messages
 
 
 
 
 
 
 
 
 
 
 
24
 
25
 
26
  with gr.Blocks(css_paths=["static/deval.css"],theme = gr.themes.Default(primary_hue="blue", secondary_hue="yellow"),) as demo:
 
5
  AUTHS = load_users('user.csv')
6
 
7
 
8
# System prompt prepended to every conversation before it is sent to the LLM.
# The text is German: it introduces DevalBot, the assistant of the German
# Institute for Development Evaluation (DEval), instructs the model to answer
# in German by default, and notes it can help with Stata/R analysis questions.
SYSTEM_PROMPT = (
    "Du bist DevalBot, ein konversationeller Assistent des Deutschen Evaluierungsinstituts "
    "für Entwicklungsbewertung (DEval). DEval bietet staatlichen und zivilgesellschaftlichen "
    "Organisationen in der Entwicklungszusammenarbeit unabhängige und wissenschaftlich fundierte "
    "Evaluierungen. Deine Hauptsprache ist Deutsch; antworte daher standardmäßig auf Deutsch. "
    "Du kannst zudem bei statistischen Analysen und Programmierung in Stata und R unterstützen."
)
16
+
17
def chat(user_message, history):
    """Stream a DevalBot reply, yielding growing history snapshots for Gradio.

    Parameters
    ----------
    user_message : str
        Latest user input from the textbox.
    history : list[dict] | None
        Conversation so far in Gradio "messages" format
        ({"role": ..., "content": ...}). May be ``None`` on the first turn.

    Yields
    ------
    list[dict]
        The UI history including the partially streamed assistant answer;
        re-yielded on every token so the chat window updates live.
    """
    # 1) Ignore empty / whitespace-only submissions: yield nothing so the
    #    UI is left untouched.
    if not user_message or not user_message.strip():
        return

    # ROBUSTNESS FIX: some Gradio versions pass None (not []) as the initial
    # history; without this guard `history + [...]` raises TypeError.
    history = history or []

    # 2) Build the UI history (what Gradio shows).
    ui_history = history + [{"role": "user", "content": user_message}]

    # 3) Flatten system prompt + conversation into one text prompt for Claude.
    prompt_lines = [SYSTEM_PROMPT]
    for msg in history:
        # Capitalize the role ("User:", "Assistant:") for readability.
        prompt_lines.append(f"{msg['role'].capitalize()}: {msg['content']}")
    prompt_lines.append(f"User: {user_message}")
    prompt_lines.append("Assistant:")  # Claude will continue from here
    full_prompt = "\n".join(prompt_lines)

    # 4) Stream from the LLM, re-yielding the whole history each time so the
    #    partially completed answer renders incrementally.
    #    NOTE(review): assumes claude_llm.stream() yields plain text chunks —
    #    confirm against the LLM wrapper's definition.
    full_resp = ""
    for token in claude_llm.stream(full_prompt):
        full_resp += token
        yield ui_history + [{"role": "assistant", "content": full_resp}]

    # 5) Final snapshot with the complete reply appended.
    ui_history.append({"role": "assistant", "content": full_resp})
    yield ui_history
44
 
45
 
46
  with gr.Blocks(css_paths=["static/deval.css"],theme = gr.themes.Default(primary_hue="blue", secondary_hue="yellow"),) as demo: