# DevalBot — Gradio chat front-end for DEval (Claude via AWS Bedrock).
import gradio as gr

from bedrock_client import claude_llm, get_anthropic_client, claude_stream_response
from utils import load_users

# Login credentials for Blocks.launch(auth=...), loaded once at startup
# from a CSV of username/password pairs.
AUTHS = load_users('user.csv')

# Maximum number of chat messages kept.
# NOTE(review): the original comment said "turns (user+assistant)", but the
# trim in chat() counts individual messages, so 30 messages == 15 turns.
HISTORY_LIMIT = 30

# System prompt prepended to every LLM call.  (German: DevalBot is the
# assistant of DEval, the German Institute for Development Evaluation;
# it answers in German by default and can help with Stata and R.)
# Encoding fix: the previous text was UTF-8 mojibake ("fΓΌr" -> "für" etc.).
SYSTEM_PROMPT = (
    "Du bist DevalBot, ein konversationeller Assistent des Deutschen "
    "Evaluierungsinstituts für Entwicklungsbewertung (DEval). DEval bietet "
    "staatlichen und zivilgesellschaftlichen Organisationen in der "
    "Entwicklungszusammenarbeit unabhängige und wissenschaftlich fundierte "
    "Evaluierungen. Deine Hauptsprache ist Deutsch; antworte daher "
    "standardmäßig auf Deutsch. Du kannst zudem bei statistischen Analysen "
    "und Programmierung in Stata und R unterstützen. Antworte sachlich, "
    "präzise und stelle bei Unklarheiten klärende Rückfragen."
)
def chat(user_message, history):
    """Stream a DevalBot reply for *user_message* given the prior *history*.

    Generator used by gr.ChatInterface(type="messages"): each yield is the
    full message list to display — ``[{"role": ..., "content": ...}, ...]``.

    Fixes vs. the previous version:
    - empty/whitespace input now re-yields the unchanged history instead of
      returning without a yield (which left the UI hanging);
    - the turns sent to the LLM are trimmed to HISTORY_LIMIT too, so the
      prompt no longer grows without bound;
    - client creation moved inside ``try`` so connection errors are also
      surfaced inline.
    """
    # 1) Guard against empty input: keep the UI exactly as it was.
    if not user_message or not user_message.strip():
        yield history
        return

    # 2) Build the LLM message list: system prompt, the most recent prior
    #    turns (bounded by HISTORY_LIMIT), then the new user turn.
    llm_messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    llm_messages += history[-HISTORY_LIMIT:]
    llm_messages.append({"role": "user", "content": user_message})

    # 3) Show the user's turn in the UI immediately.
    ui_history = history + [{"role": "user", "content": user_message}]

    full_resp = ""
    try:
        # 4) Stream the reply, growing one assistant bubble chunk by chunk.
        client = get_anthropic_client()
        for chunk in claude_stream_response(llm_messages, client):
            full_resp += chunk
            yield ui_history + [{"role": "assistant", "content": full_resp}]
    except Exception as e:
        # Surface any provider/stream error inline instead of crashing the UI.
        err = f"⚠️ Oops, something went wrong: {e}"
        yield ui_history + [{"role": "assistant", "content": err}]
        return

    # 5) Finalize the assistant turn and trim the UI history to the last
    #    HISTORY_LIMIT messages before the final yield.
    ui_history.append({"role": "assistant", "content": full_resp})
    if len(ui_history) > HISTORY_LIMIT:
        ui_history = ui_history[-HISTORY_LIMIT:]
    yield ui_history
with gr.Blocks(
    css_paths=["static/deval.css"],
    theme=gr.themes.Default(primary_hue="blue", secondary_hue="yellow"),
) as demo:
    # ── Logo header (styled via #logo-primary in static/deval.css) ──
    gr.Image(
        value="static/logo.png",
        show_label=False,
        interactive=False,
        show_download_button=False,
        show_fullscreen_button=False,
        elem_id="logo-primary",
    )

    # Auto-reload the page every 5 minutes so idle sessions stay fresh.
    # BUG FIX: the timeout was 1000 ms while the comment promised 300 000 ms,
    # i.e. the page reloaded every second, making the chat unusable.
    gr.HTML(
        """
        <script>
          // Reload the page after 5 minutes (300000 ms)
          setTimeout(() => {
            window.location.reload();
          }, 300000);
        </script>
        """
    )

    # Chat UI driven by the streaming chat() generator.
    gr.ChatInterface(
        chat,
        type="messages",
        editable=True,
        concurrency_limit=200,
        save_history=True,
    )

# Queue enables streaming/concurrency; auth gates access via the users CSV.
demo.queue().launch(auth=AUTHS, ssr_mode=False)