"""DEvalBot — Gradio chat UI streaming responses from AWS Bedrock via LangChain."""
import os

import gradio as gr
from langchain.schema import AIMessage, HumanMessage, SystemMessage

from bedrock_client import bedrock_llm


def _str_to_bool(value: str) -> bool:
    """Parse common truthy/falsy strings into a bool.

    Mirrors the semantics of ``distutils.util.strtobool`` (true/false, 1/0,
    yes/no, y/n, t/f, on/off) — ``distutils`` was removed in Python 3.12
    (PEP 632), so we inline the parser instead of importing it.

    Raises:
        ValueError: if *value* is not a recognized truth string.
    """
    normalized = value.strip().lower()
    if normalized in ("y", "yes", "t", "true", "on", "1"):
        return True
    if normalized in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {value!r}")


# Whether the chat input accepts uploads in addition to text (env-driven).
MULTIMODAL = os.environ.get("MULTIMODAL", "false")
try:
    MULTIMODAL = _str_to_bool(MULTIMODAL)
except ValueError:
    # BUG FIX: the original printed this message but left MULTIMODAL as the
    # invalid (truthy) string — now we actually fall back to False.
    print("Invalid MULTIMODAL value Set to Default Value=False: Use true/false, 1/0, yes/no.")
    MULTIMODAL = False

# (username, password) basic-auth pairs; entries are None when env is unset.
AUTHS = [(os.environ.get('USER'), os.environ.get('PW'))]
SYSTEM_PROMPT = os.environ.get('SYSTEM_PROMPT', '')


def chat(message, history):
    """Stream an assistant reply for *message* given the prior *history*.

    Args:
        message: the new user turn (str, or a dict when multimodal is on —
            presumably handled upstream by Gradio; TODO confirm).
        history: Gradio "messages"-format list of
            ``{"role": ..., "content": ...}`` dicts.

    Yields:
        The cumulative partial reply text as chunks arrive from Bedrock.
    """
    # 1) start with the system prompt
    history_langchain_format: list = [SystemMessage(content=SYSTEM_PROMPT)]

    # 2) replay the user/assistant turns (other roles are intentionally skipped)
    for msg in history:
        if msg["role"] == "user":
            history_langchain_format.append(HumanMessage(content=msg["content"]))
        elif msg["role"] == "assistant":
            history_langchain_format.append(AIMessage(content=msg["content"]))

    # 3) append the new user message
    history_langchain_format.append(HumanMessage(content=message))

    # BUG FIX: the original primed with `full = next(stream)` before the loop,
    # which raises StopIteration inside a generator — a RuntimeError under
    # PEP 479 — when the stream is empty, and never yielded the first chunk
    # on its own. Accumulate lazily instead.
    full = None
    for chunk in bedrock_llm.stream(history_langchain_format):
        full = chunk if full is None else full + chunk
        yield full.content


with gr.Blocks(
    css_paths=["static/deval.css"],
    theme=gr.themes.Default(primary_hue="blue", secondary_hue="yellow"),
) as demo:
    # ── Logo + Header ─────────────────────────────────────────
    with gr.Row():
        with gr.Column(scale=1):
            gr.Image(
                value="static/logo.png",
                height=50,
                show_label=False,
                interactive=False,
                show_download_button=False,
                show_fullscreen_button=False,
                elem_id="logo-primary",  # styled by static/deval.css
            )
        with gr.Column(scale=10):
            gr.Markdown(
                "# DEvalBot\n\n"
                "**Hinweis:** Bitte gebe keine vertraulichen Informationen ein. "
                "Dazu zählen u.a. "
                "sensible personenbezogene Daten, institutsinterne "
                "Informationen oder Dokumente, unveröffentlichte Berichtsinhalte, "
                "vertrauliche Informationen oder Dokumente externer Organisationen "
                "sowie sensible erhobene Daten (wie etwa Interviewtranskripte).",
                elem_id="header-text",
            )

    # NOTE(review): placeholder for an auto-reload script — currently empty;
    # confirm whether the script was dropped intentionally.
    gr.HTML(
        """ """
    )

    gr.ChatInterface(
        chat,
        type="messages",
        multimodal=MULTIMODAL,
        editable=True,
        concurrency_limit=20,
        save_history=True,
    )

# BUG FIX: AUTHS was built but never passed to launch(), leaving the app
# unprotected. Enable basic auth only when both USER and PW are configured,
# so an unset environment keeps the original open-access behavior.
demo.queue().launch(
    ssr_mode=False,
    auth=AUTHS if all(AUTHS[0]) else None,
)