# NOTE(review): the lines below were Hugging Face Spaces page chrome
# (status badges, file size, commit hashes, line-number gutter) picked up
# during extraction -- not Python source. Commented out so the file parses.
# Spaces: Running / Running | File size: 4,234 Bytes
# commits: 014199f a505ed2 dc84669 f904c62 0ffc7ed
# import gradio as gr
# from bedrock_client import bedrock_llm
# from langchain.schema import SystemMessage, HumanMessage, AIMessage
# import os
# from distutils.util import strtobool
# MULTIMODAL = os.environ.get("MULTIMODAL", "false")
# # 1) convert common truthy/falsy strings to bool
# try:
# MULTIMODAL = bool(strtobool(MULTIMODAL))
# except ValueError:
# # catch unrecognized values
# print(f"Invalid MULTIMODAL value: Use true/false, 1/0, yes/no.")
# AUTHS = [(os.environ.get('USER'), os.environ.get('PW'))]
# SYSTEM_PROMPT = os.environ.get('SYSTEM_PROMPT', '')
# def chat(message, history):
# # 1) start with the system prompt
# history_langchain_format: list = [SystemMessage(content=SYSTEM_PROMPT)]
# # 2) replay the user/assistant turns
# for msg in history:
# if msg["role"] == "user":
# history_langchain_format.append(HumanMessage(content=msg["content"]))
# elif msg["role"] == "assistant":
# history_langchain_format.append(AIMessage(content=msg["content"]))
# # 3) append the new user message
# history_langchain_format.append(HumanMessage(content=message))
# stream =bedrock_llm.stream(history_langchain_format)
# full = next(stream)
# for chunk in stream:
# full +=chunk
# yield full.content
# with gr.Blocks(css_paths=["static/deval.css"],theme = gr.themes.Default(primary_hue="blue", secondary_hue="yellow"),) as demo:
# # ── Logo + Header + Logout ──────────────────────────────────
# with gr.Row():
# with gr.Column(scale=1):
# gr.Image(
# value="static/logo.png",
# height=50,
# show_label=False,
# interactive=False,
# show_download_button=False,
# show_fullscreen_button=False,
# elem_id="logo-primary", # matches the CSS above
# )
# with gr.Column(scale=10):
# gr.Markdown(
# "# DEvalBot\n\n"
# "**Hinweis:** Bitte gebe keine vertraulichen Informationen ein. "
# "Dazu zΓ€hlen u.a. sensible personenbezogene Daten, institutsinterne "
# "Informationen oder Dokumente, unverΓΆffentlichte Berichtsinhalte, "
# "vertrauliche Informationen oder Dokumente externer Organisationen "
# "sowie sensible erhobene Daten (wie etwa Interviewtranskripte).", elem_id="header-text"
# )
# # inject auto-reload script
# gr.HTML(
# """
# <script>
#     // Reload the page after 1 second (1,000 ms)
# setTimeout(() => {
# window.location.reload();
# }, 1000);
# </script>
# """
# )
# gr.ChatInterface(
# chat,
# type="messages",
# multimodal=MULTIMODAL,
# editable=True,
# concurrency_limit=20,
# save_history=True,
# )
# demo.queue().launch(auth=AUTHS, share=True, ssr_mode=False)
import gradio as gr
# Replace this with your target URL
# Target URL of the relocated Space; rendered below as a clickable link.
NEW_PAGE_URL = "https://huggingface.co/spaces/evaluatorhub42/DEvalbot"

# Minimal "we have moved" landing page: keeps the DEval branding (logo +
# custom CSS) but replaces the old chat interface with a single redirect link.
with gr.Blocks(
    css_paths=["static/deval.css"],
    theme=gr.themes.Default(primary_hue="blue", secondary_hue="yellow"),
) as demo:
    # -- Header row: logo on the left, empty space on the right ----------
    with gr.Row():
        with gr.Column(scale=1):
            gr.Image(
                value="static/logo.png",
                height=50,
                show_label=False,
                interactive=False,
                show_download_button=False,
                show_fullscreen_button=False,
                elem_id="logo-primary",  # styled via static/deval.css
            )
        with gr.Column(scale=10):
            pass  # reserved: no title shown next to the logo on this page

    # -- Body row: redirect message aligned under the header text column --
    with gr.Row():
        with gr.Column(scale=1):
            pass  # spacer column, keeps the message aligned with the logo column
        with gr.Column(scale=10):
            gr.Markdown(
                f"""
                ## ⚠️ DEvalBot has moved!
                Please visit the new page:
                👉 [**{NEW_PAGE_URL}**]({NEW_PAGE_URL})
                """,
                elem_id="redirect-message",
            )

# Original line ended in a stray " |" (scrape residue) -- a SyntaxError; removed.
demo.launch()