Improve ENV VARIABLE MANAGEMENT
Files changed:
- app.py (+19 -14)
- bedrock_client.py (+6 -15)
app.py
CHANGED
@@ -1,24 +1,28 @@
 import gradio as gr
-from utils import load_users
 from bedrock_client import bedrock_llm
 from langchain.schema import SystemMessage, HumanMessage, AIMessage
 import os
+from distutils.util import strtobool
 
 
+MULTIMODAL = os.environ.get('MULTIMODAL', 'False')
+
+# 1) convert common truthy/falsy strings to bool
+try:
+    MULTIMODAL = bool(strtobool(MULTIMODAL))
+except ValueError:
+    # catch unrecognized values
+    raise ValueError(f"Invalid MULTIMODAL value: {MULTIMODAL!r}. Use true/false, 1/0, yes/no.")
+
 AUTHS = [(os.environ.get('USER'), os.environ.get('PW'))]
 
+SYSTEM_PROMPT = os.environ.get('SYSTEM_PROMPT', '')
+
+
 def chat(message, history):
-    # 3a) Build Bedrock input (with system prompt + raw dict-history)
-    system_prompt = (
-        "Du bist DevalBot, ein konversationeller Assistent des Deutschen Evaluierungsinstituts "
-        "für Entwicklungsbewertung (DEval). DEval bietet staatlichen und zivilgesellschaftlichen "
-        "Organisationen in der Entwicklungszusammenarbeit unabhängige und wissenschaftlich fundierte "
-        "Evaluierungen. Deine Hauptsprache ist Deutsch; antworte daher standardmäßig auf Deutsch. "
-        "Du kannst zudem bei statistischen Analysen und Programmierung in Stata und R unterstützen."
-    )
 
     # 1) start with the system prompt
-    history_langchain_format: list = [SystemMessage(content=system_prompt)]
+    history_langchain_format: list = [SystemMessage(content=SYSTEM_PROMPT)]
 
     # 2) replay the user/assistant turns
     for msg in history:
@@ -31,7 +35,9 @@ def chat(message, history):
     history_langchain_format.append(HumanMessage(content=message))
 
     stream =bedrock_llm.stream(history_langchain_format)
+
     full = next(stream)
+
     for chunk in stream:
         full +=chunk
     yield full.content
@@ -62,8 +68,6 @@ with gr.Blocks(css_paths=["static/deval.css"],theme = gr.themes.Default(primary_
     "sowie sensible erhobene Daten (wie etwa Interviewtranskripte).", elem_id="header-text"
     )
 
-    #Hinweis: Bitte gebe keine vertraulichen Informationen ein. Dazu zählen u.a. sensible personenbezogene Daten, institutsinterne Informationen oder Dokumente, unveröffentlichte Berichtsinhalte, vertrauliche Informationen oder Dokumente externer Organisationen sowie sensible erhobene Daten (wie etwa Interviewtranskripte).
-    #logout_btn = gr.Button("Logout", elem_id="logout-btn")
     # inject auto-reload script
     gr.HTML(
     """
@@ -78,11 +82,12 @@ with gr.Blocks(css_paths=["static/deval.css"],theme = gr.themes.Default(primary_
     gr.ChatInterface(
         chat,
         type="messages",
+        multimodal=MULTIMODAL,
         editable=True,
-        concurrency_limit=
+        concurrency_limit=20,
         save_history=True,
     )
 
 
 
-demo.queue().launch(
+demo.queue().launch(ssr_mode=False)
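One caveat on the new boolean handling: `distutils.util.strtobool` is deprecated (PEP 632) and `distutils` is removed entirely in Python 3.12, so this import will fail on newer runtimes. A minimal stdlib-only sketch of the same conversion; the helper name `env_bool` and the two token sets are ours, not part of the commit:

# Stdlib-only stand-in for distutils.util.strtobool (removed in Python 3.12).
# env_bool is a hypothetical helper, not part of the committed code.
import os

_TRUTHY = {"1", "true", "t", "yes", "y", "on"}   # the same tokens strtobool accepts
_FALSY = {"0", "false", "f", "no", "n", "off"}

def env_bool(name: str, default: str = "false") -> bool:
    """Read an environment variable and coerce truthy/falsy strings to bool."""
    raw = os.environ.get(name, default).strip().lower()
    if raw in _TRUTHY:
        return True
    if raw in _FALSY:
        return False
    raise ValueError(f"Invalid {name} value: {raw!r}. Use true/false, 1/0, yes/no.")

MULTIMODAL = env_bool("MULTIMODAL")

This keeps the fail-fast behavior of the try/except in the diff while dropping the deprecated dependency.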
bedrock_client.py
CHANGED
@@ -4,24 +4,15 @@ from langchain_aws.chat_models import ChatBedrockConverse
 from langchain_aws.llms.bedrock import BedrockLLM
 
 
-# bedrock_llm1 = BedrockLLM(
-#     aws_access_key_id = os.environ["AWS_ACCESS_KEY_ID"],
-#     aws_secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"],
-#     region_name = "eu-west-1",
-#     provider = "mistral",
-#     model_id = "mistral.mistral-large-2402-v1:0",
-#     streaming = True,
-#     model_kwargs = {"temperature": 0.7},
-# )
-
 # Initialize the streaming Bedrock chat model
 bedrock_llm = ChatBedrockConverse(
-    aws_access_key_id =os.environ.get("AWS_ACCESS_KEY_ID"),
-    aws_secret_access_key =os.environ.get("AWS_SECRET_ACCESS_KEY"),
-    region_name =os.environ.get("AWS_DEFAULT_REGION", "eu-west-1"),
+    aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID"),
+    aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY"),
+    region_name = os.environ.get("AWS_DEFAULT_REGION", "eu-west-1"),
     provider = os.environ.get("PROVIDER", "mistral"),
-    model_id =os.environ.get("MODEL_ID", "mistral.mistral-large-2402-v1:0"), # or your preferred Bedrock model
-    temperature= 0.7)
+    model_id = os.environ.get("MODEL_ID", "mistral.mistral-large-2402-v1:0"), # or your preferred Bedrock model
+    temperature = float(os.environ.get("TEMPERATURE", "0.7"))
+)
 
 
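With every knob now read from the environment, a local smoke test only needs the variables exported before `bedrock_client` is imported, because the module evaluates `os.environ` at import time. A sketch, assuming valid AWS credentials are already present in the environment; the example values mirror the defaults in the diff:

# Hypothetical local smoke test for the env-driven client.
# The setdefault values are examples, not part of the commit.
import os

os.environ.setdefault("AWS_DEFAULT_REGION", "eu-west-1")
os.environ.setdefault("MODEL_ID", "mistral.mistral-large-2402-v1:0")
os.environ.setdefault("TEMPERATURE", "0.2")

# Import only after the environment is set up.
from bedrock_client import bedrock_llm
from langchain.schema import HumanMessage, SystemMessage

messages = [
    SystemMessage(content="Du bist DevalBot."),
    HumanMessage(content="Hallo!"),
]

# Accumulate streamed chunks the same way chat() in app.py does.
stream = bedrock_llm.stream(messages)
full = next(stream)
for chunk in stream:
    full += chunk
print(full.content)

Parsing TEMPERATURE with float() is the safe choice here: non-numeric values fail fast, and nothing from the environment is ever executed as code.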