Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,69 @@
|
|
1 |
import streamlit as st
|
|
|
|
|
|
|
|
|
2 |
|
3 |
-
|
4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st
import time
import requests
import os
from huggingface_hub import InferenceClient

# --- Hugging Face API setup ---
# Read the API token from the environment; this is None when unset, so the
# Bearer header below would literally contain "None".
# NOTE(review): consider failing fast when the token is missing — confirm with deploy config.
API_TOKEN = os.environ.get("HUGGINGFACE_API_TOKEN")
# Raw Inference API endpoint for GPT-2 XL, polled directly via `requests`.
GPT2XL_API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2-xl"
# Chat model served through the InferenceClient chat-completion interface.
MISTRAL_MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.3"
# Shared client used by the Mistral query path.
client = InferenceClient(api_key=API_TOKEN)
# Query GPT-2 XL via the raw Inference API, waiting while the model loads.
def query_from_gpt2xl(text: str) -> str:
    """Send *text* to the GPT-2 XL inference endpoint and return the generated text.

    While the hosted model is still loading, the API answers with an "error"
    payload carrying an ``estimated_time``; we surface that in the UI and
    retry after sleeping. Retries are bounded so a persistently failing
    endpoint cannot hang the app forever (the original looped indefinitely).

    Raises:
        RuntimeError: on a non-loading API error, or when the model never
            finishes loading within the retry budget.
        requests.RequestException: on transport-level failures / timeout.
    """
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    max_attempts = 10  # bound the loading-wait loop instead of spinning forever
    for _ in range(max_attempts):
        response = requests.post(
            GPT2XL_API_URL,
            headers=headers,
            json={"inputs": text},
            timeout=60,  # never block a Streamlit rerun indefinitely
        )
        response_data = response.json()
        if isinstance(response_data, dict) and "error" in response_data:
            if "loading" in response_data["error"]:
                wait_time = response_data.get("estimated_time", 10)
                st.info(f"Model is loading. Waiting for {wait_time:.2f} seconds...")
                time.sleep(wait_time)
                continue
            # Previously a non-loading error fell through to the success path
            # and crashed on response_data[0]; report it explicitly instead.
            raise RuntimeError(f"Inference API error: {response_data['error']}")
        return response_data[0]["generated_text"]
    raise RuntimeError("Model did not finish loading after repeated retries.")
# Query Mistral through the InferenceClient chat-completion interface.
def query_from_mistral(text: str) -> str:
    """Return the assistant reply for a single-turn chat using *text* as the user message."""
    completion = client.chat.completions.create(
        model=MISTRAL_MODEL_NAME,
        messages=[{"role": "user", "content": text}],
        max_tokens=500,
    )
    # First choice only; the message payload is indexed like a mapping here.
    return completion.choices[0].message["content"]
def main():
    """Streamlit entry point: model picker, chat form, and transcript rendering."""
    st.set_page_config(page_title="Multi-Model Chat", layout="centered")
    st.title("🤖 Multi-Model Chat")
    st.markdown("Chat with either **GPT-2 XL** or **Mistral-7B-Instruct** via Hugging Face API.")

    # Persist the conversation across Streamlit reruns.
    st.session_state.setdefault("messages", [])

    model_choice = st.selectbox("Select a model:", ["GPT-2 XL", "Mistral-7B-Instruct"])

    with st.form(key="chat_form", clear_on_submit=True):
        user_input = st.text_input("You:", "")
        submit = st.form_submit_button("Send")

    if submit and user_input:
        st.session_state.messages.append({"role": "user", "content": user_input})
        with st.spinner("Fetching response..."):
            try:
                # Dispatch to the backend matching the selected model.
                if model_choice == "GPT-2 XL":
                    response = query_from_gpt2xl(user_input)
                elif model_choice == "Mistral-7B-Instruct":
                    response = query_from_mistral(user_input)
                st.session_state.messages.append({"role": "bot", "content": response})
            except Exception as e:
                st.error(f"Error: {e}")

    # Render the transcript, oldest first; only the two known roles are shown.
    labels = {"user": "You", "bot": "Bot"}
    for message in st.session_state.messages:
        label = labels.get(message["role"])
        if label is not None:
            st.markdown(f"**{label}:** {message['content']}")


if __name__ == "__main__":
    main()