# Hugging Face Space: DeepSeek Chatbot (author: ruslanmv)
# NOTE(review): the following metadata was web-page chrome captured with the
# file and is not Python — kept here as a comment so the module parses:
#   "Update app.py" — commit b1c2780 (verified) — raw / history / blame — 3.57 kB
# app.py
import os
import requests
import streamlit as st
from models import get_hf_api
# --- Inference API configuration ---------------------------------------------
# The endpoint URL comes from the project-local `models` helper; every request
# is authenticated with the HF token stored in Streamlit secrets (plain mapping
# access — presumably raises KeyError if HF_TOKEN is not configured; confirm).
API_URL = get_hf_api()
headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}

# --- Page setup ---------------------------------------------------------------
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",  # fixed: original contained a mis-encoded (mojibake) emoji
    layout="centered",
)

# Chat history must live in session state so it survives Streamlit's
# top-to-bottom rerun on every interaction.
if "messages" not in st.session_state:
    st.session_state.messages = []
# --- Sidebar: model + sampling controls ---------------------------------------
with st.sidebar:
    st.header("Model Configuration")
    st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # System prompt prepended to every request sent to the model.
    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100,
    )

    # Generation parameters forwarded verbatim to the Inference API.
    max_tokens = st.slider("Max Tokens", min_value=1, max_value=4000, value=512)
    temperature = st.slider("Temperature", min_value=0.1, max_value=4.0, value=0.7)
    top_p = st.slider("Top-p", min_value=0.1, max_value=1.0, value=0.9)
# --- Chat interface ------------------------------------------------------------
st.title("🤖 DeepSeek Chatbot")  # fixed: emoji was mojibake in the original
st.caption("Powered by Hugging Face Inference API - Configure in sidebar")

# Replay the stored conversation on every rerun so the transcript persists.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# --- Handle a new user message -------------------------------------------------
# BUG FIX: the original used bare `return` (3x) to bail out of the error paths,
# but this code runs at module level, not inside a function — `return` outside
# a function is a SyntaxError. Restructured with an `assistant_response`
# sentinel so the error paths simply fall through without rendering a reply.
if prompt := st.chat_input("Type your message..."):
    # Record and echo the user's message immediately.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    assistant_response = None  # set only on a successful API round-trip
    try:
        with st.spinner("Generating response..."):
            # Single-turn prompt template; the sidebar system message is
            # prepended on every request.
            full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"

            # Query the Inference API. A timeout keeps a stuck request from
            # hanging the Streamlit script forever (the original had none).
            response = requests.post(
                API_URL,
                headers=headers,
                json={
                    "inputs": full_prompt,
                    "parameters": {
                        "max_new_tokens": max_tokens,
                        "temperature": temperature,
                        "top_p": top_p,
                        "return_full_text": False,
                    },
                },
                timeout=60,
            )

        if response.status_code != 200:
            # Surface API errors. Guard json(): error bodies are not always
            # JSON (e.g. an HTML gateway page), and json() raises on those.
            try:
                error_msg = response.json().get('error', 'Unknown API error')
            except ValueError:
                error_msg = response.text or 'Unknown API error'
            st.error(f"API Error: {error_msg}")
            if "loading" in error_msg.lower():
                st.info("Please wait a moment and try again. The model might be loading.")
        else:
            # Successful responses normally arrive as
            # [{"generated_text": "..."}]; anything else is reported as-is.
            result = response.json()
            if isinstance(result, list):
                assistant_response = result[0].get('generated_text', 'No response generated')
                # Strip the echoed prompt prefix so only the reply remains.
                if "Assistant:" in assistant_response:
                    assistant_response = assistant_response.split("Assistant:")[-1].strip()
            elif isinstance(result, dict) and 'error' in result:
                st.error(f"API Error: {result['error']}")
            else:
                st.error("Unexpected response format from API")
    except Exception as e:
        # Top-level boundary: report anything unexpected (network failures,
        # malformed payloads) to the user instead of crashing the app.
        st.error(f"Application Error: {str(e)}")

    # Render and persist the reply only when the round-trip succeeded.
    if assistant_response is not None:
        with st.chat_message("assistant"):
            st.markdown(assistant_response)
        st.session_state.messages.append({"role": "assistant", "content": assistant_response})