ajaynagotha commited on
Commit
23bfc7b
·
verified ·
1 Parent(s): 7cc1093

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -67
app.py CHANGED
@@ -1,86 +1,108 @@
1
  import streamlit as st
2
  import requests
3
  import logging
 
4
 
5
  # Configure logging
6
  logging.basicConfig(level=logging.INFO)
7
  logger = logging.getLogger(__name__)
8
 
9
- # Page configuration
10
  st.set_page_config(
11
  page_title="DeepSeek Chatbot - NextGenWebAI",
12
  page_icon="πŸ€–",
13
- layout="centered"
14
  )
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  # Initialize session state for chat history
17
  if "messages" not in st.session_state:
18
  st.session_state.messages = []
19
 
20
- # Sidebar configuration
21
  with st.sidebar:
22
- st.header("Model Configuration")
23
- st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")
 
24
 
25
- # Dropdown to select model
26
  model_options = [
27
  "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
28
  ]
29
- selected_model = st.selectbox("Select Model", model_options, index=0)
30
 
 
31
  system_message = st.text_area(
32
- "System Message",
33
- value="You are a friendly chatbot created by https://cursor-ai-demo.vercel.app. Provide clear, accurate, and brief answers. Keep responses polite, engaging, and to the point. If unsure, politely suggest alternatives.",
34
- height=100
35
- )
36
-
37
- max_tokens = st.slider(
38
- "Max Tokens",
39
- 10, 4000, 100
40
  )
41
 
42
- temperature = st.slider(
43
- "Temperature",
44
- 0.1, 4.0, 0.3
45
- )
46
-
47
- top_p = st.slider(
48
- "Top-p",
49
- 0.1, 1.0, 0.6
50
- )
51
 
52
  # Function to query the Hugging Face API
53
  def query(payload, api_url):
54
  headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
55
- logger.info(f"Sending request to {api_url} with payload: {payload}")
56
- response = requests.post(api_url, headers=headers, json=payload)
57
- logger.info(f"Received response: {response.status_code}, {response.text}")
58
  try:
 
 
59
  return response.json()
60
- except requests.exceptions.JSONDecodeError:
61
- logger.error(f"Failed to decode JSON response: {response.text}")
62
  return None
63
 
64
- # Chat interface
65
  st.title("πŸ€– DeepSeek Chatbot")
66
- st.caption("Powered by Hugging Face Inference API - Configure in sidebar")
67
 
68
- # Display chat history
69
  for message in st.session_state.messages:
70
  with st.chat_message(message["role"]):
71
- st.markdown(message["content"])
 
72
 
73
- # Handle input
74
- if prompt := st.chat_input("Type your message..."):
75
  st.session_state.messages.append({"role": "user", "content": prompt})
76
 
77
  with st.chat_message("user"):
78
- st.markdown(prompt)
79
 
80
  try:
81
- with st.spinner("Generating response..."):
82
- # Prepare the payload for the API
83
- # Combine system message and user input into a single prompt
84
  full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
85
  payload = {
86
  "inputs": full_prompt,
@@ -92,37 +114,16 @@ if prompt := st.chat_input("Type your message..."):
92
  }
93
  }
94
 
95
- # Dynamically construct the API URL based on the selected model
96
  api_url = f"https://api-inference.huggingface.co/models/{selected_model}"
97
- logger.info(f"Selected model: {selected_model}, API URL: {api_url}")
98
-
99
- # Query the Hugging Face API using the selected model
100
  output = query(payload, api_url)
101
 
102
- # Handle API response
103
- if output is not None and isinstance(output, list) and len(output) > 0:
104
- if 'generated_text' in output[0]:
105
- # Extract the assistant's response
106
- assistant_response = output[0]['generated_text'].strip()
107
-
108
- # Check for and remove duplicate responses
109
- responses = assistant_response.split("\n</think>\n")
110
- unique_response = responses[0].strip()
111
-
112
- logger.info(f"Generated response: {unique_response}")
113
-
114
- # Append response to chat only once
115
- with st.chat_message("assistant"):
116
- st.markdown(unique_response)
117
-
118
- st.session_state.messages.append({"role": "assistant", "content": unique_response})
119
- else:
120
- logger.error(f"Unexpected API response structure: {output}")
121
- st.error("Error: Unexpected response from the model. Please try again.")
122
  else:
123
- logger.error(f"Empty or invalid API response: {output}")
124
- st.error("Error: Unable to generate a response. Please check the model and try again.")
125
-
126
  except Exception as e:
127
  logger.error(f"Application Error: {str(e)}", exc_info=True)
128
- st.error(f"Application Error: {str(e)}")
 
1
  import streamlit as st
2
  import requests
3
  import logging
4
+ import time
5
 
6
# --- Logging -----------------------------------------------------------------
# Module-level logger; INFO so request/response flow shows up in app logs.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- Page configuration ------------------------------------------------------
st.set_page_config(
    page_icon="πŸ€–",
    page_title="DeepSeek Chatbot - NextGenWebAI",
    layout="wide",
)

# --- Custom CSS: chat-bubble styling -----------------------------------------
# Blue right-aligned bubbles for the user, grey left-aligned for the assistant,
# dark sidebar. NOTE(review): the icon/emoji strings above and below appear
# mojibake-encoded (UTF-8 bytes read as cp1252) — confirm the intended glyphs.
_CHAT_CSS = """
<style>
body {
font-family: 'Arial', sans-serif;
}
.stChatMessage {
border-radius: 10px;
padding: 10px;
margin: 5px 0;
}
.user {
background-color: #007BFF;
color: white;
text-align: right;
border-radius: 12px 12px 0px 12px;
}
.assistant {
background-color: #F1F1F1;
color: black;
text-align: left;
border-radius: 12px 12px 12px 0px;
}
.sidebar .sidebar-content {
background-color: #222;
color: white;
}
</style>
"""
st.markdown(_CHAT_CSS, unsafe_allow_html=True)
# --- Chat history ------------------------------------------------------------
# Streamlit reruns the whole script on every interaction; keep the transcript
# in session state so it survives reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# --- Sidebar: model and generation settings ----------------------------------
with st.sidebar:
    st.image("https://huggingface.co/front/thumbnails/hf-logo.png", width=150)
    st.header("βš™οΈ Model Configuration")
    st.markdown("[πŸ”‘ Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Model selection (single entry for now; extend the list to add models).
    model_options = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    ]
    selected_model = st.selectbox("πŸ” Select AI Model", model_options, index=0)

    # System prompt prepended to every request sent to the model.
    system_message = st.text_area(
        "πŸ“œ System Instructions",
        value="You are a friendly chatbot. Provide clear and engaging responses.",
        height=80,
    )

    # Generation parameters consumed by the request payload further below.
    max_tokens, temperature, top_p = (
        st.slider("πŸ”’ Max Tokens", 10, 4000, 300),
        st.slider("πŸ”₯ Temperature", 0.1, 2.0, 0.7),
        st.slider("🎯 Top-p", 0.1, 1.0, 0.9),
    )
 
 
 
 
 
74
 
75
  # Function to query the Hugging Face API
76
def query(payload, api_url):
    """POST *payload* to the Hugging Face Inference API at *api_url*.

    Authenticates with the ``HF_TOKEN`` Streamlit secret.

    Returns:
        The decoded JSON response on success, or ``None`` on any request,
        HTTP-status, or JSON-decode failure (callers must handle ``None``).
    """
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    try:
        # timeout= prevents the Streamlit script from hanging indefinitely on
        # a stalled connection (cold models on the HF API can queue for a
        # while, so the limit is generous).
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()  # Raise HTTP errors if any
        return response.json()
    except requests.exceptions.RequestException as e:
        # Covers connection errors, timeouts, HTTP errors, and (requests
        # >= 2.27) JSON decode errors — all reported to callers as None.
        logger.error(f"Request Error: {e}")
        return None
85
 
86
+ # Main Chat Interface
87
  st.title("πŸ€– DeepSeek Chatbot")
88
+ st.caption("πŸš€ AI-powered chatbot using Hugging Face API")
89
 
90
+ # Display chat history with enhanced UI
91
  for message in st.session_state.messages:
92
  with st.chat_message(message["role"]):
93
+ class_name = "user" if message["role"] == "user" else "assistant"
94
+ st.markdown(f"<div class='{class_name}'>{message['content']}</div>", unsafe_allow_html=True)
95
 
96
+ # Handle user input
97
+ if prompt := st.chat_input("πŸ’¬ Type your message..."):
98
  st.session_state.messages.append({"role": "user", "content": prompt})
99
 
100
  with st.chat_message("user"):
101
+ st.markdown(f"<div class='user'>{prompt}</div>", unsafe_allow_html=True)
102
 
103
  try:
104
+ with st.spinner("πŸ€– Thinking..."):
105
+ time.sleep(1) # Simulate processing time
 
106
  full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
107
  payload = {
108
  "inputs": full_prompt,
 
114
  }
115
  }
116
 
 
117
  api_url = f"https://api-inference.huggingface.co/models/{selected_model}"
 
 
 
118
  output = query(payload, api_url)
119
 
120
+ if output and isinstance(output, list) and 'generated_text' in output[0]:
121
+ assistant_response = output[0]['generated_text'].strip()
122
+ with st.chat_message("assistant"):
123
+ st.markdown(f"<div class='assistant'>{assistant_response}</div>", unsafe_allow_html=True)
124
+ st.session_state.messages.append({"role": "assistant", "content": assistant_response})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
  else:
126
+ st.error("⚠️ Unable to generate a response. Please try again.")
 
 
127
  except Exception as e:
128
  logger.error(f"Application Error: {str(e)}", exc_info=True)
129
+ st.error(f"⚠️ Error: {str(e)}")