# Streamlit chatbot app (exported from a Hugging Face Space).
import os

import speech_recognition as sr
import streamlit as st
from openai import OpenAI
# --- Page config & session state ---------------------------------------
# st.set_page_config must be the first Streamlit command in the script,
# so it runs before any session-state access or widget creation.
st.set_page_config(page_title="💬 Chatbot", page_icon="💬", layout="wide")

# Initialize session state: chat visibility flag, transcript, and the
# per-prompt feedback map.
if "chat_started" not in st.session_state:
    st.session_state.chat_started = False
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "feedback" not in st.session_state:
    st.session_state.feedback = {}
# 🎯 Welcome screen — shown only until the user presses "Start Chat".
if not st.session_state.chat_started:
    st.title("🤖 Welcome to Deepseek AI Chatbot!")
    st.write(
        "A smart chatbot powered by the new model Deepseek R1, "
        "designed to assist you with text generation."
    )
    st.subheader("✨ Features")
    st.markdown("""
    - 📝 **Generate content**: Stories, articles, code, poems, dialogues, and more!
    - 🎙️ **Voice Input**: Speak instead of typing your prompt.
    - 🎭 **Customizable Tone & Format**: Choose between formal, informal, humorous, technical styles.
    - ⚙️ **Adjustable Creativity**: Control randomness with temperature settings.
    - 📜 **Chat History**: Review past conversations and feedback.
    """)
    if st.button("🚀 Start Chat"):
        st.session_state.chat_started = True
        st.rerun()
    # Halt the script here so the chat UI below never renders on the
    # welcome screen.
    st.stop()
st.title("🤖 Deepseek AI Chatbot!")
# 🎤 Capture a spoken prompt from the microphone.
def get_voice_input():
    """Listen for one utterance and return it as text.

    Returns:
        The recognized string, or "" when nothing usable was captured
        (listen timeout, unintelligible audio, or the recognition
        service being unreachable).
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        st.info("🎤 Listening... Speak now!")
        try:
            audio = recognizer.listen(source, timeout=5)
            return recognizer.recognize_google(audio)
        except sr.WaitTimeoutError:
            # listen() raises this when no speech starts within `timeout`;
            # previously this escaped uncaught and crashed the app.
            st.error("⏱️ No speech detected within the time limit.")
        except sr.UnknownValueError:
            st.error("😕 Could not understand the audio.")
        except sr.RequestError:
            st.error("🚫 Speech Recognition service unavailable.")
    return ""
# 🎤 Voice input button: a recognized utterance pre-fills the prompt box
# via session state so it survives the rerun triggered by the click.
if st.button("🎤 Speak Prompt"):
    voice_text = get_voice_input()
    if voice_text:
        st.session_state["user_prompt"] = voice_text
    else:
        st.warning("No voice input detected.")

# 📝 User prompt text area (seeded from the voice transcript when present).
user_prompt = st.text_area("Enter your prompt:", st.session_state.get("user_prompt", ""))
# ⚙️ Sidebar: generation settings that feed into the prompt and API call.
st.sidebar.header("⚙️ Settings")
output_format = st.sidebar.selectbox("Select Output Format", ["Story", "Poem", "Article", "Code", "Dialogue"])
tone = st.sidebar.selectbox("Select Tone/Style", ["Formal", "Informal", "Humorous", "Technical"])
temperature = st.sidebar.slider("Creativity Level (Temperature)", 0.0, 1.0, 0.7)
max_tokens = st.sidebar.slider("Response Length (Max Tokens)", 100, 1024, 500)
creative_mode = st.sidebar.checkbox("Enable Creative Mode", value=True)
# 📜 Sidebar: chat-history browser with per-exchange feedback display.
st.sidebar.header("📜 Chat History")
if st.sidebar.button("🗑️ Clear Chat History"):
    st.session_state.chat_history = []
    st.session_state.feedback = {}
with st.sidebar.expander("📜 View Chat History", expanded=False):
    # Newest exchanges first.
    for i, chat in enumerate(reversed(st.session_state.chat_history)):
        st.markdown(f"**User:** {chat['user']}")
        # Truncate long answers; a toggle reveals the full text.
        bot_preview = chat['bot'][:200] + ("..." if len(chat['bot']) > 200 else "")
        st.markdown(f"**Bot:** {bot_preview}")
        if len(chat['bot']) > 200:
            if st.toggle(f"🔽 Show Full Response ({i+1})", key=f"toggle_{i}"):
                st.markdown(chat['bot'])
        # NOTE(review): feedback is keyed by the prompt text, so repeated
        # identical prompts share a single feedback entry — confirm this
        # is acceptable or key by history index instead.
        feedback_value = st.session_state.feedback.get(chat['user'], "No Feedback Given")
        st.markdown(f"**Feedback:** {feedback_value}")
        st.markdown("---")
response_container = st.empty()
feedback_container = st.empty()

# 🚀 Generate a response via NVIDIA's OpenAI-compatible endpoint.
if st.button("Generate Response"):
    if user_prompt.strip():
        # SECURITY: the API key was hard-coded in the source; read it from
        # the environment instead (set NVIDIA_API_KEY before running) and
        # rotate the previously committed key.
        client = OpenAI(
            base_url="https://integrate.api.nvidia.com/v1",
            api_key=os.environ.get("NVIDIA_API_KEY", ""),
        )
        messages = [{
            "role": "user",
            "content": f"Generate a {output_format.lower()} in {tone.lower()} style: {user_prompt}",
        }]
        try:
            # NOTE(review): the UI advertises "Deepseek R1" but this calls
            # a Falcon model — confirm which model is actually intended.
            completion = client.chat.completions.create(
                model="tiiuae/falcon3-7b-instruct",
                messages=messages,
                temperature=temperature,
                top_p=0.9 if creative_mode else 0.7,
                max_tokens=max_tokens,
                stream=True,
            )
            # Stream tokens into a live progress placeholder.
            response_text = ""
            progress_text = st.empty()
            for chunk in completion:
                if chunk.choices[0].delta.content is not None:
                    response_text += chunk.choices[0].delta.content
                    progress_text.markdown(f"### Generating... ⏳\n{response_text}")
            progress_text.empty()
            response_container.markdown(f"### Generated Response\n{response_text}")
            st.session_state.chat_history.append({"user": user_prompt, "bot": response_text})
        except Exception as e:
            st.error(f"❌ Error: {e}")
    else:
        st.warning("Please enter a prompt before generating a response.")

# Render feedback for the most recent exchange on EVERY run. The original
# code created the radio inside the button branch, so the freshly created
# widget's value (None) was stored immediately and the widget disappeared
# on the next rerun — feedback could never actually be recorded.
if st.session_state.chat_history:
    last_chat = st.session_state.chat_history[-1]
    feedback = feedback_container.radio(
        "Was this response helpful?",
        ["👍 Yes", "👎 No"],
        index=None,
        key=f"feedback_{len(st.session_state.chat_history)}",
        horizontal=True,
    )
    if feedback is not None:
        st.session_state.feedback[last_chat["user"]] = feedback