import streamlit as st
import speech_recognition as sr
from openai import OpenAI
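
# Streamlit chatbot app: a welcome screen, voice or text prompt input,
# sidebar generation settings, and responses streamed from an
# OpenAI-compatible endpoint.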
# Initialize session state for chat visibility, chat history, and feedback
if "chat_started" not in st.session_state:
    st.session_state.chat_started = False
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "feedback" not in st.session_state:
    st.session_state.feedback = {}
st.set_page_config(page_title="💬 Chatbot", page_icon="💬", layout="wide")
# 🎯 Welcome Screen (shown only if the chat has not started)
if not st.session_state.chat_started:
    st.title("🤖 Welcome to the Deepseek AI Chatbot!")
    st.write("A smart chatbot powered by the new Deepseek R1 model, designed to assist you with text generation.")
    st.subheader("✨ Features")
    st.markdown("""
- 📝 **Generate content**: Stories, articles, code, poems, dialogues, and more!
- 🎙️ **Voice Input**: Speak instead of typing your prompt.
- 🎭 **Customizable Tone & Format**: Choose between formal, informal, humorous, and technical styles.
- ⚙️ **Adjustable Creativity**: Control randomness with temperature settings.
- 📜 **Chat History**: Review past conversations and feedback.
    """)
    if st.button("🚀 Start Chat"):
        st.session_state.chat_started = True
        st.rerun()
    st.stop()
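
# Main chat interface (reached only after the user clicks Start Chat)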
st.title("🤖 Deepseek AI Chatbot!")

# 🎤 Function to capture voice input
def get_voice_input():
    """Listen on the default microphone and return the recognized text, or an empty string on failure."""
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        st.info("🎤 Listening... Speak now!")
        try:
            audio = recognizer.listen(source, timeout=5)
            text = recognizer.recognize_google(audio)
            return text
        except sr.UnknownValueError:
            st.error("😕 Could not understand the audio.")
        except sr.RequestError:
            st.error("🌐 Speech Recognition service unavailable.")
    return ""
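
# Recognized speech is stored in st.session_state["user_prompt"] so the
# text area below is pre-filled with it on the next rerun.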
# 🎤 Voice Input Button
if st.button("🎤 Speak Prompt"):
    voice_text = get_voice_input()
    if voice_text:
        st.session_state["user_prompt"] = voice_text
    else:
        st.warning("No voice input detected.")

# 📝 User Input Text Area
user_prompt = st.text_area("Enter your prompt:", st.session_state.get("user_prompt", ""))

# ⚙️ Sidebar Settings
st.sidebar.header("⚙️ Settings")
output_format = st.sidebar.selectbox("Select Output Format", ["Story", "Poem", "Article", "Code", "Dialogue"])
tone = st.sidebar.selectbox("Select Tone/Style", ["Formal", "Informal", "Humorous", "Technical"])
temperature = st.sidebar.slider("Creativity Level (Temperature)", 0.0, 1.0, 0.7)
max_tokens = st.sidebar.slider("Response Length (Max Tokens)", 100, 1024, 500)
creative_mode = st.sidebar.checkbox("Enable Creative Mode", value=True)
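# These settings feed the chat.completions request below: temperature controls
# randomness, creative_mode raises top_p from 0.7 to 0.9, and max_tokens caps
# the response length.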

# 📜 Chat History in Sidebar
st.sidebar.header("📜 Chat History")
if st.sidebar.button("🗑️ Clear Chat History"):
    st.session_state.chat_history = []
    st.session_state.feedback = {}
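
# Show previous turns (most recent first) with truncated bot replies,
# an optional full-response toggle, and any recorded feedback.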
with st.sidebar.expander("📖 View Chat History", expanded=False):
    for i, chat in enumerate(reversed(st.session_state.chat_history)):
        st.markdown(f"**User:** {chat['user']}")
        bot_preview = chat['bot'][:200] + ("..." if len(chat['bot']) > 200 else "")
        st.markdown(f"**Bot:** {bot_preview}")
        if len(chat['bot']) > 200:
            if st.toggle(f"🔍 Show Full Response ({i+1})", key=f"toggle_{i}"):
                st.markdown(chat['bot'])
        feedback_value = st.session_state.feedback.get(chat['user'], "No Feedback Given")
        st.markdown(f"**Feedback:** {feedback_value}")
        st.markdown("---")
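
# Placeholders so the generated response and the feedback widget render
# in a fixed spot in the main area.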
response_container = st.empty()
feedback_container = st.empty()

# 🚀 Generate Response
if st.button("Generate Response"):
    if user_prompt.strip():
        client = OpenAI(
            base_url="https://integrate.api.nvidia.com/v1",
            api_key="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"
        )
        messages = [{"role": "user", "content": f"Generate a {output_format.lower()} in {tone.lower()} style: {user_prompt}"}]
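        # Stream the completion from NVIDIA's OpenAI-compatible endpoint so the
        # reply can be rendered incrementally while it is being generated.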
        try:
            completion = client.chat.completions.create(
                model="tiiuae/falcon3-7b-instruct",
                messages=messages,
                temperature=temperature,
                top_p=0.9 if creative_mode else 0.7,
                max_tokens=max_tokens,
                stream=True
            )
            response_text = ""
            progress_text = st.empty()
            for chunk in completion:
                if chunk.choices[0].delta.content is not None:
                    response_text += chunk.choices[0].delta.content
                    progress_text.markdown(f"### Generating... ⏳\n{response_text}")
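            # Clear the streaming preview, show the final response, and record the turn.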
            progress_text.empty()
            response_container.markdown(f"### Generated Response\n{response_text}")
            chat_entry = {"user": user_prompt, "bot": response_text}
            st.session_state.chat_history.append(chat_entry)
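            # Ask for a quick rating and keep it keyed by the prompt text.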
            feedback = feedback_container.radio(
                "Was this response helpful?",
                ["👍 Yes", "👎 No"],
                index=None,
                key=f"feedback_{len(st.session_state.chat_history)}",
                horizontal=True
            )
            st.session_state.feedback[user_prompt] = feedback
        except Exception as e:
            st.error(f"❌ Error: {e}")