sabahat-shakeel committed on
Commit
a8acb9b
·
verified ·
1 Parent(s): 0fe1bb0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -98
app.py CHANGED
@@ -1,99 +1,42 @@
1
  import streamlit as st
2
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
3
-
4
- # Configure the Hugging Face API key (no need to pass it in the pipeline call)
5
- HF_API_KEY = st.secrets['huggingface_api_key']
6
-
7
- # Ensure you're logged in using the Hugging Face CLI if using private models
8
- # huggingface-cli login
9
-
10
- # Initialize the Hugging Face model and tokenizer
11
- model_name = 'gpt2-medium' # or another GPT-2 version you want to use
12
- model = AutoModelForCausalLM.from_pretrained(model_name)
13
- tokenizer = AutoTokenizer.from_pretrained(model_name)
14
-
15
- # Initialize the text generation pipeline
16
- generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
17
-
18
- # Function to get response from the Hugging Face model
19
- def get_chatbot_response(user_input):
20
- try:
21
- # Generate the response using the Hugging Face model
22
- response = generator(user_input, max_length=100, num_return_sequences=1)
23
- return response[0]['generated_text']
24
- except Exception as e:
25
- return f"Error: {str(e)}"
26
-
27
- # Streamlit interface
28
- st.set_page_config(page_title="Smart ChatBot", layout="centered")
29
-
30
- # Custom CSS for chat bubbles with full width and emojis
31
- st.markdown("""
32
- <style>
33
- .chat-container {
34
- display: flex;
35
- flex-direction: column;
36
- width: 100%;
37
- }
38
- .chat-bubble {
39
- width: 100%;
40
- padding: 15px;
41
- margin: 10px 0;
42
- border-radius: 10px;
43
- font-size: 18px;
44
- color: white;
45
- display: inline-block;
46
- line-height: 1.5;
47
- }
48
- .user-bubble {
49
- background: #6a82fb; /* Soft blue */
50
- align-self: flex-end;
51
- border-radius: 10px 10px 10px 10px;
52
- }
53
- .bot-bubble {
54
- background: #fc5c7d; /* Soft pink */
55
- align-self: flex-start;
56
- border-radius: 10px 10px 10px 10px;
57
- }
58
- .chat-header {
59
- font-size: 35px;
60
- font-weight: bold;
61
- margin-bottom: 20px;
62
- color: #3d3d3d;
63
- }
64
- .emoji {
65
- font-size: 22px;
66
- margin-right: 10px;
67
- }
68
- </style>
69
- """, unsafe_allow_html=True)
70
-
71
- # Chat header and intro
72
- st.markdown('<div class="chat-header">AI Chatbot - Your Companion πŸ’»</div>', unsafe_allow_html=True)
73
- st.write("Powered by Hugging Face AI for smart, engaging conversations. πŸ€–")
74
-
75
- # Initialize session state for conversation history if not already initialized
76
- if "history" not in st.session_state:
77
- st.session_state["history"] = []
78
-
79
- # Create the chat form
80
- with st.form(key="chat_form", clear_on_submit=True):
81
- user_input = st.text_input("Your message here... ✍️", max_chars=2000, label_visibility="collapsed")
82
- submit_button = st.form_submit_button("Send πŸš€")
83
-
84
- if submit_button:
85
- if user_input:
86
- # Get response from the chatbot
87
- response = get_chatbot_response(user_input)
88
- # Store user input and bot response in session state history
89
- st.session_state.history.append((user_input, response))
90
- else:
91
- st.warning("Please Enter A Prompt πŸ˜…")
92
-
93
- # Display chat history
94
- if st.session_state["history"]:
95
- st.markdown('<div class="chat-container">', unsafe_allow_html=True)
96
- for user_input, response in st.session_state["history"]:
97
- st.markdown(f'<div class="chat-bubble user-bubble"><span class="emoji">πŸ‘€</span>You: {user_input}</div>', unsafe_allow_html=True)
98
- st.markdown(f'<div class="chat-bubble bot-bubble"><span class="emoji">πŸ€–</span>Bot: {response}</div>', unsafe_allow_html=True)
99
- st.markdown('</div>', unsafe_allow_html=True)
 
1
  import streamlit as st
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+
4
# Load the model and tokenizer
@st.cache_resource
def load_model_and_tokenizer(model_name: str = "microsoft/DialoGPT-medium"):
    """Download (or load from cache) the tokenizer and causal-LM model.

    The model name is a parameter (defaulting to the previous hard-coded
    value) so the same helper can serve other checkpoints; existing
    zero-argument callers are unaffected. `st.cache_resource` keys the
    cache on the argument, so each model is loaded at most once per
    Streamlit server process.

    Args:
        model_name: Hugging Face Hub model id of a causal language model.

    Returns:
        (tokenizer, model) tuple ready for generation.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model
11
+
12
tokenizer, model = load_model_and_tokenizer()

# Streamlit App
st.title("General Chatbot")
st.write("A chatbot powered by an open-source model from Hugging Face.")

# Initialize the conversation history (list of {"role", "content"} dicts)
# once per browser session.
if "conversation_history" not in st.session_state:
    st.session_state["conversation_history"] = []

# Input box for user query
user_input = st.text_input("You:", placeholder="Ask me anything...", key="user_input")

# Ignore empty or whitespace-only submissions.
if st.button("Send") and user_input.strip():
    # Append user input to history
    st.session_state["conversation_history"].append({"role": "user", "content": user_input})

    # Tokenize the turn; DialoGPT expects an EOS token to mark the end of
    # the user's utterance.
    encoded = tokenizer(user_input + tokenizer.eos_token, return_tensors="pt")
    # max_new_tokens bounds only the *reply* length. The previous
    # max_length=1000 counted the prompt too, so long prompts silently
    # shrank (or eliminated) the reply. Passing attention_mask explicitly
    # avoids ambiguity since pad_token_id is set equal to eos_token_id.
    output_ids = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_new_tokens=200,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens (everything past the prompt).
    response = tokenizer.decode(
        output_ids[:, encoded["input_ids"].shape[-1]:][0],
        skip_special_tokens=True,
    ).strip()
    # DialoGPT sometimes generates nothing but EOS; show a placeholder
    # instead of rendering an empty bot message.
    if not response:
        response = "(no response generated)"

    # Append model response to history
    st.session_state["conversation_history"].append({"role": "assistant", "content": response})

# Display the conversation, oldest first.
for message in st.session_state["conversation_history"]:
    if message["role"] == "user":
        st.write(f"**You:** {message['content']}")
    else:
        st.write(f"**Bot:** {message['content']}")