sabahat-shakeel commited on
Commit
8b51148
·
verified ·
1 Parent(s): 9c0b768

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -33
app.py CHANGED
@@ -1,43 +1,87 @@
1
- import streamlit as st
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
- # Load the model and tokenizer
5
- @st.cache_resource
6
- def load_model_and_tokenizer():
7
- model_name = "microsoft/DialoGPT-medium" # Replace with your chosen model
8
- tokenizer = AutoTokenizer.from_pretrained(model_name)
9
- model = AutoModelForCausalLM.from_pretrained(model_name)
10
- return tokenizer, model
11
 
12
- tokenizer, model = load_model_and_tokenizer()
13
 
14
- # Streamlit App
15
- st.title("General Chatbot")
16
- st.write("A chatbot powered by an open-source model from Hugging Face.")
17
 
18
- # Initialize the conversation
19
- if "conversation_history" not in st.session_state:
20
- st.session_state["conversation_history"] = []
21
 
22
- # Input box for user query
23
- user_input = st.text_input("You:", placeholder="Ask me anything...", key="user_input")
24
 
25
- if st.button("Send") and user_input:
26
- # Append user input to history
27
- st.session_state["conversation_history"].append({"role": "user", "content": user_input})
28
 
29
- # Tokenize and generate response
30
- input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
31
- chat_history_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
32
- response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
- # Append model response to history
35
- st.session_state["conversation_history"].append({"role": "assistant", "content": response})
 
 
 
 
 
 
 
 
36
 
37
- # Display the conversation
38
- for message in st.session_state["conversation_history"]:
39
- if message["role"] == "user":
40
- st.write(f"**You:** {message['content']}")
41
- else:
42
- st.write(f"**Bot:** {message['content']}")
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import streamlit as st
2
+ # from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
+ # # Load the model and tokenizer
5
+ # @st.cache_resource
6
+ # def load_model_and_tokenizer():
7
+ # model_name = "microsoft/DialoGPT-medium" # Replace with your chosen model
8
+ # tokenizer = AutoTokenizer.from_pretrained(model_name)
9
+ # model = AutoModelForCausalLM.from_pretrained(model_name)
10
+ # return tokenizer, model
11
 
12
+ # tokenizer, model = load_model_and_tokenizer()
13
 
14
+ # # Streamlit App
15
+ # st.title("General Chatbot")
16
+ # st.write("A chatbot powered by an open-source model from Hugging Face.")
17
 
18
+ # # Initialize the conversation
19
+ # if "conversation_history" not in st.session_state:
20
+ # st.session_state["conversation_history"] = []
21
 
22
+ # # Input box for user query
23
+ # user_input = st.text_input("You:", placeholder="Ask me anything...", key="user_input")
24
 
25
+ # if st.button("Send") and user_input:
26
+ # # Append user input to history
27
+ # st.session_state["conversation_history"].append({"role": "user", "content": user_input})
28
 
29
+ # # Tokenize and generate response
30
+ # input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
31
+ # chat_history_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
32
+ # response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
33
+
34
+ # # Append model response to history
35
+ # st.session_state["conversation_history"].append({"role": "assistant", "content": response})
36
+
37
+ # # Display the conversation
38
+ # for message in st.session_state["conversation_history"]:
39
+ # if message["role"] == "user":
40
+ # st.write(f"**You:** {message['content']}")
41
+ # else:
42
+ # st.write(f"**Bot:** {message['content']}")
43
+ import streamlit as st
44
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
45
+
46
# Set up the page title shown at the top of the Streamlit app.
st.title("🤖 Simple Chatbot")
48
 
49
# Initialize the Hugging Face pipeline (cached so the model loads once per process).
@st.cache_resource
def load_chatbot(model_name: str = "microsoft/DialoGPT-medium"):
    """Load a causal-LM chat pipeline, cached across Streamlit reruns.

    Args:
        model_name: Hugging Face Hub model id. Defaults to DialoGPT-medium;
            other causal LMs (e.g. "gpt2") can be passed instead.

    Returns:
        A transformers "text-generation" pipeline wrapping the model and
        its tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
57
+
58
chatbot = load_chatbot()

# Initialize session state for chat history: a list of
# {"role": "user"|"assistant", "content": str} dicts.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation so it persists across Streamlit reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle a new user message from the chat input box.
if prompt := st.chat_input("Type your message..."):
    # Record the user's message before generating a reply.
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Generate the bot response.
    with st.spinner("Thinking..."):
        # return_full_text=False makes the pipeline return only the newly
        # generated continuation; the default echoes the prompt back, so the
        # bot would repeat the user's message at the start of every reply.
        outputs = chatbot(
            prompt,
            max_length=1000,
            num_return_sequences=1,
            pad_token_id=chatbot.tokenizer.eos_token_id,
            return_full_text=False,
        )
        bot_response = outputs[0]["generated_text"].strip()
        # Guard against an empty generation so the chat bubble still renders.
        if not bot_response:
            bot_response = "..."

    st.session_state.messages.append({"role": "assistant", "content": bot_response})

    # Rerun so the freshly appended messages are drawn by the history loop above.
    st.rerun()