sabahat-shakeel committed on
Commit
9c0b768
·
verified ·
1 Parent(s): d7399f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -67
app.py CHANGED
@@ -1,46 +1,3 @@
1
- # import streamlit as st
2
- # from transformers import AutoModelForCausalLM, AutoTokenizer
3
-
4
- # # Load the model and tokenizer
5
- # @st.cache_resource
6
- # def load_model_and_tokenizer():
7
- # model_name = "microsoft/DialoGPT-medium" # Replace with your chosen model
8
- # tokenizer = AutoTokenizer.from_pretrained(model_name)
9
- # model = AutoModelForCausalLM.from_pretrained(model_name)
10
- # return tokenizer, model
11
-
12
- # tokenizer, model = load_model_and_tokenizer()
13
-
14
- # # Streamlit App
15
- # st.title("General Chatbot")
16
- # st.write("A chatbot powered by an open-source model from Hugging Face.")
17
-
18
- # # Initialize the conversation
19
- # if "conversation_history" not in st.session_state:
20
- # st.session_state["conversation_history"] = []
21
-
22
- # # Input box for user query
23
- # user_input = st.text_input("You:", placeholder="Ask me anything...", key="user_input")
24
-
25
- # if st.button("Send") and user_input:
26
- # # Append user input to history
27
- # st.session_state["conversation_history"].append({"role": "user", "content": user_input})
28
-
29
- # # Tokenize and generate response
30
- # input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
31
- # chat_history_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
32
- # response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
33
-
34
- # # Append model response to history
35
- # st.session_state["conversation_history"].append({"role": "assistant", "content": response})
36
-
37
- # # Display the conversation
38
- # for message in st.session_state["conversation_history"]:
39
- # if message["role"] == "user":
40
- # st.write(f"**You:** {message['content']}")
41
- # else:
42
- # st.write(f"**Bot:** {message['content']}")
43
-
44
  import streamlit as st
45
  from transformers import AutoModelForCausalLM, AutoTokenizer
46
 
@@ -58,7 +15,7 @@ tokenizer, model = load_model_and_tokenizer()
58
  st.title("General Chatbot")
59
  st.write("A chatbot powered by an open-source model from Hugging Face.")
60
 
61
- # Initialize the conversation history
62
  if "conversation_history" not in st.session_state:
63
  st.session_state["conversation_history"] = []
64
 
@@ -68,30 +25,13 @@ user_input = st.text_input("You:", placeholder="Ask me anything...", key="user_i
68
  if st.button("Send") and user_input:
69
  # Append user input to history
70
  st.session_state["conversation_history"].append({"role": "user", "content": user_input})
71
-
72
- # Prepare the input for the model
73
- conversation_context = ""
74
- for message in st.session_state["conversation_history"]:
75
- if message["role"] == "user":
76
- conversation_context += f"User: {message['content']}\n"
77
- elif message["role"] == "assistant":
78
- conversation_context += f"Bot: {message['content']}\n"
79
-
80
- input_ids = tokenizer.encode(conversation_context + "Bot:", return_tensors="pt")
81
-
82
- # Generate the response with adjusted parameters
83
- chat_history_ids = model.generate(
84
- input_ids,
85
- max_length=500, # Increase maximum length for longer responses
86
- num_return_sequences=1,
87
- temperature=0.2, # Adjust for creativity (lower is more focused, higher is more diverse)
88
- top_p=0.5, # Use nucleus sampling for diversity
89
- top_k=50, # Limit to top-k tokens for more controlled output
90
- pad_token_id=tokenizer.eos_token_id
91
- )
92
-
93
- # Decode the response and add it to history
94
  response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
 
 
95
  st.session_state["conversation_history"].append({"role": "assistant", "content": response})
96
 
97
  # Display the conversation
@@ -100,3 +40,4 @@ for message in st.session_state["conversation_history"]:
100
  st.write(f"**You:** {message['content']}")
101
  else:
102
  st.write(f"**Bot:** {message['content']}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
 
15
  st.title("General Chatbot")
16
  st.write("A chatbot powered by an open-source model from Hugging Face.")
17
 
18
+ # Initialize the conversation
19
  if "conversation_history" not in st.session_state:
20
  st.session_state["conversation_history"] = []
21
 
 
25
  if st.button("Send") and user_input:
26
  # Append user input to history
27
  st.session_state["conversation_history"].append({"role": "user", "content": user_input})
28
+
29
+ # Tokenize and generate response
30
+ input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
31
+ chat_history_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
33
+
34
+ # Append model response to history
35
  st.session_state["conversation_history"].append({"role": "assistant", "content": response})
36
 
37
  # Display the conversation
 
40
  st.write(f"**You:** {message['content']}")
41
  else:
42
  st.write(f"**Bot:** {message['content']}")
43
+