sabahat-shakeel committed on
Commit
00d9971
·
verified ·
1 Parent(s): 8b51148

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -26
app.py CHANGED
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

st.title("🤖 Improved Chatbot")


@st.cache_resource
def load_model():
    """Load and cache the DialoGPT model and tokenizer.

    @st.cache_resource ensures the (large) download and construction happen
    once per server process, not on every Streamlit rerun.
    """
    model_name = "microsoft/DialoGPT-medium"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return model, tokenizer


model, tokenizer = load_model()

# Chat history persists in session_state across Streamlit reruns.
if "history" not in st.session_state:
    st.session_state.history = []

# Replay the conversation so far.
for message in st.session_state.history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# User input
if prompt := st.chat_input("Type your message..."):
    # Add user message to history
    st.session_state.history.append({"role": "user", "content": prompt})

    # Build the model context from the last 5 turns and cue the reply.
    context = (
        "\n".join(
            f"{msg['role']}: {msg['content']}"
            for msg in st.session_state.history[-5:]
        )
        + "\nassistant:"
    )
    # tokenizer(...) (unlike .encode()) also returns attention_mask, which
    # generate() needs to tell real tokens from padding.
    inputs = tokenizer(context, return_tensors="pt")

    # Generate response
    with st.spinner("Thinking..."):
        output = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            # Bound the *reply* length, not prompt+reply: the original
            # max_length=1000 counted the prompt too, so a long history
            # could leave no room for new tokens.
            max_new_tokens=200,
            pad_token_id=tokenizer.eos_token_id,
            no_repeat_ngram_size=3,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
        )

    # Decode only the newly generated tokens. Splitting the full decode on
    # "assistant:" is fragile: the reply itself may contain that substring.
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

    # Add assistant response to history, then rerun to render it.
    st.session_state.history.append({"role": "assistant", "content": response})

    st.rerun()