KvrParaskevi committed
Commit 77d3d66 · verified · 1 Parent(s): 1004391

Update app.py

Files changed (1): app.py +12 -5
app.py CHANGED
@@ -36,10 +36,16 @@ with st.container():
 
     #Check if chat history exists in this session
     if 'chat_history' not in st.session_state:
-        st.session_state.chat_history = [ ] #Initialize chat history
+        st.session_state.chat_history = [
+            {
+                "role": "system",
+                "content": "You are a friendly chatbot who always helps the user book a hotel room based on his/her needs."
+                + "Based on the current social norms you wait for the user's response to your proposals.",
+            },
+            {"role": "assistant", "content": "Hello, how can I help you today?"},
+        ] #Initialize chat history
 
     if 'model' not in st.session_state:
-        st.write("Model added in state.")
         st.session_state.model = model
 
     #renders chat history
@@ -47,7 +53,8 @@ with st.container():
         with st.chat_message(message["role"]):
            st.write(message["content"])
 
-
+    tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+    st.write(tokenizer.decode(tokenized_chat[0]))
    #Set up input text field
    input_text = st.chat_input(placeholder="Here you can chat with our hotel booking model.")
 
@@ -58,8 +65,8 @@ with st.container():
 
        #chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model= chat_model)
        #first_answer = chat_response.split("Human")[0] #Because of Predict it prints the whole conversation.Here we seperate the first answer only.
-        output = model_pipeline(input_text, max_new_tokens=20)
-        first_answer = output[0]["generated_text"]
+        outputs = model.generate(tokenized_chat, max_new_tokens=128)
+        first_answer = tokenizer.decode(outputs[0])
 
        with st.chat_message("assistant"):
            st.write(first_answer)
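A small detail worth flagging in the new initializer: the two system-prompt string literals are joined with + but neither ends nor begins with a space, so the model sees "...his/her needs.Based on the current social norms...". A minimal sketch of the same seed history with the separator restored (only the added space is an assumption about intent; the wording is unchanged):

# Seed history in the role/content message format that
# tokenizer.apply_chat_template() expects. Identical to the committed
# list except for the explicit space joining the two system sentences.
chat_history = [
    {
        "role": "system",
        "content": (
            "You are a friendly chatbot who always helps the user book a hotel "
            "room based on his/her needs. "
            "Based on the current social norms you wait for the user's "
            "response to your proposals."
        ),
    },
    {"role": "assistant", "content": "Hello, how can I help you today?"},
]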
 
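The templating hunk passes a `messages` variable to `tokenizer.apply_chat_template`, but no `messages` is defined in the changed lines; `st.session_state.chat_history` is presumably the intended argument. A hedged sketch of that step, using a generic chat checkpoint as a stand-in since the commit does not show which tokenizer app.py loads:

from transformers import AutoTokenizer

# Stand-in checkpoint -- an assumption; the commit does not show the model id.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

# Render the history through the model's chat template and tokenize it.
# add_generation_prompt=True appends the assistant-turn header so that
# generate() continues as the assistant instead of extending the user turn.
tokenized_chat = tokenizer.apply_chat_template(
    chat_history,                 # in app.py: st.session_state.chat_history
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
)
print(tokenizer.decode(tokenized_chat[0]))  # inspect the rendered prompt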
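Finally, a hedged note on the generation hunk: `tokenizer.decode(outputs[0])` decodes the prompt tokens together with the newly generated ones, so `first_answer` will repeat the whole templated conversation in the assistant bubble. Slicing off the prompt length keeps only the new turn; the slice and the `skip_special_tokens` flag below are suggestions, not part of the commit:

import torch
from transformers import AutoModelForCausalLM

# Same stand-in checkpoint as above; app.py loads its real model elsewhere.
model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

with torch.no_grad():
    # max_new_tokens=128 matches the committed call.
    outputs = model.generate(tokenized_chat, max_new_tokens=128)

# outputs[0] holds the prompt ids followed by the newly generated ids;
# decoding it whole would echo the entire conversation. Keep only the
# tokens past the prompt length.
new_tokens = outputs[0][tokenized_chat.shape[-1]:]
first_answer = tokenizer.decode(new_tokens, skip_special_tokens=True)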