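"""Streamlit front end for B-Bot, Bibek's personal chatbot.

Retrieves question-relevant context via chat.generate_response, then answers
with the MBZUAI/LaMini-Flan-T5-248M seq2seq model.
"""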
import time

import streamlit as st
from streamlit_chat import message
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

from chat import generate_response

if "tokenizer" not in st.session_state:
    st.session_state["tokenizer"] = AutoTokenizer.from_pretrained(
        "MBZUAI/LaMini-Flan-T5-248M"
    )
    st.session_state["model"] = AutoModelForSeq2SeqLM.from_pretrained(
        "MBZUAI/LaMini-Flan-T5-248M"
    )

st.title("B-Bot : Bibek's Personal Chatbot")
# Chat history: "generated" stores the bot's replies, "past" the user's messages.
if "generated" not in st.session_state:
    st.session_state["generated"] = []

if "past" not in st.session_state:
    st.session_state["past"] = []


# Read the user's question from a text box, pre-filled with a greeting.
def get_text():
    input_text = st.text_input("Enter your inquiries here: ", "Hi!!")
    return input_text


user_input = get_text()

if user_input:
    tokenizer = st.session_state["tokenizer"]
    model = st.session_state["model"]
    # generate_response (defined in chat.py) is assumed to return content
    # relevant to the question; it is used as context for the model.
    context = generate_response(user_input)
    prompt_template = "\nBased on the above content, try to answer the following question.\n\n"
    end_prompt = (
        "Write meaningful, descriptive sentences that end with proper "
        "punctuation. If the above content does not offer a good answer to "
        "the question, apologize and advise the user to contact Bibek directly.\n"
    )
    short_response_template = (
        "\nIf your response is only one or two sentences, add a polite "
        "follow-up such as 'Let me know if there's anything else I can help "
        "you with.'"
    )
    # `input` shadows a Python builtin, so the assembled prompt is `prompt`.
    prompt = context + prompt_template + user_input + end_prompt + short_response_template
    start = time.time()
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
    ).input_ids

    # do_sample=True yields varied replies; max_length caps output at 512 tokens.
    outputs = model.generate(input_ids, max_length=512, do_sample=True)
    # skip_special_tokens drops <pad> and </s>; str.strip("<pad></s>") only
    # stripped those characters from the ends, not the tokens themselves.
    output = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    end = time.time()

    print("Time for model inference: ", end - start)
    # Keep at most 15 exchanges in history; drop the oldest before adding a new one.
    if len(st.session_state.past) >= 15:
        st.session_state.past.pop(0)
        st.session_state.generated.pop(0)

    # Store the latest exchange.
    st.session_state.past.append(user_input)
    st.session_state.generated.append(output)

if st.session_state["generated"]:
    # Render the conversation, newest exchange first.
    for i in range(len(st.session_state["generated"]) - 1, -1, -1):
        message(
            st.session_state["generated"][i],
            avatar_style="bottts",
            seed=39,
            key=str(i),
        )
        message(
            st.session_state["past"][i],
            is_user=True,
            avatar_style="identicon",
            seed=4,
            key=str(i) + "_user",
        )
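
# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py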