import streamlit as st
from transformers import T5ForConditionalGeneration, T5Tokenizer
import torch

# Load the fine-tuned T5 chatbot model and tokenizer from the Hugging Face Hub
model_name = "KhantKyaw/T5-small_new_chatbot"
model = T5ForConditionalGeneration.from_pretrained(model_name)
tokenizer = T5Tokenizer.from_pretrained(model_name)

# Set device to GPU if available for faster inference, otherwise fallback to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Streamlit Interface
st.title("Mental Health Chatbot with T5")

def generate_response(input_text):
    # Tokenize the prompt and move the tensors to the same device as the model
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)
    outputs = model.generate(input_ids,
                             min_length=5,
                             max_length=300,
                             do_sample=True,
                             num_beams=5,
                             no_repeat_ngram_size=2)
    # Decode the generated token IDs back into text, dropping special tokens
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text

prompt = st.chat_input(placeholder="Say Something!")
if prompt:
    with st.chat_message(name="AI"):
        st.write(generate_response(prompt))
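
Note that as written, the model and tokenizer are reloaded from the Hub on every Streamlit rerun (i.e. on each user interaction). A minimal sketch of one common mitigation, assuming a Streamlit version that provides `st.cache_resource`, is to wrap the loading step in a cached helper; the `load_model` name and signature below are illustrative, not part of the original script:

@st.cache_resource
def load_model(name: str = "KhantKyaw/T5-small_new_chatbot"):
    # Cached across reruns, so the weights are only loaded once per process
    tok = T5Tokenizer.from_pretrained(name)
    mdl = T5ForConditionalGeneration.from_pretrained(name)
    mdl.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
    return tok, mdl

tokenizer, model = load_model()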