|
import streamlit as st |
|
from streamlit_option_menu import option_menu |
|
from transformers import pipeline, Conversation |
|
|
|
@st.cache_resource
def _load_pipeline(model, task=None):
    """Create a Hugging Face pipeline once and reuse it across Streamlit reruns.

    Streamlit re-executes the whole script on every interaction; without
    caching, each rerun would re-download / re-load multi-GB model weights.
    When ``task`` is None, ``pipeline`` infers the task from the model card.
    """
    return pipeline(task=task, model=model)


convo = _load_pipeline("microsoft/DialoGPT-medium", task="conversational")

imgclassifier = _load_pipeline("microsoft/beit-base-patch16-224-pt22k-ft22k")

qnabot = _load_pipeline("distilbert-base-cased-distilled-squad", task="question-answering")

txtgen = _load_pipeline("EleutherAI/gpt-neo-2.7B", task="text-generation")

txtclassifi = _load_pipeline("nlptown/bert-base-multilingual-uncased-sentiment", task="text-classification")

summurize = _load_pipeline("sshleifer/distilbart-cnn-12-6", task="summarization")

# BUG FIX: the original used DialoGPT (a text-only chat model) for the "vqa"
# task, which cannot work; ViLT is the standard visual-question-answering model.
visualqna = _load_pipeline("dandelin/vilt-b32-finetuned-vqa", task="vqa")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def chat():
    """Render the conversational chatbot page backed by the `convo` pipeline."""
    st.title("Chit-Chatbot")
    user_message = st.chat_input("Enter your message")
    if user_message:
        # The conversational pipeline consumes a Conversation object and
        # appends its reply to `generated_responses`.
        conversation = Conversation(user_message)
        result = convo(conversation)
        with st.chat_message("assistant"):
            st.write(result.generated_responses[-1])
|
|
|
|
|
def image_classifi():
    """Render the image-classification page.

    The user supplies an image URL; classification runs only after the
    button is pressed AND a non-empty URL was given.  The original called
    the classifier on every script rerun — including with an empty string,
    which crashes the pipeline before the button is ever clicked.
    """
    st.title("Image Classification")
    url = st.text_input("Enter Image URL")
    if st.button("View Results"):
        if not url:
            st.warning("Please enter an image URL first.")
        else:
            # The BEiT pipeline accepts an image URL string directly.
            st.write(imgclassifier(url))
|
|
|
|
|
def qna_bot():
    """Render the extractive Q&A page.

    DistilBERT-SQuAD is an *extractive* QA model: the pipeline requires
    both a ``question`` and a ``context`` passage to extract the answer
    from.  The original passed only the question, which the
    question-answering pipeline rejects at call time.
    """
    st.title("Q&A-Chatbot")
    context = st.text_area("Enter the context passage to answer from")
    if query := st.chat_input("Enter your message"):
        if not context:
            st.warning("Please provide a context passage first.")
        else:
            response = qnabot(question=query, context=context)
            with st.chat_message("assistant"):
                st.write(response)
|
|
|
|
|
def txt_gen():
    """Render the text-generation page backed by the `txtgen` pipeline."""
    st.title("Text Generation")
    prompt = st.chat_input("Enter your message")
    if prompt:
        generated = txtgen(prompt)
        with st.chat_message("assistant"):
            st.write(generated)
|
|
|
def txt_classifi():
    """Render the sentiment / text-classification page."""
    st.title("Text Classification")
    message = st.chat_input("Enter your message")
    if message:
        labels = txtclassifi(message)
        with st.chat_message("assistant"):
            st.write(labels)
|
|
|
def summury():
    """Render the summarization page backed by the `summurize` pipeline."""
    st.title("Summury")
    text = st.chat_input("Enter your message")
    if text:
        # Length bounds keep the summary short; values mirror the original.
        summary = summurize(text, min_length=5, max_length=20)
        with st.chat_message("assistant"):
            st.write(summary)
|
|
|
def visual_qna():
    """Render the visual question-answering page.

    The image URL is entered in the sidebar; the question via chat input.
    The original passed ``img`` to the pipeline even when the text input
    was empty (``st.text_input`` returns ``""`` by default), crashing the
    VQA call — now the question is only processed once an image URL exists.
    """
    st.title("Visual Q&A")
    with st.sidebar:
        if img := st.text_input("Enter Image URL"):
            st.image(img)
    if query := st.chat_input("Enter your message"):
        if not img:
            st.warning("Please enter an image URL in the sidebar first.")
        else:
            # Explicit keywords match the VQA pipeline's call signature.
            response = visualqna(image=img, question=query)
            with st.chat_message("assistant"):
                st.write(response)
|
|
|
|
|
|
|
|
|
def dashboard():
    """Render the sidebar menu and route to the selected demo page.

    The original only routed 'Conversational', 'Image Classification' and
    'Logout'; selecting any of the other five menu entries silently did
    nothing.  A dispatch table now wires every page to its renderer.
    """
    # Maps each menu label to the function that renders that page.
    pages = {
        'Conversational': chat,
        "Q&A": qna_bot,
        "Text Generation": txt_gen,
        "Text Classification": txt_classifi,
        "Image Classification": image_classifi,
        "Summurization": summury,
        "Visual Q&A": visual_qna,
    }

    with st.sidebar:
        selected = option_menu(None, list(pages) + ["Logout"],
                               icons=['π¬','β', 'π', 'π€', 'πΌοΈ', 'π', 'π', 'π'])

    if selected == 'Logout':
        # Clear the logged-in user and restart the script from the top.
        st.session_state.user = None
        st.experimental_rerun()
    elif selected in pages:
        pages[selected]()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|