import streamlit as st
from transformers import pipeline

# T5-base checkpoint fine-tuned on the JFLEG grammatical-error-correction dataset.
model_checkpoint = "Modfiededition/t5-base-fine-tuned-on-jfleg"
# Cache the pipeline so the model is downloaded and loaded only once per session.
# (st.cache is the caching API used here; newer Streamlit releases replace it with st.cache_resource.)
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def load_model():
    return pipeline("text2text-generation", model=model_checkpoint)

model = load_model()
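# The text2text-generation pipeline returns a list of dicts with a "generated_text" key,
# e.g. (illustrative): model("She are happy.") -> [{"generated_text": "She is happy."}]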
# prompts
st.title("For your English diary! 🤖")
st.markdown("We'll check your English grammar! This assistant uses a **T5-base model ✍️** fine-tuned on the JFLEG dataset.")
st.subheader("Some examples: ")
example_1 = st.button("I am write on AI")
example_2 = st.button("This sentence has, bads grammar mistake!")
textbox = st.text_area('Write your text in this box:', '', height=100, max_chars=500)
button = st.button('Detect grammar mistakes')
# output
st.subheader("Correct sentence: ")
if example_1:
    with st.spinner('In progress...'):
        output_text = model("I am write on AI")[0]["generated_text"]
        st.markdown("## " + output_text)

if example_2:
    with st.spinner('In progress...'):
        output_text = model("This sentence has, bads grammar mistake!")[0]["generated_text"]
        st.markdown("## " + output_text)
if button:
    with st.spinner('In progress...'):
        if textbox:
            output_text = model(textbox)[0]["generated_text"]
        else:
            # Nothing typed: fall back to an empty result so the heading stays blank.
            output_text = " "
        st.markdown("## " + output_text)