import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Replace with your Hugging Face model repository path
model_repo_path = 'Muh113/Bart_Large'
# Check for GPU availability and set device accordingly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the model and tokenizer once per session; st.cache_resource keeps them
# in memory across Streamlit reruns instead of reloading on every interaction
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path).to(device)
    return tokenizer, model

tokenizer, model = load_model()
# Streamlit app layout
st.title("Minecraft Question Answering App")
# User input
question_input = st.text_area("Enter a Minecraft-related question", height=150)
# Answer the question
if st.button("Get Answer"):
    if question_input.strip():
        with st.spinner("Generating answer..."):
            try:
                # Tokenize the input question, truncating to the model's expected length
                inputs = tokenizer(
                    question_input, return_tensors="pt", truncation=True, max_length=116
                ).to(device)
                # Generate the answer with beam search (4 beams, stopping early
                # once all beams have finished)
                outputs = model.generate(
                    inputs["input_ids"],
                    max_length=150,
                    num_beams=4,
                    early_stopping=True,
                )
                # Decode the generated token IDs back into text
                answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.subheader("Answer")
                st.write(answer)
            except Exception as e:
                st.error(f"Error during question answering: {e}")
    else:
        st.warning("Please enter a question to get an answer.")
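
# To run this app locally (assuming streamlit, torch, and transformers are
# installed): streamlit run app.py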