import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Replace with your Hugging Face model repository path
model_repo_path = 'Muh113/Minecraft_Query_Wizard'
# Check for GPU availability and set the device accordingly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path).to(device)
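# Note: Streamlit reruns this script on every interaction, so the model and
# tokenizer above are reloaded from scratch each time. One optional refactor
# (a sketch, not part of the original app) is to wrap the load in a cached
# helper so reruns reuse the same objects:
#
#   @st.cache_resource
#   def load_model():
#       tok = AutoTokenizer.from_pretrained(model_repo_path)
#       mdl = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path).to(device)
#       return tok, mdl
#
#   tokenizer, model = load_model()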
# Inject CSS for background image
page_bg_img = '''
<style>
body {
    background-image: url("https://wallpapercave.com/wp/wp5772281.jpg");
    background-size: cover;
}
.block-container {
    background-color: rgba(255, 255, 255, 0.8);
    border-radius: 10px;
    padding: 20px;
}
</style>
'''
st.markdown(page_bg_img, unsafe_allow_html=True)
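# Note: depending on the Streamlit version, CSS applied to `body` via
# st.markdown may not take effect; targeting the `.stApp` container is a
# commonly used alternative for background styling.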
# Streamlit app layout
st.title("Minecraft Query Wizard")
# User input
question_input = st.text_area("Enter a Minecraft-related question", height=150)
# Answer the question
if st.button("Get Answer"):
    if question_input:
        with st.spinner("Generating answer..."):
            try:
                # Tokenize the input question
                inputs = tokenizer(question_input, return_tensors="pt", truncation=True, max_length=116).to(device)
                # Generate the answer with beam search
                outputs = model.generate(inputs['input_ids'], max_length=150, num_beams=4, early_stopping=True)
                # Decode the generated answer and display it
                answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.success(answer)
            except Exception as e:
                st.error(f"An error occurred while generating the answer: {e}")
    else:
        st.warning("Please enter a question first.")
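# To run locally (assuming the file is saved as app.py and streamlit, torch,
# and transformers are installed):
#
#   streamlit run app.py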