import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Replace with your Hugging Face model repository path
model_repo_path = 'Muh113/Minecraft_Query_Wizard'

# Check for GPU availability and set the device accordingly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Load the model and tokenizer once and cache them across Streamlit reruns,
# so they are not re-downloaded and re-initialized on every interaction
@st.cache_resource
def load_model_and_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path).to(device)
    return tokenizer, model


tokenizer, model = load_model_and_tokenizer()

# Streamlit app layout
st.set_page_config(page_title="Minecraft Query Wizard", page_icon="🧙‍♂️")
st.title("Minecraft Query Wizard 🧙‍♂️")

st.markdown("### Welcome to the Minecraft Query Wizard! 🌟")
st.markdown("This tool lets you ask any Minecraft-related question and get an answer generated by a fine-tuned sequence-to-sequence model.")
st.markdown("#### How to use:")
st.markdown("1. Enter your Minecraft-related question in the text area below.")
st.markdown("2. Click the 'Get Answer' button to generate a response.")
st.markdown("3. See the magic happen! ✨")

# User input
question_input = st.text_area("Enter your Minecraft-related question here:", height=150)

# Answer the question
if st.button("Get Answer"):
    if question_input:
        with st.spinner("Generating answer..."):
            try:
                # Tokenize the input question and move the tensors to the model's device
                inputs = tokenizer(
                    question_input,
                    return_tensors="pt",
                    truncation=True,
                    max_length=116,
                ).to(device)

                # Generate the answer with beam search; passing **inputs forwards
                # both input_ids and attention_mask to generate()
                outputs = model.generate(
                    **inputs,
                    max_length=150,
                    num_beams=4,
                    early_stopping=True,
                )

                # Decode the generated answer, dropping special tokens
                answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

                st.subheader("Answer")
                st.write(answer)
            except Exception as e:
                st.error(f"Error during question answering: {e}")
    else:
        st.warning("Please enter a question to get an answer.")

st.markdown("---")
st.markdown("### About the Creators")
st.markdown("**Creator 1:** Name of Creator 1")
st.markdown("**Creator 2:** Name of Creator 2")
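
# A minimal sketch of how to run this app locally, assuming the script is
# saved as app.py and you have Python 3 available (the filename and the
# exact dependency versions are assumptions, not part of the original code):
#
#   pip install streamlit torch transformers
#   streamlit run app.py
#
# Streamlit then serves the app at http://localhost:8501 by default.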