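"""Minecraft Query Wizard: a Streamlit app that answers Minecraft-related
questions using the Muh113/Minecraft_Query_Wizard seq2seq model from the
Hugging Face Hub."""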
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hugging Face model repository path (swap in your own repo if needed)
model_repo_path = 'Muh113/Minecraft_Query_Wizard'

# Check for GPU availability and set the device accordingly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Streamlit page setup (must run before any other Streamlit call in the script)
st.set_page_config(page_title="Minecraft Query Wizard", page_icon="🧙‍♂️")

# Load the model and tokenizer once and cache them across reruns,
# so the weights are not reloaded on every user interaction
@st.cache_resource
def load_model_and_tokenizer(repo_path):
    tokenizer = AutoTokenizer.from_pretrained(repo_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(repo_path).to(device)
    return tokenizer, model

tokenizer, model = load_model_and_tokenizer(model_repo_path)

st.title("Minecraft Query Wizard 🧙‍♂️")

st.markdown("""
<style>
    .main {
        background-color: #f5f5f5;
        font-family: Arial, sans-serif;
    }
    .stButton>button {
        background-color: #4CAF50;
        color: white;
        font-size: 18px;
        padding: 10px 20px;
        border-radius: 8px;
    }
</style>
""", unsafe_allow_html=True)

st.markdown("### Welcome to the Minecraft Query Wizard! 🌟")
st.markdown("This tool allows you to ask any Minecraft-related questions and get accurate answers using the latest AI technology.")

st.markdown("#### How to use:")
st.markdown("1. Enter your Minecraft-related question in the text area below.")
st.markdown("2. Click the 'Get Answer' button to generate a response.")
st.markdown("3. See the magic happen! ✨")

# User input
question_input = st.text_area("Enter your Minecraft-related question here:", height=150)

# Answer the question
if st.button("Get Answer"):
    if question_input.strip():
        with st.spinner("Generating answer..."):
            try:
                # Tokenize the input question and move the tensors to the model's device
                inputs = tokenizer(question_input, return_tensors="pt", truncation=True, max_length=116).to(device)
                # Generate the answer with beam search; pass the attention mask along
                # with the input IDs, and skip gradient tracking during inference
                with torch.no_grad():
                    outputs = model.generate(**inputs, max_length=150, num_beams=4, early_stopping=True)
                # Decode the generated token IDs back into text
                answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.subheader("Answer")
                st.write(answer)
            except Exception as e:
                st.error(f"Error during question answering: {e}")
    else:
        st.warning("Please enter a question to get an answer.")

st.markdown("---")
st.markdown("### About the Creators")
st.markdown("**Creator 1:** Name of Creator 1")
st.markdown("**Creator 2:** Name of Creator 2")