Muh113 committed on
Commit
8f3b106
·
verified ·
1 Parent(s): fe040c8

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -0
app.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Streamlit app that answers Minecraft-related questions with a seq2seq model.

Fixes vs. the original script:
- The tokenizer/model were reloaded from disk on every Streamlit rerun
  (i.e. every widget interaction); loading is now cached with
  ``st.cache_resource`` so it happens once per process.
- ``model.generate`` received only ``input_ids``, silently dropping the
  ``attention_mask`` the tokenizer produced; all tokenizer outputs are
  now forwarded.
- Generation now runs under ``torch.no_grad()`` and with the model in
  ``eval()`` mode, avoiding needless autograd bookkeeping at inference.
- Whitespace-only questions are rejected like empty ones.
"""
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Replace with your Hugging Face model repository path.
# NOTE(review): 'facebook/bart-large' is a base checkpoint, not fine-tuned
# for question answering — swap in a fine-tuned model for useful answers.
model_repo_path = 'facebook/bart-large'


@st.cache_resource
def load_model(repo_path):
    """Load tokenizer and model once per process (cached across reruns).

    Returns a ``(tokenizer, model, device)`` triple; the model is moved to
    GPU when one is available and switched to eval mode for inference.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer = AutoTokenizer.from_pretrained(repo_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(repo_path).to(device)
    model.eval()  # disable dropout etc. — this app only does inference
    return tokenizer, model, device


tokenizer, model, device = load_model(model_repo_path)

# Streamlit app layout
st.title("Minecraft Question Answering App")

# User input
question_input = st.text_area("Enter a Minecraft-related question", height=150)

# Answer the question
if st.button("Get Answer"):
    if question_input.strip():  # reject empty or whitespace-only input
        with st.spinner("Generating answer..."):
            try:
                # Tokenize the input question (max_length kept from original).
                inputs = tokenizer(
                    question_input,
                    return_tensors="pt",
                    truncation=True,
                    max_length=116,
                ).to(device)
                # Generate without building autograd graphs; ** forwards
                # input_ids AND attention_mask to generate().
                with torch.no_grad():
                    outputs = model.generate(
                        **inputs,
                        max_length=150,
                        num_beams=4,
                        early_stopping=True,
                    )
                answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.subheader("Answer")
                st.write(answer)
            except Exception as e:
                st.error(f"Error during question answering: {e}")
    else:
        st.warning("Please enter a question to get an answer.")