Muh113 committed on
Commit
dfd6080
·
verified ·
1 Parent(s): ed28752

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -16
app.py CHANGED
@@ -12,22 +12,6 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
12
  tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
13
  model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path).to(device)
14
 
15
- # Inject CSS for background image
16
- page_bg_img = '''
17
- <style>
18
- body {
19
- background-image: url("https://wallpapercave.com/wp/wp5772281.jpg");
20
- background-size: cover;
21
- }
22
- .block-container {
23
- background-color: rgba(255, 255, 255, 0.8);
24
- border-radius: 10px;
25
- padding: 20px;
26
- }
27
- </style>
28
- '''
29
- st.markdown(page_bg_img, unsafe_allow_html=True)
30
-
31
  # Streamlit app layout
32
  st.title("Minecraft Query Wizard")
33
 
@@ -44,3 +28,10 @@ if st.button("Get Answer"):
44
  # Generate the answer
45
  outputs = model.generate(inputs['input_ids'], max_length=150, num_beams=4, early_stopping=True)
46
  # Decode the generated answer
 
 
 
 
 
 
 
 
12
  tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
13
  model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path).to(device)
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  # Streamlit app layout
16
  st.title("Minecraft Query Wizard")
17
 
 
28
  # Generate the answer
29
  outputs = model.generate(inputs['input_ids'], max_length=150, num_beams=4, early_stopping=True)
30
  # Decode the generated answer
31
+ answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
32
+ st.subheader("Answer")
33
+ st.write(answer)
34
+ except Exception as e:
35
+ st.error(f"Error during question answering: {e}")
36
+ else:
37
+ st.warning("Please enter a question to get an answer.")