Muh113 committed
Commit f317deb · verified · 1 Parent(s): 9c6e9ba

Update app.py

Files changed (1): app.py (+2, -1)
app.py CHANGED
@@ -3,7 +3,7 @@ import torch
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 # Replace with your Hugging Face model repository path
-model_repo_path = 'Muh113/bart-large'
+model_repo_path = 'facebook/bart-large'
 
 # Check for GPU availability and set device accordingly
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -27,6 +27,7 @@ if st.button("Get Answer"):
     inputs = tokenizer(question_input, return_tensors="pt", truncation=True, max_length=116).to(device)
     # Generate the answer
     outputs = model.generate(inputs['input_ids'], max_length=150, num_beams=4, early_stopping=True)
+    # Decode the generated answer
     answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
     st.subheader("Answer")
     st.write(answer)
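
For context, a minimal sketch of what the full app.py plausibly looks like after this commit. The lines inside the two hunks are taken verbatim from the diff; everything outside them (the from_pretrained loading, the st.title/st.text_area widgets and their labels) is an assumption reconstructed from the identifiers the diff references (question_input, st.button("Get Answer")), not the author's actual elided code.

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Replace with your Hugging Face model repository path
model_repo_path = 'facebook/bart-large'

# Check for GPU availability and set device accordingly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Assumption: the elided lines 10-26 load the tokenizer and model
# from the repo path and move the model to the selected device
tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path).to(device)

# Assumption: a text box collects the question (widget labels are hypothetical)
st.title("Question Answering")
question_input = st.text_area("Enter your question")

if st.button("Get Answer"):
    # Tokenize the question and move the tensors to the model's device
    inputs = tokenizer(question_input, return_tensors="pt", truncation=True, max_length=116).to(device)
    # Generate the answer
    outputs = model.generate(inputs['input_ids'], max_length=150, num_beams=4, early_stopping=True)
    # Decode the generated answer
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    st.subheader("Answer")
    st.write(answer)

Assuming the standard tooling, the app would be started with streamlit run app.py after pip install streamlit torch transformers. One note on the change itself: facebook/bart-large is the base pretrained BART checkpoint rather than a question-answering fine-tune, so generations from it will tend to reconstruct the input text rather than answer it; a fine-tuned seq2seq QA checkpoint would be needed for meaningful answers.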