thoristhor committed
Commit 463ac4a · 1 Parent(s): d8a3df9

Update app.py

Files changed (1)
  1. app.py +13 -13
app.py CHANGED
@@ -28,8 +28,8 @@ def load_model():
     nltk.download('omw-1.4')
     ## summary_mod_name = os.environ["summary_mod_name"]
     ## question_mod_name = os.environ["question_mod_name"]
-    summary_mod_name = "t5-large"
-    question_mod_name = "t5-large"
+    summary_mod_name = "csebuetnlp/mT5_multilingual_XLSum"
+    question_mod_name = "MaRiOrOsSi/t5-base-finetuned-question-answering"
     summary_model = T5ForConditionalGeneration.from_pretrained(summary_mod_name)
     summary_tokenizer = T5Tokenizer.from_pretrained(summary_mod_name)
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -43,17 +43,17 @@ from nltk.corpus import wordnet as wn
 from nltk.tokenize import sent_tokenize
 from nltk.corpus import stopwords
 
-def csv_downloader(df):
-    res = df.to_csv(index=False,sep="\t").encode('utf-8')
-    st.download_button(
-        label="Download logs data as CSV separated by tab",
-        data=res,
-        file_name='df_quiz_log_file_v1.csv',
-        mime='text/csv')
+# def csv_downloader(df):
+#     res = df.to_csv(index=False,sep="\t").encode('utf-8')
+#     st.download_button(
+#         label="Download logs data as CSV separated by tab",
+#         data=res,
+#         file_name='df_quiz_log_file_v1.csv',
+#         mime='text/csv')
 
 def load_file():
     """Load text from file"""
-    uploaded_file = st.file_uploader("Upload Files",type=['txt'])
+    uploaded_file = st.file_uploader("Paste text",type=['txt'])
     if uploaded_file is not None:
         if uploaded_file.type == "text/plain":
             raw_text = str(uploaded_file.read(),"utf-8")
@@ -64,11 +64,11 @@ def load_file():
 summary_model, summary_tokenizer, question_tokenizer, question_model =load_model()
 
 # App title and description
-st.title("Exam Assistant")
-st.write("Upload text, Get ready for answering autogenerated questions")
+st.title("P's Prototype")
+st.write("Get multiple choice questions from random facts")
 
 # Load file
-st.text("Disclaimer: This app stores user's input for model improvement purposes !!")
+st.text("Disclaimer: This is an early version. Sorry if there are still bugs.")
 
 # Load file
 
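
For reference, below is a minimal smoke test for the two checkpoints this commit swaps in. It is a sketch only: it assumes the transformers and sentencepiece packages are installed, and it uses the generic AutoTokenizer / AutoModelForSeq2SeqLM classes instead of the T5Tokenizer / T5ForConditionalGeneration calls that app.py itself uses; the sample text and generation parameters are illustrative, not taken from the app.

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Checkpoints introduced by this commit.
summary_mod_name = "csebuetnlp/mT5_multilingual_XLSum"
question_mod_name = "MaRiOrOsSi/t5-base-finetuned-question-answering"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the summarization model; the question model can be loaded the same way.
summary_tokenizer = AutoTokenizer.from_pretrained(summary_mod_name)
summary_model = AutoModelForSeq2SeqLM.from_pretrained(summary_mod_name).to(device)

# Illustrative input text (not from the app).
text = "The Nile is the longest river in Africa and flows north into the Mediterranean Sea."

inputs = summary_tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(device)
summary_ids = summary_model.generate(**inputs, num_beams=4, max_length=84, no_repeat_ngram_size=2)
print(summary_tokenizer.decode(summary_ids[0], skip_special_tokens=True))

Using the Auto classes sidesteps the question of whether the mT5 checkpoint loads cleanly through the T5-specific classes kept in app.py; if those classes are retained, that combination is worth verifying before deployment.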