GlastonR committed
Commit ee36877 · verified · Parent(s): 7e2304a

Update app.py

Files changed (1)
  1. app.py +5 -12
app.py CHANGED
@@ -1,29 +1,22 @@
-import streamlit as st
-from transformers import T5ForConditionalGeneration, T5Tokenizer, AutoModelForSeq2SeqLM, AutoTokenizer
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Load models and tokenizers
-@st.cache_resource
 def load_models():
     question_model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
     recipe_model_name = "flax-community/t5-recipe-generation"
     instruct_model_name = "norallm/normistral-7b-warm-instruct"
 
-    # Load question generation model and tokenizer
-    question_model = T5ForConditionalGeneration.from_pretrained(question_model_name)
-    question_tokenizer = T5Tokenizer.from_pretrained(question_model_name)
+    question_model = AutoModelForSeq2SeqLM.from_pretrained(question_model_name)
+    question_tokenizer = AutoTokenizer.from_pretrained(question_model_name)
 
-    # Load recipe generation model and tokenizer
     recipe_model = AutoModelForSeq2SeqLM.from_pretrained(recipe_model_name)
     recipe_tokenizer = AutoTokenizer.from_pretrained(recipe_model_name)
 
-    # Load instruction-based model and tokenizer
-    instruct_model = AutoModelForSeq2SeqLM.from_pretrained(instruct_model_name)
+    # Load instruct model as causal language model
+    instruct_model = AutoModelForCausalLM.from_pretrained(instruct_model_name)
     instruct_tokenizer = AutoTokenizer.from_pretrained(instruct_model_name)
 
     return (question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer)
 
-(question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer) = load_models()
-
 # Function to generate a question from a given passage
 def generate_question(text, model, tokenizer):
     input_text = f"generate question: {text}"
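
A note on the resulting file: the new import line keeps only AutoTokenizer and AutoModelForCausalLM, yet the question and recipe loaders still call AutoModelForSeq2SeqLM.from_pretrained, so that class presumably still needs importing; whether dropping streamlit, @st.cache_resource, and the module-level load_models() call was intentional cannot be told from this hunk. A minimal sketch of an import line covering all three loads, plus a hypothetical helper for the now-causal instruct model (the helper name and parameters are illustrative, not from the commit):

# Sketch under the assumptions above; not part of the commit itself.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM

def generate_instruct_response(prompt, model, tokenizer, max_new_tokens=200):
    # Unlike the T5-style seq2seq models, a causal LM echoes the prompt in
    # its output, so slice the prompt tokens off before decoding.
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)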