GlastonR committed on
Commit 33e2541 · verified · 1 Parent(s): 6842a4f

Update app.py

Files changed (1)
  1. app.py +43 -51
app.py CHANGED
@@ -1,80 +1,72 @@
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForCausalLM
 
+@st.cache_resource
 def load_models():
     question_model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
     recipe_model_name = "flax-community/t5-recipe-generation"
     instruct_model_name = "norallm/normistral-7b-warm-instruct"
-
+
+    # Load T5-based models for question generation and recipe generation
     question_model = AutoModelForSeq2SeqLM.from_pretrained(question_model_name)
     question_tokenizer = AutoTokenizer.from_pretrained(question_model_name)
-
+
     recipe_model = AutoModelForSeq2SeqLM.from_pretrained(recipe_model_name)
     recipe_tokenizer = AutoTokenizer.from_pretrained(recipe_model_name)
-
-    # Load instruct model as causal language model
+
+    # Load the instruction model as a causal language model
    instruct_model = AutoModelForCausalLM.from_pretrained(instruct_model_name)
     instruct_tokenizer = AutoTokenizer.from_pretrained(instruct_model_name)
-
+
     return (question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer)
 
-# Function to generate a question from a given passage
+# Function to generate questions using the question model
 def generate_question(text, model, tokenizer):
     input_text = f"generate question: {text}"
     input_ids = tokenizer.encode(input_text, return_tensors="pt")
     outputs = model.generate(input_ids)
-    question = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return question
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-# Function to generate a recipe from ingredients or a title
-def generate_recipe(prompt, model, tokenizer):
-    inputs = tokenizer(prompt, return_tensors="pt")
-    outputs = model.generate(inputs["input_ids"], max_length=150, num_return_sequences=1)
-    recipe = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return recipe
+# Function to generate recipes using the recipe model
+def generate_recipe(ingredients, model, tokenizer):
+    input_text = f"generate recipe: {ingredients}"
+    input_ids = tokenizer.encode(input_text, return_tensors="pt")
+    outputs = model.generate(input_ids)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-# Function to generate an instruction-based response
+# Function to generate instructions using the instruction model
 def generate_instruction(prompt, model, tokenizer):
-    inputs = tokenizer(prompt, return_tensors="pt")
-    outputs = model.generate(inputs["input_ids"], max_length=100, num_return_sequences=1)
-    instruction = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return instruction
+    input_ids = tokenizer.encode(prompt, return_tensors="pt")
+    outputs = model.generate(input_ids)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-# Streamlit interface
-st.title("Multi-Model Application: Question, Recipe & Instruction Generation")
+# Streamlit Interface
+def main():
+    st.title("Multi-Model Application")
 
-# Select task
-task = st.selectbox("Choose a task:", ["Generate Question", "Generate Recipe", "Instruction Generation"])
+    # Load all models
+    (question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer) = load_models()
+
+    # Tabs for different functionalities
+    tab = st.selectbox("Choose task", ["Question Generation", "Recipe Generation", "Instruction Following"])
 
-if task == "Generate Question":
-    st.subheader("Generate a Question")
-    passage = st.text_area("Enter a passage to generate a question:")
-    if st.button("Generate Question"):
-        if passage:
+    if tab == "Question Generation":
+        passage = st.text_area("Enter a passage for question generation")
+        if st.button("Generate Question"):
             question = generate_question(passage, question_model, question_tokenizer)
-            st.write(f"Generated Question: {question}")
-        else:
-            st.write("Please enter a passage to generate a question.")
+            st.write("Generated Question:", question)
 
-elif task == "Generate Recipe":
-    st.subheader("Generate a Recipe")
-    recipe_prompt = st.text_area("Enter ingredients or a recipe title:")
-    if st.button("Generate Recipe"):
-        if recipe_prompt:
-            recipe = generate_recipe(recipe_prompt, recipe_model, recipe_tokenizer)
-            st.write("Generated Recipe:")
-            st.write(recipe)
-        else:
-            st.write("Please enter ingredients or a recipe title to generate a recipe.")
+    elif tab == "Recipe Generation":
+        ingredients = st.text_area("Enter ingredients for recipe generation")
+        if st.button("Generate Recipe"):
+            recipe = generate_recipe(ingredients, recipe_model, recipe_tokenizer)
+            st.write("Generated Recipe:", recipe)
 
-elif task == "Instruction Generation":
-    st.subheader("Generate an Instruction")
-    instruction_prompt = st.text_area("Enter an instruction prompt:")
-    if st.button("Generate Instruction"):
-        if instruction_prompt:
+    elif tab == "Instruction Following":
+        instruction_prompt = st.text_area("Enter an instruction prompt")
+        if st.button("Generate Instruction"):
             instruction = generate_instruction(instruction_prompt, instruct_model, instruct_tokenizer)
-            st.write("Generated Instruction:")
-            st.write(instruction)
-        else:
-            st.write("Please enter an instruction prompt.")
+            st.write("Generated Instruction:", instruction)
 
+if __name__ == "__main__":
+    main()
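
Note on the import change: the previous revision imported only AutoTokenizer and AutoModelForCausalLM, yet load_models() calls AutoModelForSeq2SeqLM.from_pretrained(...), and the old top-level UI code referenced question_model and friends without ever calling load_models(); either path ends in a NameError at runtime. This commit fixes both, importing AutoModelForSeq2SeqLM and having main() call load_models(). The class split is architectural: the two T5 checkpoints are encoder-decoder models, while the NorMistral checkpoint is decoder-only, which is why it goes through AutoModelForCausalLM. A quick way to confirm which Auto class a checkpoint expects is to inspect its config; the sketch below is an illustration, not part of the commit, and fetches only each model's small config.json rather than the weights:

from transformers import AutoConfig

# Print the declared model type and architecture class for each
# checkpoint that app.py loads.
for name in [
    "mrm8488/t5-base-finetuned-question-generation-ap",
    "flax-community/t5-recipe-generation",
    "norallm/normistral-7b-warm-instruct",
]:
    config = AutoConfig.from_pretrained(name)
    print(f"{name}: model_type={config.model_type}, architectures={config.architectures}")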
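
Note on @st.cache_resource: Streamlit re-executes the whole script on every widget interaction, so without this decorator all three checkpoints, including a 7B-parameter model, would be reloaded on every button click. With it, load_models() runs once per process and the same objects are handed back on each rerun. A minimal sketch of the pattern, with a hypothetical slow loader standing in for the real models:

import time
import streamlit as st

@st.cache_resource  # body runs once per process; later reruns reuse the cached object
def load_expensive_resource():
    time.sleep(5)  # hypothetical stand-in for downloading and initializing a model
    return {"ready": True}

resource = load_expensive_resource()  # near-instant on every rerun after the first
st.write("Loaded:", resource)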
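
For a quick smoke test of the refactored helpers without launching Streamlit, the question model alone is small enough to run locally. The sketch below mirrors generate_question from the diff; the sample passage is an illustrative assumption, and the prompt format is the one app.py itself uses:

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

name = "mrm8488/t5-base-finetuned-question-generation-ap"
model = AutoModelForSeq2SeqLM.from_pretrained(name)
tokenizer = AutoTokenizer.from_pretrained(name)

text = "The Eiffel Tower was completed in 1889."  # sample input (assumption)
input_ids = tokenizer.encode(f"generate question: {text}", return_tensors="pt")
outputs = model.generate(input_ids, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))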