GlastonR committed
Commit a3d0e1b · verified · Parent: 0323be5

Create app.py

Files changed (1): app.py (+86, −0)
app.py ADDED
@@ -0,0 +1,86 @@
+ import streamlit as st
+ from transformers import (
+     T5ForConditionalGeneration,
+     T5Tokenizer,
+     AutoModelForSeq2SeqLM,
+     AutoModelForCausalLM,
+     AutoTokenizer,
+ )
+
+ # Load models and tokenizers once; st.cache_resource keeps them in memory across reruns
+ @st.cache_resource
+ def load_models():
+     question_model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
+     recipe_model_name = "flax-community/t5-recipe-generation"
+     instruct_model_name = "norallm/normistral-7b-warm-instruct"
+
+     # Question generation model and tokenizer (T5 encoder-decoder)
+     question_model = T5ForConditionalGeneration.from_pretrained(question_model_name)
+     question_tokenizer = T5Tokenizer.from_pretrained(question_model_name)
+
+     # Recipe generation model and tokenizer (T5 encoder-decoder)
+     recipe_model = AutoModelForSeq2SeqLM.from_pretrained(recipe_model_name)
+     recipe_tokenizer = AutoTokenizer.from_pretrained(recipe_model_name)
+
+     # Instruction-following model and tokenizer. normistral-7b-warm-instruct is a
+     # decoder-only (causal) model, so it must be loaded with AutoModelForCausalLM;
+     # AutoModelForSeq2SeqLM would fail for this architecture.
+     instruct_model = AutoModelForCausalLM.from_pretrained(instruct_model_name)
+     instruct_tokenizer = AutoTokenizer.from_pretrained(instruct_model_name)
+
+     return (question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer)
+
+ (question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer) = load_models()
+
+ # Generate a question from a given passage
+ def generate_question(text, model, tokenizer):
+     input_text = f"generate question: {text}"
+     input_ids = tokenizer.encode(input_text, return_tensors="pt")
+     # Raise max_length above the 20-token default so questions are not truncated
+     outputs = model.generate(input_ids, max_length=64)
+     question = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return question
+
+ # Generate a recipe from ingredients or a title
+ def generate_recipe(prompt, model, tokenizer):
+     inputs = tokenizer(prompt, return_tensors="pt")
+     outputs = model.generate(inputs["input_ids"], max_length=150, num_return_sequences=1)
+     recipe = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return recipe
+
+ # Generate an instruction-following response with the causal LM
+ def generate_instruction(prompt, model, tokenizer):
+     inputs = tokenizer(prompt, return_tensors="pt")
+     # Use max_new_tokens (not max_length) so the prompt does not eat the generation budget
+     outputs = model.generate(
+         inputs["input_ids"],
+         max_new_tokens=100,
+         pad_token_id=tokenizer.eos_token_id,
+     )
+     # A causal LM echoes the prompt, so decode only the newly generated tokens
+     new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
+     response = tokenizer.decode(new_tokens, skip_special_tokens=True)
+     return response
+
+ # Streamlit interface
+ st.title("Multi-Model Application: Question, Recipe & Instruction Generation")
+
+ # Select task
+ task = st.selectbox("Choose a task:", ["Generate Question", "Generate Recipe", "Instruction Generation"])
+
+ if task == "Generate Question":
+     st.subheader("Generate a Question")
+     passage = st.text_area("Enter a passage to generate a question:")
+     if st.button("Generate Question"):
+         if passage:
+             question = generate_question(passage, question_model, question_tokenizer)
+             st.write(f"Generated Question: {question}")
+         else:
+             st.write("Please enter a passage to generate a question.")
+
+ elif task == "Generate Recipe":
+     st.subheader("Generate a Recipe")
+     recipe_prompt = st.text_area("Enter ingredients or a recipe title:")
+     if st.button("Generate Recipe"):
+         if recipe_prompt:
+             recipe = generate_recipe(recipe_prompt, recipe_model, recipe_tokenizer)
+             st.write("Generated Recipe:")
+             st.write(recipe)
+         else:
+             st.write("Please enter ingredients or a recipe title to generate a recipe.")
+
+ elif task == "Instruction Generation":
+     st.subheader("Generate an Instruction Response")
+     instruction_prompt = st.text_area("Enter an instruction prompt:")
+     if st.button("Generate Response"):
+         if instruction_prompt:
+             response = generate_instruction(instruction_prompt, instruct_model, instruct_tokenizer)
+             st.write("Generated Response:")
+             st.write(response)
+         else:
+             st.write("Please enter an instruction prompt.")
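
To try the app locally (a minimal sketch, not part of the commit): install the dependencies with pip install streamlit transformers torch sentencepiece, then launch with streamlit run app.py. Note that norallm/normistral-7b-warm-instruct is a 7B-parameter model, so the instruction task needs substantial RAM or a GPU; the two T5 checkpoints are much lighter.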