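"""Multi-Model Streamlit app.

Wraps three Hugging Face checkpoints: a T5 question generator, a T5 recipe
generator, and a 7B instruction-following causal language model.

Run locally with: streamlit run app.py
"""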
import streamlit as st
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer

@st.cache_resource
def load_models():
    question_model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
    recipe_model_name = "flax-community/t5-recipe-generation"
    instruct_model_name = "norallm/normistral-7b-warm-instruct"

    # Load the T5-based seq2seq models for question and recipe generation
    question_model = AutoModelForSeq2SeqLM.from_pretrained(question_model_name)
    question_tokenizer = AutoTokenizer.from_pretrained(question_model_name)
    recipe_model = AutoModelForSeq2SeqLM.from_pretrained(recipe_model_name)
    recipe_tokenizer = AutoTokenizer.from_pretrained(recipe_model_name)

    # Load the instruction model as a causal language model. torch_dtype="auto"
    # keeps the checkpoint's native precision; a 7B model in full float32 needs
    # roughly 28 GB of RAM.
    instruct_model = AutoModelForCausalLM.from_pretrained(
        instruct_model_name, torch_dtype="auto"
    )
    instruct_tokenizer = AutoTokenizer.from_pretrained(instruct_model_name)

    return (
        (question_model, question_tokenizer),
        (recipe_model, recipe_tokenizer),
        (instruct_model, instruct_tokenizer),
    )
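# Optional: on a GPU host, the 7B instruct model can be placed automatically
# with device_map="auto" (requires the accelerate package), e.g.:
#   AutoModelForCausalLM.from_pretrained(instruct_model_name, device_map="auto")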
# Generate a question from a passage with the question-generation model.
# Note: this checkpoint was fine-tuned on "answer: ... context: ..." prompts,
# so the simpler prefix used here may reduce output quality.
def generate_question(text, model, tokenizer):
    input_text = f"generate question: {text}"
    input_ids = tokenizer.encode(input_text, return_tensors="pt", truncation=True)
    outputs = model.generate(input_ids, max_new_tokens=64)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Generate a recipe from a comma-separated ingredient list. The model card
# for this checkpoint prompts with an "items: " prefix rather than
# "generate recipe: ".
def generate_recipe(ingredients, model, tokenizer):
    input_text = f"items: {ingredients}"
    input_ids = tokenizer.encode(input_text, return_tensors="pt", truncation=True)
    outputs = model.generate(input_ids, max_new_tokens=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Generate a response from the instruction-tuned causal LM. Causal models
# echo the prompt in their output, so decode only the newly generated tokens.
def generate_instruction(prompt, model, tokenizer):
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    outputs = model.generate(input_ids, max_new_tokens=256)
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
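# A sketch of an alternative, assuming the checkpoint ships a tokenizer chat
# template (instruction-tuned models usually expect their tuned chat format
# rather than a bare prompt). The helper name is hypothetical.
def generate_instruction_chat(prompt, model, tokenizer):
    # apply_chat_template wraps the user turn in the model's chat format and,
    # with add_generation_prompt=True, appends the assistant header so
    # generation starts at the reply.
    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        add_generation_prompt=True,
        return_tensors="pt",
    )
    outputs = model.generate(input_ids, max_new_tokens=256)
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)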
# Streamlit interface
def main():
    st.title("Multi-Model Application")

    # Load all models (cached across reruns by st.cache_resource)
    (
        (question_model, question_tokenizer),
        (recipe_model, recipe_tokenizer),
        (instruct_model, instruct_tokenizer),
    ) = load_models()

    # Select between the three tasks
    tab = st.selectbox(
        "Choose task",
        ["Question Generation", "Recipe Generation", "Instruction Following"],
    )

    if tab == "Question Generation":
        passage = st.text_area("Enter a passage for question generation")
        if st.button("Generate Question"):
            question = generate_question(passage, question_model, question_tokenizer)
            st.write("Generated Question:", question)
    elif tab == "Recipe Generation":
        ingredients = st.text_area("Enter ingredients for recipe generation")
        if st.button("Generate Recipe"):
            recipe = generate_recipe(ingredients, recipe_model, recipe_tokenizer)
            st.write("Generated Recipe:", recipe)
    elif tab == "Instruction Following":
        instruction_prompt = st.text_area("Enter an instruction prompt")
        if st.button("Generate Response"):
            response = generate_instruction(instruction_prompt, instruct_model, instruct_tokenizer)
            st.write("Response:", response)
if __name__ == "__main__":
    main()