# Spaces: Sleeping — Hugging Face Space status banner captured by the page
# scrape; commented out so it is not mistaken for application source.
import streamlit as st
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    T5ForConditionalGeneration,
    T5Tokenizer,
)
# Load models and tokenizers
@st.cache_resource  # cache across Streamlit reruns: without this, every widget
                    # interaction re-executes the script and reloads all models
def load_models():
    """Load the three model/tokenizer pairs used by the app.

    Returns:
        Three ``(model, tokenizer)`` tuples, in order: question generation,
        recipe generation, instruction following.
    """
    question_model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
    recipe_model_name = "flax-community/t5-recipe-generation"
    instruct_model_name = "norallm/normistral-7b-warm-instruct"

    # Question generation: T5 encoder-decoder with a "generate question:" prefix task.
    question_model = T5ForConditionalGeneration.from_pretrained(question_model_name)
    question_tokenizer = T5Tokenizer.from_pretrained(question_model_name)

    # Recipe generation: T5-based seq2seq model.
    recipe_model = AutoModelForSeq2SeqLM.from_pretrained(recipe_model_name)
    recipe_tokenizer = AutoTokenizer.from_pretrained(recipe_model_name)

    # BUG FIX: normistral-7b-warm-instruct is a Mistral-architecture
    # *decoder-only* model, so AutoModelForSeq2SeqLM cannot load it (it raises
    # at from_pretrained). Use the causal-LM auto class instead; the returned
    # model exposes the same .generate() interface the rest of the app uses.
    instruct_model = AutoModelForCausalLM.from_pretrained(instruct_model_name)
    instruct_tokenizer = AutoTokenizer.from_pretrained(instruct_model_name)

    return (question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer)

(question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer) = load_models()
# Function to generate a question from a given passage
def generate_question(text, model, tokenizer):
    """Return one model-generated question about ``text``.

    The question model was fine-tuned with a ``generate question: ...`` task
    prefix, so the passage is wrapped in that prompt before encoding.
    """
    encoded = tokenizer.encode(f"generate question: {text}", return_tensors="pt")
    generated = model.generate(encoded)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
# Function to generate a recipe from ingredients or a title
def generate_recipe(prompt, model, tokenizer):
    """Return a generated recipe (up to 150 tokens) for ``prompt``.

    ``prompt`` may be an ingredient list or a recipe title; a single decoded
    sequence is returned with special tokens stripped.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    sequences = model.generate(encoded["input_ids"], max_length=150, num_return_sequences=1)
    return tokenizer.decode(sequences[0], skip_special_tokens=True)
# Function to generate an instruction-based response
def generate_instruction(prompt, model, tokenizer):
    """Return the model's response (up to 100 tokens) to an instruction ``prompt``."""
    encoded = tokenizer(prompt, return_tensors="pt")
    sequences = model.generate(encoded["input_ids"], max_length=100, num_return_sequences=1)
    return tokenizer.decode(sequences[0], skip_special_tokens=True)
# Streamlit interface
st.title("Multi-Model Application: Question, Recipe & Instruction Generation")

# Select task — the chosen entry decides which form is rendered below.
selected_task = st.selectbox("Choose a task:", ["Generate Question", "Generate Recipe", "Instruction Generation"])

if selected_task == "Generate Question":
    st.subheader("Generate a Question")
    source_passage = st.text_area("Enter a passage to generate a question:")
    if st.button("Generate Question"):
        # Guard against an empty text area before invoking the model.
        if not source_passage:
            st.write("Please enter a passage to generate a question.")
        else:
            generated = generate_question(source_passage, question_model, question_tokenizer)
            st.write(f"Generated Question: {generated}")
elif selected_task == "Generate Recipe":
    st.subheader("Generate a Recipe")
    recipe_input = st.text_area("Enter ingredients or a recipe title:")
    if st.button("Generate Recipe"):
        if not recipe_input:
            st.write("Please enter ingredients or a recipe title to generate a recipe.")
        else:
            st.write("Generated Recipe:")
            st.write(generate_recipe(recipe_input, recipe_model, recipe_tokenizer))
elif selected_task == "Instruction Generation":
    st.subheader("Generate an Instruction")
    instruct_input = st.text_area("Enter an instruction prompt:")
    if st.button("Generate Instruction"):
        if not instruct_input:
            st.write("Please enter an instruction prompt.")
        else:
            st.write("Generated Instruction:")
            st.write(generate_instruction(instruct_input, instruct_model, instruct_tokenizer))