from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
import streamlit as st
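
# Streamlit app exposing three Hugging Face models behind a single task
# selector: T5-based question generation, T5-based recipe generation, and a
# Norwegian Mistral instruction-following model.
# Run locally with: streamlit run app.py

# cache_resource loads the models once per server process and reuses them
# across Streamlit reruns.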
@st.cache_resource
def load_models():
    question_model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
    recipe_model_name = "flax-community/t5-recipe-generation"
    instruct_model_name = "norallm/normistral-7b-warm-instruct"
    # Load T5-based models for question generation and recipe generation
    question_model = AutoModelForSeq2SeqLM.from_pretrained(question_model_name)
    question_tokenizer = AutoTokenizer.from_pretrained(question_model_name)
    recipe_model = AutoModelForSeq2SeqLM.from_pretrained(recipe_model_name)
    recipe_tokenizer = AutoTokenizer.from_pretrained(recipe_model_name)
    # Load the instruction model as a causal language model
    instruct_model = AutoModelForCausalLM.from_pretrained(instruct_model_name)
    instruct_tokenizer = AutoTokenizer.from_pretrained(instruct_model_name)
    return (question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer)
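
# Note: norallm/normistral-7b-warm-instruct is a 7B-parameter model, so loading
# it in full precision needs tens of GB of memory. On constrained hardware it
# may help to pass torch_dtype or device_map to from_pretrained (standard
# from_pretrained options; adjust to your environment).
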
# Function to generate questions using the question model
def generate_question(text, model, tokenizer):
input_text = f"generate question: {text}"
input_ids = tokenizer.encode(input_text, return_tensors="pt")
outputs = model.generate(input_ids)
return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Function to generate recipes using the recipe model
def generate_recipe(ingredients, model, tokenizer):
input_text = f"generate recipe: {ingredients}"
input_ids = tokenizer.encode(input_text, return_tensors="pt")
outputs = model.generate(input_ids)
return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Function to generate instructions using the instruction model
def generate_instruction(prompt, model, tokenizer):
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    outputs = model.generate(input_ids, max_new_tokens=256)
    # A causal LM's output includes the prompt tokens; slice them off so only
    # the newly generated continuation is returned.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)

# Streamlit Interface
def main():
st.title("Multi-Model Application")
# Load all models
(question_model, question_tokenizer), (recipe_model, recipe_tokenizer), (instruct_model, instruct_tokenizer) = load_models()
# Tabs for different functionalities
tab = st.selectbox("Choose task", ["Question Generation", "Recipe Generation", "Instruction Following"])
if tab == "Question Generation":
passage = st.text_area("Enter a passage for question generation")
if st.button("Generate Question"):
question = generate_question(passage, question_model, question_tokenizer)
st.write("Generated Question:", question)
elif tab == "Recipe Generation":
ingredients = st.text_area("Enter ingredients for recipe generation")
if st.button("Generate Recipe"):
recipe = generate_recipe(ingredients, recipe_model, recipe_tokenizer)
st.write("Generated Recipe:", recipe)
elif tab == "Instruction Following":
instruction_prompt = st.text_area("Enter an instruction prompt")
if st.button("Generate Instruction"):
instruction = generate_instruction(instruction_prompt, instruct_model, instruct_tokenizer)
st.write("Generated Instruction:", instruction)
if __name__ == "__main__":
    main()