import streamlit as st
from transformers import pipeline
from PIL import Image
import os

# Load the image classification pipeline
@st.cache_resource
def load_image_classification_pipeline():
    """
    Load the image classification pipeline using a pretrained model.
    """
    return pipeline("image-classification", model="Shresthadev403/food-image-classification")

pipe_classification = load_image_classification_pipeline()

# Load the BLOOM model for ingredient generation
@st.cache_resource
def load_bloom_pipeline():
    """
    Load the BLOOM model for ingredient generation.
    """
    return pipeline("text-generation", model="bigscience/bloom-1b7")

pipe_bloom = load_bloom_pipeline()
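# Note: bigscience/bloom-1b7 is a 1.7B-parameter model; downloading and loading it needs several GB of disk and RAM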

def get_ingredients_bloom(food_name):
    """
    Generate a list of ingredients for the given food item using BLOOM.
    Returns a clean, comma-separated list of ingredients.
    """
    prompt = (
        f"Generate a list of the main ingredients used to prepare {food_name}. "
        "Respond only with a concise, comma-separated list of ingredients, without any additional text, explanations, or placeholders. "
        "For example, if the food is pizza, respond with 'cheese, tomato sauce, bread, olive oil, basil'."
    )
    try:
        # Generate only the model's continuation; return_full_text=False drops the echoed prompt
        response = pipe_bloom(prompt, max_new_tokens=50, num_return_sequences=1, return_full_text=False)
        generated_text = response[0]["generated_text"].strip()

        # Post-process the response
        ingredients = generated_text.split(":")[-1].strip()  # Handle cases like "Ingredients: ..."
        ingredients = ingredients.replace(".", "").strip()  # Strip stray periods and extra whitespace

        # Validate the response to ensure no placeholders
        if "ingredient1" in ingredients.lower() or "example" in ingredients.lower():
            return "No valid ingredients found. Try again with a different food."

        return ingredients
    except Exception as e:
        # Handle any errors that occur during the process
        return f"Error generating ingredients: {e}"
# Streamlit app setup
st.title("Food Image Recognition with Ingredients")

# Add banner image (skipped gracefully if the asset is missing)
if os.path.exists("IR_IMAGE.png"):
    st.image("IR_IMAGE.png", caption="Food Recognition Model", use_column_width=True)

# Sidebar for model information
st.sidebar.title("Model Information")
st.sidebar.write("**Image Classification Model**: Shresthadev403/food-image-classification")
st.sidebar.write("**LLM for Ingredients**: bigscience/bloom-1b7")

# Upload image
uploaded_file = st.file_uploader("Choose a food image...", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    # Display the uploaded image
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)
    st.write("Classifying...")

    # Make predictions
    predictions = pipe_classification(image)
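    # predictions is a list of {"label", "score"} dicts sorted by descending confidence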

    # Display only the top prediction; replace underscores in labels (e.g. "apple_pie") for readability
    top_food = predictions[0]["label"].replace("_", " ")
    st.header(f"Food: {top_food}")

    # Generate and display ingredients for the top prediction
    st.subheader("Ingredients")
    try:
        ingredients = get_ingredients_bloom(top_food)
        st.write(ingredients)
    except Exception as e:
        st.error(f"Error generating ingredients: {e}")

# Footer
st.sidebar.markdown("Created with ❤️ using Streamlit and Hugging Face.")