import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from PIL import Image
import os
# Hugging Face token login (add this as a secret in Hugging Face Spaces)
os.environ["HF_TOKEN"] = st.secrets["HF_TOKEN"]
# Load the image classification pipeline
@st.cache_resource
def load_image_classification_pipeline():
"""
Load the image classification pipeline using a pretrained model.
"""
return pipeline("image-classification", model="Shresthadev403/food-image-classification")
pipe_classification = load_image_classification_pipeline()
# Load the Llama model for ingredient generation
@st.cache_resource
def load_llama_pipeline():
"""
Load the Llama model for ingredient generation.
"""
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3B-Instruct", use_auth_token=os.environ["HF_TOKEN"])
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-3B-Instruct", use_auth_token=os.environ["HF_TOKEN"])
return pipeline("text-generation", model=model, tokenizer=tokenizer)
pipe_llama = load_llama_pipeline()
# Function to generate ingredients using the Llama model
def get_ingredients_llama(food_name):
"""
Generate a list of ingredients for the given food item using the Llama model.
"""
prompt = f"List the main ingredients typically used to prepare {food_name}."
try:
response = pipe_llama(prompt, max_length=50, num_return_sequences=1)
return response[0]["generated_text"].strip()
except Exception as e:
return f"Error generating ingredients: {e}"
# Streamlit app setup
st.title("Food Image Recognition with Ingredients")
# Add banner image
st.image("IR_IMAGE.png", caption="Food Recognition Model", use_column_width=True)
# Sidebar for model information
st.sidebar.title("Model Information")
st.sidebar.write("**Image Classification Model**: Shresthadev403/food-image-classification")
st.sidebar.write("**LLM for Ingredients**: meta-llama/Llama-3.2-3B-Instruct")
# Upload image
uploaded_file = st.file_uploader("Choose a food image...", type=["jpg", "png", "jpeg"])
if uploaded_file is not None:
    # Display the uploaded image
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)
    st.write("Classifying...")

    # Make predictions
    predictions = pipe_classification(image)
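    # The image-classification pipeline returns a list of {"label", "score"} dicts sorted by
    # confidence, so predictions[0] is the top match. (Optional, not in the original app: the
    # score could be surfaced too, e.g. st.caption(f"Confidence: {predictions[0]['score']:.2%}").)
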
    # Display only the top prediction
    top_food = predictions[0]['label']
    st.header(f"Food: {top_food}")

    # Generate and display ingredients for the top prediction
    st.subheader("Ingredients")
    try:
        ingredients = get_ingredients_llama(top_food)
        st.write(ingredients)
    except Exception as e:
        st.error(f"Error generating ingredients: {e}")

# Footer
st.sidebar.markdown("Created with ❤️ using Streamlit and Hugging Face.")