import streamlit as st
from transformers import pipeline
from PIL import Image
from huggingface_hub import InferenceClient
import os
from gradio_client import Client

# Hugging Face API key (stored as a Streamlit secret)
API_KEY = st.secrets["HF_API_KEY"]

# Initialize the Hugging Face Inference Client
client = InferenceClient(api_key=API_KEY)
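# The token above is read from Streamlit secrets. On a Hugging Face Space it is
# configured as a repository secret; for a local run, a minimal .streamlit/secrets.toml
# would contain a single entry (key name assumed to match the lookup above):
#
#   HF_API_KEY = "hf_xxxxxxxxxxxxxxxx"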

# Load the image classification pipeline
@st.cache_resource  # cache the model so it is not reloaded on every Streamlit rerun
def load_image_classification_pipeline():
    """
    Load the image classification pipeline using a pretrained model.
    """
    return pipeline("image-classification", model="Shresthadev403/food-image-classification")

pipe_classification = load_image_classification_pipeline()
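# For reference: the transformers image-classification pipeline returns a list of
# {"label": ..., "score": ...} dicts sorted by descending score, so predictions[0]["label"]
# further below is the top prediction.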

# Function to generate ingredients using the Hugging Face Inference Client
def get_ingredients_qwen(food_name):
    """
    Generate a list of ingredients for the given food item using the Qwen model.
    Returns a clean, comma-separated list of ingredients.
    """
    messages = [
        {
            "role": "user",
            "content": f"List only the main ingredients for {food_name}. "
                       f"Respond in a concise, comma-separated list without any extra text or explanations."
        }
    ]
    try:
        completion = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            max_tokens=50
        )
        generated_text = completion.choices[0].message.content.strip()
        return generated_text
    except Exception as e:
        return f"Error generating ingredients: {e}"

# Streamlit app setup
st.title("Food Image Recognition with Ingredients")

# Add banner image
st.image("IR_IMAGE.png", caption="Food Recognition Model", use_container_width=True)

# Sidebar for model information
st.sidebar.title("Model Information")
st.sidebar.write("**Image Classification Model**: Shresthadev403/food-image-classification")
st.sidebar.write("**LLM for Ingredients**: Qwen/Qwen2.5-Coder-32B-Instruct")

# Upload image
uploaded_file = st.file_uploader("Choose a food image...", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    # Display the uploaded image
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_container_width=True)
    st.write("Classifying...")

    # Make predictions
    predictions = pipe_classification(image)

    # Display only the top prediction
    top_food = predictions[0]["label"]
    st.header(f"Food: {top_food}")

    # Generate and display ingredients for the top prediction
    st.subheader("Ingredients")
    try:
        ingredients = get_ingredients_qwen(top_food)
        st.write(ingredients)
    except Exception as e:
        st.error(f"Error generating ingredients: {e}")

    # Ask the external RAG Space for a healthier recipe suggestion
    st.subheader("Healthier alternatives:")
    try:
        # Note: this gradio.live share URL is temporary and may need to be updated
        # when the backing Gradio app is restarted.
        rag_client = Client("https://8a56cb969da1f9d721.gradio.live/")
        result = rag_client.predict(
            query=f"What's a healthy {top_food} recipe, and why is it healthy?",
            api_name="/get_response",
        )
        st.write(result)
    except Exception as e:
        st.error(f"Unable to contact RAG: {e}")

# Footer
st.sidebar.markdown("Developed by Muhammad Hassan Butt.")