import streamlit as st
from transformers import pipeline
from PIL import Image
from huggingface_hub import InferenceClient
from openai import OpenAI  # OpenAI client for the "Healthier Alternatives" feature
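# Assumed runtime dependencies: streamlit, transformers (with a torch backend),
# pillow, huggingface_hub, and openai>=1.0.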

# Set page configuration
st.set_page_config(
    page_title="DelishAI - Your Culinary Assistant",
    page_icon="🍽️",
    layout="centered",
    initial_sidebar_state="expanded",
)

def local_css():
    st.markdown(
        """
        <style>
        /* [Your existing CSS styles here] */
        </style>
        """, unsafe_allow_html=True
    )

local_css()  # Apply the CSS

# Hugging Face API key
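# Expected entries in .streamlit/secrets.toml (key names match the lookups in this file;
# the values shown are placeholders):
#   HF_API_KEY = "hf_..."
#   openai = "sk-..."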
API_KEY = st.secrets["HF_API_KEY"]

# Initialize the Hugging Face Inference Client
client = InferenceClient(api_key=API_KEY)

# Load the image classification pipeline
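# st.cache_resource caches the loaded model across Streamlit reruns,
# so the weights are downloaded and initialised only once.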
@st.cache_resource
def load_image_classification_pipeline():
    """ Load the image classification pipeline using a pretrained model. """
    return pipeline("image-classification", model="Shresthadev403/food-image-classification")

pipe_classification = load_image_classification_pipeline()

# Function to generate ingredients using Hugging Face Inference Client
def get_ingredients_qwen(food_name):
    """ Generate a list of ingredients for the given food item using Qwen NLP model. Returns a clean, comma-separated list of ingredients. """
    messages = [
        {
            "role": "user",
            "content": f"List only the main ingredients for {food_name}. "
                       f"Respond in a concise, comma-separated list without any extra text or explanations."
        }
    ]
    try:
        completion = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, max_tokens=50
        )
        generated_text = completion.choices[0].message.content.strip()
        return generated_text
    except Exception as e:
        return f"Error generating ingredients: {e}"

# Initialize the OpenAI client (requires openai>=1.0)
openai_client = OpenAI(api_key=st.secrets["openai"])  # Ensure this key exists in your secrets

# Main content
st.markdown('<div class="title"><h1>DelishAI - Your Culinary Assistant</h1></div>', unsafe_allow_html=True)

# Add banner image
st.image("IR_IMAGE.png", use_container_width=True)

# Sidebar for model information (hidden on small screens)
with st.sidebar:
    st.title("Model Information")
    st.write("**Image Classification Model**")
    st.write("Shresthadev403/food-image-classification")
    st.write("**LLM for Ingredients**")
    st.write("Qwen/Qwen2.5-Coder-32B-Instruct")
    st.markdown("---")
    st.markdown("<p style='text-align: center;'>Developed by Muhammad Hassan Butt.</p>", unsafe_allow_html=True)

# Sample images offered as one-click demo buttons (the paths below are placeholders)
sample_images = {
    "Pizza": "path_to_pizza_image.jpg",
    "Salad": "path_to_salad_image.jpg",
    # Add more sample images as needed
}

# Sample image buttons: store the selection in session state so it survives Streamlit reruns
cols = st.columns(len(sample_images))
for idx, (name, file_path) in enumerate(sample_images.items()):
    with cols[idx]:
        if st.button(f"{name}", key=name):
            st.session_state["sample_image"] = file_path

# File uploader
st.subheader("Upload a food image:")
uploaded_file = st.file_uploader("", type=["jpg", "png", "jpeg"])

# Prefer a fresh upload; otherwise fall back to a previously selected sample image
if uploaded_file is None:
    uploaded_file = st.session_state.get("sample_image")

if uploaded_file is not None:
    # Image.open accepts both file paths (sample images) and uploaded file objects;
    # convert to RGB so the classifier also handles PNGs with an alpha channel
    image = Image.open(uploaded_file).convert("RGB")
    st.image(image, caption="Uploaded Image", use_container_width=True)

    # Classification button
    if st.button("Classify"):
        with st.spinner("Classifying..."):
            # Make predictions
            predictions = pipe_classification(image)
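            # The pipeline returns a list of {'label', 'score'} dicts sorted by confidence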
            # Display only the top prediction
            top_food = predictions[0]['label']
            st.header(f"🍽️ Food: {top_food}")
           
            # Generate and display ingredients for the top prediction
            st.subheader("📝 Ingredients")
            try:
                ingredients = get_ingredients_qwen(top_food)
                st.write(ingredients)
            except Exception as e:
                st.error(f"Error generating ingredients: {e}")

            # **Healthier Alternatives using OpenAI API**
            st.subheader("💡 Healthier Alternatives")
            try:
                response = openai_client.chat.completions.create(
                    model="gpt-4o",  # You can choose the model you prefer
                    messages=[
                        {
                            "role": "system",
                            "content": "You are a helpful assistant specializing in providing healthy alternatives to various dishes."
                        },
                        {
                            "role": "user",
                            "content": f"What's a healthy {top_food} recipe, and why is it healthy?"
                        }
                    ],
                    max_tokens=200,  # Adjust as needed
                    temperature=0.7,  # Adjust creativity level as needed
                )
                result = response.choices[0].message.content.strip()
                st.write(result)
            except Exception as e:
                st.error(f"Unable to generate healthier alternatives: {e}")
else:
    st.info("Please select or upload an image to get started.")