import base64

import requests
import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage


# Chat handler adapted from https://www.gradio.app/guides/creating-a-chatbot-fast
def predict(message, history, ingredients, servings, appliances, caloriesmin, caloriesmax,
            detected_ingredients, types_of_food, different_diets, cultures,
            additional_ingredients, pastry_or_not, openai_api_key):
    llm = ChatOpenAI(temperature=1.0, openai_api_key=openai_api_key, model='gpt-3.5-turbo-0613')

    history_langchain_format = []
    history_langchain_format.append(SystemMessage(content=f"""
        Imagine that you are a robust yet friendly chef who helps new cooks learn to cook.
        The cook you are going to help has {ingredients}, {detected_ingredients},
        {additional_ingredients} and {appliances}. They are cooking for {servings} people.
        They want to cook this type of food: {pastry_or_not}.
        The number of calories in the dish should be in the range from {caloriesmin} to {caloriesmax}.
        The only categories of food it should use should be: {types_of_food}.
        The user is on the following diets: {different_diets}.
        The dish must be from this culture: {cultures}.
        Give a small amount of background knowledge about where this dish came from.
        When "Rec Plz" is typed, recommend a good recipe that uses the ingredients and
        appliances on hand but is also easy for beginners to cook.
    """))

    # Convert the chat history (a list of [user, assistant] pairs) to LangChain messages.
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))

    # Append the new user message and call the chat model.
    history_langchain_format.append(HumanMessage(content=message))
    gpt_response = llm(history_langchain_format)
    return gpt_response.content


# def echo_image(input_image_filepath):
#     # This code was copied from Replicate.
#     print(input_image_filepath)
#     output = replicate.run(
#         "kiransom/fll_detic:161277b70ee6ea38847ba2e1c56523dcdf77143ac029d52a795327c70404846e",  # Model ID
#         input={
#             "image": open(input_image_filepath, "rb"),
#         }
#     )
#     print(output["output_path"])
#     print(output["predictions_set"])
#     return (output["output_path"], output["predictions_set"])


# Encode the image file as a base64 string (base64 is a compact text encoding of the image bytes).
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')


# Send the image to the vision model and return its list of detected ingredients.
def process_image(image_path, openai_api_key):
    base64_image = encode_image(image_path)
    question = ("This is an image of ingredients available for cooking. Please list all the "
                "ingredients and the approximate quantity of each ingredient in a numbered list.")
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}"
    }
    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": question},
                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
                ]
            }
        ],
        "max_tokens": 300
    }
    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    print(response)
    return response.json()["choices"][0]["message"]["content"]


with gr.Blocks() as demo:
    with gr.Row():
        openai_api_key = gr.Textbox(
            placeholder="Type in this box first.",
            label="Please enter your OpenAI key. If you do not have a key, please visit this site: "
                  "https://platform.openai.com/signup/ . This textbox may duplicate. "
                  "If so, DO NOT click the second textbox.")
    with gr.Row():
        detected_ingredients = gr.Text()
        gr.Interface(fn=process_image,
                     inputs=[gr.Image(width=400, height=400, type="filepath"), openai_api_key],
                     outputs=detected_ingredients)
    with gr.Row():
        ingredients = gr.CheckboxGroup(choices=["Salt", "Pepper", "Flour", "Oil", "Pasta", "Rice"],
                                       label="Common ingredients")
    with gr.Row():
        appliances = gr.CheckboxGroup(choices=["stove", "blender", "oven", "pots", "air fryer",
                                               "pressure cooker", "microwave"],
                                      label="Appliances")
    with gr.Row():
        servings = gr.Slider(1, 20, step=1, label="Servings")
    with gr.Row():
        cultures = gr.Radio(choices=["Italian", "French", "American", "Japanese", "Korean",
                                     "Chinese", "Jewish", "German", "Indian"],
                            label="Cultures")
    with gr.Row():
        caloriesmin = gr.Slider(50, 2000, value=100, step=25, label="Calories Min")
        caloriesmax = gr.Slider(100, 2000, value=1500, step=25, label="Calories Max")
    with gr.Row():
        types_of_food = gr.CheckboxGroup(choices=["fruits", "vegetables", "grains", "protein",
                                                  "starch-rich food", "dairy", "fat"],
                                         label="Types of food you would like to include in your diet")
    with gr.Row():
        additional_ingredients = gr.Textbox(lines=2, label="Additional Ingredients",
                                            placeholder="Please add any additional ingredients the model missed.")
    with gr.Row():
        different_diets = gr.CheckboxGroup(
            choices=["Ketogenic Diet", "Mediterranean Diet", "Paleo Diet", "Whole30 Diet",
                     "Vegan Diet", "Vegetarian Diet", "Raw Food Diet", "Ayurvedic Diet",
                     "Carb Cycling", "Macrobiotic Diet"],
            label="Diets",
            info="Other - if you have another diet, please just enter the foods you are supposed to "
                 "avoid into the Dietary Restrictions textbox and do not select this checkbox.")
    with gr.Row():
        pastry_or_not = gr.Radio(choices=["Pastry", "Other"], label="Pastry")
    with gr.Row():
        gr.ChatInterface(fn=predict,
                         additional_inputs=[ingredients, servings, appliances, caloriesmin,
                                            caloriesmax, detected_ingredients, types_of_food,
                                            different_diets, cultures, additional_ingredients,
                                            pastry_or_not, openai_api_key])

demo.launch(share=False)
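
# Usage note (a sketch, assuming this file is saved as app.py and compatible langchain and
# gradio versions are installed): running `python app.py` starts a local Gradio server and
# prints a local URL (by default http://127.0.0.1:7860) to open in a browser. Passing
# share=True to demo.launch() would additionally create a temporary public share link.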