Jangai committed · verified
Commit fc1f6da · 1 parent: 8e6a6ad

Update app.py

Files changed (1): app.py (+18 -7)
app.py CHANGED
@@ -5,14 +5,25 @@ from PIL import Image
 # Initialize the pipeline with the image captioning model
 caption_pipeline = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
 
-def generate_caption(image):
-    # The image is received as a PIL Image, so no need for conversion
-    result = caption_pipeline(image)
-    caption = result[0]["generated_text"]
-    return caption
+# Initialize the pipeline for emotion classification
+emotion_pipeline = pipeline("image-classification", model="RickyIG/emotion_face_image_classification_v3")
+
+def generate_caption_and_emotion(image):
+    # Process the image for captioning
+    caption_result = caption_pipeline(image)
+    caption = caption_result[0]["generated_text"]
+
+    # Process the image for emotion classification
+    emotion_result = emotion_pipeline(image)
+    emotions = ", ".join([f"{res['label']}: {res['score']:.2f}" for res in emotion_result])
+
+    # Combine results
+    combined_result = f"Caption: {caption}\nEmotions: {emotions}"
+    return combined_result
 
 # Setup the Gradio interface
-interface = gr.Interface(fn=generate_caption,
+interface = gr.Interface(fn=generate_caption_and_emotion,
                          inputs=gr.components.Image(type="pil", label="Upload an Image"),
-                         outputs=gr.components.Textbox(label="Generated Caption"))
+                         outputs=gr.components.Textbox(label="Generated Caption and Emotions"))
 interface.launch()
+
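
For reference, a minimal sketch of exercising the updated function outside the Gradio UI. The file name "photo.jpg" and the printed labels/scores are illustrative assumptions, not part of this commit; both pipelines return standard transformers outputs (the captioner a list of dicts with "generated_text", the classifier a list of dicts with "label" and "score").

from PIL import Image

# Hypothetical local test image; any RGB image works here.
image = Image.open("photo.jpg")

# Calls both pipelines and returns the combined string built in app.py.
print(generate_caption_and_emotion(image))
# Expected shape of the output (values below are illustrative only):
# Caption: a man smiling at the camera
# Emotions: happy: 0.93, neutral: 0.04, surprise: 0.02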