Update app.py
app.py (CHANGED)
@@ -1,22 +1,18 @@
 import gradio as gr
-from PIL import Image
 from transformers import pipeline
+from PIL import Image
 
 # Initialize the pipeline with the image captioning model
 caption_pipeline = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
 
 def generate_caption(image):
-    #
-    image = Image.open(image).convert("RGB")
-
-    # Use the pipeline to generate a caption
+    # The image is received as a PIL Image, so no need for conversion
     result = caption_pipeline(image)
     caption = result[0]["generated_text"]
-
     return caption
 
 # Setup the Gradio interface
 interface = gr.Interface(fn=generate_caption,
-                         inputs=gr.inputs.Image(
+                         inputs=gr.inputs.Image(label="Upload an Image", type="pil"),
                          outputs=gr.outputs.Textbox(label="Generated Caption"))
 interface.launch()
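
Note that gr.inputs.Image and gr.outputs.Textbox come from the older Gradio namespaces, which have since been deprecated and removed; on current Gradio releases the same interface is built with the top-level gr.Image and gr.Textbox components. A minimal sketch of the updated app.py under that assumption (newer Gradio API, same model and pipeline as in the diff):

import gradio as gr
from transformers import pipeline

# Initialize the pipeline with the image captioning model
caption_pipeline = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")

def generate_caption(image):
    # The image arrives as a PIL Image because of type="pil" on the input component
    result = caption_pipeline(image)
    return result[0]["generated_text"]

# Setup the Gradio interface with the current component classes
interface = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(label="Upload an Image", type="pil"),
    outputs=gr.Textbox(label="Generated Caption"),
)
interface.launch()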