Update app.py
app.py CHANGED
@@ -7,6 +7,7 @@ import PIL.Image
 import spaces
 import torch
 from transformers import AutoProcessor, BlipForConditionalGeneration
+from typing import Union
 
 DESCRIPTION = "# Image Captioning with LongCap"
 
@@ -16,9 +17,18 @@ model_id = "unography/blip-long-cap"
 processor = AutoProcessor.from_pretrained(model_id)
 model = BlipForConditionalGeneration.from_pretrained(model_id).to(device)
 
+torch.hub.download_url_to_file("http://images.cocodataset.org/val2017/000000039769.jpg", "cats.jpg")
+torch.hub.download_url_to_file(
+    "https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png", "stop_sign.png"
+)
+torch.hub.download_url_to_file(
+    "https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg", "astronaut.jpg"
+)
 
 @spaces.GPU(duration=30)
-def run(image: PIL.Image.Image) -> str:
+def run(image: Union[str, PIL.Image.Image]) -> str:
+    if isinstance(image, str):
+        image = Image.open(image)
     inputs = processor(images=image, return_tensors="pt").to(device)
     out = model.generate(pixel_values=inputs.pixel_values, num_beams=3, max_length=300)
     generated_caption = processor.decode(out[0], skip_special_tokens=True)
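For context, a minimal self-contained sketch of the caption path this hunk produces. The `device` setup and the final return statement are assumptions (the diff does not show them), and `PIL.Image.open` is spelled out here because the file imports `PIL.Image` rather than `from PIL import Image`; everything else mirrors the lines shown above.

# Sketch only: `device` and the final return are assumptions; the rest follows the diff.
from typing import Union

import PIL.Image
import torch
from transformers import AutoProcessor, BlipForConditionalGeneration

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # assumed
model_id = "unography/blip-long-cap"
processor = AutoProcessor.from_pretrained(model_id)
model = BlipForConditionalGeneration.from_pretrained(model_id).to(device)


def run(image: Union[str, PIL.Image.Image]) -> str:
    # Accept either a file path (as listed in gr.Examples) or a PIL image.
    if isinstance(image, str):
        image = PIL.Image.open(image)
    inputs = processor(images=image, return_tensors="pt").to(device)
    out = model.generate(pixel_values=inputs.pixel_values, num_beams=3, max_length=300)
    return processor.decode(out[0], skip_special_tokens=True)  # assumed return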
@@ -30,6 +40,17 @@ with gr.Blocks(css="style.css") as demo:
     input_image = gr.Image(type="pil")
     run_button = gr.Button("Caption")
     output = gr.Textbox(label="Result")
+    gr.Examples(
+        examples=[
+            "cats.jpg",
+            "stop_sign.png",
+            "astronaut.jpg",
+        ],
+        inputs=input_image,
+        outputs=output,
+        fn=run,
+        cache_examples=os.getenv("CACHE_EXAMPLES") == "1",
+    )
 
     run_button.click(
         fn=run,
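Captions for the examples are only precomputed when the Space sets CACHE_EXAMPLES=1, since `cache_examples` receives the result of the `os.getenv` comparison. A minimal standalone sketch of the same wiring, with a stub caption function standing in for `run` and assuming the three example files already exist on disk (app.py downloads them at startup):

import os

import gradio as gr


def caption(image) -> str:
    # Stub standing in for run(); keeps the sketch runnable without the model.
    return "a placeholder caption"


with gr.Blocks() as demo:
    input_image = gr.Image(type="pil")
    run_button = gr.Button("Caption")
    output = gr.Textbox(label="Result")
    # cache_examples is True only when CACHE_EXAMPLES=1 is set in the environment;
    # in that case Gradio runs `fn` over the examples once and reuses the outputs.
    gr.Examples(
        examples=["cats.jpg", "stop_sign.png", "astronaut.jpg"],
        inputs=input_image,
        outputs=output,
        fn=caption,
        cache_examples=os.getenv("CACHE_EXAMPLES") == "1",
    )
    run_button.click(fn=caption, inputs=input_image, outputs=output)

if __name__ == "__main__":
    demo.launch()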