Commit 09b15be · Fuyu demo

Files changed:
- .gitattributes +38 -0
- README.md +12 -0
- app.py +93 -0
- assets/captioning_example_1.png +0 -0
- assets/captioning_example_2.png +3 -0
- assets/vqa_example_1.png +3 -0
- assets/vqa_example_2.png +3 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,38 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/captioning_example_2.png filter=lfs diff=lfs merge=lfs -text
+assets/vqa_example_1.png filter=lfs diff=lfs merge=lfs -text
+assets/vqa_example_2.png filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,12 @@
+---
+title: Fuyu Multimodal
+emoji: π
+colorFrom: red
+colorTo: yellow
+sdk: gradio
+sdk_version: 3.49.0
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,93 @@
+import gradio as gr
+import os
+import torch
+from transformers import FuyuForCausalLM, AutoTokenizer
+from transformers.models.fuyu.processing_fuyu import FuyuProcessor
+from transformers.models.fuyu.image_processing_fuyu import FuyuImageProcessor
+
+model_id = "adept/fuyu-8b"
+revision = "refs/pr/3"
+dtype = torch.bfloat16
+device = "cuda"
+
+tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
+model = FuyuForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=dtype, revision=revision)
+processor = FuyuProcessor(image_processor=FuyuImageProcessor(), tokenizer=tokenizer)
+
+caption_prompt = "Generate a coco-style caption.\n"
+
+def predict(image, prompt):
+    # image = image.convert('RGB')
+    model_inputs = processor(text=prompt, images=[image])
+    model_inputs = {k: v.to(dtype=dtype if torch.is_floating_point(v) else v.dtype, device=device) for k, v in model_inputs.items()}
+
+    generation_output = model.generate(**model_inputs, max_new_tokens=40)
+    prompt_len = model_inputs["input_ids"].shape[-1]
+    return tokenizer.decode(generation_output[0][prompt_len:], skip_special_tokens=True)
+
+def caption(image):
+    return predict(image, caption_prompt)
+
+def set_example_image(example: list) -> dict:
+    return gr.Image.update(value=example[0])
+
+
+
+css = """
+  #mkd {
+    height: 500px;
+    overflow: auto;
+    border: 1px solid #ccc;
+  }
+"""
+
+with gr.Blocks(css=css) as demo:
+    gr.HTML(
+        """
+        <h1 id="title">Fuyu Multimodal Demo</h1>
+        <h3><a href="https://hf.co/adept/fuyu-8b">Fuyu-8B</a> is a multimodal model that supports a variety of tasks combining text and image prompts.</h3>
+        For example, you can use it for captioning by asking it to describe an image. You can also ask it questions about an image, a task known as Visual Question Answering, or VQA. This demo lets you explore captioning and VQA, with more tasks coming soon :)
+        Learn more about the model in <a href="https://www.adept.ai/blog/fuyu-8b">our blog post</a>.
+        <strong>Note: This is a raw model release. We have not added further instruction-tuning, postprocessing or sampling strategies to control for undesirable outputs. You should expect to have to fine-tune the model for your use case!</strong>
+        <h3>Play with Fuyu-8B in this demo!</h3>
+        """
+    )
+    with gr.Tab("Visual Question Answering"):
+        with gr.Row():
+            with gr.Column():
+                image_input = gr.Image(label="Upload your Image")
+                text_input = gr.Textbox(label="Ask a Question")
+            vqa_output = gr.Textbox(label="Output")
+
+        vqa_btn = gr.Button("Answer Visual Question")
+
+        gr.Examples(
+            [["assets/vqa_example_1.png", "How is this made?"], ["assets/vqa_example_2.png", "What is this flower and where is its origin?"]],
+            inputs=[image_input, text_input],
+            outputs=[vqa_output],
+            fn=predict,
+            cache_examples=True,
+            label='Click on any example below to get VQA results quickly'
+        )
+
+
+    with gr.Tab("Image Captioning"):
+        with gr.Row():
+            captioning_input = gr.Image(label="Upload your Image")
+            captioning_output = gr.Textbox(label="Output")
+        captioning_btn = gr.Button("Generate Caption")
+
+        gr.Examples(
+            [["assets/captioning_example_1.png"], ["assets/captioning_example_2.png"]],
+            inputs=[captioning_input],
+            outputs=[captioning_output],
+            fn=caption,
+            cache_examples=True,
+            label='Click on any example below to get captioning results quickly'
+        )
+
+    captioning_btn.click(fn=caption, inputs=captioning_input, outputs=captioning_output)
+    vqa_btn.click(fn=predict, inputs=[image_input, text_input], outputs=vqa_output)
+
+
+demo.launch(server_name="0.0.0.0")
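For reference, the inference flow added above can also be exercised outside Gradio once the model has loaded. A minimal sketch, assuming app.py has been imported (or run interactively) so `model`, `processor`, and `predict` exist, and using one of the example images bundled in this commit; the PIL import and the chosen question are illustrative, not part of the commit:

    # Minimal sketch: call the predict() helper from app.py directly.
    # Assumes the bundled example image exists at this relative path.
    from PIL import Image

    image = Image.open("assets/vqa_example_1.png").convert("RGB")
    answer = predict(image, "What is this flower and where is its origin?\n")
    print(answer)

Note that `predict` trims the echoed prompt by slicing the generated sequence at `input_ids.shape[-1]`, so only newly generated tokens are decoded.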
assets/captioning_example_1.png
ADDED
[binary image file]

assets/captioning_example_2.png
ADDED
[binary image file, stored with Git LFS]

assets/vqa_example_1.png
ADDED
[binary image file, stored with Git LFS]

assets/vqa_example_2.png
ADDED
[binary image file, stored with Git LFS]
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+git+https://github.com/huggingface/transformers.git@add_fuyu_model
+accelerate
+torch==2.0.1
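Since requirements.txt pins transformers to the `add_fuyu_model` branch rather than a released version, a quick sanity check after installation is to confirm that the Fuyu classes used in app.py resolve; a minimal sketch using only the import paths that already appear in app.py:

    # Sanity check: these imports only succeed on a transformers build
    # that includes the Fuyu model (here, the pinned add_fuyu_model branch).
    from transformers import FuyuForCausalLM, AutoTokenizer
    from transformers.models.fuyu.processing_fuyu import FuyuProcessor
    print("Fuyu classes available:", FuyuForCausalLM.__name__, FuyuProcessor.__name__)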