SakuraD committed on
Commit
c477a26
·
1 Parent(s): 62e7390
Files changed (2) hide show
  1. README.md +5 -38
  2. app.py +42 -11
README.md CHANGED
@@ -1,46 +1,13 @@
1
  ---
2
  title: Uniformer_image_demo
3
- emoji: 🏢
4
- colorFrom: blue
5
- colorTo: indigo
6
  sdk: gradio
 
7
  app_file: app.py
8
  pinned: false
9
  license: mit
10
  ---
11
 
12
- # Configuration
13
-
14
- `title`: _string_
15
- Display title for the Space
16
-
17
- `emoji`: _string_
18
- Space emoji (emoji-only character allowed)
19
-
20
- `colorFrom`: _string_
21
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
22
-
23
- `colorTo`: _string_
24
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
25
-
26
- `sdk`: _string_
27
- Can be either `gradio`, `streamlit`, or `static`
28
-
29
- `sdk_version` : _string_
30
- Only applicable for `streamlit` SDK.
31
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
32
-
33
- `app_file`: _string_
34
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
35
- Path is relative to the root of the repository.
36
-
37
- `models`: _List[string]_
38
- HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
39
- Will be parsed automatically from your code if not specified here.
40
-
41
- `datasets`: _List[string]_
42
- HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
43
- Will be parsed automatically from your code if not specified here.
44
-
45
- `pinned`: _boolean_
46
- Whether the Space stays on top of your list.
 
1
  ---
2
  title: Uniformer_image_demo
3
+ emoji: 🏃
4
+ colorFrom: pink
5
+ colorTo: green
6
  sdk: gradio
7
+ sdk_version: 3.0.3
8
  app_file: app.py
9
  pinned: false
10
  license: mit
11
  ---
12
 
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py CHANGED
@@ -49,17 +49,48 @@ def inference(img):
49
  prediction = F.softmax(prediction, dim=1).flatten()
50
 
51
  return {imagenet_id_to_classname[str(i)]: float(prediction[i]) for i in range(1000)}
52
-
53
 
54
- inputs = gr.inputs.Image(type='pil')
55
- label = gr.outputs.Label(num_top_classes=5)
56
 
57
- title = "UniFormer-S"
58
- description = "Gradio demo for UniFormer: To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
59
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2201.09450' target='_blank'>UniFormer: Unifying Convolution and Self-attention for Visual Recognition</a> | <a href='https://github.com/Sense-X/UniFormer' target='_blank'>Github Repo</a></p>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
 
61
- gr.Interface(
62
- inference, inputs, outputs=label,
63
- title=title, description=description, article=article,
64
- examples=[['library.jpeg'], ['cat.png'], ['dog.png'], ['panda.png']]
65
- ).launch(enable_queue=True, cache_examples=True)
 
49
  prediction = F.softmax(prediction, dim=1).flatten()
50
 
51
  return {imagenet_id_to_classname[str(i)]: float(prediction[i]) for i in range(1000)}
 
52
 
 
 
53
 
54
+ demo = gr.Blocks()
55
+ with demo:
56
+ gr.Markdown(
57
+ """
58
+ # UniFormer-S
59
+ Gradio demo for UniFormer: To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.
60
+ """
61
+ )
62
+
63
+ with gr.Box():
64
+ with gr.Row():
65
+ with gr.Column():
66
+ with gr.Row():
67
+ input_image = gr.Image(label='Input Image', type='file')
68
+ with gr.Row():
69
+ submit_button = gr.Button('Submit')
70
+ with gr.Column():
71
+ label = gr.Label(num_top_classes=5)
72
+ with gr.Row():
73
+ example_images = gr.Dataset(components=[input_image], samples=[['library.jpeg'], ['cat.png'], ['dog.png'], ['panda.png']])
74
+
75
+ gr.Markdown(
76
+ """
77
+ <p style='text-align: center'><a href='https://arxiv.org/abs/2201.09450' target='_blank'>UniFormer: Unifying Convolution and Self-attention for Visual Recognition</a> | <a href='https://github.com/Sense-X/UniFormer' target='_blank'>Github Repo</a></p>
78
+ """
79
+ )
80
+
81
+ submit_button.click(fn=inference, inputs=input_image, outputs=label)
82
+
83
+
84
+
85
+ # inputs = gr.inputs.Image(type='pil')
86
+ # label = gr.outputs.Label(num_top_classes=5)
87
+
88
+ # title = "UniFormer-S"
89
+ # description = "Gradio demo for UniFormer: To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
90
+ # article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2201.09450' target='_blank'>UniFormer: Unifying Convolution and Self-attention for Visual Recognition</a> | <a href='https://github.com/Sense-X/UniFormer' target='_blank'>Github Repo</a></p>"
91
 
92
+ # gr.Interface(
93
+ # inference, inputs, outputs=label,
94
+ # title=title, description=description, article=article,
95
+ # examples=[['library.jpeg'], ['cat.png'], ['dog.png'], ['panda.png']]
96
+ # ).launch(enable_queue=True, cache_examples=True)